ext | sha | content
---|---|---|
py | b410af215352536830acfc5817a770647856d05c | #
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import threading
class TimeoutError(Exception):
pass
class Datasrc(object):
def __init__(self, length=20):
self.length = length
self.cursor = -1
self.datums = {}
self.history = []
self.sortedkeys = []
self.cond = threading.Condition()
self.newdata = threading.Event()
def __getitem__(self, key):
with self.cond:
if isinstance(key, int):
return self.datums[self.sortedkeys[key]]
else:
return self.datums[key]
def __setitem__(self, key, value):
with self.cond:
if key in self.history:
self.history.remove(key)
self.history.append(key)
self.datums[key] = value
self._eject_old()
self.newdata.set()
self.cond.notify()
def __len__(self):
with self.cond:
return len(self.sortedkeys)
def _eject_old(self):
while len(self.history) > self.length:
oldest = self.history.pop(0)
del self.datums[oldest]
self.sortedkeys = list(self.datums.keys())
self.sortedkeys.sort()
def index(self, key):
with self.cond:
return self.sortedkeys.index(key)
def index2key(self, index):
with self.cond:
return self.sortedkeys[index]
def youngest(self):
return self.datums[self.history[-1]]
def oldest(self):
return self.datums[self.history[0]]
def keys(self, sort='alpha'):
with self.cond:
if sort == 'alpha':
return self.sortedkeys
elif sort == 'time':
return self.history
else:
return self.datums.keys()
def has_key(self, key):
with self.cond:
return key in self.datums
def wait(self, timeout=None):
with self.cond:
self.cond.wait(timeout=timeout)
if not self.newdata.is_set():
raise TimeoutError("Timed out waiting for datum")
self.newdata.clear()
return self.history[-1]
def get_bufsize(self):
with self.cond:
return self.length
def set_bufsize(self, length):
with self.cond:
self.length = length
self._eject_old()
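# --- Illustrative usage sketch (editor addition, not part of the original module;
# --- the keys and values below are arbitrary examples).
if __name__ == '__main__':
    src = Datasrc(length=3)
    for i in range(5):
        src['frame%d' % i] = i        # entries beyond the buffer length are ejected
    print(src.keys(sort='time'))      # insertion order of the 3 surviving keys
    print(src.youngest(), src.oldest())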
#END
|
py | b410af84a711e8f68a2eefcb6872285d0bc739c5 | import pickle
import sys
from lasagne.layers import get_output
from lasagne.layers import BatchNormLayer
from lasagne.layers import ConcatLayer
from lasagne.layers import Conv2DLayer
from lasagne.layers import DenseLayer
from lasagne.layers import InputLayer
from lasagne.layers import RecurrentLayer
from lasagne.layers import Layer
from lasagne.nonlinearities import identity
from lasagne.nonlinearities import softmax
from lasagne.nonlinearities import sigmoid
from lasagne.objectives import categorical_crossentropy
from lasagne.objectives import aggregate
from lasagne.updates import nesterov_momentum
from unittest.mock import Mock
from unittest.mock import patch
import numpy as np
import pytest
from sklearn.base import clone
from sklearn.datasets import make_classification
from sklearn.datasets import make_regression
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
import theano
import theano.tensor as T
floatX = theano.config.floatX
class TestLayers:
@pytest.fixture
def layers(self):
from nolearn.lasagne.base import Layers
return Layers([('one', 1), ('two', 2), ('three', 3)])
def test_getitem_with_key(self, layers):
assert layers['one'] == 1
def test_getitem_with_index(self, layers):
assert layers[0] == 1
def test_getitem_with_slice(self, layers):
from nolearn.lasagne.base import Layers
sliced = layers[:2]
assert isinstance(sliced, Layers)
assert sliced.keys() == ['one', 'two']
assert sliced.values() == [1, 2]
def test_keys_returns_list(self, layers):
assert layers.keys() == ['one', 'two', 'three']
def test_values_returns_list(self, layers):
assert layers.values() == [1, 2, 3]
class TestFunctionalToy:
def classif(self, NeuralNet, X, y):
l = InputLayer(shape=(None, X.shape[1]))
l = DenseLayer(l, num_units=len(np.unique(y)), nonlinearity=softmax)
net = NeuralNet(l, update_learning_rate=0.01)
return net.fit(X, y)
def classif_no_valid(self, NeuralNet, X, y):
from nolearn.lasagne import TrainSplit
l = InputLayer(shape=(None, X.shape[1]))
l = DenseLayer(l, num_units=len(np.unique(y)), nonlinearity=softmax)
net = NeuralNet(
l, update_learning_rate=0.01, train_split=TrainSplit(0))
return net.fit(X, y)
def regr(self, NeuralNet, X, y):
l = InputLayer(shape=(None, X.shape[1]))
l = DenseLayer(l, num_units=y.shape[1], nonlinearity=None)
net = NeuralNet(l, regression=True, update_learning_rate=0.01)
return net.fit(X, y)
def test_classif_two_classes(self, NeuralNet):
X, y = make_classification()
X = X.astype(floatX)
y = y.astype(np.int32)
self.classif(NeuralNet, X, y)
def test_classif_ten_classes(self, NeuralNet):
X, y = make_classification(n_classes=10, n_informative=10)
X = X.astype(floatX)
y = y.astype(np.int32)
self.classif(NeuralNet, X, y)
def test_classif_no_valid_two_classes(self, NeuralNet):
X, y = make_classification()
X = X.astype(floatX)
y = y.astype(np.int32)
self.classif_no_valid(NeuralNet, X, y)
def test_regr_one_target(self, NeuralNet):
X, y = make_regression()
X = X.astype(floatX)
y = y.reshape(-1, 1).astype(np.float32)
self.regr(NeuralNet, X, y)
def test_regr_ten_targets(self, NeuralNet):
X, y = make_regression(n_targets=10)
X = X.astype(floatX)
y = y.astype(floatX)
self.regr(NeuralNet, X, y)
class TestFunctionalMNIST:
def test_accuracy(self, net_fitted, mnist, X_test, y_pred):
X, y = mnist
y_test = y[60000:]
acc = accuracy_score(y_pred, y_test)
assert acc > 0.85
assert net_fitted.score(X_test, y_test) == acc
def test_train_history(self, net_fitted):
history = net_fitted.train_history_
assert len(history) == 2 # due to early stopping
assert history[1]['valid_accuracy'] > 0.85
assert history[1]['valid_accuracy'] > history[0]['valid_accuracy']
assert set(history[0].keys()) == set([
'dur', 'epoch', 'train_loss', 'train_loss_best',
'valid_loss', 'valid_loss_best', 'valid_accuracy',
])
def test_early_stopping(self, net_fitted):
early_stopping = net_fitted.on_epoch_finished[0]
assert early_stopping.train_history == net_fitted.train_history_
def test_pickle(self, net_fitted, X_test, y_pred):
recursionlimit = sys.getrecursionlimit()
sys.setrecursionlimit(10000)
pickled = pickle.dumps(net_fitted, -1)
net_loaded = pickle.loads(pickled)
assert np.array_equal(net_loaded.predict(X_test), y_pred)
sys.setrecursionlimit(recursionlimit)
def test_load_params_from_net(self, net, net_fitted, X_test, y_pred):
net_loaded = clone(net)
net_loaded.load_params_from(net_fitted)
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_load_params_from_params_values(self, net, net_fitted,
X_test, y_pred):
net_loaded = clone(net)
net_loaded.load_params_from(net_fitted.get_all_params_values())
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_save_params_to_path(self, net_fitted, X_test, y_pred):
path = '/tmp/test_lasagne_functional_mnist.params'
net_fitted.save_params_to(path)
net_loaded = clone(net_fitted)
net_loaded.load_params_from(path)
assert np.array_equal(net_loaded.predict(X_test), y_pred)
def test_load_params_from_message(self, net, net_fitted, capsys):
net2 = clone(net)
net2.verbose = 1
net2.load_params_from(net_fitted)
out = capsys.readouterr()[0]
message = """\
Loaded parameters to layer 'conv1' (shape 8x1x5x5).
Loaded parameters to layer 'conv1' (shape 8).
Loaded parameters to layer 'conv2' (shape 8x8x5x5).
Loaded parameters to layer 'conv2' (shape 8).
Loaded parameters to layer 'hidden1' (shape 128x128).
Loaded parameters to layer 'hidden1' (shape 128).
Loaded parameters to layer 'output' (shape 128x10).
Loaded parameters to layer 'output' (shape 10).
"""
assert out == message
def test_partial_fit(self, net, X_train, y_train):
net2 = clone(net)
assert net2.partial_fit(X_train, y_train) is net2
net2.partial_fit(X_train, y_train)
history = net2.train_history_
assert len(history) == 2
assert history[1]['valid_accuracy'] > 0.85
def test_lasagne_functional_grid_search(mnist, monkeypatch):
# Make sure that we can satisfy the grid search interface.
from nolearn.lasagne import NeuralNet
nn = NeuralNet(
layers=[],
)
param_grid = {
'more_params': [{'hidden_num_units': 100}, {'hidden_num_units': 200}],
'update_momentum': [0.9, 0.98],
}
X, y = mnist
vars_hist = []
def fit(self, X, y):
vars_hist.append(vars(self).copy())
return self
with patch.object(NeuralNet, 'fit', autospec=True) as mock_fit:
mock_fit.side_effect = fit
with patch('nolearn.lasagne.NeuralNet.score') as score:
score.return_value = 0.3
gs = GridSearchCV(nn, param_grid, cv=2, refit=False, verbose=4)
gs.fit(X, y)
assert [entry['update_momentum'] for entry in vars_hist] == [
0.9, 0.9, 0.98, 0.98] * 2
assert [entry['more_params'] for entry in vars_hist] == (
[{'hidden_num_units': 100}] * 4 +
[{'hidden_num_units': 200}] * 4
)
def test_clone():
from nolearn.lasagne import NeuralNet
from nolearn.lasagne import BatchIterator
from nolearn.lasagne import objective
params = dict(
layers=[
('input', InputLayer),
('hidden', DenseLayer),
('output', DenseLayer),
],
input_shape=(100, 784),
output_num_units=10,
output_nonlinearity=softmax,
more_params={
'hidden_num_units': 100,
},
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
regression=False,
objective=objective,
objective_loss_function=categorical_crossentropy,
batch_iterator_train=BatchIterator(batch_size=100),
y_tensor_type=T.ivector,
use_label_encoder=False,
on_epoch_finished=None,
on_training_finished=None,
max_epochs=100,
eval_size=0.1, # BBB
check_input=True,
verbose=0,
)
nn = NeuralNet(**params)
nn2 = clone(nn)
params1 = nn.get_params()
params2 = nn2.get_params()
for ignore in (
'batch_iterator_train',
'batch_iterator_test',
'output_nonlinearity',
'loss',
'objective',
'train_split',
'eval_size',
'X_tensor_type',
'on_epoch_finished',
'on_batch_finished',
'on_training_started',
'on_training_finished',
'custom_scores',
'scores_train',
'scores_valid',
):
for par in (params, params1, params2):
par.pop(ignore, None)
assert params == params1 == params2
def test_lasagne_functional_regression(boston):
from nolearn.lasagne import NeuralNet
X, y = boston
layer1 = InputLayer(shape=(128, 13))
layer2 = DenseLayer(layer1, num_units=100)
output = DenseLayer(layer2, num_units=1, nonlinearity=identity)
nn = NeuralNet(
layers=output,
update_learning_rate=0.01,
update_momentum=0.1,
regression=True,
max_epochs=50,
)
nn.fit(X[:300], y[:300])
assert mean_absolute_error(nn.predict(X[300:]), y[300:]) < 3.0
assert r2_score(nn.predict(X[300:]), y[300:]) == nn.score(X[300:], y[300:])
class TestDefaultObjective:
@pytest.fixture
def get_output(self, monkeypatch):
from nolearn.lasagne import base
get_output_mock = Mock()
monkeypatch.setattr(base, 'get_output', get_output_mock)
return get_output_mock
@pytest.fixture
def objective(self):
from nolearn.lasagne.base import objective
return objective
def test_with_defaults(self, objective, get_output):
loss_function, target = Mock(), Mock()
loss_function.return_value = np.array([1, 2, 3])
result = objective(
[1, 2, 3], loss_function=loss_function, target=target)
assert result == 2.0
get_output.assert_called_with(3, deterministic=False)
loss_function.assert_called_with(get_output.return_value, target)
def test_with_get_output_kw(self, objective, get_output):
loss_function, target = Mock(), Mock()
loss_function.return_value = np.array([1, 2, 3])
objective(
[1, 2, 3], loss_function=loss_function, target=target,
get_output_kw={'i_was': 'here'},
)
get_output.assert_called_with(3, deterministic=False, i_was='here')
class TestTrainSplit:
@pytest.fixture
def TrainSplit(self):
from nolearn.lasagne import TrainSplit
return TrainSplit
def test_reproducable(self, TrainSplit, nn):
X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
X_train1, X_valid1, y_train1, y_valid1 = TrainSplit(0.2)(
X, y, nn)
X_train2, X_valid2, y_train2, y_valid2 = TrainSplit(0.2)(
X, y, nn)
assert np.all(X_train1 == X_train2)
assert np.all(y_valid1 == y_valid2)
def test_eval_size_zero(self, TrainSplit, nn):
X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
X_train, X_valid, y_train, y_valid = TrainSplit(0.0)(
X, y, nn)
assert len(X_train) == len(X)
assert len(y_train) == len(y)
assert len(X_valid) == 0
assert len(y_valid) == 0
def test_eval_size_half(self, TrainSplit, nn):
X, y = np.random.random((100, 10)), np.repeat([0, 1, 2, 3], 25)
X_train, X_valid, y_train, y_valid = TrainSplit(0.51)(
X, y, nn)
assert len(X_train) + len(X_valid) == 100
assert len(y_train) + len(y_valid) == 100
assert len(X_train) > 45
def test_regression(self, TrainSplit, nn):
X = np.random.random((100, 10))
y = np.random.random((100))
nn.regression = True
X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
X, y, nn)
assert len(X_train) == len(y_train) == 80
assert len(X_valid) == len(y_valid) == 20
def test_stratified(self, TrainSplit, nn):
X = np.random.random((100, 10))
y = np.hstack([np.repeat([0, 0, 0], 25), np.repeat([1], 25)])
X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
X, y, nn)
assert y_train.sum() == 0.8 * 25
assert y_valid.sum() == 0.2 * 25
def test_not_stratified(self, TrainSplit, nn):
X = np.random.random((100, 10))
y = np.hstack([np.repeat([0, 0, 0], 25), np.repeat([1], 25)])
X_train, X_valid, y_train, y_valid = TrainSplit(0.2, stratify=False)(
X, y, nn)
assert y_train.sum() == 25
assert y_valid.sum() == 0
def test_X_is_dict(self, TrainSplit, nn):
X = {
'1': np.random.random((100, 10)),
'2': np.random.random((100, 10)),
}
y = np.repeat([0, 1, 2, 3], 25)
X_train, X_valid, y_train, y_valid = TrainSplit(0.2)(
X, y, nn)
assert len(X_train['1']) == len(X_train['2']) == len(y_train) == 80
assert len(X_valid['1']) == len(X_valid['2']) == len(y_valid) == 20
def test_X_is_dict_eval_size_0(self, TrainSplit, nn):
X = {
'1': np.random.random((100, 10)),
'2': np.random.random((100, 10)),
}
y = np.repeat([0, 1, 2, 3], 25)
X_train, X_valid, y_train, y_valid = TrainSplit(0)(
X, y, nn)
assert len(X_train['1']) == len(X_train['2']) == len(y_train) == 100
assert len(X_valid['1']) == len(X_valid['2']) == len(y_valid) == 0
class TestTrainTestSplitBackwardCompatibility:
@pytest.fixture
def LegacyNet(self, NeuralNet):
class LegacyNet(NeuralNet):
def train_test_split(self, X, y, eval_size):
self.__call_args__ = (X, y, eval_size)
split = int(X.shape[0] * eval_size)
return X[:split], X[split:], y[:split], y[split:]
return LegacyNet
def test_legacy_eval_size(self, NeuralNet):
net = NeuralNet([], eval_size=0.3, max_epochs=0)
assert net.train_split.eval_size == 0.3
def test_legacy_method_default_eval_size(self, LegacyNet):
net = LegacyNet([], max_epochs=0)
X, y = np.ones((10, 3)), np.zeros(10)
net.train_loop(X, y)
assert net.__call_args__ == (X, y, 0.2)
def test_legacy_method_given_eval_size(self, LegacyNet):
net = LegacyNet([], eval_size=0.3, max_epochs=0)
X, y = np.ones((10, 3)), np.zeros(10)
net.train_loop(X, y)
assert net.__call_args__ == (X, y, 0.3)
class TestBatchIterator:
@pytest.fixture
def BatchIterator(self):
from nolearn.lasagne import BatchIterator
return BatchIterator
@pytest.fixture
def X(self):
return np.arange(200).reshape((10, 20)).T.astype('float')
@pytest.fixture
def X_dict(self):
return {
'one': np.arange(200).reshape((10, 20)).T.astype('float'),
'two': np.arange(200).reshape((20, 10)).astype('float'),
}
@pytest.fixture
def y(self):
return np.arange(20)
@pytest.mark.parametrize("shuffle", [True, False])
def test_simple_x_and_y(self, BatchIterator, X, y, shuffle):
bi = BatchIterator(2, shuffle=shuffle)(X, y)
batches = list(bi)
assert len(batches) == 10
X0, y0 = batches[0]
assert X0.shape == (2, 10)
assert y0.shape == (2,)
Xt = np.vstack([b[0] for b in batches])
yt = np.hstack([b[1] for b in batches])
assert Xt.shape == X.shape
assert yt.shape == y.shape
np.testing.assert_equal(Xt[:, 0], yt)
if shuffle is False:
np.testing.assert_equal(X[:2], X0)
np.testing.assert_equal(y[:2], y0)
@pytest.mark.parametrize("shuffle", [True, False])
def test_simple_x_no_y(self, BatchIterator, X, shuffle):
bi = BatchIterator(2, shuffle=shuffle)(X)
batches = list(bi)
assert len(batches) == 10
X0, y0 = batches[0]
assert X0.shape == (2, 10)
assert y0 is None
if shuffle is False:
np.testing.assert_equal(X[:2], X0)
@pytest.mark.parametrize("shuffle", [True, False])
def test_X_is_dict(self, BatchIterator, X_dict, shuffle):
bi = BatchIterator(2, shuffle=shuffle)(X_dict)
batches = list(bi)
assert len(batches) == 10
X0, y0 = batches[0]
assert X0['one'].shape == (2, 10)
assert X0['two'].shape == (2, 10)
assert y0 is None
Xt1 = np.vstack([b[0]['one'] for b in batches])
Xt2 = np.vstack([b[0]['two'] for b in batches])
assert Xt1.shape == X_dict['one'].shape
assert Xt2.shape == X_dict['two'].shape
np.testing.assert_equal(Xt1[:, 0], Xt2[:, 0] / 10)
if shuffle is False:
np.testing.assert_equal(X_dict['one'][:2], X0['one'])
np.testing.assert_equal(X_dict['two'][:2], X0['two'])
def test_shuffle_no_copy(self, BatchIterator, X, y):
bi = BatchIterator(2, shuffle=True)(X, y)
X0, y0 = list(bi)[0]
assert X0.base is X # make sure X0 is a view
class TestCheckForUnusedKwargs:
def test_okay(self, NeuralNet):
net = NeuralNet(
layers=[('input', Mock), ('mylayer', Mock)],
input_shape=(10, 10),
mylayer_hey='hey',
update_foo=1,
update_bar=2,
)
net._create_iter_funcs = lambda *args: (1, 2, 3)
net.initialize()
def test_unused(self, NeuralNet):
net = NeuralNet(
layers=[('input', Mock), ('mylayer', Mock)],
input_shape=(10, 10),
mylayer_hey='hey',
yourlayer_ho='ho',
update_foo=1,
update_bar=2,
)
net._create_iter_funcs = lambda *args: (1, 2, 3)
with pytest.raises(ValueError) as err:
net.initialize()
assert str(err.value) == 'Unused kwarg: yourlayer_ho'
class TestInitializeLayers:
def test_initialization_with_layer_instance(self, NeuralNet):
layer1 = InputLayer(shape=(128, 13)) # name will be assigned
layer2 = DenseLayer(layer1, name='output', num_units=2) # has name
nn = NeuralNet(layers=layer2)
out = nn.initialize_layers()
assert nn.layers_['output'] == layer2 == out[0]
assert nn.layers_['input0'] == layer1
def test_initialization_with_layer_instance_bad_params(self, NeuralNet):
layer = DenseLayer(InputLayer(shape=(128, 13)), num_units=2)
nn = NeuralNet(layers=layer, dense1_num_units=3)
with pytest.raises(ValueError):
nn.initialize_layers()
def test_initialization_with_tuples(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
(input, {'shape': (10, 10), 'name': 'input'}),
(hidden1, {'some': 'param', 'another': 'param'}),
(hidden2, {}),
(output, {'name': 'output'}),
],
input_shape=(10, 10),
mock1_some='iwin',
)
out = nn.initialize_layers(nn.layers)
input.assert_called_with(
name='input', shape=(10, 10))
assert nn.layers_['input'] is input.return_value
hidden1.assert_called_with(
incoming=input.return_value, name='mock1',
some='iwin', another='param')
assert nn.layers_['mock1'] is hidden1.return_value
hidden2.assert_called_with(
incoming=hidden1.return_value, name='mock2')
assert nn.layers_['mock2'] is hidden2.return_value
output.assert_called_with(
incoming=hidden2.return_value, name='output')
assert out[0] is nn.layers_['output']
def test_initializtion_with_tuples_resolve_layers(self, NeuralNet):
nn = NeuralNet(
layers=[
('lasagne.layers.InputLayer', {'shape': (None, 10)}),
('lasagne.layers.DenseLayer', {'num_units': 33}),
],
)
out, = nn.initialize_layers(nn.layers)
assert out.num_units == 33
def test_initialization_legacy(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
('input', input),
('hidden1', hidden1),
('hidden2', hidden2),
('output', output),
],
input_shape=(10, 10),
hidden1_some='param',
)
out = nn.initialize_layers(nn.layers)
input.assert_called_with(
name='input', shape=(10, 10))
assert nn.layers_['input'] is input.return_value
hidden1.assert_called_with(
incoming=input.return_value, name='hidden1', some='param')
assert nn.layers_['hidden1'] is hidden1.return_value
hidden2.assert_called_with(
incoming=hidden1.return_value, name='hidden2')
assert nn.layers_['hidden2'] is hidden2.return_value
output.assert_called_with(
incoming=hidden2.return_value, name='output')
assert out[0] is nn.layers_['output']
def test_initializtion_legacy_resolve_layers(self, NeuralNet):
nn = NeuralNet(
layers=[
('input', 'lasagne.layers.InputLayer'),
('output', 'lasagne.layers.DenseLayer'),
],
input_shape=(None, 10),
output_num_units=33,
)
out, = nn.initialize_layers(nn.layers)
assert out.num_units == 33
def test_initialization_legacy_with_unicode_names(self, NeuralNet):
# Test whether legacy initialization is triggered; if not,
# raises error.
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(3)]
nn = NeuralNet(
layers=[
(u'input', input),
(u'hidden1', hidden1),
(u'hidden2', hidden2),
(u'output', output),
],
input_shape=(10, 10),
hidden1_some='param',
)
nn.initialize_layers()
def test_diamond(self, NeuralNet):
input = Mock(__name__='InputLayer', __bases__=(InputLayer,))
hidden1, hidden2, concat, output = [
Mock(__name__='MockLayer', __bases__=(Layer,)) for i in range(4)]
nn = NeuralNet(
layers=[
('input', input),
('hidden1', hidden1),
('hidden2', hidden2),
('concat', concat),
('output', output),
],
input_shape=(10, 10),
hidden2_incoming='input',
concat_incomings=['hidden1', 'hidden2'],
)
nn.initialize_layers(nn.layers)
input.assert_called_with(name='input', shape=(10, 10))
hidden1.assert_called_with(incoming=input.return_value, name='hidden1')
hidden2.assert_called_with(incoming=input.return_value, name='hidden2')
concat.assert_called_with(
incomings=[hidden1.return_value, hidden2.return_value],
name='concat'
)
output.assert_called_with(incoming=concat.return_value, name='output')
def test_initialization_with_mask_input(self, NeuralNet):
nn = NeuralNet(
layers=[
(InputLayer, {'shape': (None, 20, 32), 'name': 'l_in'}),
(InputLayer, {'shape': (None, 20), 'name': 'l_mask'}),
(RecurrentLayer, {'incoming': 'l_in',
'mask_input': 'l_mask',
'num_units': 2,
'name': 'l_rec'}),
])
nn.initialize_layers()
assert nn.layers_['l_rec'].mask_incoming_index == 1
def test_legacy_initialization_with_mask_input(self, NeuralNet):
nn = NeuralNet(
layers=[
('l_in', InputLayer),
('l_mask', InputLayer),
('l_rec', RecurrentLayer),
],
l_in_shape=(None, 20, 32),
l_in_name='l_in',
l_mask_shape=(None, 20),
l_mask_name='l_mask',
l_rec_incoming='l_in',
l_rec_mask_input='l_mask',
l_rec_num_units=2,
l_rec_name='l_rec',
)
nn.initialize_layers()
assert nn.layers_['l_rec'].mask_incoming_index == 1
class TestCheckGoodInput:
@pytest.fixture
def check_good_input(self, nn):
return nn._check_good_input
@pytest.fixture
def X(self):
return np.arange(100).reshape(10, 10).astype(floatX)
@pytest.fixture
def y(self):
return np.arange(10).astype(np.int32)
@pytest.fixture
def y_regr(self):
return np.arange(10).reshape(-1, 1).astype(floatX)
def test_X_OK(self, check_good_input, X):
assert check_good_input(X) == (X, None)
def test_X_and_y_OK(self, check_good_input, X, y):
assert check_good_input(X, y) == (X, y)
def test_X_and_y_OK_regression(self, nn, check_good_input, X, y_regr):
nn.regression = True
assert check_good_input(X, y_regr) == (X, y_regr)
def test_X_and_y_length_mismatch(self, check_good_input, X, y):
with pytest.raises(ValueError):
check_good_input(
X[:9],
y
)
def test_X_dict_and_y_length_mismatch(self, check_good_input, X, y):
with pytest.raises(ValueError):
check_good_input(
{'one': X, 'two': X},
y[:9],
)
def test_X_dict_length_mismatch(self, check_good_input, X):
with pytest.raises(ValueError):
check_good_input({
'one': X,
'two': X[:9],
})
def test_y_regression_1dim(self, nn, check_good_input, X, y_regr):
y = y_regr.reshape(-1)
nn.regression = True
X1, y1 = check_good_input(X, y)
assert (X1 == X).all()
assert (y1 == y.reshape(-1, 1)).all()
def test_y_regression_2dim(self, nn, check_good_input, X, y_regr):
y = y_regr
nn.regression = True
X1, y1 = check_good_input(X, y)
assert (X1 == X).all()
assert (y1 == y).all()
class TestGetOutput:
def test_layer_object(self, net_fitted, X_train):
layer = net_fitted.layers_['conv2']
output = net_fitted.get_output(layer, X_train[:3])
assert output.shape == (3, 8, 8, 8)
def test_layer_name(self, net_fitted, X_train):
output = net_fitted.get_output('conv2', X_train[:3])
assert output.shape == (3, 8, 8, 8)
def test_get_output_last_layer(self, net_fitted, X_train):
result = net_fitted.get_output(net_fitted.layers_[-1], X_train[:129])
expected = net_fitted.predict_proba(X_train[:129])
np.testing.assert_equal(result, expected)
def test_no_conv(self, net_no_conv):
net_no_conv.initialize()
X = np.random.random((10, 100)).astype(floatX)
result = net_no_conv.get_output('output', X)
expected = net_no_conv.predict_proba(X)
np.testing.assert_equal(result, expected)
class TestMultiInputFunctional:
@pytest.fixture(scope='session')
def net(self, NeuralNet):
return NeuralNet(
layers=[
(InputLayer,
{'name': 'input1', 'shape': (None, 392)}),
(DenseLayer,
{'name': 'hidden1', 'num_units': 98}),
(InputLayer,
{'name': 'input2', 'shape': (None, 392)}),
(DenseLayer,
{'name': 'hidden2', 'num_units': 98}),
(ConcatLayer,
{'incomings': ['hidden1', 'hidden2']}),
(DenseLayer,
{'name': 'hidden3', 'num_units': 98}),
(DenseLayer,
{'name': 'output', 'num_units': 10, 'nonlinearity': softmax}),
],
update=nesterov_momentum,
update_learning_rate=0.01,
update_momentum=0.9,
max_epochs=2,
verbose=4,
)
@pytest.fixture(scope='session')
def net_fitted(self, net, mnist):
X, y = mnist
X_train, y_train = X[:10000], y[:10000]
X_train1, X_train2 = X_train[:, :392], X_train[:, 392:]
return net.fit({'input1': X_train1, 'input2': X_train2}, y_train)
@pytest.fixture(scope='session')
def y_pred(self, net_fitted, mnist):
X, y = mnist
X_test = X[60000:]
X_test1, X_test2 = X_test[:, :392], X_test[:, 392:]
return net_fitted.predict({'input1': X_test1, 'input2': X_test2})
def test_accuracy(self, net_fitted, mnist, y_pred):
X, y = mnist
y_test = y[60000:]
assert accuracy_score(y_pred, y_test) > 0.85
class TestGradScale:
@pytest.fixture
def grad_scale(self):
from nolearn.lasagne import grad_scale
return grad_scale
@pytest.mark.parametrize("layer", [
BatchNormLayer(InputLayer((None, 16))),
Conv2DLayer(InputLayer((None, 1, 28, 28)), 2, 3),
DenseLayer(InputLayer((None, 16)), 16),
])
def test_it(self, grad_scale, layer):
layer2 = grad_scale(layer, 0.33)
assert layer2 is layer
for param in layer.get_params(trainable=True):
np.testing.assert_almost_equal(param.tag.grad_scale, 0.33)
for param in layer.get_params(trainable=False):
assert hasattr(param.tag, 'grad_scale') is False
class TestMultiOutput:
@pytest.fixture(scope='class')
def mo_net(self, NeuralNet):
def objective(layers_, target, **kwargs):
out_a_layer = layers_['output_a']
out_b_layer = layers_['output_b']
# Get the outputs
out_a, out_b = get_output([out_a_layer, out_b_layer])
# Get the targets
gt_a = T.cast(target[:, 0], 'int32')
gt_b = target[:, 1].reshape((-1, 1))
# Calculate the multi task loss
cls_loss = aggregate(categorical_crossentropy(out_a, gt_a))
reg_loss = aggregate(categorical_crossentropy(out_b, gt_b))
loss = cls_loss + reg_loss
return loss
# test that both branches of the multi output network are included,
# and also that a single layer isn't included multiple times.
l = InputLayer(shape=(None, 1, 28, 28), name="input")
l = Conv2DLayer(l, name='conv1', filter_size=(5, 5), num_filters=8)
l = Conv2DLayer(l, name='conv2', filter_size=(5, 5), num_filters=8)
la = DenseLayer(l, name='hidden_a', num_units=128)
la = DenseLayer(la, name='output_a', nonlinearity=softmax,
num_units=10)
lb = DenseLayer(l, name='hidden_b', num_units=128)
lb = DenseLayer(lb, name='output_b', nonlinearity=sigmoid, num_units=1)
net = NeuralNet(layers=[la, lb],
update_learning_rate=0.5,
y_tensor_type=None,
regression=True,
objective=objective)
net.initialize()
return net
def test_layers_included(self, mo_net):
expected_names = sorted(["input", "conv1", "conv2",
"hidden_a", "output_a",
"hidden_b", "output_b"])
network_names = sorted(list(mo_net.layers_.keys()))
assert (expected_names == network_names)
def test_predict(self, mo_net):
dummy_data = np.zeros((2, 1, 28, 28), np.float32)
p_cls, p_reg = mo_net.predict(dummy_data)
assert(p_cls.shape == (2, 10))
assert(p_reg.shape == (2, 1))
|
py | b410afe5a946eefe4cf036b5c5a04ec128d9aaff | # -*- coding: utf-8 -*-
"""
square
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
import os
import unittest
from square.configuration import Configuration
from square.client import Client
class ApiTestBase(unittest.TestCase):
"""All test classes inherit from this base class. It abstracts out
common functionality and configuration variables set up."""
@classmethod
def setUpClass(cls):
"""Class method called once before running tests in a test class."""
cls.request_timeout = 30
cls.assert_precision = 0.01
cls.config = ApiTestBase.create_configuration()
cls.client = Client()
@staticmethod
def create_configuration():
return Configuration(access_token=os.environ['SQUARE_SANDBOX_TOKEN'],
environment='sandbox')
|
py | b410b0152621cf0a20e58c6888ba6ff5c82c1e98 | # Generalized Adaptive Solvers
# as described in Kottmann, Anand, Aspuru-Guzik: https://doi.org/10.1039/D0SC06627C
from tequila import QCircuit, QubitHamiltonian, gates, paulis, grad, simulate, TequilaWarning, TequilaException, minimize, ExpectationValue
import numpy
import dataclasses
import warnings
from itertools import combinations
@dataclasses.dataclass
class AdaptParameters:
optimizer_args: dict = dataclasses.field(default_factory=lambda : {"method":"bfgs"})
compile_args: dict = dataclasses.field(default_factory=lambda : {})
maxiter:int = 100
batch_size: int = 1
energy_convergence: float = None
gradient_convergence: float = 1.e-2
max_gradient_convergence: float = 0.0
degeneracy_threshold: float = 1.e-4
def __str__(self):
info = ""
for k,v in self.__dict__.items():
info += "{:30} : {}\n".format(k, v)
return info
class AdaptPoolBase:
"""
Standard class for operator pools in Adapt
The pool is a list of generators (tequila QubitHamiltonians)
"""
generators: list = None
__n: int = 0 # for iterator, don't touch
def __init__(self, generators, trotter_steps=1):
self.generators = generators
self.trotter_steps = trotter_steps
def make_unitary(self, k, label) -> QCircuit:
return gates.Trotterized(generators=[self.generators[k]], angles=[(str(k), label)], steps=self.trotter_steps)
def __iter__(self):
self.__n = 0
return self
def __next__(self):
if self.__n < len(self.generators):
result = self.__n
self.__n +=1
return result
else:
raise StopIteration
def __str__(self):
return "{} with {} Generators".format(type(self).__name__, len(self.generators))
class ObjectiveFactoryBase:
"""
Default class to create the objective in the Adapt solver
This just creates the single ExpectationValue <H>_{Upre + U + Upost}
and U will be the circuit that is adaptively constructed
"""
Upre: QCircuit=QCircuit()
Upost: QCircuit=QCircuit()
H : QubitHamiltonian=None
def __init__(self, H=None, Upre=None, Upost=None, *args, **kwargs):
if H is None:
raise TequilaException("No Hamiltonian was given to Adapt!")
self.H = H
if Upre is not None:
self.Upre = Upre
else:
self.Upre = QCircuit()
if Upost is not None:
self.Upost = Upost
else:
self.Upost = QCircuit()
def __call__(self, U, screening=False, *args, **kwargs):
return ExpectationValue(H=self.H, U=self.Upre + U + self.Upost, *args, **kwargs)
def grad_objective(self, *args, **kwargs):
return self(*args, **kwargs)
def __str__(self):
return "{}".format(type(self).__name__)
class Adapt:
operator_pool: AdaptPoolBase = None
objective_factory = None
parameters: AdaptParameters = AdaptParameters()
def make_objective(self, U, variables=None, *args, **kwargs):
return self.objective_factory(U=U, variables=variables, *args, **{**self.parameters.compile_args, **kwargs})
def __init__(self, operator_pool, H=None,objective_factory=None, *args, **kwargs):
self.operator_pool = operator_pool
if objective_factory is None:
self.objective_factory = ObjectiveFactoryBase(H, *args, **kwargs)
else:
self.objective_factory = objective_factory
filtered = {k: v for k, v in kwargs.items() if k in self.parameters.__dict__}
self.parameters = AdaptParameters(*args, **filtered)
def __call__(self, static_variables = None, mp_pool=None, label=None, variables=None, *args, **kwargs):
print("Starting Adaptive Solver")
print(self)
# count resources
screening_cycles = 0
objective_expval_evaluations = 0
gradient_expval_evaluations = 0
histories = []
if static_variables is None:
static_variables = {}
if variables is None:
variables = {**static_variables}
else:
variables = {**variables, **static_variables}
U = QCircuit()
initial_objective = self.make_objective(U, variables = variables)
for k in initial_objective.extract_variables():
if k not in variables:
warnings.warn("variable {} of initial objective not given, setting to 0.0 and activate optimization".format(k), TequilaWarning)
variables[k] = 0.0
if len(initial_objective.extract_variables())>0:
active_variables = [k for k in variables if k not in static_variables]
if len(active_variables)>0:
print("initial optimization")
result = minimize(objective=initial_objective,
variables=active_variables,
initial_values=variables,
**self.parameters.compile_args, **self.parameters.optimizer_args)
variables = result.variables
energy = simulate(initial_objective, variables=variables)
for iter in range(self.parameters.maxiter):
current_label = (iter,0)
if label is not None:
current_label = (iter, label)
gradients = self.screen_gradients(U=U, variables=variables, mp_pool=mp_pool)
grad_values = numpy.asarray(list(gradients.values()))
max_grad = max(grad_values)
grad_norm = numpy.linalg.norm(grad_values)
if grad_norm < self.parameters.gradient_convergence:
print("pool gradient norm is {:+2.8f}, convergence criterion met".format(grad_norm))
break
if numpy.abs(max_grad) < self.parameters.max_gradient_convergence:
print("max pool gradient is {:+2.8f}, convergence criterion |max(grad)|<{} met".format(max_grad, self.parameters.max_gradient_convergence))
break
batch_size = self.parameters.batch_size
# detect degeneracies
degeneracies = [k for k in range(batch_size, len(grad_values))
if numpy.isclose(grad_values[batch_size-1],grad_values[k], rtol=self.parameters.degeneracy_threshold) ]
if len(degeneracies) > 0:
batch_size += len(degeneracies)
print("detected degeneracies: increasing batch size temporarily from {} to {}".format(self.parameters.batch_size, batch_size))
count = 0
for k,v in gradients.items():
Ux = self.operator_pool.make_unitary(k, label=current_label)
U += Ux
count += 1
if count >= batch_size:
break
variables = {**variables, **{k:0.0 for k in U.extract_variables() if k not in variables}}
active_variables = [k for k in variables if k not in static_variables]
objective = self.make_objective(U, variables=variables)
result = minimize(objective=objective,
variables=active_variables,
initial_values=variables,
**self.parameters.compile_args, **self.parameters.optimizer_args)
diff = energy - result.energy
energy = result.energy
variables = result.variables
print("-------------------------------------")
print("Finished iteration {}".format(iter))
print("current energy : {:+2.8f}".format(energy))
print("difference : {:+2.8f}".format(diff))
print("grad_norm : {:+2.8f}".format(grad_norm))
print("max_grad : {:+2.8f}".format(max_grad))
print("circuit size : {}".format(len(U.gates)))
screening_cycles += 1
mini_iter=len(result.history.extract_energies())
gradient_expval = sum([v.count_expectationvalues() for k, v in grad(objective).items()])
objective_expval_evaluations += mini_iter*objective.count_expectationvalues()
gradient_expval_evaluations += mini_iter*gradient_expval
histories.append(result.history)
if self.parameters.energy_convergence is not None and numpy.abs(diff) < self.parameters.energy_convergence:
print("energy difference is {:+2.8f}, convergence criterion met".format(diff))
break
if iter == self.parameters.maxiter - 1:
print("reached maximum number of iterations")
break
@dataclasses.dataclass
class AdaptReturn:
U:QCircuit=None
objective_factory:ObjectiveFactoryBase=None
variables:dict=None
energy: float = None
histories: list = None
screening_cycles: int = None
objective_expval_evaluations: int =None
gradient_expval_evaluations: int =None
return AdaptReturn(U=U,
variables=variables,
objective_factory=self.objective_factory,
energy=energy,
histories=histories,
screening_cycles = screening_cycles,
objective_expval_evaluations=objective_expval_evaluations,
gradient_expval_evaluations=gradient_expval_evaluations)
def screen_gradients(self, U, variables, mp_pool=None):
args = []
for k in self.operator_pool:
arg = {}
arg["k"] = k
arg["variables"] = variables
arg["U"] = U
args.append(arg)
if mp_pool is None:
dEs = [self.do_screening(arg) for arg in args]
else:
print("screen with {} workers".format(mp_pool._processes))
dEs = mp_pool.map(self.do_screening, args)
dEs = dict(sorted(dEs, reverse=True, key=lambda x: numpy.fabs(x[1])))
return dEs
def do_screening(self, arg):
Ux = self.operator_pool.make_unitary(k=arg["k"], label="tmp")
Utmp = arg["U"] + Ux
variables = {**arg["variables"]}
objective = self.make_objective(Utmp, screening=True, variables=variables)
dEs = []
for k in Ux.extract_variables():
variables[k] = 0.0
dEs.append(grad(objective, k))
gradients=[numpy.abs(simulate(objective=dE, variables=variables, **self.parameters.compile_args)) for dE in dEs]
return arg["k"], sum(gradients)
def __str__(self):
result = str(self.parameters)
result += str("{:30} : {}\n".format("operator pool: ", self.operator_pool))
result += str("{:30} : {}\n".format("objective factory : ", self.objective_factory))
return result
class MolecularPool(AdaptPoolBase):
def __init__(self, molecule, indices:str):
"""
Parameters
----------
molecule:
a tequila molecule object
indices
a list of indices defining UCC operations
indices refer to spin-orbitals
e.g. indices = [[(0,2),(1,3)], [(0,2)], [(1,3)]]
can be a string for predefined pools supported are UpCCD, UpCCSD, UpCCGD, and UpCCGSD
"""
self.molecule = molecule
if isinstance(indices, str):
if not "CC" in indices.upper():
raise TequilaException("Pool of type {} not yet supported.\nCreate your own by passing the initialized indices".format(indices))
generalized = True if "G" in indices.upper() else False
paired = True if "P" in indices.upper() else False
singles = True if "S" in indices.upper() else False
doubles = True if "D" in indices.upper() else False
indices = []
if doubles: indices += self.make_indices_doubles(generalized=generalized, paired=paired)
if singles: indices += self.make_indices_singles(generalized=generalized)
indices = [tuple(k) for k in indices]
super().__init__(generators=indices)
def make_indices_singles(self, generalized=False):
indices = []
for p in range(self.molecule.n_electrons//2):
for q in range(self.molecule.n_electrons//2, self.molecule.n_orbitals):
indices.append([(2*p, 2*q)])
indices.append([(2*p+1, 2*q+1)])
if not generalized:
return indices
for p in range(self.molecule.n_orbitals):
for q in range(p+1, self.molecule.n_orbitals):
if [(2*p, 2*q)] in indices:
continue
indices.append([(2*p, 2*q)])
indices.append([(2*p+1, 2*q+1)])
return self.sort_and_filter_unique_indices(indices)
def make_indices_doubles(self, generalized=False, paired=True):
indices = []
for p in range(self.molecule.n_electrons//2):
for q in range(self.molecule.n_electrons//2, self.molecule.n_orbitals):
indices.append([(2*p, 2*q),(2*p+1, 2*q+1)])
if not generalized:
return indices
for p in range(self.molecule.n_orbitals):
for q in range(p+1, self.molecule.n_orbitals):
idx = [(2*p, 2*q),(2*p+1, 2*q+1)]
if idx in indices:
continue
indices.append(idx)
if not paired:
indices += self.make_indices_doubles_all(generalized=generalized)
return self.sort_and_filter_unique_indices(indices)
def make_indices_doubles_all(self, generalized=False):
singles = self.make_indices_singles(generalized=generalized)
unwrapped = [x[0] for x in singles]
# now make all combinations of singles
indices = [x for x in combinations(unwrapped, 2)]
return self.sort_and_filter_unique_indices(indices)
def sort_and_filter_unique_indices(self, indices):
# sort as: [[(a,b),(c,d),(e,f)...],...]with a<c, a<b, c<d
sorted_indices = []
for idx in indices:
idx = tuple([tuple(sorted(pair)) for pair in idx]) # sort internal pairs (a<b, c<d, etc)
# avoid having orbitals show up multiple times in excitation strings
idx = tuple([pair for pair in idx if sum([1 for pair2 in idx if pair[0] in pair2 or pair[1] in pair2 ])==1 ])
if len(idx) == 0:
continue
idx = tuple(list(set(idx))) # avoid repetitions (like ((0,2),(0,2)))
idx = tuple(sorted(idx, key=lambda x:x[0])) # sort pairs by first entry (a<c)
sorted_indices.append(idx)
return list(set(sorted_indices))
def make_unitary(self, k, label):
return self.molecule.make_excitation_gate(indices=self.generators[k], angle=(self.generators[k], label), assume_real=True)
class PseudoSingletMolecularPool(MolecularPool):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
indices = []
for idx in self.generators:
if len(idx) == 1:
combined = ( ((idx[0][0]//2*2, idx[0][1]//2*2)), ((idx[0][0]//2*2+1, idx[0][1]//2*2+1)) )
if combined not in indices:
indices.append(combined)
else:
indices.append(tuple([idx]))
self.generators = list(set(indices))
def make_unitary(self, k, label):
U = QCircuit()
for idx in self.generators[k]:
combined_variable = self.generators[k][0]
U += self.molecule.make_excitation_gate(indices=idx, angle=(combined_variable,label))
return U
class ObjectiveFactorySequentialExcitedState(ObjectiveFactoryBase):
def __init__(self, H, circuits: list, factors: list, *args, **kwargs):
self.circuits = circuits
self.factors = factors
super().__init__(H=H, *args, **kwargs)
def __call__(self, U, *args, **kwargs):
circuit = self.Upre + U + self.Upost
objective = ExpectationValue(H=self.H, U=circuit)
Qp = paulis.Qp(U.qubits)
# get all overlaps
for i,Ux in enumerate(self.circuits):
S2 = ExpectationValue(H=Qp, U=circuit+Ux.dagger())
objective += numpy.abs(self.factors[i])*S2
return objective
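# --- Illustrative end-to-end sketch (editor addition). `mol` is a placeholder for a
# --- tequila molecule object and is not defined in this file.
# H = mol.make_hamiltonian()
# pool = MolecularPool(molecule=mol, indices="UpCCGSD")
# solver = Adapt(operator_pool=pool, H=H, maxiter=10)
# result = solver(label="demo")
# print(result.energy, len(result.U.gates))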
|
py | b410b25eac9d55bba158cebb7d1edc6bccd5d84e | # Variables and Data Types
# Variable
character_name = "John"
# Age (stored here as a string so it can be concatenated into the sentences below)
character_age = "35"
# Boolean
is_male = True
#String
print("There once was a man named " + character_name + ", ")
print("he was " + character_age + " years old. ")
# Variable reassignment
character_name = "Mike"
print("He really like the name " + character_name + ",")
print("but didn't like being " + character_age +".")
|
py | b410b300366a19e68982e1f58f9eca2ab091c3bb |
# Write a program that reads a car's speed.
# If it exceeds 80 km/h, show a message saying the driver was fined.
# The fine costs R$7.00 for each km/h over the limit.
velocidade = float(input('Digite uma velocidade em km/h: '))
if velocidade <= 80:
print('Você estava dentro da velocidade correta da via!')
else:
velocidadeAcima = velocidade - 80
multa = velocidadeAcima * 7
print('Você estava a {}km/h e o valor da multa será R${:.2f}'.format(velocidade, multa))
|
py | b410b3bf501ef83f97c3819a811d4b12524d3ea6 | # Importing the dependancies
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Loading the data in numerical format
X = pd.read_csv(r'Linear Regression\python\Training Data\Linear_X_Train.csv').values
y = pd.read_csv(r'Linear Regression\python\Training Data\Linear_Y_Train.csv').values
# Standardising the data (We can even use min-max normalization)
u = X.mean()
std = X.std()
X = (X-u)/std
# Visualising the data
plt.style.use('fivethirtyeight') # Setting a plot style
plt.scatter(X, y)
plt.title("Hardwork vs Performance Graph")
plt.xlabel("Hardwork")
plt.ylabel("Performance")
plt.show()
# METHOD-1: Normal Equation
X_norm = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)
theta_norm = np.dot(np.linalg.inv(np.dot(X_norm.transpose(), X_norm)), np.dot(X_norm.transpose(), y))
print(f'Normal Equation method: {theta_norm}') # Parameter values
# METHOD-2: Gradient Descent
# Cost function (used to track convergence)
def cost_function(temp, learning_rate):
cost_sigma = 0
for i in range(X_norm.shape[0]):
cost_sigma += (np.dot(temp.transpose(), X_norm[i])-y[i])**2
cost = learning_rate*(1/(2*X_norm.shape[0]))*cost_sigma
return cost
# Updating the parameters
def update_theta(theta, learning_rate):
temp = np.empty((theta.shape[0], 1))
for i in range(theta.shape[0]):
sigma = np.zeros(1, dtype=np.float64)
for j in range(X_norm.shape[0]):
sigma += (np.dot(theta.transpose(), X_norm[j])-y[j])*X_norm[j][i]
temp[i] = (theta[i] - (learning_rate/X_norm.shape[0])*sum(sigma))
return temp
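# Vectorized equivalent of update_theta (editor sketch): a single matrix product
# replaces the nested Python loops and gives the same update up to floating-point error.
def update_theta_vectorized(theta, learning_rate):
    gradient = X_norm.T.dot(X_norm.dot(theta) - y) / X_norm.shape[0]
    return theta - learning_rate * gradient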
# Iterating 1000 times (Can be reduced if the rate of change of cost is very low)
cost = [] # appending the costs for visualization
theta = np.zeros((X_norm.shape[1], 1)) # initializing the parameters
n = int(input("Enter the number of iterations: "))
for k in range(n):
learning_rate = 0.1 # (works well here, but can be tuned)
theta = update_theta(theta, learning_rate)
cost.append(cost_function(theta, learning_rate))
print(f'Gradient Descent method: {theta}') # Parameter values
# Visualizing the cost function
plt.plot(np.arange(n), cost)
plt.title("Cost function vs iterations")
plt.xlabel("Iterations")
plt.ylabel("Cost")
plt.show()
# Testing the model
X_test = pd.read_csv(r'Linear Regression\python\Test\Linear_X_Test.csv').values # Loading data
X_test = (X_test - u) / std # Standardising with the training mean and std
y_test = theta[0] + np.dot(X_test, theta[1:]) # Computing the predictions (with intercept)
df = pd.DataFrame(data=y_test, columns=["y"]) # Converting to a dataframe
df.to_csv(r'Linear Regression\python\y_prediction.csv', index=False) # Saving the dataframe |
py | b410b568b612b36f4abefe9058276d71271bcd6c | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Fmt(CMakePackage):
"""fmt (formerly cppformat) is an open-source formatting library.
It can be used as a safe alternative to printf or as a fast alternative
to C++ IOStreams."""
homepage = "http://fmtlib.net/latest/index.html"
url = "https://github.com/fmtlib/fmt/releases/download/5.2.1/fmt-5.2.1.zip"
version('5.3.0', sha256='4c0741e10183f75d7d6f730b8708a99b329b2f942dad5a9da3385ab92bb4a15c')
version('5.2.1', sha256='43894ab8fe561fc9e523a8024efc23018431fa86b95d45b06dbe6ddb29ffb6cd')
version('5.2.0', sha256='c016db7f825bce487a7929e1edb747b9902a2935057af6512cad3df3a080a027')
version('5.1.0', sha256='77ef9fea638dc846e484409fbc1ea710bb9bcea042e7b35b8805041bf7655ad5')
version('5.0.0', sha256='8dd58daf13e7e8adca99f8725ef3ae598f9c97efda7d6d8d4c49db5047879097')
version('4.1.0', sha256='9d49bf02ceb9d0eec51144b203b63b77e69d3798bb402fb82e7d0bdb06c79eeb')
version('4.0.0', sha256='10a9f184d4d66f135093a08396d3b0a0ebe8d97b79f8b3ddb8559f75fe4fcbc3')
version('3.0.2', sha256='51407b62a202b29d1a9c0eb5ecd4095d30031aea65407c42c25cb10cb5c59ad4')
version('3.0.1', sha256='4c9af0dc919a8ae7022b44e1a03c435e42d65c866f44667d8d920d342b098550')
version('3.0.0', sha256='1b050b66fa31b74f1d75a14f15e99e728ab79572f176a53b2f8ad7c201c30ceb')
variant('cxxstd',
default='11',
values=('98', '11', '14', '17'),
multi=False,
description='Use the specified C++ standard when building')
variant('pic', default=True, description='Enable generation of position-independent code')
depends_on('[email protected]:', type='build')
# Supported compilers/standards are detailed here:
# http://fmtlib.net/latest/index.html#portability
conflicts('%gcc@:4.3.999', when='@5:')
conflicts('%llvm@:2.8.999', when='@5:')
# 5 and above require C++11
conflicts('cxxstd=98', when='@5:')
# 5.0.0 enables C++14 auto return types in C++11 mode
conflicts('cxxstd=11', when='@5.0.0')
# 4.1 fails with C++17 (https://github.com/fmtlib/fmt/issues/722)
conflicts('cxxstd=17', when='@4.1.0')
# Use CMAKE_CXX_STANDARD to define C++ flag, as in later versions
patch('fmt-use-cmake-cxx-standard_3.0.0.patch', when='@3.0.0')
# Remove hardcoding of "-std=c++11/0x" in INTERFACE_COMPILE_OPTIONS
patch('fmt-no-export-cpp11flag_3.0.0.patch', when='@3.0.0:3.0.1')
# Only allow [[attributes]] on C++11 and higher
patch('fmt-attributes-cpp11_4.1.0.patch', when='@4.1.0')
def cmake_args(self):
spec = self.spec
args = []
if '+pic' in spec:
args.extend([
'-DCMAKE_C_FLAGS={0}'.format(self.compiler.pic_flag),
'-DCMAKE_CXX_FLAGS={0}'.format(self.compiler.pic_flag)
])
args.append('-DCMAKE_CXX_STANDARD={0}'.format(
spec.variants['cxxstd'].value))
# Require standard at configure time to guarantee the
# compiler supports the selected standard.
args.append('-DCMAKE_CXX_STANDARD_REQUIRED=ON')
# When cxxstd is 98, must disable FMT_USE_CPP11
if 'cxxstd=98' in spec:
args.append('-DFMT_USE_CPP11=OFF')
# Can't build docs without doxygen+python+virtualenv
# and call to build "doc" target
args.append("-DFMT_DOC=OFF")
return args
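# Example (editor addition): once this package recipe is available in a Spack
# repository, a specific version/standard/variant combination might be requested
# with a command along the lines of:
#   spack install fmt@5.3.0 cxxstd=14 +pic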
|
py | b410b57ab274539da5f4dc1244585e0f60ebf3cb | import pytest
from stix2matcher.matcher import match
_observations = [
{
"type": "observed-data",
"number_observed": 1,
"first_observed": "2004-11-26T11:42:29Z",
"last_observed": "2004-11-26T11:42:29Z",
"objects": {
"0": {
"type": u"person",
"name": u"alice",
"age": 10
},
"1": {
"type": u"person",
"name": u"bob",
"age": 15
}
}
}
]
@pytest.mark.parametrize("pattern", [
"[person:name = 'alice' AND person:age < 20]",
"[person:name = 'alice' OR person:age > 20]",
"[person:name = 'alice' OR person:age > 1000 AND person:age < 0]",
"[(person:name = 'carol' OR person:name = 'bob') AND person:age > 10]",
"[(person:name = 'darlene' OR person:name = 'carol') AND person:age < 0 OR person:age > 5]"
])
def test_comparison_and_or_match(pattern):
assert match(pattern, _observations)
@pytest.mark.parametrize("pattern", [
"[person:name = 'alice' AND person:age > 10]",
"[person:name = 'carol' OR person:age > 20]",
"[(person:age = 'alice' OR person:age > 1000) AND person:age < 0]",
"[(person:name = 'darlene' OR person:name = 'carol') AND (person:age < 0 OR person:age > 5)]"
])
def test_comparison_and_or_nomatch(pattern):
assert not match(pattern, _observations)
|
py | b410b5d824d01b433dc44e84ea728420027189cf | # Generated by Django 2.0.2 on 2018-07-27 10:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Channel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('imagem', models.ImageField(upload_to='media')),
('nome', models.CharField(max_length=255, verbose_name='Nome da empresa')),
('slug', models.SlugField(max_length=255, verbose_name='Slug')),
],
options={
'verbose_name': 'Channel',
'verbose_name_plural': 'Channels',
'ordering': ['nome'],
},
),
migrations.CreateModel(
name='New',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo', models.CharField(blank=True, max_length=255, null=True, verbose_name='Titulo')),
('descricao', models.TextField(blank=True, null=True, verbose_name='Descrição')),
('texto', models.TextField(blank=True, null=True)),
('autor', models.CharField(blank=True, max_length=255, null=True)),
('data', models.DateTimeField(null=True, verbose_name='Data')),
('imagem', models.ImageField(blank=True, null=True, upload_to='media')),
('slug', models.SlugField(max_length=255, verbose_name='Slug')),
('channels', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Channel')),
],
options={
'verbose_name': 'New',
'verbose_name_plural': 'News',
},
),
migrations.CreateModel(
name='Sugestao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=255)),
('email', models.EmailField(blank=True, max_length=254, null=True)),
('mensagem', models.TextField()),
],
options={
'verbose_name': 'Sugestão',
'verbose_name_plural': 'Sugestões',
'ordering': ['nome'],
},
),
]
|
py | b410b61bc735fb974c52654b766cfca0408cc9c9 | #!/usr/bin/env python
import sys
import os
import usb.core
if sys.platform == "darwin":
os.environ["DYLD_FALLBACK_LIBRARY_PATH"] = "/opt/homebrew/lib/:/usr/local/lib:/opt/local/lib"
os.environ['PYUSB_DEBUG'] = 'debug'
print(usb.core.find())
|
py | b410b7f6f960893f7c35565bb6ea829b75f92033 | #!/usr/bin/python2.7
"""
Read a PAIRWISE maf from stdin and print the fraction of columns whose bases
match for each alignment.
TODO: generalize for more than two species.
usage: %prog < maf > out
"""
from __future__ import division
import sys
import psyco_full
from bx.align import maf
def __main__():
maf_reader = maf.Reader( sys.stdin )
for m in maf_reader:
match = 0
total = 0
for i in range( 0, m.text_size ):
a = m.components[0].text[i].lower()
b = m.components[1].text[i].lower()
if a == b:
match += 1
total += 1
print match / total
if __name__ == "__main__": __main__()
|
py | b410b99d8d17b0154f0257189d9937214b8aa594 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWebAppPublicCertificateResult',
'AwaitableGetWebAppPublicCertificateResult',
'get_web_app_public_certificate',
]
@pulumi.output_type
class GetWebAppPublicCertificateResult:
"""
Public certificate object
"""
def __init__(__self__, blob=None, id=None, kind=None, name=None, public_certificate_location=None, thumbprint=None, type=None):
if blob and not isinstance(blob, str):
raise TypeError("Expected argument 'blob' to be a str")
pulumi.set(__self__, "blob", blob)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if public_certificate_location and not isinstance(public_certificate_location, str):
raise TypeError("Expected argument 'public_certificate_location' to be a str")
pulumi.set(__self__, "public_certificate_location", public_certificate_location)
if thumbprint and not isinstance(thumbprint, str):
raise TypeError("Expected argument 'thumbprint' to be a str")
pulumi.set(__self__, "thumbprint", thumbprint)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def blob(self) -> Optional[str]:
"""
Public Certificate byte array
"""
return pulumi.get(self, "blob")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publicCertificateLocation")
def public_certificate_location(self) -> Optional[str]:
"""
Public Certificate Location
"""
return pulumi.get(self, "public_certificate_location")
@property
@pulumi.getter
def thumbprint(self) -> str:
"""
Certificate Thumbprint
"""
return pulumi.get(self, "thumbprint")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetWebAppPublicCertificateResult(GetWebAppPublicCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebAppPublicCertificateResult(
blob=self.blob,
id=self.id,
kind=self.kind,
name=self.name,
public_certificate_location=self.public_certificate_location,
thumbprint=self.thumbprint,
type=self.type)
def get_web_app_public_certificate(name: Optional[str] = None,
public_certificate_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppPublicCertificateResult:
"""
Public certificate object
:param str name: Name of the app.
:param str public_certificate_name: Public certificate name.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
__args__ = dict()
__args__['name'] = name
__args__['publicCertificateName'] = public_certificate_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:web/v20210201:getWebAppPublicCertificate', __args__, opts=opts, typ=GetWebAppPublicCertificateResult).value
return AwaitableGetWebAppPublicCertificateResult(
blob=__ret__.blob,
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
public_certificate_location=__ret__.public_certificate_location,
thumbprint=__ret__.thumbprint,
type=__ret__.type)
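# Hedged usage sketch (not part of the generated SDK code above): the helper
# below shows how the invoke might be consumed inside a Pulumi program. The
# resource names "my-app", "my-cert" and "my-rg" are illustrative assumptions.
def _example_lookup_thumbprint() -> str:
    cert = get_web_app_public_certificate(
        name="my-app",
        public_certificate_name="my-cert",
        resource_group_name="my-rg")
    return cert.thumbprint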
|
py | b410bb250a61f8439e0a7987023dc11baa4eb692 | """Machine learning pipelines for data extraction."""
from typing import List
import operator
from beancount.core.data import Transaction
import numpy
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import make_pipeline
class NoFitMixin:
"""Mixin that implements a transformer's fit method that returns self."""
def fit(self, *_, **__):
"""A noop."""
return self
class ArrayCaster(BaseEstimator, TransformerMixin, NoFitMixin):
"""Helper class for casting data into array shape."""
@staticmethod
def transform(data):
"""Turn list into numpy array of the necessary shape."""
return numpy.array(data, ndmin=2).T
class Getter(TransformerMixin, NoFitMixin):
"""Get an entry attribute."""
def transform(self, data: List[Transaction]):
"""Return list of entry attributes."""
return [self._getter(d) for d in data]
def _getter(self, txn):
raise NotImplementedError
class AttrGetter(Getter):
"""Get a transaction attribute."""
def __init__(self, attr, default=None):
self.default = default
self._txn_getter = operator.attrgetter(attr)
def _getter(self, txn):
return self._txn_getter(txn) or self.default
class StringVectorizer(CountVectorizer):
"""Subclass of CountVectorizer that handles empty data."""
def __init__(self):
super().__init__(ngram_range=(1, 3))
def fit_transform(self, raw_documents, y=None):
try:
return super().fit_transform(raw_documents, y)
except ValueError:
return numpy.zeros(shape=(len(raw_documents), 0))
def transform(self, raw_documents):
try:
return super().transform(raw_documents)
except ValueError:
return numpy.zeros(shape=(len(raw_documents), 0))
def get_pipeline(attribute):
"""Make a pipeline for a given entry attribute."""
if attribute.startswith("date."):
return make_pipeline(AttrGetter(attribute), ArrayCaster())
# Treat all other attributes as strings.
return make_pipeline(AttrGetter(attribute, ""), StringVectorizer())
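# Hedged usage sketch: assuming `txns` is a list of beancount Transaction
# objects loaded elsewhere, the helper below vectorizes their payee attribute
# with the pipeline factory above; the attribute name "payee" is only an example.
def _example_payee_features(txns: List[Transaction]):
    """Return the feature matrix for the payee attribute of `txns`."""
    pipeline = get_pipeline("payee")
    return pipeline.fit_transform(txns)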
|
py | b410bc84ab1e70236a2cadbda1b4bff5d80758e0 | from abc import ABC, abstractmethod
from concurrent.futures import Executor, ProcessPoolExecutor as Pool
import csv
from functools import partial
from itertools import chain
from math import ceil, exp, log10
import os
from pathlib import Path
import timeit
from typing import Dict, Iterable, List, Optional, Sequence, Tuple, Type
from rdkit import Chem
from tqdm import tqdm
from pyscreener.preprocessing import pdbfix
class Screener(ABC):
"""A Screener conducts virtual screens against an ensemble of receptors.
Classes that implement the Screener interface are responsible for
defining the following methods:
* prepare_receptor
* prepare_from_smi
* prepare_from_file
* run_docking
* parse_ligand_results
NOTE: This is an abstract base class and cannot be instantiated.
Parameters
----------
receptors : List[str]
the filepath(s) of receptors to prepare for docking
pdbids : List[str]
        a list of PDB IDs corresponding to receptors to prepare for docking.
repeats : int, default=1
the number of times each docking run will be repeated
score_mode : str, default='best'
the mode used to calculate a score for an individual docking run given
multiple output scored conformations
receptor_score_mode : str, default='best'
the mode used to calculate an overall score for a single receptor
given repeated docking runs against that receptor
ensemble_score_mode : str, default='best'
the mode used to calculate an overall score for an ensemble of receptors
given multiple receptors in an ensemble
distributed : bool, default=False
        True if the computation will be parallelized over a distributed setup.
        False if it will be parallelized over a local setup
num_workers : int, default=-1
the number of worker processes to initialize when
distributing computation
ncpu : int, default=1
the number of cores allocated to each worker process
path : os.PathLike, default='.'
the path under which input and output folders will be placed
verbose : int
the level of output this Screener should output
    Attributes
    ----------
repeats : int
score_mode : str
receptor_score_mode : str
ensemble_score_mode : str
distributed : bool
num_workers : int, default= -1
ncpu : int, default=1
verbose : int, default=0
**kwargs
additional and unused keyword arguments
"""
def __init__(self, receptors: Optional[Sequence[str]] = None,
pdbids: Optional[Sequence[str]] = None,
repeats: int = 1, score_mode: str = 'best',
receptor_score_mode: str = 'best',
ensemble_score_mode: str = 'best',
distributed: bool = False,
num_workers: int = -1, ncpu: int = 1,
path: str = '.', verbose: int = 0, **kwargs):
self.path = Path(path)
receptors = receptors or []
if pdbids:
receptors.extend((
pdbfix.pdbfix(pdbid=pdbid, path=self.in_path)
for pdbid in pdbids
))
if len(receptors) == 0:
raise ValueError('No receptors or PDBids provided!')
self.receptors = receptors
self.repeats = repeats
self.score_mode = score_mode
self.receptor_score_mode = receptor_score_mode
self.ensemble_score_mode = ensemble_score_mode
self.distributed = distributed
self.num_workers = num_workers
self.ncpu = ncpu
self.verbose = verbose
self.num_docked_ligands = 0
def __len__(self) -> int:
"""The number of ligands this screener has simulated"""
return self.num_docked_ligands
def __call__(self, *args, **kwargs) -> Dict[str, Optional[float]]:
return self.dock(*args, **kwargs)
@property
def path(self) -> Tuple[os.PathLike, os.PathLike]:
"""return the Screener's in_path and out_path"""
return self.__in_path, self.__out_path
@path.setter
def path(self, path: str):
"""set both the in_path and out_path under the input path"""
path = Path(path)
self.in_path = path / 'inputs'
self.out_path = path / 'outputs'
@property
def in_path(self) -> os.PathLike:
return self.__in_path
@in_path.setter
def in_path(self, path: str):
path = Path(path)
if not path.is_dir():
path.mkdir(parents=True)
self.__in_path = path
@property
def out_path(self) -> os.PathLike:
return self.__out_path
@out_path.setter
def out_path(self, path: str):
path = Path(path)
if not path.is_dir():
path.mkdir(parents=True)
self.__out_path = path
def dock(self, *smis_or_files: Iterable,
full_results: bool = False,
**kwargs) -> Dict[str, Optional[float]]:
"""dock the ligands contained in sources
NOTE: the star operator, *, in the function signature.
If intending to pass multiple filepaths as an iterable, first
unpack the iterable in the function call by prepending a *.
If passing multiple SMILES strings, either option is acceptable,
but it is much more efficient to NOT unpack the iterable.
Parameters
----------
smis_or_files: Iterable
an iterable of ligand sources, where each ligand source may be
one of the following:
- a ligand supply file,
- a list of SMILES strings
- a single SMILES string
**kwargs
keyword arguments to pass to the appropriate prepare_from_*
function(s)
Returns
-------
d_smi_score : Dict[str, Optional[float]]
a dictionary mapping SMILES string to the best score among the
corresponding ligands. (None if all corresponding ligands
            failed to dock)
records : List[Dict]
a list of dictionaries containing the record of every single
docking run performed. Each dictionary contains the following keys:
- smiles: the ligand's SMILES string
- name: the name of the ligand
- in: the filename of the input ligand file
- out: the filename of the output docked ligand file
- log: the filename of the output log file
- score: the ligand's docking score
"""
recordsss = self.dock_ensemble(*smis_or_files, **kwargs)
smis_scores = []
for ligand_results in recordsss:
smi = ligand_results[0][0]['smiles']
score = self.calc_ligand_score(
ligand_results, self.receptor_score_mode,
self.ensemble_score_mode
)
smis_scores.append((smi, score))
d_smi_score = {}
for smi_score in smis_scores:
smi, score = smi_score
if smi not in d_smi_score:
d_smi_score[smi] = score
elif score is None:
continue
else:
curr_score = d_smi_score[smi]
if curr_score is None:
d_smi_score[smi] = score
else:
d_smi_score[smi] = min(d_smi_score[smi], score)
if full_results:
return d_smi_score, list(chain(*list(chain(*recordsss))))
return d_smi_score
def dock_ensemble(self, *smis_or_files: Iterable,
**kwargs) -> List[List[List[Dict]]]:
"""Run the docking program with the ligands contained in *smis_or_files
NOTE: the star operator, *, in the function signature
If intending to pass multiple filepaths as an iterable, first
unpack the iterable in the function call by prepending a *
Parameters
----------
smis_or_files: Iterable
an iterable of ligand sources, where each ligand source may be
one of the following:
* a ligand supply file
* a list of SMILES strings
* a single SMILES string
**kwargs
keyword arguments to pass to the appropriate prepare_from_*
function(s)
Returns
-------
recordsss : List[List[List[Dict]]]
an NxMxO list of dictionaries where each dictionary is a record of
an individual docking run and:
* N is the number of total ligands that will be docked
* M is the number of receptors each ligand is docked against
* O is the number of times each docking run is repeated.
Each dictionary contains the following keys:
* smiles: the ligand's SMILES string
* name: the name of the ligand
* in: the filename of the input ligand file
* out: the filename of the output docked ligand file
* log: the filename of the output log file
* score: the ligand's docking score
"""
begin = timeit.default_timer()
ligands = self.prepare_ligands(*smis_or_files, **kwargs)
recordsss = self.run_docking(ligands)
self.num_docked_ligands += len(recordsss)
total = timeit.default_timer() - begin
mins, secs = divmod(int(total), 60)
hrs, mins = divmod(mins, 60)
if self.verbose > 0 and len(recordsss) > 0:
print(f' Time to dock {len(recordsss)} ligands:',
f'{hrs:d}h {mins:d}m {secs:d}s ' +
f'({total/len(recordsss):0.3f} s/ligand)', flush=True)
return recordsss
@abstractmethod
def run_docking(self, ligands: Sequence[Tuple[str, str]]
) -> List[List[List[Dict]]]:
"""Run the docking simulations for the input ligands
Parameters
----------
ligands : Sequence[Tuple[str, str]]
a sequence of tuples containing a ligand's SMILES string and the
filepath of the corresponding input file
Returns
-------
List[List[List[Dict]]]
an NxMxO list of dictionaries where each individual dictionary is a
record of an individual docking run and
* N is the number of ligands contained in the ligand sources
* M is the number of receptors in the ensemble against which each \
ligand should be docked
* O is the number of times each docking run should be repeated
NOTE: the records contain a 'score' that is None for each entry
as the log/out files must first be parsed to obtain the value
"""
@staticmethod
@abstractmethod
def parse_ligand_results(recs_reps: List[List[Dict]],
score_mode: str = 'best') -> List[List[Dict]]:
"""Parse the results of the docking simulations for a single ligand
Parameters
----------
recs_reps : List[List[Dict]]
an MxO list of list of dictionaries where each individual
dictionary is a record of an individual docking run and
* M is the number of receptors in the ensemble against which each ligand should be docked
* O is the number of times each docking run should be repeated
Returns
-------
recs_reps : List[List[Dict]]
the same List as the input argument, but with the
'score' key of record updated to reflect the desired
score parsed from each docking run
"""
@property
def receptors(self):
return self.__receptors
@receptors.setter
def receptors(self, receptors):
receptors = [self.prepare_receptor(receptor) for receptor in receptors]
receptors = [receptor for receptor in receptors if receptor is not None]
if len(receptors) == 0:
raise RuntimeError('Preparation failed for all receptors!')
self.__receptors = receptors
@abstractmethod
def prepare_receptor(self, *args, **kwargs):
"""Prepare a receptor input file for the docking software"""
@staticmethod
@abstractmethod
def prepare_from_smi(*args, **kwargs):
"""Prepare a ligand input file from a SMILES string"""
@staticmethod
@abstractmethod
def prepare_from_file(*args, **kwargs):
"""Prepare a ligand input file from an input file"""
def prepare_ligands(self, *sources,
path: Optional[str] = None, **kwargs):
path = path or self.in_path
return list(chain(*(
self._prepare_ligands(source, i+len(self), path, **kwargs)
for i, source in enumerate(sources)
)))
def _prepare_ligands(self, source, i: int,
path: Optional[str] = None, **kwargs):
if isinstance(source, str):
p_source = Path(source)
if not p_source.exists():
return [self.prepare_from_smi(source, f'ligand_{i}', path)]
if p_source.suffix == '.csv':
return self.prepare_from_csv(source, **kwargs)
if p_source.suffix == '.smi':
return self.prepare_from_supply(source, **kwargs)
if p_source.suffix == '.sdf':
if kwargs['use_3d']:
return self.prepare_from_file(source, path=path,
**kwargs)
else:
return self.prepare_from_supply(source, **kwargs)
return self.prepare_from_file(source, path=path, **kwargs)
if isinstance(source, Sequence):
return self.prepare_from_smis(source, **kwargs)
        raise TypeError('Arg "source" must be of type str or '
                        f'Sequence[str]. Got: {type(source)}')
def prepare_from_smis(self, smis: Sequence[str],
names: Optional[Sequence[str]] = None,
start: int = 0, nconvert: Optional[int] = None,
**kwargs) -> List[Tuple]:
"""Convert the list of SMILES strings to their corresponding input files
Parameters
----------
smis : Sequence[str]
a sequence of SMILES strings
names : Optional[Sequence[str]], default=None
a parallel sequence of names for each ligand
start : int, default=0
the index at which to start ligand preparation
nconvert : Optional[int], default=None
the number of ligands to convert. If None, convert all ligands
**kwargs
additional and unused keyword arguments
Returns
-------
ligands : List[Tuple]
a list of tuples containing a ligand's SMILES string and the
filepath of the corresponding input file
"""
begin = timeit.default_timer()
stop = min(len(smis), start+nconvert) if nconvert else len(smis)
if names is None:
width = ceil(log10(len(smis))) + 1
names = (f'ligand_{i:0{width}}' for i in range(start, stop))
else:
# could theoretically handle empty strings
names = names[start:stop]
smis = smis[start:stop]
paths = (self.in_path for _ in range(len(smis)))
CHUNKSIZE = 4
with self.Pool(self.distributed, self.num_workers,
self.ncpu, True) as client:
# if self.distributed:
# p_prepare_from_smi = partial(
# self.pmap, f=self.prepare_from_smi, ncpu=self.ncpu
# )
# ligands = client.map(p_prepare_from_smi, smis, names, paths,
# chunksize=len(smis)/self.num_workers/4)
# else:
ligands = client.map(self.prepare_from_smi, smis, names, paths,
chunksize=CHUNKSIZE)
ligands = [
ligand for ligand in tqdm(
ligands, total=len(smis), desc='Preparing ligands',
unit='ligand', smoothing=0.
) if ligand
]
total = timeit.default_timer() - begin
if self.verbose > 1 and len(ligands) > 0:
m, s = divmod(int(total), 60)
h, m = divmod(m, 60)
print(f' Time to prepare {len(ligands)} ligands: ',
f'{h}h {m}m {s}s ({total/len(ligands):0.4f} s/ligand)',
flush=True)
return ligands
@staticmethod
def pmap(f, *args, ncpu=1, chunksize=4):
with Pool(max_workers=ncpu) as client:
xs = [x for x in client.map(f, *args, chunksize=chunksize) if x]
return xs
def prepare_from_csv(self, csv_filename: str, title_line: bool = True,
smiles_col: int = 0, name_col: Optional[int] = None,
start: int = 0, nconvert: Optional[int] = None,
**kwargs) -> List[Tuple]:
"""Prepare the input files corresponding to the SMILES strings
contained in a CSV file
Parameters
----------
csv_filename : str
the filename of the CSV file containing the ligands to convert
title_line : bool, default=True
does the CSV file contain a title line?
smiles_col : int, default=0
the column containing the SMILES strings
name_col : Optional[int], default=None
the column containing the molecule name
start : int, default=0
the index at which to start conversion
nconvert : Optional[int], default=None
the number of ligands to convert. If None, convert all molecules
**kwargs
additional and unused keyword arguments
Returns
-------
ligands : List[Tuple]
a list of tuples containing a ligand's SMILES string and the
            filepath of the corresponding input file. Files are named after
            the value in name_col, if one is given. Otherwise, they are
            named sequentially: ligand_0.<suffix>, ligand_1.<suffix>, ...
"""
with open(csv_filename) as fid:
reader = csv.reader(fid)
if title_line:
next(reader)
if name_col is None:
smis = [row[smiles_col] for row in reader]
names = None
else:
smis_names = [(row[smiles_col], row[name_col])
for row in reader]
smis, names = zip(*smis_names)
return self.prepare_from_smis(smis, names=names,
start=start, nconvert=nconvert)
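    # Hedged example (illustrative file contents): for a CSV with a title line
    # such as
    #
    #     smiles,name
    #     CCO,ethanol
    #     c1ccccc1O,phenol
    #
    # a concrete Screener subclass instance could prepare both ligands with
    #
    #     screener.prepare_from_csv('ligands.csv', smiles_col=0, name_col=1)
    #
    # where 'ligands.csv' and the molecules are assumptions, not project data.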
def prepare_from_supply(self, supply: str,
id_prop_name: Optional[str] = None,
start: int = 0, nconvert: Optional[int] = None,
**kwargs) -> List[Tuple]:
"""Prepare the input files corresponding to the molecules contained in
a molecular supply file
Parameters
----------
supply : str
the filename of the SDF or SMI file containing
the ligands to convert
id_prop_name : Optional[str]
the name of the property containing the ID, if one exists
(e.g., "CatalogID", "Chemspace_ID", "Name", etc...)
start : int, default=0
the index at which to start ligand conversion
nconvert : Optional[int], default=None
the number of ligands to convert. If None, convert all molecules
**kwargs
additional and unused keyword arguments
Returns
-------
ligands : List[Tuple[str, str]]
a list of tuples containing a ligand's SMILES string and the
filepath of the corresponding input file. Files are named
<compound_id>.<suffix> if compound_id property exists in the
original supply file. Otherwise, they are named:
lig0.<suffix>, lig1.<suffix>, ...
"""
p_supply = Path(supply)
if p_supply.suffix == '.sdf':
mols = Chem.SDMolSupplier(supply)
elif p_supply.suffix == '.smi':
mols = Chem.SmilesMolSupplier(supply)
else:
raise ValueError(
f'input file: "{supply}" does not have .sdf or .smi extension')
smis = []
names = None
if id_prop_name:
names = []
for mol in mols:
if mol is None:
continue
smis.append(Chem.MolToSmiles(mol))
names.append(mol.GetProp(id_prop_name))
else:
for mol in mols:
if mol is None:
continue
smis.append(Chem.MolToSmiles(mol))
return self.prepare_from_smis(smis, names=names,
start=start, nconvert=nconvert)
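    # Hedged example (illustrative filename/property): for an SDF library whose
    # molecules carry a "CatalogID" property, those IDs become the ligand names:
    #
    #     screener.prepare_from_supply('library.sdf', id_prop_name='CatalogID')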
@staticmethod
def calc_ligand_score(ligand_results: List[List[Dict]],
receptor_score_mode: str = 'best',
ensemble_score_mode: str = 'best') -> Optional[float]:
"""Calculate the overall score of a ligand given all of its docking
runs
Parameters
----------
ligand_results : List[List[Dict]]
an MxO list of list of dictionaries where each individual
dictionary is a record of an individual docking run and
* M is the number of receptors the ligand was docked against
* O is the number of times each docking run was repeated
receptor_score_mode : str, default='best'
the mode used to calculate the overall score for a given receptor
pose with multiple, repeated runs
ensemble_score_mode : str, default='best'
the mode used to calculate the overall score for a given ensemble
of receptors
Returns
-------
ensemble_score : Optional[float]
the overall score of a ligand's ensemble docking. None if no such
score was calculable
See also
--------
calc_score
for documentation on possible values for receptor_score_mode
and ensemble_score_mode
"""
receptor_scores = []
for receptor in ligand_results:
successful_rep_scores = [
repeat['score']
for repeat in receptor if repeat['score'] is not None
]
if successful_rep_scores:
receptor_scores.append(Screener.calc_score(
successful_rep_scores, receptor_score_mode
))
if receptor_scores:
ensemble_score = Screener.calc_score(
receptor_scores, ensemble_score_mode)
else:
ensemble_score = None
return ensemble_score
@staticmethod
def calc_score(scores: Sequence[float], score_mode: str = 'best') -> float:
"""Calculate an overall score from a sequence of scores
Parameters
----------
scores : Sequence[float]
score_mode : str, default='best'
the method used to calculate the overall score
Choices:
* 'best' - return the top score
* 'avg' - return the average of the scores
* 'boltzmann' - return the boltzmann average of the scores
Returns
-------
score : float
"""
scores = sorted(scores)
if score_mode in ('best', 'top'):
score = scores[0]
elif score_mode in ('avg', 'mean'):
score = sum(score for score in scores) / len(scores)
elif score_mode == 'boltzmann':
Z = sum(exp(-score) for score in scores)
score = sum(score * exp(-score) / Z for score in scores)
else:
score = scores[0]
return score
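    # Hedged worked example (illustrative numbers): for scores [-10.0, -9.0, -8.0],
    # 'best' returns -10.0, 'avg' returns -9.0, and 'boltzmann' returns
    # sum(s*exp(-s) for s in scores) / sum(exp(-s) for s in scores) ~= -9.58,
    # a weighted average dominated by the most negative (best) score:
    #
    #     Screener.calc_score([-10.0, -9.0, -8.0], score_mode='boltzmann')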
@staticmethod
def Pool(distributed: bool = False, num_workers: int = -1, ncpu: int = 1,
all_cores: bool = False) -> Type[Executor]:
"""build a process pool to parallelize computation over
Parameters
----------
distributed : bool, default=False
whether to return a distributed or a local process pool
num_workers : int, default=-1
if distributed is True, then this argument is ignored. If False,
then it should be equal to the total number of worker processes
desired. Using a value of -1 will spawn as many worker processes
as cores available on this machine.
NOTE: this is usually not a good idea and it's much better to
specify the number of processes explicitly.
ncpu : int, default=1
if distributed is True, then this argument should be the number of
cores allocated to each worker. if False, then this should be the
number of cores that is desired to be allocated to each worker.
NOTE: this is an implicit argument because Screener.dock() will
            make subprocess calls to programs that themselves can utilize
multiple cores. It will not actually assign <ncpu> cores to
each worker process.
all_cores : bool (Default = False)
whether to initialize as many processes as cores available
(= num_workers * ncpu).
Returns
-------
Executor
the initialized process pool
Notes
------
in some cases, as shown in the examples below, the values specified for
num_workers and ncpu will be inconsequential. Regardless, it is good
practice for this function to always be called the same way, with only
all_cores changing, depending on the context in which the initialized Executor will be used
**Ex. 1**
*Given:* a single machine with 16 cores, screening using vina-type
docking software (via the docking.Vina class)
the function should be called with distributed=False, all_cores=False,
and both num_workers and ncpu should be specified such that the product
of the two is equal to 16.
Choices: (1, 16), (2, 8), (4, 4), (8, 2), and (16, 1). You will often have to determine the optimal values empirically.
**Ex. 2**
*Given:* a cluster of machines where you've requested resources for 8
tasks with 2 cores each. The software was then initialized with
8 separate MPI processes and screening using vina-type docking
software is to be performed.
the function should be called with distributed=True and all_cores=False
(neither num_workers or ncpu needs to be specified)
**Ex. 3**
*Given:* a single machine with 16 cores, and pure python code is to be
executed in parallel
the function should be called with distributed=False, all_cores=True,
and both num_workers and ncpu should be specified such that the product
of the two is equal to 16.
Choices: see Ex. 1
"""
if distributed:
from mpi4py import MPI
from mpi4py.futures import MPIPoolExecutor as Pool
num_workers = MPI.COMM_WORLD.size
else:
if num_workers == -1:
try:
num_workers = len(os.sched_getaffinity(0))
except AttributeError:
num_workers = os.cpu_count()
ncpu = 1
if all_cores and not distributed:
num_workers *= ncpu
return Pool(max_workers=num_workers)
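# Hedged usage sketch: Screener is abstract and is driven through a concrete
# subclass. Assuming a hypothetical `VinaScreener` subclass that implements the
# abstract methods above, plus an illustrative receptor file and SMILES strings:
#
#     screener = VinaScreener(receptors=['receptor.pdb'], ncpu=4, path='screen')
#     scores = screener('CCO', 'c1ccccc1O')     # __call__ dispatches to dock()
#     best = min((s for s in scores.values() if s is not None), default=None)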
|
py | b410bcce8f7a19dd89501efec90bc6b281d0067a | """
WSGI config for realtime_graph project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'realtime_graph.settings')
application = get_wsgi_application()
|
py | b410bcd34666b1760562a0e97222c6d5fdc9fa2d | import pprint
import sys
from xml.dom import xmlbuilder, expatbuilder, Node
from xml.dom.NodeFilter import NodeFilter
class Filter(xmlbuilder.DOMBuilderFilter):
whatToShow = NodeFilter.SHOW_ELEMENT
def startContainer(self, node):
assert node.nodeType == Node.ELEMENT_NODE
if node.tagName == "skipthis":
return self.FILTER_SKIP
elif node.tagName == "rejectbefore":
return self.FILTER_REJECT
elif node.tagName == "stopbefore":
return self.FILTER_INTERRUPT
else:
return self.FILTER_ACCEPT
def acceptNode(self, node):
assert node.nodeType == Node.ELEMENT_NODE
if node.tagName == "skipafter":
return self.FILTER_SKIP
elif node.tagName == "rejectafter":
return self.FILTER_REJECT
elif node.tagName == "stopafter":
return self.FILTER_INTERRUPT
else:
return self.FILTER_ACCEPT
class RecordingFilter:
# Inheriting from xml.dom.xmlbuilder.DOMBuilderFilter is not
# required, so we won't inherit from it this time to make sure it
# isn't a problem. We have to implement the entire interface
# directly.
whatToShow = NodeFilter.SHOW_ALL
def __init__(self):
self.events = []
def startContainer(self, node):
self.events.append(("start", node.nodeType, str(node.nodeName)))
return xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
def acceptNode(self, node):
self.events.append(("accept", node.nodeType, str(node.nodeName)))
return xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT
simple_options = xmlbuilder.Options()
simple_options.filter = Filter()
simple_options.namespaces = 0
record_options = xmlbuilder.Options()
record_options.namespaces = 0
def checkResult(src):
    print()
    dom = expatbuilder.makeBuilder(simple_options).parseString(src)
    print(dom.toxml())
dom.unlink()
def checkFilterEvents(src, record, what=NodeFilter.SHOW_ALL):
record_options.filter = RecordingFilter()
record_options.filter.whatToShow = what
dom = expatbuilder.makeBuilder(record_options).parseString(src)
if record != record_options.filter.events:
        print()
        print("Received filter events:")
        pprint.pprint(record_options.filter.events)
        print()
        print("Expected filter events:")
        pprint.pprint(record)
dom.unlink()
# a simple case of skipping an element
checkResult("<doc><e><skipthis>text<e/>more</skipthis>abc</e>xyz</doc>")
# skip an element nested indirectly within another skipped element
checkResult('''\
<doc>Text.
<skipthis>Nested text.
<skipthis>Nested text in skipthis element.</skipthis>
More nested text.
</skipthis>Outer text.</doc>
''')
# skip an element nested indirectly within another skipped element
checkResult('''\
<doc>Text.
<skipthis>Nested text.
<nested-element>
<skipthis>Nested text in skipthis element.</skipthis>
More nested text.
</nested-element>
More text.
</skipthis>Outer text.</doc>
''')
checkResult("<doc><rejectbefore/></doc>")
checkResult("<doc><rejectafter/></doc>")
checkResult('''\
<doc><rejectbefore>
Text.
<?my processing instruction?>
<more stuff="foo"/>
<!-- a comment -->
</rejectbefore></doc>
''')
checkResult('''\
<doc><rejectafter>
Text.
<?my processing instruction?>
<more stuff="foo"/>
<!-- a comment -->
</rejectafter></doc>
''')
# Make sure the document element is not passed to the filter:
checkResult("<rejectbefore/>")
checkResult("<rejectafter/>")
checkResult("<stopbefore/>")
checkResult("<doc>text<stopbefore> and </stopbefore>more</doc>")
checkResult("<doc>text<stopafter> and </stopafter>more</doc>")
checkResult("<doc><a/><skipafter>text</skipafter><a/></doc>")
checkFilterEvents("<doc/>", [])
checkFilterEvents("<doc attr='value'/>", [])
checkFilterEvents("<doc><e/></doc>", [
("start", Node.ELEMENT_NODE, "e"),
("accept", Node.ELEMENT_NODE, "e"),
])
src = """\
<!DOCTYPE doc [
<!ENTITY e 'foo'>
<!NOTATION n SYSTEM 'http://xml.python.org/notation/n'>
]>
<!-- comment -->
<?sample pi?>
<doc><e attr='value'><?pi data?><!--comment--></e></doc>
"""
checkFilterEvents(src, [
("accept", Node.DOCUMENT_TYPE_NODE, "doc"),
("accept", Node.ENTITY_NODE, "e"),
("accept", Node.NOTATION_NODE, "n"),
("accept", Node.COMMENT_NODE, "#comment"),
("accept", Node.PROCESSING_INSTRUCTION_NODE, "sample"),
("start", Node.ELEMENT_NODE, "e"),
("accept", Node.PROCESSING_INSTRUCTION_NODE, "pi"),
("accept", Node.COMMENT_NODE, "#comment"),
("accept", Node.ELEMENT_NODE, "e"),
])
# Show everything except a couple of things to the filter, to check
# that whatToShow is implemented. This isn't sufficient to be a
# black-box test, but will get us started.
checkFilterEvents(src, [
("accept", Node.DOCUMENT_TYPE_NODE, "doc"),
("accept", Node.ENTITY_NODE, "e"),
("accept", Node.NOTATION_NODE, "n"),
("accept", Node.PROCESSING_INSTRUCTION_NODE, "sample"),
("start", Node.ELEMENT_NODE, "e"),
("accept", Node.PROCESSING_INSTRUCTION_NODE, "pi"),
("accept", Node.ELEMENT_NODE, "e"),
], what=NodeFilter.SHOW_ALL & ~NodeFilter.SHOW_COMMENT)
checkFilterEvents(src, [
("accept", Node.DOCUMENT_TYPE_NODE, "doc"),
("accept", Node.ENTITY_NODE, "e"),
("accept", Node.NOTATION_NODE, "n"),
("accept", Node.COMMENT_NODE, "#comment"),
("start", Node.ELEMENT_NODE, "e"),
("accept", Node.COMMENT_NODE, "#comment"),
("accept", Node.ELEMENT_NODE, "e"),
], what=NodeFilter.SHOW_ALL & ~NodeFilter.SHOW_PROCESSING_INSTRUCTION)
|
py | b410bdbb60548b4a8778ea5d240fb2fbaded57b1 | from glob import glob
from collections import namedtuple
import pickle
from multiprocessing import Pool, Queue, Process
from tqdm import tqdm
import os
import sys
import random
import torch
from torch.utils.data import Dataset, DataLoader, ConcatDataset, random_split
from io import BytesIO
from array import array
import struct
import gc
import utils
import pdb
from collections import OrderedDict
import numpy as np
logger = utils.get_logger()
from bert.tokenization import BertTokenizer
from bert.tokenization import convert_to_unicode
import spacy
class EvalData:
def __init__(self, name, examples, metrics_fn=None, predict_fn=None):
def accuracy_fn(logits, labels):
return OrderedDict()
def default_pred_fn(logits, output_dir, name, prefix):
output=os.path.join(output_dir, 'submit-{}-{}.tsv'.format(name, prefix))
preds = np.argmax(logits, axis=-1)
with open(output, 'w', encoding='utf-8') as fs:
fs.write('index\tpredictions\n')
for i,p in enumerate(preds):
fs.write('{}\t{}\n'.format(i, p))
self.name = name
self.data = examples
self.metrics_fn = metrics_fn if metrics_fn is not None else accuracy_fn
self.predict_fn = predict_fn if predict_fn is not None else default_pred_fn
def load_tsv(path, columns):
with open(path) as fs:
lines = [l.strip().split('\t') for l in fs]
return [[l[c] for c in columns] for l in lines[1:]]
def token_index(src, tgt, offset = 0):
i = offset
while i < len(src):
k=0
while k<len(tgt) and (i+k)<len(src) and src[i+k]==tgt[k]:
k+=1
if k==len(tgt):
return i
else:
i+=1
return -1
class _ABCDataset(Dataset):
def __init__(self, max_len=256, tid=None):
self.max_len = max_len
self.doc_count = 0
self.tid = tid if tid is not None and len(tid)>0 else ['lm', 'sm']
self.tid = [t.lower() for t in self.tid]
assert 'lm' in self.tid or 'sm' in self.tid, 'tids must be lm|sm'
def __len__(self):
return self.doc_count
def __getitem__(self, index):
input_ids, tids, selected_idx = self.get_example(index)
for k in input_ids:
for z in k:
z.extend([0]*(self.max_len-len(z)))
inputs = input_ids
label = torch.tensor(selected_idx, dtype=torch.float)
tids = torch.tensor(tids, dtype=torch.int)
return [torch.tensor(inputs, dtype=torch.int), tids, label]
def get_data(self, index):
        raise NotImplementedError('This method must be implemented by a subclass')
def get_example(self, index):
        raise NotImplementedError('This method must be implemented by a subclass')
def _make_inputs(self, src_left, src_right, pronoun_tokens, candidates, selected):
lm_i,lm_s,lm_t = self._make_inputs_lm(src_left, src_right, pronoun_tokens, candidates, selected, 1)
sm_i,sm_s,sm_t = self._make_inputs_sm(src_left, src_right, pronoun_tokens, candidates, selected, 0)
input_ids = []
task_ids = []
if 'lm' in self.tid:
input_ids = lm_i
task_ids = lm_t
if 'sm' in self.tid:
input_ids += sm_i
task_ids += sm_t
return (input_ids, task_ids, selected)
# inputs for language modeling
def _make_inputs_lm(self, src_left, src_right, pronoun_tokens, candidates, selected, tid=1):
pronoun_idx = len(src_left) + 1
input_ids = []
for cand in candidates:
if cand:
tokens = ['[CLS]'] + src_left + ['[MASK]' for _ in range(len(cand))] + src_right + ['[SEP]']
token_ids = self.tokenizer.convert_tokens_to_ids(tokens)
type_ids = [0]*len(tokens)
mask_ids = [1]*len(token_ids)
cand_ids = [0]*len(token_ids)
cand_ids[pronoun_idx:pronoun_idx+len(cand)]=self.tokenizer.convert_tokens_to_ids(cand)
input_ids.append([token_ids, mask_ids, type_ids, cand_ids, [0]])
else:
input_ids.append([[0], [0], [0], [0], [0]])
task_ids = [tid for _ in range(len(input_ids))]
return (input_ids, selected, task_ids)
# inputs for semantic matching
def _make_inputs_sm(self, src_left, src_right, pronoun_tokens, candidates, selected, tid=0):
src_tokens = ['[CLS]'] + src_left + pronoun_tokens + src_right + ['[SEP]']
pronoun_idx = len(src_left) + 1
pronoun_mask = [0]*len(src_tokens)
pronoun_mask[pronoun_idx] = 1
input_ids = []
for cand in candidates:
if cand:
cand_ids = self.tokenizer.convert_tokens_to_ids(cand)
token_ids = self.tokenizer.convert_tokens_to_ids(src_tokens + cand + ['[SEP]'])
type_ids = [0]*len(src_tokens) + [1]*(len(cand)+1)
mask_ids = [1]*len(token_ids)
cand_mask = [0]*len(src_tokens) + [1]*(len(cand))
input_ids.append([token_ids, mask_ids, type_ids, cand_mask, pronoun_mask.copy()])
else:
input_ids.append([[0], [0], [0], [0], [0]])
task_ids = [tid for _ in range(len(input_ids))]
return (input_ids, selected, task_ids)
from difflib import SequenceMatcher, get_close_matches
WSCRecord=namedtuple('WSCRecord', ['sentence', 'pron_idx', 'pron', 'selected', 'candidates'])
class WSCDataset(_ABCDataset):
"""
Data set for Winograd Schema Challenge task
"""
def __init__(self, tokenizer, datapaths, max_len=256, tid=None, topn=-1):
super(WSCDataset, self).__init__(max_len, tid)
self.datapaths = datapaths
self.tokenizer = tokenizer
self.topn = topn
self.raw_data = []
self.doc_count = 0
self._load(datapaths, topn)
def _load(self, datapaths, topn):
doc_count = 0
self.raw_data = []
for src in datapaths:
# for DPRD,WikiWSRC
data = load_tsv(src, [0, 1, 2, 3, 4])
doc_count += len(data)
self.raw_data.extend(data)
if doc_count > topn and topn>0:
doc_count = topn
break
self.doc_count = doc_count
def get_data(self, index):
return WSCRecord(*self.raw_data[index])
def get_example(self, index):
data = self.get_data(index)
pronoun_idx = int(data.pron_idx)
pronoun = data.pron
src = data.sentence
left_src = src[:pronoun_idx].strip()
right_src = src[pronoun_idx+len(pronoun):].strip()
assert pronoun==src[pronoun_idx:len(pronoun)+pronoun_idx], data
src_left = self.tokenizer.tokenize(convert_to_unicode(left_src))
src_right = self.tokenizer.tokenize(convert_to_unicode(right_src))
pronoun_tokens = self.tokenizer.tokenize(convert_to_unicode(data.pron))
candidates = [self.tokenizer.tokenize(convert_to_unicode(c)) for c in data.candidates.split(',')]
selected_idx = [i for i,c in enumerate(data.candidates.split(',')) if c.lower().strip()==data.selected.lower().strip()]
assert len(selected_idx)==1, data
assert len(candidates)==2, data
selected = [0] * len(candidates)
selected[selected_idx[0]]=1
return self._make_inputs(src_left, src_right, pronoun_tokens, candidates, selected)
WSC273Record=namedtuple('WSC273Record', ['left', 'pron', 'right', 'candidates', 'selected'])
class WSC273Dataset(_ABCDataset):
"""
Data set for Winograd Schema Challenge task
"""
def __init__(self, tokenizer, datapaths, max_len=256, tid=None, topn=-1, max_candidates=2):
super().__init__(max_len, tid)
self.datapaths = datapaths
self.tokenizer = tokenizer
self.topn = topn
self.raw_data = []
self.doc_count = 0
self.max_candidates=max_candidates
self._load(datapaths, topn)
def _load(self, datapaths, topn):
doc_count = 0
self.raw_data = []
for src in datapaths:
# for DPRD,WikiWSRC
data = load_tsv(src, [0, 1, 2, 3, 4])
doc_count += len(data)
self.raw_data.extend(data)
if doc_count > topn and topn>0:
doc_count = topn
break
self.doc_count = doc_count
def get_data(self, index):
return WSC273Record(*self.raw_data[index])
def get_example(self, index):
data = self.get_data(index)
# left, pron, right, candidates, selected
src_left = self.tokenizer.tokenize(convert_to_unicode(data.left))
src_right = self.tokenizer.tokenize(convert_to_unicode(data.right))
pronoun_tokens = self.tokenizer.tokenize(convert_to_unicode(data.pron))
candidates = [self.tokenizer.tokenize(convert_to_unicode(c)) for c in data.candidates.split(',')]
selected_idx = int(data.selected)
assert len(candidates)<=self.max_candidates, data
candidates.extend([None]*(self.max_candidates - len(candidates)))
selected = [0]*len(candidates)
selected[selected_idx]=1
return self._make_inputs(src_left, src_right, pronoun_tokens, candidates, selected)
WNLIRecord=namedtuple('WNLIRecord', ['sentence', 'hypersis', 'pron_idx' ,'pron', 'selected', 'candidates', 'label'])
class WNLIDataset(_ABCDataset):
"""
    Data set for the WNLI (Winograd NLI) task
"""
def __init__(self, tokenizer, datapaths, is_test=False, max_len=256, tid=None, topn=-1):
super(WNLIDataset, self).__init__(max_len, tid)
self.datapaths = datapaths
self.tokenizer = tokenizer
self.topn = topn
self.raw_data = []
self.doc_count = 0
self._load(datapaths, topn, is_test)
self.is_test = is_test
self.nlp = spacy.load('en_core_web_sm')
def _load(self, datapaths, topn, is_test):
doc_count = 0
self.raw_data = []
# sentence, target, pronoun_idx, pronoun, selected, candidates, label
fields = [1, 2, 3, 4, 5, 6, 7]
if is_test:
fields = [1, 2, 3, 4, 5, 6]
for src in datapaths:
data = load_tsv(src, fields)
doc_count += len(data)
self.raw_data.extend(data)
if doc_count > topn and topn>0:
doc_count = topn
break
self.doc_count = doc_count
def get_data(self, index):
if self.is_test:
data = self.raw_data[index] + ['0']
else:
data = self.raw_data[index]
return WNLIRecord(*data)
def get_example(self, index):
data = self.get_data(index)
# source, pronoun_idx, pronoun, selected, candidates, label
pronoun_idx = int(data.pron_idx)
pronoun = data.pron
src = data.sentence
left_src = src[:pronoun_idx].strip()
right_src = src[pronoun_idx+len(pronoun):].strip()
src_left = self.tokenizer.tokenize(convert_to_unicode(left_src))
src_right = self.tokenizer.tokenize(convert_to_unicode(right_src))
pronoun_tokens = self.tokenizer.tokenize(convert_to_unicode(data.pron))
selected = self.tokenizer.tokenize(convert_to_unicode(data.selected).lower().strip())
selected_idx = 0
candidates = [selected]
label = 0
if (not self.is_test):
label = int(data.label)
selected = [label]
return self._make_inputs(src_left, src_right, pronoun_tokens, candidates, selected)
GAPRecord=namedtuple('GAPRecord', ['sentence', 'pron_idx', 'pron', 'selected', 'candidates'])
class GAPDataset(_ABCDataset):
"""
    Data set for the GAP (Gendered Ambiguous Pronouns) coreference task
"""
def __init__(self, tokenizer, datapaths, max_len=384, tid=None, topn=-1):
super().__init__(max_len, tid)
self.datapaths = datapaths
self.tokenizer = tokenizer
self.topn = topn
self.raw_data = []
self.doc_count = 0
self._load(datapaths, topn)
def _load(self, datapaths, topn):
doc_count = 0
self.raw_data = []
for src in datapaths:
# for DPRD,WikiWSRC
data = load_tsv(src, [0, 1, 2, 3, 4])
doc_count += len(data)
self.raw_data.extend(data)
if doc_count > topn and topn>0:
doc_count = topn
break
self.doc_count = doc_count
def get_data(self, index):
return GAPRecord(*self.raw_data[index])
def get_example(self, index):
data = self.get_data(index)
# source, pronoun_idx, selected, candidates, label
pidx = int(data.pron_idx)
src_left = self.tokenizer.tokenize(convert_to_unicode(data.sentence[:pidx]))
src_right = self.tokenizer.tokenize(convert_to_unicode(data.sentence[pidx+len(data.pron):]))
pronoun_tokens = self.tokenizer.tokenize(data.pron)
candidates = [self.tokenizer.tokenize(convert_to_unicode(c)) for c in data.candidates.split('|')]
selected_idx = [i for i,c in enumerate(data.candidates.split('|')) if c.lower().strip()==data.selected.lower().strip()]
selected = [0]*len(candidates)
selected[selected_idx[0]]=1
return self._make_inputs(src_left, src_right, pronoun_tokens, candidates, selected)
def wsc_accuracy(logits, labels):
# bxc
count = 0
for g,l in zip(logits, labels):
prd = np.argmax(g)
if l[prd] == 1:
count+=1
return count/len(labels)
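# Hedged worked example (illustrative numbers): for logits [[0.2, 0.8], [0.9, 0.1]]
# and one-hot labels [[0, 1], [1, 0]], both argmax predictions land on the labelled
# candidate, so wsc_accuracy returns 1.0; flipping one label would give 0.5, e.g.
#
#     wsc_accuracy(np.array([[0.2, 0.8], [0.9, 0.1]]), [[0, 1], [1, 0]])  # -> 1.0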
class _ABCTask(object):
def __init__(self, tokenizer):
self.tokenizer = tokenizer
def get_train_dataset(self, data_dir, maxlen, input_type=None):
        raise NotImplementedError('This method must be implemented by a subclass.')
def get_test_dataset(self, data_dir, maxlen, input_type=None):
return []
def get_dev_dataset(self, data_dir, maxlen, input_type=None):
return []
def get_metric_fn(self):
def metric_fn(logits, labels, *argv, **kwargs):
return OrderedDict(accuracy=wsc_accuracy(logits, labels))
return metric_fn
class DPRDTask(_ABCTask):
def __init__(self, tokenizer):
super().__init__(tokenizer)
def get_train_dataset(self, data_dir, maxlen=128, input_type=None):
paths = glob(os.path.join(data_dir, 'train_annotated.tsv'))
return WSCDataset(self.tokenizer, paths, max_len=maxlen, tid=input_type)
def get_dev_dataset(self, data_dir, maxlen, input_type=None):
eval_set = [
self._mk_eval('DPRD-test', data_dir, 'test_annotated.tsv', maxlen, input_type=input_type)
]
return eval_set
def get_test_dataset(self, data_dir, maxlen, input_type=None):
eval_set = [
#self._mk_eval('DPRD-test', data_dir, 'test_annotated.tsv', maxlen)
]
return eval_set
def _mk_eval(self, name, data_dir, data, maxlen, input_type=None):
paths = glob(os.path.join(data_dir, data))
dataset = WSCDataset(self.tokenizer, paths, max_len=maxlen, tid=input_type)
return EvalData(name=name, examples=dataset, \
metrics_fn=self.get_metric_fn())
class WSC273Task(_ABCTask):
def __init__(self, tokenizer):
super().__init__(tokenizer)
def get_dev_dataset(self, data_dir, maxlen, input_type=None):
eval_set = [
self._mk_eval('wsc273-test', data_dir, 'wsc273.tsv', maxlen, input_type=input_type),
self._mk_eval('pdp60-test', data_dir, 'pdp60.tsv', maxlen, max_candidates=5, input_type=input_type)
]
return eval_set
def get_test_dataset(self, data_dir, maxlen, input_type=None):
eval_set = [
#self._mk_eval('wsc273-test', data_dir, 'wsc273.tsv', maxlen)
]
return eval_set
def _mk_eval(self, name, data_dir, data, maxlen, max_candidates=2, input_type=None):
paths = glob(os.path.join(data_dir, data))
dataset = WSC273Dataset(self.tokenizer, paths, max_len=maxlen, max_candidates=max_candidates, \
tid=input_type)
return EvalData(name=name, examples=dataset, \
metrics_fn=self.get_metric_fn())
class WikiWSCRTask(_ABCTask):
def __init__(self, tokenizer):
super().__init__(tokenizer)
def get_train_dataset(self, data_dir, tokenizer, maxlen=128, input_type=None):
paths = glob(os.path.join(data_dir, 'train_annotated.tsv'))
return WSCDataset(self.tokenizer, paths, max_len=maxlen)
class WNLITask(_ABCTask):
def __init__(self, tokenizer):
super().__init__(tokenizer)
self.threshold = 0.0
self.thred_dict = {}
def get_train_dataset(self, data_dir, maxlen=128, input_type=None):
paths = glob(os.path.join(data_dir, 'train_annotated.tsv'))
return WNLIDataset(self.tokenizer, paths, max_len=maxlen, tid=input_type)
def get_dev_dataset(self, data_dir, maxlen, input_type=None):
eval_set = [
self._mk_eval('wnli-dev', data_dir, 'dev_annotated.tsv', maxlen, scan_thred=True, \
rank=True, input_type=input_type)]
return eval_set
def get_test_dataset(self, data_dir, maxlen, input_type=None):
eval_set = [
self._mk_eval('wnli-test', data_dir, 'test_annotated.tsv', maxlen, is_test=True, input_type=input_type)]
return eval_set
def _mk_eval(self, name, data_dir, data, maxlen, is_test=False, scan_thred=False, rank=False, \
input_type=None):
paths = glob(os.path.join(data_dir, data))
dataset = WNLIDataset(self.tokenizer, paths, max_len=maxlen, is_test=is_test, tid=input_type)
if name != 'wnli-rank':
pred_fn = self._predict_fn(dataset)
else:
pred_fn = self._rank_fn(dataset)
return EvalData(name=name, examples=dataset, \
metrics_fn=self.get_metric_fn(scan_thred, dataset, rank), predict_fn=pred_fn)
def _rank_fn(self, datas):
def predict_fn(logits, output_dir, name, prefix, *argv, **kwargs):
tag = None
if 'tag' in kwargs:
tag = kwargs['tag']
output=os.path.join(output_dir, 'submit-rank-{}-{}-{}.tsv'.format(name, prefix, tag))
with open(output, 'w') as fs:
fs.write('index\tpred\n')
start = 0
for i,l in enumerate(logits):
ids, sel = datas.get_example(i)
cid = np.argmax(l)
cands_mask=[ids[j*4+4] for j in range((len(ids)-1)//4)]
for j,s in enumerate(cands_mask):
if sum(s)==0:
break
pred = 1 if cid==j else 0
fs.write(f'{start}\t{pred}\n')
start+=1
return predict_fn
def _predict_fn(self, datas):
def predict_fn(logits, output_dir, name, prefix, *argv, **kwargs):
logits = np.reshape(logits, [-1])
tag = None
if 'tag' in kwargs:
tag = kwargs['tag']
th = self.threshold if not tag or tag not in self.thred_dict else self.thred_dict[tag]
            logger.info(f'Predict with [{name}][{prefix}][{tag}] {th:0.02}')
output=os.path.join(output_dir, 'submit-{}-{}-{}{:0.02}.tsv'.format(name, prefix, tag, th))
with open(output, 'w') as fs:
fs.write('index\tpred\n')
for i,l in enumerate(logits):
pred = 1 if l>th else 0
fs.write(f'{i}\t{pred}\n')
group = None
count = 0
result = []
for i in range(len(datas)):
d = datas.get_data(i)
if group is None:
group = []
group.append([d, logits[i], i])
elif group[-1][0].sentence==d.sentence and group[-1][0].pron_idx==d.pron_idx:
group.append([d, logits[i], i])
else:
ll = [g[1] for g in group]
m = np.argmax(ll)
labels = [0]*len(group)
labels[m] = 1
result.extend(labels)
group=[[d,logits[i],i]]
if len(group)>0:
ll = [g[1] for g in group]
m = np.argmax(ll)
labels = [0]*len(group)
labels[m] = 1
result.extend(labels)
output=os.path.join(output_dir, 'submit-rank-{}-{}-{}.tsv'.format(name, prefix, tag))
with open(output, 'w') as fs:
fs.write('index\tpred\n')
for i,pred in enumerate(result):
fs.write(f'{i}\t{pred}\n')
return predict_fn
def get_metric_fn(self, threshold_scan=False, data=None, rank=False):
def metric_fn(logits, labels, *argv, **kwargs):
quiet = False
tag = None
if 'quiet' in kwargs:
quiet = kwargs['quiet']
if 'tag' in kwargs:
tag = kwargs['tag']
if rank:
return OrderedDict(accuracy=self.wnli_accuracy(logits, labels, threshold_scan, quiet, tag),
rank_acc=self.wnli_rank_acc(logits, labels, data, quiet, tag)
)
else:
return OrderedDict(accuracy=self.wnli_accuracy(logits, labels, threshold_scan, quiet, tag))
return metric_fn
def wnli_rank_acc(self, logits, labels, data, quiet=False, tag=None):
# bxc
labels = np.reshape(labels, [-1])
logits = np.reshape(logits, [-1])
group = None
count = 0
for i in range(len(data)):
d = data.get_data(i)
if group is None:
group = []
group.append((d, logits[i], labels[i], i))
elif group[-1][0].sentence==d.sentence and group[-1][0].pron_idx==d.pron_idx:
group.append((d, logits[i], labels[i], i))
else:
ll = [g[1] for g in group]
m = np.argmax(ll)
if ll[m]<0:
count += len(group)-1
elif group[m][2]==1:
count += len(group)
else:
count += len(group)-2
group=[(d,logits[i], labels[i], i)]
if len(group)>0:
ll = [g[1] for g in group]
m = np.argmax(ll)
if ll[m]<0:
count += len(group)-1
elif group[m][2]==1:
count += len(group)
else:
count += len(group)-2
return count/len(labels)
def wnli_accuracy(self, logits, labels, threshold_scan, quiet=False, tag=None):
# bxc
labels = np.reshape(labels, [-1])
def acc(thred):
count = 0
for g,l in zip(logits, labels):
idx = 0
if g[idx]>thred:
count += 1 if l>0 else 0
else:
count += 1 if l==0 else 0
return count/len(labels)
def thred_scan(reverse=True):
if reverse:
steps = np.arange(1,0,-0.01)
else:
steps = np.arange(0,1,0.01)
best_th = 0.95
best_score = 0
for th in steps:
score = acc(th)
if score > best_score:
best_score = score
best_th = th
return best_score,best_th
mp = np.mean([l for l,k in zip(logits, labels) if k>0])
mn = np.mean([l for l,k in zip(logits, labels) if k==0])
th = self.threshold if not tag or tag not in self.thred_dict else self.thred_dict[tag]
if not quiet:
            logger.info(f'[{tag}] Mean sim score={np.mean(logits):0.02}|[+]{mp:0.02}|[-]{mn:0.02}; th={th:0.03}; acc@0.5={acc(0.5):0.02}')
if threshold_scan:
score, th = thred_scan(reverse=True)
score2, th2 = thred_scan(reverse=False)
best_th = (th+th2)/2
if not quiet:
logger.info(f'[{tag}] Best score: BWD={score:0.03}@{th:0.02}, FWD={score2:0.03}@{th2:0.02}, Avg={score:0.03}@{best_th:0.02}')
self.threshold = best_th
if tag:
self.thred_dict[tag] = best_th
return score
else:
return acc(th)
class GAPTask(_ABCTask):
def __init__(self, tokenizer):
super().__init__(tokenizer)
# max len 256
def get_train_dataset(self, data_dir, maxlen=384, input_type=None):
paths = glob(os.path.join(data_dir, 'train_annotated.tsv'))
return GAPDataset(self.tokenizer, paths, max_len=maxlen, tid=input_type)
def get_dev_dataset(self, data_dir, maxlen, input_type=None):
eval_set = [
self._mk_eval('GAP-test', data_dir, 'test_annotated.tsv', maxlen, input_type=input_type),
self._mk_eval('GAP-dev', data_dir, 'dev_annotated.tsv', maxlen, input_type=input_type)
]
return eval_set
def get_test_dataset(self, data_dir, maxlen, input_type=None):
eval_set = [
self._mk_eval('GAP-test', data_dir, 'test_annotated.tsv', maxlen, input_type=input_type)]
return eval_set
def _mk_eval(self, name, data_dir, data, maxlen, input_type=None):
paths = glob(os.path.join(data_dir, data))
dataset = GAPDataset(self.tokenizer, paths, max_len=maxlen, tid=input_type)
return EvalData(name=name, examples=dataset, \
metrics_fn=self.get_metric_fn())
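# Hedged usage sketch: assuming `tokenizer` is a BertTokenizer built elsewhere
# and 'data/DPRD' holds train_annotated.tsv / test_annotated.tsv (both paths are
# assumptions), the task classes above plug into a standard PyTorch DataLoader:
#
#     task = DPRDTask(tokenizer)
#     train_set = task.get_train_dataset('data/DPRD', maxlen=128, input_type=['sm'])
#     loader = DataLoader(train_set, batch_size=8, shuffle=True)
#     dev_evals = task.get_dev_dataset('data/DPRD', maxlen=128, input_type=['sm'])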
|
py | b410bf4a24afdcddea00193e610ed152a7e8b655 | """Abodepy utility methods."""
import pickle
import uuid
def save_cache(data, filename):
"""Save cookies to a file."""
with open(filename, 'wb') as handle:
pickle.dump(data, handle)
def load_cache(filename):
"""Load cookies from a file."""
with open(filename, 'rb') as handle:
return pickle.load(handle)
def gen_uuid():
"""Generate a new Abode UUID."""
return str(uuid.uuid1())
def update(dct, dct_merge):
"""Recursively merge dicts."""
for key, value in dct_merge.items():
if key in dct and isinstance(dct[key], dict):
dct[key] = update(dct[key], value)
else:
dct[key] = value
return dct
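# Hedged example (illustrative values): update() merges nested dicts in place,
# preferring values from the second argument:
#
#     base = {'a': 1, 'b': {'x': 1, 'y': 2}}
#     update(base, {'b': {'y': 3}, 'c': 4})
#     # base == {'a': 1, 'b': {'x': 1, 'y': 3}, 'c': 4}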
|
py | b410bfd3a4846722815b4cbd86ee62b5a945f373 | # coding: utf-8
class c(object):
def __init__(self):
super(c, self).__init__()
print('c')
|
py | b410c041c1117635a465a6f46ce9ce231a2b00cf | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: flow.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from data_ops_analysis_sdk.model.flow import flow_step_pb2 as data__ops__analysis__sdk_dot_model_dot_flow_dot_flow__step__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='flow.proto',
package='flow',
syntax='proto3',
serialized_options=_b('Z>go.easyops.local/contracts/protorepo-models/easyops/model/flow'),
serialized_pb=_b('\n\nflow.proto\x12\x04\x66low\x1a\x30\x64\x61ta_ops_analysis_sdk/model/flow/flow_step.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xb4\t\n\x04\x46low\x12\x0e\n\x06\x66lowId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04type\x18\x03 \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x04 \x01(\t\x12\r\n\x05vName\x18\x05 \x01(\t\x12\x12\n\nenableLoop\x18\x06 \x01(\x08\x12\x10\n\x08readOnly\x18\x07 \x01(\x08\x12\x0b\n\x03org\x18\x08 \x01(\x05\x12\x12\n\ncreateTime\x18\t \x01(\t\x12\x0f\n\x07\x63reator\x18\n \x01(\t\x12\x10\n\x08vCreator\x18\x0b \x01(\t\x12\x12\n\nupdateTime\x18\x0c \x01(\t\x12\x0f\n\x07version\x18\r \x01(\x05\x12\r\n\x05vDesc\x18\x0e \x01(\t\x12\x17\n\x0freadAuthorizers\x18\x0f \x03(\t\x12\x19\n\x11updateAuthorizers\x18\x10 \x03(\t\x12\x19\n\x11\x64\x65leteAuthorizers\x18\x11 \x03(\t\x12\x1a\n\x12\x65xecuteAuthorizers\x18\x12 \x03(\t\x12\x0c\n\x04memo\x18\x13 \x01(\t\x12\x13\n\x0bsubscribers\x18\x14 \x03(\t\x12\x19\n\x11subscribedChannel\x18\x15 \x01(\t\x12\x15\n\ris_system_org\x18\x16 \x01(\x08\x12 \n\x08stepList\x18\x17 \x03(\x0b\x32\x0e.flow.FlowStep\x12\'\n\ttableDefs\x18\x18 \x03(\x0b\x32\x14.flow.Flow.TableDefs\x12\'\n\x07\x66lowEnv\x18\x19 \x01(\x0b\x32\x16.google.protobuf.Value\x12\x0c\n\x04tags\x18\x1a \x03(\t\x12%\n\x08metadata\x18\x1b \x01(\x0b\x32\x13.flow.Flow.Metadata\x12*\n\nflowInputs\x18\x1c \x01(\x0b\x32\x16.google.protobuf.Value\x12+\n\x0b\x66lowOutputs\x18\x1d \x03(\x0b\x32\x16.flow.Flow.FlowOutputs\x12)\n\noutputDefs\x18\x1e \x03(\x0b\x32\x15.flow.Flow.OutputDefs\x12*\n\thistories\x18\x1f \x03(\x0b\x32\x17.google.protobuf.Struct\x1a\xd6\x01\n\tTableDefs\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x33\n\ndimensions\x18\x03 \x03(\x0b\x32\x1f.flow.Flow.TableDefs.Dimensions\x12-\n\x07\x63olumns\x18\x04 \x03(\x0b\x32\x1c.flow.Flow.TableDefs.Columns\x1a&\n\nDimensions\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x1a#\n\x07\x43olumns\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x1a&\n\x08Metadata\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x65sc\x18\x02 \x01(\t\x1aq\n\x0b\x46lowOutputs\x12/\n\x07\x63olumns\x18\x01 \x03(\x0b\x32\x1e.flow.Flow.FlowOutputs.Columns\x1a\x31\n\x07\x43olumns\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\t\x1a\x34\n\nOutputDefs\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x0c\n\x04name\x18\x03 \x01(\tB@Z>go.easyops.local/contracts/protorepo-models/easyops/model/flowb\x06proto3')
,
dependencies=[data__ops__analysis__sdk_dot_model_dot_flow_dot_flow__step__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_FLOW_TABLEDEFS_DIMENSIONS = _descriptor.Descriptor(
name='Dimensions',
full_name='flow.Flow.TableDefs.Dimensions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flow.Flow.TableDefs.Dimensions.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flow.Flow.TableDefs.Dimensions.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1021,
serialized_end=1059,
)
_FLOW_TABLEDEFS_COLUMNS = _descriptor.Descriptor(
name='Columns',
full_name='flow.Flow.TableDefs.Columns',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flow.Flow.TableDefs.Columns.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flow.Flow.TableDefs.Columns.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1061,
serialized_end=1096,
)
_FLOW_TABLEDEFS = _descriptor.Descriptor(
name='TableDefs',
full_name='flow.Flow.TableDefs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='flow.Flow.TableDefs.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flow.Flow.TableDefs.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dimensions', full_name='flow.Flow.TableDefs.dimensions', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='columns', full_name='flow.Flow.TableDefs.columns', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_FLOW_TABLEDEFS_DIMENSIONS, _FLOW_TABLEDEFS_COLUMNS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=882,
serialized_end=1096,
)
_FLOW_METADATA = _descriptor.Descriptor(
name='Metadata',
full_name='flow.Flow.Metadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='flow.Flow.Metadata.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desc', full_name='flow.Flow.Metadata.desc', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1098,
serialized_end=1136,
)
_FLOW_FLOWOUTPUTS_COLUMNS = _descriptor.Descriptor(
name='Columns',
full_name='flow.Flow.FlowOutputs.Columns',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='flow.Flow.FlowOutputs.Columns.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='flow.Flow.FlowOutputs.Columns.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flow.Flow.FlowOutputs.Columns.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1202,
serialized_end=1251,
)
_FLOW_FLOWOUTPUTS = _descriptor.Descriptor(
name='FlowOutputs',
full_name='flow.Flow.FlowOutputs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='columns', full_name='flow.Flow.FlowOutputs.columns', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_FLOW_FLOWOUTPUTS_COLUMNS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1138,
serialized_end=1251,
)
_FLOW_OUTPUTDEFS = _descriptor.Descriptor(
name='OutputDefs',
full_name='flow.Flow.OutputDefs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='flow.Flow.OutputDefs.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='flow.Flow.OutputDefs.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flow.Flow.OutputDefs.name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1253,
serialized_end=1305,
)
_FLOW = _descriptor.Descriptor(
name='Flow',
full_name='flow.Flow',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='flowId', full_name='flow.Flow.flowId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='flow.Flow.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='flow.Flow.type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='category', full_name='flow.Flow.category', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vName', full_name='flow.Flow.vName', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enableLoop', full_name='flow.Flow.enableLoop', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='readOnly', full_name='flow.Flow.readOnly', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='flow.Flow.org', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='createTime', full_name='flow.Flow.createTime', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='flow.Flow.creator', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vCreator', full_name='flow.Flow.vCreator', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updateTime', full_name='flow.Flow.updateTime', index=11,
number=12, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='flow.Flow.version', index=12,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='vDesc', full_name='flow.Flow.vDesc', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='readAuthorizers', full_name='flow.Flow.readAuthorizers', index=14,
number=15, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updateAuthorizers', full_name='flow.Flow.updateAuthorizers', index=15,
number=16, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='deleteAuthorizers', full_name='flow.Flow.deleteAuthorizers', index=16,
number=17, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='executeAuthorizers', full_name='flow.Flow.executeAuthorizers', index=17,
number=18, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memo', full_name='flow.Flow.memo', index=18,
number=19, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subscribers', full_name='flow.Flow.subscribers', index=19,
number=20, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='subscribedChannel', full_name='flow.Flow.subscribedChannel', index=20,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='is_system_org', full_name='flow.Flow.is_system_org', index=21,
number=22, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='stepList', full_name='flow.Flow.stepList', index=22,
number=23, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tableDefs', full_name='flow.Flow.tableDefs', index=23,
number=24, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='flowEnv', full_name='flow.Flow.flowEnv', index=24,
number=25, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='flow.Flow.tags', index=25,
number=26, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='flow.Flow.metadata', index=26,
number=27, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='flowInputs', full_name='flow.Flow.flowInputs', index=27,
number=28, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='flowOutputs', full_name='flow.Flow.flowOutputs', index=28,
number=29, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='outputDefs', full_name='flow.Flow.outputDefs', index=29,
number=30, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='histories', full_name='flow.Flow.histories', index=30,
number=31, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_FLOW_TABLEDEFS, _FLOW_METADATA, _FLOW_FLOWOUTPUTS, _FLOW_OUTPUTDEFS, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=101,
serialized_end=1305,
)
_FLOW_TABLEDEFS_DIMENSIONS.containing_type = _FLOW_TABLEDEFS
_FLOW_TABLEDEFS_COLUMNS.containing_type = _FLOW_TABLEDEFS
_FLOW_TABLEDEFS.fields_by_name['dimensions'].message_type = _FLOW_TABLEDEFS_DIMENSIONS
_FLOW_TABLEDEFS.fields_by_name['columns'].message_type = _FLOW_TABLEDEFS_COLUMNS
_FLOW_TABLEDEFS.containing_type = _FLOW
_FLOW_METADATA.containing_type = _FLOW
_FLOW_FLOWOUTPUTS_COLUMNS.containing_type = _FLOW_FLOWOUTPUTS
_FLOW_FLOWOUTPUTS.fields_by_name['columns'].message_type = _FLOW_FLOWOUTPUTS_COLUMNS
_FLOW_FLOWOUTPUTS.containing_type = _FLOW
_FLOW_OUTPUTDEFS.containing_type = _FLOW
_FLOW.fields_by_name['stepList'].message_type = data__ops__analysis__sdk_dot_model_dot_flow_dot_flow__step__pb2._FLOWSTEP
_FLOW.fields_by_name['tableDefs'].message_type = _FLOW_TABLEDEFS
_FLOW.fields_by_name['flowEnv'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOW.fields_by_name['metadata'].message_type = _FLOW_METADATA
_FLOW.fields_by_name['flowInputs'].message_type = google_dot_protobuf_dot_struct__pb2._VALUE
_FLOW.fields_by_name['flowOutputs'].message_type = _FLOW_FLOWOUTPUTS
_FLOW.fields_by_name['outputDefs'].message_type = _FLOW_OUTPUTDEFS
_FLOW.fields_by_name['histories'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
DESCRIPTOR.message_types_by_name['Flow'] = _FLOW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Flow = _reflection.GeneratedProtocolMessageType('Flow', (_message.Message,), {
'TableDefs' : _reflection.GeneratedProtocolMessageType('TableDefs', (_message.Message,), {
'Dimensions' : _reflection.GeneratedProtocolMessageType('Dimensions', (_message.Message,), {
'DESCRIPTOR' : _FLOW_TABLEDEFS_DIMENSIONS,
'__module__' : 'flow_pb2'
# @@protoc_insertion_point(class_scope:flow.Flow.TableDefs.Dimensions)
})
,
'Columns' : _reflection.GeneratedProtocolMessageType('Columns', (_message.Message,), {
'DESCRIPTOR' : _FLOW_TABLEDEFS_COLUMNS,
'__module__' : 'flow_pb2'
# @@protoc_insertion_point(class_scope:flow.Flow.TableDefs.Columns)
})
,
'DESCRIPTOR' : _FLOW_TABLEDEFS,
'__module__' : 'flow_pb2'
# @@protoc_insertion_point(class_scope:flow.Flow.TableDefs)
})
,
'Metadata' : _reflection.GeneratedProtocolMessageType('Metadata', (_message.Message,), {
'DESCRIPTOR' : _FLOW_METADATA,
'__module__' : 'flow_pb2'
# @@protoc_insertion_point(class_scope:flow.Flow.Metadata)
})
,
'FlowOutputs' : _reflection.GeneratedProtocolMessageType('FlowOutputs', (_message.Message,), {
'Columns' : _reflection.GeneratedProtocolMessageType('Columns', (_message.Message,), {
'DESCRIPTOR' : _FLOW_FLOWOUTPUTS_COLUMNS,
'__module__' : 'flow_pb2'
# @@protoc_insertion_point(class_scope:flow.Flow.FlowOutputs.Columns)
})
,
'DESCRIPTOR' : _FLOW_FLOWOUTPUTS,
'__module__' : 'flow_pb2'
# @@protoc_insertion_point(class_scope:flow.Flow.FlowOutputs)
})
,
'OutputDefs' : _reflection.GeneratedProtocolMessageType('OutputDefs', (_message.Message,), {
'DESCRIPTOR' : _FLOW_OUTPUTDEFS,
'__module__' : 'flow_pb2'
# @@protoc_insertion_point(class_scope:flow.Flow.OutputDefs)
})
,
'DESCRIPTOR' : _FLOW,
'__module__' : 'flow_pb2'
# @@protoc_insertion_point(class_scope:flow.Flow)
})
_sym_db.RegisterMessage(Flow)
_sym_db.RegisterMessage(Flow.TableDefs)
_sym_db.RegisterMessage(Flow.TableDefs.Dimensions)
_sym_db.RegisterMessage(Flow.TableDefs.Columns)
_sym_db.RegisterMessage(Flow.Metadata)
_sym_db.RegisterMessage(Flow.FlowOutputs)
_sym_db.RegisterMessage(Flow.FlowOutputs.Columns)
_sym_db.RegisterMessage(Flow.OutputDefs)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
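# --- Hedged usage sketch (added by hand, not emitted by protoc) ---
# Constructing, serializing and re-parsing a Flow message; the field names used here
# (flowId, name, tags) come from the descriptor above, everything else is the standard
# protobuf message API.
if __name__ == '__main__':
    example = Flow(flowId='f-001', name='example flow', tags=['demo'])
    payload = example.SerializeToString()
    parsed = Flow()
    parsed.ParseFromString(payload)
    assert parsed.flowId == 'f-001'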
|
py | b410c3346bbfc9bfe132f7c724f3e4184aa24649 | from typing import Iterable, Callable, Any, Dict
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.avro import AvroSerializer
from doge_datagen import Subject, Transition, KafkaSinkFactory, KafkaSink
class KafkaAvroSinkFactory(object):
def __init__(self,
bootstrap_servers: Iterable[str],
schema_registry_url: str,
client_id: str,
buffer_size=100000):
"""
:param bootstrap_servers: list of bootstrap servers
:type bootstrap_servers: Iterable[str]
:param schema_registry_url: schema registry url for example http://localhost:8081
:type schema_registry_url: str
:param client_id: sink client id
:type client_id: str
"""
self.factory = KafkaSinkFactory(bootstrap_servers, client_id, buffer_size)
schema_registry_conf = {'url': schema_registry_url}
self.schema_registry_client = SchemaRegistryClient(schema_registry_conf)
def create(self, topic: str,
key_function: Callable[[Subject, Transition], Dict[str, Any]],
key_schema: str,
value_function: Callable[[int, Subject, Transition], Dict[str, Any]],
value_schema: str) -> KafkaSink:
"""
:param topic: topic name to which events will be emitted
:type topic: str
:param key_function: function that converts subject and transition to a format consumable by key serializer
        :type key_function: Callable[[Subject, Transition], Dict[str, Any]]
:param key_schema: Avro compliant schema for key serialization
:type key_schema: str
:param value_function: function that converts timestamp, subject and transition to a format consumable by
value serializer
        :type value_function: Callable[[int, Subject, Transition], Dict[str, Any]]
:param value_schema: Avro compliant schema for value serialization
:type value_schema: str
:return: KafkaSink instance
:rtype: KafkaSink
"""
key_serializer = AvroSerializer(self.schema_registry_client, key_schema)
value_serializer = AvroSerializer(self.schema_registry_client, value_schema)
return self.factory.create(topic, key_function, value_function, key_serializer, value_serializer)
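# --- Hedged usage sketch (added): wiring the factory into a doge-datagen run. ---
# The broker/registry addresses, topic name, Avro schemas and the `user_id`/`trigger`
# attributes read off Subject/Transition below are illustrative assumptions, not part
# of this module.
if __name__ == '__main__':
    key_schema = '{"type": "record", "name": "Key", "fields": [{"name": "id", "type": "string"}]}'
    value_schema = ('{"type": "record", "name": "Event", "fields": ['
                    '{"name": "ts", "type": "long"}, {"name": "event", "type": "string"}]}')
    factory = KafkaAvroSinkFactory(['localhost:9092'], 'http://localhost:8081', 'example-datagen')
    sink = factory.create(
        'example-topic',
        lambda subject, transition: {'id': str(getattr(subject, 'user_id', 'unknown'))},
        key_schema,
        lambda ts, subject, transition: {'ts': ts, 'event': getattr(transition, 'trigger', 'unknown')},
        value_schema)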
|
py | b410c3553dae7766cf4d75f58a250bd742fdfa4f | from .smpl.smpl_webuser.serialization import load_model as load_smpl_model
from .smpl.smpl_webuser.serialization import load_smplx_model, load_flame_model
base_model_folder = 'E:/DataSets/human_models/'
smpl_male_model_path = base_model_folder + 'smpl/basicmodel_m_lbs_10_207_0_v1.0.0.pkl'
smpl_female_model_path = base_model_folder + 'smpl/basicModel_f_lbs_10_207_0_v1.0.0.pkl'
smplx_male_model_path = base_model_folder + 'smplx/SMPLX_MALE.pkl'
smplx_female_model_path = base_model_folder + 'smplx/SMPLX_FEMALE.pkl'
flame_male_model_path = base_model_folder + 'flame/male_model.pkl'
flame_female_model_path = base_model_folder + 'flame/female_model.pkl'
def load_model(model_type='smplx',gender='m'):
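    """Load a human body/face model by family and gender (descriptive note added for clarity).

    model_type is one of 'smplx', 'smpl' or 'flame'; gender is 'm' for male, anything else
    selects the female model. The corresponding .pkl file under base_model_folder is
    deserialized and returned; an unrecognized model_type falls through and returns None.
    """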
if model_type=='smplx':
if gender=='m':
return load_smplx_model(smplx_male_model_path)
else:
return load_smplx_model(smplx_female_model_path)
elif model_type=='smpl':
if gender=='m':
return load_smpl_model(smpl_male_model_path)
else:
return load_smpl_model(smpl_female_model_path)
elif model_type=='flame':
if gender=='m':
return load_flame_model(flame_male_model_path)
else:
return load_flame_model(flame_female_model_path) |
py | b410c4a07bbed8787de5b131c9cdce30af657de0 | ###########################
#
# #226 A Scoop of Blancmange - Project Euler
# https://projecteuler.net/problem=226
#
# Code by Kevin Marciniak
#
###########################
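# Hedged starting point (added): the problem statement is built around the blancmange
# (Takagi) curve T(x) = sum_{n>=0} s(2**n * x) / 2**n, where s(x) is the distance from x
# to the nearest integer. The helper below only evaluates that series numerically; it is
# an illustrative sketch, not the original author's solution.
def blancmange(x, terms=50):
    """Approximate the Takagi function at x using the first `terms` terms of the series."""
    total = 0.0
    for n in range(terms):
        scaled = (2 ** n) * x
        total += abs(scaled - round(scaled)) / (2 ** n)
    return total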
|
py | b410c576fa28b29777121843be471013c69716d5 | import argparse
import os
import numpy as np
import tensorflow as tf
from hparams import hparams, hparams_debug_string
from infolog import log
from tqdm import tqdm
from wavenet_vocoder.synthesizer import Synthesizer
def run_synthesis(args, checkpoint_path, output_dir, hparams):
log_dir = os.path.join(output_dir, 'plots')
wav_dir = os.path.join(output_dir, 'wavs')
#We suppose user will provide correct folder depending on training method
log(hparams_debug_string())
synth = Synthesizer()
synth.load(checkpoint_path, hparams)
if args.model == 'Tacotron-2':
#If running all Tacotron-2, synthesize audio from evaluated mels
metadata_filename = os.path.join(args.mels_dir, 'map.txt')
with open(metadata_filename, encoding='utf-8') as f:
metadata = np.array([line.strip().split('|') for line in f])
speaker_ids = metadata[:, 2]
mel_files = metadata[:, 1]
texts = metadata[:, 0]
speaker_ids = None if (speaker_ids == '<no_g>').all() else speaker_ids
else:
#else Get all npy files in input_dir (supposing they are mels)
mel_files = sorted([os.path.join(args.mels_dir, f) for f in os.listdir(args.mels_dir) if f.split('.')[-1] == 'npy'])
speaker_ids = None if args.speaker_id is None else args.speaker_id.replace(' ', '').split(',')
if speaker_ids is not None:
assert len(speaker_ids) == len(mel_files)
texts = None
log('Starting synthesis! (this will take a while..)')
os.makedirs(log_dir, exist_ok=True)
os.makedirs(wav_dir, exist_ok=True)
mel_files = [mel_files[i: i+hparams.wavenet_synthesis_batch_size] for i in range(0, len(mel_files), hparams.wavenet_synthesis_batch_size)]
speaker_ids = None if speaker_ids is None else [speaker_ids[i: i+hparams.wavenet_synthesis_batch_size] for i in range(0, len(speaker_ids), hparams.wavenet_synthesis_batch_size)]
texts = None if texts is None else [texts[i: i+hparams.wavenet_synthesis_batch_size] for i in range(0, len(texts), hparams.wavenet_synthesis_batch_size)]
with open(os.path.join(wav_dir, 'map.txt'), 'w') as file:
for i, mel_batch in enumerate(tqdm(mel_files)):
mel_spectros = [np.load(mel_file) for mel_file in mel_batch]
basenames = [os.path.basename(mel_file).replace('.npy', '') for mel_file in mel_batch]
speaker_id_batch = None if speaker_ids is None else speaker_ids[i]
audio_files = synth.synthesize(mel_spectros, speaker_id_batch, basenames, wav_dir, log_dir)
speaker_logs = ['<no_g>'] * len(mel_batch) if speaker_id_batch is None else speaker_id_batch
for j, mel_file in enumerate(mel_batch):
				if texts is None:
					file.write('{}|{}|{}\n'.format(mel_file, audio_files[j], speaker_logs[j]))
				else:
					file.write('{}|{}|{}|{}\n'.format(texts[i][j], mel_file, audio_files[j], speaker_logs[j]))
log('synthesized audio waveforms at {}'.format(wav_dir))
def wavenet_synthesize(args, hparams, checkpoint):
output_dir = 'wavenet_' + args.output_dir
try:
checkpoint_path = args.checkpoint
#checkpoint_path = tf.train.get_checkpoint_state(checkpoint).model_checkpoint_path
log('loaded model at {}'.format(checkpoint_path))
except:
raise RuntimeError('Failed to load checkpoint at {}'.format(checkpoint))
run_synthesis(args, checkpoint_path, output_dir, hparams)
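# --- Hedged usage sketch (added): calling wavenet_synthesize directly. ---
# The paths below are placeholders; `hparams` is the module-level object imported above.
if __name__ == '__main__':
    example_args = argparse.Namespace(
        model='WaveNet',  # any value other than 'Tacotron-2' makes run_synthesis read raw .npy mels
        mels_dir='tacotron_output/eval',  # placeholder directory of mel spectrogram .npy files
        output_dir='output/',  # results are written under 'wavenet_' + output_dir
        checkpoint='logs-WaveNet/wave_pretrained/wavenet_model.ckpt',  # placeholder checkpoint path
        speaker_id=None)
    wavenet_synthesize(example_args, hparams, example_args.checkpoint)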
|
py | b410c5bd9e6cedd6d9fa28dcfb6736cb54c3ac65 | import collections
class Solution:
    def firstUniqChar(self, s: str) -> int:
        count = collections.Counter(s)
        for indx, ch in enumerate(s):
            if count[ch] < 2:
                return indx
        return -1
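# Example (added): Solution().firstUniqChar("leetcode") returns 0, since 'l' occurs only once;
# Solution().firstUniqChar("aabb") returns -1 because no character is unique.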
|
py | b410c5bf26d359e39f97da9cbd1d7fce8f4ddf4c | # -*- coding: utf-8 -*-
#
# pyXFOIL documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 11 23:35:51 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import alabaster
from mock import Mock # To include C dependent libs in ReadtheDocs
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'alabaster'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AeroPy'
copyright = u'2015, Pedro Leal'
author = u'Pedro Leal'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.3'
# The full version, including alpha/beta/rc tags.
release = '0.0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'description': 'An easy to use aerodynamic tool',
'github_user': 'leal26',
'github_repo': 'AeroPy',
'logo_name': 'AeroPy',
'github_banner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'images/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AeroPydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AeroPy.tex', u'AeroPy Documentation',
u'Pedro Leal', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'aeropy', u'AeroPy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AeroPy', u'AeroPy Documentation',
author, 'AeroPy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# For importing C dependent Libraries
class m_Mock(Mock):
@classmethod
def __getattr__(cls, name):
return m_Mock()
MOCK_MODULES = ['numpy'] #'pygtk', 'gtk', 'gobject', 'argparse', 'numpy', 'pandas'
sys.modules.update((mod_name, m_Mock()) for mod_name in MOCK_MODULES)
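# With the mock installed, `import numpy` inside autodoc-imported modules resolves to an
# m_Mock instance, and any attribute access (e.g. numpy.array) simply returns another m_Mock,
# so the docs can be built on ReadTheDocs without the compiled dependency being present.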
|
py | b410c5c154648a65200aaeee7670d819e6b17415 | #!/usr/bin/env python3
"""
Parts of this file were taken from the pyzmq project
(https://github.com/zeromq/pyzmq) which have been permitted for use under the
BSD license. Parts are from lxml (https://github.com/lxml/lxml)
"""
import argparse
from distutils.command.build import build
from distutils.sysconfig import get_config_vars
from distutils.version import LooseVersion
import multiprocessing
import os
from os.path import join as pjoin
import platform
import shutil
import sys
import numpy
from setuptools import (
Command,
Extension,
setup,
)
from setuptools.command.build_ext import build_ext as _build_ext
import versioneer
cmdclass = versioneer.get_cmdclass()
def is_platform_windows():
return sys.platform == "win32" or sys.platform == "cygwin"
def is_platform_mac():
return sys.platform == "darwin"
min_cython_ver = "0.29.21" # note: sync with pyproject.toml
try:
from Cython import (
Tempita,
__version__ as _CYTHON_VERSION,
)
from Cython.Build import cythonize
_CYTHON_INSTALLED = _CYTHON_VERSION >= LooseVersion(min_cython_ver)
except ImportError:
_CYTHON_VERSION = None
_CYTHON_INSTALLED = False
cythonize = lambda x, *args, **kwargs: x # dummy func
_pxi_dep_template = {
"algos": ["_libs/algos_common_helper.pxi.in", "_libs/algos_take_helper.pxi.in"],
"hashtable": [
"_libs/hashtable_class_helper.pxi.in",
"_libs/hashtable_func_helper.pxi.in",
"_libs/khash_for_primitive_helper.pxi.in",
],
"index": ["_libs/index_class_helper.pxi.in"],
"sparse": ["_libs/sparse_op_helper.pxi.in"],
"interval": ["_libs/intervaltree.pxi.in"],
}
_pxifiles = []
_pxi_dep = {}
for module, files in _pxi_dep_template.items():
pxi_files = [pjoin("pandas", x) for x in files]
_pxifiles.extend(pxi_files)
_pxi_dep[module] = pxi_files
class build_ext(_build_ext):
@classmethod
def render_templates(cls, pxifiles):
for pxifile in pxifiles:
# build pxifiles first, template extension must be .pxi.in
assert pxifile.endswith(".pxi.in")
outfile = pxifile[:-3]
if (
os.path.exists(outfile)
and os.stat(pxifile).st_mtime < os.stat(outfile).st_mtime
):
# if .pxi.in is not updated, no need to output .pxi
continue
with open(pxifile) as f:
tmpl = f.read()
pyxcontent = Tempita.sub(tmpl)
with open(outfile, "w") as f:
f.write(pyxcontent)
def build_extensions(self):
# if building from c files, don't need to
# generate template output
if _CYTHON_INSTALLED:
self.render_templates(_pxifiles)
super().build_extensions()
class CleanCommand(Command):
"""Custom distutils command to clean the .so and .pyc files."""
user_options = [("all", "a", "")]
def initialize_options(self):
self.all = True
self._clean_me = []
self._clean_trees = []
base = pjoin("pandas", "_libs", "src")
tsbase = pjoin("pandas", "_libs", "tslibs", "src")
dt = pjoin(tsbase, "datetime")
util = pjoin("pandas", "util")
parser = pjoin(base, "parser")
ujson_python = pjoin(base, "ujson", "python")
ujson_lib = pjoin(base, "ujson", "lib")
self._clean_exclude = [
pjoin(dt, "np_datetime.c"),
pjoin(dt, "np_datetime_strings.c"),
pjoin(parser, "tokenizer.c"),
pjoin(parser, "io.c"),
pjoin(ujson_python, "ujson.c"),
pjoin(ujson_python, "objToJSON.c"),
pjoin(ujson_python, "JSONtoObj.c"),
pjoin(ujson_python, "date_conversions.c"),
pjoin(ujson_lib, "ultrajsonenc.c"),
pjoin(ujson_lib, "ultrajsondec.c"),
pjoin(util, "move.c"),
]
for root, dirs, files in os.walk("pandas"):
for f in files:
filepath = pjoin(root, f)
if filepath in self._clean_exclude:
continue
if os.path.splitext(f)[-1] in (
".pyc",
".so",
".o",
".pyo",
".pyd",
".c",
".cpp",
".orig",
):
self._clean_me.append(filepath)
for d in dirs:
if d == "__pycache__":
self._clean_trees.append(pjoin(root, d))
# clean the generated pxi files
for pxifile in _pxifiles:
pxifile = pxifile.replace(".pxi.in", ".pxi")
self._clean_me.append(pxifile)
for d in ("build", "dist"):
if os.path.exists(d):
self._clean_trees.append(d)
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except OSError:
pass
for clean_tree in self._clean_trees:
try:
shutil.rmtree(clean_tree)
except OSError:
pass
# we need to inherit from the versioneer
# class as it encodes the version info
sdist_class = cmdclass["sdist"]
class CheckSDist(sdist_class):
"""Custom sdist that ensures Cython has compiled all pyx files to c."""
_pyxfiles = [
"pandas/_libs/arrays.pyx",
"pandas/_libs/lib.pyx",
"pandas/_libs/hashtable.pyx",
"pandas/_libs/tslib.pyx",
"pandas/_libs/index.pyx",
"pandas/_libs/internals.pyx",
"pandas/_libs/algos.pyx",
"pandas/_libs/join.pyx",
"pandas/_libs/indexing.pyx",
"pandas/_libs/interval.pyx",
"pandas/_libs/hashing.pyx",
"pandas/_libs/missing.pyx",
"pandas/_libs/reduction.pyx",
"pandas/_libs/testing.pyx",
"pandas/_libs/sparse.pyx",
"pandas/_libs/ops.pyx",
"pandas/_libs/parsers.pyx",
"pandas/_libs/tslibs/base.pyx",
"pandas/_libs/tslibs/ccalendar.pyx",
"pandas/_libs/tslibs/dtypes.pyx",
"pandas/_libs/tslibs/period.pyx",
"pandas/_libs/tslibs/strptime.pyx",
"pandas/_libs/tslibs/np_datetime.pyx",
"pandas/_libs/tslibs/timedeltas.pyx",
"pandas/_libs/tslibs/timestamps.pyx",
"pandas/_libs/tslibs/timezones.pyx",
"pandas/_libs/tslibs/conversion.pyx",
"pandas/_libs/tslibs/fields.pyx",
"pandas/_libs/tslibs/offsets.pyx",
"pandas/_libs/tslibs/parsing.pyx",
"pandas/_libs/tslibs/tzconversion.pyx",
"pandas/_libs/tslibs/vectorized.pyx",
"pandas/_libs/window/indexers.pyx",
"pandas/_libs/writers.pyx",
"pandas/io/sas/sas.pyx",
]
_cpp_pyxfiles = [
"pandas/_libs/window/aggregations.pyx",
]
def initialize_options(self):
sdist_class.initialize_options(self)
def run(self):
if "cython" in cmdclass:
self.run_command("cython")
else:
# If we are not running cython then
# compile the extensions correctly
pyx_files = [(self._pyxfiles, "c"), (self._cpp_pyxfiles, "cpp")]
for pyxfiles, extension in pyx_files:
for pyxfile in pyxfiles:
sourcefile = pyxfile[:-3] + extension
msg = (
f"{extension}-source file '{sourcefile}' not found.\n"
"Run 'setup.py cython' before sdist."
)
assert os.path.isfile(sourcefile), msg
sdist_class.run(self)
class CheckingBuildExt(build_ext):
"""
Subclass build_ext to get clearer report if Cython is necessary.
"""
def check_cython_extensions(self, extensions):
for ext in extensions:
for src in ext.sources:
if not os.path.exists(src):
print(f"{ext.name}: -> [{ext.sources}]")
raise Exception(
f"""Cython-generated file '{src}' not found.
Cython is required to compile pandas from a development branch.
Please install Cython or download a release package of pandas.
"""
)
def build_extensions(self):
self.check_cython_extensions(self.extensions)
build_ext.build_extensions(self)
class CythonCommand(build_ext):
"""
Custom distutils command subclassed from Cython.Distutils.build_ext
to compile pyx->c, and stop there. All this does is override the
C-compile method build_extension() with a no-op.
"""
def build_extension(self, ext):
pass
class DummyBuildSrc(Command):
"""numpy's build_src command interferes with Cython's build_ext."""
user_options = []
def initialize_options(self):
self.py_modules_dict = {}
def finalize_options(self):
pass
def run(self):
pass
cmdclass.update({"clean": CleanCommand, "build": build})
cmdclass["build_ext"] = CheckingBuildExt
if _CYTHON_INSTALLED:
suffix = ".pyx"
cmdclass["cython"] = CythonCommand
else:
suffix = ".c"
cmdclass["build_src"] = DummyBuildSrc
# ----------------------------------------------------------------------
# Preparation of compiler arguments
debugging_symbols_requested = "--with-debugging-symbols" in sys.argv
if debugging_symbols_requested:
sys.argv.remove("--with-debugging-symbols")
if sys.byteorder == "big":
endian_macro = [("__BIG_ENDIAN__", "1")]
else:
endian_macro = [("__LITTLE_ENDIAN__", "1")]
extra_compile_args = []
extra_link_args = []
if is_platform_windows():
if debugging_symbols_requested:
extra_compile_args.append("/Z7")
extra_link_args.append("/DEBUG")
else:
# PANDAS_CI=1 is set by ci/setup_env.sh
if os.environ.get("PANDAS_CI", "0") == "1":
extra_compile_args.append("-Werror")
if debugging_symbols_requested:
extra_compile_args.append("-g")
extra_compile_args.append("-UNDEBUG")
extra_compile_args.append("-O0")
# Build for at least macOS 10.9 when compiling on a 10.9 system or above,
# overriding CPython distuitls behaviour which is to target the version that
# python was built for. This may be overridden by setting
# MACOSX_DEPLOYMENT_TARGET before calling setup.py
if is_platform_mac():
if "MACOSX_DEPLOYMENT_TARGET" not in os.environ:
current_system = platform.mac_ver()[0]
python_target = get_config_vars().get(
"MACOSX_DEPLOYMENT_TARGET", current_system
)
if (
LooseVersion(str(python_target)) < "10.9"
and LooseVersion(current_system) >= "10.9"
):
os.environ["MACOSX_DEPLOYMENT_TARGET"] = "10.9"
if sys.version_info[:2] == (3, 8): # GH 33239
extra_compile_args.append("-Wno-error=deprecated-declarations")
# https://github.com/pandas-dev/pandas/issues/35559
extra_compile_args.append("-Wno-error=unreachable-code")
# enable coverage by building cython files by setting the environment variable
# "PANDAS_CYTHON_COVERAGE" (with a Truthy value) or by running build_ext
# with `--with-cython-coverage`enabled
linetrace = os.environ.get("PANDAS_CYTHON_COVERAGE", False)
if "--with-cython-coverage" in sys.argv:
linetrace = True
sys.argv.remove("--with-cython-coverage")
# Note: if not using `cythonize`, coverage can be enabled by
# pinning `ext.cython_directives = directives` to each ext in extensions.
# github.com/cython/cython/wiki/enhancements-compilerdirectives#in-setuppy
directives = {"linetrace": False, "language_level": 3}
macros = []
if linetrace:
# https://pypkg.com/pypi/pytest-cython/f/tests/example-project/setup.py
directives["linetrace"] = True
macros = [("CYTHON_TRACE", "1"), ("CYTHON_TRACE_NOGIL", "1")]
# silence build warnings about deprecated API usage
# we can't do anything about these warnings because they stem from
# cython+numpy version mismatches.
macros.append(("NPY_NO_DEPRECATED_API", "0"))
# ----------------------------------------------------------------------
# Specification of Dependencies
# TODO: Need to check to see if e.g. `linetrace` has changed and possibly
# re-compile.
def maybe_cythonize(extensions, *args, **kwargs):
"""
Render tempita templates before calling cythonize. This is skipped for
* clean
* sdist
"""
if "clean" in sys.argv or "sdist" in sys.argv:
# See https://github.com/cython/cython/issues/1495
return extensions
elif not _CYTHON_INSTALLED:
        # GH#28836 raise a helpful error message
if _CYTHON_VERSION:
raise RuntimeError(
f"Cannot cythonize with old Cython version ({_CYTHON_VERSION} "
f"installed, needs {min_cython_ver})"
)
raise RuntimeError("Cannot cythonize without Cython installed.")
# reuse any parallel arguments provided for compilation to cythonize
parser = argparse.ArgumentParser()
parser.add_argument("--parallel", "-j", type=int, default=1)
parsed, _ = parser.parse_known_args()
kwargs["nthreads"] = parsed.parallel
build_ext.render_templates(_pxifiles)
return cythonize(extensions, *args, **kwargs)
def srcpath(name=None, suffix=".pyx", subdir="src"):
return pjoin("pandas", subdir, name + suffix)
lib_depends = ["pandas/_libs/src/parse_helper.h"]
klib_include = ["pandas/_libs/src/klib"]
tseries_depends = [
"pandas/_libs/tslibs/src/datetime/np_datetime.h",
"pandas/_libs/tslibs/src/datetime/np_datetime_strings.h",
]
ext_data = {
"_libs.algos": {
"pyxfile": "_libs/algos",
"include": klib_include,
"depends": _pxi_dep["algos"],
},
"_libs.arrays": {"pyxfile": "_libs/arrays"},
"_libs.groupby": {"pyxfile": "_libs/groupby"},
"_libs.hashing": {"pyxfile": "_libs/hashing", "depends": []},
"_libs.hashtable": {
"pyxfile": "_libs/hashtable",
"include": klib_include,
"depends": (
["pandas/_libs/src/klib/khash_python.h", "pandas/_libs/src/klib/khash.h"]
+ _pxi_dep["hashtable"]
),
},
"_libs.index": {
"pyxfile": "_libs/index",
"include": klib_include,
"depends": _pxi_dep["index"],
},
"_libs.indexing": {"pyxfile": "_libs/indexing"},
"_libs.internals": {"pyxfile": "_libs/internals"},
"_libs.interval": {
"pyxfile": "_libs/interval",
"include": klib_include,
"depends": _pxi_dep["interval"],
},
"_libs.join": {"pyxfile": "_libs/join", "include": klib_include},
"_libs.lib": {
"pyxfile": "_libs/lib",
"depends": lib_depends + tseries_depends,
"include": klib_include, # due to tokenizer import
"sources": ["pandas/_libs/src/parser/tokenizer.c"],
},
"_libs.missing": {"pyxfile": "_libs/missing", "depends": tseries_depends},
"_libs.parsers": {
"pyxfile": "_libs/parsers",
"include": klib_include + ["pandas/_libs/src"],
"depends": [
"pandas/_libs/src/parser/tokenizer.h",
"pandas/_libs/src/parser/io.h",
],
"sources": [
"pandas/_libs/src/parser/tokenizer.c",
"pandas/_libs/src/parser/io.c",
],
},
"_libs.reduction": {"pyxfile": "_libs/reduction"},
"_libs.ops": {"pyxfile": "_libs/ops"},
"_libs.ops_dispatch": {"pyxfile": "_libs/ops_dispatch"},
"_libs.properties": {"pyxfile": "_libs/properties"},
"_libs.reshape": {"pyxfile": "_libs/reshape", "depends": []},
"_libs.sparse": {"pyxfile": "_libs/sparse", "depends": _pxi_dep["sparse"]},
"_libs.tslib": {"pyxfile": "_libs/tslib", "depends": tseries_depends},
"_libs.tslibs.base": {"pyxfile": "_libs/tslibs/base"},
"_libs.tslibs.ccalendar": {"pyxfile": "_libs/tslibs/ccalendar"},
"_libs.tslibs.dtypes": {"pyxfile": "_libs/tslibs/dtypes"},
"_libs.tslibs.conversion": {
"pyxfile": "_libs/tslibs/conversion",
"depends": tseries_depends,
"sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
},
"_libs.tslibs.fields": {
"pyxfile": "_libs/tslibs/fields",
"depends": tseries_depends,
},
"_libs.tslibs.nattype": {"pyxfile": "_libs/tslibs/nattype"},
"_libs.tslibs.np_datetime": {
"pyxfile": "_libs/tslibs/np_datetime",
"depends": tseries_depends,
"sources": [
"pandas/_libs/tslibs/src/datetime/np_datetime.c",
"pandas/_libs/tslibs/src/datetime/np_datetime_strings.c",
],
},
"_libs.tslibs.offsets": {
"pyxfile": "_libs/tslibs/offsets",
"depends": tseries_depends,
},
"_libs.tslibs.parsing": {
"pyxfile": "_libs/tslibs/parsing",
"include": klib_include,
"depends": ["pandas/_libs/src/parser/tokenizer.h"],
"sources": ["pandas/_libs/src/parser/tokenizer.c"],
},
"_libs.tslibs.period": {
"pyxfile": "_libs/tslibs/period",
"depends": tseries_depends,
"sources": ["pandas/_libs/tslibs/src/datetime/np_datetime.c"],
},
"_libs.tslibs.strptime": {
"pyxfile": "_libs/tslibs/strptime",
"depends": tseries_depends,
},
"_libs.tslibs.timedeltas": {
"pyxfile": "_libs/tslibs/timedeltas",
"depends": tseries_depends,
},
"_libs.tslibs.timestamps": {
"pyxfile": "_libs/tslibs/timestamps",
"depends": tseries_depends,
},
"_libs.tslibs.timezones": {"pyxfile": "_libs/tslibs/timezones"},
"_libs.tslibs.tzconversion": {
"pyxfile": "_libs/tslibs/tzconversion",
"depends": tseries_depends,
},
"_libs.tslibs.vectorized": {"pyxfile": "_libs/tslibs/vectorized"},
"_libs.testing": {"pyxfile": "_libs/testing"},
"_libs.window.aggregations": {
"pyxfile": "_libs/window/aggregations",
"language": "c++",
"suffix": ".cpp",
"depends": ["pandas/_libs/src/skiplist.h"],
},
"_libs.window.indexers": {"pyxfile": "_libs/window/indexers"},
"_libs.writers": {"pyxfile": "_libs/writers"},
"io.sas._sas": {"pyxfile": "io/sas/sas"},
}
extensions = []
for name, data in ext_data.items():
source_suffix = suffix if suffix == ".pyx" else data.get("suffix", ".c")
sources = [srcpath(data["pyxfile"], suffix=source_suffix, subdir="")]
sources.extend(data.get("sources", []))
include = data.get("include", [])
include.append(numpy.get_include())
obj = Extension(
f"pandas.{name}",
sources=sources,
depends=data.get("depends", []),
include_dirs=include,
language=data.get("language", "c"),
define_macros=data.get("macros", macros),
extra_compile_args=extra_compile_args,
extra_link_args=extra_link_args,
)
extensions.append(obj)
# ----------------------------------------------------------------------
# ujson
if suffix == ".pyx":
# undo dumb setuptools bug clobbering .pyx sources back to .c
for ext in extensions:
if ext.sources[0].endswith((".c", ".cpp")):
root, _ = os.path.splitext(ext.sources[0])
ext.sources[0] = root + suffix
ujson_ext = Extension(
"pandas._libs.json",
depends=[
"pandas/_libs/src/ujson/lib/ultrajson.h",
"pandas/_libs/src/ujson/python/date_conversions.h",
],
sources=(
[
"pandas/_libs/src/ujson/python/ujson.c",
"pandas/_libs/src/ujson/python/objToJSON.c",
"pandas/_libs/src/ujson/python/date_conversions.c",
"pandas/_libs/src/ujson/python/JSONtoObj.c",
"pandas/_libs/src/ujson/lib/ultrajsonenc.c",
"pandas/_libs/src/ujson/lib/ultrajsondec.c",
]
+ [
"pandas/_libs/tslibs/src/datetime/np_datetime.c",
"pandas/_libs/tslibs/src/datetime/np_datetime_strings.c",
]
),
include_dirs=[
"pandas/_libs/src/ujson/python",
"pandas/_libs/src/ujson/lib",
"pandas/_libs/src/datetime",
numpy.get_include(),
],
extra_compile_args=(["-D_GNU_SOURCE"] + extra_compile_args),
extra_link_args=extra_link_args,
define_macros=macros,
)
extensions.append(ujson_ext)
# ----------------------------------------------------------------------
if __name__ == "__main__":
# Freeze to support parallel compilation when using spawn instead of fork
multiprocessing.freeze_support()
setup(
version=versioneer.get_version(),
ext_modules=maybe_cythonize(extensions, compiler_directives=directives),
cmdclass=cmdclass,
)
|
py | b410c5d403023ab7d9d49f26b3a9e1590b644652 | """
Module holds JMX handlers implementations
Copyright 2017 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import traceback
from distutils.version import LooseVersion
from bzt import TaurusInternalException, TaurusConfigError
from bzt.engine import Scenario
from bzt.jmx import JMX
from bzt.jmx.threadgroups import ThreadGroup, ConcurrencyThreadGroup, ThreadGroupHandler
from bzt.requests_model import RequestVisitor, has_variable_pattern, HierarchicRequestParser
from bzt.six import etree, iteritems, numeric_types
from bzt.utils import BetterDict, dehumanize_time, ensure_is_dict, guess_csv_dialect, load_class
class RequestCompiler(RequestVisitor):
def __init__(self, jmx_builder):
super(RequestCompiler, self).__init__()
self.jmx_builder = jmx_builder
def visit_hierarchichttprequest(self, request):
return self.jmx_builder.compile_request(request)
def visit_ifblock(self, block):
return self.jmx_builder.compile_if_block(block)
def visit_onceblock(self, block):
return self.jmx_builder.compile_once_block(block)
def visit_loopblock(self, block):
return self.jmx_builder.compile_loop_block(block)
def visit_whileblock(self, block):
return self.jmx_builder.compile_while_block(block)
def visit_foreachblock(self, block):
return self.jmx_builder.compile_foreach_block(block)
def visit_transactionblock(self, block):
return self.jmx_builder.compile_transaction_block(block)
def visit_includescenarioblock(self, block):
scenario_name = block.scenario_name
if scenario_name in self.path:
msg = "Mutual recursion detected in include-scenario blocks (scenario %s)"
raise TaurusConfigError(msg % scenario_name)
self.record_path(scenario_name)
return self.jmx_builder.compile_include_scenario_block(block)
def visit_actionblock(self, block):
return self.jmx_builder.compile_action_block(block)
def visit_setvariables(self, block):
return self.jmx_builder.compile_set_variables_block(block)
class LoadSettingsProcessor(object):
TG = ThreadGroup.__name__
CTG = ConcurrencyThreadGroup.__name__
def __init__(self, executor):
self.log = executor.log.getChild(self.__class__.__name__)
self.load = executor.get_specific_load()
self.tg = self._detect_thread_group(executor)
self.tg_handler = ThreadGroupHandler(self.log)
def _detect_thread_group(self, executor):
"""
Detect preferred thread group
:param executor:
:return:
"""
tg = self.TG
if not executor.settings.get('force-ctg', True):
return tg
msg = 'Thread group detection: %s, regular ThreadGroup will be used'
if not self.load.duration:
self.log.debug(msg, 'duration not found')
elif self.load.iterations:
self.log.debug(msg, 'iterations are found')
elif not executor.tool:
            msg = 'You must set executor tool (%s) to be able to choose ConcurrencyThreadGroup'
raise TaurusInternalException(msg % executor.tool_name)
elif not executor.tool.ctg_plugin_installed():
            self.log.warning(msg % 'plugin for ConcurrencyThreadGroup not found')
else:
tg = self.CTG
return tg
def modify(self, jmx):
if not (self.load.iterations or self.load.concurrency or self.load.duration):
self.log.debug('No iterations/concurrency/duration found, thread group modification is skipped')
return
# IMPORTANT: snapshot group order first, because converting an element's type changes the order in which groups are retrieved
groups = list(self.tg_handler.groups(jmx))
if self.load.concurrency and not isinstance(self.load.concurrency, numeric_types): # property found
for group in groups:
self.tg_handler.convert(group=group, target=self.tg, load=self.load, concurrency=self.load.concurrency)
else:
target_list = zip(groups, self._get_concurrencies(groups))
for group, concurrency in target_list:
self.tg_handler.convert(group=group, target=self.tg, load=self.load, concurrency=concurrency)
if self.load.throughput:
self._add_shaper(jmx)
if self.load.steps and self.tg == self.TG:
self.log.warning("Stepping ramp-up isn't supported for regular ThreadGroup")
def _get_concurrencies(self, groups):
"""
Collect concurrency values and
calculate target concurrency for every thread group
"""
concurrency_list = []
for group in groups:
concurrency_list.append(group.get_concurrency())
if concurrency_list and self.load.concurrency:
total_old_concurrency = sum(concurrency_list)  # non-zero, guaranteed by the logic of group.get_concurrency()
for idx, concurrency in enumerate(concurrency_list):
part_of_load = 1.0 * self.load.concurrency * concurrency / total_old_concurrency
if part_of_load < 1:
concurrency_list[idx] = 1
else:
concurrency_list[idx] = int(round(part_of_load))
total_new_concurrency = sum(concurrency_list)
leftover = self.load.concurrency - total_new_concurrency
if leftover < 0:
msg = "Had to add %s more threads to maintain thread group proportion"
self.log.warning(msg, -leftover)
elif leftover > 0:
msg = "%s threads left undistributed due to thread group proportion"
self.log.warning(msg, leftover)
return concurrency_list
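# Worked example of the proportional split above (values are illustrative, not from config):
# existing group concurrencies [1, 20] with load.concurrency=5 give parts of ~0.24 and ~4.76;
# the first is clamped to 1 and the second rounds to 5, so 6 threads get scheduled and the
# "Had to add 1 more threads" warning fires.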
def _add_shaper(self, jmx):
"""
Add shaper
:param jmx: JMX
:return:
"""
if not self.load.duration:
self.log.warning("You must set 'ramp-up' and/or 'hold-for' when using 'throughput' option")
return
etree_shaper = jmx.get_rps_shaper()
if self.load.ramp_up:
if isinstance(self.load.throughput, numeric_types) and self.load.duration:
start_rps = self.load.throughput / float(self.load.duration)
else:
start_rps = 1
jmx.add_rps_shaper_schedule(etree_shaper, start_rps, self.load.throughput, self.load.ramp_up)
if self.load.hold:
jmx.add_rps_shaper_schedule(etree_shaper, self.load.throughput, self.load.throughput, self.load.hold)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree_shaper)
jmx.append(JMeterScenarioBuilder.TEST_PLAN_SEL, etree.Element("hashTree"))
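# Example (values are illustrative): throughput=100, ramp-up=60s, hold-for=300s produces
# two shaper schedule rows: a ramp from throughput/duration rps up to 100 rps over 60s,
# then a constant 100 rps held for 300s (duration here is the overall load duration
# computed by executor.get_specific_load()).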
class ProtocolHandler(object):
def __init__(self, sys_props):
super(ProtocolHandler, self).__init__()
self.system_props = sys_props
def get_toplevel_elements(self, scenario):
return []
def get_sampler_pair(self, scenario, request):
return None, None
@staticmethod
def safe_time(any_time):
try:
smart_time = int(1000 * dehumanize_time(any_time))
except TaurusInternalException:
smart_time = any_time
return smart_time
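# Example: safe_time("5s") -> 5000, safe_time("1m") -> 60000; values that
# dehumanize_time() cannot parse (e.g. a "${timeout}" JMeter property string)
# are returned unchanged.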
class JMeterScenarioBuilder(JMX):
"""
Helper to build JMeter test plan from Scenario
:type protocol_handlers: dict[str,ProtocolHandler]
"""
def __init__(self, executor, original=None):
"""
:type executor: ScenarioExecutor
:type original: JMX
"""
super(JMeterScenarioBuilder, self).__init__(original)
self.executor = executor
self.scenario = executor.get_scenario()
self.engine = executor.engine
self.system_props = BetterDict()
self.request_compiler = None
self.default_protocol = self.executor.settings.get('default-protocol', 'http')
self.protocol_handlers = {}
for protocol, cls_name in iteritems(self.executor.settings.get("protocol-handlers")):
cls_obj = load_class(cls_name)
instance = cls_obj(self.system_props)
self.protocol_handlers[protocol] = instance
def __add_think_time(self, children, req):
think_time = req.priority_option('think-time')
if think_time is not None:
children.append(JMX._get_constant_timer(ProtocolHandler.safe_time(think_time)))
children.append(etree.Element("hashTree"))
def __add_extractors(self, children, req):
self.__add_boundary_ext(children, req)
self.__add_regexp_ext(children, req)
self.__add_json_ext(children, req)
self.__add_jquery_ext(children, req)
self.__add_xpath_ext(children, req)
def __add_boundary_ext(self, children, req):
extractors = req.config.get("extract-boundary")
for varname, cfg in iteritems(extractors):
subj = cfg.get('subject', 'body')
left = cfg.get('left', TaurusConfigError("Left boundary is missing for boundary extractor %s" % varname))
right = cfg.get('right', TaurusConfigError("Right boundary is missing for boundary extractor %s" % varname))
match_no = cfg.get('match-no', 1)
defvalue = cfg.get('default', 'NOT_FOUND')
scope = cfg.get("scope", None)
from_var = cfg.get("from-variable", None)
extractor = JMX._get_boundary_extractor(varname, subj, left, right, match_no, defvalue, scope, from_var)
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_regexp_ext(self, children, req):
extractors = req.config.get("extract-regexp")
for varname in extractors:
cfg = ensure_is_dict(extractors, varname, "regexp")
scope = cfg.get("scope", None)
from_var = cfg.get("from-variable", None)
extractor = JMX._get_extractor(varname, cfg.get('subject', 'body'), cfg['regexp'], cfg.get('template', 1),
cfg.get('match-no', 1), cfg.get('default', 'NOT_FOUND'), scope, from_var)
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_json_ext(self, children, req):
jextractors = req.config.get("extract-jsonpath")
for varname in jextractors:
cfg = ensure_is_dict(jextractors, varname, "jsonpath")
if LooseVersion(str(self.executor.settings.get("version"))) < LooseVersion("3.0"):
extractor = JMX._get_json_extractor(varname,
cfg["jsonpath"],
cfg.get("default", "NOT_FOUND"),
cfg.get("from-variable", None))
else:
extractor = JMX._get_internal_json_extractor(varname,
cfg["jsonpath"],
cfg.get("default", "NOT_FOUND"),
cfg.get("scope", None),
cfg.get("from-variable", None),
cfg.get("match-no", "0"),
cfg.get("concat", False))
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_jquery_ext(self, children, req):
css_jquery_extors = req.config.get("extract-css-jquery")
for varname in css_jquery_extors:
cfg = ensure_is_dict(css_jquery_extors, varname, "expression")
extractor = self._get_jquerycss_extractor(varname,
cfg['expression'],
cfg.get('attribute', ""),
cfg.get('match-no', 0),
cfg.get('default', 'NOT_FOUND'),
cfg.get("scope", None),
cfg.get("from-variable", None))
children.append(extractor)
children.append(etree.Element("hashTree"))
def __add_xpath_ext(self, children, req):
xpath_extractors = req.config.get("extract-xpath")
for varname in xpath_extractors:
cfg = ensure_is_dict(xpath_extractors, varname, "xpath")
children.append(JMX._get_xpath_extractor(varname,
cfg['xpath'],
cfg.get('default', 'NOT_FOUND'),
cfg.get('validate-xml', False),
cfg.get('ignore-whitespace', True),
cfg.get("match-no", "-1"),
cfg.get('use-namespaces', False),
cfg.get('use-tolerant-parser', False),
cfg.get("scope", None),
cfg.get("from-variable", None)))
children.append(etree.Element("hashTree"))
@staticmethod
def __add_assertions(children, req):
assertions = req.config.get("assert", [])
for idx, assertion in enumerate(assertions):
assertion = ensure_is_dict(assertions, idx, "contains")
if not isinstance(assertion['contains'], list):
assertion['contains'] = [assertion['contains']]
children.append(JMX._get_resp_assertion(assertion.get("subject", Scenario.FIELD_BODY),
assertion['contains'],
assertion.get('regexp', True),
assertion.get('not', False),
assertion.get('assume-success', False)))
children.append(etree.Element("hashTree"))
jpath_assertions = req.config.get("assert-jsonpath", [])
for idx, assertion in enumerate(jpath_assertions):
assertion = ensure_is_dict(jpath_assertions, idx, "jsonpath")
exc = TaurusConfigError('JSON Path not found in assertion: %s' % assertion)
component = JMX._get_json_path_assertion(assertion.get('jsonpath', exc),
assertion.get('expected-value', ''),
assertion.get('validate', False),
assertion.get('expect-null', False),
assertion.get('invert', False),
assertion.get('regexp', True))
children.append(component)
children.append(etree.Element("hashTree"))
xpath_assertions = req.config.get("assert-xpath", [])
for idx, assertion in enumerate(xpath_assertions):
assertion = ensure_is_dict(xpath_assertions, idx, "xpath")
exc = TaurusConfigError('XPath not found in assertion: %s' % assertion)
component = JMX._get_xpath_assertion(assertion.get('xpath', exc),
assertion.get('validate-xml', False),
assertion.get('ignore-whitespace', True),
assertion.get('use-tolerant-parser', False),
assertion.get('invert', False))
children.append(component)
children.append(etree.Element("hashTree"))
@staticmethod
def __add_jsr_elements(children, req):
"""
:type children: etree.Element
:type req: Request
"""
jsrs = req.config.get("jsr223", [])
if not isinstance(jsrs, list):
jsrs = [jsrs]
for idx, _ in enumerate(jsrs):
jsr = ensure_is_dict(jsrs, idx, sub_key='script-text')
lang = jsr.get("language", "groovy")
script_file = jsr.get("script-file", None)
script_text = jsr.get("script-text", None)
if not script_file and not script_text:
raise TaurusConfigError("jsr223 element must specify one of 'script-file' or 'script-text'")
parameters = jsr.get("parameters", "")
execute = jsr.get("execute", "after")
cache_key = str(jsr.get("compile-cache", True)).lower()
children.append(JMX._get_jsr223_element(lang, script_file, parameters, execute, script_text, cache_key))
children.append(etree.Element("hashTree"))
def __gen_requests(self, scenario):
requests = scenario.get_requests(parser=HierarchicRequestParser)
elements = []
for compiled in self.compile_requests(requests):
elements.extend(compiled)
return elements
def compile_scenario(self, scenario):
elements = []
for _, protocol in iteritems(self.protocol_handlers):
elements.extend(protocol.get_toplevel_elements(scenario))
elements.extend(self.__gen_authorization(scenario))
elements.extend(self.__gen_datasources(scenario))
elements.extend(self.__gen_requests(scenario))
return elements
def compile_request(self, request):
"""
:type request: HierarchicHTTPRequest
:return:
"""
sampler = children = None
protocol_name = request.priority_option('protocol', default=self.default_protocol)
if protocol_name in self.protocol_handlers:
protocol = self.protocol_handlers[protocol_name]
sampler, children = protocol.get_sampler_pair(self.scenario, request)
if sampler is None:
self.log.warning("Problematic request: %s", request.config)
raise TaurusInternalException("Unable to handle request, please review missing options")
self.__add_think_time(children, request)
self.__add_assertions(children, request)
timeout = ProtocolHandler.safe_time(request.priority_option('timeout'))
if timeout is not None:
children.append(JMX._get_dur_assertion(timeout))
children.append(etree.Element("hashTree"))
self.__add_extractors(children, request)
self.__add_jsr_elements(children, request)
return [sampler, children]
def compile_if_block(self, block):
elements = []
# TODO: pass jmeter IfController options
if_controller = JMX._get_if_controller(block.condition)
then_children = etree.Element("hashTree")
for compiled in self.compile_requests(block.then_clause):
for element in compiled:
then_children.append(element)
elements.extend([if_controller, then_children])
if block.else_clause:
inverted_condition = "!(" + block.condition + ")"
else_controller = JMX._get_if_controller(inverted_condition)
else_children = etree.Element("hashTree")
for compiled in self.compile_requests(block.else_clause):
for element in compiled:
else_children.append(element)
elements.extend([else_controller, else_children])
return elements
def compile_once_block(self, block):
elements = []
once_controller = JMX._get_once_controller()
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([once_controller, children])
return elements
def compile_loop_block(self, block):
elements = []
loop_controller = JMX._get_loop_controller(block.loops)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([loop_controller, children])
return elements
def compile_while_block(self, block):
elements = []
controller = JMX._get_while_controller(block.condition)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_foreach_block(self, block):
"""
:type block: ForEachBlock
"""
elements = []
controller = JMX._get_foreach_controller(block.input_var, block.loop_var)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_transaction_block(self, block):
elements = []
controller = JMX._get_transaction_controller(block.name,
block.priority_option('force-parent-sample', False),
block.include_timers)
children = etree.Element("hashTree")
for compiled in self.compile_requests(block.requests):
for element in compiled:
children.append(element)
elements.extend([controller, children])
return elements
def compile_include_scenario_block(self, block):
elements = []
controller = JMX._get_simple_controller(block.scenario_name)
children = etree.Element("hashTree")
scenario = self.executor.get_scenario(name=block.scenario_name)
for element in self.compile_scenario(scenario):
children.append(element)
elements.extend([controller, children])
return elements
def compile_action_block(self, block):
"""
:type block: ActionBlock
:return:
"""
actions = {
'stop': 0,
'pause': 1,
'stop-now': 2,
'continue': 3,
}
targets = {'current-thread': 0, 'all-threads': 2}
action = actions[block.action]
target = targets[block.target]
duration = 0
if block.duration is not None:
duration = int(block.duration * 1000)
test_action = JMX._get_action_block(action, target, duration)
children = etree.Element("hashTree")
self.__add_jsr_elements(children, block)
return [test_action, children]
@staticmethod
def compile_set_variables_block(block):
set_var_action = JMX.get_set_var_action(block.mapping)
hashtree = etree.Element("hashTree")
return [set_var_action, hashtree]
def compile_requests(self, requests):
if self.request_compiler is None:
self.request_compiler = RequestCompiler(self)
compiled = []
for request in requests:
compiled.append(self.request_compiler.visit(request))
self.request_compiler.clear_path_cache()
return compiled
def __generate(self):
"""
Generate the test plan
"""
thread_group = JMX.get_thread_group(testname=self.executor.label)
thread_group_ht = etree.Element("hashTree", type="tg")
# NOTE: set realistic dns-cache and JVM prop by default?
self.request_compiler = RequestCompiler(self)
for element in self.compile_scenario(self.scenario):
thread_group_ht.append(element)
results_tree = self._get_results_tree()
results_tree_ht = etree.Element("hashTree")
self.append(self.TEST_PLAN_SEL, thread_group)
self.append(self.TEST_PLAN_SEL, thread_group_ht)
self.append(self.TEST_PLAN_SEL, results_tree)
self.append(self.TEST_PLAN_SEL, results_tree_ht)
def save(self, filename):
"""
Generate test plan and save
:type filename: str
"""
# NOTE: bad design, as repetitive save will duplicate stuff
self.__generate()
super(JMeterScenarioBuilder, self).save(filename)
@staticmethod
def __gen_authorization(scenario):
"""
Generates HTTP Authorization Manager
"""
elements = []
authorizations = scenario.get("authorization")
if authorizations:
clear_flag = False
if isinstance(authorizations, dict):
if "clear" in authorizations or "list" in authorizations: # full form
clear_flag = authorizations.get("clear", False)
authorizations = authorizations.get("list", [])
else:
authorizations = [authorizations] # short form
if not isinstance(authorizations, list):
raise TaurusConfigError("Wrong authorization format: %s" % authorizations)
auth_manager = JMX.get_auth_manager(authorizations, clear_flag)
elements.append(auth_manager)
elements.append(etree.Element("hashTree"))
return elements
def __gen_datasources(self, scenario):
sources = scenario.get("data-sources")
if not sources:
return []
if not isinstance(sources, list):
raise TaurusConfigError("data-sources '%s' is not a list" % sources)
elements = []
for idx, source in enumerate(sources):
source = ensure_is_dict(sources, idx, "path")
source_path = source["path"]
delimiter = source.get("delimiter")
if has_variable_pattern(source_path):
msg = "Path to CSV contains JMeter variable/function, can't check for file existence: %s"
self.log.warning(msg, source_path)
if not delimiter:
delimiter = ','
self.log.warning("Can't detect CSV dialect, default delimiter will be '%s'", delimiter)
else:
source_path = self.executor.engine.find_file(source_path)
if not os.path.isfile(source_path):
raise TaurusConfigError("data-sources path not found: %s" % source_path)
if not delimiter:
delimiter = self.__guess_delimiter(source_path)
config = JMX._get_csv_config(source_path, delimiter, source.get("quoted", False), source.get("loop", True),
source.get("variable-names", ""))
elements.append(config)
elements.append(etree.Element("hashTree"))
return elements
def __guess_delimiter(self, path):
with open(path) as fhd:
header = fhd.read(4096) # 4KB is enough for header
try:
delimiter = guess_csv_dialect(header).delimiter
except BaseException as exc:
self.log.debug(traceback.format_exc())
self.log.warning('CSV dialect detection failed (%s), default delimiter selected (",")', exc)
delimiter = "," # default value
return delimiter
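# For reference, guess_csv_dialect() is expected to behave like csv.Sniffer
# (assumption -- its implementation lives in bzt.utils and is not shown here):
# import csv
# csv.Sniffer().sniff("a;b;c\n1;2;3\n").delimiter  # -> ';'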
|
py | b410c66174b203fe72fe31da6125dab796f2197c | import os
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
def get_mongo_connect_string():
return os.environ.get("MONGO_CONNECT_STRING", "")
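# Example usage (hypothetical caller; assumes MONGO_CONNECT_STRING is defined in a local .env):
# from pymongo import MongoClient
# client = MongoClient(get_mongo_connect_string())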
|
py | b410c7157832e7b5ae645df95ccdd922ffad9267 | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
from cryptoapis.model.banned_ip_address_details import BannedIpAddressDetails
from cryptoapis.model.invalid_api_key import InvalidApiKey
from cryptoapis.model.missing_api_key import MissingApiKey
globals()['BannedIpAddressDetails'] = BannedIpAddressDetails
globals()['InvalidApiKey'] = InvalidApiKey
globals()['MissingApiKey'] = MissingApiKey
class GetBlockDetailsByBlockHeightE401(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'details': ([BannedIpAddressDetails],), # noqa: E501
'code': (str,), # noqa: E501
'message': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'details': 'details', # noqa: E501
'code': 'code', # noqa: E501
'message': 'message', # noqa: E501
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""GetBlockDetailsByBlockHeightE401 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
details ([BannedIpAddressDetails]): [optional] # noqa: E501
code (str): Specifies an error code, e.g. error 404.. [optional] # noqa: E501
message (str): Specifies the message of the error, i.e. why the error was returned, e.g. error 404 stands for “not found”.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""GetBlockDetailsByBlockHeightE401 - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
details ([BannedIpAddressDetails]): [optional] # noqa: E501
code (str): Specifies an error code, e.g. error 404.. [optional] # noqa: E501
message (str): Specifies the message of the error, i.e. why the error was returned, e.g. error 404 stands for “not found”.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
],
'oneOf': [
InvalidApiKey,
MissingApiKey,
],
}
|
py | b410c7a5e93120444d84a4085996ef76ff0970a9 | import asyncio
import logging
import os
import sys
from aiorm import orm
from pyramid_aiorm import includeme
from creds import models
log = logging.getLogger(__name__)
@asyncio.coroutine
def get_group(transaction, group_name):
group = yield from models.Group.by_name(transaction, group_name)
if not group:
group = models.Group(name=group_name)
yield from orm.Insert(group).run(transaction)
return group
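# Example (sketch, assuming an already-open aiorm transaction named `trans`):
# group = yield from get_group(trans, 'admins')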
@asyncio.coroutine
def setup(config):
log.info('Setup application')
log.info('Connecting to the database')
yield from includeme(config)
# with (yield from orm.transaction('creds')) as trans:
trans = orm.Transaction('creds')
try:
log.info('Creating the database schema')
yield from orm.CreateSchema('creds').run(trans)
yield from trans.commit()
except Exception:
log.exception('Unexpected exception')
yield from trans.rollback()
|
py | b410c7f1a4ff62070b1c935562f988875d565434 | #########################################################################
##
## Structure of network.
##
#########################################################################
import torch
import torch.nn as nn
from util_hourglass import *
####################################################################
##
## lane_detection_network
##
####################################################################
class lane_detection_network(nn.Module):
def __init__(self):
super(lane_detection_network, self).__init__()
self.resizing = resize_layer(3, 128)
#feature extraction
self.layer1 = hourglass_block(128, 128)
self.layer2 = hourglass_block(128, 128)
self.layer3 = hourglass_block(128, 128)
self.layer4 = hourglass_block(128, 128)
def forward(self, inputs):
#feature extraction
out = self.resizing(inputs)
result1, out, feature1 = self.layer1(out)
result2, out, feature2 = self.layer2(out)
result3, out, feature3 = self.layer3(out)
result4, out, feature4 = self.layer4(out)
return [result1, result2, result3, result4], [feature1, feature2, feature3, feature4]
#return [result1], [feature1]
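# Minimal smoke test of the forward pass (output shapes depend on the blocks
# defined in util_hourglass, which are not shown here):
# net = lane_detection_network()
# results, features = net(torch.randn(1, 3, 256, 512))
# print(len(results), len(features))  # -> 4 4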
|
py | b410c886aaee153b1e15960a5f969038f46f2497 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-01-15 13:45
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
dependencies = [
('studio', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='task',
name='slug',
field=models.UUIDField(blank=True, default=uuid.uuid4, editable=False),
),
]
|
py | b410c894b1094a9672cab3207fa731455f3eb691 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-09 12:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('personal', '0004_tipopersonal_slug'),
]
operations = [
migrations.AddField(
model_name='personal',
name='rne',
field=models.CharField(blank=True, max_length=10, null=True),
),
migrations.AlterField(
model_name='personal',
name='especialidad',
field=models.CharField(blank=True, max_length=300, null=True),
),
migrations.AlterField(
model_name='personal',
name='inforhus',
field=models.CharField(blank=True, max_length=20, null=True),
),
]
|
py | b410c8cc6dc3e90de869e8f693ece6a85c1fa5b4 | import logging
import torch
import torch.nn as nn
from torch.nn.modules.conv import _ConvNd
from count_hooks import *
import os
import sys
import numpy as np
sys.path.append('.')
import darknet
import argparse
import shufflenetv2
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
register_hooks = {
nn.Conv1d: count_convNd,
nn.Conv2d: count_convNd,
nn.Conv3d: count_convNd,
nn.ConvTranspose1d: count_convNd,
nn.ConvTranspose2d: count_convNd,
nn.ConvTranspose3d: count_convNd,
nn.BatchNorm1d: count_bn,
nn.BatchNorm2d: count_bn,
nn.BatchNorm3d: count_bn,
nn.ReLU: zero_ops,
nn.ReLU6: zero_ops,
nn.LeakyReLU: count_relu,
nn.MaxPool1d: zero_ops,
nn.MaxPool2d: zero_ops,
nn.MaxPool3d: zero_ops,
nn.AdaptiveMaxPool1d: zero_ops,
nn.AdaptiveMaxPool2d: zero_ops,
nn.AdaptiveMaxPool3d: zero_ops,
nn.AvgPool1d: count_avgpool,
nn.AvgPool2d: count_avgpool,
nn.AvgPool3d: count_avgpool,
nn.AdaptiveAvgPool1d: count_adap_avgpool,
nn.AdaptiveAvgPool2d: count_adap_avgpool,
nn.AdaptiveAvgPool3d: count_adap_avgpool,
nn.Linear: count_linear,
nn.Dropout: zero_ops,
nn.Upsample: count_upsample,
nn.UpsamplingBilinear2d: count_upsample,
nn.UpsamplingNearest2d: count_upsample
}
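# profile() registers the counters above as forward hooks on every leaf module,
# runs a single forward pass, and sums the per-module total_ops/total_params
# buffers; the hooks and buffers are removed again before returning.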
def profile(model, inputs, custom_ops=None, verbose=True):
handler_collection = []
if custom_ops is None:
custom_ops = {}
def add_hooks(m):
if len(list(m.children())) > 0:
return
if hasattr(m, "total_ops") or hasattr(m, "total_params"):
logger.warning("Either .total_ops or .total_params is already defined in %s."
"Be careful, it might change your code's behavior." % str(m))
m.register_buffer('total_ops', torch.zeros(1))
m.register_buffer('total_params', torch.zeros(1))
for p in m.parameters():
m.total_params += torch.Tensor([p.numel()])
m_type = type(m)
fn = None
if m_type in custom_ops: # if defined both op maps, use custom_ops to overwrite.
fn = custom_ops[m_type]
elif m_type in register_hooks:
fn = register_hooks[m_type]
if fn is None:
if verbose:
print("THOP has not implemented counting method for ", m)
else:
if verbose:
print("Register FLOP counter for module %s" % str(m))
handler = m.register_forward_hook(fn)
handler_collection.append(handler)
training = model.training
model.eval()
model.apply(add_hooks)
with torch.no_grad():
model(*inputs)
total_ops = 0
total_params = 0
for m in model.modules():
if len(list(m.children())) > 0: # skip for non-leaf module
continue
total_ops += m.total_ops
total_params += m.total_params
total_ops = total_ops.item()
total_params = total_params.item()
# reset model to original status
model.train(training)
for handler in handler_collection:
handler.remove()
# remove temporal buffers
for n, m in model.named_modules():
if len(list(m.children())) > 0:
continue
if "total_ops" in m._buffers:
m._buffers.pop("total_ops")
if "total_params" in m._buffers:
m._buffers.pop("total_params")
return total_ops, total_params
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--in-size', type=str, default='416,416', help='network input size')
parser.add_argument('--model', type=str, default='', help='model file')
parser.add_argument('--dataset', type=str, default='', help='dataset path')
parser.add_argument('--num-classes', type=int, default=3, help='number of classes')
parser.add_argument('--pruned-model', '-pm', action='store_true')
parser.add_argument('--backbone', type=str, default='darknet53', help='backbone architecture[darknet53(default),shufflenetv2]')
args = parser.parse_args()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
in_size = [int(insz) for insz in args.in_size.split(',')]
if not args.pruned_model:
anchors = np.loadtxt(os.path.join(args.dataset, 'anchors.txt'))
if args.backbone == 'darknet53':
model = darknet.DarkNet(anchors, in_size, num_classes=args.num_classes).to(device)
elif args.backbone == 'shufflenetv2':
model = shufflenetv2.ShuffleNetV2(anchors, in_size=in_size, num_classes=args.num_classes).to(device)
else:
print('unknown backbone architecture!')
sys.exit(0)
model.load_state_dict(torch.load(args.model, map_location=device))
else:
model = torch.load(args.model, map_location=device)
input = torch.randn(1, 3, in_size[0], in_size[1]).to(device)
flops, params = profile(model, inputs=(input, ))
print(f'flops={flops}, params={params}') |
py | b410c8e9126a7976164f26b6dd67c4926584f529 | #%%
import matplotlib.pyplot as plt
import PIL
import numpy as np
import os
import tensorflow as tf
import keras
from keras import layers
from keras.models import Model,Sequential
import pathlib
from tensorflow._api.v2 import data
from tensorflow.keras import preprocessing
from tensorflow.keras.layers import experimental
from tensorflow.python.keras.layers.preprocessing import normalization
from tensorflow.python.keras.layers.preprocessing.image_preprocessing import Rescaling
#%%
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = keras.utils.get_file('flower_photos',origin=dataset_url,untar=True)
data_dir=pathlib.Path(data_dir)
#%%
image_count=len(list(data_dir.glob('*/*.jpg')))
print(image_count)
#%%
roses =list(data_dir.glob('roses/*'))
PIL.Image.open(str(roses[0]))
# %%
batch_size=32
img_ht = 180
img_bt = 180
# %%
train_ds = keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_ht,img_bt),
batch_size=batch_size
)
val_ds = keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_ht,img_bt),
batch_size=batch_size
)
# %%
class_names = train_ds.class_names
print(class_names)
# %%
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
# %%
for image_batch, labels_batch in train_ds:
print(image_batch.shape)
print(labels_batch.shape)
break
# %%
Autotune = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size= Autotune)
val_ds = val_ds.cache().prefetch(buffer_size=Autotune)
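# cache() keeps decoded images in memory after the first epoch and prefetch()
# overlaps input preprocessing with training on the accelerator.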
# %%
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)
# %%
normal_ds = train_ds.map(lambda x,y: (normalization_layer(x),y))
image_batch,labels_batch = next(iter(normal_ds))
first_image=image_batch[0]
print(np.min(first_image), np.max(first_image))
# %%
num_classes = len(class_names)
model=Sequential([
layers.experimental.preprocessing.Rescaling(1./255,input_shape=(img_ht,img_bt,3)),
layers.Conv2D(16,3,padding='same',activation='relu'),
layers.MaxPool2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes)
])
model.compile(
optimizer='adam',
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy']
)
# %%
model.summary()
# %%
epochs=10
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
# %%
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# %%
data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal",
input_shape=(img_ht,
img_bt,
3)),
layers.experimental.preprocessing.RandomRotation(0.1),
layers.experimental.preprocessing.RandomZoom(0.1),
]
)
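# Note: these preprocessing augmentation layers are only active during training;
# at inference time (model.evaluate / model.predict) they pass inputs through unchanged.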
# %%
model = Sequential([
data_augmentation,
layers.experimental.preprocessing.Rescaling(1./255),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Dropout(0.2),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes)
])
# %%
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# %%
model.summary()
# %%
epochs = 15
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
# %%
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# %%
|
py | b410c9445f5f72d6c341e140395581d5481c2469 | #-*- coding:utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import warnings
warnings.filterwarnings('ignore')
import torch
import pickle
import argparse
from utils.timer import Timer
import torch.backends.cudnn as cudnn
from layers.functions import Detect, PriorBox
from data import BaseTransform
from configs.CC import Config
from peleenet import build_net
from tqdm import tqdm
from utils.core import *
parser = argparse.ArgumentParser(description='Pelee Evaluation')
parser.add_argument(
'-c', '--config', default='configs/Pelee_VOC.py', type=str)
parser.add_argument('-d', '--dataset', default='VOC',
help='VOC or COCO version')
parser.add_argument('-m', '--trained_model', default=None,
type=str, help='Trained state_dict file path to open')
parser.add_argument('--test', action='store_true',
help='to submit a test file')
args = parser.parse_args()
print_info('----------------------------------------------------------------------\n'
'| Pelee Evaluation Program |\n'
'----------------------------------------------------------------------', ['yellow', 'bold'])
global cfg
cfg = Config.fromfile(args.config)
if not os.path.exists(cfg.test_cfg.save_folder):
os.mkdir(cfg.test_cfg.save_folder)
anchor_config = anchors(cfg.model)
print_info('The Anchor info: \n{}'.format(anchor_config))
priorbox = PriorBox(anchor_config)
with torch.no_grad():
priors = priorbox.forward()
if cfg.test_cfg.cuda:
priors = priors.cuda()
num_classes = cfg.model.num_classes
def test_net(save_folder, net, detector, cuda, testset, transform, max_per_image=300, thresh=0.005):
if not os.path.exists(save_folder):
os.mkdir(save_folder)
num_images = len(testset)
print_info('=> Total {} images to test.'.format(
num_images), ['yellow', 'bold'])
all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
_t = {'im_detect': Timer(), 'misc': Timer()}
det_file = os.path.join(save_folder, 'detections.pkl')
tot_detect_time, tot_nms_time = 0, 0
print_info('Begin to evaluate', ['yellow', 'bold'])
for i in tqdm(range(num_images)):
img = testset.pull_image(i)
# step1: CNN detection
_t['im_detect'].tic()
boxes, scores = image_forward(
img, net, cuda, priors, detector, transform)
detect_time = _t['im_detect'].toc()
# step2: Post-process: NMS
_t['misc'].tic()
nms_process(num_classes, i, scores, boxes, cfg,
thresh, all_boxes, max_per_image)
nms_time = _t['misc'].toc()
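# skip the first image when accumulating timings so model warm-up doesn't skew the FPS numbers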
tot_detect_time += detect_time if i > 0 else 0
tot_nms_time += nms_time if i > 0 else 0
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print_info('===> Evaluating detections', ['yellow', 'bold'])
testset.evaluate_detections(all_boxes, save_folder)
print_info('Detect time per image: {:.3f}s'.format(
tot_detect_time / (num_images - 1)))
print_info('Nms time per image: {:.3f}s'.format(
tot_nms_time / (num_images - 1)))
print_info('Total time per image: {:.3f}s'.format(
(tot_detect_time + tot_nms_time) / (num_images - 1)))
print_info('FPS: {:.3f} fps'.format(
(num_images - 1) / (tot_detect_time + tot_nms_time)))
if __name__ == '__main__':
net = build_net('test', cfg.model.input_size, cfg.model)
init_net(net, cfg, args.trained_model)
print_info('===> Finished constructing and loading model',
['yellow', 'bold'])
net.eval()
_set = 'eval_sets' if not args.test else 'test_sets'
testset = get_dataloader(cfg, args.dataset, _set)
if cfg.test_cfg.cuda:
net = net.cuda()
cudnn.benchmark = True
else:
net = net.cpu()
detector = Detect(num_classes, cfg.loss.bkg_label, anchor_config)
save_folder = os.path.join(cfg.test_cfg.save_folder, args.dataset)
_preprocess = BaseTransform(
cfg.model.input_size, cfg.model.rgb_means, (2, 0, 1))
test_net(save_folder,
net,
detector,
cfg.test_cfg.cuda,
testset,
transform=_preprocess,
max_per_image=cfg.test_cfg.topk,
thresh=cfg.test_cfg.score_threshold)
|
py | b410ca748256ee210f3dae6b8531fb2d950928c8 | # coding: utf-8
import itertools
from operator import itemgetter
import re
import sqlalchemy as sa
from sqlalchemy import Column
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import join
from sqlalchemy import MetaData
from sqlalchemy import PrimaryKeyConstraint
from sqlalchemy import Sequence
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import UniqueConstraint
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.dialects.postgresql import base as postgresql
from sqlalchemy.dialects.postgresql import ExcludeConstraint
from sqlalchemy.dialects.postgresql import INTEGER
from sqlalchemy.dialects.postgresql import INTERVAL
from sqlalchemy.dialects.postgresql import TSRANGE
from sqlalchemy.engine import reflection
from sqlalchemy.sql.schema import CheckConstraint
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import mock
from sqlalchemy.testing.assertions import assert_raises
from sqlalchemy.testing.assertions import AssertsExecutionResults
from sqlalchemy.testing.assertions import eq_
class ForeignTableReflectionTest(fixtures.TablesTest, AssertsExecutionResults):
"""Test reflection on foreign tables"""
__requires__ = ("postgresql_test_dblink",)
__only_on__ = "postgresql >= 9.3"
__backend__ = True
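# __requires__/__only_on__ gate this suite to databases that have a configured
# test dblink and PostgreSQL >= 9.3; __backend__ flags it to run against backend databases.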
@classmethod
def define_tables(cls, metadata):
from sqlalchemy.testing import config
dblink = config.file_config.get(
"sqla_testing", "postgres_test_db_link"
)
Table(
"testtable",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(30)),
)
for ddl in [
"CREATE SERVER test_server FOREIGN DATA WRAPPER postgres_fdw "
"OPTIONS (dbname 'test', host '%s')" % dblink,
"CREATE USER MAPPING FOR public \
SERVER test_server options (user 'scott', password 'tiger')",
"CREATE FOREIGN TABLE test_foreigntable ( "
" id INT, "
" data VARCHAR(30) "
") SERVER test_server OPTIONS (table_name 'testtable')",
]:
sa.event.listen(metadata, "after_create", sa.DDL(ddl))
for ddl in [
"DROP FOREIGN TABLE test_foreigntable",
"DROP USER MAPPING FOR public SERVER test_server",
"DROP SERVER test_server",
]:
sa.event.listen(metadata, "before_drop", sa.DDL(ddl))
def test_foreign_table_is_reflected(self):
metadata = MetaData(testing.db)
table = Table("test_foreigntable", metadata, autoload=True)
eq_(
set(table.columns.keys()),
set(["id", "data"]),
"Columns of reflected foreign table didn't equal expected columns",
)
def test_get_foreign_table_names(self):
inspector = inspect(testing.db)
with testing.db.connect():
ft_names = inspector.get_foreign_table_names()
eq_(ft_names, ["test_foreigntable"])
def test_get_table_names_no_foreign(self):
inspector = inspect(testing.db)
with testing.db.connect():
names = inspector.get_table_names()
eq_(names, ["testtable"])
class PartitionedReflectionTest(fixtures.TablesTest, AssertsExecutionResults):
# partitioned table reflection, issue #4237
__only_on__ = "postgresql >= 10"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
# the actual function isn't reflected yet
dv = Table(
"data_values",
metadata,
Column("modulus", Integer, nullable=False),
Column("data", String(30)),
Column("q", Integer),
postgresql_partition_by="range(modulus)",
)
# looks like this is reflected prior to #4237
sa.event.listen(
dv,
"after_create",
sa.DDL(
"CREATE TABLE data_values_4_10 PARTITION OF data_values "
"FOR VALUES FROM (4) TO (10)"
),
)
if testing.against("postgresql >= 11"):
Index("my_index", dv.c.q)
def test_get_tablenames(self):
assert {"data_values", "data_values_4_10"}.issubset(
inspect(testing.db).get_table_names()
)
def test_reflect_cols(self):
cols = inspect(testing.db).get_columns("data_values")
eq_([c["name"] for c in cols], ["modulus", "data", "q"])
def test_reflect_cols_from_partition(self):
cols = inspect(testing.db).get_columns("data_values_4_10")
eq_([c["name"] for c in cols], ["modulus", "data", "q"])
@testing.only_on("postgresql >= 11")
def test_reflect_index(self):
idx = inspect(testing.db).get_indexes("data_values")
eq_(
idx, [{"column_names": ["q"], "name": "my_index", "unique": False}]
)
@testing.only_on("postgresql >= 11")
def test_reflect_index_from_partition(self):
idx = inspect(testing.db).get_indexes("data_values_4_10")
# note the name appears to be generated by PG, currently
# 'data_values_4_10_q_idx'
eq_(idx, [{"column_names": ["q"], "name": mock.ANY, "unique": False}])
class MaterializedViewReflectionTest(
fixtures.TablesTest, AssertsExecutionResults
):
"""Test reflection on materialized views"""
__only_on__ = "postgresql >= 9.3"
__backend__ = True
@classmethod
def define_tables(cls, metadata):
testtable = Table(
"testtable",
metadata,
Column("id", Integer, primary_key=True),
Column("data", String(30)),
)
# insert data before we create the view
@sa.event.listens_for(testtable, "after_create")
def insert_data(target, connection, **kw):
connection.execute(target.insert(), {"id": 89, "data": "d1"})
materialized_view = sa.DDL(
"CREATE MATERIALIZED VIEW test_mview AS " "SELECT * FROM testtable"
)
plain_view = sa.DDL(
"CREATE VIEW test_regview AS " "SELECT * FROM testtable"
)
sa.event.listen(testtable, "after_create", plain_view)
sa.event.listen(testtable, "after_create", materialized_view)
sa.event.listen(
testtable,
"before_drop",
sa.DDL("DROP MATERIALIZED VIEW test_mview"),
)
sa.event.listen(
testtable, "before_drop", sa.DDL("DROP VIEW test_regview")
)
def test_mview_is_reflected(self):
metadata = MetaData(testing.db)
table = Table("test_mview", metadata, autoload=True)
eq_(
set(table.columns.keys()),
set(["id", "data"]),
"Columns of reflected mview didn't equal expected columns",
)
def test_mview_select(self):
metadata = MetaData(testing.db)
table = Table("test_mview", metadata, autoload=True)
eq_(table.select().execute().fetchall(), [(89, "d1")])
def test_get_view_names(self):
insp = inspect(testing.db)
eq_(set(insp.get_view_names()), set(["test_regview", "test_mview"]))
def test_get_view_names_plain(self):
insp = inspect(testing.db)
eq_(
set(insp.get_view_names(include=("plain",))), set(["test_regview"])
)
def test_get_view_names_plain_string(self):
insp = inspect(testing.db)
eq_(set(insp.get_view_names(include="plain")), set(["test_regview"]))
def test_get_view_names_materialized(self):
insp = inspect(testing.db)
eq_(
set(insp.get_view_names(include=("materialized",))),
set(["test_mview"]),
)
def test_get_view_names_reflection_cache_ok(self):
insp = inspect(testing.db)
eq_(
set(insp.get_view_names(include=("plain",))), set(["test_regview"])
)
eq_(
set(insp.get_view_names(include=("materialized",))),
set(["test_mview"]),
)
eq_(set(insp.get_view_names()), set(["test_regview", "test_mview"]))
def test_get_view_names_empty(self):
insp = inspect(testing.db)
assert_raises(ValueError, insp.get_view_names, include=())
def test_get_view_definition(self):
insp = inspect(testing.db)
eq_(
re.sub(
r"[\n\t ]+",
" ",
insp.get_view_definition("test_mview").strip(),
),
"SELECT testtable.id, testtable.data FROM testtable;",
)
class DomainReflectionTest(fixtures.TestBase, AssertsExecutionResults):
"""Test PostgreSQL domains"""
__only_on__ = "postgresql > 8.3"
__backend__ = True
@classmethod
def setup_class(cls):
con = testing.db.connect()
for ddl in [
'CREATE SCHEMA "SomeSchema"',
"CREATE DOMAIN testdomain INTEGER NOT NULL DEFAULT 42",
"CREATE DOMAIN test_schema.testdomain INTEGER DEFAULT 0",
"CREATE TYPE testtype AS ENUM ('test')",
"CREATE DOMAIN enumdomain AS testtype",
"CREATE DOMAIN arraydomain AS INTEGER[]",
'CREATE DOMAIN "SomeSchema"."Quoted.Domain" INTEGER DEFAULT 0',
]:
try:
con.execute(ddl)
except exc.DBAPIError as e:
if "already exists" not in str(e):
raise e
con.execute(
"CREATE TABLE testtable (question integer, answer " "testdomain)"
)
con.execute(
"CREATE TABLE test_schema.testtable(question "
"integer, answer test_schema.testdomain, anything "
"integer)"
)
con.execute(
"CREATE TABLE crosschema (question integer, answer "
"test_schema.testdomain)"
)
con.execute("CREATE TABLE enum_test (id integer, data enumdomain)")
con.execute("CREATE TABLE array_test (id integer, data arraydomain)")
con.execute(
"CREATE TABLE quote_test "
'(id integer, data "SomeSchema"."Quoted.Domain")'
)
@classmethod
def teardown_class(cls):
con = testing.db.connect()
con.execute("DROP TABLE testtable")
con.execute("DROP TABLE test_schema.testtable")
con.execute("DROP TABLE crosschema")
con.execute("DROP TABLE quote_test")
con.execute("DROP DOMAIN testdomain")
con.execute("DROP DOMAIN test_schema.testdomain")
con.execute("DROP TABLE enum_test")
con.execute("DROP DOMAIN enumdomain")
con.execute("DROP TYPE testtype")
con.execute("DROP TABLE array_test")
con.execute("DROP DOMAIN arraydomain")
con.execute('DROP DOMAIN "SomeSchema"."Quoted.Domain"')
con.execute('DROP SCHEMA "SomeSchema"')
def test_table_is_reflected(self):
metadata = MetaData(testing.db)
table = Table("testtable", metadata, autoload=True)
eq_(
set(table.columns.keys()),
set(["question", "answer"]),
"Columns of reflected table didn't equal expected columns",
)
assert isinstance(table.c.answer.type, Integer)
def test_domain_is_reflected(self):
metadata = MetaData(testing.db)
table = Table("testtable", metadata, autoload=True)
eq_(
str(table.columns.answer.server_default.arg),
"42",
"Reflected default value didn't equal expected value",
)
assert (
not table.columns.answer.nullable
), "Expected reflected column to not be nullable."
def test_enum_domain_is_reflected(self):
metadata = MetaData(testing.db)
table = Table("enum_test", metadata, autoload=True)
eq_(table.c.data.type.enums, ["test"])
def test_array_domain_is_reflected(self):
metadata = MetaData(testing.db)
table = Table("array_test", metadata, autoload=True)
eq_(table.c.data.type.__class__, ARRAY)
eq_(table.c.data.type.item_type.__class__, INTEGER)
def test_quoted_remote_schema_domain_is_reflected(self):
metadata = MetaData(testing.db)
table = Table("quote_test", metadata, autoload=True)
eq_(table.c.data.type.__class__, INTEGER)
def test_table_is_reflected_test_schema(self):
metadata = MetaData(testing.db)
table = Table(
"testtable", metadata, autoload=True, schema="test_schema"
)
eq_(
set(table.columns.keys()),
set(["question", "answer", "anything"]),
"Columns of reflected table didn't equal expected columns",
)
assert isinstance(table.c.anything.type, Integer)
def test_schema_domain_is_reflected(self):
metadata = MetaData(testing.db)
table = Table(
"testtable", metadata, autoload=True, schema="test_schema"
)
eq_(
str(table.columns.answer.server_default.arg),
"0",
"Reflected default value didn't equal expected value",
)
assert (
table.columns.answer.nullable
), "Expected reflected column to be nullable."
def test_crosschema_domain_is_reflected(self):
metadata = MetaData(testing.db)
table = Table("crosschema", metadata, autoload=True)
eq_(
str(table.columns.answer.server_default.arg),
"0",
"Reflected default value didn't equal expected value",
)
assert (
table.columns.answer.nullable
), "Expected reflected column to be nullable."
def test_unknown_types(self):
from sqlalchemy.databases import postgresql
ischema_names = postgresql.PGDialect.ischema_names
postgresql.PGDialect.ischema_names = {}
try:
m2 = MetaData(testing.db)
assert_raises(exc.SAWarning, Table, "testtable", m2, autoload=True)
@testing.emits_warning("Did not recognize type")
def warns():
m3 = MetaData(testing.db)
t3 = Table("testtable", m3, autoload=True)
assert t3.c.answer.type.__class__ == sa.types.NullType
finally:
postgresql.PGDialect.ischema_names = ischema_names
class ReflectionTest(fixtures.TestBase):
__only_on__ = "postgresql"
__backend__ = True
@testing.fails_if(
"postgresql < 8.4", "Better int2vector functions not available"
)
@testing.provide_metadata
def test_reflected_primary_key_order(self):
meta1 = self.metadata
subject = Table(
"subject",
meta1,
Column("p1", Integer, primary_key=True),
Column("p2", Integer, primary_key=True),
PrimaryKeyConstraint("p2", "p1"),
)
meta1.create_all()
meta2 = MetaData(testing.db)
subject = Table("subject", meta2, autoload=True)
eq_(subject.primary_key.columns.keys(), ["p2", "p1"])
@testing.provide_metadata
def test_pg_weirdchar_reflection(self):
meta1 = self.metadata
subject = Table(
"subject", meta1, Column("id$", Integer, primary_key=True)
)
referer = Table(
"referer",
meta1,
Column("id", Integer, primary_key=True),
Column("ref", Integer, ForeignKey("subject.id$")),
)
meta1.create_all()
meta2 = MetaData(testing.db)
subject = Table("subject", meta2, autoload=True)
referer = Table("referer", meta2, autoload=True)
self.assert_(
(subject.c["id$"] == referer.c.ref).compare(
subject.join(referer).onclause
)
)
@testing.provide_metadata
def test_reflect_default_over_128_chars(self):
Table(
"t",
self.metadata,
Column("x", String(200), server_default="abcd" * 40),
).create(testing.db)
m = MetaData()
t = Table("t", m, autoload=True, autoload_with=testing.db)
eq_(
t.c.x.server_default.arg.text,
"'%s'::character varying" % ("abcd" * 40),
)
@testing.fails_if("postgresql < 8.1", "schema name leaks in, not sure")
@testing.provide_metadata
def test_renamed_sequence_reflection(self):
metadata = self.metadata
Table("t", metadata, Column("id", Integer, primary_key=True))
metadata.create_all()
m2 = MetaData(testing.db)
t2 = Table("t", m2, autoload=True, implicit_returning=False)
eq_(t2.c.id.server_default.arg.text, "nextval('t_id_seq'::regclass)")
r = t2.insert().execute()
eq_(r.inserted_primary_key, [1])
testing.db.connect().execution_options(autocommit=True).execute(
"alter table t_id_seq rename to foobar_id_seq"
)
m3 = MetaData(testing.db)
t3 = Table("t", m3, autoload=True, implicit_returning=False)
eq_(
t3.c.id.server_default.arg.text,
"nextval('foobar_id_seq'::regclass)",
)
r = t3.insert().execute()
eq_(r.inserted_primary_key, [2])
@testing.provide_metadata
def test_altered_type_autoincrement_pk_reflection(self):
metadata = self.metadata
Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
metadata.create_all()
testing.db.connect().execution_options(autocommit=True).execute(
"alter table t alter column id type varchar(50)"
)
m2 = MetaData(testing.db)
t2 = Table("t", m2, autoload=True)
eq_(t2.c.id.autoincrement, False)
eq_(t2.c.x.autoincrement, False)
@testing.provide_metadata
def test_renamed_pk_reflection(self):
metadata = self.metadata
Table("t", metadata, Column("id", Integer, primary_key=True))
metadata.create_all()
testing.db.connect().execution_options(autocommit=True).execute(
"alter table t rename id to t_id"
)
m2 = MetaData(testing.db)
t2 = Table("t", m2, autoload=True)
eq_([c.name for c in t2.primary_key], ["t_id"])
@testing.provide_metadata
def test_has_temporary_table(self):
assert not inspect(testing.db).has_table("some_temp_table")
user_tmp = Table(
"some_temp_table",
self.metadata,
Column("id", Integer, primary_key=True),
Column("name", String(50)),
prefixes=["TEMPORARY"],
)
user_tmp.create(testing.db)
assert inspect(testing.db).has_table("some_temp_table")
@testing.provide_metadata
def test_cross_schema_reflection_one(self):
meta1 = self.metadata
users = Table(
"users",
meta1,
Column("user_id", Integer, primary_key=True),
Column("user_name", String(30), nullable=False),
schema="test_schema",
)
addresses = Table(
"email_addresses",
meta1,
Column("address_id", Integer, primary_key=True),
Column("remote_user_id", Integer, ForeignKey(users.c.user_id)),
Column("email_address", String(20)),
schema="test_schema",
)
meta1.create_all()
meta2 = MetaData(testing.db)
addresses = Table(
"email_addresses", meta2, autoload=True, schema="test_schema"
)
users = Table("users", meta2, mustexist=True, schema="test_schema")
j = join(users, addresses)
self.assert_(
(users.c.user_id == addresses.c.remote_user_id).compare(j.onclause)
)
@testing.provide_metadata
def test_cross_schema_reflection_two(self):
meta1 = self.metadata
subject = Table(
"subject", meta1, Column("id", Integer, primary_key=True)
)
referer = Table(
"referer",
meta1,
Column("id", Integer, primary_key=True),
Column("ref", Integer, ForeignKey("subject.id")),
schema="test_schema",
)
meta1.create_all()
meta2 = MetaData(testing.db)
subject = Table("subject", meta2, autoload=True)
referer = Table("referer", meta2, schema="test_schema", autoload=True)
self.assert_(
(subject.c.id == referer.c.ref).compare(
subject.join(referer).onclause
)
)
@testing.provide_metadata
def test_cross_schema_reflection_three(self):
meta1 = self.metadata
subject = Table(
"subject",
meta1,
Column("id", Integer, primary_key=True),
schema="test_schema_2",
)
referer = Table(
"referer",
meta1,
Column("id", Integer, primary_key=True),
Column("ref", Integer, ForeignKey("test_schema_2.subject.id")),
schema="test_schema",
)
meta1.create_all()
meta2 = MetaData(testing.db)
subject = Table(
"subject", meta2, autoload=True, schema="test_schema_2"
)
referer = Table("referer", meta2, autoload=True, schema="test_schema")
self.assert_(
(subject.c.id == referer.c.ref).compare(
subject.join(referer).onclause
)
)
@testing.provide_metadata
def test_cross_schema_reflection_four(self):
meta1 = self.metadata
subject = Table(
"subject",
meta1,
Column("id", Integer, primary_key=True),
schema="test_schema_2",
)
referer = Table(
"referer",
meta1,
Column("id", Integer, primary_key=True),
Column("ref", Integer, ForeignKey("test_schema_2.subject.id")),
schema="test_schema",
)
meta1.create_all()
conn = testing.db.connect()
conn.detach()
conn.execute("SET search_path TO test_schema, test_schema_2")
meta2 = MetaData(bind=conn)
subject = Table(
"subject",
meta2,
autoload=True,
schema="test_schema_2",
postgresql_ignore_search_path=True,
)
referer = Table(
"referer",
meta2,
autoload=True,
schema="test_schema",
postgresql_ignore_search_path=True,
)
self.assert_(
(subject.c.id == referer.c.ref).compare(
subject.join(referer).onclause
)
)
conn.close()
@testing.provide_metadata
def test_cross_schema_reflection_five(self):
meta1 = self.metadata
# we assume 'public'
default_schema = testing.db.dialect.default_schema_name
subject = Table(
"subject", meta1, Column("id", Integer, primary_key=True)
)
referer = Table(
"referer",
meta1,
Column("id", Integer, primary_key=True),
Column("ref", Integer, ForeignKey("subject.id")),
)
meta1.create_all()
meta2 = MetaData(testing.db)
subject = Table(
"subject",
meta2,
autoload=True,
schema=default_schema,
postgresql_ignore_search_path=True,
)
referer = Table(
"referer",
meta2,
autoload=True,
schema=default_schema,
postgresql_ignore_search_path=True,
)
assert subject.schema == default_schema
self.assert_(
(subject.c.id == referer.c.ref).compare(
subject.join(referer).onclause
)
)
@testing.provide_metadata
def test_cross_schema_reflection_six(self):
# test that the search path *is* taken into account
# by default
meta1 = self.metadata
Table(
"some_table",
meta1,
Column("id", Integer, primary_key=True),
schema="test_schema",
)
Table(
"some_other_table",
meta1,
Column("id", Integer, primary_key=True),
Column("sid", Integer, ForeignKey("test_schema.some_table.id")),
schema="test_schema_2",
)
meta1.create_all()
with testing.db.connect() as conn:
conn.detach()
conn.execute(
"set search_path to test_schema_2, test_schema, public"
)
m1 = MetaData(conn)
Table("some_table", m1, schema="test_schema", autoload=True)
t2_schema = Table(
"some_other_table", m1, schema="test_schema_2", autoload=True
)
t2_no_schema = Table("some_other_table", m1, autoload=True)
t1_no_schema = Table("some_table", m1, autoload=True)
m2 = MetaData(conn)
t1_schema_isp = Table(
"some_table",
m2,
schema="test_schema",
autoload=True,
postgresql_ignore_search_path=True,
)
t2_schema_isp = Table(
"some_other_table",
m2,
schema="test_schema_2",
autoload=True,
postgresql_ignore_search_path=True,
)
# t2_schema refers to t1_schema, but since "test_schema"
# is in the search path, we instead link to t2_no_schema
assert t2_schema.c.sid.references(t1_no_schema.c.id)
# the two no_schema tables refer to each other also.
assert t2_no_schema.c.sid.references(t1_no_schema.c.id)
# but if we're ignoring search path, then we maintain
# those explicit schemas vs. what the "default" schema is
assert t2_schema_isp.c.sid.references(t1_schema_isp.c.id)
@testing.provide_metadata
def test_cross_schema_reflection_seven(self):
# test that the search path *is* taken into account
# by default
meta1 = self.metadata
Table(
"some_table",
meta1,
Column("id", Integer, primary_key=True),
schema="test_schema",
)
Table(
"some_other_table",
meta1,
Column("id", Integer, primary_key=True),
Column("sid", Integer, ForeignKey("test_schema.some_table.id")),
schema="test_schema_2",
)
meta1.create_all()
with testing.db.connect() as conn:
conn.detach()
conn.execute(
"set search_path to test_schema_2, test_schema, public"
)
meta2 = MetaData(conn)
meta2.reflect(schema="test_schema_2")
eq_(
set(meta2.tables),
set(["test_schema_2.some_other_table", "some_table"]),
)
meta3 = MetaData(conn)
meta3.reflect(
schema="test_schema_2", postgresql_ignore_search_path=True
)
eq_(
set(meta3.tables),
set(
[
"test_schema_2.some_other_table",
"test_schema.some_table",
]
),
)
@testing.provide_metadata
def test_cross_schema_reflection_metadata_uses_schema(self):
# test [ticket:3716]
metadata = self.metadata
Table(
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("sid", Integer, ForeignKey("some_other_table.id")),
schema="test_schema",
)
Table(
"some_other_table",
metadata,
Column("id", Integer, primary_key=True),
schema=None,
)
metadata.create_all()
with testing.db.connect() as conn:
meta2 = MetaData(conn, schema="test_schema")
meta2.reflect()
eq_(
set(meta2.tables),
set(["some_other_table", "test_schema.some_table"]),
)
@testing.provide_metadata
def test_uppercase_lowercase_table(self):
metadata = self.metadata
a_table = Table("a", metadata, Column("x", Integer))
A_table = Table("A", metadata, Column("x", Integer))
a_table.create()
assert inspect(testing.db).has_table("a")
assert not inspect(testing.db).has_table("A")
A_table.create(checkfirst=True)
assert inspect(testing.db).has_table("A")
def test_uppercase_lowercase_sequence(self):
a_seq = Sequence("a")
A_seq = Sequence("A")
a_seq.create(testing.db)
assert testing.db.dialect.has_sequence(testing.db, "a")
assert not testing.db.dialect.has_sequence(testing.db, "A")
A_seq.create(testing.db, checkfirst=True)
assert testing.db.dialect.has_sequence(testing.db, "A")
a_seq.drop(testing.db)
A_seq.drop(testing.db)
@testing.provide_metadata
def test_index_reflection(self):
""" Reflecting partial & expression-based indexes should warn
"""
metadata = self.metadata
Table(
"party",
metadata,
Column("id", String(10), nullable=False),
Column("name", String(20), index=True),
Column("aname", String(20)),
)
metadata.create_all()
testing.db.execute(
"""
create index idx1 on party ((id || name))
"""
)
testing.db.execute(
"""
create unique index idx2 on party (id) where name = 'test'
"""
)
testing.db.execute(
"""
create index idx3 on party using btree
(lower(name::text), lower(aname::text))
"""
)
def go():
m2 = MetaData(testing.db)
t2 = Table("party", m2, autoload=True)
assert len(t2.indexes) == 2
# Make sure indexes are in the order we expect them in
tmp = [(idx.name, idx) for idx in t2.indexes]
tmp.sort()
r1, r2 = [idx[1] for idx in tmp]
assert r1.name == "idx2"
assert r1.unique is True
assert r2.unique is False
assert [t2.c.id] == r1.columns
assert [t2.c.name] == r2.columns
testing.assert_warnings(
go,
[
"Skipped unsupported reflection of "
"expression-based index idx1",
"Predicate of partial index idx2 ignored during " "reflection",
"Skipped unsupported reflection of "
"expression-based index idx3",
],
)
@testing.fails_if("postgresql < 8.3", "index ordering not supported")
@testing.provide_metadata
def test_index_reflection_with_sorting(self):
"""reflect indexes with sorting options set"""
t1 = Table(
"party",
self.metadata,
Column("id", String(10), nullable=False),
Column("name", String(20)),
Column("aname", String(20)),
)
with testing.db.connect() as conn:
t1.create(conn)
# check ASC, DESC options alone
conn.execute(
"""
create index idx1 on party
(id, name ASC, aname DESC)
"""
)
# check DESC w/ NULLS options
conn.execute(
"""
create index idx2 on party
(name DESC NULLS FIRST, aname DESC NULLS LAST)
"""
)
# check ASC w/ NULLS options
conn.execute(
"""
create index idx3 on party
(name ASC NULLS FIRST, aname ASC NULLS LAST)
"""
)
# reflect data
with testing.db.connect() as conn:
m2 = MetaData(conn)
t2 = Table("party", m2, autoload=True)
eq_(len(t2.indexes), 3)
# Make sure indexes are in the order we expect them in
r1, r2, r3 = sorted(t2.indexes, key=lambda idx: idx.name)
eq_(r1.name, "idx1")
eq_(r2.name, "idx2")
eq_(r3.name, "idx3")
# "ASC NULLS LAST" is implicit default for indexes,
# and "NULLS FIRST" is implicit default for "DESC".
# (https://www.postgresql.org/docs/11/indexes-ordering.html)
def compile_exprs(exprs):
return list(map(str, exprs))
eq_(
compile_exprs([t2.c.id, t2.c.name, t2.c.aname.desc()]),
compile_exprs(r1.expressions),
)
eq_(
compile_exprs([t2.c.name.desc(), t2.c.aname.desc().nullslast()]),
compile_exprs(r2.expressions),
)
eq_(
compile_exprs([t2.c.name.nullsfirst(), t2.c.aname]),
compile_exprs(r3.expressions),
)
@testing.provide_metadata
def test_index_reflection_modified(self):
"""reflect indexes when a column name has changed - PG 9
does not update the name of the column in the index def.
[ticket:2141]
"""
metadata = self.metadata
Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
metadata.create_all()
conn = testing.db.connect().execution_options(autocommit=True)
conn.execute("CREATE INDEX idx1 ON t (x)")
conn.execute("ALTER TABLE t RENAME COLUMN x to y")
ind = testing.db.dialect.get_indexes(conn, "t", None)
eq_(ind, [{"unique": False, "column_names": ["y"], "name": "idx1"}])
conn.close()
@testing.fails_if("postgresql < 8.2", "reloptions not supported")
@testing.provide_metadata
def test_index_reflection_with_storage_options(self):
"""reflect indexes with storage options set"""
metadata = self.metadata
Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
metadata.create_all()
with testing.db.connect().execution_options(autocommit=True) as conn:
conn.execute("CREATE INDEX idx1 ON t (x) WITH (fillfactor = 50)")
ind = testing.db.dialect.get_indexes(conn, "t", None)
eq_(
ind,
[
{
"unique": False,
"column_names": ["x"],
"name": "idx1",
"dialect_options": {
"postgresql_with": {"fillfactor": "50"}
},
}
],
)
m = MetaData()
t1 = Table("t", m, autoload_with=conn)
eq_(
list(t1.indexes)[0].dialect_options["postgresql"]["with"],
{"fillfactor": "50"},
)
@testing.provide_metadata
def test_index_reflection_with_access_method(self):
"""reflect indexes with storage options set"""
metadata = self.metadata
Table(
"t",
metadata,
Column("id", Integer, primary_key=True),
Column("x", ARRAY(Integer)),
)
metadata.create_all()
with testing.db.connect().execution_options(autocommit=True) as conn:
conn.execute("CREATE INDEX idx1 ON t USING gin (x)")
ind = testing.db.dialect.get_indexes(conn, "t", None)
eq_(
ind,
[
{
"unique": False,
"column_names": ["x"],
"name": "idx1",
"dialect_options": {"postgresql_using": "gin"},
}
],
)
m = MetaData()
t1 = Table("t", m, autoload_with=conn)
eq_(
list(t1.indexes)[0].dialect_options["postgresql"]["using"],
"gin",
)
@testing.provide_metadata
def test_foreign_key_option_inspection(self):
metadata = self.metadata
Table(
"person",
metadata,
Column("id", String(length=32), nullable=False, primary_key=True),
Column(
"company_id",
ForeignKey(
"company.id",
name="person_company_id_fkey",
match="FULL",
onupdate="RESTRICT",
ondelete="RESTRICT",
deferrable=True,
initially="DEFERRED",
),
),
)
Table(
"company",
metadata,
Column("id", String(length=32), nullable=False, primary_key=True),
Column("name", String(length=255)),
Column(
"industry_id",
ForeignKey(
"industry.id",
name="company_industry_id_fkey",
onupdate="CASCADE",
ondelete="CASCADE",
deferrable=False, # PG default
# PG default
initially="IMMEDIATE",
),
),
)
Table(
"industry",
metadata,
Column("id", Integer(), nullable=False, primary_key=True),
Column("name", String(length=255)),
)
fk_ref = {
"person_company_id_fkey": {
"name": "person_company_id_fkey",
"constrained_columns": ["company_id"],
"referred_columns": ["id"],
"referred_table": "company",
"referred_schema": None,
"options": {
"onupdate": "RESTRICT",
"deferrable": True,
"ondelete": "RESTRICT",
"initially": "DEFERRED",
"match": "FULL",
},
},
"company_industry_id_fkey": {
"name": "company_industry_id_fkey",
"constrained_columns": ["industry_id"],
"referred_columns": ["id"],
"referred_table": "industry",
"referred_schema": None,
"options": {"onupdate": "CASCADE", "ondelete": "CASCADE"},
},
}
metadata.create_all()
inspector = inspect(testing.db)
fks = inspector.get_foreign_keys(
"person"
) + inspector.get_foreign_keys("company")
for fk in fks:
eq_(fk, fk_ref[fk["name"]])
@testing.provide_metadata
def test_inspect_enums_schema(self):
conn = testing.db.connect()
enum_type = postgresql.ENUM(
"sad",
"ok",
"happy",
name="mood",
schema="test_schema",
metadata=self.metadata,
)
enum_type.create(conn)
inspector = reflection.Inspector.from_engine(conn.engine)
eq_(
inspector.get_enums("test_schema"),
[
{
"visible": False,
"name": "mood",
"schema": "test_schema",
"labels": ["sad", "ok", "happy"],
}
],
)
@testing.provide_metadata
def test_inspect_enums(self):
enum_type = postgresql.ENUM(
"cat", "dog", "rat", name="pet", metadata=self.metadata
)
enum_type.create(testing.db)
inspector = reflection.Inspector.from_engine(testing.db)
eq_(
inspector.get_enums(),
[
{
"visible": True,
"labels": ["cat", "dog", "rat"],
"name": "pet",
"schema": "public",
}
],
)
@testing.provide_metadata
def test_inspect_enums_case_sensitive(self):
sa.event.listen(
self.metadata,
"before_create",
sa.DDL('create schema "TestSchema"'),
)
sa.event.listen(
self.metadata,
"after_drop",
sa.DDL('drop schema "TestSchema" cascade'),
)
for enum in "lower_case", "UpperCase", "Name.With.Dot":
for schema in None, "test_schema", "TestSchema":
postgresql.ENUM(
"CapsOne",
"CapsTwo",
name=enum,
schema=schema,
metadata=self.metadata,
)
self.metadata.create_all(testing.db)
inspector = inspect(testing.db)
for schema in None, "test_schema", "TestSchema":
eq_(
sorted(
inspector.get_enums(schema=schema), key=itemgetter("name")
),
[
{
"visible": schema is None,
"labels": ["CapsOne", "CapsTwo"],
"name": "Name.With.Dot",
"schema": "public" if schema is None else schema,
},
{
"visible": schema is None,
"labels": ["CapsOne", "CapsTwo"],
"name": "UpperCase",
"schema": "public" if schema is None else schema,
},
{
"visible": schema is None,
"labels": ["CapsOne", "CapsTwo"],
"name": "lower_case",
"schema": "public" if schema is None else schema,
},
],
)
@testing.provide_metadata
def test_inspect_enums_case_sensitive_from_table(self):
sa.event.listen(
self.metadata,
"before_create",
sa.DDL('create schema "TestSchema"'),
)
sa.event.listen(
self.metadata,
"after_drop",
sa.DDL('drop schema "TestSchema" cascade'),
)
counter = itertools.count()
for enum in "lower_case", "UpperCase", "Name.With.Dot":
for schema in None, "test_schema", "TestSchema":
enum_type = postgresql.ENUM(
"CapsOne",
"CapsTwo",
name=enum,
metadata=self.metadata,
schema=schema,
)
Table(
"t%d" % next(counter),
self.metadata,
Column("q", enum_type),
)
self.metadata.create_all(testing.db)
inspector = inspect(testing.db)
counter = itertools.count()
for enum in "lower_case", "UpperCase", "Name.With.Dot":
for schema in None, "test_schema", "TestSchema":
cols = inspector.get_columns("t%d" % next(counter))
cols[0]["type"] = (
cols[0]["type"].schema,
cols[0]["type"].name,
cols[0]["type"].enums,
)
eq_(
cols,
[
{
"name": "q",
"type": (schema, enum, ["CapsOne", "CapsTwo"]),
"nullable": True,
"default": None,
"autoincrement": False,
"comment": None,
}
],
)
@testing.provide_metadata
def test_inspect_enums_star(self):
enum_type = postgresql.ENUM(
"cat", "dog", "rat", name="pet", metadata=self.metadata
)
schema_enum_type = postgresql.ENUM(
"sad",
"ok",
"happy",
name="mood",
schema="test_schema",
metadata=self.metadata,
)
enum_type.create(testing.db)
schema_enum_type.create(testing.db)
inspector = reflection.Inspector.from_engine(testing.db)
eq_(
inspector.get_enums(),
[
{
"visible": True,
"labels": ["cat", "dog", "rat"],
"name": "pet",
"schema": "public",
}
],
)
eq_(
inspector.get_enums("*"),
[
{
"visible": True,
"labels": ["cat", "dog", "rat"],
"name": "pet",
"schema": "public",
},
{
"visible": False,
"name": "mood",
"schema": "test_schema",
"labels": ["sad", "ok", "happy"],
},
],
)
@testing.provide_metadata
def test_inspect_enum_empty(self):
enum_type = postgresql.ENUM(name="empty", metadata=self.metadata)
enum_type.create(testing.db)
inspector = reflection.Inspector.from_engine(testing.db)
eq_(
inspector.get_enums(),
[
{
"visible": True,
"labels": [],
"name": "empty",
"schema": "public",
}
],
)
@testing.provide_metadata
def test_inspect_enum_empty_from_table(self):
Table(
"t", self.metadata, Column("x", postgresql.ENUM(name="empty"))
).create(testing.db)
t = Table("t", MetaData(testing.db), autoload_with=testing.db)
eq_(t.c.x.type.enums, [])
@testing.provide_metadata
@testing.only_on("postgresql >= 8.5")
def test_reflection_with_unique_constraint(self):
insp = inspect(testing.db)
meta = self.metadata
uc_table = Table(
"pgsql_uc",
meta,
Column("a", String(10)),
UniqueConstraint("a", name="uc_a"),
)
uc_table.create()
# PostgreSQL will create an implicit index for a unique
# constraint. Separately we get both
indexes = set(i["name"] for i in insp.get_indexes("pgsql_uc"))
constraints = set(
i["name"] for i in insp.get_unique_constraints("pgsql_uc")
)
self.assert_("uc_a" in indexes)
self.assert_("uc_a" in constraints)
# reflection corrects for the dupe
reflected = Table("pgsql_uc", MetaData(testing.db), autoload=True)
indexes = set(i.name for i in reflected.indexes)
constraints = set(uc.name for uc in reflected.constraints)
self.assert_("uc_a" not in indexes)
self.assert_("uc_a" in constraints)
@testing.requires.btree_gist
@testing.provide_metadata
def test_reflection_with_exclude_constraint(self):
m = self.metadata
Table(
"t",
m,
Column("id", Integer, primary_key=True),
Column("period", TSRANGE),
ExcludeConstraint(("period", "&&"), name="quarters_period_excl"),
)
m.create_all()
insp = inspect(testing.db)
# PostgreSQL will create an implicit index for an exclude constraint.
# we don't reflect the EXCLUDE yet.
eq_(
insp.get_indexes("t"),
[
{
"unique": False,
"name": "quarters_period_excl",
"duplicates_constraint": "quarters_period_excl",
"dialect_options": {"postgresql_using": "gist"},
"column_names": ["period"],
}
],
)
# reflection corrects for the dupe
reflected = Table("t", MetaData(testing.db), autoload=True)
eq_(set(reflected.indexes), set())
@testing.provide_metadata
def test_reflect_unique_index(self):
insp = inspect(testing.db)
meta = self.metadata
# a unique index OTOH we are able to detect is an index
# and not a unique constraint
uc_table = Table(
"pgsql_uc",
meta,
Column("a", String(10)),
Index("ix_a", "a", unique=True),
)
uc_table.create()
indexes = dict((i["name"], i) for i in insp.get_indexes("pgsql_uc"))
constraints = set(
i["name"] for i in insp.get_unique_constraints("pgsql_uc")
)
self.assert_("ix_a" in indexes)
assert indexes["ix_a"]["unique"]
self.assert_("ix_a" not in constraints)
reflected = Table("pgsql_uc", MetaData(testing.db), autoload=True)
indexes = dict((i.name, i) for i in reflected.indexes)
constraints = set(uc.name for uc in reflected.constraints)
self.assert_("ix_a" in indexes)
assert indexes["ix_a"].unique
self.assert_("ix_a" not in constraints)
@testing.provide_metadata
def test_reflect_check_constraint(self):
meta = self.metadata
udf_create = """\
CREATE OR REPLACE FUNCTION is_positive(
x integer DEFAULT '-1'::integer)
RETURNS boolean
LANGUAGE 'plpgsql'
COST 100
VOLATILE
AS $BODY$BEGIN
RETURN x > 0;
END;$BODY$;
"""
sa.event.listen(meta, "before_create", sa.DDL(udf_create))
sa.event.listen(
meta, "after_drop", sa.DDL("DROP FUNCTION is_positive(integer)")
)
Table(
"pgsql_cc",
meta,
Column("a", Integer()),
CheckConstraint("a > 1 AND a < 5", name="cc1"),
CheckConstraint("a = 1 OR (a > 2 AND a < 5)", name="cc2"),
CheckConstraint("is_positive(a)", name="cc3"),
)
meta.create_all()
reflected = Table("pgsql_cc", MetaData(), autoload_with=testing.db)
check_constraints = dict(
(uc.name, uc.sqltext.text)
for uc in reflected.constraints
if isinstance(uc, CheckConstraint)
)
eq_(
check_constraints,
{
u"cc1": u"(a > 1) AND (a < 5)",
u"cc2": u"(a = 1) OR ((a > 2) AND (a < 5))",
u"cc3": u"is_positive(a)",
},
)
def test_reflect_check_warning(self):
rows = [("some name", "NOTCHECK foobar")]
conn = mock.Mock(
execute=lambda *arg, **kw: mock.MagicMock(
fetchall=lambda: rows, __iter__=lambda self: iter(rows)
)
)
with mock.patch.object(
testing.db.dialect, "get_table_oid", lambda *arg, **kw: 1
):
with testing.expect_warnings(
"Could not parse CHECK constraint text: 'NOTCHECK foobar'"
):
testing.db.dialect.get_check_constraints(conn, "foo")
def test_reflect_with_not_valid_check_constraint(self):
rows = [("some name", "CHECK ((a IS NOT NULL)) NOT VALID")]
conn = mock.Mock(
execute=lambda *arg, **kw: mock.MagicMock(
fetchall=lambda: rows, __iter__=lambda self: iter(rows)
)
)
with mock.patch.object(
testing.db.dialect, "get_table_oid", lambda *arg, **kw: 1
):
check_constraints = testing.db.dialect.get_check_constraints(
conn, "foo"
)
eq_(
check_constraints,
[
{
"name": "some name",
"sqltext": "a IS NOT NULL",
"dialect_options": {"not_valid": True},
}
],
)
class CustomTypeReflectionTest(fixtures.TestBase):
class CustomType(object):
def __init__(self, arg1=None, arg2=None):
self.arg1 = arg1
self.arg2 = arg2
ischema_names = None
def setup(self):
ischema_names = postgresql.PGDialect.ischema_names
postgresql.PGDialect.ischema_names = ischema_names.copy()
self.ischema_names = ischema_names
def teardown(self):
postgresql.PGDialect.ischema_names = self.ischema_names
self.ischema_names = None
def _assert_reflected(self, dialect):
for sch, args in [
("my_custom_type", (None, None)),
("my_custom_type()", (None, None)),
("my_custom_type(ARG1)", ("ARG1", None)),
("my_custom_type(ARG1, ARG2)", ("ARG1", "ARG2")),
]:
column_info = dialect._get_column_info(
"colname", sch, None, False, {}, {}, "public", None
)
assert isinstance(column_info["type"], self.CustomType)
eq_(column_info["type"].arg1, args[0])
eq_(column_info["type"].arg2, args[1])
def test_clslevel(self):
postgresql.PGDialect.ischema_names["my_custom_type"] = self.CustomType
dialect = postgresql.PGDialect()
self._assert_reflected(dialect)
def test_instancelevel(self):
dialect = postgresql.PGDialect()
dialect.ischema_names = dialect.ischema_names.copy()
dialect.ischema_names["my_custom_type"] = self.CustomType
self._assert_reflected(dialect)
class IntervalReflectionTest(fixtures.TestBase):
__only_on__ = "postgresql"
__backend__ = True
def test_interval_types(self):
for sym in [
"YEAR",
"MONTH",
"DAY",
"HOUR",
"MINUTE",
"SECOND",
"YEAR TO MONTH",
"DAY TO HOUR",
"DAY TO MINUTE",
"DAY TO SECOND",
"HOUR TO MINUTE",
"HOUR TO SECOND",
"MINUTE TO SECOND",
]:
self._test_interval_symbol(sym)
@testing.provide_metadata
def _test_interval_symbol(self, sym):
t = Table(
"i_test",
self.metadata,
Column("id", Integer, primary_key=True),
Column("data1", INTERVAL(fields=sym)),
)
t.create(testing.db)
columns = {
rec["name"]: rec
for rec in inspect(testing.db).get_columns("i_test")
}
assert isinstance(columns["data1"]["type"], INTERVAL)
eq_(columns["data1"]["type"].fields, sym.lower())
eq_(columns["data1"]["type"].precision, None)
@testing.provide_metadata
def test_interval_precision(self):
t = Table(
"i_test",
self.metadata,
Column("id", Integer, primary_key=True),
Column("data1", INTERVAL(precision=6)),
)
t.create(testing.db)
columns = {
rec["name"]: rec
for rec in inspect(testing.db).get_columns("i_test")
}
assert isinstance(columns["data1"]["type"], INTERVAL)
eq_(columns["data1"]["type"].fields, None)
eq_(columns["data1"]["type"].precision, 6)
|
py | b410cab2cb3aa15fd194ff6f7af85066e79fc4de | import os
import sys
import logging
import subprocess
from naf2conll.main import Main
logger = logging.getLogger(None if __name__ == '__main__' else __name__)
def test_empty_dir(caplog, empty_dir):
caplog.set_level(logging.DEBUG)
logger.debug(os.path.realpath(os.curdir))
output_dir = 'output_dir'
try:
subprocess.run(
[
sys.executable,
"-m",
"naf2conll",
output_dir,
"-d",
empty_dir
],
check=True
)
finally:
if os.path.exists(output_dir):
os.rmdir(output_dir)
def test_no_coref_file(caplog, naffile_no_coref):
caplog.set_level(logging.DEBUG)
logger.debug(os.path.realpath(os.curdir))
output_file = 'output.conll'
try:
subprocess.run(
[
sys.executable,
"-m",
"naf2conll",
output_file,
naffile_no_coref
],
check=True
)
finally:
if os.path.exists(output_file):
os.remove(output_file)
def test_coref_file(caplog, naffile_coref):
caplog.set_level(logging.DEBUG)
logger.debug(os.path.realpath(os.curdir))
output_file = 'output.conll'
try:
subprocess.run(
[
sys.executable,
"-m",
"naf2conll",
output_file,
naffile_coref
],
check=True
)
finally:
if os.path.exists(output_file):
os.remove(output_file)
def test_fill_spans(caplog, naffile_not_consec_coref, fill_spans_config):
caplog.set_level(logging.DEBUG)
logger.debug(os.path.realpath(os.curdir))
output_file = 'output.conll'
try:
subprocess.run(
[
sys.executable,
"-m",
"naf2conll",
output_file,
naffile_not_consec_coref,
'-c',
fill_spans_config
],
check=True
)
finally:
if os.path.exists(output_file):
os.remove(output_file)
def test_problem_only(caplog, naffile_not_consec_coref, problem_only_config):
caplog.set_level(logging.DEBUG)
output_file = 'output.conll'
try:
Main.main([
output_file,
naffile_not_consec_coref,
'-c',
problem_only_config
])
finally:
if os.path.exists(output_file):
os.remove(output_file)
def test_default_config(caplog, naffile_coref, default_config):
caplog.set_level(logging.DEBUG)
output_file = 'output.conll'
try:
Main.main([
output_file,
naffile_coref,
'-c',
default_config
])
finally:
if os.path.exists(output_file):
os.remove(output_file)
def test_not_fill_spans(caplog, naffile_not_consec_coref):
caplog.set_level(logging.DEBUG)
logger.debug(os.path.realpath(os.curdir))
output_file = 'output.conll'
try:
res = subprocess.run(
[
sys.executable,
"-m",
"naf2conll",
output_file,
naffile_not_consec_coref,
]
)
finally:
if os.path.exists(output_file):
os.remove(output_file)
assert res.returncode != 0
|
py | b410caf1e043a0b63c1895cf060ac0c9d8efc8e2 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Modules."""
import numpy as np
import tensorflow.compat.v1 as tf
SECS_TO_DAYS = 60 * 60 * 24
def positional_encoding(dim, sentence_length, dtype=tf.float32):
"""Positional encoding."""
encoded_vec = np.array([
pos / np.power(10000, 2 * i / dim) # pylint: disable=g-complex-comprehension
for pos in range(sentence_length)
for i in range(dim)
])
encoded_vec[::2] = np.sin(encoded_vec[::2])
encoded_vec[1::2] = np.cos(encoded_vec[1::2])
return tf.convert_to_tensor(
encoded_vec.reshape([sentence_length, dim]), dtype=dtype)
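# Added illustrative note (not from the original module): a minimal usage sketch,
# assuming a hypothetical model dimension and sequence length.
#
#   pe = positional_encoding(dim=64, sentence_length=50)   # Tensor of shape (50, 64)
#   # The table can be added to an item-embedding tensor of shape (N, 50, 64),
#   # e.g. seq_emb += pe, to inject position information before self-attention.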
def normalize(inputs, epsilon=1e-8, scope="ln", reuse=None):
"""Applies layer normalization.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`.
epsilon: A floating number. A very small number for preventing
ZeroDivision Error.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer by the
same name.
Returns:
A tensor with the same shape and data dtype as `inputs`.
"""
with tf.variable_scope(scope, reuse=reuse):
inputs_shape = inputs.get_shape()
params_shape = inputs_shape[-1:]
mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
beta = tf.Variable(tf.zeros(params_shape))
gamma = tf.Variable(tf.ones(params_shape))
normalized = (inputs - mean) / ((variance + epsilon)**(.5))
outputs = gamma * normalized + beta
return outputs
def embedding(inputs,
vocab_size,
num_units,
zero_pad=True,
scale=True,
l2_reg=0.0,
scope="embedding",
with_t=False,
reuse=None):
"""Embeds a given tensor.
Args:
inputs: A `Tensor` with type `int32` or `int64` containing the ids to be
looked up in `lookup table`.
vocab_size: An int. Vocabulary size.
num_units: An int. Number of embedding hidden units.
zero_pad: A boolean. If True, all the values of the first row (id 0) should
be constant zeros.
scale: A boolean. If True, the outputs are multiplied by sqrt(num_units).
l2_reg: L2 regularization weight.
scope: Optional scope for `variable_scope`.
with_t: If True, return the embedding table.
reuse: Boolean, whether to reuse the weights of a previous layer by the
same name.
Returns:
A `Tensor` with one more rank than the input's. The last dimensionality
should be `num_units`.
For example,
```
import tensorflow as tf
inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
outputs = embedding(inputs, 6, 2, zero_pad=True)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print sess.run(outputs)
>>
[[[ 0. 0. ]
[ 0.09754146 0.67385566]
[ 0.37864095 -0.35689294]]
[[-1.01329422 -1.09939694]
[ 0.7521342 0.38203377]
[-0.04973143 -0.06210355]]]
```
```
import tensorflow as tf
inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
outputs = embedding(inputs, 6, 2, zero_pad=False)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print sess.run(outputs)
>>
[[[-0.19172323 -0.39159766]
[-0.43212751 -0.66207761]
[ 1.03452027 -0.26704335]]
[[-0.11634696 -0.35983452]
[ 0.50208133 0.53509563]
[ 1.22204471 -0.96587461]]]
```
"""
with tf.variable_scope(scope, reuse=reuse):
lookup_table = tf.get_variable(
"lookup_table",
dtype=tf.float32,
shape=[vocab_size, num_units],
# initializer=tf.contrib.layers.xavier_initializer(),
regularizer=tf.keras.regularizers.l2(l2_reg))
if zero_pad:
lookup_table = tf.concat(
(tf.zeros(shape=[1, num_units]), lookup_table[1:, :]), 0)
outputs = tf.nn.embedding_lookup(lookup_table, inputs)
if scale:
outputs = outputs * (num_units**0.5)
if with_t:
return outputs, lookup_table
else:
return outputs
def multihead_attention(queries,
keys,
times=None,
num_units=None,
num_heads=1,
dropout_rate=0,
is_training=True,
use_prior="none",
causality=True,
scope="multihead_attention",
residual=False,
time_exp_base=None,
overlapping_chunks=None,
reuse=None,
with_qk=False):
"""Applies multihead attention.
Args:
queries: A 3d tensor with shape of [N, T_q, C_q].
keys: A 3d tensor with shape of [N, T_k, C_k].
times: A 2d tensor with shape of [N, T_q] holding the query timestamps (seconds).
num_units: A scalar. Attention size.
num_heads: An int. Number of heads.
dropout_rate: A floating point number.
is_training: Boolean. Controller of mechanism for dropout.
use_prior: String. Whether to use prior for attention heads. Supported
values include: none, position, time.
causality: Boolean. If true, units that reference the future are masked.
scope: Optional scope for `variable_scope`.
residual: Boolean. Whether to use residual connection.
time_exp_base: A scalar. Base for exponential time intervals. Only used for
the case where use_prior='time'.
overlapping_chunks: Boolean. Whether to use (non)/overlapping chunks for the
case where use_prior='time'.
reuse: Boolean, whether to reuse the weights of a previous layer by the
same name.
with_qk: Whether to return the Q and K projections instead of the attention output.
Returns:
A 3d tensor with shape of (N, T_q, C), the output of multihead attention.
"""
tf.logging.info(
"Computing attention with prior: {} and num of heads: {}".format(
use_prior, num_heads))
with tf.variable_scope(scope, reuse=reuse):
# Set the fall back option for num_units
if num_units is None:
num_units = queries.get_shape().as_list()[-1]
# pylint: disable=invalid-name
# Linear projections
# Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu)
# K = tf.layers.dense(keys, num_units, activation=tf.nn.relu)
# V = tf.layers.dense(keys, num_units, activation=tf.nn.relu)
Q = tf.layers.dense(queries, num_units, activation=None) # (N, T_q, C)
K = tf.layers.dense(keys, num_units, activation=None) # (N, T_k, C)
V = tf.layers.dense(keys, num_units, activation=None) # (N, T_k, C)
# Split and concat
Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0) # (h*N, T_q, C/h)
K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
# pylint: enable=invalid-name
# Multiplication
outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1])) # (h*N, T_q, T_k)
# Scale
outputs = outputs / (K_.get_shape().as_list()[-1]**0.5)
# Key Masking
key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1))) # (N, T_k)
key_masks = tf.tile(key_masks, [num_heads, 1]) # (h*N, T_k)
key_masks = tf.tile(
tf.expand_dims(key_masks, 1),
[1, tf.shape(queries)[1], 1]) # (h*N, T_q, T_k)
paddings = tf.ones_like(outputs) * (-2**32 + 1)
outputs = tf.where(tf.equal(key_masks, 0), paddings,
outputs) # (h*N, T_q, T_k)
# Causality = Future blinding
if causality:
diag_vals = tf.ones_like(outputs[0, :, :]) # (T_q, T_k)
tril = tf.linalg.LinearOperatorLowerTriangular(
diag_vals).to_dense() # (T_q, T_k)
masks = tf.tile(tf.expand_dims(tril, 0),
[tf.shape(outputs)[0], 1, 1]) # (h*N, T_q, T_k)
paddings = tf.ones_like(masks) * (-2**32 + 1)
outputs = tf.where(tf.equal(masks, 0), paddings,
outputs) # (h*N, T_q, T_k)
# Position/Time prior is only used in multi-head case.
if num_heads > 1:
# Scaling head weights with position prior.
if use_prior == "position":
# Each head focuses on a window of items whose size is computed below.
attn_size = int(outputs.get_shape().as_list()[-1] / num_heads)
outputs = tf.concat(
_compute_head_weights_with_position_prior(outputs, masks, paddings,
num_heads, attn_size),
axis=0) # (H*N, T_q, T_k)
tf.logging.info("After position-wise sliding window attention.")
tf.logging.info(outputs.shape)
# Scaling head weights with time prior.
elif use_prior == "time":
# Convert time deltas from seconds to days.
if times is None:
raise ValueError("Times tensor is needed.")
time_deltas = _compute_time_deltas(times) / SECS_TO_DAYS
outputs = tf.concat(_compute_head_weights_with_time_prior(
outputs, paddings, time_deltas, num_heads, time_exp_base,
overlapping_chunks), axis=0) # (H*N, T_q, T_k)
# Activation
outputs = tf.nn.softmax(outputs) # (h*N, T_q, T_k)
# Query Masking
query_masks = tf.sign(tf.abs(tf.reduce_sum(queries, axis=-1))) # (N, T_q)
query_masks = tf.tile(query_masks, [num_heads, 1]) # (h*N, T_q)
query_masks = tf.tile(
tf.expand_dims(query_masks, -1),
[1, 1, tf.shape(keys)[1]]) # (h*N, T_q, T_k)
outputs *= query_masks # broadcasting. (h*N, T_q, C)
# Dropouts
outputs = tf.layers.dropout(
outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
# Weighted sum
outputs = tf.matmul(outputs, V_) # (h*N, T_q, C/h)
# Restore shape
outputs = tf.concat(
tf.split(outputs, num_heads, axis=0), axis=2) # (N, T_q, C)
# Residual connection
if residual:
outputs += queries
if with_qk:
return Q, K
else:
return outputs
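# Added illustrative note (not from the original module): a hedged usage sketch for
# multihead_attention; the tensor names and sizes are made up, and it assumes
# self-attention (queries == keys) over item embeddings with per-query timestamps.
#
#   # seq_emb: (N, T, C) item embeddings, seq_times: (N, T) unix timestamps in seconds
#   attn_out = multihead_attention(
#       queries=normalize(seq_emb),
#       keys=seq_emb,
#       times=seq_times,
#       num_units=128,
#       num_heads=4,
#       dropout_rate=0.1,
#       is_training=True,
#       use_prior="time",
#       time_exp_base=3,
#       overlapping_chunks=False,
#       causality=True,
#       residual=True)   # -> (N, T, 128)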
def _compute_head_weights_with_position_prior(weights, masks, paddings,
num_heads, attn_size):
"""Computes head-specific attention weights with position prior.
This function simply masks out the weights for items if they don't belong to a
certain chunk, using a sliding window technique. I.e., head i only focuses on
ith recent "chunk_size" items with respect to the query. Note that chunks are
non-overlapping, meaning, sliding window stride is also set to attn_size.
Args:
weights: A 3d tensor with shape of [h*N, T_q, T_k].
masks: A 3d tensor with shape of [h*N, T_q, T_k].
paddings: A 3d tensor with shape of [h*N, T_q, T_k].
num_heads: An integer denoting number of chunks.
attn_size: An integer denoting the size of the sliding window.
Returns:
A list of h tensors (each shaped [N, T_q, T_k]) where tensors correspond to
chunk specific weights.
"""
# Masks is a lower triangular tensor with ones in the bottom and zeros in the
# upper section. Since chunks are allocated with respect to query position, we
# first need to count the available items prior to each query. argmin function
# would work for this, except the last query because it returns the smallest
# index in the case of ties. To make sure we have the accurate count for the
# last query, we first append a zero tensor and call the argmin function.
max_idxs = tf.argmin(tf.concat([masks, tf.zeros_like(masks)], axis=-1),
2) # (h*N, T_q)
# Split for heads.
max_idxs_split = tf.split(max_idxs, num_heads, axis=0) # (h x (N, T_q))
weights_split = tf.split(weights, num_heads, axis=0) # (h x (N, T_q, T_k))
paddings_split = tf.split(paddings, num_heads, axis=0) # (h x (N, T_q, T_k))
# Collects output weights per chunk.
chunk_outputs_list = []
for i in range(num_heads):
mask_left = tf.sequence_mask(
tf.maximum(max_idxs_split[i] - (attn_size * (i + 1)), 0),
tf.shape(weights_split[i])[2]) # (N, T_q, T_k)
mask_right = tf.sequence_mask(
tf.maximum(max_idxs_split[i] - (attn_size * i), 0),
tf.shape(weights_split[i])[2]) # (N, T_q, T_k)
mask = tf.logical_and(tf.logical_not(mask_left),
mask_right) # (N, T_q, T_k)
# Adjust weights for chunk i.
output = tf.where(mask, weights_split[i],
paddings_split[i]) # (N, T_q, T_k)
chunk_outputs_list.append(output)
return chunk_outputs_list # (h x (N, T_q, T_k))
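# Added worked example (illustrative numbers only): with num_heads=3 and attn_size=2,
# a query with 6 preceding items is split so that
#   head 1 attends to the 2 most recent items,
#   head 2 attends to the next older 2 items,
#   head 3 attends to the oldest 2 items,
# and positions outside a head's window keep the large negative padding, so they
# vanish after the softmax.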
def _compute_head_weights_with_time_prior(weights, paddings, time_deltas,
num_heads, time_exp_base,
overlapping_chunks):
"""Computes head-specific attention weights with time prior.
This function simply masks out the weights for items if they don't belong to a
certain chunk. Here, chunks are allocated based on time information. We use
exponential function--pow(time_exp_base,i)--to allocate segment boundaries.
Note that time delta values represent number of days.
Example 1: Let overlapping_chunks=False, time_exp_base=3 and num_heads=3.
1st head focuses on the items within time interval [0, pow(3,0)],
2nd head focuses on the items within time interval (pow(3,0), pow(3,1)],
3rd (last) head focuses on the items within time interval (pow(3,1), inf]
Example 2: Let overlapping_chunks=True, time_exp_base=3 and num_heads=3.
1st head focuses on the items within time interval [0, pow(3,0)],
2nd head focuses on the items within time interval [0, pow(3,1)],
3rd (last) head focuses on the items within time interval [0, inf]
Args:
weights: A 3d tensor with shape of [h*N, T_q, T_k].
paddings: A 3d tensor with shape of [h*N, T_q, T_k].
time_deltas: A 3d tensor with shape of [N, T_q, T_k].
num_heads: An integer denoting number of chunks.
time_exp_base: A scalar. Base for exponential time intervals.
overlapping_chunks: Boolean. Whether to use overlapping chunks.
Returns:
A list of h tensors (each shaped [N, T_q, T_k]) where tensors correspond to
chunk specific weights.
"""
tf.logging.info(
"Computing with time_exp_base:{} and overlapping_chunks:{}".format(
time_exp_base, overlapping_chunks))
chunk_outputs_list = []
weights_split = tf.split(weights, num_heads, axis=0)
paddings_split = tf.split(paddings, num_heads, axis=0)
ones_tensor = tf.ones_like(time_deltas) # (N, T_q, T_k)
# False in previous items and True in future items.
mask_previous_head = time_deltas < 0 # (N, T_q, T_k)
for i in range(num_heads):
if i == (num_heads - 1): # Last chunk considers all the remaining items.
# All True.
mask_next_head = tf.ones_like(time_deltas, dtype=bool) # (N, T_q, T_k)
else:
mask_next_head = tf.math.less_equal(
time_deltas, (time_exp_base**i) * ones_tensor) # (N, T_q, T_k)
mask = tf.logical_and(tf.logical_not(mask_previous_head),
mask_next_head) # (N, T_q, T_k)
output = tf.where(mask, weights_split[i],
paddings_split[i]) # (N, T_q, T_k)
chunk_outputs_list.append(output)
# Update previous mask for non-overlapping chunks.
if not overlapping_chunks:
mask_previous_head = mask_next_head
return chunk_outputs_list
def _compute_time_deltas(times):
"""This function computes time deltas between items.
It is important to note that given timestamps are for queries. Hence, we need
to consider that while calculating the time deltas between queries and items.
Example: For items: [<PAD>, 1, 2, 3] and queries: [q1, q2, q3, q4], the times
vector is [t1, t2, t3, t4]. Then, the time deltas will be:
[
[t1, 0, t1-t2, t1-t3], # time deltas for query 1
[t2, t2-t1, 0, t2-t3], # time deltas for query 2
[t3, t3-t1, t3-t2, 0], # time deltas for query 3
[t4, t4-t1, t4-t2, t4-t3] # time deltas for query 4
]
Args:
times: A 2d tensor with shape of [N, T_q].
Returns:
A 3d tensor with shape of [N, T_q, T_q].
"""
t1 = tf.tile(tf.expand_dims(times, 2), [1, 1, tf.shape(times)[1]])
t2 = tf.tile(tf.expand_dims(times, 1), [1, tf.shape(times)[1], 1])
time_deltas = t1 - t2 # (N, T_q, T_q)
time_deltas = tf.concat([tf.expand_dims(times, 2), time_deltas],
2) # (N, T_q, 1+T_q)
time_deltas = time_deltas[:, :, :-1] # (N, T_q, T_q)
return time_deltas
# pylint: disable=dangerous-default-value
def feedforward(inputs,
num_units=[2048, 512],
scope="multihead_attention",
dropout_rate=0.2,
is_training=True,
reuse=None):
"""Point-wise feed forward net.
Args:
inputs: A 3d tensor with shape of [N, T, C].
num_units: A list of two integers.
scope: Optional scope for `variable_scope`.
dropout_rate: Dropout rate.
is_training: Whether to run in training mode.
reuse: Boolean, whether to reuse the weights of a previous layer by the
same name.
Returns:
A 3d tensor with the same shape and dtype as inputs
"""
with tf.variable_scope(scope, reuse=reuse):
# Inner layer
params = {
"inputs": inputs,
"filters": num_units[0],
"kernel_size": 1,
"activation": tf.nn.relu,
"use_bias": True
}
outputs = tf.layers.conv1d(**params)
outputs = tf.layers.dropout(
outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
# Readout layer
params = {
"inputs": outputs,
"filters": num_units[1],
"kernel_size": 1,
"activation": None,
"use_bias": True
}
outputs = tf.layers.conv1d(**params)
outputs = tf.layers.dropout(
outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
# Residual connection
outputs += inputs
# Normalize
# outputs = normalize(outputs)
return outputs
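# Added descriptive note: because both conv1d layers above use kernel_size=1, they act
# as position-wise dense layers, so feedforward(x) computes the standard Transformer
# FFN relu(x W1 + b1) W2 + b2 independently at each timestep, with dropout after each
# projection and a residual connection back to the input.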
# pylint: disable=dangerous-default-value
def query_feedforward(inputs,
num_units,
scope="item_and_query_combined_embedding",
dropout_rate=0,
is_training=True,
residual=False,
reuse=None):
"""Point-wise feed forward net for query-item encoder.
Args:
inputs: A 3d tensor with shape of [N, T, C].
num_units: A list of two integers.
scope: Optional scope for `variable_scope`.
dropout_rate: Dropout rate.
is_training: Whether to run in training mode.
residual: Whether to use residual connections.
reuse: Boolean, whether to reuse the weights of a previous layer by the
same name.
Returns:
A 3d tensor with the same shape and dtype as inputs
"""
with tf.variable_scope(scope, reuse=reuse):
outputs = tf.nn.relu(inputs)
for units in num_units:
params = {
"inputs": outputs,
"filters": units,
"kernel_size": 1,
"activation": None,
"use_bias": True
}
outputs = tf.layers.conv1d(**params)
outputs = tf.layers.dropout(
outputs,
rate=dropout_rate,
training=tf.convert_to_tensor(is_training))
# Residual connection
if residual:
outputs += inputs
return outputs
|
py | b410cbd7df61d01a918e4d63b53d0d150fec4222 | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.bridge.oleautomation
# Libre Office Version: 7.3
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME, UNO_NONE
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
import uno
def _get_class():
orig_init = None
ordered_keys = ('Value',)
def init(self, *args, **kwargs):
if len(kwargs) == 0 and len(args) == 1 and getattr(args[0], "__class__", None) == self.__class__:
orig_init(self, args[0])
return
kargs = kwargs.copy()
for i, arg in enumerate(args):
kargs[ordered_keys[i]] = arg
orig_init(self, **kargs)
type_name = 'com.sun.star.bridge.oleautomation.Date'
struct = uno.getClass(type_name)
struct.__ooo_ns__ = 'com.sun.star.bridge.oleautomation'
struct.__ooo_full_ns__= type_name
struct.__ooo_type_name__ = 'struct'
orig_init = struct.__init__
struct.__init__ = init
return struct
Date = _get_class()
else:
from ....lo.bridge.oleautomation.date import Date as Date
__all__ = ['Date']
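# Added illustrative note (hypothetical usage, not part of the generated file): the
# struct exposes a single ordered field, `Value`, so under a running LibreOffice/UNO
# environment it can be constructed positionally, by keyword, or from another Date:
#
#   d1 = Date(0.0)        # same as Date(Value=0.0)
#   d2 = Date(d1)         # copy-construct from an existing Date instance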
|
py | b410cd57e32f4aa97a720b12a915731870d670f3 | from typing import Any, Dict, Tuple
import numpy as np
import pandas as pd
import streamlit as st
def clean_df(df: pd.DataFrame, cleaning: Dict[Any, Any]) -> pd.DataFrame:
"""Cleans the input dataframe according to cleaning dict specifications.
Parameters
----------
df : pd.DataFrame
Input dataframe that has to be cleaned.
cleaning : Dict
Cleaning specifications.
Returns
-------
pd.DataFrame
Cleaned dataframe.
"""
df = _remove_rows(df, cleaning)
df = _log_transform(df, cleaning)
return df
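# Added illustrative note: a hedged sketch of the `cleaning` dict this module expects,
# inferred only from the keys accessed below (the real schema is built elsewhere in
# the app and may contain more options):
#
#   cleaning = {
#       "del_negative": True,    # drop rows where y < 0
#       "del_zeros": True,       # drop rows where y == 0
#       "del_days": [5, 6],      # drop these pandas dayofweek codes (5=Sat, 6=Sun)
#       "log_transform": False,  # optionally model log(y) instead of y
#   }
#   df_clean = clean_df(df, cleaning)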
def clean_future_df(df: pd.DataFrame, cleaning: Dict[Any, Any]) -> pd.DataFrame:
"""Cleans the input dataframe according to cleaning dict specifications.
Parameters
----------
df : pd.DataFrame
Input dataframe that has to be cleaned.
cleaning : Dict
Cleaning specifications.
Returns
-------
pd.DataFrame
Cleaned dataframe.
"""
df_clean = df.copy() # To avoid CachedObjectMutationWarning
df_clean["__to_remove"] = 0
if cleaning["del_days"] is not None:
df_clean["__to_remove"] = np.where(
df_clean.ds.dt.dayofweek.isin(cleaning["del_days"]), 1, df_clean["__to_remove"]
)
df_clean = df_clean.query("__to_remove != 1")
del df_clean["__to_remove"]
return df_clean
@st.cache(suppress_st_warning=True, ttl=300)
def _log_transform(df: pd.DataFrame, cleaning: Dict[Any, Any]) -> pd.DataFrame:
"""Applies a log transform to the y column of input dataframe, if possible.
Raises an error in streamlit dashboard if not possible.
Parameters
----------
df : pd.DataFrame
Input dataframe that has to be cleaned.
cleaning : Dict
Cleaning specifications.
Returns
-------
pd.DataFrame
Cleaned dataframe.
"""
df_clean = df.copy() # To avoid CachedObjectMutationWarning
if cleaning["log_transform"]:
if df_clean.y.min() <= 0:
st.error(
"The target has values <= 0. Please remove negative and 0 values when applying log transform."
)
st.stop()
else:
df_clean["y"] = np.log(df_clean["y"])
return df_clean
@st.cache(ttl=300)
def _remove_rows(df: pd.DataFrame, cleaning: Dict[Any, Any]) -> pd.DataFrame:
"""Removes some rows of the input dataframe according to cleaning dict specifications.
Parameters
----------
df : pd.DataFrame
Input dataframe that has to be cleaned.
cleaning : Dict
Cleaning specifications.
Returns
-------
pd.DataFrame
Cleaned dataframe.
"""
df_clean = df.copy() # To avoid CachedObjectMutationWarning
df_clean["__to_remove"] = 0
if cleaning["del_negative"]:
df_clean["__to_remove"] = np.where(df_clean["y"] < 0, 1, df_clean["__to_remove"])
if cleaning["del_days"] is not None:
df_clean["__to_remove"] = np.where(
df_clean.ds.dt.dayofweek.isin(cleaning["del_days"]), 1, df_clean["__to_remove"]
)
if cleaning["del_zeros"]:
df_clean["__to_remove"] = np.where(df_clean["y"] == 0, 1, df_clean["__to_remove"])
df_clean = df_clean.query("__to_remove != 1")
del df_clean["__to_remove"]
return df_clean
def exp_transform(
datasets: Dict[Any, Any], forecasts: Dict[Any, Any]
) -> Tuple[Dict[Any, Any], Dict[Any, Any]]:
"""Applies an exp transform to the y column of dataframes which are values of input dictionaries.
Parameters
----------
datasets : Dict
A dictionary whose values are dataframes used as an input to fit a Prophet model.
forecasts : Dict
A dictionary whose values are dataframes which are the output of a Prophet prediction.
Returns
-------
dict
The datasets dictionary with transformed values.
dict
The forecasts dictionary with transformed values.
"""
for data in set(datasets.keys()):
if "y" in datasets[data].columns:
df_exp = datasets[data].copy()
df_exp["y"] = np.exp(df_exp["y"])
datasets[data] = df_exp.copy()
for data in set(forecasts.keys()):
if "yhat" in forecasts[data].columns:
df_exp = forecasts[data].copy()
df_exp["yhat"] = np.exp(df_exp["yhat"])
forecasts[data] = df_exp.copy()
return datasets, forecasts
|
py | b410cd5e104205762af1bba256ff466ace8baddd | import pytest
from gcsfs.tests.settings import TEST_BUCKET
from gcsfs.tests.utils import my_vcr, gcs_maker
root = TEST_BUCKET + "/mapping"
pytestmark = pytest.mark.usefixtures("token_restore")
def test_api():
import gcsfs
assert "GCSMap" in dir(gcsfs)
assert "mapping" in dir(gcsfs)
@my_vcr.use_cassette(match=["all"])
def test_map_simple():
with gcs_maker() as gcs:
d = gcs.get_mapper(root)
assert not d
assert list(d) == list(d.keys()) == []
assert list(d.values()) == []
assert list(d.items()) == []
@my_vcr.use_cassette(match=["all"])
def test_map_default_gcsfilesystem():
with gcs_maker() as gcs:
d = gcs.get_mapper(root)
assert d.fs is gcs
@my_vcr.use_cassette(match=["all"])
def test_map_errors():
with gcs_maker() as gcs:
d = gcs.get_mapper(root)
with pytest.raises(KeyError):
d["nonexistent"]
try:
gcs.get_mapper("does-not-exist")
except Exception as e:
assert "does-not-exist" in str(e)
@pytest.mark.xfail(reason="only passes for the py version where it was recorded")
@my_vcr.use_cassette(match=["all"])
def test_map_with_data():
with gcs_maker() as gcs:
d = gcs.get_mapper(root)
d["x"] = b"123"
assert list(d) == list(d.keys()) == ["x"]
assert list(d.values()) == [b"123"]
assert list(d.items()) == [("x", b"123")]
assert d["x"] == b"123"
assert bool(d)
assert gcs.find(root) == [TEST_BUCKET + "/mapping/x"]
d["x"] = b"000"
assert d["x"] == b"000"
d["y"] = b"456"
assert d["y"] == b"456"
assert set(d) == {"x", "y"}
d.clear()
assert list(d) == []
@my_vcr.use_cassette(match=["all"])
def test_map_complex_keys():
with gcs_maker() as gcs:
d = gcs.get_mapper(root)
d[1] = b"hello"
assert d[1] == b"hello"
del d[1]
d[1, 2] = b"world"
assert d[1, 2] == b"world"
del d[1, 2]
d["x", 1, 2] = b"hello world"
assert d["x", 1, 2] == b"hello world"
assert ("x", 1, 2) in d
@my_vcr.use_cassette(match=["all"])
def test_map_clear_empty():
with gcs_maker() as gcs:
d = gcs.get_mapper(root)
d.clear()
assert list(d) == []
d[1] = b"1"
# may repeat the test below, since VCR sometimes picks the wrong call to ls
assert list(d) == ["1"] or list(d) == ["1"]
d.clear()
assert list(d) == []
@my_vcr.use_cassette(match=["all"])
def test_map_pickle():
with gcs_maker() as gcs:
d = gcs.get_mapper(root)
d["x"] = b"1"
assert d["x"] == b"1"
import pickle
d2 = pickle.loads(pickle.dumps(d))
assert d2["x"] == b"1"
@my_vcr.use_cassette(match=["all"])
def test_map_array():
with gcs_maker() as gcs:
from array import array
d = gcs.get_mapper(root)
d["x"] = array("B", [65] * 1000)
assert d["x"] == b"A" * 1000
@my_vcr.use_cassette(match=["all"])
def test_map_bytearray():
with gcs_maker() as gcs:
d = gcs.get_mapper(root)
d["x"] = bytearray(b"123")
assert d["x"] == b"123"
@my_vcr.use_cassette(match=["all"])
def test_new_bucket():
with gcs_maker() as gcs:
new_bucket = TEST_BUCKET + "new-bucket"
try:
gcs.rmdir(new_bucket)
except: # noqa: E722
pass
with pytest.raises(Exception) as e:
d = gcs.get_mapper(new_bucket, check=True)
assert "create=True" in str(e.value)
try:
d = gcs.get_mapper(new_bucket, create=True)
assert not d
d = gcs.get_mapper(new_bucket + "/new-directory")
assert not d
finally:
gcs.rmdir(new_bucket)
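# Note: the function below re-uses the name test_map_pickle defined earlier in this
# module, so it shadows that definition and pytest only collects this version.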
@my_vcr.use_cassette(match=["all"])
def test_map_pickle():
import pickle
with gcs_maker() as gcs:
d = gcs.get_mapper(root)
d["x"] = b"1234567890"
b = pickle.dumps(d)
assert b"1234567890" not in b
e = pickle.loads(b)
assert dict(e) == {"x": b"1234567890"}
|
py | b410cd9dcb0f7e426322c2f345531b110c60514b | # -*- coding: utf-8 -*-
# texttable - module for creating simple ASCII tables
# Copyright (C) 2003-2019 Gerome Fournier <jef(at)foutaise.org>
"""module for creating simple ASCII tables
Example:
table = UniTable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\\nXavier\\nHuon", 32, "Xav'"],
["Mr\\nBaptiste\\nClement", 1, "Baby"],
["Mme\\nLouise\\nBourgeau", 28, "Lou\\n\\nLoue"]])
print table.draw() + "\\n"
table = UniTable()
table.set_deco(UniTable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print table.draw()
Result:
+----------+-----+----------+
| Name | Age | Nickname |
+==========+=====+==========+
| Mr | | |
| Xavier | 32 | |
| Huon | | Xav' |
+----------+-----+----------+
| Mr | | |
| Baptiste | 1 | |
| Clement | | Baby |
+----------+-----+----------+
| Mme | | Lou |
| Louise | 28 | |
| Bourgeau | | Loue |
+----------+-----+----------+
text float exp int auto
===========================================
abcd 67.000 6.540e+02 89 128.001
efgh 67.543 6.540e-01 90 1.280e+22
ijkl 0.000 5.000e-78 89 0.000
mnop 0.023 5.000e+78 92 1.280e+22
"""
from __future__ import division
__all__ = ["UniTable", "ArraySizeError"]
__author__ = 'Gerome Fournier <jef(at)foutaise.org>'
__license__ = 'MIT'
__version__ = '1.6.2'
__credits__ = """\
Jeff Kowalczyk:
- textwrap improved import
- comment concerning header output
Anonymous:
- add_rows method, for adding rows in one go
Sergey Simonenko:
- redefined len() function to deal with non-ASCII characters
Roger Lew:
- columns datatype specifications
Brian Peterson:
- better handling of unicode errors
Frank Sachsenheim:
- add Python 2/3-compatibility
Maximilian Hils:
- fix minor bug for Python 3 compatibility
frinkelpi:
- preserve empty lines
gfariello:
- Added unicode box border options, made errors more informative, corrected typos
"""
import sys
import unicodedata
# define a text wrapping function to wrap some text
# to a specific width:
# - use cjkwrap if available (better CJK support)
# - fallback to textwrap otherwise
try:
import cjkwrap
def textwrapper(txt, width):
return cjkwrap.wrap(txt, width)
except ImportError:
try:
import textwrap
def textwrapper(txt, width):
return textwrap.wrap(txt, width)
except ImportError:
sys.stderr.write("Can't import textwrap module!\n")
raise
# define a function to calculate the rendering width of a unicode character
# - use wcwidth if available
# - fallback to unicodedata information otherwise
try:
import wcwidth
def uchar_width(c):
"""Return the rendering width of a unicode character
"""
return max(0, wcwidth.wcwidth(c))
except ImportError:
def uchar_width(c):
"""Return the rendering width of a unicode character
"""
if unicodedata.east_asian_width(c) in 'WF':
return 2
elif unicodedata.combining(c):
return 0
else:
return 1
from functools import reduce
if sys.version_info >= (3, 0):
unicode_type = str
bytes_type = bytes
str_class = str
else:
unicode_type = unicode
bytes_type = str
str_class = basestring
pass
def obj2unicode(obj):
"""Return a unicode representation of a python object"""
if isinstance(obj, unicode_type):
return obj
elif isinstance(obj, bytes_type):
try:
return unicode_type(obj, 'utf-8')
except UnicodeDecodeError as strerror:
sys.stderr.write("UnicodeDecodeError exception for string '%s': %s\n" % (obj, strerror))
return unicode_type(obj, 'utf-8', 'replace')
else:
return unicode_type(obj)
def len(iterable):
"""Redefining len here so it will be able to work with non-ASCII characters """
if isinstance(iterable, bytes_type) or isinstance(iterable, unicode_type):
return sum([uchar_width(c) for c in obj2unicode(iterable)])
else:
return iterable.__len__()
class ArraySizeError(Exception):
"""Exception raised when specified rows don't fit the required size"""
def __init__(self, msg):
self.msg = msg
Exception.__init__(self, msg, '')
def __str__(self):
return self.msg
class FallbackToText(Exception):
"""Used for failed conversion to float"""
pass
class UTableBaseClass:
"""
This is the base class for all UTable classes. It should probably not be instantiated directly.
"""
    def __init__(self,parent=None):
        self.parent = parent
        self.needs_recalc = True
        self._width = None
        self._height = None
        pass
def _chktype(self,what,clss,val,none_ok=False):
if none_ok and val is None:
return val
if not isinstance(val, clss):
raise ValueError("ERROR: %s must be an instance of %s, '%s' is a %s." %(what,clss.__name__,val,type(val).__name__))
return val
def set_parent(self,val,clss):
self._parent = self._chktype('parent',clss,val,True)
pass
@property
def parent(self):
return self._parent
    @parent.setter
    def parent(self,val):
        self.set_parent(val, UTableBaseClass)
        pass
@property
def needs_recalc(self):
return self._needs_recalc
@needs_recalc.setter
def needs_recalc(self,val):
self._chktype('needs_recalc',bool,val)
if self.parent is not None:
self.parent.needs_recalc = val
pass
self._needs_recalc = val
pass
@property
def width(self):
return self._width;
@width.setter
def width(self,val):
self._width = self._chktype('width',int,val,True)
    @property
    def height(self):
        return self._height
    @height.setter
    def height(self,val):
        self._height = self._chktype('height',int,val,True)
class UTable(UTableBaseClass):
    def __init__(self,rows=[],parent=None):
        super().__init__(parent)
self._min_width = None
self._max_width = None
self._height = None
self._rows = []
self._cols = []
pass
pass
class URow(UTableBaseClass):
    def __init__(self,table,cells=[]):
        super().__init__(table)
pass
pass
class UCol(UTableBaseClass):
    def __init__(self,table,cells=[]):
        super().__init__(table)
pass
pass
class Cell:
    def __init__(self,value="",row=None,col=None):
        self._recalc = True
        self._value = None
        self._row = None
        self._col = None
        self.value = value
        self.row = row
        self.col = col
        pass
@property
def value(self):
return self._value
@value.setter
def value(self,val):
if val == self._value:
return
self._recalc = True
if isinstance(val,str_class):
self._value = val.splitlines()
else:
self._value = [val]
pass
pass
@property
def row(self):
return self._row
@row.setter
def row(self,val):
if val == self._row:
return
if val is None or isinstance(val,int):
self._row = val
self._recalc = True
            return
raise ValueError("ERROR: row must be an integer. Received '%s' which is a %s." %(val,type(val).__name__))
@property
def col(self):
return self._col
@col.setter
def col(self,val):
if val == self._col:
return
if val is None or isinstance(val,int):
self._col = val
self._recalc = True
            return
raise ValueError("ERROR: col must be an integer. Received '%s' which is a %s." %(val,type(val).__name__))
@property
def width(self):
return self._width
class UniTable:
BORDER = 1
HEADER = 1 << 1
HLINES = 1 << 2
VLINES = 1 << 3
# --- gfariello -- Start -- Added to support new styles.
TOP = 0
MIDDLE = 1
BOTTOM = 2
STYLES = {
"bold": "━┃┏┓┗┛┣┫┳┻╋━┣┫╋",
"default": "-|+=",
"double": "═║╔╗╚╝╠╣╦╩╬═╠╣╬",
"very_light": "─│┌┐└┘├┤┬┴┼─├┤┼",
"light": "─│┌┐└┘├┤┬┴┼═╞╡╪",
"round": "─│╭╮╰╯├┤┬┴┼─├┤┼",
"round2": "─│╭╮╰╯├┤┬┴┼═╞╡╪",
"simple": "-|+-",
}
STYLE_MAPPER = {
"heavy": {
"---w": " ",
"--e-": " ",
"--ew": "━",
"-s--": " ",
"-s-w": "┓",
"-se-": "┏",
"-sew": "┳",
"n---": " ",
"n--w": "┛",
"n-e-": "┗",
"n-ew": "┻",
"ns--": "┃",
"ns-w": "┫",
"nse-": "┣",
"nsew": "╋",
},
"light": {
"---w": " ",
"--e-": " ",
"--ew": "-",
"-s--": " ",
"-s-w": "┐",
"-se-": "┌",
"-sew": "┬",
"n---": " ",
"n--w": "┘",
"n-e-": "└",
"n-ew": "┴",
"ns--": "│",
"ns-w": "┤",
"nse-": "├",
"nsew": "┼",
},
"round": {
"---w": " ",
"--e-": " ",
"--ew": "-",
"-s--": " ",
"-s-w": "╮",
"-se-": "╭",
"-sew": "┬",
"n---": " ",
"n--w": "╯",
"n-e-": "╰",
"n-ew": "┴",
"ns--": "│",
"ns-w": "┤",
"nse-": "├",
"nsew": "┼",
},
"double": {
"---w": " ",
"--e-": " ",
"--ew": "═",
"-s--": " ",
"-s-w": "╗",
"-se-": "╔",
"-sew": "╦",
"n---": " ",
"n--w": "╝",
"n-e-": "╚",
"n-ew": "╩",
"ns--": "║",
"ns-w": "╣",
"nse-": "╠",
"nsew": "╬",
},
"heavy:light": {
"---w:--e-": "╾",
"---w:-s--": "┑",
"---w:-se-": "┲",
"---w:n---": "┙",
"---w:n-e-": "┺",
"---w:ns--": "┥",
"---w:nse-": "┽",
"--e-:---w": "╼",
"--e-:-s--": "┍",
"--e-:-s-w": "┮",
"--e-:n---": "┙",
"--e-:n--w": "┶",
"--e-:ns--": "┝",
"--e-:ns-w": "┾",
"--ew:-s--": "┰",
"--ew:n---": "┸",
"--ew:ns--": "┿",
"-s--:---w": "┒",
"-s--:--e-": "┎",
"-s--:--ew": "┰",
"-s--:n---": "╽",
"-s--:n--w": "┧",
"-s--:n-e-": "┟",
"-s--:n-ew": "╁",
"-s-w:--e-": "┱",
"-s-w:n---": "┧",
"-s-w:n-e-": "╅",
"-se-:---w": "┲",
"-se-:n---": "┢",
"-se-:n--w": "╆",
"-sew:n---": "╈",
"n---:---w": "┖",
"n---:--e-": "┚",
"n---:--ew": "┸",
"n---:-s--": "╿",
"n---:-s-w": "┦",
"n---:-se-": "┞",
"n---:-sew": "╀",
"n--w:--e-": "┹",
"n--w:-s--": "┩",
"n--w:-se-": "╃",
"n-e-:---w": "┺",
"n-e-:-s--": "┡",
"n-e-:-s-w": "╄",
"n-ew:-s--": "╇",
"ns--:---w": "┨",
"ns--:--e-": "┠",
"ns--:--ew": "╂",
"ns-w:--e-": "╉",
"nse-:---w": "╊",
}
}
# --- gfariello -- End -- Added to support new styles.
# --- gfariello -- Start -- Added init with table def.
    # NOTE: See below about backward compatibility
def __init__(self, rows=None, max_width=80):
# --- gfariello -- End -- Added init with table def.
"""Constructor
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._has_border = True
self._has_header = True
self._has_hline_between_headers = True
self._has_hline_header_2_cell = True
self._has_hline_between_cells = True
self._has_vline_between_headers = True
self._has_vline_header_2_cell = True
self._has_vline_between_cells = True
self.set_max_width(max_width)
self._precision = 3
self._deco = UniTable.VLINES | UniTable.HLINES | UniTable.BORDER | \
UniTable.HEADER
self.set_style("default")
self._pad = 1
self.reset()
# --- gfariello -- Start -- Added to support rows arg (i.e., adding
# entire table definition in initilization). NOTE: This changed the
# order (max_width is now one arg later) and therefore has a chance of
# breaking older code that called UniTable(50) but not
# UniTable(max_width=50). It felt less intuitive to have the rows
# definition after the max_width, but if backwards compatibility is
# more important, just swap the order of rows and max_width.
if rows is not None:
self.add_rows(rows)
pass
# --- gfariello -- End -- Added to support rows arg.
pass
@property
def has_border(self):
return self._has_border
@has_border.setter
def has_border(self,value):
self._has_border = value
return value
@property
def has_header(self):
return self._has_header
@has_header.setter
def has_header(self,value):
self._has_header = value
return value
def reset(self):
"""Reset the instance
- reset rows and header
"""
self._hline_string = None
self._row_size = None
self._header = []
self._rows = []
return self
def set_max_width(self, max_width):
"""Set the maximum width of the table
- max_width is an integer, specifying the maximum width of the table
- if set to 0, size is unlimited, therefore cells won't be wrapped
"""
self._max_width = max_width if max_width > 0 else False
return self
    def set_style(self, style="light"):
        """Set the characters used to draw lines between rows and columns to one of the
        predefined styles in UniTable.STYLES, for example:
           "light": unicode light box borders (─│┌┐└┘├┤┬┴┼)
           "bold": unicode bold box borders (━┃┏┓┗┛┣┫┳┻╋)
           "double": unicode double box borders (═║╔╗╚╝╠╣╦╩╬)
           "default": plain ASCII borders (-|+=)
        Default if none provided is "light".
"""
if style in UniTable.STYLES:
return self.set_chars(UniTable.STYLES[style])
raise ValueError("style must be one of '%s' not '%s'" %("','".join(sorted(UniTable.STYLES.keys())),style))
def _set_chars(self, array):
"""Set the characters used to draw lines between rows and columns in the following format:
[
ew, # The character connecting east and west to use for a horizantal line (e.g. "-" or "─" )
           ns,    # The character connecting north and south to use for a vertical line (e.g. "|" or "│" )
se, # The character connecting south and east to use for the top- and left-most corner (e.g. "+", or "┌")
sw, # The character connecting south and west to use for the top- and right-most corner (e.g. "+" or "┐")
ne, # The character connecting north and east to use for the bottom- and left-most corner (e.g. "+" or "└")
nw, # The character connecting north and west to use for the bottom- and right-most corner (e.g. "+" or "┘")
           nse,   # The character connecting north, south, and east (e.g., "+" or "├")
           nsw,   # The character connecting north, south, and west (e.g., "+" or "┤")
           sew,   # The character connecting south, east, and west (e.g., "+" or "┬")
           new,   # The character connecting north, east, and west (e.g., "+" or "┴")
           nsew,  # The character connecting north, south, east, and west (e.g., "+" or "┼")
hew, # The character connecting east and west to use for a line separating headers (e.g. "=" or "═" )
hnse, # The character connecting north, south and east to use for a line separating headers (e.g. "+" or "╞" )
hnsw, # The character connecting north, south, and west to use for a line separating headers (e.g. "+" or "╡" )
hnsew, # The character connecting north, south, east and west to use for a line separating headers (e.g. "+" or "╪" )
]
For legacy default it would be "-|+++++++++=+++"
"""
if len(array) != 15:
raise ArraySizeError("string/array should contain 15 characters not %d as in '%s'" %(len(array),array))
(
self._char_ew,
self._char_ns,
self._char_se,
self._char_sw,
self._char_ne,
self._char_nw,
self._char_nse,
self._char_nsw,
self._char_sew,
self._char_new,
self._char_nsew,
self._char_hew,
self._char_hnse,
self._char_hnsw,
self._char_hnsew,
) = array
return self
def set_chars(self, array):
"""Set the characters used to draw lines between rows and columns
- the array should contain 4 fields:
[horizontal, vertical, corner, header]
- default is set to:
['-', '|', '+', '=']
"""
if len(array) == 15:
return self._set_chars(array)
if len(array) != 4:
raise ArraySizeError("string/array should contain either 4 or 15 characters not %d as in '%s'" %(len(array),array))
(hor,ver,cor,hea) = array
return self._set_chars([hor,ver,cor,cor,cor,cor,cor,cor,cor,cor,cor,hea,cor,cor,cor])
def set_deco(self, deco):
"""Set the table decoration
- 'deco' can be a combinasion of:
UniTable.BORDER: Border around the table
UniTable.HEADER: Horizontal line below the header
UniTable.HLINES: Horizontal lines between rows
UniTable.VLINES: Vertical lines between columns
All of them are enabled by default
- example:
UniTable.BORDER | UniTable.HEADER
"""
self._deco = deco
return self
def set_header_align(self, array):
"""Set the desired header alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._header_align = array
return self
def set_cols_align(self, array):
"""Set the desired columns alignment
- the elements of the array should be either "l", "c" or "r":
* "l": column flushed left
* "c": column centered
* "r": column flushed right
"""
self._check_row_size(array)
self._align = array
return self
def set_cols_valign(self, array):
"""Set the desired columns vertical alignment
- the elements of the array should be either "t", "m" or "b":
* "t": column aligned on the top of the cell
* "m": column aligned on the middle of the cell
* "b": column aligned on the bottom of the cell
"""
self._check_row_size(array)
self._valign = array
return self
def set_cols_dtype(self, array):
"""Set the desired columns datatype for the cols.
- the elements of the array should be either a callable or any of
"a", "t", "f", "e" or "i":
* "a": automatic (try to use the most appropriate datatype)
* "t": treat as text
* "f": treat as float in decimal format
* "e": treat as float in exponential format
* "i": treat as int
* a callable: should return formatted string for any value given
- by default, automatic datatyping is used for each column
"""
self._check_row_size(array)
self._dtype = array
return self
def set_cols_width(self, array):
"""Set the desired columns width
- the elements of the array should be integers, specifying the
width of each column. For example:
[10, 20, 5]
"""
self._check_row_size(array)
try:
array = list(map(int, array))
if reduce(min, array) <= 0:
raise ValueError
except ValueError:
sys.stderr.write("Wrong argument in column width specification\n")
raise
self._width = array
return self
def set_precision(self, width):
"""Set the desired precision for float/exponential formats
- width must be an integer >= 0
- default value is set to 3
"""
if not type(width) is int or width < 0:
            raise ValueError('width must be an integer >= 0')
self._precision = width
return self
def set_padding(self, amount):
"""Set the amount of spaces to pad cells (right and left, we don't do top bottom padding)
- width must be an integer >= 0
- default value is set to 1
"""
if not type(amount) is int or amount < 0:
            raise ValueError('padding must be an integer >= 0')
self._pad = amount
return self
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self
def add_row(self, array):
"""Add a row in the rows stack
- cells can contain newlines and tabs
"""
self._check_row_size(array)
if not hasattr(self, "_dtype"):
self._dtype = ["a"] * self._row_size
cells = []
for i, x in enumerate(array):
cells.append(self._str(i, x))
self._rows.append(cells)
return self
def add_rows(self, rows, header=True):
"""Add several rows in the rows stack
- The 'rows' argument can be either an iterator returning arrays,
          or a two-dimensional array
- 'header' specifies if the first row should be used as the header
of the table
"""
        # nb: don't use 'iter' on two-dimensional arrays, to keep the code
        # usable with python 2.1
if header:
if hasattr(rows, '__iter__') and hasattr(rows, 'next'):
self.header(rows.next())
else:
self.header(rows[0])
rows = rows[1:]
for row in rows:
self.add_row(row)
return self
def draw(self):
"""Draw the table
- the table is returned as a whole string
"""
if not self._header and not self._rows:
return
self._compute_cols_width()
self._check_align()
out = ""
if self.has_border:
out += self._hline(location=UniTable.TOP)
if self._header:
out += self._draw_line(self._header, isheader=True)
if self.has_header:
out += self._hline_header(location=UniTable.MIDDLE)
pass
pass
num = 0
length = len(self._rows)
for row in self._rows:
num += 1
out += self._draw_line(row)
if self.has_hlines() and num < length:
out += self._hline(location=UniTable.MIDDLE)
if self._has_border:
out += self._hline(location=UniTable.BOTTOM)
return out[:-1]
@classmethod
def _to_float(cls, x):
if x is None:
raise FallbackToText()
try:
return float(x)
except (TypeError, ValueError):
raise FallbackToText()
@classmethod
def _fmt_int(cls, x, **kw):
"""Integer formatting class-method.
- x will be float-converted and then used.
"""
return str(int(round(cls._to_float(x))))
@classmethod
def _fmt_float(cls, x, **kw):
"""Float formatting class-method.
        - x will be float-converted and rendered in decimal format.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*f' % (n, cls._to_float(x))
@classmethod
def _fmt_exp(cls, x, **kw):
"""Exponential formatting class-method.
        - x will be float-converted and rendered in exponential format.
- precision will be taken from `n` kw-argument.
"""
n = kw.get('n')
return '%.*e' % (n, cls._to_float(x))
@classmethod
def _fmt_text(cls, x, **kw):
"""String formatting class-method."""
return obj2unicode(x)
@classmethod
def _fmt_auto(cls, x, **kw):
"""auto formatting class-method."""
f = cls._to_float(x)
if abs(f) > 1e8:
fn = cls._fmt_exp
elif f != f: # NaN
fn = cls._fmt_text
elif f - round(f) == 0:
fn = cls._fmt_int
else:
fn = cls._fmt_float
return fn(x, **kw)
def _str(self, i, x):
"""Handles string formatting of cell data
i - index of the cell datatype in self._dtype
x - cell data to format
"""
FMT = {
'a':self._fmt_auto,
'i':self._fmt_int,
'f':self._fmt_float,
'e':self._fmt_exp,
't':self._fmt_text,
}
n = self._precision
dtype = self._dtype[i]
try:
if callable(dtype):
return dtype(x)
else:
return FMT[dtype](x, n=n)
except FallbackToText:
return self._fmt_text(x)
def _check_row_size(self, array):
"""Check that the specified array fits the previous rows size
"""
if not self._row_size:
self._row_size = len(array)
elif self._row_size != len(array):
raise ArraySizeError("array should contain %d elements not %s (array=%s)" \
%(self._row_size,len(array),array))
def has_vlines(self):
"""Return a boolean, if vlines are required or not
"""
return self._deco & UniTable.VLINES > 0
def has_hlines(self):
"""Return a boolean, if hlines are required or not
"""
return self._deco & UniTable.HLINES > 0
def _hline_header(self,location=MIDDLE):
"""Print header's horizontal line
"""
return self._build_hline(is_header=True,location=location)
def _hline(self,location):
"""Print an horizontal line
"""
# if not self._hline_string:
# self._hline_string = self._build_hline(location)
# return self._hline_string
return self._build_hline(is_header=False,location=location)
def _build_hline(self, is_header=False, location=MIDDLE):
"""Return a string used to separated rows or separate header from
rows
"""
horiz_char = self._char_hew if is_header else self._char_ew
if UniTable.TOP == location:
left, mid, right = self._char_se, self._char_sew, self._char_sw
elif UniTable.MIDDLE == location:
if is_header:
left, mid, right = self._char_hnse, self._char_hnsew, self._char_hnsw
else:
left, mid, right = self._char_nse, self._char_nsew, self._char_nsw
pass
elif UniTable.BOTTOM == location:
# NOTE: This will not work as expected if the table is only headers.
left, mid, right = self._char_ne, self._char_new, self._char_nw
else:
raise ValueError("Unknown location '%s'. Should be one of UniTable.TOP, UniTable.MIDDLE, or UniTable.BOTTOM." %(location))
# compute cell separator
s = "%s%s%s" % (horiz_char * self._pad, [horiz_char, mid][self.has_vlines()], horiz_char * self._pad)
# build the line
l = s.join([horiz_char * n for n in self._width])
# add border if needed
if self.has_border:
l = "%s%s%s%s%s\n" % (left, horiz_char * self._pad , l, horiz_char * self._pad ,right)
else:
l += "\n"
return l
def _len_cell(self, cell):
"""Return the width of the cell
Special characters are taken into account to return the width of the
cell, such like newlines and tabs
"""
cell_lines = cell.split('\n')
maxi = 0
for line in cell_lines:
length = 0
parts = line.split('\t')
for part, i in zip(parts, list(range(1, len(parts) + 1))):
length = length + len(part)
if i < len(parts):
length = (length//8 + 1) * 8
maxi = max(maxi, length)
return maxi
def _compute_cols_width(self):
"""Return an array with the width of each column
If a specific width has been specified, exit. If the total of the
columns width exceed the table desired width, another width will be
computed to fit, and cells will be wrapped.
"""
if hasattr(self, "_width"):
return
maxi = []
if self._header:
maxi = [ self._len_cell(x) for x in self._header ]
for row in self._rows:
for cell,i in zip(row, list(range(len(row)))):
try:
maxi[i] = max(maxi[i], self._len_cell(cell))
except (TypeError, IndexError):
maxi.append(self._len_cell(cell))
ncols = len(maxi)
content_width = sum(maxi)
deco_width = 3*(ncols-1) + [0,4][self.has_border]
if self._max_width and (content_width + deco_width) > self._max_width:
""" content too wide to fit the expected max_width
let's recompute maximum cell width for each cell
"""
if self._max_width < (ncols + deco_width):
raise ValueError('max_width too low to render data')
available_width = self._max_width - deco_width
newmaxi = [0] * ncols
i = 0
while available_width > 0:
if newmaxi[i] < maxi[i]:
newmaxi[i] += 1
available_width -= 1
i = (i + 1) % ncols
maxi = newmaxi
self._width = maxi
def _check_align(self):
"""Check if alignment has been specified, set default one if not
"""
if not hasattr(self, "_header_align"):
self._header_align = ["c"] * self._row_size
if not hasattr(self, "_align"):
self._align = ["l"] * self._row_size
if not hasattr(self, "_valign"):
self._valign = ["t"] * self._row_size
def _draw_line(self, line, isheader=False):
"""Draw a line
Loop over a single cell length, over all the cells
"""
line = self._splitit(line, isheader)
space = " "
out = ""
topmost,leftmost = True, True
for i in range(len(line[0])):
if self.has_border:
out += "%s%s" %(self._char_ns, " " * self._pad)
length = 0
for cell, width, align in zip(line, self._width, self._align):
length += 1
cell_line = cell[i]
fill = width - len(cell_line)
if isheader:
align = self._header_align[length - 1]
if align == "r":
out += fill * space + cell_line
elif align == "c":
out += (int(fill/2) * space + cell_line \
+ int(fill/2 + fill%2) * space)
else:
out += cell_line + fill * space
if length < len(line):
out += "%s%s%s" %(" " * self._pad, [space, self._char_ns][self.has_vlines()], " " * self._pad)
out += "%s\n" % ['', " " * self._pad + self._char_ns][self.has_border]
return out
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
if c.strip() == "":
array.append("")
else:
array.extend(textwrapper(c, width))
line_wrapped.append(array)
max_cell_lines = reduce(max, list(map(len, line_wrapped)))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * int(missing / 2)
cell.extend([""] * int(missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
def test_styles(table):
row = []
for style in styles:
table.set_style(style)
        row.append(table.draw())
pass
return row
if __name__ == '__main__':
table = UniTable()
table.set_cols_align(["l", "r", "c"])
table.set_cols_valign(["t", "m", "b"])
table.add_rows([["Name", "Age", "Nickname"],
["Mr\nXavier\nHuon", 32, "Xav'"],
["Mr\nBaptiste\nClement", 1, "Baby"],
["Mme\nLouise\nBourgeau", 28, "Lou\n \nLoue"]])
print(table.draw() + "\n")
table = UniTable()
table.set_deco(UniTable.HEADER)
table.set_cols_dtype(['t', # text
'f', # float (decimal)
'e', # float (exponent)
'i', # integer
'a']) # automatic
table.set_cols_align(["l", "r", "r", "r", "l"])
table.add_rows([["text", "float", "exp", "int", "auto"],
["abcd", "67", 654, 89, 128.001],
["efghijk", 67.5434, .654, 89.6, 12800000000000000000000.00023],
["lmn", 5e-78, 5e-78, 89.4, .000000000000128],
["opqrstu", .023, 5e+78, 92., 12800000000000000000000]])
print(table.draw())
# Create a table of tables that shows different table styles
styles = sorted(UniTable.STYLES.keys())
t1 = UniTable([["STYLES"] + styles])
t1.set_max_width(0)
t1.set_cols_align("l" + "c" * len(styles))
t1.set_cols_valign("m" + "t" * len(styles))
t1.set_style("light")
style_rows =[["Header 1","Header 2"],["Cell 1","Cell 2"],["Cell 3","Cell 4"],]
t2 = UniTable(style_rows)
for style in styles:
print("Style \"%s\"" %(style))
t2.set_style(style)
print(t2.draw())
pass
exit()
t1.add_row(["DEFAULT"] + test_styles(t2))
t2.set_padding(0)
t1.add_row(["set_padding(0)"] + test_styles(t2))
t2.set_padding(2)
t1.add_row(["set_padding(2)"] + test_styles(t2))
t2.set_deco(UniTable.HEADER)
t2.set_padding(1)
t1.add_row(["set_deco(HEADER)"] + test_styles(t2))
print(t1.draw() + "\n")
|
py | b410ce3466a3061f663f1b429c0fd681624d8aa7 | import math
import time
import random
import csv
# Pytorch packages
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
# Torchtext package
from torchtext.datasets import Multi30k
from torchtext.data import Field, BucketIterator
# Tqdm progress bar
from tqdm import tqdm_notebook, tqdm
# Code provided to you for training and evaluation
from utils import train, evaluate, set_seed_nb, unit_test_values
from models.naive.RNN import VanillaRNN
from models.naive.LSTM import LSTM
from models.seq2seq.Encoder import Encoder
from models.seq2seq.Decoder import Decoder
from models.seq2seq.Seq2Seq import Seq2Seq
from models.Transformer import TransformerTranslator
def unit_test_values(testcase):
if testcase == 'rnn':
return torch.FloatTensor([[-0.9827, -0.5264, -3.3529],
[-0.9810, -0.5418, -3.1382],
[-0.9813, -0.5594, -2.9257],
[-0.9843, -0.5795, -2.7158]]), torch.FloatTensor([[ 0.7531, 0.8514, 0.0764, 0.3671],
[ 0.4500, 0.7670, 0.2058, 0.0314],
[-0.0109, 0.6440, 0.3284, -0.3116],
[-0.4671, 0.4752, 0.4408, -0.5889]])
if testcase == 'lstm':
ht = torch.FloatTensor([[-0.0325, 0.1519, 0.0063, 0.3199],
[ 0.1009, 0.0199, 0.2986, -0.2799],
[ 0.1365, 0.0128, 0.3263, -0.1227],
[ 0.1156, 0.0043, 0.2449, -0.0333]])
ct = torch.FloatTensor([[-0.0379, 0.3315, 0.0066, 0.5134],
[ 0.1333, 0.1196, 0.3492, -0.9668],
[ 0.2017, 0.2715, 0.4971, -2.4863],
[ 0.1979, 0.3571, 0.6673, -2.8806]])
return ht, ct
if testcase == 'encoder':
expected_out =torch.FloatTensor([[[-0.7773, -0.2031]],
[[-0.4129, -0.1802]],
[[0.0599, -0.0151]],
[[-0.9273, 0.2683]],
[[0.6161, 0.5412]]])
expected_hidden = torch.FloatTensor([[[0.4912, -0.6078],
[0.4912, -0.6078],
[0.4985, -0.6658],
[0.4932, -0.6242],
[0.4880, -0.7841]]])
return expected_out, expected_hidden
if testcase == 'decoder':
expected_out = torch.FloatTensor([[-2.1507, -1.6473, -3.1772, -3.2119, -2.6847, -2.1598, -1.9192, -1.8130,
-2.6142, -3.1621],
[-2.0260, -2.0121, -3.2508, -3.1249, -2.4581, -1.8520, -2.0798, -1.7596,
-2.6393, -3.2001],
[-2.1078, -2.2130, -3.1951, -2.7392, -2.1194, -1.8174, -2.1087, -2.0006,
-2.4518, -3.2652],
[-2.7016, -1.1364, -3.0247, -2.9801, -2.8750, -3.0020, -1.6711, -2.4177,
-2.3906, -3.2773],
[-2.2018, -1.6935, -3.1234, -2.9987, -2.5178, -2.1728, -1.8997, -1.9418,
-2.4945, -3.1804]])
expected_hidden = torch.FloatTensor([[[-0.1854, 0.5561],
[-0.4359, 0.1476],
[-0.0992, -0.3700],
[0.9429, 0.8276],
[0.0372, 0.3287]]])
return expected_out, expected_hidden
if testcase == 'seq2seq':
expected_out = torch.FloatTensor([[[-2.4136, -2.2861, -1.7145, -2.5612, -1.9864, -2.0557, -1.7461,
-2.1898],
[-2.0869, -2.9425, -2.0188, -1.6864, -2.5141, -2.3069, -1.4921,
-2.3045]]])
return expected_out
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
set_seed_nb()
encoderON = False
decoderON = False
seq2seqON = False
if seq2seqON==True:
embedding_size = 32
hidden_size = 32
input_size = 8
output_size = 8
batch, seq = 1, 2
expected_out = unit_test_values('seq2seq') # (1, 1, 2, 8)
encoder = Encoder(input_size, embedding_size, hidden_size, hidden_size)
decoder = Decoder(embedding_size, hidden_size, hidden_size, output_size)
seq2seq = Seq2Seq(encoder, decoder, 'cpu')
x_array = np.random.rand(batch, seq) * 10
x = torch.LongTensor(x_array)
out = seq2seq.forward(x)
print('Close to out: ', expected_out.allclose(out, atol=1e-4))
################# ENCODER #################
if encoderON==True:
expected_out, expected_hidden = unit_test_values('encoder')
i, n, h = 10, 4, 2
encoder = Encoder(i, n, h, h)
x_array = np.random.rand(5,1) * 10
x = torch.LongTensor(x_array)
out, hidden = encoder.forward(x)
print('Close to out: ', expected_out.allclose(out, atol=1e-4))
print('Close to hidden: ', expected_hidden.allclose(hidden, atol=1e-4))
################ DECODER #################
if decoderON==True:
i, n, h = 10, 2, 2
decoder = Decoder(h, n, n, i)
x_array = np.random.rand(5, 1) * 10
x = torch.LongTensor(x_array)
_, enc_hidden = unit_test_values('encoder')
out, hidden = decoder.forward(x,enc_hidden)
expected_out, expected_hidden = unit_test_values('decoder')
print('Close to out: ', expected_out.allclose(out, atol=1e-4))
print('Close to hidden: ', expected_hidden.allclose(hidden, atol=1e-4))
######################################### TRANSFORMER TRAINING ##################################################################
# train_inxs = np.load('./data/train_inxs.npy')
# val_inxs = np.load('./data/val_inxs.npy')
# train_labels = np.load('./data/train_labels.npy')
# val_labels = np.load('./data/val_labels.npy')
#
# # load dictionary
# word_to_ix = {}
# with open("./data/word_to_ix.csv", "r") as f:
# reader = csv.reader(f)
# for line in reader:
# word_to_ix[line[0]] = line[1]
# print("Vocabulary Size:", len(word_to_ix))
#
# print(train_inxs.shape) # 7000 training instances, of (maximum/padded) length 43 words.
# print(val_inxs.shape) # 1551 validation instances, of (maximum/padded) length 43 words.
# print(train_labels.shape)
# print(val_labels.shape)
#
# d1 = torch.load('./data/d1.pt')
# d2 = torch.load('./data/d2.pt')
# d3 = torch.load('./data/d3.pt')
# d4 = torch.load('./data/d4.pt')
#
#
# # Define the maximum length of the sentence. Shorter sentences will be padded to that length and longer sentences will be croped. Given that the average length of the sentence in the corpus is around 13, we can set it to 20
# MAX_LEN = 20
#
# # Define the source and target language
# SRC = Field(tokenize = "spacy",
# tokenizer_language="de",
# init_token = '<sos>',
# eos_token = '<eos>',
# fix_length = MAX_LEN,
# lower = True)
#
# TRG = Field(tokenize = "spacy",
# tokenizer_language="en",
# init_token = '<sos>',
# eos_token = '<eos>',
# fix_length = MAX_LEN,
# lower = True)
#
# # Download and split the data. It should take some time
# train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'),
# fields = (SRC, TRG))
# # Define Batchsize
# BATCH_SIZE = 128
#
# # Build the vocabulary associated with each language
# SRC.build_vocab(train_data, min_freq = 2)
# TRG.build_vocab(train_data, min_freq = 2)
#
# # Get the padding index to be ignored later in loss calculation
# PAD_IDX = TRG.vocab.stoi['<pad>']
# # Get data-loaders using BucketIterator
# train_loader, valid_loader, test_loader = BucketIterator.splits(
# (train_data, valid_data, test_data),
# batch_size = BATCH_SIZE, device = device)
#
# # Get the input and the output sizes for model
# input_size = len(SRC.vocab)
# output_size = len(TRG.vocab)
#
# # Hyperparameters
# learning_rate = 1e-3
# EPOCHS = 10
#
# # Model
# trans_model = TransformerTranslator(input_size, output_size, device, max_length = MAX_LEN).to(device)
#
# # optimizer = optim.Adam(model.parameters(), lr = learning_rate)
# optimizer = torch.optim.Adam(trans_model.parameters(), lr=learning_rate)
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
# criterion = nn.CrossEntropyLoss(ignore_index=PAD_IDX)
# for epoch_idx in range(EPOCHS):
# print("-----------------------------------")
# print("Epoch %d" % (epoch_idx + 1))
# print("-----------------------------------")
#
# train_loss, avg_train_loss = train(trans_model, train_loader, optimizer, criterion)
# scheduler.step(train_loss)
#
# val_loss, avg_val_loss = evaluate(trans_model, valid_loader, criterion)
#
# avg_train_loss = avg_train_loss.item()
# avg_val_loss = avg_val_loss.item()
# print("Training Loss: %.4f. Validation Loss: %.4f. " % (avg_train_loss, avg_val_loss))
# print("Training Perplexity: %.4f. Validation Perplexity: %.4f. " % (np.exp(avg_train_loss), np.exp(avg_val_loss)))
######################################### TRANSFORMER UNIT TEST ##################################################################
# train_inxs = np.load('./data/train_inxs.npy')
# val_inxs = np.load('./data/val_inxs.npy')
# train_labels = np.load('./data/train_labels.npy')
# val_labels = np.load('./data/val_labels.npy')
#
# # load dictionary
# word_to_ix = {}
# with open("./data/word_to_ix.csv", "r") as f:
# reader = csv.reader(f)
# for line in reader:
# word_to_ix[line[0]] = line[1]
# print("Vocabulary Size:", len(word_to_ix))
#
# print(train_inxs.shape) # 7000 training instances, of (maximum/padded) length 43 words.
# print(val_inxs.shape) # 1551 validation instances, of (maximum/padded) length 43 words.
# print(train_labels.shape)
# print(val_labels.shape)
#
# d1 = torch.load('./data/d1.pt')
# d2 = torch.load('./data/d2.pt')
# d3 = torch.load('./data/d3.pt')
# d4 = torch.load('./data/d4.pt')
#
#
#
# inputs = train_inxs[0:2]
# inputs = torch.LongTensor(inputs)
#
# model = TransformerTranslator(input_size=len(word_to_ix), output_size=2, device=device, hidden_dim=128, num_heads=2, dim_feedforward=2048, dim_k=96, dim_v=96, dim_q=96, max_length=train_inxs.shape[1])
#
# embeds = model.embed(inputs)
# # attention
# hidden_states = model.multi_head_attention(embeds)
#
# ## feed forward
# outputs = model.feedforward_layer(hidden_states)
#
# # final layer.
# scores = model.final_layer(outputs)
#
# inputs = train_inxs[0:2]
# inputs = torch.LongTensor(inputs)
#
# outputs = model.forward(inputs)
#
# try:
# print("Difference:", torch.sum(torch.pairwise_distance(outputs, scores)).item()) # should be very small (<3e-5)
# except:
# print("NOT IMPLEMENTED")
######################################### SEQUENCER TRAINING ##################################################################
# Define the source and target language
MAX_LEN = 20
SRC = Field(tokenize = "spacy",
tokenizer_language="de",
init_token = '<sos>',
eos_token = '<eos>',
fix_length = MAX_LEN,
lower = True)
TRG = Field(tokenize = "spacy",
tokenizer_language="en",
init_token = '<sos>',
eos_token = '<eos>',
fix_length = MAX_LEN,
lower = True)
# Download and split the data. It should take some time
train_data, valid_data, test_data = Multi30k.splits(exts = ('.de', '.en'),
fields = (SRC, TRG))
# Define Batchsize
BATCH_SIZE = 128
# Build the vocabulary associated with each language
SRC.build_vocab(train_data, min_freq = 2)
TRG.build_vocab(train_data, min_freq = 2)
# Get the padding index to be ignored later in loss calculation
PAD_IDX = TRG.vocab.stoi['<pad>']
# Get data-loaders using BucketIterator
train_loader, valid_loader, test_loader = BucketIterator.splits(
(train_data, valid_data, test_data),
batch_size = BATCH_SIZE, device = device)
# Get the input and the output sizes for model
input_size = len(SRC.vocab)
output_size = len(TRG.vocab)
# Hyperparameters. You are welcome to modify these
encoder_emb_size = 8
encoder_hidden_size = 128
encoder_dropout = 0.2
decoder_emb_size = 8
decoder_hidden_size = 128
decoder_dropout = 0.2
learning_rate = 1e-3
model_type = "LSTM"
EPOCHS = 10
#input size and output size
input_size = len(SRC.vocab)
output_size = len(TRG.vocab)
# Declare models, optimizer, and loss function
encoder = Encoder(input_size, encoder_emb_size, encoder_hidden_size, decoder_hidden_size, dropout = encoder_dropout, model_type = model_type)
decoder = Decoder(decoder_emb_size, encoder_hidden_size, encoder_hidden_size, output_size, dropout = decoder_dropout, model_type = model_type)
seq2seq_model = Seq2Seq(encoder, decoder, device)
optimizer = optim.Adam(seq2seq_model.parameters(), lr = learning_rate)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
criterion = nn.CrossEntropyLoss(ignore_index=PAD_IDX)
for epoch_idx in range(EPOCHS):
print("-----------------------------------")
print("Epoch %d" % (epoch_idx + 1))
print("-----------------------------------")
train_loss, avg_train_loss = train(seq2seq_model, train_loader, optimizer, criterion)
scheduler.step(train_loss)
val_loss, avg_val_loss = evaluate(seq2seq_model, valid_loader, criterion)
avg_train_loss = avg_train_loss.item()
avg_val_loss = avg_val_loss.item()
print("Training Loss: %.4f. Validation Loss: %.4f. " % (avg_train_loss, avg_val_loss))
print("Training Perplexity: %.4f. Validation Perplexity: %.4f. " % (np.exp(avg_train_loss), np.exp(avg_val_loss)))
|
py | b410cfee47ad2ca88def8a2db9d6224c876cdfd7 | # Generated by Django 3.0 on 2021-08-16 23:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_auto_20210723_1518'),
('orders', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='OrderItems',
new_name='OrderItem',
),
migrations.AlterModelOptions(
name='order',
options={'ordering': ('-created',)},
),
]
|
py | b410d0854d78355ddf23642fdbb5570c8bb8a57e | from abc import ABC, abstractmethod
from operator import itemgetter
from typing import Callable, Optional, Union, Dict, Any, List, Awaitable, Sequence
from aiogram.types import CallbackQuery, InlineKeyboardButton
from aiogram_dialog.context.events import ChatEvent
from aiogram_dialog.dialog import Dialog
from aiogram_dialog.manager.manager import DialogManager
from aiogram_dialog.widgets.text import Text, Case
from aiogram_dialog.widgets.widget_event import (
WidgetEventProcessor, ensure_event_processor,
)
from .base import Keyboard
from ..managed import ManagedWidgetAdapter
from ...deprecation_utils import manager_deprecated
ItemIdGetter = Callable[[Any], Union[str, int]]
ItemsGetter = Callable[[Dict], Sequence]
OnItemStateChanged = Callable[
[ChatEvent, ManagedWidgetAdapter["Select"], DialogManager, str],
Awaitable,
]
OnItemClick = Callable[
[CallbackQuery, ManagedWidgetAdapter["Select"], DialogManager, str],
Awaitable,
]
def get_identity(items: Sequence) -> ItemsGetter:
def identity(data) -> Sequence:
return items
return identity
class Select(Keyboard):
def __init__(self, text: Text,
id: str,
item_id_getter: ItemIdGetter,
items: Union[str, Sequence],
on_click: Union[OnItemClick, WidgetEventProcessor, None] = None,
when: Union[str, Callable] = None):
super().__init__(id, when)
self.text = text
self.widget_id = id
self.on_click = ensure_event_processor(on_click)
self.callback_data_prefix = id + ":"
self.item_id_getter = item_id_getter
if isinstance(items, str):
self.items_getter = itemgetter(items)
else:
self.items_getter = get_identity(items)
async def _render_keyboard(self, data: Dict,
manager: DialogManager) -> List[List[InlineKeyboardButton]]:
return [[
await self._render_button(pos, item, data, manager)
for pos, item in enumerate(self.items_getter(data))
]]
async def _render_button(self, pos: int, item: Any, data: Dict,
manager: DialogManager) -> InlineKeyboardButton:
data = {"data": data, "item": item, "pos": pos + 1, "pos0": pos}
return InlineKeyboardButton(
text=await self.text.render_text(data, manager),
callback_data=self.callback_data_prefix + str(self.item_id_getter(item))
)
async def process_callback(self, c: CallbackQuery, dialog: Dialog,
manager: DialogManager) -> bool:
if not c.data.startswith(self.callback_data_prefix):
return False
item_id = c.data[len(self.callback_data_prefix):]
await self.on_click.process_event(c, self.managed(manager), manager, item_id)
return True
class StatefulSelect(Select, ABC):
def __init__(self, checked_text: Text, unchecked_text: Text,
id: str, item_id_getter: ItemIdGetter,
items: Union[str, Sequence],
on_click: Union[OnItemClick, WidgetEventProcessor, None] = None,
on_state_changed: Union[OnItemStateChanged, WidgetEventProcessor, None] = None,
when: Union[str, Callable] = None):
text = Case({True: checked_text, False: unchecked_text}, selector=self._is_text_checked)
super().__init__(text, id, item_id_getter, items, self._process_click, when)
self.on_item_click = ensure_event_processor(on_click)
self.on_state_changed = ensure_event_processor(on_state_changed)
async def _process_on_state_changed(self, event: ChatEvent, item_id: str,
manager: DialogManager):
if self.on_state_changed:
await self.on_state_changed.process_event(
event, self.managed(manager), manager, item_id
)
@abstractmethod
def _is_text_checked(self, data: Dict, case: Case, manager: DialogManager) -> bool:
raise NotImplementedError
async def _process_click(self, c: CallbackQuery,
select: ManagedWidgetAdapter[Select],
manager: DialogManager, item_id: str):
if self.on_item_click:
await self.on_item_click.process_event(c, select, manager, item_id)
await self._on_click(c, select, manager, item_id)
@abstractmethod
async def _on_click(self, c: CallbackQuery,
select: ManagedWidgetAdapter[Select],
manager: DialogManager, item_id: str):
raise NotImplementedError
class Radio(StatefulSelect):
def get_checked(self, manager: DialogManager) -> Optional[str]:
return manager.current_context().widget_data.get(self.widget_id, None)
async def set_checked(self, event: ChatEvent, item_id: Optional[str], manager: DialogManager):
checked = self.get_checked(manager)
manager.current_context().widget_data[self.widget_id] = item_id
if checked != item_id:
await self._process_on_state_changed(event, item_id, manager)
def is_checked(self, item_id: Union[str, int], manager: DialogManager) -> bool:
return str(item_id) == self.get_checked(manager)
def _preview_checked_id(self, manager: DialogManager, item_id: str) -> str:
data = manager.current_context().widget_data
return data.setdefault(self.widget_id, item_id)
def _is_text_checked(self, data: Dict, case: Case, manager: DialogManager) -> bool:
item_id = str(self.item_id_getter(data["item"]))
if manager.is_preview():
return item_id==self._preview_checked_id(manager, item_id)
return self.is_checked(item_id, manager)
async def _on_click(self, c: CallbackQuery, select: Select,
manager: DialogManager, item_id: str):
await self.set_checked(c, item_id, manager)
def managed(self, manager: DialogManager):
return ManagedRadioAdapter(self, manager)
class ManagedRadioAdapter(ManagedWidgetAdapter[Radio]):
def get_checked(self,
manager: Optional[DialogManager] = None) -> Optional[str]:
manager_deprecated(manager)
return self.widget.get_checked(self.manager)
async def set_checked(self, event: ChatEvent, item_id: Optional[str],
manager: Optional[DialogManager] = None):
manager_deprecated(manager)
return await self.widget.set_checked(event, item_id, self.manager)
def is_checked(self, item_id: Union[str, int],
manager: Optional[DialogManager] = None) -> bool:
manager_deprecated(manager)
return self.widget.is_checked(item_id, self.manager)
class Multiselect(StatefulSelect):
def __init__(self, checked_text: Text, unchecked_text: Text, id: str,
item_id_getter: ItemIdGetter, items: Union[str, Sequence],
min_selected: int = 0, max_selected: int = 0,
on_click: Union[OnItemClick, WidgetEventProcessor, None] = None,
on_state_changed: Union[OnItemStateChanged, WidgetEventProcessor, None] = None,
when: Union[str, Callable] = None):
super().__init__(checked_text, unchecked_text, id, item_id_getter, items, on_click,
on_state_changed, when)
self.min_selected = min_selected
self.max_selected = max_selected
def _is_text_checked(self, data: Dict, case: Case, manager: DialogManager) -> bool:
item_id = str(self.item_id_getter(data["item"]))
if manager.is_preview():
return ord(item_id[-1])%2 == 1 # just stupid way to make it differ
return self.is_checked(item_id, manager)
def is_checked(self, item_id: Union[str, int], manager: DialogManager) -> bool:
data: List = self.get_checked(manager)
return str(item_id) in data
def get_checked(self, manager: DialogManager) -> List[str]:
return manager.current_context().widget_data.get(self.widget_id, [])
async def reset_checked(self, event: ChatEvent, manager: DialogManager):
manager.current_context().widget_data[self.widget_id] = []
async def set_checked(self, event: ChatEvent,
item_id: str, checked: bool, manager: DialogManager) -> None:
data: List = self.get_checked(manager)
changed = False
if item_id in data:
if not checked:
if len(data) > self.min_selected:
data.remove(item_id)
changed = True
else:
if checked:
if self.max_selected == 0 or self.max_selected > len(data):
data.append(item_id)
changed = True
if changed:
manager.current_context().widget_data[self.widget_id] = data
await self._process_on_state_changed(event, item_id, manager)
async def _on_click(self, c: CallbackQuery, select: Select,
manager: DialogManager, item_id: str):
await self.set_checked(c, item_id, not self.is_checked(item_id, manager), manager)
def managed(self, manager: DialogManager):
return ManagedMultiSelectAdapter(self, manager)
class ManagedMultiSelectAdapter(ManagedWidgetAdapter[Multiselect]):
def is_checked(self, item_id: Union[str, int],
manager: Optional[DialogManager] = None) -> bool:
manager_deprecated(manager)
return self.widget.is_checked(item_id, self.manager)
def get_checked(self, manager: Optional[DialogManager] = None) -> List[str]:
manager_deprecated(manager)
return self.widget.get_checked(self.manager)
async def reset_checked(self, event: ChatEvent,
manager: Optional[DialogManager] = None):
manager_deprecated(manager)
return await self.widget.reset_checked(event, self.manager)
async def set_checked(self, event: ChatEvent,
item_id: str, checked: bool,
manager: Optional[DialogManager] = None) -> None:
manager_deprecated(manager)
return await self.widget.set_checked(event, item_id, checked,
self.manager)
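# Usage sketch (illustrative only): constructs a Multiselect from the classes above.
# It assumes `Format` from aiogram_dialog.widgets.text (a str.format-based Text);
# the widget id, item tuples and printed value are placeholders for the example.
if __name__ == "__main__":
    from aiogram_dialog.widgets.text import Format
    fruits = [("1", "Apple"), ("2", "Banana"), ("3", "Orange")]
    # Multiselect renders the checked/unchecked text per item and keeps the
    # selected ids in widget_data under its own id.
    fruits_kbd = Multiselect(
        Format("✓ {item[1]}"),        # shown when the item id is selected
        Format("{item[1]}"),          # shown otherwise
        id="m_fruits",
        item_id_getter=itemgetter(0),
        items=fruits,                 # may also be a key into the window's data dict
        min_selected=1,
        max_selected=2,
    )
    # Button callback payloads are prefixed with the widget id:
    print(fruits_kbd.callback_data_prefix)  # "m_fruits:"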
|
py | b410d0950b97d6514ee6e1b09a173c62fe0665c5 | from ..core import Provider, Response
from ..utils import requests
from ..utils.schema.helpers import one_or_more, list_to_commas
class PopcornNotify(Provider):
"""Send PopcornNotify notifications"""
base_url = 'https://popcornnotify.com/notify'
site_url = 'https://popcornnotify.com/'
name = 'popcornnotify'
path_to_errors = 'error',
_required = {
'required':
[
'message',
'api_key',
'recipients'
]
}
_schema = {
'type': 'object',
'properties': {
'message': {
'type': 'string',
'title': 'The message to send'
},
'api_key': {
'type': 'string',
'title': 'The API key'
},
'recipients': one_or_more({
'type': 'string',
'format': 'email',
'title': 'The recipient email address or phone number. Or an array of email addresses and phone numbers'
}),
'subject': {
'type': 'string',
'title': 'The subject of the email. It will not be included in text messages.'
}
}
}
def _prepare_data(self, data: dict) -> dict:
if isinstance(data['recipients'], str):
data['recipients'] = [data['recipients']]
data['recipients'] = list_to_commas(data['recipients'])
return data
def _send_notification(self, data: dict) -> Response:
response, errors = requests.post(url=self.base_url,
json=data,
path_to_errors=self.path_to_errors)
return self.create_response(data, response, errors)
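# Usage sketch (illustrative only): the api_key and addresses below are placeholders.
# Through the public API this provider would normally be obtained via
# notifiers.get_notifier("popcornnotify") (assumed); the recipient handling
# can be shown directly on the class defined above:
if __name__ == "__main__":
    provider = PopcornNotify()
    data = {
        "message": "Build finished",
        "api_key": "<your-api-key>",
        "recipients": ["[email protected]", "[email protected]"],
    }
    # A single address or a list is accepted; lists are collapsed into a
    # comma-separated string before the request is sent.
    print(provider._prepare_data(dict(data))["recipients"])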
|
py | b410d11c58e59011d37683c191db930d0fd8529e | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.table import (EnvironmentSettings, TableEnvironment, DataTypes, TableDescriptor,
Schema)
from pyflink.table.expressions import col
def process_json_data():
t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
# define the source
table = t_env.from_elements(
elements=[
(1, '{"name": "Flink", "tel": 123, "addr": {"country": "Germany", "city": "Berlin"}}'),
(2, '{"name": "hello", "tel": 135, "addr": {"country": "China", "city": "Shanghai"}}'),
(3, '{"name": "world", "tel": 124, "addr": {"country": "USA", "city": "NewYork"}}'),
(4, '{"name": "PyFlink", "tel": 32, "addr": {"country": "China", "city": "Hangzhou"}}')
],
schema=['id', 'data'])
# define the sink
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('print')
.schema(Schema.new_builder()
.column('id', DataTypes.BIGINT())
.column('data', DataTypes.STRING())
.build())
.build())
table = table.select(col('id'), col('data').json_value('$.addr.country', DataTypes.STRING()))
# execute
table.execute_insert('sink') \
.wait()
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
process_json_data()
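# Expected behaviour (illustrative; exact print-connector formatting may vary by
# Flink version): one changelog row per input element, carrying the id and the
# country extracted from the JSON payload, e.g.
#   +I[1, Germany]
#   +I[2, China]
#   +I[3, USA]
#   +I[4, China]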
|
py | b410d281889fd317e3f65c73c6473bea3c66d064 | from PIL import Image
import numpy as np
import tensorflow as tf
import pydensecrf.densecrf as dcrf
n_classes = 21
# Mean (BGR) pixel value subtracted during preprocessing; assumed here to be the
# standard ImageNet mean used by DeepLab-style models (needed by inv_preprocess).
IMG_MEAN = np.array((104.00698793, 116.66876762, 122.67891434), dtype=np.float32)
# colour map
label_colours = [(0,0,0)
# 0=background
,(128,0,0),(0,128,0),(128,128,0),(0,0,128),(128,0,128)
# 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
,(0,128,128),(128,128,128),(64,0,0),(192,0,0),(64,128,0)
# 6=bus, 7=car, 8=cat, 9=chair, 10=cow
,(192,128,0),(64,0,128),(192,0,128),(64,128,128),(192,128,128)
# 11=diningtable, 12=dog, 13=horse, 14=motorbike, 15=person
,(0,64,0),(128,64,0),(0,192,0),(128,192,0),(0,64,128)]
# 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
def decode_labels(mask, num_images=1, num_classes=21):
"""Decode batch of segmentation masks.
Args:
mask: result of inference after taking argmax.
num_images: number of images to decode from the batch.
num_classes: number of classes to predict (including background).
Returns:
A batch with num_images RGB images of the same size as the input.
"""
n, h, w, c = mask.shape
assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)
outputs = np.zeros((num_images, h, w, 3), dtype=np.uint8)
for i in range(num_images):
img = Image.new('RGB', (len(mask[i, 0]), len(mask[i])))
pixels = img.load()
for j_, j in enumerate(mask[i, :, :, 0]):
for k_, k in enumerate(j):
if k < num_classes:
pixels[k_,j_] = label_colours[k]
outputs[i] = np.array(img)
return outputs
def prepare_label(input_batch, new_size, num_classes, one_hot=True):
"""Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
num_classes: number of classes to predict (including background).
one_hot: whether perform one-hot encoding.
Returns:
Outputs a tensor of shape [batch_size h w 21]
with last dimension comprised of 0's and 1's only.
"""
with tf.name_scope('label_encode'):
input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.
input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.
if one_hot:
input_batch = tf.one_hot(input_batch, depth=num_classes)
return input_batch
def inv_preprocess(imgs, num_images=1):
"""Inverse preprocessing of the batch of images.
Add the mean vector and convert from BGR to RGB.
Args:
imgs: batch of input images.
num_images: number of images to apply the inverse transformations on.
Returns:
The batch of the size num_images with the same spatial dimensions as the input.
"""
n, h, w, c = imgs.shape
assert(n >= num_images), 'Batch size %d should be greater or equal than number of images to save %d.' % (n, num_images)
outputs = np.zeros((num_images, h, w, c), dtype=np.uint8)
for i in range(num_images):
outputs[i] = (imgs[i] + IMG_MEAN)[:, :, ::-1].astype(np.uint8)
return outputs
def dense_crf(probs, img=None, n_iters=10,
sxy_gaussian=(1, 1), compat_gaussian=4,
kernel_gaussian=dcrf.DIAG_KERNEL,
normalisation_gaussian=dcrf.NORMALIZE_SYMMETRIC,
sxy_bilateral=(49, 49), compat_bilateral=5,
srgb_bilateral=(13, 13, 13),
kernel_bilateral=dcrf.DIAG_KERNEL,
normalisation_bilateral=dcrf.NORMALIZE_SYMMETRIC):
"""DenseCRF over unnormalised predictions.
More details on the arguments at https://github.com/lucasb-eyer/pydensecrf.
Args:
probs: class probabilities per pixel.
img: if given, the pairwise bilateral potential on raw RGB values will be computed.
n_iters: number of iterations of MAP inference.
sxy_gaussian: standard deviations for the location component of the colour-independent term.
compat_gaussian: label compatibilities for the colour-independent term (can be a number, a 1D array, or a 2D array).
kernel_gaussian: kernel precision matrix for the colour-independent term (can take values CONST_KERNEL, DIAG_KERNEL, or FULL_KERNEL).
normalisation_gaussian: normalisation for the colour-independent term (possible values are NO_NORMALIZATION, NORMALIZE_BEFORE, NORMALIZE_AFTER, NORMALIZE_SYMMETRIC).
sxy_bilateral: standard deviations for the location component of the colour-dependent term.
compat_bilateral: label compatibilities for the colour-dependent term (can be a number, a 1D array, or a 2D array).
srgb_bilateral: standard deviations for the colour component of the colour-dependent term.
kernel_bilateral: kernel precision matrix for the colour-dependent term (can take values CONST_KERNEL, DIAG_KERNEL, or FULL_KERNEL).
normalisation_bilateral: normalisation for the colour-dependent term (possible values are NO_NORMALIZATION, NORMALIZE_BEFORE, NORMALIZE_AFTER, NORMALIZE_SYMMETRIC).
Returns:
Refined predictions after MAP inference.
"""
_, h, w, _ = probs.shape
probs = probs[0].transpose(2, 0, 1).copy(order='C') # Need a contiguous array.
d = dcrf.DenseCRF2D(w, h, n_classes) # Define DenseCRF model.
U = -np.log(probs) # Unary potential.
U = U.reshape((n_classes, -1)) # Needs to be flat.
d.setUnaryEnergy(U)
d.addPairwiseGaussian(sxy=sxy_gaussian, compat=compat_gaussian,
kernel=kernel_gaussian, normalization=normalisation_gaussian)
if img is not None:
assert(img.shape[1:3] == (h, w)), "The image height and width must coincide with dimensions of the logits."
d.addPairwiseBilateral(sxy=sxy_bilateral, compat=compat_bilateral,
kernel=kernel_bilateral, normalization=normalisation_bilateral,
srgb=srgb_bilateral, rgbim=img[0])
Q = d.inference(n_iters)
preds = np.array(Q, dtype=np.float32).reshape((n_classes, h, w)).transpose(1, 2, 0)
return np.expand_dims(preds, 0)
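# Hedged usage sketch (added; not part of the original utilities): given softmax
# probabilities `probs` of shape [1, h, w, n_classes] and the matching RGB batch
# `raw_img` of shape [1, h, w, 3], refinement and decoding might look like:
#
#     refined = dense_crf(probs, img=raw_img, n_iters=10)
#     labels = np.argmax(refined[0], axis=-1)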
|
py | b410d28ae5b58876191b1c11aadba96517931745 | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
import sys
from itertools import count
c = count(1)
def scode_format(sent, X):
sent = sent.split()
for Y in sent:
print "{}\t{}".format(X, Y)
for sent in sys.stdin:
scode_format(sent, "S%d" % c.next())
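# Hedged input/output sketch (added): for stdin consisting of the two lines
# "a b" and "c d", the script prints one tab-separated pair per token:
#   S1\ta
#   S1\tb
#   S2\tc
#   S2\td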
|
py | b410d36962602076b01dca0ca6d374106ec5d590 | import io
import os
import gzip
import codecs
from collections import Counter
from contextlib import contextmanager, ExitStack
from pathlib import Path
from typing import NamedTuple
from glob import glob
import ir_datasets
from ir_datasets.util import DownloadConfig, GzipExtract, TarExtract
from ir_datasets.formats import TrecQrels, TrecQueries, TrecColonQueries, BaseDocs, GenericQuery, BaseQrels, TrecPrels
from ir_datasets.datasets.base import Dataset, YamlDocumentation
from ir_datasets.indices import Docstore
_logger = ir_datasets.log.easy()
NAME = 'gov2'
QREL_DEFS = {
2: 'Highly Relevant',
1: 'Relevant',
0: 'Not Relevant',
}
NAMED_PAGE_QREL_DEFS = {
1: 'Relevant',
0: 'Not Relevant',
}
NAMED_PAGE_QTYPE_MAP = {
'<num> *(Number:)? *NP': 'query_id', # Remove NP prefix from QIDs
'<title> *(Topic:)?': 'text',
}
EFF_MAP_05 = {'751': '1192', '752': '1330', '753': '5956', '754': '6303', '755': '6939', '756': '7553', '757': '8784', '758': '9121', '759': '9266', '760': '10359', '761': '10406', '762': '11597', '763': '12750', '764': '15502', '765': '16895', '766': '17279', '767': '17615', '768': '18050', '769': '18678', '770': '19280', '771': '19963', '772': '20766', '773': '21329', '774': '21513', '775': '23212', '776': '24289', '777': '24781', '778': '24813', '779': '26593', '780': '27428', '781': '28120', '782': '28627', '783': '29561', '784': '33379', '785': '33820', '786': '34135', '787': '35192', '788': '36242', '789': '36530', '790': '36616', '791': '36738', '792': '37111', '793': '41088', '794': '41192', '795': '41506', '796': '44506', '797': '45081', '798': '47993', '799': '48890', '800': '49462'}
EFF_MAP_06 = {'801': '62937', '802': '63569', '803': '63582', '804': '63641', '805': '64227', '806': '64266', '807': '64310', '808': '64642', '809': '64687', '810': '64704', '811': '64723', '812': '64741', '813': '64752', '814': '64938', '815': '65024', '816': '65070', '817': '65222', '818': '65335', '819': '65486', '820': '65504', '821': '65599', '822': '65821', '823': '65826', '824': '65950', '825': '66084', '826': '66409', '827': '66725', '828': '67326', '829': '67531', '830': '67550', '831': '67782', '832': '67961', '833': '68322', '834': '68492', '835': '68967', '836': '69028', '837': '69127', '838': '69401', '839': '69552', '840': '69564', '841': '69935', '842': '70033', '843': '70041', '844': '70285', '845': '70579', '846': '70707', '847': '70751', '848': '70815', '849': '70935', '850': '71136'}
class Gov2Doc(NamedTuple):
doc_id: str
url: str
http_headers: str
body: bytes
body_content_type: str
class Gov2DocIter:
def __init__(self, gov2_docs, slice):
self.gov2_docs = gov2_docs
self.slice = slice
self.next_index = 0
self.file_iter = gov2_docs._docs_iter_source_files()
self.current_file = None
self.current_file_start_idx = 0
self.current_file_end_idx = 0
def __next__(self):
if self.slice.start >= self.slice.stop:
raise StopIteration
while self.next_index != self.slice.start or self.current_file is None or self.current_file_end_idx <= self.slice.start:
if self.current_file is None or self.current_file_end_idx <= self.slice.start:
# First iteration or no docs remaining in this file
if self.current_file is not None:
self.current_file.close()
self.current_file = None
# jump ahead to the file that contains the desired index
first = True
while first or self.current_file_end_idx < self.slice.start:
source_file = next(self.file_iter)
self.next_index = self.current_file_end_idx
self.current_file_start_idx = self.current_file_end_idx
self.current_file_end_idx = self.current_file_start_idx + self.gov2_docs._docs_file_counts()[source_file]
first = False
self.current_file = self.gov2_docs._docs_ctxt_iter_gov2(source_file)
else:
for _ in zip(range(self.slice.start - self.next_index), self.current_file):
                    # The zip here will stop after either as many docs as we still need to
                    # advance, or however many docs remain in the file. In the latter case,
                    # we'll just drop out into the next iteration of the while loop and pick
                    # up the next file.
self.next_index += 1
result = next(self.current_file)
self.next_index += 1
self.slice = slice(self.slice.start + (self.slice.step or 1), self.slice.stop, self.slice.step)
return result
def close(self):
self.file_iter = None
def __iter__(self):
return self
def __del__(self):
self.close()
def __getitem__(self, key):
if isinstance(key, slice):
# it[start:stop:step]
new_slice = ir_datasets.util.apply_sub_slice(self.slice, key)
return Gov2DocIter(self.gov2_docs, new_slice)
elif isinstance(key, int):
# it[index]
new_slice = ir_datasets.util.slice_idx(self.slice, key)
new_it = Gov2DocIter(self.gov2_docs, new_slice)
try:
return next(new_it)
except StopIteration as e:
raise IndexError((self.slice, slice(key, key+1), new_slice))
raise TypeError('key must be int or slice')
class Gov2Docs(BaseDocs):
def __init__(self, docs_dlc, doccount_dlc):
super().__init__()
self.docs_dlc = docs_dlc
self._doccount_dlc = doccount_dlc
self._docs_file_counts_cache = None
def docs_path(self):
return self.docs_dlc.path()
def _docs_iter_source_files(self):
dirs = sorted((Path(self.docs_dlc.path()) / 'GOV2_data').glob('GX???'))
for source_dir in dirs:
for source_file in sorted(source_dir.glob('*.gz')):
yield str(source_file)
def docs_iter(self):
return Gov2DocIter(self, slice(0, self.docs_count()))
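    # Hedged usage note (added): the iterator returned by docs_iter supports slicing,
    # e.g. docs_iter()[:10] or docs_iter()[1000:1010], and uses the per-file document
    # counts to skip directly to the right source file instead of scanning them all.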
def docs_cls(self):
return Gov2Doc
def _docs_ctxt_iter_gov2(self, gov2f):
if isinstance(gov2f, (str, Path)):
gov2f = gzip.open(gov2f, 'rb')
doc = None
for line in gov2f:
if line == b'<DOC>\n':
assert doc is None
doc = line
elif line == b'</DOC>\n':
doc += line
yield self._process_gov2_doc(doc)
doc = None
elif doc is not None:
doc += line
def _process_gov2_doc(self, raw_doc):
state = 'DOCNO'
doc_id = None
doc_hdr = None
doc_body = b''
with io.BytesIO(raw_doc) as f:
for line in f:
if state == 'DOCNO':
if line.startswith(b'<DOCNO>'):
doc_id = line[len(b'<DOCNO>'):-len(b'</DOCNO>')-1].strip().decode()
state = 'DOCHDR'
elif state == 'DOCHDR':
if line == b'<DOCHDR>\n':
doc_hdr = b''
elif line == b'</DOCHDR>\n':
state = 'BODY'
elif doc_hdr is not None:
doc_hdr += line
elif state == 'BODY':
if line == b'</DOC>\n':
state = 'DONE'
else:
doc_body += line
doc_url, doc_hdr = doc_hdr.decode().split('\n', 1)
headers = [line.split(':', 1) for line in doc_hdr.split('\n') if ':' in line]
headers = {k.lower(): v for k, v in headers}
content_type = 'text/html' # default to text/html
if 'content-type' in headers:
content_type = headers['content-type']
if ';' in content_type:
content_type, _ = content_type.split(';', 1)
content_type = content_type.strip()
return Gov2Doc(doc_id, doc_url, doc_hdr, doc_body, content_type)
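    # Hedged format note (added): each record handled by _process_gov2_doc follows
    # the TREC GOV2 layout, roughly:
    #
    #     <DOC>
    #     <DOCNO>GX024-52-0546388</DOCNO>
    #     <DOCHDR>
    #     http://example.gov/page.html        <- capture URL, then raw HTTP headers
    #     Content-Type: text/html
    #     </DOCHDR>
    #     ...raw page body...
    #     </DOC>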
def _docs_id_to_source_file(self, doc_id):
parts = doc_id.split('-')
if len(parts) != 3:
return None
s_dir, file, doc = parts
source_file = os.path.join(self.docs_dlc.path(), 'GOV2_data', s_dir, f'{file}.gz')
return source_file
def _docs_file_counts(self):
if self._docs_file_counts_cache is None:
result = {}
with self._doccount_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
path, count = line.strip().split()
file = os.path.join(self.docs_dlc.path(), 'GOV2_data', path)
result[file] = int(count)
self._docs_file_counts_cache = result
return self._docs_file_counts_cache
def docs_store(self):
docstore = Gov2Docstore(self)
return ir_datasets.indices.CacheDocstore(docstore, f'{self.docs_path()}.cache')
def docs_count(self):
return sum(self._docs_file_counts().values())
def docs_namespace(self):
return NAME
def docs_lang(self):
return 'en'
class Gov2Docstore(Docstore):
def __init__(self, gov2_docs):
super().__init__(gov2_docs.docs_cls(), 'doc_id')
self.gov2_docs = gov2_docs
def get_many_iter(self, doc_ids):
result = {}
files_to_search = {}
for doc_id in doc_ids:
source_file = self.gov2_docs._docs_id_to_source_file(doc_id)
if source_file is not None:
if source_file not in files_to_search:
files_to_search[source_file] = []
files_to_search[source_file].append(doc_id)
for source_file, doc_ids in files_to_search.items():
doc_ids = sorted(doc_ids)
for doc in self.gov2_docs._docs_ctxt_iter_gov2(source_file):
if doc_ids[0] == doc.doc_id:
yield doc
doc_ids = doc_ids[1:]
if not doc_ids:
break # file finished
class RewriteQids(BaseQrels):
def __init__(self, base_qrels, qid_map):
self._base_qrels = base_qrels
self._qid_map = qid_map
def qrels_iter(self):
cls = self.qrels_cls()
for qrel in self._base_qrels.qrels_iter():
if qrel.query_id in self._qid_map:
qrel = cls(self._qid_map[qrel.query_id], *qrel[1:])
yield qrel
def qrels_defs(self):
return self._base_qrels.qrels_defs()
def qrels_path(self):
return self._base_qrels.qrels_path()
def qrels_cls(self):
return self._base_qrels.qrels_cls()
class Gov2DocCountFile:
def __init__(self, path, docs_dlc):
self._path = path
self._docs_dlc = docs_dlc
def path(self):
if not os.path.exists(self._path):
docs_urls_path = os.path.join(self._docs_dlc.path(), 'GOV2_extras/url2id.gz')
result = Counter()
with _logger.pbar_raw(desc='building doccounts file', total=25205179, unit='doc') as pbar:
with gzip.open(docs_urls_path, 'rt') as fin:
for line in fin:
url, doc_id = line.rstrip().split()
d, f, i = doc_id.split('-') # formatted like: GX024-52-0546388
file = f'{d}/{f}.gz'
result[file] += 1
pbar.update()
with ir_datasets.util.finialized_file(self._path, 'wt') as fout:
for file in sorted(result):
fout.write(f'{file}\t{result[file]}\n')
return self._path
@contextmanager
def stream(self):
with open(self.path(), 'rb') as f:
yield f
def _init():
documentation = YamlDocumentation(f'docs/{NAME}.yaml')
base_path = ir_datasets.util.home_path()/NAME
dlc = DownloadConfig.context(NAME, base_path)
subsets = {}
docs_dlc = dlc['docs']
doccount_dlc = Gov2DocCountFile(os.path.join(base_path, 'corpus.doccounts'), docs_dlc)
collection = Gov2Docs(docs_dlc, doccount_dlc)
base = Dataset(collection, documentation('_'))
subsets['trec-tb-2004'] = Dataset(
collection,
TrecQueries(dlc['trec-tb-2004/queries'], namespace=NAME, lang='en'),
TrecQrels(dlc['trec-tb-2004/qrels'], QREL_DEFS),
documentation('trec-tb-2004')
)
subsets['trec-tb-2005'] = Dataset(
collection,
TrecQueries(dlc['trec-tb-2005/queries'], namespace=NAME, lang='en'),
TrecQrels(dlc['trec-tb-2005/qrels'], QREL_DEFS),
documentation('trec-tb-2005')
)
subsets['trec-tb-2005/named-page'] = Dataset(
collection,
TrecQueries(dlc['trec-tb-2005/named-page/queries'], qtype=GenericQuery, qtype_map=NAMED_PAGE_QTYPE_MAP, namespace=NAME, lang='en'),
TrecQrels(dlc['trec-tb-2005/named-page/qrels'], NAMED_PAGE_QREL_DEFS),
documentation('trec-tb-2005/named-page')
)
subsets['trec-tb-2005/efficiency'] = Dataset(
collection,
TrecColonQueries(GzipExtract(dlc['trec-tb-2005/efficiency/queries']), encoding='latin1', namespace=NAME, lang='en'),
RewriteQids(TrecQrels(dlc['trec-tb-2005/qrels'], QREL_DEFS), EFF_MAP_05),
documentation('trec-tb-2005/efficiency')
)
subsets['trec-tb-2006'] = Dataset(
collection,
TrecQueries(dlc['trec-tb-2006/queries'], namespace=NAME, lang='en'),
TrecQrels(dlc['trec-tb-2006/qrels'], QREL_DEFS),
documentation('trec-tb-2006')
)
subsets['trec-tb-2006/named-page'] = Dataset(
collection,
TrecQueries(dlc['trec-tb-2006/named-page/queries'], qtype=GenericQuery, qtype_map=NAMED_PAGE_QTYPE_MAP, namespace=NAME, lang='en'),
TrecQrels(dlc['trec-tb-2006/named-page/qrels'], NAMED_PAGE_QREL_DEFS),
documentation('trec-tb-2006/named-page')
)
subsets['trec-tb-2006/efficiency'] = Dataset(
collection,
TrecColonQueries(TarExtract(dlc['trec-tb-2006/efficiency/queries'], '06.efficiency_topics.all'), encoding='latin1', namespace=NAME, lang='en'),
RewriteQids(TrecQrels(dlc['trec-tb-2006/qrels'], QREL_DEFS), EFF_MAP_06),
documentation('trec-tb-2006/efficiency')
)
subsets['trec-tb-2006/efficiency/10k'] = Dataset(
collection,
TrecColonQueries(TarExtract(dlc['trec-tb-2006/efficiency/queries'], '06.efficiency_topics.10k'), encoding='latin1', namespace=NAME, lang='en'),
documentation('trec-tb-2006/efficiency/10k')
)
subsets['trec-tb-2006/efficiency/stream1'] = Dataset(
collection,
TrecColonQueries(TarExtract(dlc['trec-tb-2006/efficiency/queries'], '06.efficiency_topics.stream-1'), encoding='latin1', namespace=NAME, lang='en'),
documentation('trec-tb-2006/efficiency/stream1')
)
subsets['trec-tb-2006/efficiency/stream2'] = Dataset(
collection,
TrecColonQueries(TarExtract(dlc['trec-tb-2006/efficiency/queries'], '06.efficiency_topics.stream-2'), encoding='latin1', namespace=NAME, lang='en'),
documentation('trec-tb-2006/efficiency/stream2')
)
subsets['trec-tb-2006/efficiency/stream3'] = Dataset(
collection,
TrecColonQueries(TarExtract(dlc['trec-tb-2006/efficiency/queries'], '06.efficiency_topics.stream-3'), encoding='latin1', namespace=NAME, lang='en'),
RewriteQids(TrecQrels(dlc['trec-tb-2006/qrels'], QREL_DEFS), EFF_MAP_06),
documentation('trec-tb-2006/efficiency/stream3')
)
subsets['trec-tb-2006/efficiency/stream4'] = Dataset(
collection,
TrecColonQueries(TarExtract(dlc['trec-tb-2006/efficiency/queries'], '06.efficiency_topics.stream-4'), encoding='latin1', namespace=NAME, lang='en'),
documentation('trec-tb-2006/efficiency/stream4')
)
subsets['trec-mq-2007'] = Dataset(
collection,
TrecColonQueries(GzipExtract(dlc['trec-mq-2007/queries']), encoding='latin1'),
TrecPrels(dlc['trec-mq-2007/qrels'], QREL_DEFS),
documentation('trec-mq-2007')
)
subsets['trec-mq-2008'] = Dataset(
collection,
TrecColonQueries(GzipExtract(dlc['trec-mq-2008/queries']), encoding='latin1', namespace='trec-mq', lang='en'),
TrecPrels(TarExtract(dlc['trec-mq-2008/qrels'], '2008.RC1/prels'), QREL_DEFS),
documentation('trec-mq-2008')
)
ir_datasets.registry.register(NAME, base)
for s in sorted(subsets):
ir_datasets.registry.register(f'{NAME}/{s}', subsets[s])
return base, subsets
base, subsets = _init()
|
py | b410d4637123e9b7a482f64c8df27efb7a9d29f8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2017 The Procyon Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import sys
from .context import procyon, pntest
"""
def test_xlist():
assert lexs("*") == [line_in, star("*"), line_out]
assert lexs("**") == [line_in, star("*"), line_in, star("*"), line_out, line_out]
assert lexs("***") == [
line_in,
star("*"), line_in,
star("*"), line_in,
star("*"), line_out, line_out, line_out
]
assert lexs("***0") == [
line_in,
star("*"), line_in,
star("*"), line_in,
star("*"), line_in,
i("0"), line_out, line_out, line_out, line_out
]
assert lexs("* *") == [line_in, star("*"), line_in, star("*"), line_out, line_out]
assert lexs("* * *") == [
line_in,
star("*"), line_in,
star("*"), line_in,
star("*"), line_out, line_out, line_out
]
assert lexs("*\n"
" *\n"
" *\n") == [
line_in,
star("*"), line_in,
star("*"), line_in,
star("*"), line_out, line_out, line_out
]
assert lexs("* \n"
" * \n"
" *\n") == [
line_in,
star("*"), line_in,
star("*"), line_in,
star("*"), line_out, line_out, line_out
]
assert lexs("***\n"
" **\n"
" *\n") == [
line_in,
star("*"), line_in,
star("*"), line_in,
star("*"), line_out, line_eq,
star("*"), line_in,
star("*"), line_eq,
star("*"), line_out, line_out, line_out
]
assert lexs("* \t *\t*\n"
" *\n") == [
line_in,
star("*"), line_in,
star("*"), line_in,
star("*"), line_eq,
star("*"), line_out, line_out, line_out
]
def test_map():
assert lexs(":") == [line_in, xkey(":"), line_out]
assert lexs("0:") == [line_in, xkey("0:"), line_out]
assert lexs("a:") == [line_in, xkey("a:"), line_out]
assert lexs("+:") == [line_in, xkey("+:"), line_out]
assert lexs("1:1") == [line_in, xkey("1:"), line_in, i("1"), line_out, line_out]
assert lexs("1: 1") == [line_in, xkey("1:"), line_in, i("1"), line_out, line_out]
assert lexs("{1:1}") == [line_in, map_in, key("1:"), i("1"), map_out, line_out]
assert lexs("{1: 1}") == [line_in, map_in, key("1:"), i("1"), map_out, line_out]
assert lexs("1:2\n3:4") == [
line_in,
xkey("1:"), line_in,
i("2"), line_out, line_eq,
xkey("3:"), line_in,
i("4"), line_out, line_out
]
assert lexs("{1:2,3:4}") == [
line_in, map_in,
key("1:"), i("2"), comma,
key("3:"), i("4"), map_out, line_out
]
assert lexs("1: 2\n3: 4") == [
line_in,
xkey("1:"), line_in,
i("2"), line_out, line_eq,
xkey("3:"), line_in,
i("4"), line_out, line_out
]
assert lexs("{1: 2, 3: 4}") == [
line_in, map_in,
key("1:"), i("2"), comma,
key("3:"), i("4"), map_out, line_out
]
def test_comment():
# Missing values
assert lexs("# comment") == [line_in, comment("# comment"), line_out]
assert lexs("* # comment") == [
line_in, star("*"), line_in,
comment("# comment"), line_out, line_out
]
# These won't parse, but should lex.
assert lexs("true# comment") == [line_in, true, comment("# comment"), line_out]
assert lexs("true # comment") == [line_in, true, comment("# comment"), line_out]
assert lexs("1# comment") == [line_in, i("1"), comment("# comment"), line_out]
assert lexs("1 # comment") == [line_in, i("1"), comment("# comment"), line_out]
assert lexs("\"\"# comment") == [line_in, s("\"\""), comment("# comment"), line_out]
assert lexs("\"\" # comment") == [line_in, s("\"\""), comment("# comment"), line_out]
assert lexs("$00# comment") == [line_in, d("$00"), comment("# comment"), line_out]
assert lexs("$00 # comment") == [line_in, d("$00 "), comment("# comment"), line_out]
assert lexs("># comment") == [line_in, wrap("># comment"), line_out]
assert lexs("> # comment") == [line_in, wrap("> # comment"), line_out]
def test_bytes():
assert lexs(b"\342\200\246") == [line_in, err(E.NONASCII, "\u2026"), line_out]
assert lexs(b"> \342\200\246") == [line_in, wrap("> \u2026"), line_out]
def test_unicode():
assert lexs(u"\u2026") == [line_in, err(E.NONASCII, "\u2026"), line_out]
assert lexs(u"> \u2026") == [line_in, wrap("> \u2026"), line_out]
"""
def tokenize(source):
sys.stdin = io.BytesIO(source)
sys.stdout = io.StringIO()
procyon.lex.main(["procyon.lex"])
output = sys.stdout.getvalue()
sys.stdin = sys.__stdin__
sys.stdout = sys.__stdout__
return output.encode("utf-8")
def test_func(run):
run(tokenize)
def pytest_generate_tests(metafunc):
metafunc.parametrize("run", pntest.LEX_CASES, ids=pntest.DIRECTORIES)
if __name__ == "__main__":
import pytest
raise SystemExit(pytest.main())
|
py | b410d58084af92c35df778792da279a687fb0fac | # Implements a proleptic Gregorian calendar date as a Julian day number.
class Date:
# Creates an object instance for the specified Gregorian date.
def __init__(self, year, month, day):
self._julianDay = 0
assert self._isValidGregorian(year, month, day), \
"Invalid Gregorian date."
        # The first line of the equation, T = (M - 14) / 12, has to be changed
        # because Python's floor division rounds toward negative infinity rather
        # than truncating toward zero as the mathematical definition assumes.
tmp = 0
if month < 3:
tmp -= 1
self._julianDay = day - 32075 + \
(1461 * (year + 4800 + tmp) // 4) + \
(367 * (month - 2 - tmp * 12) // 12) - \
(3 * ((year + 4900 + tmp) // 100) // 4)
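    # Hedged worked example (added): Date(2000, 1, 1) yields _julianDay == 2451545,
    # the Julian day number of 1 January 2000 in the proleptic Gregorian calendar.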
# Extracts the appropriate Gregorian date component.
def year(self):
return (self._toGregorian())[0] # returning y from (y, M, d)
def month(self):
return (self._toGregorian())[1] # returning M from (y, M, d)
def day(self):
return (self._toGregorian())[2] # returning d from (y, M, d)
    # Returns day of the week as an int between 0 (Mon) and 6 (Sun).
def dayOfWeek(self):
year, month, day = self._toGregorian()
if month < 3:
month += 12
year -= 1
return ((13 * month + 3) // 5 + day + \
year + year // 4 - year // 100 + year // 400) % 7
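    # Hedged worked example (added): Date(2006, 9, 1).dayOfWeek() returns 4,
    # i.e. Friday under the 0 (Mon) .. 6 (Sun) convention used above.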
# Returns the date as a string in Gregorian format.
def __str__(self):
year, month, day = self._toGregorian()
return "%04d/%02d/%02d" % (year, month, day)
# Logically compares the two dates.
def __eq__(self, otherDate):
return self._julianDay == otherDate._julianDay
def __lt__(self, otherDate):
return self._julianDay < otherDate._julianDay
def __le__(self, otherDate):
return self._julianDay <= otherDate._julianDay
# The remaining methods are to be included at this point.
# TODO : Returns the Gregorian month number of this date.
def monthName(self):
return self.month()
# TODO : Returns the number of days as a positive integer
# between this date and the otherDate
def numDays(self, otherDate):
return abs(self._julianDay - otherDate._julianDay)
# TODO : Determines if this date falls in a leap year and
# Returns the appropriate boolean value.
def isLeapYear(self):
year = self.year()
return False if year < 0 else ((year % 4 == 0 and year % 100 != 0) or year % 400 == 0)
# TODO : Advances the date by the given number of days.
# The date is incremented if days is positive and
# decremented if the days is negative.
# The date is capped to November 24, 4714 BC, if necessary.
def advanceBy(self, days):
        self._julianDay = max(self._julianDay + days, 0)   # cap at Julian day 0 (November 24, 4714 BC)
year, month, day = self._toGregorian()
return "%04d/%02d/%02d" % (year, month, day)
# Returns the Gregorian date as a tuple: (year, month, day).
def _toGregorian(self):
A = self._julianDay + 68569
B = 4 * A // 146097
A = A - (146097 * B + 3) // 4
year = 4000 * (A + 1) // 1461001
A = A - (1461 * year // 4) + 31
month = 80 * A // 2447
day = A - (2447 * month // 80)
A = month // 11
month = month + 2 - (12 * A)
year = 100 * (B - 49) + year + A
return year, month, day
# TODO : Determine if the three components
# of the given Gregorian date are valid.
def _isValidGregorian(self, year, month, day):
return_code = True
        if year is None or month is None or day is None:
            return_code = False
        if month > 12 or day > 31:
            return_code = False
        if month < 1 or day < 1:
return_code = False
return return_code
# firstDay = Date(2006, 9, 1)
# otherDay = Date(2008, 10, 27)
# print(firstDay.advanceBy(1099))
|
py | b410d59206b4edd5ab7049519f584cc430c30038 | # ####################### BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
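# Hedged usage sketch (added): the prober consumes raw bytes and is then queried
# for its verdict, e.g.
#
#     prober = EscCharSetProber()
#     prober.feed(raw_bytes)          # e.g. ISO-2022-JP text containing ESC sequences
#     state = prober.get_state()
#     charset = prober.get_charset_name()
#     confidence = prober.get_confidence()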
|
py | b410d69f0d13c1b24d450ba4228a483b04b4b808 | import torch
import torch.nn as nn
import torch.nn.functional as F
from base.base_net import BaseNet
class Casting_LeNet_ELU(BaseNet):
def __init__(self):
super().__init__()
self.rep_dim = 128
self.pool = nn.MaxPool2d(2, 2)
self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False)
def forward(self, x):
x = self.conv1(x)
x = self.pool(F.elu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool(F.elu(self.bn2d2(x)))
x = self.conv3(x)
x = self.pool(F.elu(self.bn2d3(x)))
x = x.view(x.size(0), -1)
x = self.fc1(x)
return x
class Casting_LeNet_ELU_Autoencoder(BaseNet):
def __init__(self):
super().__init__()
self.rep_dim = 128
self.pool = nn.MaxPool2d(2, 2)
# Encoder (must match the Deep SVDD network above)
self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv1.weight)
self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv2.weight)
self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv3.weight)
self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.fc1 = nn.Linear(128 * 4 * 4, self.rep_dim, bias=False)
self.bn1d = nn.BatchNorm1d(self.rep_dim, eps=1e-04, affine=False)
# Decoder
self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)), 128, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv1.weight)
self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv2.weight)
self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv3.weight)
self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv4.weight)
def forward(self, x):
x = self.conv1(x)
x = self.pool(F.elu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool(F.elu(self.bn2d2(x)))
x = self.conv3(x)
x = self.pool(F.elu(self.bn2d3(x)))
x = x.view(x.size(0), -1)
x = self.bn1d(self.fc1(x))
x = x.view(x.size(0), int(self.rep_dim / (4 * 4)), 4, 4)
x = F.elu(x)
x = self.deconv1(x)
x = F.interpolate(F.elu(self.bn2d4(x)), scale_factor=2)
x = self.deconv2(x)
x = F.interpolate(F.elu(self.bn2d5(x)), scale_factor=2)
x = self.deconv3(x)
x = F.interpolate(F.elu(self.bn2d6(x)), scale_factor=2)
x = self.deconv4(x)
x = torch.sigmoid(x)
return x
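# Hedged shape check (added; not part of the original module). Assumes 32x32 RGB
# inputs so that three 2x2 max-poolings leave a 4x4 map, matching the 128 * 4 * 4
# fully connected layer above.
if __name__ == '__main__':
    x = torch.randn(2, 3, 32, 32)
    print(Casting_LeNet_ELU()(x).shape)              # expected: torch.Size([2, 128])
    print(Casting_LeNet_ELU_Autoencoder()(x).shape)  # expected: torch.Size([2, 3, 32, 32])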
|
py | b410d7bc707206076bd372c10667e2114d0cb86a | # Copyright 2021 AI Singapore. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .structured_data.int_general_metrics import IntGeneralMetrics
from .structured_data.int_miss_predictions import IntMissPredictions
from .structured_data.int_loss_clusters import IntLossClusterer
from .structured_data.int_xfeature_distribution import IntFeatureDistribution
from .structured_data.int_similarities_counter_factuals import IntSimilaritiesCounterFactuals
__all__ = ['IntGeneralMetrics',
'IntMissPredictions',
'IntLossClusterer',
'IntFeatureDistribution',
'IntSimilaritiesCounterFactuals']
|
py | b410d81f92891f4962b85f805f8a7ed32d8c5124 | import time
from nose.tools import assert_equal
from pylons import config
from email.mime.text import MIMEText
import hashlib
from ckan.tests.pylons_controller import PylonsTestCase
from ckan.tests.mock_mail_server import SmtpServerHarness
from ckan.lib.mailer import mail_recipient
class TestMockMailServer(SmtpServerHarness, PylonsTestCase):
@classmethod
def setup_class(cls):
smtp_server = config.get('test_smtp_server')
if smtp_server:
host, port = smtp_server.split(':')
port = int(port) + int(str(hashlib.md5(cls.__name__).hexdigest())[0], 16)
config['test_smtp_server'] = '%s:%s' % (host, port)
SmtpServerHarness.setup_class()
PylonsTestCase.setup_class()
@classmethod
def teardown_class(cls):
SmtpServerHarness.teardown_class()
def test_basic(self):
msgs = self.get_smtp_messages()
assert_equal(msgs, [])
test_email = {'recipient_name': 'Bob',
'recipient_email':'[email protected]',
'subject': 'Meeting',
'body': 'The meeting is cancelled.',
'headers': {'header1': 'value1'}}
mail_recipient(**test_email)
time.sleep(0.1)
msgs = self.get_smtp_messages()
assert_equal(len(msgs), 1)
|
py | b410d96690608b1c658aadb0200bc929baba0843 | # qubit number=4
# total number=44
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
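# Hedged worked examples (added): bitwise_xor("111", "101") == "010" (per-position
# XOR, result reversed) and bitwise_dot("111", "101") == "0" (inner product 2 mod 2).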
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=31
prog.cz(input_qubit[0],input_qubit[3]) # number=32
prog.h(input_qubit[3]) # number=33
prog.x(input_qubit[3]) # number=27
prog.h(input_qubit[3]) # number=34
prog.cz(input_qubit[0],input_qubit[3]) # number=35
prog.h(input_qubit[3]) # number=36
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[0]) # number=41
prog.cz(input_qubit[3],input_qubit[0]) # number=42
prog.h(input_qubit[0]) # number=43
prog.z(input_qubit[3]) # number=39
prog.cx(input_qubit[3],input_qubit[0]) # number=40
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.cx(input_qubit[2],input_qubit[0]) # number=10
prog.y(input_qubit[3]) # number=37
prog.h(input_qubit[0]) # number=14
prog.h(input_qubit[1]) # number=30
prog.cz(input_qubit[2],input_qubit[0]) # number=15
prog.h(input_qubit[0]) # number=16
prog.cx(input_qubit[0],input_qubit[2]) # number=20
prog.x(input_qubit[2]) # number=21
prog.cx(input_qubit[0],input_qubit[2]) # number=22
prog.cx(input_qubit[0],input_qubit[2]) # number=17
prog.cx(input_qubit[0],input_qubit[2]) # number=23
prog.x(input_qubit[2]) # number=24
prog.cx(input_qubit[0],input_qubit[2]) # number=25
prog.cx(input_qubit[0],input_qubit[2]) # number=19
# circuit end
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2902.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
py | b410db5604a785ccb0dbe40a123951698ac89629 | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class IsisSRTunnelList(Base):
"""ISIS MPLS SR Tunnel
The IsisSRTunnelList class encapsulates a required isisSRTunnelList resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'isisSRTunnelList'
_SDM_ATT_MAP = {
'Active': 'active',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'Name': 'name',
'NumberOfSegments': 'numberOfSegments',
'SourceIpv4': 'sourceIpv4',
'SourceIpv6': 'sourceIpv6',
'TunnelDescription': 'tunnelDescription',
'UsingHeadEndNodePrefix': 'usingHeadEndNodePrefix',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(IsisSRTunnelList, self).__init__(parent, list_op)
@property
def IsisSegmentList(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.isissegmentlist_28066a67f7cf6594d73c8fed733b33f6.IsisSegmentList): An instance of the IsisSegmentList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.isissegmentlist_28066a67f7cf6594d73c8fed733b33f6 import IsisSegmentList
if self._properties.get('IsisSegmentList', None) is not None:
return self._properties.get('IsisSegmentList')
else:
return IsisSegmentList(self)
@property
def Tag(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d.Tag): An instance of the Tag class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
if self._properties.get('Tag', None) is not None:
return self._properties.get('Tag')
else:
return Tag(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NumberOfSegments(self):
# type: () -> int
"""
Returns
-------
- number: Number of Segments
"""
return self._get_attribute(self._SDM_ATT_MAP['NumberOfSegments'])
@NumberOfSegments.setter
def NumberOfSegments(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NumberOfSegments'], value)
@property
def SourceIpv4(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Source IPv4
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SourceIpv4']))
@property
def SourceIpv6(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Source IPv6
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SourceIpv6']))
@property
def TunnelDescription(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Tunnel Description
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TunnelDescription']))
@property
def UsingHeadEndNodePrefix(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(ixnetwork_restpy.multivalue.Multivalue): Using head end Node prefix
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['UsingHeadEndNodePrefix']))
def update(self, Name=None, NumberOfSegments=None):
# type: (str, int) -> IsisSRTunnelList
"""Updates isisSRTunnelList resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NumberOfSegments (number): Number of Segments
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def get_device_ids(self, PortNames=None, Active=None, SourceIpv4=None, SourceIpv6=None, TunnelDescription=None, UsingHeadEndNodePrefix=None):
"""Base class infrastructure that gets a list of isisSRTunnelList device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- SourceIpv4 (str): optional regex of sourceIpv4
- SourceIpv6 (str): optional regex of sourceIpv6
- TunnelDescription (str): optional regex of tunnelDescription
- UsingHeadEndNodePrefix (str): optional regex of usingHeadEndNodePrefix
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
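    # Hedged usage sketch (added; assumes `tunnel_list` is an IsisSRTunnelList
    # instance reached through an ixnetwork_restpy session's topology hierarchy):
    #
    #     tunnel_list.update(Name='SR Tunnel 1', NumberOfSegments=3)
    #     device_ids = tunnel_list.get_device_ids(TunnelDescription='.*')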
|
py | b410dd9f01a411445b56d2415992884188576c04 | """
DAY 44 : Maximum subset XOR.
https://www.geeksforgeeks.org/find-maximum-subset-xor-given-set/
QUESTION : You don't need to read input or print anything. Your task is to complete the function
maxSubarrayXOR() which takes the array and an integer as input and returns the maximum subset XOR value.
Expected Time Complexity: O(N*Log(max(arr[i]))).
Expected Auxiliary Space : O(1).
Contraints :
1 <= N <= 10^5
1 <= arr[i] <= 10^6
"""
INT_BITS=32
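# Hedged algorithm note (added): the routine below is Gaussian elimination over
# GF(2) -- for each bit from high to low it picks an element with that bit set,
# moves it to the front, and XORs it into every other element with the same bit,
# leaving a basis whose combined XOR is the maximum. E.g. for {9, 8, 5} the answer
# is 13 (= 8 ^ 5), which the demo at the bottom of this file prints.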
def maxSubarrayXOR(set,n):
index = 0
for i in range(INT_BITS-1,-1,-1):
maxInd = index
maxEle = -2147483648
for j in range(index,n):
if ( (set[j] & (1 << i)) != 0
and set[j] > maxEle ):
maxEle = set[j]
maxInd = j
if (maxEle ==-2147483648):
continue
temp=set[index]
set[index]=set[maxInd]
set[maxInd]=temp
maxInd = index
for j in range(n):
if (j != maxInd and
(set[j] & (1 << i)) != 0):
set[j] = set[j] ^ set[maxInd]
index=index + 1
res = 0
for i in range(n):
res =res ^ set[i]
return res
set = [9, 8, 5]
n = len(set)
print("Max subset XOR is ",end="")
print(maxSubarrayXOR(set, n)) |
py | b410decf21ba42d90c042bb38992b0e7dc8f4fda |
class OkexPyexAPI(object):
def __init__(self, config: str):
super().__init__()
self.config = config
|
py | b410e0df579c5068aa1f142eb67e09f650d2835e | ''' setting before run. every notebook should include this code. '''
import os
# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"]="0"
import sys
_r = os.getcwd().split('/')
_p = '/'.join(_r[:_r.index('gate-decorator-pruning')+1])
print('Change dir from %s to %s' % (os.getcwd(), _p))
os.chdir(_p)
sys.path.append(_p)
from config import parse_from_dict
parse_from_dict({
"base": {
"task_name": "resnet56m_cifar100_ticktock_mutual",
"model_saving_interval": 1,
"cuda": True,
"seed": 1995,
"checkpoint_path": "",
"epoch": 0,
"multi_gpus": True,
"fp16": False
},
"model": {
"name": "resnet56m",
"num_class": 100,
"pretrained": False,
"resolution": [32, 28, 24, 20]
},
"train": {
"trainer": "mutual",
"max_epoch": 200,
"optim": "sgd",
# "steplr": [
# [80, 0.1],
# [120, 0.01],
# [160, 0.001]
# ],
"weight_decay": 5e-4,
"momentum": 0.9,
"nesterov": True
},
"data": {
"type": "cifar100",
"shuffle": True,
"batch_size": 128,
"test_batch_size": 128,
"num_workers": 4
},
"loss": {
"criterion": "softmax"
},
"gbn": {
"sparse_lambda": 1e-3,
"flops_eta": 0,
"lr_min": 1e-3,#3
"lr_max": 1e-2,#2
"tock_epoch": 10,
"T": 10,
"p": 0.0002
}
})
from config import cfg
import torch
import torch.nn as nn
import numpy as np
import torch.optim as optim
from logger import logger
from main import set_seeds, recover_pack, _step_lr, _sgdr
from models import get_model
from utils import dotdict
from prune.universal import Meltable, GatedBatchNorm2d, Conv2dObserver, IterRecoverFramework, FinalLinearObserver
from prune.utils import analyse_model, finetune
set_seeds()
pack = recover_pack()
model_dict = torch.load('logs/resnet56m_cifar100_baseline/ckp.192.torch', map_location='cpu' if not cfg.base.cuda else 'cuda')
pack.net.module.load_state_dict(model_dict)
GBNs = GatedBatchNorm2d.transform(pack.net)
for gbn in GBNs:
gbn.extract_from_bn()
pack.optimizer = optim.SGD(
pack.net.parameters() ,
lr=2e-3,
momentum=cfg.train.momentum,
weight_decay=cfg.train.weight_decay,
nesterov=cfg.train.nesterov
)
print(cfg.base.task_name)
import uuid
def bottleneck_set_group(net):
layers = [
net.module.layer1,
net.module.layer2,
net.module.layer3
]
for m in layers:
masks = []
if m == net.module.layer1:
masks.append(pack.net.module.bn1)
for mm in m.modules():
if mm.__class__.__name__ == 'BasicBlock':
if mm.downsample:
masks.append(mm.downsample._modules['1'])
masks.append(mm.bn2)
group_id = uuid.uuid1()
for mk in masks:
mk.set_groupid(group_id)
# def bottleneck_set_group(net):
# layers = [3,7,13,16]
# masks = []
# for idx, m in enumerate(net.module.features):
# # if idx == 0:
# # masks.append(m[1])
# if m.__class__.__name__ == 'Block':
# if not m.residual_connection:
# masks.append(m.shortcut._modules['1'])
# masks.append(m.body[7])
# if idx in layers:
# group_id = uuid.uuid1()
# for mk in masks:
# mk.set_groupid(group_id)
# masks = []
# def bottleneck_set_group(net):
# layers = [2,5,9,12,15]
# masks = []
# for idx, m in enumerate(net.module.features):
# # if idx == 0:
# # masks.append(m[1])
# if m.__class__.__name__ == 'InvertedResidual':
# if m.residual_connection:
# masks.append(m.body[7])
# if idx in layers:
# group_id = uuid.uuid1()
# for mk in masks:
# mk.set_groupid(group_id)
# masks = []
# #depthwise
# masks = []
# for idx, m in enumerate(net.module.features):
# if idx == 0:
# masks.append(m[1])
# if m.__class__.__name__ == 'InvertedResidual':
# for i in range(0, len(m.body)-1):
# if isinstance(m.body[i], nn.Conv2d):
# if m.body[i].groups > 1:
# masks.append(m.body[i+1])
# break
# else:
# masks.append(m.body[i+1])
# group_id = uuid.uuid1()
# if len(masks) > 1:
# for mk in masks:
# mk.set_groupid(group_id)
# masks = []
bottleneck_set_group(pack.net)
def clone_model(net):
model = get_model()
gbns = GatedBatchNorm2d.transform(model.module)
model.load_state_dict(net.state_dict())
return model, gbns
cloned, _ = clone_model(pack.net)
BASE_FLOPS, BASE_PARAM = [], []
for res in cfg.model.resolution:
f, p = analyse_model(cloned.module, torch.randn(1, 3, res, res).cuda())
BASE_FLOPS.append(f)
BASE_PARAM.append(p)
print('%.3f MFLOPS' % (f / 1e6))
print('%.3f M' % (p / 1e6))
del cloned
def eval_prune(pack):
cloned, _ = clone_model(pack.net)
_ = Conv2dObserver.transform(cloned.module)
# cloned.module.classifier[0] = FinalLinearObserver(cloned.module.classifier[0])
cloned.module.fc = FinalLinearObserver(cloned.module.fc)
cloned_pack = dotdict(pack.copy())
cloned_pack.net = cloned
Meltable.observe(cloned_pack, 0.001)
Meltable.melt_all(cloned_pack.net)
flops = []
params = []
for res in cfg.model.resolution:
f, p = analyse_model(cloned_pack.net.module, torch.randn(1, 3, res, res).cuda())
flops.append(f)
params.append(p)
del cloned
del cloned_pack
return flops, params
pack.trainer.test(pack)
pack.tick_trainset = pack.train_loader
prune_agent = IterRecoverFramework(pack, GBNs, sparse_lambda = cfg.gbn.sparse_lambda, flops_eta = cfg.gbn.flops_eta, minium_filter = 3)
LOGS = []
flops_save_points = set([95, 90, 85, 80, 75, 70, 65, 60, 55, 50, 45, 40, 35, 30, 25, 20, 15, 10, 5])
iter_idx = 0
info = {}
info.update(pack.trainer.test(pack))
print('Test Acc: %.2f' % (info['acc@1']))
prune_agent.tock(lr_min=cfg.gbn.lr_min, lr_max=cfg.gbn.lr_max, tock_epoch=cfg.gbn.tock_epoch)
while True:
left_filter = prune_agent.total_filters - prune_agent.pruned_filters
num_to_prune = int(left_filter * cfg.gbn.p)
info = prune_agent.prune(num_to_prune, tick=True, lr=cfg.gbn.lr_min)
flops, params = eval_prune(pack)
for idx in range(0, len(cfg.model.resolution)):
info.update({
'flops': '[%.2f%%] %.3f MFLOPS' % (flops[idx]/BASE_FLOPS[idx] * 100, flops[idx] / 1e6),
'param': '[%.2f%%] %.3f M' % (params[idx]/BASE_PARAM[idx] * 100, params[idx] / 1e6)
})
LOGS.append(info)
print('Iter: %d,\t FLOPS: %s,\t Param: %s,\t Left: %d,\t Pruned Ratio: %.2f %%,\t Train Loss: %.4f,\t Test Acc: %.2f' %
(iter_idx, info['flops'], info['param'], info['left'], info['total_pruned_ratio'] * 100, info['train_loss'], info['after_prune_test_acc']))
iter_idx += 1
if iter_idx % cfg.gbn.T == 0:
print('Tocking:')
prune_agent.tock(lr_min=cfg.gbn.lr_min, lr_max=cfg.gbn.lr_max, tock_epoch=cfg.gbn.tock_epoch)
flops_ratio = flops[idx]/BASE_FLOPS[idx] * 100
for point in [i for i in list(flops_save_points)]:
if flops_ratio <= point:
torch.save(pack.net.module.state_dict(), './logs/{}/{}.ckp'.format(cfg.base.task_name, point))
flops_save_points.remove(point)
if len(flops_save_points) == 0:
break |
py | b410e1c9c0d830cce1c6441f832eaaf11031a249 | """
init module for vega_admin.users
"""
# pylint: disable=invalid-name
default_app_config = 'vega_admin.contrib.users.apps.UsersConfig' # noqa
|
py | b410e26100333649ab7f8e199a7ea5f6efecd093 | from django.contrib.auth import login
from django.shortcuts import redirect
from django.views.generic import CreateView
from .forms import StudentSignUpForm
from .models import User
class StudentSignUpView(CreateView):
model = User
form_class = StudentSignUpForm
template_name = 'registration/signup_form.html'
def get_context_data(self, **kwargs):
kwargs['user_type'] = 'student'
return super().get_context_data(**kwargs)
def form_valid(self, form):
user = form.save()
login(self.request, user)
return redirect('students:quiz_list')
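# Hedged wiring sketch (added; the URL pattern and route name are assumptions):
# the view is typically exposed from urls.py, e.g.
#
#     path('accounts/signup/student/', StudentSignUpView.as_view(), name='student_signup')
#
# so a successful signup logs the student in and redirects to 'students:quiz_list'.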
|
py | b410e29a3c8e9512e10ba9b07b19867f7675ab15 | #__LICENSE_GOES_HERE__
import commands
import os
import sys
scriptDir = os.path.abspath(sys.path[0])
digsbyDir = os.path.abspath(os.path.join(scriptDir, ".."))
sys.path += [digsbyDir]
startDir = os.getcwd()
homeDir = None
from .buildfileutils import which
if sys.platform.startswith("win"):
homeDir = os.environ['USERPROFILE']
else:
homeDir = os.environ["HOME"]
assert os.path.exists(homeDir)
def get_patch_cmd():
if os.name != "nt":
return 'patch'
unix_tools = os.environ.get('UNIX_TOOLS')
if unix_tools is not None:
patch_cmd = os.path.join(unix_tools, 'patch')
elif os.path.isdir(r'c:\Program Files (x86)\Git\bin'):
patch_cmd = r'c:\Program Files (x86)\Git\bin\patch'
else:
patch_cmd = which('patch', r'c:\cygwin\bin\patch')
return patch_cmd
class BuildDirs:
def __init__(self):
self.depsDir = None
self.wxWidgetsDir = None
self.wxPythonDir = None
self.wxWebKitDir = None
self.sipDir = None
self.wxpyDir = None
self.boostDir = None
def initBuildDirs(self, depsDir, **overrides):
self.depsDir = depsDir
self.wxWidgetsDir = os.path.join(self.depsDir, "wxWidgets")
self.wxPythonDir = os.path.join(self.wxWidgetsDir, "wxPython")
self.wxWebKitDir = os.path.join(self.depsDir, overrides.get("wxWebKit", "wxWebKit"))
self.sipDir = os.path.join(self.depsDir, "sip")
self.wxpyDir = os.path.join(self.depsDir, "wxpy")
self.boostDir = os.path.join(self.depsDir, 'boost_1_42_0')
buildDirs = BuildDirs()
if sys.platform.startswith('win'):
common_dir = os.path.dirname(os.path.abspath(__file__))
buildDirs.initBuildDirs(os.path.join(os.path.abspath(os.path.join(common_dir, '..')), 'msw'), wxWebKit='WebKit')
# build boost
from buildfileutils import tardep
boost = tardep('http://mini/mirror/', 'boost_1_42_0', '.tar.gz', 40932853, dirname = buildDirs.boostDir)
#boost.get()
#^ copied from build-deps.py
def checkForDeps(swig=False):
retVal = which("which bakefile")
if retVal != 0:
print "ERROR: You must have Bakefile (http://bakefile.org) installed to continue. Exiting..."
sys.exit(1)
if swig:
retVal = which("which swig")
if retVal != 0:
print "ERROR: You must have Robin's SWIG (http://wxpython.wxcommunity.com/tools/) installed to continue. Exiting..."
sys.exit(1)
if not sys.platform.startswith("win") and commands.getoutput("swig -version").find("1.3.29") == -1:
print "ERROR: Wrong SWIG. You must install Robin's SWIG (http://wxpython.wxcommunity.com/tools/). Exiting..."
sys.exit(1)
|
py | b410e326ccdf9341346e4c502f8c8a9316e17e57 | #! /usr/bin/python3
# Filled orders may not be re‐opened, so only orders not involving BTC (and which
# therefore cannot have expired order matches) may be filled.
import struct
import decimal
D = decimal.Decimal
import logging
from . import (util, config, exceptions, bitcoin, util)
FORMAT = '>QQQQHQ'
LENGTH = 8 + 8 + 8 + 8 + 2 + 8
ID = 10
def cancel_order (db, order, status, block_index):
cursor = db.cursor()
# Update status of order.
bindings = {
'status': status,
'tx_hash': order['tx_hash']
}
sql='update orders set status = :status where tx_hash = :tx_hash'
cursor.execute(sql, bindings)
util.message(db, block_index, 'update', 'orders', bindings)
if order['give_asset'] != config.BTC: # Can’t credit BTC.
util.credit(db, block_index, order['source'], order['give_asset'], order['give_remaining'], event=order['tx_hash'])
cursor.close()
def cancel_order_match (db, order_match, status, block_index):
'''
May only be cancelled by callbacks.'''
cursor = db.cursor()
# Update status of order match.
bindings = {
'status': status,
'order_match_id': order_match['id']
}
sql='update order_matches set status = :status where id = :order_match_id'
cursor.execute(sql, bindings)
util.message(db, block_index, 'update', 'order_matches', bindings)
order_match_id = order_match['tx0_hash'] + order_match['tx1_hash']
# If tx0 is dead, credit address directly; if not, replenish give remaining, get remaining, and fee required remaining.
orders = list(cursor.execute('''SELECT * FROM orders \
WHERE tx_index = ?''',
(order_match['tx0_index'],)))
assert len(orders) == 1
tx0_order = orders[0]
if tx0_order['status'] in ('expired', 'cancelled'):
tx0_order_status = tx0_order['status']
if order_match['forward_asset'] != config.BTC:
util.credit(db, block_index, order_match['tx0_address'],
order_match['forward_asset'],
order_match['forward_quantity'], event=order_match['id'])
else:
tx0_give_remaining = tx0_order['give_remaining'] + order_match['forward_quantity']
tx0_get_remaining = tx0_order['get_remaining'] + order_match['backward_quantity']
if tx0_order['get_asset'] == config.BTC and (block_index >= 297000 or config.TESTNET): # Protocol change.
tx0_fee_required_remaining = tx0_order['fee_required_remaining'] + order_match['fee_paid']
else:
tx0_fee_required_remaining = tx0_order['fee_required_remaining']
tx0_order_status = tx0_order['status']
bindings = {
'give_remaining': tx0_give_remaining,
'get_remaining': tx0_get_remaining,
'status': tx0_order_status,
'fee_required_remaining': tx0_fee_required_remaining,
'tx_hash': order_match['tx0_hash']
}
sql='update orders set give_remaining = :give_remaining, get_remaining = :get_remaining, fee_required_remaining = :fee_required_remaining where tx_hash = :tx_hash'
cursor.execute(sql, bindings)
util.message(db, block_index, 'update', 'orders', bindings)
# If tx1 is dead, credit address directly; if not, replenish give remaining, get remaining, and fee required remaining.
orders = list(cursor.execute('''SELECT * FROM orders \
WHERE tx_index = ?''',
(order_match['tx1_index'],)))
assert len(orders) == 1
tx1_order = orders[0]
if tx1_order['status'] in ('expired', 'cancelled'):
tx1_order_status = tx1_order['status']
if order_match['backward_asset'] != config.BTC:
util.credit(db, block_index, order_match['tx1_address'],
order_match['backward_asset'],
order_match['backward_quantity'], event=order_match['id'])
else:
tx1_give_remaining = tx1_order['give_remaining'] + order_match['backward_quantity']
tx1_get_remaining = tx1_order['get_remaining'] + order_match['forward_quantity']
if tx1_order['get_asset'] == config.BTC and (block_index >= 297000 or config.TESTNET): # Protocol change.
tx1_fee_required_remaining = tx1_order['fee_required_remaining'] + order_match['fee_paid']
else:
tx1_fee_required_remaining = tx1_order['fee_required_remaining']
tx1_order_status = tx1_order['status']
bindings = {
'give_remaining': tx1_give_remaining,
'get_remaining': tx1_get_remaining,
'status': tx1_order_status,
'fee_required_remaining': tx1_fee_required_remaining,
'tx_hash': order_match['tx1_hash']
}
sql='update orders set give_remaining = :give_remaining, get_remaining = :get_remaining, fee_required_remaining = :fee_required_remaining where tx_hash = :tx_hash'
cursor.execute(sql, bindings)
util.message(db, block_index, 'update', 'orders', bindings)
if block_index < 286500: # Protocol change.
# Sanity check: one of the two must have expired.
tx0_order_time_left = tx0_order['expire_index'] - block_index
tx1_order_time_left = tx1_order['expire_index'] - block_index
assert tx0_order_time_left or tx1_order_time_left
# Re‐match. # Protocol change
if block_index >= 310000 or config.TESTNET:
cursor.execute('''SELECT * FROM transactions\
WHERE tx_hash = ?''', (tx0_order['tx_hash'],))
match(db, list(cursor)[0], block_index)
cursor.execute('''SELECT * FROM transactions\
WHERE tx_hash = ?''', (tx1_order['tx_hash'],))
match(db, list(cursor)[0], block_index)
cursor.close()
def validate (db, source, give_asset, give_quantity, get_asset, get_quantity, expiration, fee_required):
problems = []
cursor = db.cursor()
if give_asset == config.BTC and get_asset == config.BTC:
problems.append('cannot trade {} for itself'.format(config.BTC))
if not isinstance(give_quantity, int):
problems.append('give_quantity must be in satoshis')
return problems
if not isinstance(get_quantity, int):
problems.append('get_quantity must be in satoshis')
return problems
if not isinstance(fee_required, int):
problems.append('fee_required must be in satoshis')
return problems
if not isinstance(expiration, int):
problems.append('expiration must be expressed as an integer block delta')
return problems
if give_quantity <= 0: problems.append('non‐positive give quantity')
if get_quantity <= 0: problems.append('non‐positive get quantity')
if fee_required < 0: problems.append('negative fee_required')
if expiration <= 0: problems.append('non‐positive expiration')
if not give_quantity or not get_quantity:
problems.append('zero give or zero get')
cursor.execute('select * from issuances where (status = ? and asset = ?)', ('valid', give_asset))
if give_asset not in (config.BTC, config.XCP) and not cursor.fetchall():
problems.append('no such asset to give ({})'.format(give_asset))
cursor.execute('select * from issuances where (status = ? and asset = ?)', ('valid', get_asset))
if get_asset not in (config.BTC, config.XCP) and not cursor.fetchall():
problems.append('no such asset to get ({})'.format(get_asset))
if expiration > config.MAX_EXPIRATION:
problems.append('expiration overflow')
# For SQLite3
if give_quantity > config.MAX_INT or get_quantity > config.MAX_INT or fee_required > config.MAX_INT:
problems.append('integer overflow')
cursor.close()
return problems
def compose (db, source, give_asset, give_quantity, get_asset, get_quantity, expiration, fee_required):
cursor = db.cursor()
balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, give_asset)))
if give_asset != config.BTC and (not balances or balances[0]['quantity'] < give_quantity):
raise exceptions.OrderError('insufficient funds')
problems = validate(db, source, give_asset, give_quantity, get_asset, get_quantity, expiration, fee_required)
if problems: raise exceptions.OrderError(problems)
give_id = util.asset_id(give_asset)
get_id = util.asset_id(get_asset)
data = config.PREFIX + struct.pack(config.TXTYPE_FORMAT, ID)
data += struct.pack(FORMAT, give_id, give_quantity, get_id, get_quantity,
expiration, fee_required)
cursor.close()
return (source, [], data)
def parse (db, tx, message):
order_parse_cursor = db.cursor()
# Unpack message.
try:
assert len(message) == LENGTH
give_id, give_quantity, get_id, get_quantity, expiration, fee_required = struct.unpack(FORMAT, message)
give_asset = util.asset_name(give_id)
get_asset = util.asset_name(get_id)
status = 'open'
except (AssertionError, struct.error) as e:
give_asset, give_quantity, get_asset, get_quantity, expiration, fee_required = 0, 0, 0, 0, 0, 0
status = 'invalid: could not unpack'
price = 0
if status == 'open':
try: price = util.price(get_quantity, give_quantity, tx['block_index'])
except Exception as e: pass
        # Overorder protection: if the source's balance is too low, cap give_quantity at the balance (get_quantity is rescaled below at the same price).
order_parse_cursor.execute('''SELECT * FROM balances \
WHERE (address = ? AND asset = ?)''', (tx['source'], give_asset))
balances = list(order_parse_cursor)
if give_asset != config.BTC:
if not balances:
give_quantity = 0
else:
balance = balances[0]['quantity']
if balance < give_quantity:
give_quantity = balance
get_quantity = int(price * give_quantity)
problems = validate(db, tx['source'], give_asset, give_quantity, get_asset, get_quantity, expiration, fee_required)
if problems: status = 'invalid: ' + '; '.join(problems)
# Debit give quantity. (Escrow.)
if status == 'open':
if give_asset != config.BTC: # No need (or way) to debit BTC.
util.debit(db, tx['block_index'], tx['source'], give_asset, give_quantity, event=tx['tx_hash'])
# Add parsed transaction to message-type–specific table.
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'give_asset': give_asset,
'give_quantity': give_quantity,
'give_remaining': give_quantity,
'get_asset': get_asset,
'get_quantity': get_quantity,
'get_remaining': get_quantity,
'expiration': expiration,
'expire_index': tx['block_index'] + expiration,
'fee_required': fee_required,
'fee_required_remaining': fee_required,
'fee_provided': tx['fee'],
'fee_provided_remaining': tx['fee'],
'status': status,
}
sql='insert into orders values(:tx_index, :tx_hash, :block_index, :source, :give_asset, :give_quantity, :give_remaining, :get_asset, :get_quantity, :get_remaining, :expiration, :expire_index, :fee_required, :fee_required_remaining, :fee_provided, :fee_provided_remaining, :status)'
order_parse_cursor.execute(sql, bindings)
# Match.
if status == 'open' and tx['block_index'] != config.MEMPOOL_BLOCK_INDEX:
match(db, tx)
order_parse_cursor.close()
def match (db, tx, block_index=None):
cursor = db.cursor()
# Get order in question.
orders = list(cursor.execute('''SELECT * FROM orders\
WHERE (tx_index = ? AND status = ?)''', (tx['tx_index'], 'open')))
if not orders:
cursor.close()
return
else:
assert len(orders) == 1
tx1 = orders[0]
cursor.execute('''SELECT * FROM orders \
WHERE (give_asset=? AND get_asset=? AND status=? AND tx_hash != ?)''',
(tx1['get_asset'], tx1['give_asset'], 'open', tx1['tx_hash']))
tx1_give_remaining = tx1['give_remaining']
tx1_get_remaining = tx1['get_remaining']
order_matches = cursor.fetchall()
if tx['block_index'] > 284500 or config.TESTNET: # Protocol change.
order_matches = sorted(order_matches, key=lambda x: x['tx_index']) # Sort by tx index second.
order_matches = sorted(order_matches, key=lambda x: util.price(x['get_quantity'], x['give_quantity'], tx1['block_index'])) # Sort by price first.
# Get fee remaining.
tx1_fee_required_remaining = tx1['fee_required_remaining']
tx1_fee_provided_remaining = tx1['fee_provided_remaining']
tx1_status = tx1['status']
for tx0 in order_matches:
order_match_id = tx0['tx_hash'] + tx1['tx_hash']
if not block_index:
block_index = max(tx0['block_index'], tx1['block_index'])
if tx1_status != 'open': break
logging.debug('Considering: ' + tx0['tx_hash'])
tx0_give_remaining = tx0['give_remaining']
tx0_get_remaining = tx0['get_remaining']
# Ignore previous matches. (Both directions, just to be sure.)
cursor.execute('''SELECT * FROM order_matches
WHERE id = ? ''', (tx0['tx_hash'] + tx1['tx_hash'], ))
if list(cursor):
logging.debug('Skipping: previous match')
continue
cursor.execute('''SELECT * FROM order_matches
WHERE id = ? ''', (tx1['tx_hash'] + tx0['tx_hash'], ))
if list(cursor):
logging.debug('Skipping: previous match')
continue
# Get fee provided remaining.
tx0_fee_required_remaining = tx0['fee_required_remaining']
tx0_fee_provided_remaining = tx0['fee_provided_remaining']
        # Make sure that both orders still have funds remaining (an order involving BTC cannot be ‘filled’, so this must be checked explicitly).
if tx0['give_asset'] == config.BTC or tx0['get_asset'] == config.BTC: # Gratuitous
if tx0_give_remaining <= 0 or tx1_give_remaining <= 0:
logging.debug('Skipping: negative give quantity remaining')
continue
if block_index >= 292000 and block_index <= 310500 and not config.TESTNET: # Protocol changes
if tx0_get_remaining <= 0 or tx1_get_remaining <= 0:
logging.debug('Skipping: negative get quantity remaining')
continue
if block_index >= 294000 or config.TESTNET: # Protocol change.
if tx0['fee_required_remaining'] < 0:
logging.debug('Skipping: negative tx0 fee required remaining')
continue
if tx0['fee_provided_remaining'] < 0:
logging.debug('Skipping: negative tx0 fee provided remaining')
continue
if tx1_fee_provided_remaining < 0:
logging.debug('Skipping: negative tx1 fee provided remaining')
continue
if tx1_fee_required_remaining < 0:
logging.debug('Skipping: negative tx1 fee required remaining')
continue
# If the prices agree, make the trade. The found order sets the price,
# and they trade as much as they can.
tx0_price = util.price(tx0['get_quantity'], tx0['give_quantity'], block_index)
tx1_price = util.price(tx1['get_quantity'], tx1['give_quantity'], block_index)
tx1_inverse_price = util.price(tx1['give_quantity'], tx1['get_quantity'], block_index)
# Protocol change.
if tx['block_index'] < 286000: tx1_inverse_price = util.price(1, tx1_price, block_index)
logging.debug('Tx0 Price: {}; Tx1 Inverse Price: {}'.format(float(tx0_price), float(tx1_inverse_price)))
if tx0_price <= tx1_inverse_price:
logging.debug('Potential forward quantities: {}, {}'.format(tx0_give_remaining, int(util.price(tx1_give_remaining, tx0_price, block_index))))
forward_quantity = int(min(tx0_give_remaining, int(util.price(tx1_give_remaining, tx0_price, block_index))))
logging.debug('Forward Quantity: {}'.format(forward_quantity))
backward_quantity = round(forward_quantity * tx0_price)
logging.debug('Backward Quantity: {}'.format(backward_quantity))
if not forward_quantity:
logging.debug('Skipping: zero forward quantity.')
continue
if block_index >= 286500 or config.TESTNET: # Protocol change.
if not backward_quantity:
logging.debug('Skipping: zero backward quantity.')
continue
# Check and update fee remainings.
fee = 0
if block_index >= 286500 or config.TESTNET: # Protocol change. Deduct fee_required from fee_provided_remaining, etc., if possible (else don’t match).
if tx1['get_asset'] == config.BTC:
if block_index >= 310500 or config.TESTNET: # Protocol change.
fee = int(tx1['fee_required'] * util.price(backward_quantity, tx1['give_quantity'], block_index))
else:
fee = int(tx1['fee_required_remaining'] * util.price(forward_quantity, tx1_get_remaining, block_index))
logging.debug('Tx0 fee provided remaining: {}; required fee: {}'.format(tx0_fee_provided_remaining / config.UNIT, fee / config.UNIT))
if tx0_fee_provided_remaining < fee:
logging.debug('Skipping: tx0 fee provided remaining is too low.')
continue
else:
tx0_fee_provided_remaining -= fee
if block_index >= 287800 or config.TESTNET: # Protocol change.
tx1_fee_required_remaining -= fee
elif tx1['give_asset'] == config.BTC:
if block_index >= 310500 or config.TESTNET: # Protocol change.
fee = int(tx0['fee_required'] * util.price(backward_quantity, tx0['give_quantity'], block_index))
else:
fee = int(tx0['fee_required_remaining'] * util.price(backward_quantity, tx0_get_remaining, block_index))
logging.debug('Tx1 fee provided remaining: {}; required fee: {}'.format(tx1_fee_provided_remaining / config.UNIT, fee / config.UNIT))
if tx1_fee_provided_remaining < fee:
logging.debug('Skipping: tx1 fee provided remaining is too low.')
continue
else:
tx1_fee_provided_remaining -= fee
if block_index >= 287800 or config.TESTNET: # Protocol change.
tx0_fee_required_remaining -= fee
else: # Don’t deduct.
if tx1['get_asset'] == config.BTC:
if tx0_fee_provided_remaining < tx1['fee_required']: continue
elif tx1['give_asset'] == config.BTC:
if tx1_fee_provided_remaining < tx0['fee_required']: continue
forward_asset, backward_asset = tx1['get_asset'], tx1['give_asset']
if config.BTC in (tx1['give_asset'], tx1['get_asset']):
status = 'pending'
else:
status = 'completed'
# Credit.
util.credit(db, tx['block_index'], tx1['source'], tx1['get_asset'],
forward_quantity, event=order_match_id)
util.credit(db, tx['block_index'], tx0['source'], tx0['get_asset'],
backward_quantity, event=order_match_id)
# Debit the order, even if it involves giving bitcoins, and so one
# can't debit the sending account.
# Get remainings may be negative.
tx0_give_remaining -= forward_quantity
tx0_get_remaining -= backward_quantity
tx1_give_remaining -= backward_quantity
tx1_get_remaining -= forward_quantity
# Update give_remaining, get_remaining.
# tx0
tx0_status = 'open'
if tx0_give_remaining <= 0 or (tx0_get_remaining <= 0 and (block_index >= 292000 or config.TESTNET)): # Protocol change
if tx0['give_asset'] != config.BTC and tx0['get_asset'] != config.BTC:
# Fill order, and recredit give_remaining.
tx0_status = 'filled'
util.credit(db, block_index, tx0['source'], tx0['give_asset'], tx0_give_remaining, event=tx1['tx_hash'], action='filled')
bindings = {
'give_remaining': tx0_give_remaining,
'get_remaining': tx0_get_remaining,
'fee_required_remaining': tx0_fee_required_remaining,
'fee_provided_remaining': tx0_fee_provided_remaining,
'status': tx0_status,
'tx_hash': tx0['tx_hash']
}
sql='update orders set give_remaining = :give_remaining, get_remaining = :get_remaining, fee_required_remaining = :fee_required_remaining, fee_provided_remaining = :fee_provided_remaining, status = :status where tx_hash = :tx_hash'
cursor.execute(sql, bindings)
util.message(db, block_index, 'update', 'orders', bindings)
# tx1
if tx1_give_remaining <= 0 or (tx1_get_remaining <= 0 and (block_index >= 292000 or config.TESTNET)): # Protocol change
if tx1['give_asset'] != config.BTC and tx1['get_asset'] != config.BTC:
# Fill order, and recredit give_remaining.
tx1_status = 'filled'
util.credit(db, block_index, tx1['source'], tx1['give_asset'], tx1_give_remaining, event=tx0['tx_hash'], action='filled')
bindings = {
'give_remaining': tx1_give_remaining,
'get_remaining': tx1_get_remaining,
'fee_required_remaining': tx1_fee_required_remaining,
'fee_provided_remaining': tx1_fee_provided_remaining,
'status': tx1_status,
'tx_hash': tx1['tx_hash']
}
sql='update orders set give_remaining = :give_remaining, get_remaining = :get_remaining, fee_required_remaining = :fee_required_remaining, fee_provided_remaining = :fee_provided_remaining, status = :status where tx_hash = :tx_hash'
cursor.execute(sql, bindings)
util.message(db, block_index, 'update', 'orders', bindings)
# Calculate when the match will expire.
if block_index >= 308000 or config.TESTNET: # Protocol change.
match_expire_index = block_index + 20
elif block_index >= 286500 or config.TESTNET: # Protocol change.
match_expire_index = block_index + 10
else:
match_expire_index = min(tx0['expire_index'], tx1['expire_index'])
# Record order match.
bindings = {
'id': tx0['tx_hash'] + tx['tx_hash'],
'tx0_index': tx0['tx_index'],
'tx0_hash': tx0['tx_hash'],
'tx0_address': tx0['source'],
'tx1_index': tx1['tx_index'],
'tx1_hash': tx1['tx_hash'],
'tx1_address': tx1['source'],
'forward_asset': forward_asset,
'forward_quantity': forward_quantity,
'backward_asset': backward_asset,
'backward_quantity': backward_quantity,
'tx0_block_index': tx0['block_index'],
'tx1_block_index': tx1['block_index'],
'block_index': block_index,
'tx0_expiration': tx0['expiration'],
'tx1_expiration': tx1['expiration'],
'match_expire_index': match_expire_index,
'fee_paid': fee,
'status': status,
}
sql='insert into order_matches values(:id, :tx0_index, :tx0_hash, :tx0_address, :tx1_index, :tx1_hash, :tx1_address, :forward_asset, :forward_quantity, :backward_asset, :backward_quantity, :tx0_block_index, :tx1_block_index, :block_index, :tx0_expiration, :tx1_expiration, :match_expire_index, :fee_paid, :status)'
cursor.execute(sql, bindings)
if tx1_status == 'filled':
break
cursor.close()
return
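# --- Editor's sketch (not part of the original module) ----------------------
# The price test and quantity arithmetic in match() above, shown with concrete
# numbers. util.price() is assumed here to behave like an exact ratio, so
# fractions.Fraction stands in for it; the two orders are hypothetical.
def _illustrate_match_arithmetic():
    from fractions import Fraction
    tx0 = {'give_remaining': 100, 'give_quantity': 100, 'get_quantity': 50}  # offers 100 A for 50 B
    tx1 = {'give_remaining': 30, 'give_quantity': 60, 'get_quantity': 120}   # offers 60 B for 120 A
    tx0_price = Fraction(tx0['get_quantity'], tx0['give_quantity'])          # 1/2 B per A (the found order sets the price)
    tx1_inverse_price = Fraction(tx1['give_quantity'], tx1['get_quantity'])  # 1/2 B per A
    assert tx0_price <= tx1_inverse_price                                    # prices agree, so they trade
    forward_quantity = int(min(tx0['give_remaining'], tx1['give_remaining'] / tx0_price))
    backward_quantity = round(forward_quantity * tx0_price)
    return forward_quantity, backward_quantity                               # (60, 30)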
def expire (db, block_index):
cursor = db.cursor()
# Expire orders and give refunds for the quantity give_remaining (if non-zero; if not BTC).
cursor.execute('''SELECT * FROM orders \
WHERE (status = ? AND expire_index < ?)''', ('open', block_index))
for order in cursor.fetchall():
cancel_order(db, order, 'expired', block_index)
# Record offer expiration.
bindings = {
'order_index': order['tx_index'],
'order_hash': order['tx_hash'],
'source': order['source'],
'block_index': block_index
}
sql='insert into order_expirations values(:order_index, :order_hash, :source, :block_index)'
cursor.execute(sql, bindings)
# Expire order_matches for BTC with no BTC.
cursor.execute('''SELECT * FROM order_matches \
WHERE (status = ? and match_expire_index < ?)''', ('pending', block_index))
for order_match in cursor.fetchall():
cancel_order_match(db, order_match, 'expired', block_index)
# Record order match expiration.
bindings = {
'order_match_id': order_match['id'],
'tx0_address': order_match['tx0_address'],
'tx1_address': order_match['tx1_address'],
'block_index': block_index
}
sql='insert into order_match_expirations values(:order_match_id, :tx0_address, :tx1_address, :block_index)'
cursor.execute(sql, bindings)
cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
py | b410e327227aaa6c775cb1a97960381eb0f58c96 | if __name__ == '__main__':
print('AY AY AY')
|
py | b410e369064ca6ed0960c57f69fa1b36f0bae73c | import sys
sys.path.append("/datagrid/personal/neoral/repos/raft_debug")
sys.path.append('core')
import argparse
import os
import numpy as np
import torch
from PIL import Image
from RAFT.core.raft import RAFT
import RAFT.core.utils.flow_gen as flow_gen
from tqdm import tqdm
from RAFT.core.utils.utils import InputPadder
DEVICE = 'cuda'
def load_image(imfile):
img = np.array(Image.open(imfile)).astype(np.uint8)
img_torch = torch.from_numpy(img).permute(2, 0, 1).float()
return img_torch[None].to(DEVICE), img
@torch.no_grad()
def gen(args):
for dataset_name in ['testing', 'training']:
if 'kitti' in args.model:
short_model_name = 'kitti'
elif 'sintel' in args.model:
short_model_name = 'sintel'
elif 'things' in args.model:
short_model_name = 'things'
else:
short_model_name = args.model[:-4]
data_root = '/datagrid/public_datasets/KITTI/multiview/{:s}/image_2'.format(dataset_name)
save_root = '/datagrid/personal/neoral/datasets/optical_flow_neomoseg/raft_new_export/kitti_{:s}_model/{:s}'.format(short_model_name, dataset_name)
# save_root = '/datagrid/tlab/personal/neoramic/datasets/optical_flow_neomoseg/raft_new_export/kitti_aug_model/{:s}'.format(dataset_name)
ITERS = args.iters
model = torch.nn.DataParallel(RAFT(args))
model.load_state_dict(torch.load(args.model))
model = model.module
model.to(DEVICE)
model.eval()
model_e = model
for t_scale_i in range(args.time_scale):
t_scale = t_scale_i + 1
pbar = tqdm(range(200))
# pbar = tqdm([11,122,15,177])
for sequence in pbar:
for image_n in range(args.min_frame, args.max_frame + 1):
path_im1 = os.path.join(data_root, '{:06d}_{:02d}.png'.format(sequence, image_n))
path_im2 = os.path.join(data_root, '{:06d}_{:02d}.png'.format(sequence, image_n + t_scale))
if not os.path.exists(path_im1) or not os.path.exists(path_im2):
continue
pbar.set_description('t_scale = {:d}: {:06d}: {:02d} and {:02d}'.format(t_scale, sequence, image_n, image_n + t_scale))
with torch.no_grad():
# kitti images
image1, image1_orig = load_image(path_im1)
image2, image2_orig = load_image(path_im2)
padder = InputPadder(image1.shape, mode='kitti')
image1, image2 = padder.pad(image1.cuda(), image2.cuda())
_, flow_pr = model_e(image1, image2, iters=ITERS, test_mode=True)
flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
#output_filename = os.path.join(save_root, 'time_scale_{:d}'.format(t_scale), 'forward'), '{:06d}_{:02d}.png'.format(sequence, image_n)
#frame_utils.writeFlowKITTI(output_filename, flow)
#flow_predictions = model(image1, image2, iters=16, test_mode=True)
flow_gen.save_outputs(image1_orig, image2_orig, flow, os.path.join(save_root, 'time_scale_{:d}'.format(t_scale), 'forward'), '{:06d}_{:02d}.png'.format(sequence, image_n))
if args.backward:
#flow_predictions = model(image2, image1, iters=16, test_mode=True)
_, flow_pr = model_e(image2, image1, iters=ITERS, test_mode=True)
flow = padder.unpad(flow_pr[0]).permute(1, 2, 0).cpu().numpy()
flow_gen.save_outputs(image2_orig, image1_orig, flow, os.path.join(save_root, 'time_scale_{:d}'.format(t_scale), 'backward'), '{:06d}_{:02d}.png'.format(sequence, image_n + t_scale))
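# --- Editor's sketch: a hypothetical invocation of this script --------------
# The script name and checkpoint path are placeholders; only flags defined in
# the argument parser below are used.
#
#   python run_kitti_flow.py --model checkpoints/raft-kitti.pth \
#       --iters 24 --time_scale 5 --min_frame 0 --max_frame 20 --backward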
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--model', help="restore checkpoint")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--iters', type=int, default=24)
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
    parser.add_argument('--alternate_corr', action='store_true', help='use efficient correlation implementation')
parser.add_argument('--time_scale', type=int, default=5)
parser.add_argument('--min_frame', type=int, default=0)
parser.add_argument('--max_frame', type=int, default=20)
parser.add_argument('--backward', action='store_true', help='compute backward flow')
args = parser.parse_args()
gen(args) |
py | b410e3ad9c5598e8052be17753ed25ef2983e4c9 | # -*- coding: utf-8 -*-
# Copyright 2017 New Vector Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Iterable, Optional, Tuple
from twisted.internet import defer
from synapse.http.client import SimpleHttpClient
from synapse.http.site import SynapseRequest
from synapse.logging.context import make_deferred_yieldable, run_in_background
from synapse.storage.state import StateFilter
from synapse.types import UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
"""
This package defines the 'stable' API which can be used by extension modules which
are loaded into Synapse.
"""
__all__ = ["errors", "make_deferred_yieldable", "run_in_background", "ModuleApi"]
logger = logging.getLogger(__name__)
class ModuleApi:
"""A proxy object that gets passed to various plugin modules so they
can register new users etc if necessary.
"""
def __init__(self, hs, auth_handler):
self._hs = hs
self._store = hs.get_datastore()
self._auth = hs.get_auth()
self._auth_handler = auth_handler
# We expose these as properties below in order to attach a helpful docstring.
self._http_client = hs.get_simple_http_client() # type: SimpleHttpClient
self._public_room_list_manager = PublicRoomListManager(hs)
@property
def http_client(self):
"""Allows making outbound HTTP requests to remote resources.
An instance of synapse.http.client.SimpleHttpClient
"""
return self._http_client
@property
def public_room_list_manager(self):
"""Allows adding to, removing from and checking the status of rooms in the
public room list.
An instance of synapse.module_api.PublicRoomListManager
"""
return self._public_room_list_manager
def get_user_by_req(self, req, allow_guest=False):
"""Check the access_token provided for a request
Args:
req (twisted.web.server.Request): Incoming HTTP request
allow_guest (bool): True if guest users should be allowed. If this
is False, and the access token is for a guest user, an
AuthError will be thrown
Returns:
twisted.internet.defer.Deferred[synapse.types.Requester]:
the requester for this request
Raises:
synapse.api.errors.AuthError: if no user by that token exists,
or the token is invalid.
"""
return self._auth.get_user_by_req(req, allow_guest)
def get_qualified_user_id(self, username):
"""Qualify a user id, if necessary
Takes a user id provided by the user and adds the @ and :domain to
qualify it, if necessary
Args:
username (str): provided user id
Returns:
str: qualified @user:id
"""
if username.startswith("@"):
return username
return UserID(username, self._hs.hostname).to_string()
def check_user_exists(self, user_id):
"""Check if user exists.
Args:
user_id (str): Complete @user:id
Returns:
Deferred[str|None]: Canonical (case-corrected) user_id, or None
if the user is not registered.
"""
return defer.ensureDeferred(self._auth_handler.check_user_exists(user_id))
@defer.inlineCallbacks
def register(self, localpart, displayname=None, emails=[]):
"""Registers a new user with given localpart and optional displayname, emails.
Also returns an access token for the new user.
Deprecated: avoid this, as it generates a new device with no way to
return that device to the user. Prefer separate calls to register_user and
register_device.
Args:
localpart (str): The localpart of the new user.
displayname (str|None): The displayname of the new user.
emails (List[str]): Emails to bind to the new user.
Returns:
Deferred[tuple[str, str]]: a 2-tuple of (user_id, access_token)
"""
logger.warning(
"Using deprecated ModuleApi.register which creates a dummy user device."
)
user_id = yield self.register_user(localpart, displayname, emails)
_, access_token = yield self.register_device(user_id)
return user_id, access_token
def register_user(self, localpart, displayname=None, emails=[]):
"""Registers a new user with given localpart and optional displayname, emails.
Args:
localpart (str): The localpart of the new user.
displayname (str|None): The displayname of the new user.
emails (List[str]): Emails to bind to the new user.
Raises:
SynapseError if there is an error performing the registration. Check the
'errcode' property for more information on the reason for failure
Returns:
defer.Deferred[str]: user_id
"""
return defer.ensureDeferred(
self._hs.get_registration_handler().register_user(
localpart=localpart,
default_display_name=displayname,
bind_emails=emails,
)
)
def register_device(self, user_id, device_id=None, initial_display_name=None):
"""Register a device for a user and generate an access token.
Args:
user_id (str): full canonical @user:id
device_id (str|None): The device ID to check, or None to generate
a new one.
initial_display_name (str|None): An optional display name for the
device.
Returns:
defer.Deferred[tuple[str, str]]: Tuple of device ID and access token
"""
return defer.ensureDeferred(
self._hs.get_registration_handler().register_device(
user_id=user_id,
device_id=device_id,
initial_display_name=initial_display_name,
)
)
def record_user_external_id(
self, auth_provider_id: str, remote_user_id: str, registered_user_id: str
) -> defer.Deferred:
"""Record a mapping from an external user id to a mxid
Args:
auth_provider: identifier for the remote auth provider
external_id: id on that system
user_id: complete mxid that it is mapped to
"""
return defer.ensureDeferred(
self._store.record_user_external_id(
auth_provider_id, remote_user_id, registered_user_id
)
)
def generate_short_term_login_token(
self, user_id: str, duration_in_ms: int = (2 * 60 * 1000)
) -> str:
"""Generate a login token suitable for m.login.token authentication"""
return self._hs.get_macaroon_generator().generate_short_term_login_token(
user_id, duration_in_ms
)
@defer.inlineCallbacks
def invalidate_access_token(self, access_token):
"""Invalidate an access token for a user
Args:
access_token(str): access token
Returns:
twisted.internet.defer.Deferred - resolves once the access token
has been removed.
Raises:
synapse.api.errors.AuthError: the access token is invalid
"""
# see if the access token corresponds to a device
user_info = yield defer.ensureDeferred(
self._auth.get_user_by_access_token(access_token)
)
device_id = user_info.get("device_id")
user_id = user_info["user"].to_string()
if device_id:
# delete the device, which will also delete its access tokens
yield defer.ensureDeferred(
self._hs.get_device_handler().delete_device(user_id, device_id)
)
else:
# no associated device. Just delete the access token.
yield defer.ensureDeferred(
self._auth_handler.delete_access_token(access_token)
)
def run_db_interaction(self, desc, func, *args, **kwargs):
"""Run a function with a database connection
Args:
desc (str): description for the transaction, for metrics etc
func (func): function to be run. Passed a database cursor object
as well as *args and **kwargs
*args: positional args to be passed to func
**kwargs: named args to be passed to func
Returns:
Deferred[object]: result of func
"""
return defer.ensureDeferred(
self._store.db_pool.runInteraction(desc, func, *args, **kwargs)
)
def complete_sso_login(
self, registered_user_id: str, request: SynapseRequest, client_redirect_url: str
):
"""Complete a SSO login by redirecting the user to a page to confirm whether they
want their access token sent to `client_redirect_url`, or redirect them to that
URL with a token directly if the URL matches with one of the whitelisted clients.
This is deprecated in favor of complete_sso_login_async.
Args:
            registered_user_id: The MXID that has been registered as a previous step
                of this SSO login.
request: The request to respond to.
client_redirect_url: The URL to which to offer to redirect the user (or to
redirect them directly if whitelisted).
"""
self._auth_handler._complete_sso_login(
registered_user_id, request, client_redirect_url,
)
async def complete_sso_login_async(
self, registered_user_id: str, request: SynapseRequest, client_redirect_url: str
):
"""Complete a SSO login by redirecting the user to a page to confirm whether they
want their access token sent to `client_redirect_url`, or redirect them to that
URL with a token directly if the URL matches with one of the whitelisted clients.
Args:
            registered_user_id: The MXID that has been registered as a previous step
                of this SSO login.
request: The request to respond to.
client_redirect_url: The URL to which to offer to redirect the user (or to
redirect them directly if whitelisted).
"""
await self._auth_handler.complete_sso_login(
registered_user_id, request, client_redirect_url,
)
@defer.inlineCallbacks
def get_state_events_in_room(
self, room_id: str, types: Iterable[Tuple[str, Optional[str]]]
) -> defer.Deferred:
"""Gets current state events for the given room.
(This is exposed for compatibility with the old SpamCheckerApi. We should
probably deprecate it and replace it with an async method in a subclass.)
Args:
room_id: The room ID to get state events in.
types: The event type and state key (using None
to represent 'any') of the room state to acquire.
Returns:
twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]:
The filtered state events in the room.
"""
state_ids = yield defer.ensureDeferred(
self._store.get_filtered_current_state_ids(
room_id=room_id, state_filter=StateFilter.from_types(types)
)
)
state = yield defer.ensureDeferred(self._store.get_events(state_ids.values()))
return state.values()
class PublicRoomListManager:
"""Contains methods for adding to, removing from and querying whether a room
is in the public room list.
"""
def __init__(self, hs: "HomeServer"):
self._store = hs.get_datastore()
async def room_is_in_public_room_list(self, room_id: str) -> bool:
"""Checks whether a room is in the public room list.
Args:
room_id: The ID of the room.
Returns:
Whether the room is in the public room list. Returns False if the room does
not exist.
"""
room = await self._store.get_room(room_id)
if not room:
return False
return room.get("is_public", False)
async def add_room_to_public_room_list(self, room_id: str) -> None:
"""Publishes a room to the public room list.
Args:
room_id: The ID of the room.
"""
await self._store.set_room_is_public(room_id, True)
async def remove_room_from_public_room_list(self, room_id: str) -> None:
"""Removes a room from the public room list.
Args:
room_id: The ID of the room.
"""
await self._store.set_room_is_public(room_id, False)
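# --- Editor's sketch: how an extension module might drive ModuleApi ---------
# A minimal, hypothetical example (this class is not part of Synapse itself);
# it only uses methods defined above (get_qualified_user_id, check_user_exists,
# register_user, register_device) and is meant to be awaited from Twisted code.
class ExampleRegistrationModule:
    def __init__(self, module_api: ModuleApi):
        self._api = module_api

    async def ensure_user(self, localpart: str) -> str:
        """Return an access token for `localpart`, registering the user if needed."""
        user_id = self._api.get_qualified_user_id(localpart)
        canonical = await self._api.check_user_exists(user_id)
        if canonical is None:
            canonical = await self._api.register_user(localpart)
        _device_id, access_token = await self._api.register_device(canonical)
        return access_token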
|
py | b410e400fb5388afbca504b106499184312bd7da | import gc
import time
from typing import List
import numpy
import pandas
import torch
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.optim.lr_scheduler import ReduceLROnPlateau, CosineAnnealingLR, \
StepLR, MultiStepLR
from helpers._logging import epoch_progress
from helpers.generic import number_h
from helpers.training import save_checkpoint, load_state_by_id
from libs.joeynmt.builders import NoamScheduler
from modules.data.loaders import MultiDataLoader
from modules.optim.lookahead import Lookahead
from modules.optim.radam import RAdam
from modules.callbacks import TrainerCallback
from mylogger.experiment import Experiment
class Trainer:
def __init__(self,
model,
train_loader,
valid_loader,
config,
device,
callbacks: List[TrainerCallback] = None,
resume_state_id: str = None,
resume_state=None,
**kwargs):
self.config = config
self.device = device
self.epoch = 0
self.step = 0
self.failed_batches = 0
self.early_stop = False
self.progress_log = None
self.best_checkpoint = None
self.train_loader = train_loader
self.valid_loader = valid_loader
self.n_batches = len(train_loader)
self.total_steps = self.n_batches * self.config["epochs"]
self.model = model
self.callbacks = callbacks
# -----------------------------------------------------------------
# Optimization
# -----------------------------------------------------------------
self.optimizers = self.__init_optimizer(self.config["optim"])
if len(self.optimizers) == 1:
self.scheduler = self.__init_scheduler(self.config["optim"])
if self.config["optim"]["scheduler"] == "noam":
self.scheduler.step()
else:
self.scheduler = None
            raise ValueError("Currently schedulers support only 1 optimizer!")
self.loss_weights = self.__init_loss_weights()
# -----------------------------------------------------------------
# Model definition
# -----------------------------------------------------------------
self.model.to(device)
print(self.model)
total_params = sum(p.numel() for p in self.model.parameters())
total_trainable_params = sum(p.numel() for p in self.model.parameters()
if p.requires_grad)
print("Total Params:", number_h(total_params))
print("Total Trainable Params:", number_h(total_trainable_params))
# -----------------------------------------------------------------
# Experiment definition - Resume training from interrupted state
# -----------------------------------------------------------------
if resume_state_id is not None:
resume_state = load_state_by_id(self.config["name"],
resume_state_id)
self.model_type = self.config["model"].get("type", "rnn")
if resume_state is not None:
self.exp = resume_state["exp"]
self.load_state(resume_state)
if self.exp.has_finished():
print("Experiment is already finished!")
try:
model.tie_weights()
except:
pass
print(f"Resuming from previous state with id:{resume_state_id}...")
else:
print(f"Starting with state id:{resume_state_id}...")
self.exp = Experiment(self.config["name"], config,
src_dirs=kwargs.get("src_dirs"),
resume_state_id=resume_state_id)
# print initial learning rate
self.exp.line("lr", None, "Learning Rate",
self.optimizers[0].param_groups[0]['lr'])
def _cyclical_schedule(self, cycle, n_cycles, floor=0., ceil=1., start=0):
warm = [floor] * start
anneal = numpy.linspace(floor, ceil, cycle).tolist() * n_cycles
anneal = anneal[:self.total_steps]
end = [ceil] * (self.total_steps - len(anneal))
return warm + anneal + end
def _linear_schedule(self, start, stop, floor=0., ceil=1.):
warm = [floor] * start
anneal = numpy.linspace(floor, ceil, stop - start).tolist()
end = [ceil] * (self.total_steps - stop)
return warm + anneal + end
def _sigmoid_schedule(self, start, stop, floor=0., ceil=1., k=10):
warm = [ceil] * start
anneal = numpy.linspace(ceil, floor, stop - start).tolist()
anneal = [(k / (k + numpy.exp(i / k))) for i, x in enumerate(anneal)]
end = [floor] * (self.total_steps - stop)
return warm + anneal + end
def __init_loss_weights(self):
loss_weights = dict()
for loss, p in self.config["losses"].items():
if isinstance(p["weight"], list):
floor = float(p["weight"][0])
ceil = float(p["weight"][1])
if p["annealing_schedule"] == "cyclical":
cycle = p["annealing_cycle"]
n_cycles = self.total_steps // cycle
start = p.get("annealing_start", 0)
schedule = self._cyclical_schedule(cycle, n_cycles,
floor=floor, ceil=ceil,
start=start)
loss_weights[loss] = schedule
elif p["annealing_schedule"] == "linear":
start = p.get("annealing_start", 0)
stop = p.get("annealing_stop", self.total_steps)
stop = min(self.total_steps, stop)
schedule = self._linear_schedule(start, stop, floor, ceil)
loss_weights[loss] = schedule
elif p["annealing_schedule"] == "sigmoid":
start = p.get("annealing_start", 0)
stop = p.get("annealing_stop", self.total_steps)
stop = min(self.total_steps, stop)
                    schedule = self._sigmoid_schedule(start, stop, floor, ceil)
loss_weights[loss] = schedule
else:
loss_weights[loss] = p["weight"]
return loss_weights
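    # --- Editor's sketch: the config shape __init_loss_weights expects ------
    # Hypothetical values; the keys mirror the lookups above. A scalar weight
    # is used as-is, while a [floor, ceil] pair is annealed over training:
    #
    #   "losses": {
    #       "lm": {"weight": 1.0},
    #       "kl": {"weight": [0.0, 1.0],
    #              "annealing_schedule": "linear",
    #              "annealing_start": 1000,
    #              "annealing_stop": 20000},
    #   }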
def anneal_init(self, param):
if isinstance(param, list):
if len(param) == 2:
steps = self.total_steps
else:
steps = param[2]
return numpy.linspace(param[0], param[1], num=steps).tolist()
else:
return param
def anneal_step(self, param):
if isinstance(param, list):
try:
_val = param[self.step]
except:
_val = param[-1]
else:
_val = param
return _val
def __tensors_to_device(self, batch):
return list(map(lambda x: x.to(self.device,
non_blocking=False) if x is not None else x,
batch))
def batch_to_device(self, batch):
"""
Move batch tensors to model's device
"""
if torch.is_tensor(batch[0]):
batch = self.__tensors_to_device(batch)
else:
batch = list(map(lambda x: self.__tensors_to_device(x), batch))
return batch
def _aggregate_losses(self, batch_losses):
"""
        This function computes a weighted sum of the model's losses
Returns:
loss_sum (int): the aggregation of the constituent losses
loss_list (list, int): the constituent losses
"""
if isinstance(batch_losses, (tuple, list)):
if self.loss_weights is None:
_ws = [1.0 for _ in batch_losses]
else:
_ws = [self.anneal_step(w) for w in self.loss_weights]
total = sum(w * x for x, w in zip(batch_losses, _ws)) / len(
batch_losses)
# losses = [w * x.item() for x, w in zip(batch_losses, _ws)]
losses = [x.item() for x, w in zip(batch_losses, _ws)]
elif isinstance(batch_losses, dict):
if self.loss_weights is None:
_ws = {n: 1.0 for n, _ in batch_losses.items()}
else:
_ws = {k: self.anneal_step(w) for k, w
in self.loss_weights.items()}
total = sum(v * _ws[k] for k, v in batch_losses.items()) / len(
batch_losses)
# losses = {n: x.item() * _ws[n] for n, x in batch_losses.items()}
losses = {n: x.item() for n, x in batch_losses.items()}
else:
total = batch_losses
losses = batch_losses.item()
return total, losses
def __init_optimizer(self, config):
parameters = filter(lambda p: p.requires_grad, self.model.parameters())
if config["optimizer"] == "adam":
optimizer = torch.optim.Adam(parameters, lr=config["lr"],
# betas=(0.9, 0.98), eps=1e-9,
weight_decay=config["weight_decay"])
elif config["optimizer"] == "radam":
optimizer = RAdam(parameters, lr=config["lr"])
elif config["optimizer"] == "ranger":
base_optim = RAdam(parameters, lr=config["lr"])
optimizer = Lookahead(base_optim, k=config["k"])
elif config["optimizer"] == "sgd":
optimizer = torch.optim.SGD(parameters, lr=config["lr"],
weight_decay=config["weight_decay"])
else:
raise ValueError
if not isinstance(optimizer, (tuple, list)):
optimizer = [optimizer]
return optimizer
def __init_scheduler(self, config):
if config["scheduler"] == "plateau":
return ReduceLROnPlateau(self.optimizers[0], 'min',
patience=config["patience"],
factor=config["gamma"],
verbose=True,
min_lr=config["min_lr"])
elif config["scheduler"] == "cosine":
return CosineAnnealingLR(self.optimizers[0],
T_max=self.config["epochs"],
eta_min=config["eta_min"])
elif config["scheduler"] == "step":
return StepLR(self.optimizers[0],
step_size=config["step_size"],
gamma=config["gamma"])
elif config["scheduler"] == "multistep":
return MultiStepLR(self.optimizers[0],
milestones=config["milestones"],
gamma=config["gamma"])
elif config["scheduler"] == "noam":
return NoamScheduler(self.model.ninp,
self.optimizers[0],
factor=config.get("factor", 1),
warmup=config.get("warmup", 8000))
else:
return None
def step_scheduler(self, loss=None):
if self.scheduler is not None:
if self.config["optim"]["scheduler"] == "plateau":
if loss is not None:
self.scheduler.step(loss)
else:
self.scheduler.step()
if self.step % self.config["logging"]["log_interval"] == 0:
self.exp.line("lr", None, "Learning Rate",
self.optimizers[0].param_groups[0]['lr'])
def process_batch(self, *args, **kwargs):
raise NotImplementedError
@staticmethod
def cross_entropy_loss(logits, labels, lengths=None, ignore_index=0):
"""
Compute a sequence loss (i.e. per timestep).
Used for tasks such as Translation, Language Modeling and
Sequence Labelling.
"""
_logits = logits.contiguous().view(-1, logits.size(-1))
_labels = labels.contiguous().view(-1)
if lengths is None:
loss = F.cross_entropy(_logits, _labels, ignore_index=ignore_index)
return loss
else:
_loss = F.cross_entropy(_logits, _labels, ignore_index=ignore_index,
reduction='none')
_loss_per_step = _loss.view(labels.size())
loss = _loss.sum() / lengths.float().sum()
return loss, _loss_per_step
def grads(self):
"""
Get the list of the norms of the gradients for each parameter
"""
return [(name, parameter.grad.norm().item())
for name, parameter in self.model.named_parameters()
if parameter.requires_grad and parameter.grad is not None]
@staticmethod
def namestr(obj, namespace):
return [name for name in namespace if namespace[name] is obj]
def empty_batch_outputs(self, outputs):
pass
def train_step(self, batch, epoch_losses, batch_index, epoch_start):
batch = self.batch_to_device(batch)
# forward pass using the model-specific _process_batch()
batch_losses, batch_outputs = self.process_batch(*batch)
# ----------------------------------------------------------------
# Callbacks: Batch Forward End
# ----------------------------------------------------------------
for c in self.callbacks:
c.batch_forward_end(self, batch, epoch_losses,
batch_losses, batch_outputs)
# ----------------------------------------------------------------
# aggregate the losses
loss_sum, loss_list = self._aggregate_losses(batch_losses)
if isinstance(self.train_loader, MultiDataLoader):
loss_list["loader"] = self.train_loader.get_current_loader()
epoch_losses.append(loss_list)
# back-propagate
loss_sum.backward()
# ----------------------------------------------------------------
# Logging
# ----------------------------------------------------------------
if self.step % self.config["logging"]["log_interval"] == 0:
self.progress_log = epoch_progress(self.epoch, batch_index,
self.n_batches, epoch_start,
self.config["name"])
# Callbacks: Batch Backward End
for c in self.callbacks:
c.batch_backward_end(self, batch, epoch_losses, batch_losses,
batch_outputs)
# ----------------------------------------------------------------
if self.config["optim"]["clip"] is not None:
# clip_grad_norm_(self.model.parameters(), self.clip)
for optimizer in self.optimizers:
clip_grad_norm_((p for group in optimizer.param_groups
for p in group['params']),
self.config["optim"]["clip"])
# update weights
for optimizer in self.optimizers:
optimizer.step()
optimizer.zero_grad()
# Callbacks: Batch Backward End
for c in self.callbacks:
c.batch_end(self, batch, epoch_losses, batch_losses,
batch_outputs)
# ----------------------------------------------------------------
# Explicitly free GPU memory
# ----------------------------------------------------------------
if batch_outputs is not None:
self.empty_batch_outputs(batch_outputs)
batch_outputs.clear()
batch_losses.clear()
del batch[:]
del batch_losses, batch_outputs, batch, loss_sum
def free_gpu(self):
for p in self.model.parameters():
if p.grad is not None:
del p.grad # free some memory
try:
gc.collect()
torch.cuda.empty_cache()
except:
print("Failed to free GPU memory!")
def train_epoch(self):
"""
Train the network for one epoch and return the average loss.
* This will be a pessimistic approximation of the true loss
of the network, as the loss of the first batches will be higher
        than the true loss.
Returns:
loss (float, list(float)): list of mean losses
"""
self.model.train()
epoch_losses = []
self.epoch += 1
epoch_start = time.time()
# self.free_gpu()
try:
self.train_loader.reset()
except:
pass
for batch_index, batch in enumerate(self.train_loader, 1):
self.model.train()
self.step += 1
try:
self.train_step(batch, epoch_losses, batch_index, epoch_start)
except RuntimeError as e:
print(f"Error processing batch: {batch_index}. Trying again...")
print(e)
try:
self.free_gpu()
self.train_step(batch, epoch_losses, batch_index,
epoch_start)
except RuntimeError as e:
self.failed_batches += 1
# print('| WARNING: failed again! skipping batch')
continue
for c in self.callbacks:
try:
c.batch_start(self)
except Exception as e:
pass
if self.config["optim"].get("interval", "epoch") == "batch":
self.step_scheduler()
if self.early_stop:
break
# explicitly free memory
try:
del batch[:]
except:
pass
del batch
for c in self.callbacks:
c.train_epoch_end(self, epoch_losses)
# self.free_gpu()
self.exp.save()
return epoch_losses
def aggregate_eval_losses(self, losses):
if "val_loss" in self.config["optim"]:
_key = self.config["optim"]["val_loss"]
return pandas.DataFrame(losses)[_key].mean()
else:
return pandas.DataFrame(losses).mean().sum()
def eval_batch(self, batch):
batch = self.batch_to_device(batch)
batch_losses, batch_outputs = self.process_batch(*batch)
# aggregate the losses into a single loss value
loss, _losses = self._aggregate_losses(batch_losses)
# -------------------------------------------
# Explicitly free GPU memory
# -------------------------------------------
if batch_outputs is not None:
self.empty_batch_outputs(batch_outputs)
batch_outputs.clear()
batch_losses.clear()
del batch[:]
del batch_losses, batch_outputs, batch, loss
return _losses
def eval_epoch(self, only_eval=False, custom_loader=None):
"""
Evaluate the network for one epoch and return the average loss.
Returns:
loss (float, list(float)): list of mean losses
"""
self.model.eval()
losses = []
if custom_loader is not None:
loader = custom_loader
else:
loader = self.valid_loader
        # Not all loaders implement reset(); ignore those that don't.
try:
loader.reset()
except:
pass
with torch.no_grad():
for i_batch, batch in enumerate(loader, 1):
try:
_losses = self.eval_batch(batch)
except RuntimeError:
try:
self.free_gpu()
_losses = self.eval_batch(batch)
except RuntimeError as e:
raise e
losses.append(_losses)
# explicitly free memory
try:
del batch[:]
except:
pass
del batch
# just return the losses and skip the rest steps. useful for getting
# the loss on the val set without waiting for the end of an epoch
if only_eval:
return losses
for c in self.callbacks:
c.eval_epoch_end(self, losses)
if self.config["optim"].get("interval", "epoch") == "epoch":
self.step_scheduler(self.aggregate_eval_losses(losses))
return losses
def get_vocab(self):
raise NotImplementedError
def get_state(self):
"""
Return a dictionary with the current state of the model.
The state should contain all the important properties which will
        be saved when taking a model checkpoint.
Returns:
state (dict)
"""
state = {
"config": self.config,
"epoch": self.epoch,
"step": self.step,
"early_stop": self.early_stop,
"callbacks": self.callbacks,
"progress_log": self.progress_log,
"best_checkpoint": self.best_checkpoint,
"loss_weights": self.loss_weights,
"exp": self.exp,
"model": self.model.state_dict(),
"model_class": self.model.__class__.__name__,
"optimizers": [x.state_dict() for x in self.optimizers],
"vocab": self.get_vocab(),
}
if self.scheduler is not None:
state["scheduler"] = self.scheduler.state_dict()
else:
state["scheduler"] = None
return state
def load_state(self, state):
self.config = state["config"]
self.epoch = state["epoch"]
self.step = state["step"]
self.early_stop = state["early_stop"]
self.callbacks = state["callbacks"]
self.progress_log = state["progress_log"]
self.best_checkpoint = state["best_checkpoint"]
self.loss_weights = state["loss_weights"]
self.model.load_state_dict(state["model"])
self.model.to(self.device)
for i, opt in enumerate(self.optimizers):
self.optimizers[i].load_state_dict(state["optimizers"][i])
for s in self.optimizers[i].state.values():
for k, v in s.items():
if torch.is_tensor(v):
                        s[k] = v.to(self.device)  # move optimizer state tensors to the training device
if state["scheduler"] is not None:
self.scheduler.load_state_dict(state["scheduler"])
else:
self.scheduler = None
def checkpoint(self, name=None, timestamp=False, tags=None, verbose=False):
if name is None:
name = self.config["name"]
if self.exp is not None:
self.exp.save()
path = self.exp.output_dir
else:
path = None
self.best_checkpoint = save_checkpoint(self.get_state(),
path=path,
name=name, tag=tags,
timestamp=timestamp,
verbose=verbose)
return self.best_checkpoint
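# --- Editor's sketch: the minimal surface a concrete Trainer must provide ---
# A hypothetical subclass; the model's (logits, state) return value, the batch
# layout and the dataset's `vocab` attribute are assumptions, but only hooks
# referenced by train_step()/eval_epoch() above are implemented.
class ExampleLMTrainer(Trainer):
    def process_batch(self, inputs, labels, lengths):
        logits, _ = self.model(inputs, lengths)
        loss, _per_step = self.cross_entropy_loss(logits, labels, lengths)
        return {"lm": loss}, {"logits": logits}

    def empty_batch_outputs(self, outputs):
        # drop references to large tensors before the trainer clears the dict
        outputs.pop("logits", None)

    def get_vocab(self):
        return getattr(self.train_loader.dataset, "vocab", None)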
|
py | b410e4ae122283d70bbc33442425dea01761d952 | from uuid import uuid4
import requests
from flask import Flask, jsonify, request
from Framework.blockchain_pow import Blockchain
# Instantiate the Node
app = Flask(__name__)
# Generate a globally unique address for this node
node_identifier = str(uuid4()).replace('-', '')
# Instantiate the Blockchain
blockchain = Blockchain()
@app.route('/mine', methods=['GET'])
def mine():
# We run the proof of work algorithm to get the next proof...
last_block = blockchain.last_block
proof = blockchain.proof_of_work(last_block)
# We must receive a reward for finding the proof.
# The sender is "0" to signify that this node has mined a new coin.
blockchain.new_transaction(
sender="0",
recipient=node_identifier,
amount=1,
)
# Forge the new Block by adding it to the chain
previous_hash = blockchain.hash(last_block)
block = blockchain.new_block(proof, previous_hash)
response = {
'message': "New Block Forged",
'index': block['index'],
'transactions': block['transactions'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
}
return jsonify(response), 200
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.get_json(force=True)
# Check that the required fields are in the POST'ed data
required = ['sender', 'recipient', 'amount']
if not all(k in values for k in required):
return 'Missing values', 400
# Create a new Transaction
index = blockchain.new_transaction(values['sender'], values['recipient'], values['amount'])
response = {'message': f'Transaction will be added to Block {index}'}
return jsonify(response), 201
@app.route('/chain', methods=['GET'])
def full_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain),
}
return jsonify(response), 200
@app.route('/nodes/register', methods=['POST'])
def register_nodes():
values = request.get_json()
nodes = values.get('nodes')
if nodes is None:
return "Error: Please supply a valid list of nodes", 400
for node in nodes:
blockchain.register_node(node)
response = {
'message': 'New nodes have been added',
'total_nodes': list(blockchain.nodes),
}
return jsonify(response), 201
@app.route('/nodes/resolve', methods=['GET'])
def consensus():
replaced = blockchain.resolve_conflicts()
if replaced:
response = {
'message': 'Our chain was replaced',
'new_chain': blockchain.chain
}
else:
response = {
'message': 'Our chain is authoritative',
'chain': blockchain.chain
}
return jsonify(response), 200
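# --- Editor's sketch: exercising the API above with `requests` --------------
# A hypothetical client session against a locally running node; the endpoints
# and payload fields mirror the routes defined in this file.
def example_client(base='http://localhost:5000'):
    import requests
    requests.post(f'{base}/transactions/new',
                  json={'sender': 'alice', 'recipient': 'bob', 'amount': 5})
    requests.get(f'{base}/mine')                  # forge a block containing the pending transaction
    chain = requests.get(f'{base}/chain').json()  # inspect the resulting chain
    return chain['length']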
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('-p', '--port', default=5000, type=int, help='port to listen on')
args = parser.parse_args()
port = args.port
app.run(host='0.0.0.0', port=port) |
py | b410e560bd882d8a9a6cb5838d42e6b02217c345 | """Classes and utilities related to U.S. federal taxes."""
from .fica import FICATax, MedicareTax, SocialSecurityTax
from .income import FederalIncomeTax
from .status import FilingStatus
|
py | b410e5f641335e57dfcb37ed79b0d132ecbc3542 | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import inspect
import os
import sys
import warnings
from abc import ABC, abstractmethod
from importlib import import_module
from typing import Any, Dict, List, Mapping, Optional, Sequence, Union
from monai.bundle.utils import EXPR_KEY
from monai.utils import ensure_tuple, first, instantiate, optional_import
__all__ = ["ComponentLocator", "ConfigItem", "ConfigExpression", "ConfigComponent", "Instantiable"]
class Instantiable(ABC):
"""
Base class for an instantiable object.
"""
@abstractmethod
def is_disabled(self, *args: Any, **kwargs: Any) -> bool:
"""
Return a boolean flag to indicate whether the object should be instantiated.
"""
raise NotImplementedError(f"subclass {self.__class__.__name__} must implement this method.")
@abstractmethod
def instantiate(self, *args: Any, **kwargs: Any) -> object:
"""
Instantiate the target component and return the instance.
"""
raise NotImplementedError(f"subclass {self.__class__.__name__} must implement this method.")
class ComponentLocator:
"""
Scan all the available classes and functions in the MONAI package and map them with the module paths in a table.
    It's used to locate the module path for a provided component name.
Args:
excludes: if any string of the `excludes` exists in the full module name, don't import this module.
"""
MOD_START = "monai"
def __init__(self, excludes: Optional[Union[Sequence[str], str]] = None):
self.excludes = [] if excludes is None else ensure_tuple(excludes)
self._components_table: Optional[Dict[str, List]] = None
def _find_module_names(self) -> List[str]:
"""
Find all the modules start with MOD_START and don't contain any of `excludes`.
"""
return [
m for m in sys.modules.keys() if m.startswith(self.MOD_START) and all(s not in m for s in self.excludes)
]
def _find_classes_or_functions(self, modnames: Union[Sequence[str], str]) -> Dict[str, List]:
"""
Find all the classes and functions in the modules with specified `modnames`.
Args:
modnames: names of the target modules to find all the classes and functions.
"""
table: Dict[str, List] = {}
# all the MONAI modules are already loaded by `load_submodules`
for modname in ensure_tuple(modnames):
try:
# scan all the classes and functions in the module
module = import_module(modname)
for name, obj in inspect.getmembers(module):
if (inspect.isclass(obj) or inspect.isfunction(obj)) and obj.__module__ == modname:
if name not in table:
table[name] = []
table[name].append(modname)
except ModuleNotFoundError:
pass
return table
def get_component_module_name(self, name: str) -> Optional[Union[List[str], str]]:
"""
Get the full module name of the class or function with specified ``name``.
If target component name exists in multiple packages or modules, return a list of full module names.
Args:
name: name of the expected class or function.
"""
if not isinstance(name, str):
raise ValueError(f"`name` must be a valid string, but got: {name}.")
if self._components_table is None:
# init component and module mapping table
self._components_table = self._find_classes_or_functions(self._find_module_names())
mods: Optional[Union[List[str], str]] = self._components_table.get(name, None)
if isinstance(mods, list) and len(mods) == 1:
mods = mods[0]
return mods
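# --- Editor's sketch --------------------------------------------------------
# Assuming MONAI's submodules have already been imported, the locator maps a
# bare class/function name to the module(s) that define it (output shown here
# is illustrative):
#
#   locator = ComponentLocator()
#   locator.get_component_module_name("LoadImaged")
#   # -> "monai.transforms.io.dictionary"  (a list is returned if ambiguous)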
class ConfigItem:
"""
Basic data structure to represent a configuration item.
A `ConfigItem` instance can optionally have a string id, so that other items can refer to it.
    It has a built-in `config` property to store the configuration object.
Args:
config: content of a config item, can be objects of any types,
a configuration resolver may interpret the content to generate a configuration object.
id: name of the current config item, defaults to empty string.
"""
def __init__(self, config: Any, id: str = "") -> None:
self.config = config
self.id = id
def get_id(self) -> str:
"""
Get the ID name of current config item, useful to identify config items during parsing.
"""
return self.id
def update_config(self, config: Any):
"""
Replace the content of `self.config` with new `config`.
A typical usage is to modify the initial config content at runtime.
Args:
config: content of a `ConfigItem`.
"""
self.config = config
def get_config(self):
"""
Get the config content of current config item.
"""
return self.config
def __repr__(self) -> str:
return str(self.config)
class ConfigComponent(ConfigItem, Instantiable):
"""
Subclass of :py:class:`monai.bundle.ConfigItem`, this class uses a dictionary with string keys to
represent a component of `class` or `function` and supports instantiation.
Currently, three special keys (strings surrounded by ``_``) are defined and interpreted beyond the regular literals:
- class or function identifier of the python module, specified by ``"_target_"``,
indicating a built-in python class or function such as ``"LoadImageDict"``,
or a full module name, such as ``"monai.transforms.LoadImageDict"``.
- ``"_requires_"`` (optional): specifies reference IDs (string starts with ``"@"``) or ``ConfigExpression``
of the dependencies for this ``ConfigComponent`` object. These dependencies will be
evaluated/instantiated before this object is instantiated. It is useful when the
component doesn't explicitly depend on the other `ConfigItems` via its arguments,
but requires the dependencies to be instantiated/evaluated beforehand.
- ``"_disabled_"`` (optional): a flag to indicate whether to skip the instantiation.
Other fields in the config content are input arguments to the python module.
.. code-block:: python
from monai.bundle import ComponentLocator, ConfigComponent
locator = ComponentLocator(excludes=["modules_to_exclude"])
config = {
"_target_": "LoadImaged",
"keys": ["image", "label"]
}
configer = ConfigComponent(config, id="test", locator=locator)
image_loader = configer.instantiate()
print(image_loader) # <monai.transforms.io.dictionary.LoadImaged object at 0x7fba7ad1ee50>
Args:
config: content of a config item.
id: name of the current config item, defaults to empty string.
locator: a ``ComponentLocator`` to convert a module name string into the actual python module.
if `None`, a ``ComponentLocator(excludes=excludes)`` will be used.
excludes: if ``locator`` is None, create a new ``ComponentLocator`` with ``excludes``.
See also: :py:class:`monai.bundle.ComponentLocator`.
"""
non_arg_keys = {"_target_", "_disabled_", "_requires_"}
def __init__(
self,
config: Any,
id: str = "",
locator: Optional[ComponentLocator] = None,
excludes: Optional[Union[Sequence[str], str]] = None,
) -> None:
super().__init__(config=config, id=id)
self.locator = ComponentLocator(excludes=excludes) if locator is None else locator
@staticmethod
def is_instantiable(config: Any) -> bool:
"""
Check whether this config represents a `class` or `function` that is to be instantiated.
Args:
config: input config content to check.
"""
return isinstance(config, Mapping) and "_target_" in config
def resolve_module_name(self):
"""
Resolve the target module name from current config content.
The config content must have ``"_target_"`` key.
"""
config = dict(self.get_config())
target = config.get("_target_")
if not isinstance(target, str):
raise ValueError("must provide a string for the `_target_` of component to instantiate.")
module = self.locator.get_component_module_name(target)
if module is None:
# target is the full module name, no need to parse
return target
if isinstance(module, list):
warnings.warn(
f"there are more than 1 component have name `{target}`: {module}, use the first one `{module[0]}."
f" if want to use others, please set its full module path in `_target_` directly."
)
module = module[0]
return f"{module}.{target}"
def resolve_args(self):
"""
Utility function used in `instantiate()` to resolve the arguments from current config content.
"""
return {k: v for k, v in self.get_config().items() if k not in self.non_arg_keys}
def is_disabled(self) -> bool: # type: ignore
"""
Utility function used in `instantiate()` to check whether to skip the instantiation.
"""
_is_disabled = self.get_config().get("_disabled_", False)
return _is_disabled.lower().strip() == "true" if isinstance(_is_disabled, str) else bool(_is_disabled)
def instantiate(self, **kwargs) -> object: # type: ignore
"""
Instantiate component based on ``self.config`` content.
The target component must be a `class` or a `function`, otherwise, return `None`.
Args:
kwargs: args to override / add the config args when instantiation.
"""
if not self.is_instantiable(self.get_config()) or self.is_disabled():
# if not a class or function or marked as `disabled`, skip parsing and return `None`
return None
modname = self.resolve_module_name()
args = self.resolve_args()
args.update(kwargs)
try:
return instantiate(modname, **args)
except Exception as e:
raise RuntimeError(f"Failed to instantiate {self}.") from e
class ConfigExpression(ConfigItem):
"""
Subclass of :py:class:`monai.bundle.ConfigItem`, the `ConfigItem` represents an executable expression
(execute based on ``eval()``, or import the module to the `globals` if it's an import statement).
See also:
- https://docs.python.org/3/library/functions.html#eval.
For example:
.. code-block:: python
import monai
from monai.bundle import ConfigExpression
config = "$monai.__version__"
expression = ConfigExpression(config, id="test", globals={"monai": monai})
print(expression.evaluate())
Args:
config: content of a config item.
id: name of current config item, defaults to empty string.
globals: additional global context to evaluate the string.
"""
prefix = EXPR_KEY
run_eval = os.environ.get("MONAI_EVAL_EXPR", "1") != "0"
def __init__(self, config: Any, id: str = "", globals: Optional[Dict] = None) -> None:
super().__init__(config=config, id=id)
self.globals = globals if globals is not None else {}
def _parse_import_string(self, import_string: str):
"""parse single import statement such as "from monai.transforms import Resize"""
node = first(ast.iter_child_nodes(ast.parse(import_string)))
if not isinstance(node, (ast.Import, ast.ImportFrom)):
return None
if len(node.names) < 1:
return None
if len(node.names) > 1:
warnings.warn(f"ignoring multiple import alias '{import_string}'.")
name, asname = f"{node.names[0].name}", node.names[0].asname
asname = name if asname is None else f"{asname}"
if isinstance(node, ast.ImportFrom):
self.globals[asname], _ = optional_import(f"{node.module}", name=f"{name}")
return self.globals[asname]
if isinstance(node, ast.Import):
self.globals[asname], _ = optional_import(f"{name}")
return self.globals[asname]
return None
def evaluate(self, globals: Optional[Dict] = None, locals: Optional[Dict] = None):
"""
Execute the current config content and return the result if it is an expression, based on Python `eval()`.
For more details: https://docs.python.org/3/library/functions.html#eval.
Args:
globals: besides ``self.globals``, other global symbols used in the expression at runtime.
locals: besides ``globals``, may also have some local symbols used in the expression at runtime.
"""
value = self.get_config()
if not ConfigExpression.is_expression(value):
return None
optional_module = self._parse_import_string(value[len(self.prefix) :])
if optional_module is not None:
return optional_module
if not self.run_eval:
return f"{value[len(self.prefix) :]}"
globals_ = dict(self.globals)
if globals is not None:
for k, v in globals.items():
if k in globals_:
warnings.warn(f"the new global variable `{k}` conflicts with `self.globals`, override it.")
globals_[k] = v
return eval(value[len(self.prefix) :], globals_, locals)
@classmethod
def is_expression(cls, config: Union[Dict, List, str]) -> bool:
"""
Check whether the config is an executable expression string.
Currently, a string starting with the ``"$"`` character is interpreted as an expression.
Args:
config: input config content to check.
"""
return isinstance(config, str) and config.startswith(cls.prefix)
@classmethod
def is_import_statement(cls, config: Union[Dict, List, str]) -> bool:
"""
Check whether the config is an import statement (a special case of expression).
Args:
config: input config content to check.
"""
if not cls.is_expression(config):
return False
if "import" not in config:
return False
return isinstance(
first(ast.iter_child_nodes(ast.parse(f"{config[len(cls.prefix) :]}"))), (ast.Import, ast.ImportFrom)
)
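# --- Illustrative usage sketch (added for clarity, not part of the original module) ---
# The demo below shows how the three item types defined above fit together: a plain
# ConfigItem holding literal content, a ConfigComponent resolving its "_target_", and a
# ConfigExpression evaluated via eval(). The "_target_" path "monai.transforms.LoadImaged"
# is an assumed example value, not something taken from this file.
if __name__ == "__main__":
    item = ConfigItem(config={"lr": 1e-4}, id="settings")
    item.update_config({"lr": 1e-3})
    print(item.get_config())  # {'lr': 0.001}

    component = ConfigComponent(
        config={"_target_": "monai.transforms.LoadImaged", "keys": ["image"]}, id="loader"
    )
    print(component.resolve_module_name())  # a full dotted path is returned unchanged
    print(component.resolve_args())  # {'keys': ['image']}

    expression = ConfigExpression(config="$1 + 2", id="sum")
    print(expression.evaluate())  # 3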
|
py | b410e725de03d959213e50bb0d1640f97633e20e | import copy
from .ssa_types import slots_t
class ProcInfo(object):
def __init__(self, retblock, target):
self.retblock = retblock
self.target = target
self.jsrblocks = []
assert target is retblock.jump.target
def __str__(self): # pragma: no cover
return 'Proc{}<{}>'.format(self.target.key, ', '.join(str(b.key) for b in self.jsrblocks))
__repr__ = __str__
###########################################################################################
class ProcJumpBase(object):
@property
def params(self):
return [v for v in self.input.stack + self.input.localsAsList if v is not None]
# [v for v in self.input.stack if v] + [v for k, v in sorted(self.input.locals.items()) if v]
def replaceBlocks(self, blockDict):
self.target = blockDict.get(self.target, self.target)
def getExceptSuccessors(self): return ()
def getSuccessors(self): return self.getNormalSuccessors()
def getSuccessorPairs(self): return [(x,False) for x in self.getNormalSuccessors()]
def reduceSuccessors(self, pairsToRemove): return self
class ProcCallOp(ProcJumpBase):
def __init__(self, target, fallthrough, inslots, outslots):
self.fallthrough = fallthrough
self.target = target
self.input = inslots
self.output = outslots
for var in self.output.stack + self.output.locals.values():
if var is not None:
assert var.origin is None
var.origin = self
# def flatOutput(self): return [v for v in self.output.stack if v] + [v for k, v in sorted(self.output.locals.items()) if v]
def flatOutput(self): return self.output.stack + self.output.localsAsList
def getNormalSuccessors(self): return self.fallthrough, self.target
class DummyRet(ProcJumpBase):
def __init__(self, inslots, target):
self.target = target
self.input = inslots
def replaceVars(self, varDict):
newstack = [varDict.get(v, v) for v in self.input.stack]
newlocals = {k: varDict.get(v, v) for k, v in self.input.locals.items()}
self.input = slots_t(stack=newstack, locals=newlocals)
def getNormalSuccessors(self): return ()
def clone(self): return copy.copy(self) # target and input will be replaced later by calls to replaceBlocks/Vars
|
py | b410e7e860a85318b777e7c30cff12a334bb8672 | """
Support for RFXtrx sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.rfxtrx/
"""
import logging
import voluptuous as vol
from homeassistant.components import rfxtrx
from homeassistant.components.rfxtrx import (
ATTR_DATA_TYPE, ATTR_FIRE_EVENT, CONF_AUTOMATIC_ADD, CONF_DATA_TYPE,
CONF_DEVICES, CONF_FIRE_EVENT, DATA_TYPES)
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ENTITY_ID, ATTR_NAME, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
DEPENDENCIES = ['rfxtrx']
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DEVICES, default={}): {
cv.string: vol.Schema({
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(CONF_DATA_TYPE, default=[]):
vol.All(cv.ensure_list, [vol.In(DATA_TYPES.keys())]),
})
},
vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
}, extra=vol.ALLOW_EXTRA)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the RFXtrx platform."""
from RFXtrx import SensorEvent
sensors = []
for packet_id, entity_info in config[CONF_DEVICES].items():
event = rfxtrx.get_rfx_object(packet_id)
device_id = "sensor_{}".format(slugify(event.device.id_string.lower()))
if device_id in rfxtrx.RFX_DEVICES:
continue
_LOGGER.info("Add %s rfxtrx.sensor", entity_info[ATTR_NAME])
sub_sensors = {}
data_types = entity_info[ATTR_DATA_TYPE]
if not data_types:
data_types = ['']
for data_type in DATA_TYPES:
if data_type in event.values:
data_types = [data_type]
break
for _data_type in data_types:
new_sensor = RfxtrxSensor(None, entity_info[ATTR_NAME],
_data_type, entity_info[ATTR_FIRE_EVENT])
sensors.append(new_sensor)
sub_sensors[_data_type] = new_sensor
rfxtrx.RFX_DEVICES[device_id] = sub_sensors
add_devices(sensors)
def sensor_update(event):
"""Handle sensor updates from the RFXtrx gateway."""
if not isinstance(event, SensorEvent):
return
device_id = "sensor_" + slugify(event.device.id_string.lower())
if device_id in rfxtrx.RFX_DEVICES:
sensors = rfxtrx.RFX_DEVICES[device_id]
for data_type in sensors:
# Some multi-sensor devices send individual messages for each
# of their sensors. Update only if event contains the
# right data_type for the sensor.
if data_type not in event.values:
continue
sensor = sensors[data_type]
sensor.event = event
# Fire event
if sensor.should_fire_event:
sensor.hass.bus.fire(
"signal_received", {
ATTR_ENTITY_ID: sensor.entity_id,
}
)
return
# Add the entity if it does not exist and automatic_add is True
if not config[CONF_AUTOMATIC_ADD]:
return
pkt_id = "".join("{0:02x}".format(x) for x in event.data)
_LOGGER.info("Automatic add rfxtrx.sensor: %s", pkt_id)
data_type = ''
for _data_type in DATA_TYPES:
if _data_type in event.values:
data_type = _data_type
break
new_sensor = RfxtrxSensor(event, pkt_id, data_type)
sub_sensors = {}
sub_sensors[new_sensor.data_type] = new_sensor
rfxtrx.RFX_DEVICES[device_id] = sub_sensors
add_devices([new_sensor])
if sensor_update not in rfxtrx.RECEIVED_EVT_SUBSCRIBERS:
rfxtrx.RECEIVED_EVT_SUBSCRIBERS.append(sensor_update)
class RfxtrxSensor(Entity):
"""Representation of a RFXtrx sensor."""
def __init__(self, event, name, data_type, should_fire_event=False):
"""Initialize the sensor."""
self.event = event
self._name = name
self.should_fire_event = should_fire_event
self.data_type = data_type
self._unit_of_measurement = DATA_TYPES.get(data_type, '')
def __str__(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if not self.event:
return None
return self.event.values.get(self.data_type)
@property
def name(self):
"""Get the name of the sensor."""
return "{} {}".format(self._name, self.data_type)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
if not self.event:
return None
return self.event.values
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
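# Illustrative configuration sketch (added for clarity, not from the original file): an
# example configuration.yaml entry accepted by the PLATFORM_SCHEMA above. The packet id
# and data types are placeholders; valid data types are the keys of DATA_TYPES.
#
# sensor:
#   - platform: rfxtrx
#     automatic_add: true
#     devices:
#       0a520802060100ff0e0269:
#         name: Living room sensor
#         fire_event: true
#         data_type:
#           - Temperature
#           - Humidity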
|
py | b410e8c037ee836258893b9904eba24bd9366bdc | from django.urls import path
from . import views
app_name = 'user'
urlpatterns = [
path('register', views.RegisterView.as_view(), name='register'),
path('login', views.LoginView.as_view(), name='login'),
path('active/<token>', views.ActiveView.as_view(), name='active'),
]
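# Illustrative sketch (added for clarity, not from the original file): with app_name set
# to "user", these routes are addressed by namespace when reversing URLs, e.g.:
#
#   from django.urls import reverse
#   reverse("user:login")                               # -> ".../login"
#   reverse("user:active", kwargs={"token": "abc123"})  # -> ".../active/abc123"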
|
py | b410e8c2a9b19fa75567e5b251b6d85a4ea1d459 | #!/usr/bin/python3
import redis
from redis.commands.search.field import VectorField
from redis.commands.search.query import Query
import numpy as np
NUMBER_SAMPLES = 3200
FACE_IMAGE_VECTOR_FIELD='face_image_vector'
IMAGE_VECTOR_DIMENSION=40
r = redis.Redis(host='127.0.0.1', port=6379, password='', decode_responses=True)
r.flushdb()
def store_olivetti_models():
global r
for person in range(1, 41):
person = "s" + str(person)
for face in range(1, 6):
facepath = '../olivetti-database/' + person + "/" + str(face) + '.bmp'
print ("Training face: " + facepath)
file1 = open('../../tulipo/olivetti-models/'+person+'_'+str(face)+'.txt', 'r')
Lines = file1.readlines()
count = 0
# Strips the newline character
for line in Lines:
lst = line.split()
jet = np.array(lst)
count += 1
#print("Line{}: {}".format(count, line.strip()))
face_jet_vector = jet.astype(np.float32).tobytes()
#print(format(face_image_vector,'x'))
face_data_values ={ 'person_id':person,
'person_path':facepath,
FACE_IMAGE_VECTOR_FIELD:face_jet_vector}
r.hset('face_'+person+'_'+str(face)+'_'+str(count),mapping=face_data_values)
def test_olivetti_models_vect():
success = 0
for person in range(1, 41):
person = "s" + str(person)
for face in range(6, 11):
facepath = '../olivetti-database/' + person + "/" + str(face) + '.bmp'
print ("Testing face: " + facepath)
found = find_face(person,str(face))
if (person == found):
success = success +1
print(success/200*100)
def create_hnsw_index (redis_conn,index_name,vector_field_name,number_of_vectors, vector_dimensions=IMAGE_VECTOR_DIMENSION, distance_metric='L2',M=40,EF=200):
global r
# note: the schema below hardcodes the vector field, dimension and distance metric,
# so most of this helper's parameters are currently unused
schema = (VectorField(FACE_IMAGE_VECTOR_FIELD, "HNSW", {"TYPE": "FLOAT32", "DIM": IMAGE_VECTOR_DIMENSION, "DISTANCE_METRIC": "L2"}),)
hnsw_index = r.ft().create_index(schema)
return hnsw_index
def find_face(person,face):
global r
r.delete("face")
file1 = open('../../tulipo/olivetti-models/'+person+'_'+str(face)+'.txt', 'r')
Lines = file1.readlines()
count = 0
# Strips the newline character
for line in Lines:
lst = line.split()
jet = np.array(lst)
count += 1
#print("Line{}: {}".format(count, line.strip()))
face_jet_vector = jet.astype(np.float32).tobytes()
q = Query("*=>[KNN 1 @face_image_vector $vec]").return_field("__face_image_vector_score")
res = r.ft().search(q, query_params={"vec": face_jet_vector})
for doc in res.docs:
#print ('Recognized face: '+ doc.id.split("_")[1])
r.zincrby("face", 1, doc.id.split("_")[1])
recognized = r.zrevrangebyscore("face", "inf", "-inf", start=0, num=1)
print(recognized[0])
return recognized[0]
my_hnsw_index = create_hnsw_index(r,'my_hnsw_index',FACE_IMAGE_VECTOR_FIELD,NUMBER_SAMPLES,IMAGE_VECTOR_DIMENSION,'L2',M=40,EF=200)
store_olivetti_models()
test_olivetti_models_vect()
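# Illustrative sketch (added for clarity, not part of the original script): each training
# jet is stored as a Redis hash whose key encodes person, face and line number, e.g.
#
#   HGETALL face_s1_3_17
#     person_id          -> "s1"
#     person_path        -> "../olivetti-database/s1/3.bmp"
#     face_image_vector  -> raw float32 bytes (DIM=40 per the index definition)
#
# find_face() tallies the KNN hits per person in the sorted set "face" and returns the
# person with the highest score.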
|
py | b410e8ce5ab5fd9554e467fcf43858cdff5acf55 | from django.urls import path
from . import views
app_name = "diary"
urlpatterns = [
path('main/', views.Diary_Main.as_view(), name='main diaries module'),
path('startchat/', views.Startchat.as_view(), name='start chat'),
path('chat/<int:diary_id>/', views.Chat.as_view(), name='chat'),
path('feeling/<int:diary_id>/', views.Feeling.as_view(), name='feeling'),
path('detail/<int:diary_id>/', views.DiaryDetail.as_view(), name='diary detail'),
path('calendar/', views.ThisMonth_Calendar.as_view(), name='this month calendar'),
path('calendar/<int:month>/', views.OtherMonth_Calendar.as_view(), name='other month calendar'),
path('question/', views.Question.as_view(), name='add question'),
path('question/list/', views.QuestionList.as_view(), name='list question'),
path('question_set/', views.CreateQuestionSet.as_view(), name='question_set'), # temporary URL
] |
py | b410e92a7ffd1d166317160f3eb41394e4865e35 | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
INCIDENT_ID = "incident_id"
ORGANIZATION_ID = "organization_id"
PATCH = "patch"
class Output:
PATCH_STATUS = "patch_status"
class PatchIncidentInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"incident_id": {
"type": "number",
"title": "Incident ID",
"description": "The incident ID",
"order": 2
},
"organization_id": {
"type": "number",
"title": "Organization ID",
"description": "The organization ID",
"order": 1
},
"patch": {
"type": "object",
"title": "Patch",
"description": "The incident properties to update, in JSON format. Please see the PatchDTO JSON reference in your Resilient API documentation",
"order": 3
}
},
"required": [
"organization_id",
"incident_id",
"patch"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class PatchIncidentOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"patch_status": {
"$ref": "#/definitions/PatchStatusDTO",
"title": "Patch Status",
"description": "Patch status",
"order": 1
}
},
"definitions": {
"FieldPatchFailureDTO": {
"type": "object",
"title": "FieldPatchFailureDTO",
"properties": {
"actual_current_value": {
"type": "object",
"title": "Actual Current Value",
"order": 3
},
"field": {
"type": "object",
"title": "Field",
"order": 1
},
"your_original_value": {
"type": "object",
"title": "Your Original Value",
"order": 2
}
}
},
"PatchStatusDTO": {
"type": "object",
"title": "PatchStatusDTO",
"properties": {
"field_failure": {
"type": "array",
"title": "Field Failure",
"items": {
"$ref": "#/definitions/FieldPatchFailureDTO"
},
"order": 1
},
"hints": {
"type": "array",
"title": "Hints",
"items": {
"type": "string"
},
"order": 5
},
"message": {
"type": "string",
"title": "Message",
"order": 4
},
"success": {
"type": "boolean",
"title": "Success",
"order": 2
},
"title": {
"type": "string",
"title": "Title",
"order": 3
}
},
"definitions": {
"FieldPatchFailureDTO": {
"type": "object",
"title": "FieldPatchFailureDTO",
"properties": {
"actual_current_value": {
"type": "object",
"title": "Actual Current Value",
"order": 3
},
"field": {
"type": "object",
"title": "Field",
"order": 1
},
"your_original_value": {
"type": "object",
"title": "Your Original Value",
"order": 2
}
}
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
py | b410ea3f89614094eab79223e7bc551807a38ef2 | from copy import deepcopy
import bleach
import markdown
from django.db import models
from django.template.loader import get_template
from django.utils.timezone import now
from django.utils.translation import override, ugettext_lazy as _
from i18nfield.fields import I18nCharField, I18nTextField
from pretalx.common.mail import SendMailException
from pretalx.common.mixins import LogMixin
from pretalx.common.urls import EventUrls
class MailTemplate(LogMixin, models.Model):
event = models.ForeignKey(
to='event.Event',
on_delete=models.PROTECT,
related_name='mail_templates',
)
subject = I18nCharField(
max_length=200,
verbose_name=_('Subject'),
)
text = I18nTextField(
verbose_name=_('Text'),
)
reply_to = models.EmailField(
max_length=200,
blank=True, null=True,
verbose_name=_('Reply-To'),
help_text=_('Change the Reply-To address if you do not want to use the default orga address'),
)
bcc = models.CharField(
max_length=1000,
blank=True, null=True,
verbose_name=_('BCC'),
help_text=_('Enter comma separated addresses. Will receive a blind copy of every mail sent from this template. This may be a LOT!'),
)
class urls(EventUrls):
base = edit = '{self.event.orga_urls.mail_templates}{self.pk}/'
delete = '{base}delete'
def __str__(self):
"""Help with debugging."""
return f'MailTemplate(event={self.event.slug}, subject={self.subject})'
def to_mail(self, user, event, locale=None, context=None, skip_queue=False):
address = user.email if hasattr(user, 'email') else user
with override(locale):
context = context or dict()
try:
subject = str(self.subject).format(**context)
text = str(self.text).format(**context)
except KeyError as e:
raise SendMailException(f'Experienced KeyError when rendering Text: {str(e)}')
mail = QueuedMail(
event=self.event,
to=address,
reply_to=self.reply_to or event.email,
bcc=self.bcc,
subject=subject,
text=text,
)
if skip_queue:
mail.send()
else:
mail.save()
return mail
class QueuedMail(LogMixin, models.Model):
event = models.ForeignKey(
to='event.Event',
on_delete=models.PROTECT,
related_name='queued_mails',
)
to = models.CharField(
max_length=1000,
verbose_name=_('To'),
help_text=_('One email address or several addresses separated by commas.'),
)
reply_to = models.CharField(
max_length=1000,
null=True, blank=True,
verbose_name=_('Reply-To'),
help_text=_('By default, the orga address is used as Reply-To.'),
)
cc = models.CharField(
max_length=1000,
null=True, blank=True,
verbose_name=_('CC'),
help_text=_('One email address or several addresses separated by commas.'),
)
bcc = models.CharField(
max_length=1000,
null=True, blank=True,
verbose_name=_('BCC'),
help_text=_('One email address or several addresses separated by commas.'),
)
subject = models.CharField(
max_length=200,
verbose_name=_('Subject'),
)
text = models.TextField(verbose_name=_('Text'))
sent = models.DateTimeField(null=True, blank=True, verbose_name=_('Sent at'))
class urls(EventUrls):
base = edit = '{self.event.orga_urls.mail}{self.pk}/'
delete = '{base}delete'
send = '{base}send'
copy = '{base}copy'
def __str__(self):
"""Help with debugging."""
sent = self.sent.isoformat() if self.sent else None
return f'OutboxMail(event={self.event.slug}, to={self.to}, subject={self.subject}, sent={sent})'
@classmethod
def make_html(cls, text, event=None):
body_md = bleach.linkify(bleach.clean(markdown.markdown(text), tags=bleach.ALLOWED_TAGS + [
'p', 'pre'
]))
html_context = {
'body': body_md,
'event': event,
'color': (event.primary_color if event else '') or '#1c4a3b',
}
return get_template('mail/mailwrapper.html').render(html_context)
@classmethod
def make_text(cls, text, event=None):
if not event or not event.settings.mail_signature:
return text
sig = event.settings.mail_signature
if not sig.strip().startswith('-- '):
sig = f'-- \n{sig}'
return f'{text}\n{sig}'
@classmethod
def make_subject(cls, text, event=None):
if not event or not event.settings.mail_subject_prefix:
return text
prefix = event.settings.mail_subject_prefix
if not (prefix.startswith('[') and prefix.endswith(']')):
prefix = f'[{prefix}]'
return f'{prefix} {text}'
def send(self):
if self.sent:
raise Exception(_('This mail has been sent already. It cannot be sent again.'))
has_event = getattr(self, 'event', None)
text = self.make_text(self.text, event=has_event)
body_html = self.make_html(text)
from pretalx.common.mail import mail_send_task
mail_send_task.apply_async(
kwargs={
'to': self.to.split(','),
'subject': self.make_subject(self.subject, event=has_event),
'body': text,
'html': body_html,
'reply_to': self.reply_to or (self.event.email if has_event else None),
'event': self.event.pk if has_event else None,
'cc': (self.cc or '').split(','),
'bcc': (self.bcc or '').split(','),
}
)
self.sent = now()
if self.pk:
self.save()
def copy_to_draft(self):
new_mail = deepcopy(self)
new_mail.pk = None
new_mail.sent = None
new_mail.save()
return new_mail
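# Illustrative sketch (added for clarity, not from the original file): how the helpers
# above compose an outgoing mail. The prefix "demo" and the signature "Your orga team"
# are made-up values assumed to live in event.settings.
#
#   QueuedMail.make_subject("Your talk", event=event)   # -> "[demo] Your talk"
#   QueuedMail.make_text("See you soon!", event=event)  # -> "See you soon!\n-- \nYour orga team"
#   template.to_mail(user, event, context={"name": "Jane"})  # fills "{name}" placeholders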
|
py | b410eace6268f7a2daf92b7bd6692027f6f5e937 | import numpy as np
from .VariableUnitTest import VariableUnitTest
from gwlfe.MultiUse_Fxns.Erosion import ErosSum
class TestErosSum(VariableUnitTest):
def test_ErosSum(self):
z = self.z
np.testing.assert_array_almost_equal(
ErosSum.ErosSum_f(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV,
z.PcntET,
z.DayHrs, z.MaxWaterCap, z.SatStor_0, z.RecessionCoef, z.SeepCoef, z.Qretention,
z.PctAreaInfil,
z.n25b, z.Landuse, z.TileDrainDensity, z.PointFlow, z.StreamWithdrawal,
z.GroundWithdrawal,
z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF, z.AvSlope,
z.SedAAdjust,
z.StreamLength, z.n42b, z.n46c, z.n85d, z.AgLength, z.n42, z.n45, z.n85, z.UrbBankStab,
z.SedDelivRatio_0, z.Acoef, z.KF, z.LS, z.C, z.P),
ErosSum.ErosSum(z.NYrs, z.DaysMonth, z.Temp, z.InitSnow_0, z.Prec, z.NRur, z.NUrb, z.Area, z.CNI_0,
z.AntMoist_0, z.Grow_0, z.CNP_0, z.Imper, z.ISRR, z.ISRA, z.CN, z.UnsatStor_0, z.KV,
z.PcntET,
z.DayHrs, z.MaxWaterCap, z.SatStor_0, z.RecessionCoef, z.SeepCoef, z.Qretention,
z.PctAreaInfil,
z.n25b, z.Landuse, z.TileDrainDensity, z.PointFlow, z.StreamWithdrawal, z.GroundWithdrawal,
z.NumAnimals, z.AvgAnimalWt, z.StreamFlowVolAdj, z.SedAFactor_0, z.AvKF, z.AvSlope,
z.SedAAdjust,
z.StreamLength, z.n42b, z.n46c, z.n85d, z.AgLength, z.n42, z.n45, z.n85, z.UrbBankStab,
z.SedDelivRatio_0, z.Acoef, z.KF, z.LS, z.C, z.P), decimal=7)
|
py | b410eb56809ed671c96ef0aad36a441d949b7a61 | def message():
"""TODO: Implement
message command decorator
@interactions.message(...)
async def my_command(self, ctx: MessageContext)
"""
|
py | b410ee317ea52051d41e8c361f00fd0b7092feea | import numpy as np
import tensorflow as tf
import helpers
from tensorflow.contrib.rnn import LSTMCell, GRUCell
from model_new import Seq2SeqModel, train_on_copy_task
import pandas as pd
tf.reset_default_graph()
tf.set_random_seed(1)
with tf.Session() as session:
model = Seq2SeqModel(encoder_cell=LSTMCell(20),
decoder_cell=LSTMCell(20),
embedding_size=20,
vocab_size=10,
attention=False,
bidirectional=False,
beam_search=True,
debug=False)
session.run(tf.global_variables_initializer())
train_on_copy_task(session,
model,
length_from=3,
length_to=8,
vocab_lower=2,
vocab_upper=10,
batch_size=100,
max_batches=3000,
batches_in_epoch=1000,
verbose=True)
|
py | b410eec35b29bab584c91bac1407be5a3c121827 | """
.dat export to SQL
Overview
===============================================================================
+----------+------------------------------------------------------------------+
| Path | PyPoE/cli/exporter/dat/parsers/sql.py |
+----------+------------------------------------------------------------------+
| Version | 1.0.0a0 |
+----------+------------------------------------------------------------------+
| Revision | $Id: b410eec35b29bab584c91bac1407be5a3c121827 $ |
+----------+------------------------------------------------------------------+
| Author | Omega_K2 |
+----------+------------------------------------------------------------------+
Description
===============================================================================
Commandline .dat export to SQL using sqlalchemy.
Currently only MySQL is supported.
Agreement
===============================================================================
See PyPoE/LICENSE
"""
# =============================================================================
# Imports
# =============================================================================
# Python
import argparse
from collections import defaultdict
# 3rd-party
from tqdm import tqdm
import sqlalchemy
from sqlalchemy.types import Boolean, Text, String, Float
from sqlalchemy.dialects.mysql import TINYINT, SMALLINT, INTEGER, BIGINT
# self
from PyPoE.poe.file.dat import load_spec
from PyPoE.cli.core import console, Msg
from PyPoE.cli.exporter.dat.handler import DatExportHandler
# =============================================================================
# Globals
# =============================================================================
__all__ = ['SQLExportHandler']
# =============================================================================
# Classes
# =============================================================================
class SQLExportHandler(DatExportHandler):
_type_to_sql_map = {
'bool': Boolean(),
'byte': TINYINT(),
'ubyte': TINYINT(unsigned=True),
'short': SMALLINT(),
'ushort': SMALLINT(unsigned=True),
'int': INTEGER(),
'uint': INTEGER(unsigned=True),
'long': BIGINT(),
'ulong': BIGINT(unsigned=True),
'string': Text(),
'varchar': String(255),
'float': Float(),
'double': Float(),
}
_data_suffix = ''
_data_key_suffix = 'Key'
def __init__(self, sub_parser):
"""
:type sub_parser: argparse._SubParsersAction
"""
self.sql = sub_parser.add_parser(
'sql',
help='Export to MySQL',
formatter_class=argparse.RawTextHelpFormatter,
)
self.sql.set_defaults(func=self.handle)
self.sql.add_argument(
'--url',
help=
'SQLAlchemy database URL, for more info, see:\n'
'http://docs.sqlalchemy.org/en/rel_1_0/core/'
'engines.html#sqlalchemy.sqlalchemy.create_engine',
default='mysql+pymysql://root@localhost/test?charset=utf8',
)
self.sql.add_argument(
'-v', '--verbose',
help='Verbosity\n'
'-v - \n'
'-vv - ',
action='count',
)
self.sql.add_argument(
'--skip-data',
help='Skip gathering the data and committing to the database',
action='store_true',
)
self.sql.add_argument(
'--skip-child-data',
help='Skips committing of child data (i.e. list entries)',
action='store_true',
)
self.add_default_arguments(self.sql)
def _get_data_table_name(self, name, field):
return '%s_%s%s' % (name, field, self._data_suffix)
def _get_data_reference_key(self, name):
return '%s%s' % (name, self._data_key_suffix)
def _get_field(self, section, type, field=None):
args = []
kwargs = {}
if section['unique']:
kwargs['unique'] = True
if type == 'string':
type = 'varchar'
if section['key']:
# SQL doesn't like mixing types, force ulong
if type != 'varchar':
type = 'ulong'
if section['key_offset']:
foreign_key = 'rid'
elif section['key_id']:
foreign_key = section['key_id']
else:
foreign_key = 'rid'
other = section['key'][:-4]
args.append(sqlalchemy.ForeignKey(
'%s.%s' % (other, foreign_key)
))
kwargs['nullable'] = True
# TODO: This is a bit of a temporary fix
elif section.name.startswith('Key'):
kwargs['nullable'] = True
else:
kwargs['nullable'] = False
if not isinstance(field, str):
field = self._get_data_list_field_name(section, field)
return sqlalchemy.Column(field, self._type_to_sql_map[type], *args, **kwargs)
def _get_list_field_columns(self, parent_name):
return [
sqlalchemy.Column(
'rid',
BIGINT(unsigned=True),
primary_key=True,
autoincrement=True,
),
sqlalchemy.Column(
'index',
SMALLINT,
nullable=False,
),
sqlalchemy.Column(
self._get_data_reference_key(parent_name),
BIGINT(unsigned=True),
sqlalchemy.ForeignKey('%s.rid' % (parent_name, )),
nullable=False,
),
]
def _get_data_list_field_name(self, section, index=None):
if section['key']:
return self._get_data_reference_key(section['key'][:-4])
elif index is not None:
return 'value' + str(index)
else:
return 'value'
def handle(self, args):
"""
:param args:
:type args: argparse.Namespace
:return:
"""
super(SQLExportHandler, self).handle(args)
prefix = 'SQL init - '
console(prefix + 'Establishing DB connection')
engine = sqlalchemy.create_engine(args.url, echo=False, convert_unicode=True, encoding='utf-8')
metadata = sqlalchemy.MetaData(bind=engine)
con = engine.connect()
console(prefix + 'Setting session sql_modes')
result = con.execute('SELECT @@SESSION.sql_mode;')
sql_modes = result.fetchone()[0].split(',')
if 'NO_AUTO_VALUE_ON_ZERO' not in sql_modes:
sql_modes.append('NO_AUTO_VALUE_ON_ZERO')
con.execute("SET SESSION sql_mode=%s", ','.join(sql_modes))
spec = load_spec()
#
# SQL tables
#
prefix = 'SQL tables - '
console(prefix + 'Creating virtual tables from specification...')
tables = {}
for name in tqdm(args.files):
top_section = spec[name]
name = name.replace('.dat', '')
columns = [
sqlalchemy.Column('rid', BIGINT(unsigned=True), primary_key=True)
]
for field in top_section['columns_zip']:
if field in top_section['fields']:
section = top_section['fields'][field]
type_in = section['type']
dim = 0
while type_in.startswith('ref|list|'):
type_in = type_in[9:]
dim += 1
if type_in.startswith('ref|'):
type_in = type_in[4:]
if dim == 1:
table_name = self._get_data_table_name(name, field)
lcols = self._get_list_field_columns(parent_name=name)
lcols.append(self._get_field(section, type_in))
tables[table_name] = sqlalchemy.Table(
table_name,
metadata,
*lcols
)
elif dim >= 2:
raise ValueError('unsupported dim >=2')
else:
col = self._get_field(section, type_in, field)
columns.append(col)
elif field in top_section['virtual_fields']:
# We know we are a list field
fields = top_section['virtual_fields'][field]['fields']
vcolumns = self._get_list_field_columns(parent_name=name)
for i, sub_field in enumerate(fields):
section = top_section['fields'][sub_field]
# We know the type starts with ref|list
vcolumns.append(
self._get_field(section, section['type'][9:], i)
)
table_name = self._get_data_table_name(name, field)
tables[table_name] = sqlalchemy.Table(
table_name,
metadata,
*vcolumns
)
tables[name] = sqlalchemy.Table(name, metadata, *columns)
console(prefix + 'Committing tables to SQL...')
metadata.create_all()
console(prefix + 'Done')
#
# SQL Data
#
if not args.skip_data:
prefix = 'SQL Data - '
dat_files = self._read_dat_files(args, prefix=prefix)
console(prefix + 'Committing data...')
con.execute('SET SESSION foreign_key_checks = 0;')
for name, df in tqdm(dat_files.items()):
name_noext = name.replace('.dat', '')
foreign_key = self._get_data_reference_key(name_noext)
data = []
indexes = defaultdict(int)
dr = df.reader
sub_field_names = {}
for field_name in dr.columns_zip:
if field_name in dr.specification['fields']:
sub_field_names[field_name] = \
self._get_data_list_field_name(
dr.specification['fields'][field_name]
)
elif field_name in dr.specification['virtual_fields']:
vsection = dr.specification['virtual_fields'][field_name]
names = []
for i, fn in enumerate(vsection['fields']):
names.append(self._get_data_list_field_name(
dr.specification['fields'][fn], index=i)
)
sub_field_names[field_name] = names
for row in df.reader:
dt = {
'rid': row.rowid,
}
for k in dr.columns_zip:
v = row[k]
if isinstance(v, (list, zip)) and not args.skip_child_data and v:
if isinstance(v, list):
values = [
{
'rid': indexes[k] + i,
sub_field_names[k]: item,
foreign_key: row.rowid,
'index': i,
} for i, item in enumerate(v)
]
elif isinstance(v, zip):
values = []
for i, items in enumerate(v):
value_data = {
'rid': indexes[k] + i,
foreign_key: row.rowid,
'index': i,
}
for j, item in enumerate(items):
value_data[sub_field_names[k][j]] = item
values.append(value_data)
length = len(values)
if length:
con.execute(
tables[
self._get_data_table_name(name_noext, k)
].insert(
bind=engine,
values=values,
)
)
indexes[k] += length
else:
if df.reader.table_columns[k]['section']['key_offset']:
v -= df.reader.table_columns[k]['section']['key_offset']
if df.reader.table_columns[k]['section']['key'] and v == '':
v = None
dt[k] = v
data.append(dt)
con.execute(
tables[name_noext].insert(bind=engine, values=data)
)
con.execute('SET SESSION foreign_key_checks = 1;')
console(prefix + 'All done.')
# =============================================================================
# Functions
# =============================================================================
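# Illustrative sketch (added for clarity, not part of the original file): for a spec entry
# "Foo.dat" with a field "BarKeys" of type "ref|list|ulong" pointing at "Bar.dat", the
# handler above generates a child table roughly equivalent to:
#
#   CREATE TABLE `Foo_BarKeys` (
#       `rid`    BIGINT UNSIGNED AUTO_INCREMENT PRIMARY KEY,
#       `index`  SMALLINT NOT NULL,
#       `FooKey` BIGINT UNSIGNED NOT NULL,  -- FK to Foo.rid
#       `BarKey` BIGINT UNSIGNED NULL       -- FK to Bar.rid
#   );
#
# "Foo"/"Bar" are placeholder names; the exact DDL is emitted by SQLAlchemy's
# metadata.create_all() and may differ in detail.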
|
py | b410eedeae56a97947c004fe8d9410a2f43d032d | import csv
import requests
df = open("bridgeData3.csv",'r').readlines()
fin = open('final.csv','r').readlines()
finCsv = fin[1:]
# url = https://b2ptc.herokuapp.com/bridges
finalCsv = df[1:]
# index bridge rows by project code, keeping the name and before/after image URLs
obj = {}
for i in finalCsv:
x = i.split(',')
obj[x[1]] = {'bridge_name':x[0],'proj_code':x[1],'before_img':x[2],'after_img':x[3][0:-1]}
for i in finCsv:
x = i.split(',')
if x[6] in obj:
y= obj[x[6]]
y['province'] = x[0]
y['district'] = x[1]
y['sector'] = x[2]
y['cell'] = x[3]
y['bridge_site'] = x[4]
y['stage'] = x[5]
y['id'] = int(x[6])
y['type'] = x[7]
y['latt'] = float(x[8])
y['long'] = float(x[9])
try:
serv = float(x[10])
except ValueError:
# non-numeric "served" values are kept as the original string
serv = x[10]
sv = x[13].split(' ')[2] # note: parsed but currently unused
y['served'] = serv
y['community_served'] = x[14]
y['provId'] = x[15]
y['districtId'] = x[16]
y['sectorId'] = x[17]
y['cellId'] = x[18][0:-1]
print(y)
# for i in finalCsv:
# x = i.split(',')
# requests.put(url+x[0],data={before:x[2],after:x[3]})
# pull each id,before image and after from df
# for each data item do a put request with the id as the param id
# and then put the before and after image in an dict and place it as the data for the put request
|
py | b410eefdc71f6e36aeada32664eb52e868c52d6f | # Copyright 2016 OpenMarket Ltd
# Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple
from synapse.logging import issue9533_logger
from synapse.logging.opentracing import log_kv, set_tag, trace
from synapse.replication.tcp.streams import ToDeviceStream
from synapse.storage._base import SQLBaseStore, db_to_json
from synapse.storage.database import (
DatabasePool,
LoggingDatabaseConnection,
LoggingTransaction,
)
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import (
AbstractStreamIdGenerator,
MultiWriterIdGenerator,
StreamIdGenerator,
)
from synapse.types import JsonDict
from synapse.util import json_encoder
from synapse.util.caches.expiringcache import ExpiringCache
from synapse.util.caches.stream_change_cache import StreamChangeCache
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class DeviceInboxWorkerStore(SQLBaseStore):
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
self._instance_name = hs.get_instance_name()
# Map of (user_id, device_id) to the last stream_id that has been
# deleted up to. This is so that we can no op deletions.
self._last_device_delete_cache: ExpiringCache[
Tuple[str, Optional[str]], int
] = ExpiringCache(
cache_name="last_device_delete_cache",
clock=self._clock,
max_len=10000,
expiry_ms=30 * 60 * 1000,
)
if isinstance(database.engine, PostgresEngine):
self._can_write_to_device = (
self._instance_name in hs.config.worker.writers.to_device
)
self._device_inbox_id_gen: AbstractStreamIdGenerator = (
MultiWriterIdGenerator(
db_conn=db_conn,
db=database,
stream_name="to_device",
instance_name=self._instance_name,
tables=[("device_inbox", "instance_name", "stream_id")],
sequence_name="device_inbox_sequence",
writers=hs.config.worker.writers.to_device,
)
)
else:
self._can_write_to_device = True
self._device_inbox_id_gen = StreamIdGenerator(
db_conn, "device_inbox", "stream_id"
)
max_device_inbox_id = self._device_inbox_id_gen.get_current_token()
device_inbox_prefill, min_device_inbox_id = self.db_pool.get_cache_dict(
db_conn,
"device_inbox",
entity_column="user_id",
stream_column="stream_id",
max_value=max_device_inbox_id,
limit=1000,
)
self._device_inbox_stream_cache = StreamChangeCache(
"DeviceInboxStreamChangeCache",
min_device_inbox_id,
prefilled_cache=device_inbox_prefill,
)
# The federation outbox and the local device inbox uses the same
# stream_id generator.
device_outbox_prefill, min_device_outbox_id = self.db_pool.get_cache_dict(
db_conn,
"device_federation_outbox",
entity_column="destination",
stream_column="stream_id",
max_value=max_device_inbox_id,
limit=1000,
)
self._device_federation_outbox_stream_cache = StreamChangeCache(
"DeviceFederationOutboxStreamChangeCache",
min_device_outbox_id,
prefilled_cache=device_outbox_prefill,
)
def process_replication_rows(self, stream_name, instance_name, token, rows):
if stream_name == ToDeviceStream.NAME:
# If replication is happening than postgres must be being used.
assert isinstance(self._device_inbox_id_gen, MultiWriterIdGenerator)
self._device_inbox_id_gen.advance(instance_name, token)
for row in rows:
if row.entity.startswith("@"):
self._device_inbox_stream_cache.entity_has_changed(
row.entity, token
)
else:
self._device_federation_outbox_stream_cache.entity_has_changed(
row.entity, token
)
return super().process_replication_rows(stream_name, instance_name, token, rows)
def get_to_device_stream_token(self):
return self._device_inbox_id_gen.get_current_token()
async def get_new_messages_for_device(
self,
user_id: str,
device_id: Optional[str],
last_stream_id: int,
current_stream_id: int,
limit: int = 100,
) -> Tuple[List[dict], int]:
"""
Args:
user_id: The recipient user_id.
device_id: The recipient device_id.
last_stream_id: The last stream ID checked.
current_stream_id: The current position of the to device
message stream.
limit: The maximum number of messages to retrieve.
Returns:
A tuple containing:
* A list of messages for the device.
* The max stream token of these messages. There may be more to retrieve
if the given limit was reached.
"""
has_changed = self._device_inbox_stream_cache.has_entity_changed(
user_id, last_stream_id
)
if not has_changed:
return [], current_stream_id
def get_new_messages_for_device_txn(txn):
sql = (
"SELECT stream_id, message_json FROM device_inbox"
" WHERE user_id = ? AND device_id = ?"
" AND ? < stream_id AND stream_id <= ?"
" ORDER BY stream_id ASC"
" LIMIT ?"
)
txn.execute(
sql, (user_id, device_id, last_stream_id, current_stream_id, limit)
)
messages = []
stream_pos = current_stream_id
for row in txn:
stream_pos = row[0]
messages.append(db_to_json(row[1]))
# If the limit was not reached we know that there's no more data for this
# user/device pair up to current_stream_id.
if len(messages) < limit:
stream_pos = current_stream_id
return messages, stream_pos
return await self.db_pool.runInteraction(
"get_new_messages_for_device", get_new_messages_for_device_txn
)
@trace
async def delete_messages_for_device(
self, user_id: str, device_id: Optional[str], up_to_stream_id: int
) -> int:
"""
Args:
user_id: The recipient user_id.
device_id: The recipient device_id.
up_to_stream_id: Where to delete messages up to.
Returns:
The number of messages deleted.
"""
# If we have cached the last stream id we've deleted up to, we can
# check if there is likely to be anything that needs deleting
last_deleted_stream_id = self._last_device_delete_cache.get(
(user_id, device_id), None
)
set_tag("last_deleted_stream_id", last_deleted_stream_id)
if last_deleted_stream_id:
has_changed = self._device_inbox_stream_cache.has_entity_changed(
user_id, last_deleted_stream_id
)
if not has_changed:
log_kv({"message": "No changes in cache since last check"})
return 0
def delete_messages_for_device_txn(txn):
sql = (
"DELETE FROM device_inbox"
" WHERE user_id = ? AND device_id = ?"
" AND stream_id <= ?"
)
txn.execute(sql, (user_id, device_id, up_to_stream_id))
return txn.rowcount
count = await self.db_pool.runInteraction(
"delete_messages_for_device", delete_messages_for_device_txn
)
log_kv({"message": f"deleted {count} messages for device", "count": count})
# Update the cache, ensuring that we only ever increase the value
updated_last_deleted_stream_id = self._last_device_delete_cache.get(
(user_id, device_id), 0
)
self._last_device_delete_cache[(user_id, device_id)] = max(
updated_last_deleted_stream_id, up_to_stream_id
)
return count
@trace
async def get_new_device_msgs_for_remote(
self, destination, last_stream_id, current_stream_id, limit
) -> Tuple[List[dict], int]:
"""
Args:
destination(str): The name of the remote server.
last_stream_id(int|long): The last position of the device message stream
that the server sent up to.
current_stream_id(int|long): The current position of the device
message stream.
Returns:
A list of messages for the device and where in the stream the messages got to.
"""
set_tag("destination", destination)
set_tag("last_stream_id", last_stream_id)
set_tag("current_stream_id", current_stream_id)
set_tag("limit", limit)
has_changed = self._device_federation_outbox_stream_cache.has_entity_changed(
destination, last_stream_id
)
if not has_changed or last_stream_id == current_stream_id:
log_kv({"message": "No new messages in stream"})
return [], current_stream_id
if limit <= 0:
# This can happen if we run out of room for EDUs in the transaction.
return [], last_stream_id
@trace
def get_new_messages_for_remote_destination_txn(txn):
sql = (
"SELECT stream_id, messages_json FROM device_federation_outbox"
" WHERE destination = ?"
" AND ? < stream_id AND stream_id <= ?"
" ORDER BY stream_id ASC"
" LIMIT ?"
)
txn.execute(sql, (destination, last_stream_id, current_stream_id, limit))
messages = []
stream_pos = current_stream_id
for row in txn:
stream_pos = row[0]
messages.append(db_to_json(row[1]))
# If the limit was not reached we know that there's no more data for this
# user/device pair up to current_stream_id.
if len(messages) < limit:
log_kv({"message": "Set stream position to current position"})
stream_pos = current_stream_id
return messages, stream_pos
return await self.db_pool.runInteraction(
"get_new_device_msgs_for_remote",
get_new_messages_for_remote_destination_txn,
)
@trace
async def delete_device_msgs_for_remote(
self, destination: str, up_to_stream_id: int
) -> None:
"""Used to delete messages when the remote destination acknowledges
their receipt.
Args:
destination: The destination server_name
up_to_stream_id: Where to delete messages up to.
"""
def delete_messages_for_remote_destination_txn(txn):
sql = (
"DELETE FROM device_federation_outbox"
" WHERE destination = ?"
" AND stream_id <= ?"
)
txn.execute(sql, (destination, up_to_stream_id))
await self.db_pool.runInteraction(
"delete_device_msgs_for_remote", delete_messages_for_remote_destination_txn
)
async def get_all_new_device_messages(
self, instance_name: str, last_id: int, current_id: int, limit: int
) -> Tuple[List[Tuple[int, tuple]], int, bool]:
"""Get updates for to device replication stream.
Args:
instance_name: The writer we want to fetch updates from. Unused
here since there is only ever one writer.
last_id: The token to fetch updates from. Exclusive.
current_id: The token to fetch updates up to. Inclusive.
limit: The requested limit for the number of rows to return. The
function may return more or fewer rows.
Returns:
A tuple consisting of: the updates, a token to use to fetch
subsequent updates, and whether we returned fewer rows than exists
between the requested tokens due to the limit.
The token returned can be used in a subsequent call to this
function to get further updates.
The updates are a list of 2-tuples of stream ID and the row data
"""
if last_id == current_id:
return [], current_id, False
def get_all_new_device_messages_txn(txn):
# We limit like this as we might have multiple rows per stream_id, and
# we want to make sure we always get all entries for any stream_id
# we return.
upper_pos = min(current_id, last_id + limit)
sql = (
"SELECT max(stream_id), user_id"
" FROM device_inbox"
" WHERE ? < stream_id AND stream_id <= ?"
" GROUP BY user_id"
)
txn.execute(sql, (last_id, upper_pos))
updates = [(row[0], row[1:]) for row in txn]
sql = (
"SELECT max(stream_id), destination"
" FROM device_federation_outbox"
" WHERE ? < stream_id AND stream_id <= ?"
" GROUP BY destination"
)
txn.execute(sql, (last_id, upper_pos))
updates.extend((row[0], row[1:]) for row in txn)
# Order by ascending stream ordering
updates.sort()
limited = False
upto_token = current_id
if len(updates) >= limit:
upto_token = updates[-1][0]
limited = True
return updates, upto_token, limited
return await self.db_pool.runInteraction(
"get_all_new_device_messages", get_all_new_device_messages_txn
)
@trace
async def add_messages_to_device_inbox(
self,
local_messages_by_user_then_device: dict,
remote_messages_by_destination: dict,
) -> int:
"""Used to send messages from this server.
Args:
local_messages_by_user_then_device:
Dictionary of recipient user_id to recipient device_id to message.
remote_messages_by_destination:
Dictionary of destination server_name to the EDU JSON to send.
Returns:
The new stream_id.
"""
assert self._can_write_to_device
def add_messages_txn(txn, now_ms, stream_id):
# Add the local messages directly to the local inbox.
self._add_messages_to_local_device_inbox_txn(
txn, stream_id, local_messages_by_user_then_device
)
# Add the remote messages to the federation outbox.
# We'll send them to a remote server when we next send a
# federation transaction to that destination.
self.db_pool.simple_insert_many_txn(
txn,
table="device_federation_outbox",
values=[
{
"destination": destination,
"stream_id": stream_id,
"queued_ts": now_ms,
"messages_json": json_encoder.encode(edu),
"instance_name": self._instance_name,
}
for destination, edu in remote_messages_by_destination.items()
],
)
if remote_messages_by_destination:
issue9533_logger.debug(
"Queued outgoing to-device messages with stream_id %i for %s",
stream_id,
list(remote_messages_by_destination.keys()),
)
async with self._device_inbox_id_gen.get_next() as stream_id:
now_ms = self._clock.time_msec()
await self.db_pool.runInteraction(
"add_messages_to_device_inbox", add_messages_txn, now_ms, stream_id
)
for user_id in local_messages_by_user_then_device.keys():
self._device_inbox_stream_cache.entity_has_changed(user_id, stream_id)
for destination in remote_messages_by_destination.keys():
self._device_federation_outbox_stream_cache.entity_has_changed(
destination, stream_id
)
return self._device_inbox_id_gen.get_current_token()
async def add_messages_from_remote_to_device_inbox(
self, origin: str, message_id: str, local_messages_by_user_then_device: dict
) -> int:
assert self._can_write_to_device
def add_messages_txn(txn, now_ms, stream_id):
# Check if we've already inserted a matching message_id for that
# origin. This can happen if the origin doesn't receive our
# acknowledgement from the first time we received the message.
already_inserted = self.db_pool.simple_select_one_txn(
txn,
table="device_federation_inbox",
keyvalues={"origin": origin, "message_id": message_id},
retcols=("message_id",),
allow_none=True,
)
if already_inserted is not None:
return
# Add an entry for this message_id so that we know we've processed
# it.
self.db_pool.simple_insert_txn(
txn,
table="device_federation_inbox",
values={
"origin": origin,
"message_id": message_id,
"received_ts": now_ms,
},
)
# Add the messages to the appropriate local device inboxes so that
# they'll be sent to the devices when they next sync.
self._add_messages_to_local_device_inbox_txn(
txn, stream_id, local_messages_by_user_then_device
)
async with self._device_inbox_id_gen.get_next() as stream_id:
now_ms = self._clock.time_msec()
await self.db_pool.runInteraction(
"add_messages_from_remote_to_device_inbox",
add_messages_txn,
now_ms,
stream_id,
)
for user_id in local_messages_by_user_then_device.keys():
self._device_inbox_stream_cache.entity_has_changed(user_id, stream_id)
return stream_id
def _add_messages_to_local_device_inbox_txn(
self, txn, stream_id, messages_by_user_then_device
):
assert self._can_write_to_device
local_by_user_then_device = {}
for user_id, messages_by_device in messages_by_user_then_device.items():
messages_json_for_user = {}
devices = list(messages_by_device.keys())
if len(devices) == 1 and devices[0] == "*":
# Handle wildcard device_ids.
# We exclude hidden devices (such as cross-signing keys) here as they are
# not expected to receive to-device messages.
devices = self.db_pool.simple_select_onecol_txn(
txn,
table="devices",
keyvalues={"user_id": user_id, "hidden": False},
retcol="device_id",
)
message_json = json_encoder.encode(messages_by_device["*"])
for device_id in devices:
# Add the message for all devices for this user on this
# server.
messages_json_for_user[device_id] = message_json
else:
if not devices:
continue
# We exclude hidden devices (such as cross-signing keys) here as they are
# not expected to receive to-device messages.
rows = self.db_pool.simple_select_many_txn(
txn,
table="devices",
keyvalues={"user_id": user_id, "hidden": False},
column="device_id",
iterable=devices,
retcols=("device_id",),
)
for row in rows:
# Only insert into the local inbox if the device exists on
# this server
device_id = row["device_id"]
message_json = json_encoder.encode(messages_by_device[device_id])
messages_json_for_user[device_id] = message_json
if messages_json_for_user:
local_by_user_then_device[user_id] = messages_json_for_user
if not local_by_user_then_device:
return
self.db_pool.simple_insert_many_txn(
txn,
table="device_inbox",
values=[
{
"user_id": user_id,
"device_id": device_id,
"stream_id": stream_id,
"message_json": message_json,
"instance_name": self._instance_name,
}
for user_id, messages_by_device in local_by_user_then_device.items()
for device_id, message_json in messages_by_device.items()
],
)
issue9533_logger.debug(
"Stored to-device messages with stream_id %i for %s",
stream_id,
[
(user_id, device_id)
for (user_id, messages_by_device) in local_by_user_then_device.items()
for device_id in messages_by_device.keys()
],
)
class DeviceInboxBackgroundUpdateStore(SQLBaseStore):
DEVICE_INBOX_STREAM_ID = "device_inbox_stream_drop"
REMOVE_DELETED_DEVICES = "remove_deleted_devices_from_device_inbox"
REMOVE_HIDDEN_DEVICES = "remove_hidden_devices_from_device_inbox"
REMOVE_DEAD_DEVICES_FROM_INBOX = "remove_dead_devices_from_device_inbox"
def __init__(
self,
database: DatabasePool,
db_conn: LoggingDatabaseConnection,
hs: "HomeServer",
):
super().__init__(database, db_conn, hs)
self.db_pool.updates.register_background_index_update(
"device_inbox_stream_index",
index_name="device_inbox_stream_id_user_id",
table="device_inbox",
columns=["stream_id", "user_id"],
)
self.db_pool.updates.register_background_update_handler(
self.DEVICE_INBOX_STREAM_ID, self._background_drop_index_device_inbox
)
# Used to be a background update that deletes all device_inboxes for deleted
# devices.
self.db_pool.updates.register_noop_background_update(
self.REMOVE_DELETED_DEVICES
)
# Used to be a background update that deletes all device_inboxes for hidden
# devices.
self.db_pool.updates.register_noop_background_update(self.REMOVE_HIDDEN_DEVICES)
self.db_pool.updates.register_background_update_handler(
self.REMOVE_DEAD_DEVICES_FROM_INBOX,
self._remove_dead_devices_from_device_inbox,
)
async def _background_drop_index_device_inbox(self, progress, batch_size):
def reindex_txn(conn):
txn = conn.cursor()
txn.execute("DROP INDEX IF EXISTS device_inbox_stream_id")
txn.close()
await self.db_pool.runWithConnection(reindex_txn)
await self.db_pool.updates._end_background_update(self.DEVICE_INBOX_STREAM_ID)
return 1
async def _remove_dead_devices_from_device_inbox(
self,
progress: JsonDict,
batch_size: int,
) -> int:
"""A background update to remove devices that were either deleted or hidden from
the device_inbox table.
Args:
progress: The update's progress dict.
batch_size: The batch size for this update.
Returns:
The number of rows deleted.
"""
def _remove_dead_devices_from_device_inbox_txn(
txn: LoggingTransaction,
        ) -> bool:
if "max_stream_id" in progress:
max_stream_id = progress["max_stream_id"]
else:
txn.execute("SELECT max(stream_id) FROM device_inbox")
# There's a type mismatch here between how we want to type the row and
# what fetchone says it returns, but we silence it because we know that
# res can't be None.
res: Tuple[Optional[int]] = txn.fetchone() # type: ignore[assignment]
if res[0] is None:
# this can only happen if the `device_inbox` table is empty, in which
# case we have no work to do.
                    return True
else:
max_stream_id = res[0]
start = progress.get("stream_id", 0)
stop = start + batch_size
# delete rows in `device_inbox` which do *not* correspond to a known,
# unhidden device.
sql = """
DELETE FROM device_inbox
WHERE
stream_id >= ? AND stream_id < ?
AND NOT EXISTS (
SELECT * FROM devices d
WHERE
d.device_id=device_inbox.device_id
AND d.user_id=device_inbox.user_id
AND NOT hidden
)
"""
txn.execute(sql, (start, stop))
self.db_pool.updates._background_update_progress_txn(
txn,
self.REMOVE_DEAD_DEVICES_FROM_INBOX,
{
"stream_id": stop,
"max_stream_id": max_stream_id,
},
)
return stop > max_stream_id
finished = await self.db_pool.runInteraction(
"_remove_devices_from_device_inbox_txn",
_remove_dead_devices_from_device_inbox_txn,
)
if finished:
await self.db_pool.updates._end_background_update(
self.REMOVE_DEAD_DEVICES_FROM_INBOX,
)
return batch_size
class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore):
pass
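# Illustrative sketch (not part of Synapse): the background update above walks
# `device_inbox` in fixed-size `stream_id` windows, deletes rows whose device no longer
# exists (or is hidden), and records the end of each window as progress so the job can
# resume. The standalone function below reproduces that batching pattern against a plain
# sqlite3 connection with the same table/column names; everything else (function name,
# defaults) is an assumption for demonstration only.
def _sketch_prune_dead_devices(conn, batch_size=100):
    """Delete device_inbox rows with no matching unhidden device, one window at a time."""
    cur = conn.cursor()
    cur.execute("SELECT max(stream_id) FROM device_inbox")
    max_stream_id = cur.fetchone()[0]
    if max_stream_id is None:
        # Empty table: nothing to do.
        return 0
    deleted = 0
    start = 0
    while start <= max_stream_id:
        stop = start + batch_size
        cur.execute(
            """
            DELETE FROM device_inbox
            WHERE stream_id >= ? AND stream_id < ?
              AND NOT EXISTS (
                  SELECT 1 FROM devices d
                  WHERE d.device_id = device_inbox.device_id
                    AND d.user_id = device_inbox.user_id
                    AND NOT hidden
              )
            """,
            (start, stop),
        )
        deleted += cur.rowcount
        # In the real background update this is where progress is persisted.
        conn.commit()
        start = stop
    return deleted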
|
py | b410ef57aaa53ccfb3a2f5149227ce46a7c93edd | import asyncio
from pathlib import Path
from secrets import token_bytes
import pytest
from tranzact.simulator.simulator_protocol import FarmNewBlockProtocol
from tranzact.types.peer_info import PeerInfo
from tranzact.util.ints import uint16, uint64
from tranzact.wallet.cc_wallet.cc_wallet import CCWallet
from tranzact.wallet.trade_manager import TradeManager
from tranzact.wallet.trading.trade_status import TradeStatus
from tests.setup_nodes import setup_simulators_and_wallets
from tests.time_out_assert import time_out_assert
from tests.wallet.sync.test_wallet_sync import wallet_height_at_least
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
@pytest.fixture(scope="module")
async def two_wallet_nodes():
async for _ in setup_simulators_and_wallets(1, 2, {}):
yield _
buffer_blocks = 4
@pytest.fixture(scope="module")
async def wallets_prefarm(two_wallet_nodes):
"""
    Farms transaction blocks to both wallets and returns the two wallet nodes and the full node API.
"""
farm_blocks = 10
buffer = 4
full_nodes, wallets = two_wallet_nodes
full_node_api = full_nodes[0]
full_node_server = full_node_api.server
wallet_node_0, wallet_server_0 = wallets[0]
wallet_node_1, wallet_server_1 = wallets[1]
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
ph0 = await wallet_0.get_new_puzzlehash()
ph1 = await wallet_1.get_new_puzzlehash()
await wallet_server_0.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
await wallet_server_1.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
for i in range(0, farm_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph0))
for i in range(0, farm_blocks):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph1))
for i in range(0, buffer):
await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
return wallet_node_0, wallet_node_1, full_node_api
class TestCCTrades:
@pytest.mark.asyncio
async def test_cc_trade(self, wallets_prefarm):
wallet_node_0, wallet_node_1, full_node = wallets_prefarm
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node_0.wallet_state_manager, wallet_0, uint64(100))
await asyncio.sleep(1)
for i in range(1, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, wallet_height_at_least, True, wallet_node_0, 27)
await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
assert cc_wallet.cc_info.my_genesis_checker is not None
colour = cc_wallet.get_colour()
cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(
wallet_node_1.wallet_state_manager, wallet_1, colour
)
await asyncio.sleep(1)
assert cc_wallet.cc_info.my_genesis_checker == cc_wallet_2.cc_info.my_genesis_checker
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, wallet_height_at_least, True, wallet_node_0, 31)
# send cc_wallet 2 a coin
cc_hash = await cc_wallet_2.get_new_inner_hash()
tx_record = await cc_wallet.generate_signed_transaction([uint64(1)], [cc_hash])
await wallet_0.wallet_state_manager.add_pending_transaction(tx_record)
await asyncio.sleep(1)
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, wallet_height_at_least, True, wallet_node_0, 35)
trade_manager_0 = wallet_node_0.wallet_state_manager.trade_manager
trade_manager_1 = wallet_node_1.wallet_state_manager.trade_manager
file = "test_offer_file.offer"
file_path = Path(file)
if file_path.exists():
file_path.unlink()
offer_dict = {1: 10, 2: -30}
success, trade_offer, error = await trade_manager_0.create_offer_for_ids(offer_dict, file)
await asyncio.sleep(1)
assert success is True
assert trade_offer is not None
success, offer, error = await trade_manager_1.get_discrepancies_for_offer(file_path)
await asyncio.sleep(1)
assert error is None
assert success is True
assert offer is not None
assert offer["tranzact"] == -10
assert offer[colour] == 30
success, trade, reason = await trade_manager_1.respond_to_offer(file_path)
await asyncio.sleep(1)
assert success is True
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, wallet_height_at_least, True, wallet_node_0, 39)
await time_out_assert(15, cc_wallet_2.get_confirmed_balance, 31)
await time_out_assert(15, cc_wallet_2.get_unconfirmed_balance, 31)
trade_2 = await trade_manager_0.get_trade_by_id(trade_offer.trade_id)
assert TradeStatus(trade_2.status) is TradeStatus.CONFIRMED
@pytest.mark.asyncio
async def test_cc_trade_accept_with_zero(self, wallets_prefarm):
wallet_node_0, wallet_node_1, full_node = wallets_prefarm
wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
wallet_1 = wallet_node_1.wallet_state_manager.main_wallet
cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node_0.wallet_state_manager, wallet_0, uint64(100))
await asyncio.sleep(1)
for i in range(1, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
assert cc_wallet.cc_info.my_genesis_checker is not None
colour = cc_wallet.get_colour()
cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(
wallet_node_1.wallet_state_manager, wallet_1, colour
)
await asyncio.sleep(1)
assert cc_wallet.cc_info.my_genesis_checker == cc_wallet_2.cc_info.my_genesis_checker
ph = await wallet_1.get_new_puzzlehash()
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(ph))
trade_manager_0 = wallet_node_0.wallet_state_manager.trade_manager
trade_manager_1 = wallet_node_1.wallet_state_manager.trade_manager
file = "test_offer_file.offer"
file_path = Path(file)
if file_path.exists():
file_path.unlink()
offer_dict = {1: 10, 3: -30}
success, trade_offer, error = await trade_manager_0.create_offer_for_ids(offer_dict, file)
await asyncio.sleep(1)
assert success is True
assert trade_offer is not None
success, offer, error = await trade_manager_1.get_discrepancies_for_offer(file_path)
await asyncio.sleep(1)
assert error is None
assert success is True
assert offer is not None
assert cc_wallet.get_colour() == cc_wallet_2.get_colour()
assert offer["tranzact"] == -10
assert offer[colour] == 30
success, trade, reason = await trade_manager_1.respond_to_offer(file_path)
await asyncio.sleep(1)
assert success is True
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, cc_wallet_2.get_confirmed_balance, 30)
await time_out_assert(15, cc_wallet_2.get_unconfirmed_balance, 30)
trade_2 = await trade_manager_0.get_trade_by_id(trade_offer.trade_id)
assert TradeStatus(trade_2.status) is TradeStatus.CONFIRMED
@pytest.mark.asyncio
async def test_cc_trade_with_multiple_colours(self, wallets_prefarm):
        # This test starts with a CCWallet in both wallets:
# wallet1 {wallet_id: 2 = 70}
# wallet2 {wallet_id: 2 = 30}
wallet_node_a, wallet_node_b, full_node = wallets_prefarm
wallet_a = wallet_node_a.wallet_state_manager.main_wallet
wallet_b = wallet_node_b.wallet_state_manager.main_wallet
# cc_a_2 = coloured coin, Alice, wallet id = 2
cc_a_2 = wallet_node_a.wallet_state_manager.wallets[2]
cc_b_2 = wallet_node_b.wallet_state_manager.wallets[2]
cc_a_3: CCWallet = await CCWallet.create_new_cc(wallet_node_a.wallet_state_manager, wallet_a, uint64(100))
await asyncio.sleep(1)
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, cc_a_3.get_confirmed_balance, 100)
await time_out_assert(15, cc_a_3.get_unconfirmed_balance, 100)
# store these for asserting change later
cc_balance = await cc_a_2.get_unconfirmed_balance()
cc_balance_2 = await cc_b_2.get_unconfirmed_balance()
assert cc_a_3.cc_info.my_genesis_checker is not None
red = cc_a_3.get_colour()
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
cc_b_3: CCWallet = await CCWallet.create_wallet_for_cc(wallet_node_b.wallet_state_manager, wallet_b, red)
await asyncio.sleep(1)
assert cc_a_3.cc_info.my_genesis_checker == cc_b_3.cc_info.my_genesis_checker
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
trade_manager_0 = wallet_node_a.wallet_state_manager.trade_manager
trade_manager_1 = wallet_node_b.wallet_state_manager.trade_manager
file = "test_offer_file.offer"
file_path = Path(file)
if file_path.exists():
file_path.unlink()
        # Wallet A requests 1000 tranzact and offers 20 of colour_2 (wallet id 2) and 50 of colour_3 (wallet id 4)
offer_dict = {1: 1000, 2: -20, 4: -50}
success, trade_offer, error = await trade_manager_0.create_offer_for_ids(offer_dict, file)
await asyncio.sleep(1)
assert success is True
assert trade_offer is not None
success, offer, error = await trade_manager_1.get_discrepancies_for_offer(file_path)
await asyncio.sleep(1)
assert error is None
assert success is True
assert offer is not None
assert offer["tranzact"] == -1000
colour_2 = cc_a_2.get_colour()
colour_3 = cc_a_3.get_colour()
assert offer[colour_2] == 20
assert offer[colour_3] == 50
success, trade, reason = await trade_manager_1.respond_to_offer(file_path)
await asyncio.sleep(1)
assert success is True
for i in range(0, 10):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, cc_b_3.get_confirmed_balance, 50)
await time_out_assert(15, cc_b_3.get_unconfirmed_balance, 50)
await time_out_assert(15, cc_a_3.get_confirmed_balance, 50)
await time_out_assert(15, cc_a_3.get_unconfirmed_balance, 50)
await time_out_assert(15, cc_a_2.get_unconfirmed_balance, cc_balance - offer[colour_2])
await time_out_assert(15, cc_b_2.get_unconfirmed_balance, cc_balance_2 + offer[colour_2])
trade = await trade_manager_0.get_trade_by_id(trade_offer.trade_id)
status: TradeStatus = TradeStatus(trade.status)
assert status is TradeStatus.CONFIRMED
@pytest.mark.asyncio
async def test_create_offer_with_zero_val(self, wallets_prefarm):
# Wallet A Wallet B
# CCWallet id 2: 50 CCWallet id 2: 50
        # CCWallet id 3: 50     CCWallet id 3: 50
        # Wallet A will create a new CC and wallet B will create an offer to buy that coin
wallet_node_a, wallet_node_b, full_node = wallets_prefarm
wallet_a = wallet_node_a.wallet_state_manager.main_wallet
wallet_b = wallet_node_b.wallet_state_manager.main_wallet
trade_manager_a: TradeManager = wallet_node_a.wallet_state_manager.trade_manager
trade_manager_b: TradeManager = wallet_node_b.wallet_state_manager.trade_manager
cc_a_4: CCWallet = await CCWallet.create_new_cc(wallet_node_a.wallet_state_manager, wallet_a, uint64(100))
await asyncio.sleep(1)
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, cc_a_4.get_confirmed_balance, 100)
colour = cc_a_4.get_colour()
cc_b_4: CCWallet = await CCWallet.create_wallet_for_cc(wallet_node_b.wallet_state_manager, wallet_b, colour)
cc_balance = await cc_a_4.get_confirmed_balance()
cc_balance_2 = await cc_b_4.get_confirmed_balance()
offer_dict = {1: -30, cc_a_4.id(): 50}
file = "test_offer_file.offer"
file_path = Path(file)
if file_path.exists():
file_path.unlink()
success, offer, error = await trade_manager_b.create_offer_for_ids(offer_dict, file)
success, trade_a, reason = await trade_manager_a.respond_to_offer(file_path)
await asyncio.sleep(1)
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, cc_a_4.get_confirmed_balance, cc_balance - 50)
await time_out_assert(15, cc_b_4.get_confirmed_balance, cc_balance_2 + 50)
async def assert_func():
assert trade_a is not None
trade = await trade_manager_a.get_trade_by_id(trade_a.trade_id)
assert trade is not None
return trade.status
async def assert_func_b():
assert offer is not None
trade = await trade_manager_b.get_trade_by_id(offer.trade_id)
assert trade is not None
return trade.status
await time_out_assert(15, assert_func, TradeStatus.CONFIRMED.value)
await time_out_assert(15, assert_func_b, TradeStatus.CONFIRMED.value)
@pytest.mark.asyncio
async def test_cc_trade_cancel_insecure(self, wallets_prefarm):
# Wallet A Wallet B
# CCWallet id 2: 50 CCWallet id 2: 50
# CCWallet id 3: 50 CCWallet id 3: 50
# CCWallet id 4: 40 CCWallet id 4: 60
# Wallet A will create offer, cancel it by deleting from db only
wallet_node_a, wallet_node_b, full_node = wallets_prefarm
wallet_a = wallet_node_a.wallet_state_manager.main_wallet
trade_manager_a: TradeManager = wallet_node_a.wallet_state_manager.trade_manager
file = "test_offer_file.offer"
file_path = Path(file)
if file_path.exists():
file_path.unlink()
spendable_tranzact = await wallet_a.get_spendable_balance()
offer_dict = {1: 10, 2: -30, 3: 30}
success, trade_offer, error = await trade_manager_a.create_offer_for_ids(offer_dict, file)
await asyncio.sleep(1)
spendable_tranzact_after = await wallet_a.get_spendable_balance()
locked_coin = await trade_manager_a.get_locked_coins(wallet_a.id())
locked_sum = 0
for name, record in locked_coin.items():
locked_sum += record.coin.amount
assert spendable_tranzact == spendable_tranzact_after + locked_sum
assert success is True
assert trade_offer is not None
# Cancel offer 1 by just deleting from db
await trade_manager_a.cancel_pending_offer(trade_offer.trade_id)
await asyncio.sleep(1)
spendable_after_cancel_1 = await wallet_a.get_spendable_balance()
# Spendable should be the same as it was before making offer 1
assert spendable_tranzact == spendable_after_cancel_1
trade_a = await trade_manager_a.get_trade_by_id(trade_offer.trade_id)
assert trade_a is not None
assert trade_a.status == TradeStatus.CANCELED.value
@pytest.mark.asyncio
async def test_cc_trade_cancel_secure(self, wallets_prefarm):
# Wallet A Wallet B
# CCWallet id 2: 50 CCWallet id 2: 50
# CCWallet id 3: 50 CCWallet id 3: 50
# CCWallet id 4: 40 CCWallet id 4: 60
# Wallet A will create offer, cancel it by spending coins back to self
wallet_node_a, wallet_node_b, full_node = wallets_prefarm
wallet_a = wallet_node_a.wallet_state_manager.main_wallet
trade_manager_a: TradeManager = wallet_node_a.wallet_state_manager.trade_manager
file = "test_offer_file.offer"
file_path = Path(file)
if file_path.exists():
file_path.unlink()
spendable_tranzact = await wallet_a.get_spendable_balance()
offer_dict = {1: 10, 2: -30, 3: 30}
success, trade_offer, error = await trade_manager_a.create_offer_for_ids(offer_dict, file)
await asyncio.sleep(1)
spendable_tranzact_after = await wallet_a.get_spendable_balance()
locked_coin = await trade_manager_a.get_locked_coins(wallet_a.id())
locked_sum = 0
for name, record in locked_coin.items():
locked_sum += record.coin.amount
assert spendable_tranzact == spendable_tranzact_after + locked_sum
assert success is True
assert trade_offer is not None
# Cancel offer 1 by spending coins that were offered
await trade_manager_a.cancel_pending_offer_safely(trade_offer.trade_id)
await asyncio.sleep(1)
for i in range(0, buffer_blocks):
await full_node.farm_new_transaction_block(FarmNewBlockProtocol(token_bytes()))
await time_out_assert(15, wallet_a.get_spendable_balance, spendable_tranzact)
# Spendable should be the same as it was before making offer 1
async def get_status():
assert trade_offer is not None
trade_a = await trade_manager_a.get_trade_by_id(trade_offer.trade_id)
assert trade_a is not None
return trade_a.status
await time_out_assert(15, get_status, TradeStatus.CANCELED.value)
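# Note on the offer dictionaries used throughout these tests (an observation drawn from
# the assertions above, not official API documentation): create_offer_for_ids takes a
# mapping of wallet_id -> amount from the maker's point of view, where positive amounts
# are requested and negative amounts are offered. get_discrepancies_for_offer reports the
# same trade keyed by asset name ("tranzact" or a colour) from the taker's point of view,
# so every amount appears with the opposite sign: {1: 10, 2: -30} becomes
# {"tranzact": -10, colour: 30}. The helper below is only a sketch of that sign flip.
def _expected_taker_discrepancies(maker_offer, wallet_id_to_asset):
    """Map a maker offer {wallet_id: amount} to the taker view {asset_name: -amount}."""
    return {wallet_id_to_asset[wid]: -amount for wid, amount in maker_offer.items()}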
|
py | b410efa3e9bb252ee264dbea3128d89d59154c4c | import csv as csv
import numpy as np
#work on train data
csv_file_object = csv.reader(open('../data/train.csv', 'r', newline=''))
header = next(csv_file_object)
data = []
for row in csv_file_object:
data.append(row)
data = np.array(data)
number_passengers = np.size(data[0::,1].astype(np.float))
number_survived = np.sum(data[0::,1].astype(np.float))
proportion_survivors = number_survived / number_passengers
women_only_stats = data[0::,4] == "female"
men_only_stats = data[0::,4] != "female"
women_onboard = data[women_only_stats, 1].astype(np.float)
men_onboard = data[men_only_stats, 1].astype(np.float)
proportion_women_survived = np.sum(women_onboard) / np.size(women_onboard)
proportion_men_survived = np.sum(men_onboard) / np.size(men_onboard)
print('Proportion of women who survived is %s' % proportion_women_survived)
print('Proportion of men who survived is %s' % proportion_men_survived)
#Write test results
test_file = open('../data/test.csv', 'r', newline='')
test_file_object = csv.reader(test_file)
header = next(test_file_object)
prediction_file = open("genderbasedmodel.csv", "w", newline='')
prediction_file_object = csv.writer(prediction_file)
prediction_file_object.writerow(["PassengerId", "Survived"])
for row in test_file_object:
if row[3] == 'female':
prediction_file_object.writerow([row[0], '1'])
else:
prediction_file_object.writerow([row[0], '0'])
test_file.close()
prediction_file.close()
|
py | b410efbcde7d67711f30da6b08a09e9afc8596da | from typing import List, Dict, Tuple, Callable, Any, Optional
import grams.misc as M
from grams.config import DATA_DIR
from grams.kg_data.wikidatamodels import WDProperty, QNode
from grams.main import GRAMS
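# update_props() fetches the given Wikidata property ids through GRAMS, rebuilds each one
# as a WDProperty record, and serializes the results to new_properties.jl. The property
# ids read from each entity's statements are (assumed meanings, verify against Wikidata):
# P1647 = subproperty of, P1659 = see also, P1628 = equivalent property (external URI),
# P1629 = subject item of this property, P1696 = inverse property, P31 = instance of.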
def update_props(props: List[str]):
props: Dict[str, QNode] = GRAMS._query_wikidata_entities(props)
ser = []
for p in props.values():
np = WDProperty(p.id, str(p.label), str(p.description), str(p.datatype), [str(s) for s in p.aliases],
sorted({stmt.value.as_qnode_id() for stmt in p.props.get("P1647", [])}),
sorted({stmt.value.as_qnode_id() for stmt in p.props.get("P1659", [])}),
sorted({stmt.value.as_string() for stmt in p.props.get("P1628", [])}),
sorted({stmt.value.as_qnode_id() for stmt in p.props.get("P1629", [])}),
sorted({stmt.value.as_qnode_id() for stmt in p.props.get("P1696", [])}),
sorted({stmt.value.as_qnode_id() for stmt in p.props.get("P31", [])}))
ser.append(np.serialize())
M.serialize_byte_lines(ser, DATA_DIR / "new_properties.jl")
if __name__ == '__main__':
update_props(["P8901"])
|
py | b410f15177efdd42c58df9d3da1b4c28ceee51b6 | ###
# Copyright (c) 2010, Daniel Folkinshteyn
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.utils as utils
from supybot.commands import *
import supybot.utils.minisix as minisix
import supybot.plugins as plugins
import supybot.ircutils as ircutils
import supybot.callbacks as callbacks
import supybot.conf as conf
import supybot.ircdb as ircdb
import re
import os
import sys
import time
try:
from supybot.i18n import PluginInternationalization
from supybot.i18n import internationalizeDocstring
_ = PluginInternationalization('MessageParser')
except:
    # These are no-op fallback functions that allow the plugin to run on a bot
# without the i18n plugin
_ = lambda x:x
internationalizeDocstring = lambda x:x
#try:
#import sqlite
#except ImportError:
#raise callbacks.Error, 'You need to have PySQLite installed to use this ' \
#'plugin. Download it at ' \
#'<http://code.google.com/p/pysqlite/>'
import sqlite3
# these are needed cuz we are overriding getdb
import threading
import supybot.world as world
import supybot.log as log
class MessageParser(callbacks.Plugin, plugins.ChannelDBHandler):
"""This plugin can set regexp triggers to activate the bot.
Use 'add' command to add regexp trigger, 'remove' to remove."""
threaded = True
def __init__(self, irc):
callbacks.Plugin.__init__(self, irc)
plugins.ChannelDBHandler.__init__(self)
def makeDb(self, filename):
"""Create the database and connect to it."""
if os.path.exists(filename):
db = sqlite3.connect(filename)
if minisix.PY2:
db.text_factory = str
return db
db = sqlite3.connect(filename)
if minisix.PY2:
db.text_factory = str
cursor = db.cursor()
cursor.execute("""CREATE TABLE triggers (
id INTEGER PRIMARY KEY,
regexp TEXT UNIQUE ON CONFLICT REPLACE,
added_by TEXT,
added_at TIMESTAMP,
usage_count INTEGER,
action TEXT,
locked BOOLEAN
)""")
db.commit()
return db
# override this because sqlite3 doesn't have autocommit
# use isolation_level instead.
def getDb(self, channel):
"""Use this to get a database for a specific channel."""
currentThread = threading.currentThread()
if channel not in self.dbCache and currentThread == world.mainThread:
self.dbCache[channel] = self.makeDb(self.makeFilename(channel))
if currentThread != world.mainThread:
db = self.makeDb(self.makeFilename(channel))
else:
db = self.dbCache[channel]
db.isolation_level = None
return db
def _updateRank(self, channel, regexp):
subfolder = None if channel == 'global' else channel
if self.registryValue('keepRankInfo', subfolder):
db = self.getDb(channel)
cursor = db.cursor()
cursor.execute("""SELECT usage_count
FROM triggers
WHERE regexp=?""", (regexp,))
old_count = cursor.fetchall()[0][0]
cursor.execute("UPDATE triggers SET usage_count=? WHERE regexp=?", (old_count + 1, regexp,))
db.commit()
def _runCommandFunction(self, irc, msg, command):
"""Run a command from message, as if command was sent over IRC."""
tokens = callbacks.tokenize(command)
try:
self.Proxy(irc.irc, msg, tokens)
except Exception as e:
log.exception('Uncaught exception in function called by MessageParser:')
def _checkManageCapabilities(self, irc, msg, channel):
"""Check if the user has any of the required capabilities to manage
the regexp database."""
capabilities = self.registryValue('requireManageCapability')
if capabilities:
for capability in re.split(r'\s*;\s*', capabilities):
if capability.startswith('channel,'):
capability = capability[8:]
if channel != 'global':
capability = ircdb.makeChannelCapability(channel, capability)
if capability and ircdb.checkCapability(msg.prefix, capability):
#print "has capability:", capability
return True
return False
else:
return True
def do_privmsg_notice(self, irc, msg):
channel = msg.args[0]
if not irc.isChannel(channel):
return
if self.registryValue('enable', channel):
actions = []
results = []
for channel in set(map(plugins.getChannel, (channel, 'global'))):
db = self.getDb(channel)
cursor = db.cursor()
cursor.execute("SELECT regexp, action FROM triggers")
# Fetch results and prepend channel name or 'global'. This
# prevents duplicating the following lines.
results.extend([(channel,)+x for x in cursor.fetchall()])
if len(results) == 0:
return
max_triggers = self.registryValue('maxTriggers', channel)
for (channel, regexp, action) in results:
for match in re.finditer(regexp, msg.args[1]):
if match is not None:
thisaction = action
self._updateRank(channel, regexp)
for (i, j) in enumerate(match.groups()):
if match.group(i+1) is not None:
thisaction = re.sub(r'\$' + str(i+1), match.group(i+1), thisaction)
actions.append(thisaction)
if max_triggers != 0 and max_triggers == len(actions):
break
if max_triggers != 0 and max_triggers == len(actions):
break
for action in actions:
self._runCommandFunction(irc, msg, action)
def doPrivmsg(self, irc, msg):
if not callbacks.addressed(irc.nick, msg): #message is not direct command
self.do_privmsg_notice(irc, msg)
def doNotice(self, irc, msg):
if self.registryValue('enableForNotices', msg.args[0]):
self.do_privmsg_notice(irc, msg)
@internationalizeDocstring
def add(self, irc, msg, args, channel, regexp, action):
"""[<channel>|global] <regexp> <action>
Associates <regexp> with <action>. <channel> is only
necessary if the message isn't sent on the channel
itself. Action is echoed upon regexp match, with variables $1, $2,
etc. being interpolated from the regexp match groups."""
if not self._checkManageCapabilities(irc, msg, channel):
capabilities = self.registryValue('requireManageCapability')
irc.errorNoCapability(capabilities, Raise=True)
db = self.getDb(channel)
cursor = db.cursor()
cursor.execute("SELECT id, usage_count, locked FROM triggers WHERE regexp=?", (regexp,))
results = cursor.fetchall()
if len(results) != 0:
(id, usage_count, locked) = list(map(int, results[0]))
else:
locked = 0
usage_count = 0
if not locked:
try:
re.compile(regexp)
except Exception as e:
irc.error(_('Invalid python regexp: %s') % (e,))
return
if ircdb.users.hasUser(msg.prefix):
name = ircdb.users.getUser(msg.prefix).name
else:
name = msg.nick
cursor.execute("""INSERT INTO triggers VALUES
(NULL, ?, ?, ?, ?, ?, ?)""",
(regexp, name, int(time.time()), usage_count, action, locked,))
db.commit()
irc.replySuccess()
else:
irc.error(_('That trigger is locked.'))
return
add = wrap(add, ['channelOrGlobal', 'something', 'something'])
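    # Usage sketch (hypothetical IRC session; the bot prefix, regexp and action below are
    # made up for illustration):
    #   <user> @messageparser add "thank you (\w+)" "echo You are welcome, $1!"
    #   <user> thank you bot
    #   <bot>  You are welcome, bot!
    # The trigger can later be inspected or removed with:
    #   <user> @messageparser show "thank you (\w+)"
    #   <user> @messageparser remove "thank you (\w+)"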
@internationalizeDocstring
    def remove(self, irc, msg, args, channel, optlist, regexp):
        """[<channel>|global] [--id] <regexp>
Removes the trigger for <regexp> from the triggers database.
<channel> is only necessary if
the message isn't sent in the channel itself.
If option --id specified, will retrieve by regexp id, not content.
"""
if not self._checkManageCapabilities(irc, msg, channel):
capabilities = self.registryValue('requireManageCapability')
irc.errorNoCapability(capabilities, Raise=True)
db = self.getDb(channel)
cursor = db.cursor()
target = 'regexp'
for (option, arg) in optlist:
if option == 'id':
target = 'id'
sql = "SELECT id, locked FROM triggers WHERE %s=?" % (target,)
cursor.execute(sql, (regexp,))
results = cursor.fetchall()
if len(results) != 0:
(id, locked) = list(map(int, results[0]))
else:
irc.error(_('There is no such regexp trigger.'))
return
if locked:
irc.error(_('This regexp trigger is locked.'))
return
cursor.execute("""DELETE FROM triggers WHERE id=?""", (id,))
db.commit()
irc.replySuccess()
remove = wrap(remove, ['channelOrGlobal',
getopts({'id': '',}),
'something'])
@internationalizeDocstring
def lock(self, irc, msg, args, channel, regexp):
"""[<channel>|global] <regexp>
Locks the <regexp> so that it cannot be
removed or overwritten to. <channel> is only necessary if the message isn't
sent in the channel itself.
"""
if not self._checkManageCapabilities(irc, msg, channel):
capabilities = self.registryValue('requireManageCapability')
irc.errorNoCapability(capabilities, Raise=True)
db = self.getDb(channel)
cursor = db.cursor()
cursor.execute("SELECT id FROM triggers WHERE regexp=?", (regexp,))
results = cursor.fetchall()
if len(results) == 0:
irc.error(_('There is no such regexp trigger.'))
return
cursor.execute("UPDATE triggers SET locked=1 WHERE regexp=?", (regexp,))
db.commit()
irc.replySuccess()
lock = wrap(lock, ['channelOrGlobal', 'text'])
@internationalizeDocstring
def unlock(self, irc, msg, args, channel, regexp):
"""[<channel>|global] <regexp>
Unlocks the entry associated with <regexp> so that it can be
removed or overwritten. <channel> is only necessary if the message isn't
sent in the channel itself.
"""
if not self._checkManageCapabilities(irc, msg, channel):
capabilities = self.registryValue('requireManageCapability')
irc.errorNoCapability(capabilities, Raise=True)
db = self.getDb(channel)
cursor = db.cursor()
cursor.execute("SELECT id FROM triggers WHERE regexp=?", (regexp,))
results = cursor.fetchall()
if len(results) == 0:
irc.error(_('There is no such regexp trigger.'))
return
cursor.execute("UPDATE triggers SET locked=0 WHERE regexp=?", (regexp,))
db.commit()
irc.replySuccess()
unlock = wrap(unlock, ['channelOrGlobal', 'text'])
@internationalizeDocstring
def show(self, irc, msg, args, channel, optlist, regexp):
"""[<channel>|global] [--id] <regexp>
Looks up the value of <regexp> in the triggers database.
<channel> is only necessary if the message isn't sent in the channel
itself.
If option --id specified, will retrieve by regexp id, not content.
"""
db = self.getDb(channel)
cursor = db.cursor()
target = 'regexp'
for (option, arg) in optlist:
if option == 'id':
target = 'id'
sql = "SELECT regexp, action FROM triggers WHERE %s=?" % (target,)
cursor.execute(sql, (regexp,))
results = cursor.fetchall()
if len(results) != 0:
(regexp, action) = results[0]
else:
irc.error(_('There is no such regexp trigger.'))
return
irc.reply("The action for regexp trigger \"%s\" is \"%s\"" % (regexp, action))
show = wrap(show, ['channelOrGlobal',
getopts({'id': '',}),
'something'])
@internationalizeDocstring
def info(self, irc, msg, args, channel, optlist, regexp):
"""[<channel>|global] [--id] <regexp>
Display information about <regexp> in the triggers database.
<channel> is only necessary if the message isn't sent in the channel
itself.
If option --id specified, will retrieve by regexp id, not content.
"""
db = self.getDb(channel)
cursor = db.cursor()
target = 'regexp'
for (option, arg) in optlist:
if option == 'id':
target = 'id'
sql = "SELECT * FROM triggers WHERE %s=?" % (target,)
cursor.execute(sql, (regexp,))
results = cursor.fetchall()
if len(results) != 0:
(id, regexp, added_by, added_at, usage_count,
action, locked) = results[0]
else:
irc.error(_('There is no such regexp trigger.'))
return
irc.reply(_("The regexp id is %d, regexp is \"%s\", and action is"
" \"%s\". It was added by user %s on %s, has been "
"triggered %d times, and is %s.") % (id,
regexp,
action,
added_by,
time.strftime(conf.supybot.reply.format.time(),
time.localtime(int(added_at))),
usage_count,
locked and _("locked") or _("not locked"),))
info = wrap(info, ['channelOrGlobal',
getopts({'id': '',}),
'something'])
@internationalizeDocstring
def list(self, irc, msg, args, channel):
"""[<channel>|global]
Lists regexps present in the triggers database.
<channel> is only necessary if the message isn't sent in the channel
itself. Regexp ID listed in parentheses.
"""
db = self.getDb(channel)
cursor = db.cursor()
cursor.execute("SELECT regexp, id FROM triggers ORDER BY id")
results = cursor.fetchall()
if len(results) != 0:
regexps = results
else:
irc.reply(_('There are no regexp triggers in the database.'))
return
s = [ "%s: %s" % (ircutils.bold('#'+str(regexp[1])), regexp[0]) for regexp in regexps ]
separator = self.registryValue('listSeparator', channel)
irc.reply(separator.join(s))
list = wrap(list, ['channelOrGlobal'])
@internationalizeDocstring
def rank(self, irc, msg, args, channel):
"""[<channel>|global]
Returns a list of top-ranked regexps, sorted by usage count
(rank). The number of regexps returned is set by the
rankListLength registry value. <channel> is only necessary if the
message isn't sent in the channel itself.
"""
numregexps = self.registryValue('rankListLength', channel)
db = self.getDb(channel)
cursor = db.cursor()
cursor.execute("""SELECT regexp, usage_count
FROM triggers
ORDER BY usage_count DESC
LIMIT ?""", (numregexps,))
regexps = cursor.fetchall()
if len(regexps) == 0:
irc.reply(_('There are no regexp triggers in the database.'))
return
s = [ "#%d \"%s\" (%d)" % (i+1, regexp[0], regexp[1]) for i, regexp in enumerate(regexps) ]
irc.reply(", ".join(s))
rank = wrap(rank, ['channelOrGlobal'])
@internationalizeDocstring
def vacuum(self, irc, msg, args, channel):
"""[<channel>|global]
Vacuums the database for <channel>.
See SQLite vacuum doc here: http://www.sqlite.org/lang_vacuum.html
<channel> is only necessary if the message isn't sent in
the channel itself.
First check if user has the required capability specified in plugin
config requireVacuumCapability.
"""
capability = self.registryValue('requireVacuumCapability')
if capability:
if not ircdb.checkCapability(msg.prefix, capability):
irc.errorNoCapability(capability, Raise=True)
db = self.getDb(channel)
cursor = db.cursor()
cursor.execute("""VACUUM""")
db.commit()
irc.replySuccess()
vacuum = wrap(vacuum, ['channelOrGlobal'])
MessageParser = internationalizeDocstring(MessageParser)
Class = MessageParser
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
py | b410f1582d49879bc6d6019edb978da413dd82a3 | #!/usr/bin/python
import os
import sys
# +---------------+
#---| Counting args |---
# +---------------+
numargs = len(sys.argv[1:])
endargs = numargs + 1
print('\nYou entered', numargs, 'command line arguments')
for i in range(1,endargs):
    print("arg[" + str(i) + "]: " + sys.argv[i])
# +-----------------------+
#---| Don't bother counting |---
# +-----------------------+
for arg in sys.argv[1:]:
    print('ARG:', arg)
# +---------------+
#---| Usage example |---
# +---------------+
if len(sys.argv[1:]) != 1:
    print('\nUSAGE:', os.path.basename(__file__), 'file\n')
sys.exit(-1)
|
py | b410f2ffeec7571d42d8164bcdd724eb5f6e3ecd | class ReinforcementBarOrientation(Enum, IComparable, IFormattable, IConvertible):
"""
Describes the bar orientation at Path Reinforcement.
    enum ReinforcementBarOrientation, values: BottomOrInterior (2), FarSide (3), NearSide (1), TopOrExterior (0)
"""
    def __eq__(self, *args):
        """ x.__eq__(y) <==> x==y """
pass
def __format__(self, *args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self, *args):
pass
def __gt__(self, *args):
pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args):
pass
def __lt__(self, *args):
pass
def __ne__(self, *args):
pass
def __reduce_ex__(self, *args):
pass
def __str__(self, *args):
pass
BottomOrInterior = None
FarSide = None
NearSide = None
TopOrExterior = None
value__ = None
|
py | b410f33c4525783a677c449b23fc64ea16b741fd | from functools import wraps
import logging
FORMAT = "%(asctime)s - %(name)-s - %(levelname)-s - %(message)s"
LEVEL = logging.DEBUG
logging.basicConfig(format=FORMAT, level=LEVEL)
log = logging.getLogger(__name__)
def logger(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
log = logging.getLogger(fn.__name__)
log.info("About to run %s" % fn.__name__)
out = fn(*args, **kwargs)
log.info("Done running %s" % fn.__name__)
return out
return wrapper
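# Minimal usage sketch (not part of the original module): applying @logger to a function
# logs an "About to run"/"Done running" pair around every call, via a logger named after
# the wrapped function. The toy function and the expected output below are illustrative.
@logger
def add(a, b):
    """Toy function used only to demonstrate the decorator."""
    return a + b
if __name__ == "__main__":
    # Expected log lines (timestamps elided):
    #   ... - add - INFO - About to run add
    #   ... - add - INFO - Done running add
    add(2, 3)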
|
py | b410f398dc4be481c5e9f8ddf83551746a88ec2c | import tensorflow as tf
# To avoid getting the 'No module named _tkinter, please install the python-tk package' error
# when running on GCP
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from skimage.transform import resize
import trainer.model as model
# TODO: pass this in via flags
# See tf.app.flags
DATASET_PATH = "gs://first-ml-project-222122-mlengine/sample-data"
#DATASET_PATH = "/home/gaston/workspace/datasets/CASIA-WebFace/CASIA-WebFace"
# TODO: pass this in via flags
CHECKPOINTS_DIR = "gs://first-ml-project-222122-mlengine/checkpoints_2018_11_13_sample"
DATASET_TRAIN_PATH = os.path.join(DATASET_PATH, "train")
IMAGE_SIZE = 128
PATCH_SIZE = 32
BATCH_SIZE = 16
DATASET_BUFFER = 10000
SHUFFLE_BUFFER_SIZE = 1000
PARALLEL_MAP_THREADS = 8
EPOCHS = 50
BATCHES_PER_PRINT = 20
BATCHES_PER_CHECKPOINT = 100
# Use tf eager execution for the whole app.
tf.enable_eager_execution()
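# Note: get_reference_image below refers to a module-level `train_reference_dict` that is
# never defined in this file; the input pipeline in main() uses
# get_reference_image_from_file_fn instead, so this helper is effectively dead code here.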
def get_reference_image(image, image_path):
# Need to do this because when calling this function using tf.py_func,
# the image_path is passed as bytes instead of string.
image_path = image_path.decode('UTF-8')
identity = image_path.split('/')[-2]
references = train_reference_dict[identity]
idx = np.random.randint(len(references))
return (image, references[idx])
def get_reference_image_from_file_fn(train_reference_path, train_reference_paths_dict):
def get_reference_image_from_file(image, image_path):
# Need to do this because when calling this function using tf.py_func,
# the image_path is passed as bytes instead of string.
image_path = image_path.decode('UTF-8')
identity = image_path.split('/')[-2]
reference_paths = train_reference_paths_dict[identity]
idx = np.random.randint(len(reference_paths))
image_file_name = reference_paths[idx]
reference_image_file = tf.gfile.GFile(os.path.join(train_reference_path, identity, image_file_name),
mode='rb')
reference_image = plt.imread(reference_image_file)
reference_image = fix_image_encoding(reference_image)
return (image, reference_image)
return get_reference_image_from_file
def fix_image_encoding(image):
if (image.ndim == 2):
# Add new dimension for channels
image = image[:,:,np.newaxis]
if (image.shape[-1] == 1):
# Convert greyscale to RGB
image = np.concatenate((image,)*3, axis=-1)
return image
def create_reference_paths_dict(base_path):
reference_dict = {}
for identity_dir in tf.gfile.ListDirectory(base_path):
image_paths = []
full_identity_dir = os.path.join(base_path, identity_dir)
for image_path in tf.gfile.ListDirectory(full_identity_dir):
image_paths.append(image_path)
identity = identity_dir.replace('/', '')
reference_dict[identity] = image_paths
assert len(image_paths) > 0
return reference_dict
def get_mask_fn(img_size, patch_size):
patch_start = (img_size - patch_size) // 2
img_size_after_patch = img_size - (patch_start + patch_size)
def mask_fn(image, reference_image):
"""
Applies a mask of zeroes of size (patch_size x patch_size) at the center of the image.
Returns a tuple of the masked image and the original image.
"""
upper_edge = tf.ones([patch_start, img_size, 3], tf.float32)
lower_edge = tf.ones([img_size_after_patch, img_size,3], tf.float32)
middle_left = tf.ones([patch_size, patch_start, 3], tf.float32)
middle_right = tf.ones([patch_size, img_size_after_patch, 3], tf.float32)
zeros = tf.zeros([patch_size, patch_size, 3], tf.float32)
middle = tf.concat([middle_left, zeros, middle_right], axis=1)
mask = tf.concat([upper_edge, middle, lower_edge], axis=0)
return (image * mask, image, reference_image)
return mask_fn
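# Shape-check sketch (illustrative only, not used by the pipeline): the mask built above
# is assembled from an upper strip, a middle strip containing the zeroed patch, and a
# lower strip, so the pieces must add back up to IMAGE_SIZE in both dimensions. NumPy
# stands in for the TensorFlow ops purely to make the arithmetic easy to run in isolation.
def _sketch_mask_layout(img_size=IMAGE_SIZE, patch_size=PATCH_SIZE):
    patch_start = (img_size - patch_size) // 2
    img_size_after_patch = img_size - (patch_start + patch_size)
    upper = np.ones((patch_start, img_size, 3))
    middle = np.concatenate(
        [
            np.ones((patch_size, patch_start, 3)),            # left border of the strip
            np.zeros((patch_size, patch_size, 3)),            # masked-out central square
            np.ones((patch_size, img_size_after_patch, 3)),   # right border of the strip
        ],
        axis=1,
    )
    lower = np.ones((img_size_after_patch, img_size, 3))
    mask = np.concatenate([upper, middle, lower], axis=0)
    assert mask.shape == (img_size, img_size, 3)
    return mask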
def patch_image(patch, image):
"""
Apply the given patch to the image.
    The patch is applied at the center of the image: a PATCH_SIZE x PATCH_SIZE patch pasted into an IMAGE_SIZE x IMAGE_SIZE image (32x32 into 128x128 with the constants above).
"""
patch_start = (IMAGE_SIZE - PATCH_SIZE) // 2
patch_end = patch_start + PATCH_SIZE
# TODO: See if this could be done more efficiently.
upper_edge = image[:, :patch_start, :, :]
lower_edge = image[:, patch_end:, :, :]
middle_left = image[:, patch_start:patch_end, :patch_start, :]
middle_right = image[:, patch_start:patch_end, patch_end:, :]
middle = tf.concat([middle_left, patch, middle_right], axis=2)
return tf.concat([upper_edge, middle, lower_edge], axis=1)
def generate_images(generator, masked_images, reference_images):
# make sure the training parameter is set to False because we
# don't want to train the batchnorm layer when doing inference.
patches = generator([masked_images, reference_images], training=False)
generated_images = patch_image(patches, masked_images)
return generated_images
def train_step(full_images,
full_reference_images,
masked_images,
masked_reference_images,
generator,
discriminator,
generator_optimizer,
discriminator_optimizer):
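    """Run a single GAN training step: the generator predicts a patch for the masked
    region conditioned on a reference image, the patch is pasted back into the masked
    image, and the discriminator scores real (image, reference) pairs against the
    patched fakes; gradients for both networks are computed in one pass and applied
    with their respective optimizers."""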
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_patches = generator([masked_images, masked_reference_images], training=True)
generated_images = patch_image(generated_patches, masked_images)
real_output = discriminator([full_images, full_reference_images], training=True)
generated_output = discriminator([generated_images, masked_reference_images], training=True)
gen_loss = model.generator_loss(generated_output)
disc_loss = model.discriminator_loss(real_output, generated_output)
gradients_of_generator = gen_tape.gradient(gen_loss, generator.variables)
gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.variables)
generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.variables))
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.variables))
return gen_loss, disc_loss
def train(dataset, epochs, generator, discriminator, validation_masked_images, validation_references):
# train_step = tf.contrib.eager.defun(train_step)
generator_optimizer = tf.train.AdamOptimizer(1e-4)
discriminator_optimizer = tf.train.AdamOptimizer(1e-4)
checkpoint_prefix = os.path.join(CHECKPOINTS_DIR, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
gen_losses = []
disc_losses = []
global_step = tf.train.get_or_create_global_step()
logdir = CHECKPOINTS_DIR
writer = tf.contrib.summary.create_file_writer(logdir)
writer.set_as_default()
for epoch in range(epochs):
epoch_start = time.time()
batch_start = time.time()
for images in dataset:
global_step.assign_add(1)
# See if we can get rid of this (we are already checking below)
with tf.contrib.summary.record_summaries_every_n_global_steps(BATCHES_PER_PRINT):
(full_images, full_reference_images) = images[0]
(masked_images, unmasked_images, masked_reference_images) = images[1]
gen_loss, disc_loss = train_step(full_images,
full_reference_images,
masked_images,
masked_reference_images,
generator,
discriminator,
generator_optimizer,
discriminator_optimizer)
tf.contrib.summary.scalar('gen_loss', gen_loss)
tf.contrib.summary.scalar('disc_loss', disc_loss)
if (global_step.numpy() % BATCHES_PER_PRINT == 0):
generated_images = generate_images(generator,
validation_masked_images,
validation_references)
tf.contrib.summary.image('generated_images', generated_images, max_images=9)
batch_end = time.time()
batch_time = (batch_end - batch_start) / BATCHES_PER_PRINT
batch_start = time.time() # Restart the timer.
global_steps_per_second = 1 / batch_time if batch_time > 0 else 0
tf.contrib.summary.scalar('global_step', global_steps_per_second)
tf.logging.info('Gen loss: {} - Disc loss: {} - Steps per second: {} - Current step {}'.format(gen_loss,
disc_loss,
global_steps_per_second,
global_step.numpy()))
if (global_step.numpy() % BATCHES_PER_CHECKPOINT == 0):
checkpoint.save(file_prefix = checkpoint_prefix)
print ('Time taken for epoch {} is {} sec'.format(epoch + 1,
time.time()-epoch_start))
def main():
train_reference_path = os.path.join(DATASET_TRAIN_PATH, "reference")
train_reference_paths_dict = create_reference_paths_dict(train_reference_path)
# Make a Dataset of file names including all the PNG images files in
# the relative image directory.
real_dataset = tf.data.Dataset.list_files(os.path.join(DATASET_TRAIN_PATH, "real/*/*.jpg"))
real_dataset = real_dataset.shuffle(SHUFFLE_BUFFER_SIZE)
    # TODO: maybe these map calls can be combined
real_dataset = real_dataset.map(lambda x: (tf.image.decode_image(tf.read_file(x), channels=3), x),
num_parallel_calls=PARALLEL_MAP_THREADS)
real_dataset = real_dataset.map(
lambda image, path: tuple(
tf.py_func(get_reference_image_from_file_fn(train_reference_path, train_reference_paths_dict), [image, path], [tf.uint8, tf.uint8])),
num_parallel_calls=PARALLEL_MAP_THREADS)
real_dataset = real_dataset.map(
lambda image, reference: (tf.image.resize_image_with_crop_or_pad(image, IMAGE_SIZE, IMAGE_SIZE),
tf.image.resize_image_with_crop_or_pad(reference, IMAGE_SIZE, IMAGE_SIZE)),
num_parallel_calls=PARALLEL_MAP_THREADS)
real_dataset = real_dataset.map(
lambda image, reference: (tf.image.convert_image_dtype(image, tf.float32),
tf.image.convert_image_dtype(reference, tf.float32)),
num_parallel_calls=PARALLEL_MAP_THREADS)
real_dataset = real_dataset.batch(BATCH_SIZE, drop_remainder=True)
real_dataset = real_dataset.prefetch(1)
masked_dataset = tf.data.Dataset.list_files(os.path.join(DATASET_TRAIN_PATH, "masked/*/*.jpg"))
masked_dataset = masked_dataset.shuffle(SHUFFLE_BUFFER_SIZE)
masked_dataset = masked_dataset.map(lambda x: (tf.image.decode_image(tf.read_file(x), channels=3), x),
num_parallel_calls=PARALLEL_MAP_THREADS)
masked_dataset = masked_dataset.map(
lambda image, path:
tf.py_func(get_reference_image_from_file_fn(train_reference_path, train_reference_paths_dict), [image, path], [tf.uint8, tf.uint8]),
num_parallel_calls=PARALLEL_MAP_THREADS)
masked_dataset = masked_dataset.map(
lambda image, reference: (tf.image.resize_image_with_crop_or_pad(image, IMAGE_SIZE, IMAGE_SIZE),
tf.image.resize_image_with_crop_or_pad(reference, IMAGE_SIZE, IMAGE_SIZE)),
num_parallel_calls=PARALLEL_MAP_THREADS)
masked_dataset = masked_dataset.map(
lambda image, reference: (tf.image.convert_image_dtype(image, tf.float32),
tf.image.convert_image_dtype(reference, tf.float32)),
num_parallel_calls=PARALLEL_MAP_THREADS)
masked_dataset = masked_dataset.map(
get_mask_fn(IMAGE_SIZE, PATCH_SIZE),
num_parallel_calls=PARALLEL_MAP_THREADS)
masked_dataset = masked_dataset.batch(BATCH_SIZE, drop_remainder=True)
masked_dataset = masked_dataset.prefetch(1)
train_dataset = tf.data.Dataset.zip((real_dataset, masked_dataset))
VALIDATION_IDENTITIES = [
"0005366",
"0005367",
"0005370",
"0005371",
"0005373",
"0005376",
"0005378",
"0005379",
"0005381"
]
validation_images = []
validation_references = []
for identity in VALIDATION_IDENTITIES:
full_identity_dir = os.path.join(DATASET_PATH, "validation", identity)
mask_image_file = tf.gfile.GFile(os.path.join(full_identity_dir, "001.jpg"),
mode='rb')
mask_image = plt.imread(mask_image_file)
reference_image_file = tf.gfile.GFile(os.path.join(full_identity_dir, "002.jpg"),
mode='rb')
reference_image = plt.imread(reference_image_file)
mask_image = fix_image_encoding(mask_image)
reference_image = fix_image_encoding(reference_image)
mask_image = resize(mask_image, (IMAGE_SIZE,IMAGE_SIZE))
reference_image = resize(reference_image, (IMAGE_SIZE,IMAGE_SIZE))
validation_images.append(mask_image)
validation_references.append(reference_image)
validation_masked_images = []
mask_fn = get_mask_fn(IMAGE_SIZE, PATCH_SIZE)
for mask_image, reference_image in zip(validation_images, validation_references):
mask_image, _, _ = mask_fn(mask_image, reference_image)
validation_masked_images.append(mask_image.numpy())
validation_images = np.array(validation_images).astype('float32')
validation_references = np.array(validation_references).astype('float32')
validation_masked_images = np.array(validation_masked_images).astype('float32')
generator, discriminator = model.make_models()
train(train_dataset, EPOCHS, generator, discriminator, validation_masked_images, validation_references)
if __name__ == "__main__":
print("Starting training")
main() |
py | b410f3d179b4f4428039881775b75737bfc5b67c | # Generated by Django 3.1.3 on 2020-12-02 11:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0020_content_collection'),
('account', '0020_remove_articleviewhistory_category'),
]
operations = [
migrations.AlterModelOptions(
name='commentmessage',
options={'ordering': ('root_id', 'level', '-time'), 'verbose_name': '博文评论回复记录', 'verbose_name_plural': '博文评论回复记录'},
),
migrations.RemoveField(
model_name='contentviewhistory',
name='note',
),
migrations.CreateModel(
name='NoteMessage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField(blank=True, null=True, verbose_name='评论内容')),
('time', models.DateTimeField(auto_now_add=True, verbose_name='评论时间')),
('level', models.IntegerField(default=0, verbose_name='评论等级')),
('like', models.IntegerField(default=0, verbose_name='评论点赞数')),
('reply_id', models.IntegerField(blank=True, default=None, null=True, verbose_name='回复评论ID')),
('root_id', models.IntegerField(blank=True, default=None, null=True, verbose_name='回复根ID')),
('note', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='blog.content', verbose_name='笔记')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户名')),
],
options={
'verbose_name': '笔记评论回复记录',
'verbose_name_plural': '笔记评论回复记录',
'ordering': ('root_id', 'level', '-time'),
},
),
]
|
py | b410f3db8c96e9d832c30692318bbf2dcd18c408 | import os
import numpy as np
import tensorflow as tf
from trainer import Trainer
from hyperparams import Hyperparams as hp
def main():
modeltrainer = Trainer()
modeltrainer.update(hp.__dict__)
params = "{0:25} | {1:25}"
for k,v in modeltrainer.__dict__.items():
if isinstance(v,(str,int,float,np.ndarray)):
print(params.format(k,v))
modeltrainer.build()
modeltrainer.load_ckpts(partial=True)
mobmatt = modeltrainer.mobmatt
input_data = tf.ones((1,hp.image_size,hp.image_size,3), dtype = tf.uint8)
_ = mobmatt(input_data)
tf_model_dir = os.path.join(hp.logdir, 'SavedModel')
export_path = os.path.join(tf_model_dir, f"MobMatt-{hp.image_size}")
mobmatt.save(export_path, include_optimizer=False, save_format='tf')
print(f"TF SavedModel exported to {export_path}")
if __name__ == '__main__':
main() |
py | b410f443e674120b08c9093a8458909d2e1239cd | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Entry point for CloudML training.
CloudML training requires a tarball package and a python module to run. This file
provides such a "main" method and defines the command-line arguments passed to the program.
"""
import argparse
import json
import logging
import os
import tensorflow as tf
from . import _model
from . import _trainer
from . import _util
def main(_):
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_dir',
type=str,
help='The input dir path for training and evaluation data.')
parser.add_argument(
'--job-dir',
dest='job_dir',
type=str,
help='The GCS path to which checkpoints and other outputs should be saved.')
parser.add_argument(
'--max_steps',
type=int,)
parser.add_argument(
'--batch_size',
type=int,
help='Number of examples to be processed per mini-batch.')
parser.add_argument(
'--checkpoint',
type=str,
default=_util._DEFAULT_CHECKPOINT_GSURL,
help='Pretrained inception checkpoint path.')
args, _ = parser.parse_known_args()
labels = _util.get_labels(args.input_dir)
model = _model.Model(labels, 0.5, args.checkpoint)
env = json.loads(os.environ.get('TF_CONFIG', '{}'))
# Print the job data as provided by the service.
logging.info('Original job data: %s', env.get('job', {}))
task_data = env.get('task', None) or {'type': 'master', 'index': 0}
task = type('TaskSpec', (object,), task_data)
cluster_data = env.get('cluster', None)
cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
if not cluster or not task or task.type == 'master' or task.type == 'worker':
_trainer.Trainer(args.input_dir, args.batch_size, args.max_steps,
args.job_dir, model, cluster, task).run_training()
elif task.type == 'ps':
server = _trainer.start_server(cluster, task)
server.join()
else:
raise ValueError('invalid task_type %s' % (task.type,))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
tf.app.run()
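# For reference, TF_CONFIG (parsed above to decide whether this process acts as master,
# worker or parameter server) is a JSON document of roughly the following shape; the host
# names, ports and counts are purely illustrative:
#   TF_CONFIG='{
#     "cluster": {"master": ["host0:2222"],
#                 "worker": ["host1:2222", "host2:2222"],
#                 "ps": ["host3:2222"]},
#     "task": {"type": "worker", "index": 0},
#     "job": {"job_name": "trainer.task", "args": []}
#   }'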
|
py | b410f47953b194037b503731440f10e68468f8db | import logging
import unittest
from nlglib.microplanning import Clause, CC, PP
import nlglib.realisation.simplenlg.client as snlg
class TestSimplenlgClient(unittest.TestCase):
simplenlg_server = None
client = None
@classmethod
def setUpClass(cls):
# jp = 'nlglib/res/simplenlg.jar'
# port = '50007'
cls.test_result = 'Put the piano and the drum into the truck.'
# cls.simplenlg_server = snlg.SimpleNLGServer(jp, port)
# cls.simplenlg_server.start()
# cls.simplenlg_server.wait_for_init()
cls.client = snlg.SimplenlgClient('nlg.kutlak.info', 40000)
# @classmethod
# def tearDownClass(cls):
# # signal that we would like to shut the server down
# if cls.simplenlg_server:
# cls.simplenlg_server.shutdown()
#
# def test_socket(self):
# self.assertIsNotNone(self.simplenlg_server)
# self.simplenlg_server.wait_for_init()
# mysocket = snlg.Socket('', 50007)
# with mysocket as sock:
# n = sock.send_string(test_data)
# self.assertEqual(n, len(test_data))
# msg = sock.recv_string()
# self.assertEqual(self.test_result, msg)
#
# with mysocket as sock:
# n = sock.send_string(test_data)
# self.assertEqual(n, len(test_data))
# msg = sock.recv_string()
# self.assertEqual(self.test_result, msg)
def test_snlg_1(self):
expected = self.test_result
realisation = self.client.xml_request(test_data)
self.assertEqual(expected, realisation)
def test_snlg_2(self):
expected = 'Is indicated by transfusion of whole blood.'
realisation = self.client.xml_request(test_data2)
self.assertEqual(expected, realisation)
def test_snlg_3(self):
expected = 'Roman is not in the office.'
realisation = self.client.xml_request(test_data3)
self.assertEqual(expected, realisation)
def test_snlg_4(self):
expected = 'Roman is not at work.'
realisation = self.client.xml_request(test_data4)
self.assertEqual(expected, realisation)
def test_snlg_5(self):
expected = 'If p then q.'
realisation = self.client.xml_request(test_data5)
self.assertEqual(expected, realisation)
def test_snlg_6(self):
expected = 'There exists X such that p.'
realisation = self.client.xml_request(test_data6)
self.assertEqual(expected, realisation)
def test_snlg_7(self):
        # FIXME: the simplenlg realiser seems to have a problem with coordinated elements
# - missing upper CASE and period
expected = 'if x equals y and p is at location x then p is not at location y'
realisation = self.client.xml_request(test_data7)
self.assertEqual(expected, realisation)
def test_complex_sentence(self):
c1 = Clause('x', 'equal', 'y', front_modifiers=['if'])
c2 = Clause('p', 'be', PP('at', 'location x'), features={'COMPLEMENTISER': 'and'})
c3 = Clause(
'p',
'be',
PP('at', 'location y'),
features={
'NEGATED': 'true',
'COMPLEMENTISER': 'then'
}
)
c2.complements.append(c3)
c1.complements.append(c2)
expected = 'If x equals y and p is at location x then p is not at location y.'
actual = self.client.xml_request(c1.to_xml(headers=True))
self.assertEqual(expected, actual)
def test_coordination(self):
c1 = Clause('x', 'equal', 'y', front_modifiers=['if'])
c2 = Clause('p', 'be', PP('at', 'location x'))
c3 = Clause(
'p',
'be',
PP('at', 'location y'),
features={
'NEGATED': 'true',
'COMPLEMENTISER': 'then'
}
)
c2.complements.append(c3)
c = CC(c1, c2)
expected = 'if x equals y and p is at location x then p is not at location y'
actual = self.client.xml_request(c.to_xml(headers=True))
self.assertEqual(expected, actual)
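# Imperative clause with a coordinated object; realised as
# 'Put the piano and the drum into the truck.' (see test_snlg_1).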
test_data = """\
<?xml version="1.0" encoding="utf-8"?>
<nlg:NLGSpec xmlns="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:nlg="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://simplenlg.googlecode.com/svn/trunk/res/xml ">
<nlg:Request>
<Document cat="PARAGRAPH">
<child xsi:type="SPhraseSpec" FORM="IMPERATIVE" >
<vp xsi:type="VPPhraseSpec" >
<head xsi:type="WordElement" cat="VERB">
<base>put</base>
</head>
<compl xsi:type="CoordinatedPhraseElement" conj="and" discourseFunction="OBJECT" >
<coord xsi:type="NPPhraseSpec" >
<spec xsi:type="WordElement" cat="DETERMINER">
<base>the</base>
</spec>
<head xsi:type="WordElement" cat="NOUN">
<base>piano</base>
</head>
</coord>
<coord xsi:type="NPPhraseSpec" >
<spec xsi:type="WordElement" cat="DETERMINER">
<base>the</base>
</spec>
<head xsi:type="WordElement" cat="NOUN">
<base>drum</base>
</head>
</coord>
</compl>
<compl xsi:type="PPPhraseSpec" >
<head xsi:type="WordElement" cat="PREPOSITION">
<base>into</base>
</head>
<compl xsi:type="NPPhraseSpec" >
<spec xsi:type="WordElement" cat="DETERMINER">
<base>the</base>
</spec>
<head xsi:type="WordElement" cat="NOUN">
<base>truck</base>
</head>
</compl>
</compl>
</vp>
</child>
</Document>
</nlg:Request>
</nlg:NLGSpec>
"""
test_data2 = """\
<?xml version="1.0" encoding="utf-8"?>
<nlg:NLGSpec xmlns="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:nlg="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://simplenlg.googlecode.com/svn/trunk/res/xml ">
<nlg:Request>
<Document cat="PARAGRAPH">
<child xsi:type="SPhraseSpec">
<subj xsi:type="NPPhraseSpec">
<head xsi:type="WordElement" cat="NOUN">
<base>transfusion of whole blood</base>
</head>
</subj>
<vp xsi:type="VPPhraseSpec" PASSIVE="true" TENSE="PRESENT">
<head cat="VERB">
<base>indicate</base>
</head>
</vp>
</child>
</Document>
</nlg:Request>
</nlg:NLGSpec>
"""
test_data3 = """\
<?xml version="1.0" encoding="utf-8"?>
<nlg:NLGSpec xmlns="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:nlg="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://simplenlg.googlecode.com/svn/trunk/res/xml ">
<nlg:Request>
<Document cat="PARAGRAPH">
<child xsi:type="SPhraseSpec">
<subj xsi:type="NPPhraseSpec">
<head xsi:type="WordElement" cat="NOUN">
<base>Roman</base>
</head>
</subj>
<vp xsi:type="VPPhraseSpec" NEGATED="true">
<head cat="VERB">
<base>be</base>
</head>
<compl xsi:type="PPPhraseSpec" >
<head xsi:type="WordElement" cat="PREPOSITION">
<base>in</base>
</head>
<compl xsi:type="NPPhraseSpec" >
<spec xsi:type="WordElement" cat="DETERMINER">
<base>the</base>
</spec>
<head xsi:type="WordElement" cat="NOUN">
<base>office</base>
</head>
</compl>
</compl>
</vp>
</child>
</Document>
</nlg:Request>
</nlg:NLGSpec>
"""
test_data4 = """\
<?xml version="1.0" encoding="utf-8"?>
<nlg:NLGSpec xmlns="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:nlg="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://simplenlg.googlecode.com/svn/trunk/res/xml ">
<nlg:Request>
<Document cat="PARAGRAPH">
<child xsi:type="SPhraseSpec" NEGATED="true">
<subj xsi:type="WordElement" canned="true">
<base>Roman</base>
</subj>
<vp xsi:type="VPPhraseSpec">
<head xsi:type="WordElement" cat="VERB">
<base>be</base>
</head>
<compl xsi:type="PPPhraseSpec">
<head xsi:type="WordElement" cat="PREPOSITION">
<base>at</base>
</head>
<compl xsi:type="WordElement" canned="true">
<base>work</base>
</compl>
</compl>
</vp>
</child>
</Document>
</nlg:Request>
</nlg:NLGSpec>
"""
test_data5 = """\
<?xml version="1.0" encoding="utf-8"?>
<nlg:NLGSpec xmlns="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:nlg="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://simplenlg.googlecode.com/svn/trunk/res/xml ">
<nlg:Request>
<Document cat="PARAGRAPH">
<child xsi:type="SPhraseSpec">
<frontMod xsi:type="WordElement" canned="true">
<base>if</base>
</frontMod>
<subj xsi:type="WordElement" canned="true">
<base>p</base>
</subj>
<vp xsi:type="VPPhraseSpec">
<head xsi:type="WordElement" cat="ADVERB">
<base>then</base>
</head>
<compl xsi:type="WordElement" canned="true">
<base>q</base>
</compl>
</vp>
</child>
</Document>
</nlg:Request>
</nlg:NLGSpec>
"""
test_data6 = """\
<?xml version="1.0" encoding="utf-8"?>
<nlg:NLGSpec xmlns="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:nlg="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://simplenlg.googlecode.com/svn/trunk/res/xml ">
<nlg:Request>
<Document cat="PARAGRAPH">
<child xsi:type="SPhraseSpec" PERSON="THIRD">
<subj xsi:type="NPPhraseSpec">
<head xsi:type="WordElement" canned="true" >
<base>there</base>
</head>
</subj>
<vp xsi:type="VPPhraseSpec">
<head xsi:type="WordElement" canned="true" >
<base>exist</base>
</head>
<compl xsi:type="WordElement" canned="true" >
<base>X</base>
</compl>
</vp>
<compl xsi:type="SPhraseSpec" COMPLEMENTISER="such+that">
<subj xsi:type="NPPhraseSpec">
<head xsi:type="WordElement" canned="true" >
<base>p</base>
</head>
</subj>
</compl>
</child>
</Document>
</nlg:Request>
</nlg:NLGSpec>
"""
test_data7 = """\
<?xml version="1.0" encoding="utf-8"?>
<nlg:NLGSpec xmlns="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:nlg="http://simplenlg.googlecode.com/svn/trunk/res/xml"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://simplenlg.googlecode.com/svn/trunk/res/xml ">
<nlg:Request>
<Document cat="PARAGRAPH">
<child xsi:type="CoordinatedPhraseElement" conj="and">
<coord xsi:type="SPhraseSpec">
<frontMod xsi:type="WordElement" canned="true" >
<base>if</base>
</frontMod>
<subj xsi:type="NPPhraseSpec">
<head xsi:type="WordElement" canned="true" >
<base>x</base>
</head>
</subj>
<vp xsi:type="VPPhraseSpec">
<head xsi:type="WordElement" canned="true" >
<base>equal</base>
</head>
<compl xsi:type="WordElement" canned="true" >
<base>y</base>
</compl>
</vp>
</coord>
<coord xsi:type="SPhraseSpec">
<subj xsi:type="NPPhraseSpec">
<head xsi:type="WordElement" canned="true" >
<base>p</base>
</head>
</subj>
<vp xsi:type="VPPhraseSpec">
<head xsi:type="WordElement" canned="true" >
<base>be</base>
</head>
<compl xsi:type="PPPhraseSpec">
<head xsi:type="WordElement" cat="PREPOSITION">
<base>at</base>
</head>
<compl xsi:type="WordElement" canned="true" >
<base>location+x</base>
</compl>
</compl>
</vp>
<compl xsi:type="SPhraseSpec" NEGATED="true" COMPLEMENTISER="then">
<subj xsi:type="NPPhraseSpec">
<head xsi:type="WordElement" canned="true" >
<base>p</base>
</head>
</subj>
<vp xsi:type="VPPhraseSpec">
<head xsi:type="WordElement" canned="true" >
<base>be</base>
</head>
<compl xsi:type="PPPhraseSpec">
<head xsi:type="WordElement" cat="PREPOSITION">
<base>at</base>
</head>
<compl xsi:type="WordElement" canned="true" >
<base>location+y</base>
</compl>
</compl>
</vp>
</compl>
</coord>
</child>
</Document>
</nlg:Request>
</nlg:NLGSpec>
"""
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
unittest.main()
|