code (string, 3 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M)
---|---|---|---|---|---
from django.conf.urls import include, url
urlpatterns = [
url(r'^avatar/', include('avatar.urls')),
]
| MachineandMagic/django-avatar | tests/urls.py | Python | bsd-3-clause | 108 |
from testing.test_interpreter import BaseTestInterpreter
class TestArray(BaseTestInterpreter):
def test_max(self):
output = self.run('''
$max = array(6, 6, 4);
$res = max(
array(0, 1, 2),
array(2, 3, 4),
array(6, 3, 4),
array(0, 0, 2),
array(6, 3, 4),
array(0, 0, 2),
$max,
array(0, 0, 2)
);
echo $res == $max;
$res = max(
array(0, 1, 2),
array(2, 3, 4),
array(6, 3, 4),
array(0, 0, 2),
array(6, 3, 4),
array(0, 0, 2),
$max,
array(0, 0, 2)
);
echo $res == $max;
$res = max(
array(0, 1, 2),
array(2, 3, 4),
$max,
array(6, 3, 4),
array(0, 0, 2),
array(6, 3, 4),
array(0, 0, 2),
array(0, 0, 2)
);
echo $res == $max;
$res = max(
$max,
array(0, 1, 2),
array(2, 3, 4),
array(6, 3, 4),
array(0, 0, 2),
array(6, 3, 4),
array(0, 0, 2),
array(0, 0, 2)
);
echo $res == $max;
$res = max(
array(0, 1, 2),
array(2, 3, 4),
array(6, 3, 4),
array(0, 0, 2),
array(6, 3, 4),
array(0, 0, 2),
array(0, 0, 2),
$max
);
echo $res == $max;
echo max(1, 3, 5, 6, 7); // 7
echo max(array(2, 4, 5)); // 5
echo max(0, 'hello'); // 0
echo max('hello', 0); // hello
echo max(-1, 'hello'); // hello
$a = max('string', array(2, 5, 7), 42);
echo $a[2];
echo max('7iuwmssuxue', 1); //returns 7iuwmssuxue
echo max('-7suidha', -4); //returns -4
echo max('sdihatewin7wduiw', 3); //returns 3
$d1 = array(450,420,440,430,421);
$d2 = array(460,410,410,430,413,375,256,411,656);
$d3 = array(430,440,470,435,434,255,198);
echo max(max($d1),max($d2),max($d3));
echo max(array("", "t", "b"));
echo max(array(1, true, false, true));
echo max("", "t", "b");
echo max(1, true, false, true);
''')
space = self.space
assert space.int_w(output[0]) == 1
assert space.int_w(output[1]) == 1
assert space.int_w(output[2]) == 1
assert space.int_w(output[3]) == 1
assert space.int_w(output[4]) == 1
assert space.str_w(output[5]) == "7"
assert space.str_w(output[6]) == "5"
assert space.str_w(output[7]) == "0"
assert space.str_w(output[8]) == "hello"
assert space.str_w(output[9]) == "hello"
assert space.int_w(output[10]) == 7
assert space.str_w(output[11]) == "7iuwmssuxue"
assert space.str_w(output[12]) == "-4"
assert space.str_w(output[13]) == "3"
assert space.str_w(output[14]) == "656"
assert space.str_w(output[15]) == "t"
assert space.str_w(output[16]) == "1"
assert space.str_w(output[17]) == "t"
assert space.str_w(output[18]) == "1"
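# The expected outputs above encode (pre-PHP 8) max() comparison rules: arrays
# compare greater than scalars and are compared element by element, numeric
# strings compare by their numeric prefix ('7iuwmssuxue' beats 1), a number vs.
# a non-numeric string casts the string to 0 (so -1 loses to 'hello'), and on a
# type-juggled tie (0 == 'hello', 1 == true) the first argument given wins.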
| ericpp/hippyvm | testing/test_math.py | Python | mit | 3,013 |
"""Module for resetting states.py"""
from os import _exit
from states import game_states
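# Hypothetical shape of states.py, assumed by the loop below: game_states maps
# each game state to a dict of per-move weights, e.g.
# game_states = {'some_state': {'a': 3, 's': 1, 'd': 7}, ...}
# Resetting simply drives every integer weight back to 1.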
while True:
CONFIRM_RESET = input("""
Are you sure you want to reset the game states?\n
Doing so will erase everything the AI has learned while playing.\n
Erase everything? [Y/n] """)
if CONFIRM_RESET.upper() == 'Y':
break
elif CONFIRM_RESET.upper() == 'N':
print("\n\nReset canceled")
_exit(0)
MOVES = ['a', 's', 'd']
for state in game_states:
for move in MOVES:
if type(game_states[state][move]) == int:
game_states[state][move] = 1
with open("states.py", "w") as states:
states.truncate()
states.write('game_states='+str(game_states))
states.close()
print("\n\nReset complete")
| DavidGrey/game-theory_gunner | shotgun/reset_states.py | Python | mit | 750 |
#!/usr/bin/python3 -i
import input_data
from input_data import DataSet
from tensorflow.python.framework import dtypes
from os import path
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pdb
import logging
from analysis_model_divergence import get_dist_model_distance, Metrics
import ilogger
logger = ilogger.setup_logger(__name__)
class MNISTSoftmaxRegression(object):
def __init__(self, minibatch_size, learning_rate, n_iterations, mnist_train=None, model_name='classifier', write_summary=False, alpha=0.1):
self.minibatch_size = minibatch_size
self.learning_rate = learning_rate
self.n_iterations = n_iterations
self.model_name = model_name
# only ONE summary key is supported
self.write_summary = write_summary
self.summaries = ['/'.join([tf.GraphKeys.SUMMARIES, self.model_name])]
self.alpha = alpha
self.total_iterations = 0
self.history_W = []
self.history_b = []
self.history_accuracy = []
if mnist_train is None:
self.load_mnist_data()
else:
self.mnist_train = mnist_train
logger.info('data for {0} : images.shape = {1}, labels.shape = {2}'.format(model_name, self.mnist_train.images.shape, self.mnist_train.labels.shape))
self.construct_model()
def load_mnist_data(self):
self.mnist_train = input_data.read_data_sets("MNIST_data/", one_hot=True).train
def construct_model(self):
def weight_variable(shape, name):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=name)
def bias_variable(shape, name):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=name)
def l2(weights):
return tf.nn.l2_loss(weights)
def l1(weights):
return tf.reduce_sum( tf.abs(weights) )
with tf.variable_scope(self.model_name):
# place holder for input with unknown number of examples
with tf.variable_scope('features'):
self.x = tf.placeholder(tf.float32, [None, 784], name='x')
# model parameters
# model: y_hat = softmax( W^T x + b )
# W.shape = n_features x n_classes
# also assign model parameters if needed
with tf.variable_scope('model_parameters'):
self.W_name_list = []
self.W_list = []
self.W_shape_list = [
[784, 10],
]
self.W_assign_value_list = []
self.W_assign_list = []
self.b_name_list = []
self.b_list = []
self.b_assign_value_list = []
self.b_assign_list = []
for i, W_shape in enumerate(self.W_shape_list):
b_shape = W_shape[-1:]
W_name = 'W{0}'.format(i)
b_name = 'b{0}'.format(i)
W = weight_variable(W_shape, W_name)
b = bias_variable(b_shape, b_name)
W_assign_value = tf.placeholder(tf.float32, W_shape, name='{0}_assign_value'.format(W_name))
b_assign_value = tf.placeholder(tf.float32, b_shape, name='{0}_assign_value'.format(b_name))
W_assign = tf.assign(W, W_assign_value, name='{0}_assign'.format(W_name))
b_assign = tf.assign(b, b_assign_value, name='{0}_assign'.format(b_name))
self.W_name_list.append(W_name)
self.W_list.append(W)
self.W_assign_value_list.append(W_assign_value)
self.W_assign_list.append(W_assign)
self.b_name_list.append(b_name)
self.b_list.append(b)
self.b_assign_value_list.append(b_assign_value)
self.b_assign_list.append(b_assign)
network = self.x
# output
with tf.variable_scope('softmax_layer'):
network = tf.add(tf.matmul(network, self.W_list[0]), self.b_list[0])
network = tf.nn.softmax(network, name='softmax_output')
self.y = network
# labels and training
with tf.variable_scope('training'):
self.y_ = tf.placeholder(tf.float32, [None, 10], name='labels')
self.cross_entropy = tf.reduce_mean( -tf.reduce_sum(self.y_ * tf.log(self.y), reduction_indices=[1]), name='cross_entropy' )
self.regularizer_list = [
self.alpha * l2(self.W_list[0]),
self.alpha * l2(self.b_list[0]),
]
self.total_loss = self.cross_entropy + sum(self.regularizer_list)
global_step = tf.Variable(0, trainable=False)
adaptive_learning_rate = tf.train.exponential_decay(self.learning_rate, global_step,
1000, 0.9)
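# NOTE: adaptive_learning_rate (decayed by 0.9 every 1000 steps) and global_step
# are defined but not wired into the optimizer below, which uses the constant
# self.learning_rate; enabling decay would mean passing adaptive_learning_rate
# to GradientDescentOptimizer and global_step to minimize().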
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
self.train_step = optimizer.minimize(self.total_loss)
# evaluation
with tf.variable_scope('evaluation'):
self.correct_prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_,1))
self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
# summary
tf.summary.scalar('xentropy', self.cross_entropy, collections=self.summaries)
tf.summary.scalar('accuracy', self.accuracy, collections=self.summaries)
self.merge_summaries = tf.summary.merge_all(self.summaries[0])
# get session
if tf.get_default_session():
logger.info('default session available; using default session for model')
self.sess = tf.get_default_session()
else:
self.sess = tf.Session()
# initialization
with tf.variable_scope('initialize'):
model_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.model_name)
self.init = tf.variables_initializer(model_variables)
self.sess.run(self.init)
def get_W_list_values(self):
W_list_values = [ W.eval(session=self.sess) for W in self.W_list ]
return W_list_values
def set_W_list_values(self, W_list_values):
if not hasattr(self, 'history_W'):
self.history_W = []
self.history_W.append( self.get_W_list_values() )
for W_assign, W_assign_value, W_value in zip(self.W_assign_list, self.W_assign_value_list, W_list_values):
self.sess.run(W_assign, feed_dict={W_assign_value: W_value})
def get_b_list_values(self):
b_list_values = [ b.eval(session=self.sess) for b in self.b_list ]
return b_list_values
def set_b_list_values(self, b_list_values):
if not hasattr(self, 'history_b'):
self.history_b = []
self.history_b.append( self.get_b_list_values() )
for b_assign, b_assign_value, b_value in zip(self.b_assign_list, self.b_assign_value_list, b_list_values):
self.sess.run(b_assign, feed_dict={b_assign_value: b_value})
def train_model(self):
if self.write_summary:
summary_dir = path.join(path.curdir, 'summary', 'train', self.model_name)
summary_writer = tf.summary.FileWriter(summary_dir, self.sess.graph)
for i in range(self.n_iterations):
self.total_iterations += 1
batch_xs, batch_ys = self.mnist_train.next_batch(self.minibatch_size)
if self.write_summary:
summary, _ = self.sess.run([self.merge_summaries, self.train_step], feed_dict={self.x: batch_xs, self.y_: batch_ys})
summary_writer.add_summary(summary, i)
else:
self.sess.run(self.train_step, feed_dict={self.x: batch_xs, self.y_: batch_ys})
if hasattr(self, 'test_data'):
accuracy = self.evaluate_model(self.test_data)
self.history_accuracy.append( (self.total_iterations, accuracy) )
def evaluate_model(self, test_data):
accuracy_eval = self.sess.run(self.accuracy, feed_dict={self.x: test_data.images, self.y_: test_data.labels})
return accuracy_eval
class DistSimulation(MNISTSoftmaxRegression):
def __init__(self, n_machines, common_examples_fraction, sync_iterations, averaging_interval, *args, **kwargs):
self.n_machines = n_machines
self.common_examples_fraction = common_examples_fraction
self.sync_iterations = sync_iterations
self.averaging_interval = averaging_interval
self._initialize_same = False
self._sample_with_replacement = False
self._adaptive_sampling_scheme = False
self.history_dist_model_distance = []
super().__init__(*args, **kwargs)
@property
def sample_with_replacement(self):
return self._sample_with_replacement
@sample_with_replacement.setter
def sample_with_replacement(self, setting):
assert isinstance(setting, bool), "setting should be boolean"
self._sample_with_replacement = setting
@property
def adaptive_sampling_scheme(self):
return self._adaptive_sampling_scheme
@adaptive_sampling_scheme.setter
def adaptive_sampling_scheme(self, setting):
assert isinstance(setting, bool), "setting should be boolean"
self._adaptive_sampling_scheme = setting
@property
def initialize_same(self):
return self._initialize_same
@initialize_same.setter
def initialize_same(self, setting):
assert isinstance(setting, bool), "setting should be boolean"
self._initialize_same = setting
def train_model(self):
self.partition_data()
self.train_distributed_models()
def partition_data(self):
n_examples = self.mnist_train.images.shape[0]
random_order = np.random.permutation(n_examples)
n_common_examples = int(self.common_examples_fraction * n_examples)
n_subset_examples = int( (n_examples-n_common_examples)/self.n_machines )
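# Data split sketch: every machine receives the same n_common_examples plus a
# disjoint private slice of n_subset_examples, so each machine trains on
# n_common_examples + n_subset_examples images in total.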
common_examples_indices = random_order[0:n_common_examples]
common_examples = self.mnist_train.images[common_examples_indices, :]
common_examples_labels = self.mnist_train.labels[common_examples_indices, :]
if self.sync_iterations:
n_examples_per_machine = n_common_examples+n_subset_examples
n_epochs_per_machine = int(np.ceil(self.n_iterations*self.minibatch_size/n_examples_per_machine))
perm_list = self.get_permutations(n_epochs_per_machine, n_examples_per_machine, n_common_examples, n_subset_examples)
self.training_data_sets = []
for i_machine in range(self.n_machines):
slice_start = n_common_examples + n_subset_examples*i_machine
slice_end = n_common_examples + n_subset_examples*(i_machine+1)
subset_examples_indices = random_order[slice_start:slice_end]
subset_examples = self.mnist_train.images[subset_examples_indices, :]
subset_examples_labels = self.mnist_train.labels[subset_examples_indices, :]
images = np.concatenate([common_examples, subset_examples], axis=0)
labels = np.concatenate([common_examples_labels, subset_examples_labels], axis=0)
# use dtype=dtypes.uint8 to prevent the DataSet class from scaling the features by 1/255
data_set = DataSet(images, labels, reshape=False, dtype=dtypes.uint8)
if self.sync_iterations:
data_set.perm_list = perm_list
self.training_data_sets.append( data_set )
def train_distributed_models(self):
assert self.averaging_interval <= self.n_iterations, "averaging_interval MUST be <= n_iterations"
self.distributed_models = []
for i_machine, mnist_train in enumerate(self.training_data_sets):
dist_model_name = '/'.join([self.model_name, 'dist_model_{0}'.format(i_machine)])
model = MNISTSoftmaxRegression(self.minibatch_size, self.learning_rate, self.averaging_interval,
mnist_train, model_name=dist_model_name,
write_summary=self.write_summary,
alpha=self.alpha)
if hasattr(self, 'test_data'):
model.test_data = self.test_data
self.distributed_models.append(model)
# why use the slice [1:]?
# because index [0] == 0, and a stride of zero iterations should not be considered
train_stride_list = np.arange(0, self.n_iterations+1, self.averaging_interval)[1:]
for stride_n, train_stride in enumerate(train_stride_list):
self.total_iterations += self.averaging_interval
for model in self.distributed_models:
if stride_n == 0 and not self._initialize_same:
logger.debug('initializing distributed models with different values')
break
else:
logger.debug('initializing distributed models with same values')
model.set_W_list_values(self.get_W_list_values())
model.set_b_list_values(self.get_b_list_values())
for model in self.distributed_models:
model.train_model()
dist_model_distance_matrix = get_dist_model_distance(self)
self.history_dist_model_distance.append( (self.total_iterations, dist_model_distance_matrix) )
self.combine_distributed_models()
if hasattr(self, 'test_data'):
accuracy = self.evaluate_model(self.test_data)
self.history_accuracy.append( (self.total_iterations, accuracy) )
def combine_distributed_models(self):
logger.debug('combine_distributed_models()')
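# One-shot parameter averaging: the combined model's W and b are the
# element-wise means of the per-machine parameters (federated-averaging style).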
W_list = []
b_list = []
for i_machine, model in enumerate(self.distributed_models):
W_list.append( model.get_W_list_values() )
b_list.append( model.get_b_list_values() )
W_avg = np.mean(W_list, axis=0)
b_avg = np.mean(b_list, axis=0)
self.set_W_list_values(W_avg)
self.set_b_list_values(b_avg)
def evaluate_distributed_models(self, test_data):
accuracy_list = []
for model in self.distributed_models:
accuracy_list.append( model.evaluate_model(test_data) )
return accuracy_list
def get_permutations(self, num_perms, perm_length, n_common_examples, n_subset_examples):
perm_list = []
for perm_n in range(num_perms):
n_total_examples = n_common_examples + n_subset_examples
indices = np.arange(n_total_examples)
if self._adaptive_sampling_scheme and n_common_examples>0:
# maximum possible probability bias
# factor of 0.9 to prevent sampling error for without replacement
p_bias = n_subset_examples/n_total_examples * 0.9
else:
p_bias = 0
decay_coef = 1
try:
p_common_examples = 1/n_total_examples + p_bias/n_common_examples * np.exp(-decay_coef*perm_n)
except ZeroDivisionError:
p_common_examples = 0
try:
p_subset_examples = 1/n_total_examples - p_bias/n_subset_examples * np.exp(-decay_coef*perm_n)
except ZeroDivisionError:
p_subset_examples = 0
assert p_common_examples >= 0 and p_common_examples <= 1, "0 <= p_common_examples <= 1 MUST hold true"
assert p_subset_examples >= 0 and p_subset_examples <= 1, "0 <= p_subset_examples <= 1 MUST hold true"
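# Sanity check on the biased sampling distribution: the probabilities sum to 1,
# since n_common_examples*p_common_examples + n_subset_examples*p_subset_examples
# = (n_common_examples + n_subset_examples)/n_total_examples
#   + p_bias*exp(-decay_coef*perm_n) - p_bias*exp(-decay_coef*perm_n) = 1.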
p = np.concatenate([ np.repeat(p_common_examples, n_common_examples),
np.repeat(p_subset_examples, n_subset_examples),
], axis=0)
perm = np.random.choice(indices, perm_length, replace=self._sample_with_replacement, p=p)
perm_list.append(perm) # collect one permutation per epoch (inside the loop over perm_n)
return perm_list
if __name__=='__main__':
ilogger.setup_root_logger('/dev/null', logging.DEBUG)
minibatch_size = 1
learning_rate = 0.01
n_iterations = 100
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
mnist_classifier = MNISTSoftmaxRegression(minibatch_size, learning_rate,
n_iterations, mnist.train,
model_name='UnifiedClassifier',
write_summary=False,
alpha=0.1)
mnist_classifier.test_data = mnist.test
mnist_classifier.train_model()
accuracy = mnist_classifier.evaluate_model(mnist.test)
logger.info('unified model accuracy = {0}'.format(accuracy))
n_machines = 4
common_examples_fraction = 1
sync_iterations = True
averaging_interval = n_iterations
mnist_distributed = DistSimulation(n_machines, common_examples_fraction, sync_iterations,
averaging_interval,
minibatch_size, learning_rate, n_iterations,
mnist.train, model_name='DistributedClassifier',
write_summary=False,
alpha=0.1)
mnist_distributed.test_data = mnist.test
# mnist_distributed.initialize_same = True
# mnist_distributed.sample_with_replacement = True
# mnist_distributed.adaptive_sampling_scheme = True
mnist_distributed.train_model()
combined_model_accuracy = mnist_distributed.evaluate_model(mnist.test)
dist_model_accuracy_list = mnist_distributed.evaluate_distributed_models(mnist.test)
logger.info('combined_model_accuracy = {0}'.format(combined_model_accuracy))
logger.info('dist_model_accuracy_list: {0}'.format(dist_model_accuracy_list))
| saraghav/DistributedLearningProject | src/mnist_distributed_sim_strongly_convex.py | Python | gpl-3.0 | 18,393 |
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
from datetime import datetime, timedelta
import json
from nose.tools import (
assert_equal,
assert_false,
assert_is_instance,
assert_not_in,
assert_raises,
assert_true
)
from streamalert.shared.alert import Alert, AlertCreationError
class TestAlert:
"""Test shared Alert class."""
# pylint: disable=no-self-use,protected-access,too-many-public-methods
@staticmethod
def _basic_alert():
return Alert('test_rule', {'abc': 123}, {'aws-firehose:alerts', 'aws-sns:test-output'})
@staticmethod
def _customized_alert():
return Alert(
'test_rule',
{'abc': 123},
{'aws-firehose:alerts', 'aws-sns:test-output', 'aws-s3:other-output'},
alert_id='abc-123',
attempts=1,
cluster='',
context={'rule': 'context'},
created=datetime.utcnow(),
dispatched=datetime.utcnow(),
log_source='source',
log_type='csv',
merge_by_keys=['abc'],
merge_window=timedelta(minutes=5),
outputs_sent={'aws-sns:test-output'},
rule_description='A Test Rule',
source_entity='entity',
source_service='s3',
staged=True
)
def test_alert_encoder_invalid_json(self):
"""Alert Class - Alert Encoder - Invalid JSON raises parent exception"""
assert_raises(TypeError, json.dumps, RuntimeWarning, default=list)
def test_init_invalid_kwargs(self):
"""Alert Class - Init With Invalid Kwargs"""
assert_raises(AlertCreationError, Alert, '', {}, set(), cluster='test', invalid='nonsense')
def test_ordering(self):
"""Alert Class - Alerts Are Sorted By Creation"""
alerts = [self._basic_alert() for _ in range(5)]
assert_equal(alerts, sorted([alerts[0], alerts[3], alerts[1], alerts[4], alerts[2]]))
def test_repr(self):
"""Alert Class - Complete Alert Representation"""
assert_is_instance(repr(self._basic_alert()), str)
assert_is_instance(repr(self._customized_alert()), str)
def test_str(self):
"""Alert Class - To String"""
alert = self._customized_alert()
assert_equal('<Alert abc-123 triggered from test_rule>', str(alert))
def test_dynamo_key(self):
"""Alert Class - Dynamo Key"""
alert = self._customized_alert()
assert_equal({'RuleName': 'test_rule', 'AlertID': 'abc-123'}, alert.dynamo_key)
def test_remaining_outputs_merge_disabled(self):
"""Alert Class - Remaining Outputs - No Merge Information"""
alert = self._basic_alert()
assert_equal(alert.outputs, alert.remaining_outputs)
# One output sent successfully
alert.outputs_sent = {'aws-sns:test-output'}
assert_equal({'aws-firehose:alerts'}, alert.remaining_outputs)
# All outputs sent successfully
alert.outputs_sent = {'aws-firehose:alerts', 'aws-sns:test-output'}
assert_equal(set(), alert.remaining_outputs)
def test_remaining_outputs_merge_enabled(self):
"""Alert Class - Remaining Outputs - With Merge Config"""
# Only the required firehose output shows as remaining
assert_equal({'aws-firehose:alerts'}, self._customized_alert().remaining_outputs)
def test_dynamo_record(self):
"""Alert Class - Dynamo Record"""
# Make sure there are no empty strings nor sets (not allowed in Dynamo)
alert = Alert(
'test_rule', {}, {'aws-sns:test-output'},
cluster='',
created='',
log_source='',
log_type='',
outputs_sent=set(),
rule_description='',
source_entity='',
source_service=''
)
record = alert.dynamo_record()
assert_not_in('', list(record.values()))
assert_not_in(set(), list(record.values()))
def test_create_from_dynamo_record(self):
"""Alert Class - Create Alert from Dynamo Record"""
alert = self._customized_alert()
# Converting to a Dynamo record and back again should result in the exact same alert
record = alert.dynamo_record()
new_alert = Alert.create_from_dynamo_record(record)
assert_equal(alert.dynamo_record(), new_alert.dynamo_record())
def test_create_from_dynamo_record_invalid(self):
"""Alert Class - AlertCreationError raised for an invalid Dynamo Record"""
assert_raises(AlertCreationError, Alert.create_from_dynamo_record, {})
def test_output_dict(self):
"""Alert Class - Output Dict"""
alert = self._basic_alert()
result = alert.output_dict()
# Ensure result is JSON-serializable (no sets)
assert_is_instance(json.dumps(result), str)
# Ensure result is Athena compatible (no None values)
assert_not_in(None, list(result.values()))
def test_can_merge_no_config(self):
"""Alert Class - Can Merge - False if Either Alert Does Not Have Merge Config"""
assert_false(self._basic_alert().can_merge(self._customized_alert()))
assert_false(self._customized_alert().can_merge(self._basic_alert()))
def test_can_merge_too_far_apart(self):
"""Alert Class - Can Merge - False if Outside Merge Window"""
alert1 = Alert(
'', {'key': True}, set(),
created=datetime(year=2000, month=1, day=1, minute=0),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
alert2 = Alert(
'', {'key': True}, set(),
created=datetime(year=2000, month=1, day=1, minute=11),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
assert_false(alert1.can_merge(alert2))
assert_false(alert2.can_merge(alert1))
def test_can_merge_different_merge_keys(self):
"""Alert Class - Can Merge - False if Different Merge Keys Defined"""
alert1 = Alert(
'', {'key': True}, set(),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
alert2 = Alert(
'', {'key': True}, set(),
merge_by_keys=['other'],
merge_window=timedelta(minutes=10)
)
assert_false(alert1.can_merge(alert2))
assert_false(alert2.can_merge(alert1))
def test_can_merge_key_not_common(self):
"""Alert Class - Can Merge - False if Merge Key Not Present in Both Records"""
alert1 = Alert(
'', {'key': True}, set(),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
alert2 = Alert(
'', {'other': True}, set(),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
assert_false(alert1.can_merge(alert2))
assert_false(alert2.can_merge(alert1))
def test_can_merge_different_values(self):
"""Alert Class - Can Merge - False if Merge Key has Different Values"""
alert1 = Alert(
'', {'key': True}, set(),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
alert2 = Alert(
'', {'key': False}, set(),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
assert_false(alert1.can_merge(alert2))
assert_false(alert2.can_merge(alert1))
def test_can_merge_merge_keys_absent(self):
"""Alert Class - Can Merge - True if Merge Keys Do Not Exist in Either Record"""
alert1 = Alert('', {}, set(), merge_by_keys=['key'], merge_window=timedelta(minutes=10))
alert2 = Alert('', {}, set(), merge_by_keys=['key'], merge_window=timedelta(minutes=10))
assert_true(alert1.can_merge(alert2))
assert_true(alert2.can_merge(alert1))
def test_can_merge_true(self):
"""Alert Class - Can Merge - True Result"""
alert1 = Alert(
'', {'key': True}, set(),
created=datetime(year=2000, month=1, day=1, minute=0),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
alert2 = Alert(
'', {'key': True, 'other': True}, set(),
created=datetime(year=2000, month=1, day=1, minute=10),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
assert_true(alert1.can_merge(alert2))
assert_true(alert2.can_merge(alert1))
def test_compute_common_empty_record(self):
"""Alert Class - Compute Common - Empty Record List"""
assert_equal({}, Alert._compute_common([]))
def test_compute_common_single_record(self):
"""Alert Class - Compute Common - Single Record"""
# The greatest common subset of a single record is itself
record = {'a': 1, 'b': 2, 'c': {'d': {'e': 3}}}
assert_equal(record, Alert._compute_common([record]))
def test_compute_common_top_level(self):
"""Alert Class - Compute Common - No Nested Dictionaries"""
record1 = {'a': 1, 'b': 2, 'c': 3}
record2 = {'b': 2, 'c': 3, 'd': 4}
record3 = {'c': 3, 'd': 4, 'e': 5}
assert_equal({'c': 3}, Alert._compute_common([record1, record2, record3]))
def test_compute_common_no_similarities(self):
"""Alert Class - Compute Common - Empty Common Set"""
record1 = {'a': -1, 'b': -2, 'c': -3, 'd': {'e': 0}}
record2 = {'a': 1, 'b': 2, 'c': 3}
assert_equal({}, Alert._compute_common([record1, record2]))
def test_compute_common_partial_nested(self):
"""Alert Class - Compute Common - Some Common Features in Nested Dictionary"""
# This is the example given in the docstring
record1 = {'abc': 123, 'nested': {'A': 1, 'B': 2}}
record2 = {'abc': 123, 'def': 456, 'nested': {'A': 1}}
assert_equal({'abc': 123, 'nested': {'A': 1}}, Alert._compute_common([record1, record2]))
def test_compute_common_different_types(self):
"""Alert Class - Compute Common - Same Keys, Different Types"""
record1 = {'a': 1, 'b': None, 'c': {'d': {'e': 5}, 'f': {'g': 6}}}
record2 = {'a': '1', 'b': 0, 'c': []}
assert_equal({}, Alert._compute_common([record1, record2]))
def test_compute_common_many_nested(self):
"""Alert Class - Compute Common - Multiple Levels of Nesting"""
record1 = {
'a': {
'b': {
'c': 3,
'd': 4
},
'e': {
'h': {
'i': 9
}
},
'j': {}
}
}
record2 = {
'a': {
'b': {
'c': 3,
},
'e': {
'f': {
'g': 8
},
'h': {}
},
'j': {}
}
}
expected = {
'a': {
'b': {
'c': 3
},
'j': {}
}
}
assert_equal(expected, Alert._compute_common([record1, record2]))
def test_compute_common_all_identical(self):
"""Alert Class - Compute Common - Identical Records"""
record = {'a': 1, 'b': 2, 'c': {'d': {'e': 3}}}
assert_equal(record, Alert._compute_common([record] * 4))
def test_compute_diff_no_common(self):
"""Alert Class - Compute Diff - No Common Set"""
record = {'a': 1, 'b': 2, 'c': {'d': {'e': 3}}}
assert_equal(record, Alert._compute_diff({}, record))
def test_compute_diff_no_diff(self):
"""Alert Class - Compute Diff - Record Identical to Common"""
record = {'a': 1, 'b': 2, 'c': {'d': {'e': 3}}}
common = record
assert_equal({}, Alert._compute_diff(common, record))
def test_compute_diff_top_level(self):
"""Alert Class - Compute Diff - Top Level Keys"""
common = {'c': 3}
record = {'a': 1, 'b': 2, 'c': 3}
assert_equal({'a': 1, 'b': 2}, Alert._compute_diff(common, record))
def test_compute_diff_different_types(self):
"""Alert Class - Compute Diff - Type Mismatch Short-Circuits Recursion"""
common = {'b': 2}
record = {'a': 1, 'b': {'nested': 'stuff'}}
assert_equal(record, Alert._compute_diff(common, record))
def test_compute_diff_nested(self):
"""Alert Class - Compute Diff - Difference in Nested Dictionary"""
# This is the example given in the docstring
common = {'abc': 123, 'nested': {'A': 1}}
record = {'abc': 123, 'nested': {'A': 1, 'B': 2}}
assert_equal({'nested': {'B': 2}}, Alert._compute_diff(common, record))
def test_compute_diff_many_nested(self):
"""Alert Class - Compute Diff - Multiple Levels of Nesting"""
# These values are the same as those from test_compute_common_many_nested
record1 = {
'a': {
'b': {
'c': 3,
'd': 4
},
'e': {
'h': {
'i': 9
}
},
'j': {}
}
}
record2 = {
'a': {
'b': {
'c': 3,
},
'e': {
'f': {
'g': 8
},
'h': {}
},
'j': {}
}
}
common = {
'a': {
'b': {
'c': 3
},
'j': {}
}
}
expected_diff1 = {
'a': {
'b': {
'd': 4
},
'e': {
'h': {
'i': 9
}
}
}
}
assert_equal(expected_diff1, Alert._compute_diff(common, record1))
expected_diff2 = {
'a': {
'e': {
'f': {
'g': 8
},
'h': {}
}
}
}
assert_equal(expected_diff2, Alert._compute_diff(common, record2))
def test_merge(self):
"""Alert Class - Merge - Create Merged Alert"""
# Example based on a CarbonBlack log
record1 = {
'alliance_data_virustotal': [],
'alliance_link_virustotal': '',
'alliance_score_virustotal': 0,
'cmdline': 'whoami',
'comms_ip': '1.2.3.4',
'hostname': 'my-computer-name',
'path': '/usr/bin/whoami',
'streamalert:ioc': {
'hello': 'world'
},
'timestamp': 1234.5678,
'username': 'user'
}
alert1 = Alert(
'RuleName', record1, {'aws-sns:topic'},
created=datetime(year=2000, month=1, day=1),
merge_by_keys=['hostname', 'username'],
merge_window=timedelta(minutes=5)
)
# Second alert has slightly different record and different outputs
record2 = copy.deepcopy(record1)
record2['streamalert:ioc'] = {'goodbye': 'world'}
record2['timestamp'] = 9999
alert2 = Alert(
'RuleName', record2, {'slack:channel'},
created=datetime(year=2000, month=1, day=2),
merge_by_keys=['hostname', 'username'],
merge_window=timedelta(minutes=5)
)
merged = Alert.merge([alert1, alert2])
assert_is_instance(merged, Alert)
assert_equal({'slack:channel'}, merged.outputs) # Most recent outputs were used
expected_record = {
'AlertCount': 2,
'AlertTimeFirst': '2000-01-01T00:00:00.000000Z',
'AlertTimeLast': '2000-01-02T00:00:00.000000Z',
'MergedBy': {
'hostname': 'my-computer-name',
'username': 'user'
},
'OtherCommonKeys': {
'alliance_data_virustotal': [],
'alliance_link_virustotal': '',
'alliance_score_virustotal': 0,
'cmdline': 'whoami',
'comms_ip': '1.2.3.4',
'path': '/usr/bin/whoami',
},
'ValueDiffs': {
'2000-01-01T00:00:00.000000Z': {
'streamalert:ioc': {'hello': 'world'},
'timestamp': 1234.5678
},
'2000-01-02T00:00:00.000000Z': {
'streamalert:ioc': {'goodbye': 'world'},
'timestamp': 9999
}
}
}
assert_equal(expected_record, merged.record)
def test_merge_nested(self):
"""Alert Class - Merge - Merge with Nested Keys"""
record1 = {
'NumMatchedRules': 1,
'FileInfo': {
'Deleted': None,
'Nested': [1, 2, 'three']
},
'MatchedRules': {
'Rule1': 'MatchedStrings'
}
}
alert1 = Alert(
'RuleName', record1, {'slack:channel'},
created=datetime(year=2000, month=1, day=1),
merge_by_keys=['Nested'],
merge_window=timedelta(minutes=5)
)
record2 = {
'NumMatchedRules': 2,
'FileInfo': {
'Deleted': None,
'Nested': [1, 2, 'three']
},
'MatchedRules': {
'Rule1': 'MatchedStrings'
}
}
alert2 = Alert(
'RuleName', record2, {'slack:channel'},
created=datetime(year=2000, month=1, day=2),
merge_by_keys=['Nested'],
merge_window=timedelta(minutes=5)
)
record3 = {
'MatchedRules': {
'Rule1': 'MatchedStrings'
},
'Nested': [1, 2, 'three'] # This is in a different place in the record
}
alert3 = Alert(
'RuleName', record3, {'slack:channel'},
created=datetime(year=2000, month=1, day=3),
merge_by_keys=['Nested'],
merge_window=timedelta(minutes=5)
)
merged = Alert.merge([alert1, alert2, alert3])
expected_record = {
'AlertCount': 3,
'AlertTimeFirst': '2000-01-01T00:00:00.000000Z',
'AlertTimeLast': '2000-01-03T00:00:00.000000Z',
'MergedBy': {
'Nested': [1, 2, 'three']
},
'OtherCommonKeys': {
'MatchedRules': {
'Rule1': 'MatchedStrings'
}
},
'ValueDiffs': {
'2000-01-01T00:00:00.000000Z': {
'NumMatchedRules': 1,
'FileInfo': {
'Deleted': None
}
},
'2000-01-02T00:00:00.000000Z': {
'NumMatchedRules': 2,
'FileInfo': {
'Deleted': None
}
},
'2000-01-03T00:00:00.000000Z': {}
}
}
assert_equal(expected_record, merged.record)
| airbnb/streamalert | tests/unit/streamalert/shared/test_alert.py | Python | apache-2.0 | 20,111 |
'''
Perform further analysis on 64-bit GO language executables.
This module extends code flow analysis to runtime_main for Windows PE binaries.
GO binaries start from a single export and proceed through several functions
that initialize GO. Specific application code is launched from the GO function
runtime_main(), which is invoked by "call rax" with an address placed on the
stack many calls earlier. Vivisect code flow analysis does not track the
address; this module finds the address and invokes makeFunction(va).
Samples have been found with different instruction sequences for reaching the
address of runtime_main(); this module attempts all observed sequences.
'''
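# Hedged usage sketch (not part of this module): vivisect normally discovers and
# runs analysis modules itself, so the lines below only illustrate loading a
# 64-bit GO PE and letting analysis (including this module) run; the sample
# path is hypothetical.
#
#   import vivisect
#   vw = vivisect.VivWorkspace()
#   vw.loadFromFile('go_sample.exe')   # hypothetical 64-bit GO PE
#   vw.analyze()                       # code flow + analysis modules, incl. this one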
import envi
import envi.archs.i386.disasm
import envi.archs.amd64.disasm
from vivisect.analysis.i386.golang import golang_collect_opcodes, find_golang_bblock
import logging
logger = logging.getLogger(__name__)
# Opcode sequence, where element [-6] is the important one.
_GOLANG_AMD64_INSTRS = ['cld', 'call', 'mov', 'mov', 'mov', 'mov', 'call',
'call', 'call', 'lea', 'push', 'push', 'call',
'pop', 'pop']
def analyze(vw):
'''
Perform further analysis on GO language executables.
'''
# Make sure it is a PE file, with "Go build ID:" in the first few bytes
# of the .text segment (versus a .upxN segment of a packed sample).
has_go_build = False
for segment in vw.getSegments():
va, size, name, filename = segment
if name == '.text':
bytez = vw.readMemory(va, 10000)
k1 = bytez.find(b'Go build ID: ')
if k1 != -1:
has_go_build = True
break
if not has_go_build:
return
# Search the entry point (public export) for the pointer to runtime_main().
# Most GO executables have a single entry point, but some are more complex.
ep = vw.getEntryPoints()
ptr_va, runtime_va = golang_search_eps(vw, ep, filename)
if runtime_va is None:
return
# Invoke codeflow on runtime_main().
vw.addEntryPoint(runtime_va)
logger.debug('discovered runtime function: 0x%x', runtime_va)
vw.makeFunction(runtime_va)
# Also mark the ptr_va as a pointer to runtime_va.
vw.makePointer(ptr_va, tova=runtime_va)
_GOLANG_AMD64_MEP1A_INSTRS \
= ['sub', 'mov', 'mov', 'call', 'call', 'nop', 'nop', 'add', 'ret']
_GOLANG_AMD64_MEP1B_INSTRS \
= ['sub', 'mov', 'call', 'call', 'nop', 'nop', 'add', 'ret']
_GOLANG_AMD64_MEP2A_INSTRS \
= ['mov', 'mov', 'call', 'mov', 'mov', 'mov', 'mov', 'mov', 'mov', 'mov',
'call', 'mov', 'mov', 'test', 'jz']
_GOLANG_AMD64_MEP2B_INSTRS \
= ['mov', 'mov', 'call', 'mov', 'mov', 'mov', 'mov', 'mov', 'mov',
'call', 'mov', 'mov', 'test', 'jz']
def golang_search_eps(vw, ep, filename):
'''
Search over all entry points to find main(), and then look for the
function that calls runtime_main().
Return two pointers, or None, None if not found.
'''
if len(ep) == 1:
ep_va = ep[0]
bblocks = vw.getFunctionBlocks(ep_va)
if not bblocks:
return None, None
ptr_va, runtime_va = extract_golang_mainmain(vw, bblocks, filename)
return ptr_va, runtime_va
# Look for an entry point with a single basic block with a specific set
# of instructions. One of the call instructions has the next function.
# There can be multiple candidate functions meeting this criteria.
candidate_fns_1 = set()
for next_ep in ep:
bblocks = vw.getFunctionBlocks(next_ep)
if (not bblocks) or (len(bblocks) != 1):
continue
instrs = golang_collect_opcodes(vw, bblocks[0])
if (len(instrs) != len(_GOLANG_AMD64_MEP1A_INSTRS)) and \
(len(instrs) != len(_GOLANG_AMD64_MEP1B_INSTRS)):
continue
instrs_mnem = [ op.mnem for op in instrs ]
if (instrs_mnem != _GOLANG_AMD64_MEP1A_INSTRS) and \
(instrs_mnem != _GOLANG_AMD64_MEP1B_INSTRS):
continue
opcode = instrs[-5]
if not isinstance(opcode.opers[0],
envi.archs.i386.disasm.i386PcRelOper):
continue
try:
candidate_fns_1.add(opcode.opers[0].getOperValue(opcode))
except Exception:
continue
if not candidate_fns_1:
return None, None
# Next function has many basic blocks, one of which makes a call to main().
candidate_fns_2 = set()
for fnptr in candidate_fns_1:
if not vw.isFunction(fnptr):
continue
bblocks = vw.getFunctionBlocks(fnptr)
for bblock in bblocks:
instrs = golang_collect_opcodes(vw, bblock)
if (len(instrs) != len(_GOLANG_AMD64_MEP2A_INSTRS)) and \
(len(instrs) != len(_GOLANG_AMD64_MEP2B_INSTRS)):
continue
instrs_mnem = [ op.mnem for op in instrs ]
if (instrs_mnem != _GOLANG_AMD64_MEP2A_INSTRS) and \
(instrs_mnem != _GOLANG_AMD64_MEP2B_INSTRS):
continue
opcode = instrs[-5]
if not isinstance(opcode.opers[0],
envi.archs.i386.disasm.i386PcRelOper):
continue
try:
candidate_fns_2.add(opcode.opers[0].getOperValue(opcode))
except Exception:
continue
# There should be just one candidate function by now.
if len(candidate_fns_2) != 1:
return None, None
ptr = candidate_fns_2.pop()
# Analyze the function at the pointer if necessary.
if not vw.isFunction(ptr):
return None, None
bblocks = vw.getFunctionBlocks(ptr)
if not bblocks:
return None, None
# This might be the function that calls runtime_main(), or there might
# be one more indirect jump in a single basic block.
if len(bblocks) > 1:
ptr_va, runtime_va = extract_golang_mainmain(vw, bblocks, filename)
if runtime_va is None:
return None, None
return ptr_va, runtime_va
# Look for the indirect jump.
# Expect a function with one BB, with an indirect jump to the
# address loaded by "lea rax,[rip + immediate]".
instrs = golang_collect_opcodes(vw, bblocks[0])
if not instrs:
return None, None
op = instrs[0]
ptr_va, _ = parse_lea_raxriprel(vw, op, filename)
if not vw.isFunction(ptr_va):
return None, None
bblocks = vw.getFunctionBlocks(ptr_va)
ptr_va, runtime_va = extract_golang_mainmain(vw, bblocks, filename)
return ptr_va, runtime_va
def extract_golang_mainmain(vw, basic_blocks, filename):
'''
Find the basic block of interest and return the address of
the pointer and its contents, runtime_main().
The BB will contain the sequence of opcodes in _GOLANG_AMD64_INSTRS.
The lea instruction 6 from the end loads the address of a memory location,
and the contents of that location are the runtime_main() address.
'''
op = find_golang_bblock(vw, basic_blocks, _GOLANG_AMD64_INSTRS, 6)
if op is None:
op = find_golang_bblock_via_ind_jmp(vw, filename)
if op is None:
return None, None
# The key opcode is "lea rax,[rip + immediate]", which points to
# the GO function runtime_mainPC (aka runtime_main).
ptr_va, runtime_va = parse_lea_raxriprel(vw, op, filename, get_content=True)
return ptr_va, runtime_va
def find_golang_bblock_via_ind_jmp(vw, filename):
'''
Find the basic block of interest and return the address where
the special sequence of opcodes begins. Return None if not found.
Some GO executables use an indirect jmp in the entry point,
and special logic is needed to locate the BB of interest.
Example: 1897d2de0837090ec350004ef2f9fa87.
'''
# Start from the entry point (we already know there is only one).
ep_va = vw.getEntryPoints()[0]
# Expect a function with one BB, ending with an indirect jump
# to the address loaded by "lea rax,[rip + immediate]".
basic_blocks = vw.getFunctionBlocks(ep_va)
if len(basic_blocks) != 1:
return None
instrs = golang_collect_opcodes(vw, basic_blocks[0])
if (len(instrs) < 2) or \
(instrs[-1].mnem != 'jmp') or \
(instrs[-2].mnem != 'lea'):
return None
op = instrs[-2]
ptr_va, _ = parse_lea_raxriprel(vw, op, filename)
if not ptr_va:
return None
# Analyze the function at the pointer if necessary.
if not vw.isFunction(ptr_va):
logger.debug('discovered new function (ptr): 0x%x', ptr_va)
vw.makeFunction(ptr_va)
# Expect a function with one BB, ending with an indirect jump
# to the address loaded by "lea rax,[rip + immediate]".
basic_blocks = vw.getFunctionBlocks(ptr_va)
if len(basic_blocks) != 1:
return None
instrs = golang_collect_opcodes(vw, basic_blocks[0])
if (len(instrs) < 2) or \
(instrs[-1].mnem != 'jmp') or \
(instrs[-2].mnem != 'lea'):
return None
op = instrs[-2]
ptr_va, _ = parse_lea_raxriprel(vw, op, filename)
if not ptr_va:
return None
# This function should contain the special basic block.
basic_blocks = vw.getFunctionBlocks(ptr_va)
return find_golang_bblock(vw, basic_blocks, _GOLANG_AMD64_INSTRS, 6)
def parse_lea_raxriprel(vw, opcode, filename, get_content=False):
'''
Parse an opcode that should be "lea rax,[rip + immediate]", returning
the address of the second operand. Also return the content of the
address if so requested.
Return None, None if there is an error.
'''
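# Example of the expected shape (hypothetical displacement): for
# "lea rax,[rip + 0x2bd5e6]" opers[0] is the rax register operand and
# opers[1] is an Amd64RipRelOper whose getOperValue(opcode) resolves
# rip + 0x2bd5e6 to the absolute address returned as ptr_va.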
if len(opcode.opers) != 2:
return None, None
if not isinstance(opcode.opers[1], envi.archs.amd64.disasm.Amd64RipRelOper):
return None, None
try:
ptr_va = opcode.opers[1].getOperValue(opcode)
if len(vw.readMemory(ptr_va, 8)) != 8:
return None, None
if get_content:
runtime_va = vw.castPointer(ptr_va)
if len(vw.readMemory(runtime_va, 8)) != 8:
return None, None
else:
runtime_va = None
return ptr_va, runtime_va
except Exception as e:
return None, None
| bat-serjo/vivisect | vivisect/analysis/amd64/golang.py | Python | apache-2.0 | 10,241 |
"""rinse SOAP client utility functions."""
from __future__ import print_function
import collections
import os.path
import pprint
import textwrap
import defusedxml.lxml
import lxml.builder
from lxml import etree
RINSE_DIR = os.path.dirname(__file__)
ENVELOPE_XSD = 'soap-1.1_envelope.xsd'
NS_SOAPENV = 'http://schemas.xmlsoap.org/soap/envelope/'
NS_MAP = {
'soapenv': NS_SOAPENV,
}
def element_as_tree(element):
"""Convert an element from within an ElementTree to its own tree."""
# XXX: this is a crude hack, but it works - got any better ideas?
return safe_parse_string(
etree.tostring(
etree.ElementTree(element),
),
)
def safe_parse_string(raw_xml, **kwargs):
"""Safely parse raw XML content into an element tree."""
return defusedxml.lxml.fromstring(raw_xml, **kwargs)
def safe_parse_path(xml_path, **kwargs):
"""Safely parse XML content from path into an element tree."""
return defusedxml.lxml.parse(xml_path, **kwargs)
def safe_parse_url(xml_url, **kwargs):
"""Safely parse XML content from path into an element tree."""
return defusedxml.lxml.parse(xml_url, **kwargs)
class ElementMaker(lxml.builder.ElementMaker):
"""Wrapper around lxml ElementMaker that casts ints as strings."""
def __getattr__(self, name):
"""Return a lambda that parses int args as strings."""
_cls = super(ElementMaker, self).__getattr__(name)
def __cls_wrapper(*args, **kwargs):
"""Wrapper around Element class."""
return _cls(
*[
str(arg) if isinstance(arg, int) else arg
for arg
in args
],
**kwargs
)
return __cls_wrapper
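# Usage sketch (hypothetical element name): E = ElementMaker(namespace=NS_SOAPENV, nsmap=NS_MAP)
# E.Fault(123) builds the same element as E.Fault('123'); plain lxml would
# reject the bare int child, which is why the wrapper stringifies int args.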
RinseResponse = collections.namedtuple('RinseResponse', ['response', 'doc'])
class SchemaCache(collections.defaultdict):
"""Cache of lxml.etree.XMLSchema instances, keyed by XSD basename."""
def get(self, xsd, xpath=None, namespaces=None):
"""Generate XMLSchema instances as specified."""
if xsd.startswith('/'):
pass # absolute path
elif ':' in xsd:
pass # URL - defused should help protect us.
else:
# assume XSD is in res/ subdir of rinse project.
xsd = os.path.join(RINSE_DIR, 'res', xsd)
doc = safe_parse_path(xsd)
if xpath:
doc = doc.xpath(xpath, namespaces=namespaces)[0]
self[xsd] = schema = etree.XMLSchema(doc)
return schema
def __missing__(self, xsd):
"""Generate XMLSchema instances on demand."""
return self.get(xsd)
SCHEMA = SchemaCache()
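# Usage sketch (assuming the bundled res/soap-1.1_envelope.xsd ships with the
# package): SCHEMA[ENVELOPE_XSD] lazily builds and caches an etree.XMLSchema,
# so a parsed envelope can be checked with SCHEMA[ENVELOPE_XSD].validate(doc).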
class ElementMakerCache(collections.defaultdict):
"""Cache of ElementMaker instances for the given nsmap."""
def __init__(self, nsmap):
"""Keep reference to the nsmap we're given."""
super(ElementMakerCache, self).__init__()
self.nsmap = {
name: url
for name, url
in list(NS_MAP.items()) + list(nsmap.items())
}
def __missing__(self, ns_prefix):
"""Generate ElementMaker instances as required."""
return ElementMaker(
namespace=self.nsmap[ns_prefix],
nsmap=self.nsmap,
)
PRETTY_PARSER = etree.XMLParser(
remove_blank_text=True,
)
PRETTY_TEXT_WRAPPER = textwrap.TextWrapper(
width=78,
initial_indent='',
subsequent_indent=' ',
replace_whitespace=False,
drop_whitespace=False,
break_long_words=False,
break_on_hyphens=False,
)
def printxml(doc):
"""Pretty print an lxml document tree.
The XML printed may not be exactly equivalent to the doc provided, as blank
text within elements will be stripped to allow etree.tostring() to work with
the 'pretty_print' option set.
"""
pretty_tree = safe_parse_string(
etree.tostring(doc), parser=PRETTY_PARSER,
)
pretty_xml = etree.tostring(
pretty_tree, pretty_print=True, encoding='unicode',
).replace('\t', ' ').rstrip('\n')
for line in pretty_xml.split('\n'):
line = PRETTY_TEXT_WRAPPER.fill(line.rstrip('\n')).rstrip('\n')
for subline in line.split('\n'):
if not subline.strip():
continue
print(subline)
def recursive_dict(element):
"""Map an XML tree into a dict of dicts."""
if isinstance(element, (tuple, list)):
return tuple(
recursive_dict(child)
for child
in element
)
return (
'{}{}'.format(
element.tag,
pprint.pformat(element.attrib, compact=True, width=10000),
),
dict(
map(recursive_dict, element)
) or element.text
)
| MarkusH/rinse | rinse/util.py | Python | mit | 4,798 |
from shopdatabase import DatabaseController
from parser import Parser
import sqlite3
ps = Parser()
dbpath = ps.get_db_path()
class CheckController(DatabaseController):
def __init__(self):
DatabaseController.__init__(self)
def check_in(self, username, barcode):
"""
Given a barcode, the corresponding DVD is re-shelved in the database and
the borrower's borrowed quantity is reduced by 1.
Return True on a successful check-in; False means the DVD is not currently
lent out or is lent to a different user.
"""
if self.check_dvd_state(barcode, 0):
return False
if not self.check_dvd_borrower(barcode) == username.lower():
return False
conn = sqlite3.connect(dbpath)
c = conn.cursor()
c.execute("UPDATE dvdlist SET out=0 WHERE barcode=(?)", (barcode,))
c.execute("UPDATE userdb SET borrowed=borrowed-1 WHERE username=(?)",
(username.lower(),))
conn.commit()
conn.close()
return True
def check_dvd_borrower(self, barcode):
"""
Check the borrower of a specific DVD; return the borrower's username,
or None if no matching DVD is found.
"""
conn = sqlite3.connect(dbpath)
c = conn.cursor()
raw = c.execute("SELECT borrower FROM dvdlist WHERE barcode=(?)",
(barcode,))
data = raw.fetchone()
conn.close()
if data is None:
return None
return data[0].lower()
def check_dvd_state(self, barcode, state):
"""
Check whether a specific DVD is in the given state (0 = on shelf, 1 = lent out).
Return True or False; False is also returned when the barcode is unknown.
"""
conn = sqlite3.connect(dbpath)
c = conn.cursor()
rawdata = c.execute("SELECT * FROM dvdlist WHERE barcode=(?)",
(barcode,))
item = rawdata.fetchone()
conn.close()
if item is None:
return False
return item[3] == state
def check_out(self, username, barcode):
"""
Given a barcode and a username, assign the DVD to the customer.
Return True on success; False if the DVD is unavailable or the user
already has 6 DVDs checked out.
"""
if not self.check_dvd_state(barcode, 0):
return False
if self.check_user_quantity(username.lower()) == 6:
return False
conn = sqlite3.connect(dbpath)
c = conn.cursor()
c.execute("UPDATE dvdlist SET out=1, borrower=(?) WHERE barcode=(?)",
(username.lower(), barcode))
c.execute("UPDATE userdb SET borrowed = borrowed + 1 \
WHERE username=(?)", (username.lower(),))
conn.commit()
conn.close()
return True
def check_user_quantity(self, username):
"""
Return the user's borrowed quantity, or False if the user does not exist.
"""
conn = sqlite3.connect(dbpath)
c = conn.cursor()
raw = c.execute("SELECT * FROM userdb WHERE username=(?)",
(username.lower(),))
info = raw.fetchone()
conn.close()
if info is None:
return False
return info[5]
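# Usage sketch (hypothetical username/barcode values):
#   cc = CheckController()
#   cc.check_out('alice', '0000123')   # lend DVD 0000123 to alice (max 6 per user)
#   cc.check_in('alice', '0000123')    # alice returns it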
| hlx98007/PyDVDShop | modules/checkcontroller.py | Python | lgpl-3.0 | 3,437 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Train using multiple images of repeated pieces
import tensorflow as tf
import numpy as np
import loadTrainData
# parameters
learning_rate = 0.001
training_epochs = 300
x_data = loadTrainData.getImagesData()
pixels = 70*74
# y_data = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]], dtype=np.float32)
y_data = np.array(loadTrainData.getLabelsData())
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
W1 = tf.get_variable("W1", shape=[pixels, pixels],
initializer=tf.contrib.layers.xavier_initializer())
b1 = tf.Variable(tf.random_normal([pixels]))
L1 = tf.nn.relu(tf.matmul(X, W1) + b1) # L1 is the original layer1
W2 = tf.get_variable("W2", shape=[pixels, 512],
initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.Variable(tf.random_normal([512]), name='bias2')
L2 = tf.nn.relu(tf.matmul(L1, W2) + b2)
W3 = tf.get_variable("W3", shape=[512, 512],
initializer=tf.contrib.layers.xavier_initializer())
b3 = tf.Variable(tf.random_normal([512]))
L3 = tf.nn.relu(tf.matmul(L2, W3) + b3)
W4 = tf.get_variable("W4", shape=[512, 512],
initializer=tf.contrib.layers.xavier_initializer())
b4 = tf.Variable(tf.random_normal([512]))
L4 = tf.nn.relu(tf.matmul(L3, W4) + b4)
W5 = tf.get_variable("W5", shape=[512, 7],
initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([7]))
hypothesis = tf.matmul(L4, W5) + b5
# cost/loss function
# define cost/loss & optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Accuracy computation
# True if hypothesis>0.5 else False
predicted = tf.cast(hypothesis > 0.5, dtype=tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), dtype=tf.float32))
# Test model
is_correct = tf.equal(tf.arg_max(hypothesis, 1), tf.arg_max(Y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Launch graph
with tf.Session() as sess:
# Initialize TensorFlow variables
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
c, _ = sess.run([cost, optimizer], feed_dict={X: x_data, Y: y_data})
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.9f}'.format(c))
#end for
save_path = saver.save(sess, "saved/model-deep-nn-xavier.ckpt")
| archcra/brt | train/round03/Deep-NN-xavier-train.py | Python | gpl-3.0 | 2,638 |
# -*- coding: utf-8 -*-
"""
Created on Mon May 30 21:15:34 2016
@author: Ben
"""
import clearplot.plot_functions as pf
import numpy as np
#Verification that the cropping algorithm works properly with log data
#(Select x values that are nearly the same as the limits on a linear scale,
#but significantly different on a log scale. Select y values that are totally
#within the y limits, so that the data exceeds the x limits and not the y
#limits.)
xa = np.linspace(5.0e-13, 5.0e-6, 20)
ya = np.linspace(1.0e8, 1.0e11, 20)
#(Select x values that are totally within the x limits, so that the data
#exceeds the y limits and not the x limits. Select y values that are
#significantly bigger/smaller than limits on a linear scale, but are nearly
#the same on a log scale.)
xb = np.linspace(1.0e-11, 1.0e-8, 20)
yb = np.linspace(1.0e7 - 0.4e6, 1.0e12 + 0.04e12, 20)
pf.plot('curve_cropping-log_axes', \
[xa, xb], [ya, yb], \
x_scale = 'log', y_scale = 'log', \
x_lim = [1.0e-12, 1.0e-7], y_lim = [1.0e7, 1.0e12]) | breedlun/clearplot | tests/curve_cropping-log_axes/curve_cropping-log_axes.py | Python | mit | 1,028 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Modules
import sys
import pygame
from pygame.locals import *
# Constants
venx = 640
veny = 448
# Classes
class Pieza(pygame.sprite.Sprite): # 64x64 px piece size
def __init__(self, tipo):
pygame.sprite.Sprite.__init__(self)
if tipo == 0:
self.image = load_image("tablero.png", True)
elif tipo == 1:
self.image = load_image("laser.png", True)
elif tipo == 2:
self.image = load_image("diana.png", True)
elif tipo == 3:
self.image = load_image("diana_espejo.png", True)
elif tipo == 4:
self.image = load_image("espejo.png", True)
elif tipo == 5:
self.image = load_image("espejotraves.png", True)
elif tipo == 6:
self.image = load_image("tunel.png", True)
elif tipo == 7:
self.image = load_image("bloqueo.png", True)
elif tipo == 8:
self.image = load_image("bloqueo_g.png", True)
elif tipo == 9:
self.image = load_image("portal.png", True)
else:
tipo = 0
self.image = load_image("tablero.png", True)
# give every piece a rect (placed at the end of __init__, outside the if/elif chain) so it can be blitted
self.rect = self.image.get_rect()
# Functions
def load_image(filename, transparent=False):
try:
image = pygame.image.load(filename)
except pygame.error:
raise SystemExit
image = image.convert()
if transparent:
color = image.get_at((0, 0))
image.set_colorkey(color, RLEACCEL)
return image
#------------------------------------------
def main():
screen = pygame.display.set_mode((venx, veny))
pygame.display.set_caption("Laser Game")
background_image = load_image('fondo.png')
bola = Pieza(0)
while True:
for eventos in pygame.event.get():
if eventos.type == QUIT:
sys.exit(0)
screen.blit(background_image, (0, 0))
screen.blit(bola.image, bola.rect)
pygame.display.flip()
return 0
if __name__ == '__main__':
pygame.init()
main()
| LordSprit/Laser | main.py | Python | gpl-2.0 | 2,036 |
from ConfigAndPackages.all_odd_even_turn_model import all_odd_even_list
from networkx import all_shortest_paths, all_simple_paths
from ConfigAndPackages import Config
import copy
import sys
from ArchGraphUtilities import AG_Functions
from RoutingAlgorithms import Routing
from RoutingAlgorithms.RoutingGraph_Reports import draw_rg
from RoutingAlgorithms.turn_model_evaluation.list_all_turn_models import check_tm_domination
from SystemHealthMonitoring import SystemHealthMonitoringUnit
from RoutingAlgorithms.Routing_Functions import extended_degree_of_adaptiveness, degree_of_adaptiveness, \
check_deadlock_freeness
from RoutingAlgorithms.Calculate_Reachability import reachability_metric, is_destination_reachable_from_source
from ArchGraphUtilities.AG_Functions import manhattan_distance
import itertools
from random import shuffle
import re
def update_rg_odd_even(ag, turn_model_odd, turn_model_even, shmu, noc_rg):
"""
gets a turn model for odd and even columns, along with an noc_rg and a SHMU
and updates the SHMU and noc_rg connections based on the turn models
"""
for node in ag.nodes():
node_x, node_y, node_z = AG_Functions.return_node_location(node)
if node_x % 2 == 1:
for turn in turn_model_odd:
shmu.restore_broken_turn(node, turn, False)
from_port = str(node)+str(turn[0])+"I"
to_port = str(node)+str(turn[2])+"O"
Routing.update_noc_route_graph(noc_rg, from_port, to_port, 'ADD')
else:
for turn in turn_model_even:
shmu.restore_broken_turn(node, turn, False)
from_port = str(node)+str(turn[0])+"I"
to_port = str(node)+str(turn[2])+"O"
Routing.update_noc_route_graph(noc_rg, from_port, to_port, 'ADD')
return
def clean_rg_from_odd_even(ag, turn_model_odd, turn_model_even, shmu, noc_rg):
"""
gets a turn model for odd and even columns, along with an noc_rg and a SHMU
and removes added connections in the SHMU and noc_rg based on the turn models
"""
for node in ag.nodes():
node_x, node_y, node_z = AG_Functions.return_node_location(node)
if node_x % 2 == 1:
for turn in turn_model_odd:
shmu.restore_broken_turn(node, turn, False)
from_port = str(node)+str(turn[0])+"I"
to_port = str(node)+str(turn[2])+"O"
Routing.update_noc_route_graph(noc_rg, from_port, to_port, 'REMOVE')
else:
for turn in turn_model_even:
shmu.restore_broken_turn(node, turn, False)
from_port = str(node)+str(turn[0])+"I"
to_port = str(node)+str(turn[2])+"O"
Routing.update_noc_route_graph(noc_rg, from_port, to_port, 'REMOVE')
return
def evaluate_actual_odd_even_turn_model():
"""
evaluates the classic odd-even turn model in terms of DoA and DoA_ex
:return: None
"""
turns_health_2d_network = {"N2W": False, "N2E": False, "S2W": False, "S2E": False,
"W2N": False, "W2S": False, "E2N": False, "E2S": False}
Config.ag.topology = '2DMesh'
Config.ag.x_size = 3
Config.ag.y_size = 3
Config.ag.z_size = 1
Config.RotingType = 'MinimalPath'
ag = copy.deepcopy(AG_Functions.generate_ag())
number_of_pairs = len(ag.nodes())*(len(ag.nodes())-1)
turn_model_odd = ['E2N', 'E2S', 'W2N', 'W2S', 'S2E', 'N2E']
turn_model_even = ['E2N', 'E2S', 'S2W', 'S2E', 'N2W', 'N2E']
if not check_tm_domination(turn_model_odd, turn_model_even): # taking out the domination!
turns_health = copy.deepcopy(turns_health_2d_network)
shmu = SystemHealthMonitoringUnit.SystemHealthMonitoringUnit()
shmu.setup_noc_shm(ag, turns_health, False)
noc_rg = copy.deepcopy(Routing.generate_noc_route_graph(ag, shmu, [], False, False))
update_rg_odd_even(ag, turn_model_odd, turn_model_even, shmu, noc_rg)
draw_rg(noc_rg)
connectivity_metric = reachability_metric(ag, noc_rg, False)
print("connectivity_metric:", connectivity_metric)
if check_deadlock_freeness(noc_rg):
print("Deadlock free!")
doa = degree_of_adaptiveness(ag, noc_rg, False)/float(number_of_pairs)
doa_ex = extended_degree_of_adaptiveness(ag, noc_rg, False)/float(number_of_pairs)
print("doa:", doa)
print("doa_ex", doa_ex)
return None
def enumerate_all_odd_even_turn_models(network_size, routing_type):
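    """
    Enumerates every pair of odd/even column turn models for a
    network_size x network_size 2D mesh, counts how many of them keep the
    network fully connected and deadlock free, and writes the fully connected,
    deadlock-free pairs to a file under Generated_Files/Turn_Model_Lists.
    """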
all_odd_evens_file = open('Generated_Files/Turn_Model_Lists/'+str(network_size)+"x"
+str(network_size)+"_"+str(routing_type)+"_"+'odd_even_tm_list_dl_free.txt', 'w')
turns_health_2d_network = {"N2W": False, "N2E": False, "S2W": False, "S2E": False,
"W2N": False, "W2S": False, "E2N": False, "E2S": False}
Config.ag.topology = '2DMesh'
Config.ag.x_size = network_size
Config.ag.y_size = network_size
Config.ag.z_size = 1
Config.RotingType = routing_type
ag = copy.deepcopy(AG_Functions.generate_ag())
number_of_pairs = len(ag.nodes())*(len(ag.nodes())-1)
turn_model_list = []
for length in range(0, len(turns_health_2d_network.keys())+1):
for item in list(itertools.combinations(turns_health_2d_network.keys(), length)):
if len(item) > 0:
turn_model_list.append(list(item))
connected_counter = 0
deadlock_free_counter = 0
tm_counter = 0
turns_health = copy.deepcopy(turns_health_2d_network)
shmu = SystemHealthMonitoringUnit.SystemHealthMonitoringUnit()
shmu.setup_noc_shm(ag, turns_health, False)
noc_rg = copy.deepcopy(Routing.generate_noc_route_graph(ag, shmu, [], False, False))
for turn_model_odd in turn_model_list:
for turn_model_even in turn_model_list:
if not check_tm_domination(turn_model_odd, turn_model_even): # taking out the domination!
update_rg_odd_even(ag, turn_model_odd, turn_model_even, shmu, noc_rg)
connectivity_metric = reachability_metric(ag, noc_rg, False)
if connectivity_metric == number_of_pairs:
connected_counter += 1
if check_deadlock_freeness(noc_rg):
deadlock_free_counter += 1
all_odd_evens_file.write("["+str(turn_model_odd)+","+str(turn_model_even)+"],\n")
tm_counter += 1
sys.stdout.write("\rchecked TM: %i " % tm_counter +
" number of fully connected TM: %i" % connected_counter +
" number of deadlock free connected TM: %i" % deadlock_free_counter)
sys.stdout.flush()
clean_rg_from_odd_even(ag, turn_model_odd, turn_model_even, shmu, noc_rg)
all_odd_evens_file.write("checked TM: %i " + str(tm_counter) +
" number of fully connected TM: %i" +str(connected_counter) +
" number of deadlock free connected TM: %i"+str(deadlock_free_counter))
all_odd_evens_file.close()
return None
def evaluate_doa_for_all_odd_even_turn_model_list(network_size):
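    """
    Computes DoA and DoA_ex for every turn model in all_odd_even_list on a
    network_size x network_size 2D mesh, writes a report file and returns two
    dictionaries mapping rounded DoA (respectively DoA_ex) values to the list
    of turn model indices that fall into each class.
    """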
all_odd_evens_file = open('Generated_Files/Turn_Model_Lists/all_odd_evens_doa.txt', 'w')
turns_health_2d_network = {"N2W": False, "N2E": False, "S2W": False, "S2E": False,
"W2N": False, "W2S": False, "E2N": False, "E2S": False}
Config.ag.topology = '2DMesh'
Config.ag.x_size = network_size
Config.ag.y_size = network_size
Config.ag.z_size = 1
ag = copy.deepcopy(AG_Functions.generate_ag())
number_of_pairs = len(ag.nodes())*(len(ag.nodes())-1)
turn_model_list = []
for length in range(0, len(turns_health_2d_network.keys())+1):
for item in list(itertools.combinations(turns_health_2d_network.keys(), length)):
if len(item) > 0:
turn_model_list.append(list(item))
classes_of_doa = {}
classes_of_doax = {}
tm_counter = 0
all_odd_evens_file.write(" # | "+'%51s' % " "+" \t|")
all_odd_evens_file.write(" DoA | DoAx | \tC-metric\n")
all_odd_evens_file.write("-------|--------------------------------------------" +
"----------------------------|--------|--------|-------------"+"\n")
for turn_model in all_odd_even_list:
turn_model_odd = turn_model[0]
turn_model_even = turn_model[1]
turns_health = copy.deepcopy(turns_health_2d_network)
shmu = SystemHealthMonitoringUnit.SystemHealthMonitoringUnit()
shmu.setup_noc_shm(ag, turns_health, False)
noc_rg = copy.deepcopy(Routing.generate_noc_route_graph(ag, shmu, [], False, False))
update_rg_odd_even(ag, turn_model_odd, turn_model_even, shmu, noc_rg)
doa = degree_of_adaptiveness(ag, noc_rg, False)/float(number_of_pairs)
doa_ex = extended_degree_of_adaptiveness(ag, noc_rg, False)/float(number_of_pairs)
if round(doa, 2) not in classes_of_doa.keys():
classes_of_doa[round(doa, 2)] = [tm_counter]
else:
classes_of_doa[round(doa, 2)].append(tm_counter)
if round(doa_ex, 2) not in classes_of_doax.keys():
classes_of_doax[round(doa_ex, 2)] = [tm_counter]
else:
classes_of_doax[round(doa_ex, 2)].append(tm_counter)
all_odd_evens_file.write('%5s' % str(tm_counter)+" | even turn model:"+'%53s' % str(turn_model_even)+"\t|")
all_odd_evens_file.write(" | |\n")
all_odd_evens_file.write(" | odd turn model: "+'%53s' % str(turn_model_odd)+" \t|")
all_odd_evens_file.write('%8s' % str(round(doa, 2)) + "|" + '%8s' % str(round(doa_ex, 2)) +
"|\n") # +'%8s' % str(round(connectivity_metric,2))+"\n")
all_odd_evens_file.write("-------|--------------------------------------------" +
"----------------------------|--------|--------|-------------"+"\n")
tm_counter += 1
sys.stdout.write("\rchecked TM: %i " % tm_counter)
sys.stdout.flush()
all_odd_evens_file.write("----------"*3+"\n")
all_odd_evens_file.write("distribution of turn models"+"\n")
for item in sorted(classes_of_doa.keys()):
temp_list = []
for tm in classes_of_doa[item]:
turn_model = all_odd_even_list[tm]
number_of_turns = len(turn_model[0])+len(turn_model[1])
temp_list.append(number_of_turns)
all_odd_evens_file.write(str(item)+" "+str(temp_list.count(8))+" "+str(temp_list.count(9))+" " +
str(temp_list.count(10))+" "+str(temp_list.count(11))+" " +
str(temp_list.count(12))+"\n")
all_odd_evens_file.write("----------"*3+"\n")
all_odd_evens_file.write("distribution of turn models"+"\n")
for item in sorted(classes_of_doax.keys()):
temp_list = []
for tm in classes_of_doax[item]:
turn_model = all_odd_even_list[tm]
number_of_turns = len(turn_model[0])+len(turn_model[1])
temp_list.append(number_of_turns)
all_odd_evens_file.write(str(item)+" "+str(temp_list.count(8))+" "+str(temp_list.count(9))+" " +
str(temp_list.count(10))+" "+str(temp_list.count(11))+" " +
str(temp_list.count(12))+"\n")
all_odd_evens_file.close()
return classes_of_doa, classes_of_doax
def report_odd_even_turn_model_fault_tolerance(viz, routing_type, combination, network_size, ft_dictionary,
selected_turn_models):
"""
    generates a 2D architecture graph and, for each combination
    C(len(ag.edges), combination) of broken links, measures the network
    connectivity, then writes the average connectivity metric in a file.
    :param viz: if true, generates the visualization files
    :param routing_type: can be "MinimalPath" or "NonMinimalPath"
    :param combination: number of links to break in the network
    :return: ft_dictionary a dictionary with turn model ids (from selected_turn_models)
    as keys and lists of average connectivity_metric values as values.
"""
turns_health_2d_network = {"N2W": False, "N2E": False, "S2W": False, "S2E": False,
"W2N": False, "W2S": False, "E2N": False, "E2S": False}
tm_counter = 0
Config.ag.topology = '2DMesh'
Config.ag.x_size = network_size
Config.ag.y_size = network_size
Config.ag.z_size = 1
Config.RotingType = routing_type
ag = copy.deepcopy(AG_Functions.generate_ag(report=False))
sub_ag_list = list(itertools.combinations(ag.edges(), combination))
turns_health = copy.deepcopy(turns_health_2d_network)
shmu = SystemHealthMonitoringUnit.SystemHealthMonitoringUnit()
shmu.setup_noc_shm(ag, turns_health, False)
for turn_id in selected_turn_models:
counter = 0
metric_sum = 0
turn_model = all_odd_even_list[turn_id]
turn_model_odd = turn_model[0]
turn_model_even = turn_model[1]
file_name = str(tm_counter)+'_eval'
turn_model_eval_file = open('Generated_Files/Turn_Model_Eval/'+file_name+'.txt', 'a+')
if viz:
file_name_viz = str(tm_counter)+'_eval_'+str(len(ag.edges())-counter)
turn_model_eval_viz_file = open('Generated_Files/Internal/odd_even'+file_name_viz+'.txt', 'w')
else:
turn_model_eval_viz_file = None
for sub_ag in sub_ag_list:
for link in list(sub_ag):
shmu.break_link(link, False)
noc_rg = copy.deepcopy(Routing.generate_noc_route_graph(ag, shmu, [], False, False))
update_rg_odd_even(ag, turn_model_odd, turn_model_even, shmu, noc_rg)
connectivity_metric = reachability_metric(ag, noc_rg, False)
counter += 1
metric_sum += connectivity_metric
if viz:
turn_model_eval_viz_file.write(str(float(metric_sum)/counter)+"\n")
for link in list(sub_ag):
shmu.restore_broken_link(link, False)
clean_rg_from_odd_even(ag, turn_model_odd, turn_model_even, shmu, noc_rg)
shuffle(sub_ag_list)
if counter > 0:
avg_connectivity = float(metric_sum)/counter
else:
avg_connectivity = 0
turn_model_eval_file.write(str(len(ag.edges())-combination)+"\t\t"+str(avg_connectivity)+"\n")
if turn_id in ft_dictionary.keys():
ft_dictionary[turn_id].append(avg_connectivity)
else:
ft_dictionary[turn_id] = [avg_connectivity]
if viz:
turn_model_eval_viz_file.close()
turn_model_eval_file.close()
sys.stdout.write("\rchecked TM: %i " % tm_counter+"\t\t\tnumber of broken links: %i " % combination)
sys.stdout.flush()
tm_counter += 1
return ft_dictionary
def report_odd_even_turn_model_router_fault_tolerance(viz, routing_type, combination, network_size, ft_dictionary,
selected_turn_models):
"""
    generates a 2D architecture graph and, for each combination
    C(len(ag.nodes), combination) of broken routers, measures the network
    connectivity, then writes the average connectivity metric in a file.
    :param viz: if true, generates the visualization files
    :param routing_type: can be "MinimalPath" or "NonMinimalPath"
    :param combination: number of routers to break in the network
    :return: ft_dictionary a dictionary with turn model ids (from selected_turn_models)
    as keys and lists of average connectivity_metric values as values.
"""
turns_health_2d_network = {"N2W": False, "N2E": False, "S2W": False, "S2E": False,
"W2N": False, "W2S": False, "E2N": False, "E2S": False}
tm_counter = 0
Config.ag.topology = '2DMesh'
Config.ag.x_size = network_size
Config.ag.y_size = network_size
Config.ag.z_size = 1
Config.RotingType = routing_type
ag = copy.deepcopy(AG_Functions.generate_ag(report=False))
router_list = list(itertools.combinations(ag.nodes(), combination))
for turn_id in selected_turn_models:
counter = 0
metric_sum = 0
turn_model = all_odd_even_list[turn_id]
turn_model_odd = turn_model[0]
turn_model_even = turn_model[1]
file_name = str(tm_counter)+'_eval'
turn_model_eval_file = open('Generated_Files/Turn_Model_Eval/'+file_name+'.txt', 'a+')
if viz:
file_name_viz = str(tm_counter)+'_eval_'+str(len(ag.nodes())-counter)
turn_model_eval_viz_file = open('Generated_Files/Internal/odd_even'+file_name_viz+'.txt', 'w')
else:
turn_model_eval_viz_file = None
for sub_router_list in router_list:
turns_health = copy.deepcopy(turns_health_2d_network)
shmu = SystemHealthMonitoringUnit.SystemHealthMonitoringUnit()
shmu.setup_noc_shm(ag, turns_health, False)
noc_rg = copy.deepcopy(Routing.generate_noc_route_graph(ag, shmu, [], False, False))
for node in ag.nodes():
if node not in sub_router_list:
node_x, node_y, node_z = AG_Functions.return_node_location(node)
if node_x % 2 == 1:
for turn in turn_model_odd:
shmu.restore_broken_turn(node, turn, False)
from_port = str(node)+str(turn[0])+"I"
to_port = str(node)+str(turn[2])+"O"
Routing.update_noc_route_graph(noc_rg, from_port, to_port, 'ADD')
else:
for turn in turn_model_even:
shmu.restore_broken_turn(node, turn, False)
from_port = str(node)+str(turn[0])+"I"
to_port = str(node)+str(turn[2])+"O"
Routing.update_noc_route_graph(noc_rg, from_port, to_port, 'ADD')
else:
for port_1 in ["N", "S", "E", "W", "L"]:
for port_2 in ["N", "S", "E", "W", "L"]:
if port_1 != port_2:
from_port = str(node)+str(port_1)+"I"
to_port = str(node)+str(port_2)+"O"
if (from_port, to_port) in noc_rg.edges():
Routing.update_noc_route_graph(noc_rg, from_port, to_port, 'REMOVE')
connectivity_metric = reachability_metric(ag, noc_rg, False)
counter += 1
metric_sum += connectivity_metric
if viz:
turn_model_eval_viz_file.write(str(float(metric_sum)/counter)+"\n")
shuffle(router_list)
if counter > 0:
avg_connectivity = float(metric_sum)/counter
else:
avg_connectivity = 0
turn_model_eval_file.write(str(len(ag.nodes())-combination)+"\t\t"+str(avg_connectivity)+"\n")
if turn_id in ft_dictionary.keys():
ft_dictionary[turn_id].append(avg_connectivity)
else:
ft_dictionary[turn_id] = [avg_connectivity]
if viz:
turn_model_eval_viz_file.close()
turn_model_eval_file.close()
sys.stdout.write("\rchecked TM: %i " % tm_counter+"\t\t\tnumber of broken routers: %i " % combination)
sys.stdout.flush()
tm_counter += 1
return ft_dictionary
def return_links_in_path(path):
"""
gets a path (as a list of ports) as an input and returns a list of links
used in the path.
    :path a list of ports (in the form "node_number" + "direction" + "I/O",
          for example 12SO, which is the output port of node 12 in the south
          direction!)
    :return a list of strings indicating the links in the path, in the format
            ["node1_node2", ...]
"""
links = []
for i in range(0, len(path)-1):
        start = int(re.findall(r'\d+', path[i])[0])
        end = int(re.findall(r'\d+', path[i+1])[0])
if start != end:
links.append(str(start)+"_"+str(end))
return links
def find_similarity_in_paths(link_dict, paths):
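    """
    Updates link_dict with the links that are shared by all given paths:
    every link that appears in each path of paths gets its counter in
    link_dict incremented by one, while links used by only some of the paths
    are left untouched. Returns the updated link_dict.
    """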
link_dictionary = {}
path_number = len(paths)
for i in range(0, path_number):
for link in return_links_in_path(paths[i]):
if link in link_dictionary.keys():
link_dictionary[link] += 1
else:
link_dictionary[link] = 1
for link in sorted(link_dictionary.keys()):
if link_dictionary[link] == path_number:
if link in link_dict.keys():
link_dict[link] += 1
else:
link_dict[link] = 1
return link_dict
def odd_even_fault_tolerance_metric(network_size, routing_type):
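    """
    For every odd-even turn model, builds the routing graph, measures how
    strongly the allowed paths of each source/destination pair are forced
    through the same links, turns that overlap into a single fault-tolerance
    metric, and returns a dictionary mapping each metric value to the indices
    of the turn models in that class (a report file is written as well).
    """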
turns_health_2d_network = {"N2W": False, "N2E": False, "S2W": False, "S2E": False,
"W2N": False, "W2S": False, "E2N": False, "E2S": False}
Config.ag.topology = '2DMesh'
Config.ag.x_size = network_size
Config.ag.y_size = network_size
Config.ag.z_size = 1
Config.RotingType = routing_type
all_odd_evens_file = open('Generated_Files/Turn_Model_Eval/'+str(network_size)+"x"+str(network_size)+
'_OE_metric_'+Config.RotingType+'.txt', 'w')
all_odd_evens_file.write("TOPOLOGY::"+str(Config.ag.topology)+"\n")
all_odd_evens_file.write("X SIZE:"+str(Config.ag.x_size)+"\n")
all_odd_evens_file.write("Y SIZE:"+str(Config.ag.y_size)+"\n")
all_odd_evens_file.write("Z SIZE:"+str(Config.ag.z_size)+"\n")
ag = copy.deepcopy(AG_Functions.generate_ag())
shmu = SystemHealthMonitoringUnit.SystemHealthMonitoringUnit()
turns_health = copy.deepcopy(turns_health_2d_network)
shmu.setup_noc_shm(ag, turns_health, False)
noc_rg = copy.deepcopy(Routing.generate_noc_route_graph(ag, shmu, [], False, False))
classes_of_doa_ratio = []
turn_model_class_dict = {}
tm_counter = 0
for turn_model in all_odd_even_list:
sys.stdout.write("\rnumber of processed turn models: %i " % tm_counter)
sys.stdout.flush()
tm_counter += 1
link_dict = {}
turn_model_index = all_odd_even_list.index(turn_model)
turn_model_odd = turn_model[0]
turn_model_even = turn_model[1]
update_rg_odd_even(ag, turn_model_odd, turn_model_even, shmu, noc_rg)
number_of_pairs = len(ag.nodes())*(len(ag.nodes())-1)
all_paths_in_graph = []
for source_node in ag.nodes():
for destination_node in ag.nodes():
if source_node != destination_node:
if is_destination_reachable_from_source(noc_rg, source_node, destination_node):
if Config.RotingType == 'MinimalPath':
shortest_paths = list(all_shortest_paths(noc_rg, str(source_node)+str('L')+str('I'),
str(destination_node)+str('L')+str('O')))
paths = []
for path in shortest_paths:
minimal_hop_count = manhattan_distance(source_node, destination_node)
if (len(path)/2)-1 <= minimal_hop_count:
paths.append(path)
all_paths_in_graph.append(path)
else:
paths = list(all_simple_paths(noc_rg, str(source_node)+str('L')+str('I'),
str(destination_node)+str('L')+str('O')))
all_paths_in_graph += paths
link_dict = find_similarity_in_paths(link_dict, paths)
metric = 0
for item in link_dict.keys():
metric += link_dict[item]
if Config.RotingType == 'MinimalPath':
doa = degree_of_adaptiveness(ag, noc_rg, False)/float(number_of_pairs)
metric = 1/(float(metric)/len(ag.edges()))
metric = float("{:3.3f}".format(metric))
else:
doa_ex = extended_degree_of_adaptiveness(ag, noc_rg, False)/float(number_of_pairs)
metric = 1/(float(metric)/len(ag.edges()))
metric = float("{:3.3f}".format(metric))
if metric not in classes_of_doa_ratio:
classes_of_doa_ratio.append(metric)
if metric in turn_model_class_dict.keys():
turn_model_class_dict[metric].append(turn_model_index)
else:
turn_model_class_dict[metric] = [turn_model_index]
# return SHMU and RG back to default
clean_rg_from_odd_even(ag, turn_model_odd, turn_model_even, shmu, noc_rg)
all_odd_evens_file.write("classes of metric"+str(classes_of_doa_ratio)+"\n")
all_odd_evens_file.write("----------"*3+"\n")
all_odd_evens_file.write("turn models of class"+"\n")
for item in sorted(turn_model_class_dict.keys()):
all_odd_evens_file.write(str(item)+" "+str(turn_model_class_dict[item])+"\n")
all_odd_evens_file.write("----------"*3+"\n")
all_odd_evens_file.write("distribution of turn models"+"\n")
for item in sorted(turn_model_class_dict.keys()):
temp_list = []
for tm in turn_model_class_dict[item]:
turn_model = all_odd_even_list[tm]
number_of_turns = len(turn_model[0])+len(turn_model[1])
temp_list.append(number_of_turns)
all_odd_evens_file.write(str(item)+" "+str(temp_list.count(8))+" "+str(temp_list.count(9))+" " +
str(temp_list.count(10))+" "+str(temp_list.count(11))+" " +
str(temp_list.count(12))+"\n")
all_odd_evens_file.close()
return turn_model_class_dict
def evaluate_turn_model_fault_tolerance(selected_turn_models, network_size, routing_type, list_of_broken_links, previously_calculated_ft):
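    """
    Evaluates link fault tolerance for the given turn models (indices into
    all_odd_even_list): for every entry of list_of_broken_links the average
    connectivity metric is computed, results already present in
    previously_calculated_ft are reused, a comparison table is printed and the
    updated dictionary is returned.
    """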
ft_dictionary = copy.deepcopy(previously_calculated_ft)
temp_turn_list = []
for item in selected_turn_models:
if item not in ft_dictionary.keys():
temp_turn_list.append(item)
print("number of turn models:", len(selected_turn_models))
print("number of unprocessed turn models:", len(temp_turn_list))
for i in list_of_broken_links:
ft_dictionary = report_odd_even_turn_model_fault_tolerance(True, routing_type, i, network_size,
ft_dictionary, temp_turn_list)
print("\n\n")
print("\t\t\tnumber of broken links")
print("-------------------"*4)
print('%5s' %"#", "\t",end="")
for j in list_of_broken_links:
print('%6s' %j,"\t",end="")
print()
print("-"*80)
for i in range(0, len(selected_turn_models)):
item = selected_turn_models[i]
print('%5s' %item, "\t",end="")
if i>0:
prev_item = selected_turn_models[i-1]
for j in range(0, len(ft_dictionary[item])):
if ft_dictionary[item][j]<ft_dictionary[prev_item][j]:
print('\033[91m'+'%6s' %"{:3.3f}".format(ft_dictionary[item][j])+'\033[0m',"\t",end="")
else:
print('%6s' %"{:3.3f}".format(ft_dictionary[item][j]),"\t",end="")
else:
for value in ft_dictionary[item]:
print('%6s' %"{:3.3f}".format(value),"\t",end="")
print()
return ft_dictionary
def evaluate_turn_model_router_fault_tolerance(selected_turn_models, network_size, routing_type, list_of_broken_routers,
previously_calculated_ft):
ft_dictionary = copy.deepcopy(previously_calculated_ft)
temp_turn_list = []
for item in selected_turn_models:
if item not in ft_dictionary.keys():
temp_turn_list.append(item)
print("number of turn models:", len(selected_turn_models))
print("number of unprocessed turn models:", len(temp_turn_list))
for i in list_of_broken_routers:
ft_dictionary = report_odd_even_turn_model_router_fault_tolerance(True, routing_type, i, network_size,
ft_dictionary, temp_turn_list)
print("\n\n")
print("\t\t\tnumber of broken routers:")
print("-------------------"*4)
print('%5s' %"#", "\t",end="")
for j in list_of_broken_routers:
print('%6s' %j,"\t",end="")
print()
print("-"*80)
for i in range(0, len(selected_turn_models)):
item = selected_turn_models[i]
print('%5s' %item, "\t",end="")
if i>0:
prev_item = selected_turn_models[i-1]
for j in range(0, len(ft_dictionary[item])):
if ft_dictionary[item][j]<ft_dictionary[prev_item][j]:
print('\033[91m'+'%6s' %"{:3.3f}".format(ft_dictionary[item][j])+'\033[0m',"\t",end="")
else:
print('%6s' %"{:3.3f}".format(ft_dictionary[item][j]),"\t",end="")
else:
for value in ft_dictionary[item]:
print('%6s' %"{:3.3f}".format(value),"\t",end="")
print()
return ft_dictionary
def evaluate_robustness_links(max_network_size, max_broken_links):
if max_network_size <= 1:
raise ValueError("max_network_size should be strictly bigger than 1!")
ft_dictionary_minimal = {}
ft_dictionary_non_minimal = {}
for size in range(2, max_network_size+1):
ft_dictionary_minimal = {}
ft_dictionary_non_minimal = {}
list_of_broken_links = range(0, max_broken_links+1)
classes_of_doa, classes_of_doax = evaluate_doa_for_all_odd_even_turn_model_list(size)
print()
print("======================================="*2)
print("starting calculating DoA for size:", size)
selected_turn_models = []
for item in sorted(classes_of_doa.keys()):
selected_turn_models.append(classes_of_doa[item][0])
print("selected turn models:", selected_turn_models)
ft_dictionary_minimal = copy.deepcopy(evaluate_turn_model_fault_tolerance(selected_turn_models, size,
"MinimalPath", list_of_broken_links,
ft_dictionary_minimal))
print()
print("======================================="*2)
print("starting calculating DoA_ex for size:", size)
selected_turn_models = []
for item in sorted(classes_of_doax.keys()):
selected_turn_models.append(classes_of_doax[item][0])
print("selected turn models:", selected_turn_models)
ft_dictionary_non_minimal = copy.deepcopy(evaluate_turn_model_fault_tolerance(selected_turn_models, size,
"NonMinimalPath",
list_of_broken_links,
ft_dictionary_non_minimal))
print()
for routing_type in ["MinimalPath", "NonMinimalPath"]:
selected_turn_models = []
turn_model_class_dict = odd_even_fault_tolerance_metric(size, routing_type)
print()
for item in sorted(turn_model_class_dict.keys()):
selected_turn_models.append(turn_model_class_dict[item][0])
print("======================================="*2)
print("calculating new metric for", routing_type, "routing")
print("selected turn models:", selected_turn_models)
if routing_type == "MinimalPath":
ft_dictionary_minimal = copy.deepcopy(evaluate_turn_model_fault_tolerance(selected_turn_models, size,
routing_type, list_of_broken_links,
ft_dictionary_minimal))
else:
ft_dictionary_non_minimal = copy.deepcopy(evaluate_turn_model_fault_tolerance(selected_turn_models,
size, routing_type,
list_of_broken_links,
ft_dictionary_non_minimal))
print()
return ft_dictionary_minimal, ft_dictionary_non_minimal
def evaluate_robustness_routers(max_network_size):
if max_network_size <= 1:
raise ValueError("max_network_size should be strictly bigger than 1!")
for size in range(2, max_network_size+1):
ft_dictionary_minimal = {}
ft_dictionary_non_minimal = {}
list_of_broken_routers = range(0, size**2+1)
classes_of_doa, classes_of_doax = evaluate_doa_for_all_odd_even_turn_model_list(size)
print()
print("======================================="*2)
print("starting calculating DoA for size:", size)
selected_turn_models = []
for item in sorted(classes_of_doa.keys()):
selected_turn_models.append(classes_of_doa[item][0])
print("selected turn models:", selected_turn_models)
ft_dictionary_minimal = copy.deepcopy(evaluate_turn_model_router_fault_tolerance(selected_turn_models, size,
"MinimalPath",
list_of_broken_routers,
ft_dictionary_minimal))
print()
print("======================================="*2)
print("starting calculating DoA_ex for size:", size)
selected_turn_models = []
for item in sorted(classes_of_doax.keys()):
selected_turn_models.append(classes_of_doax[item][0])
print("selected turn models:", selected_turn_models)
ft_dictionary_non_minimal = copy.deepcopy(evaluate_turn_model_router_fault_tolerance(selected_turn_models,
size, "NoneMinimalPath",
list_of_broken_routers,
ft_dictionary_non_minimal))
print()
for routing_type in ["MinimalPath", "NonMinimalPath"]:
selected_turn_models = []
turn_model_class_dict = odd_even_fault_tolerance_metric(size, routing_type)
print()
for item in sorted(turn_model_class_dict.keys()):
selected_turn_models.append(turn_model_class_dict[item][0])
print("======================================="*2)
print("calculating new metric for", routing_type, "routing")
print("selected turn models:", selected_turn_models)
if routing_type == "MinimalPath":
ft_dictionary_minimal = copy.deepcopy(evaluate_turn_model_router_fault_tolerance(selected_turn_models,
size, routing_type,
list_of_broken_routers,
ft_dictionary_minimal))
else:
ft_dictionary_non_minimal = copy.deepcopy(evaluate_turn_model_router_fault_tolerance(selected_turn_models,
size, routing_type,
list_of_broken_routers,
ft_dictionary_non_minimal))
print()
return ft_dictionary_minimal, ft_dictionary_non_minimal
| siavooshpayandehazad/SoCDep2 | src/main/python/RoutingAlgorithms/turn_model_evaluation/odd_even_evaluation.py | Python | gpl-2.0 | 36,676 |
import multiprocessing
bind = "0.0.0.0:80"
workers = multiprocessing.cpu_count() * 2 + 1
errorlog = '/var/log/django-readonly/error.log'
accesslog = '/var/log/django-readonly/access.log' | WimpyAnalytics/django-readonly-schema | readonly/readonly/settings/gunicorn.py | Python | mit | 188 |
# -*- coding: utf-8 -*-
"""
app.tests.test_site
~~~~~~~~~~~~~~
Provides unit tests for the website.
"""
from json import loads, dumps
import pytest
from app import create_app, db
from app.helper import (
get_table_names, get_models, process, get_init_data, gen_tables,
JSON, get_json)
@pytest.fixture
def client(request):
app = create_app(config_mode='Test')
client = app.test_client()
models = get_models()
tables = list(gen_tables(models))
def get_num_results(table):
        r = client.get(client.prefix + table)
return get_json(r)['num_results']
client.prefix = app.config.get('API_URL_PREFIX', '')
client.get_num_results = get_num_results
with app.test_request_context():
db.create_all()
raw = get_init_data()
client.tables = get_table_names(tables)
client.data = process(raw)
return client
def test_home(client):
r = client.get(client.prefix or '/')
assert r.status_code == 200
def test_api_get(client):
for piece in client.data:
url = client.prefix + piece['table']
for d in piece['data']:
r = client.post(url, data=dumps(d), content_type=JSON)
if r.status_code != 201:
json = get_json(r)
# HACK: 'Could not determine specific validation errors'
# only appears on py27 and py35
if json.get('validation_errors', '').startswith('Could not d'):
continue
assert r.status_code == 201
for table in client.tables:
assert client.get_num_results(table) >= 0
def test_api_delete(client):
for table in client.tables:
old = client.get_num_results(table)
if old > 0:
# delete entry and test that the it was deleted
client.delete('{}{}/1'.format(client.prefix, table))
assert client.get_num_results(table) == old - 1
| nerevu/prometheus-api | app/tests/test_site.py | Python | mit | 1,955 |
from django.contrib import messages
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from tower import ugettext as _
from .forms import ReportForm
from ..base.utils import notify_admins
from ..base.decorators import throttle_view
@throttle_view(methods=['POST'], duration=30)
def report_form(request):
if request.method == 'POST':
form = ReportForm(request.POST)
if form.is_valid():
report = form.save()
context = {'report': report}
subject = render_to_string('reports/email_subject.txt', context)
subject = ''.join(subject.splitlines())
body = render_to_string('reports/email_body.txt', context)
notify_admins(subject, body)
messages.success(request, _('Report sent successfully'))
return redirect('homepage')
else:
form = ReportForm()
context = {'form': form}
return render(request, 'reports/form.html', context)
| mozilla/popcorn_maker | popcorn_gallery/reports/views.py | Python | bsd-3-clause | 1,003 |
# -*- coding: utf-8 -*-
import re
from pyload.plugin.Crypter import Crypter
class SexuriaCom(Crypter):
__name = "SexuriaCom"
__type = "crypter"
__version = "0.01"
__pattern = r'http://(?:www\.)?sexuria\.com/(v1/)?(Pornos_Kostenlos_.+?_(\d+)\.html|dl_links_\d+_\d+\.html|id=\d+\&part=\d+\&link=\d+)'
__config = [("use_subfolder" , "bool", "Save package to subfolder" , True),
("subfolder_per_pack", "bool", "Create a subfolder for each package", True)]
__description = """Sexuria.com decrypter plugin"""
__license = "GPLv3"
__authors = [("NETHead", "NETHead.AT.gmx.DOT.net")]
PATTERN_SUPPORTED_MAIN = re.compile(r'http://(www\.)?sexuria\.com/(v1/)?Pornos_Kostenlos_.+?_(\d+)\.html', re.I)
PATTERN_SUPPORTED_CRYPT = re.compile(r'http://(www\.)?sexuria\.com/(v1/)?dl_links_\d+_(?P<ID>\d+)\.html', re.I)
PATTERN_SUPPORTED_REDIRECT = re.compile(r'http://(www\.)?sexuria\.com/out\.php\?id=(?P<ID>\d+)\&part=\d+\&link=\d+', re.I)
PATTERN_TITLE = re.compile(r'<title> - (?P<TITLE>.*) Sexuria - Kostenlose Pornos - Rapidshare XXX Porn</title>', re.I)
PATTERN_PASSWORD = re.compile(r'<strong>Passwort: </strong></div></td>.*?bgcolor="#EFEFEF">(?P<PWD>.*?)</td>', re.I | re.S)
PATTERN_DL_LINK_PAGE = re.compile(r'"(dl_links_\d+_\d+\.html)"', re.I)
PATTERN_REDIRECT_LINKS = re.compile(r'value="(http://sexuria\.com/out\.php\?id=\d+\&part=\d+\&link=\d+)" readonly', re.I)
def decrypt(self, pyfile):
# Init
self.pyfile = pyfile
self.package = pyfile.package()
# Get package links
package_name, self.links, folder_name, package_pwd = self.decryptLinks(self.pyfile.url)
self.packages = [(package_name, self.links, folder_name)]
def decryptLinks(self, url):
linklist = []
name = self.package.name
folder = self.package.folder
password = None
if re.match(self.PATTERN_SUPPORTED_MAIN, url):
# Processing main page
html = self.load(url)
links = re.findall(self.PATTERN_DL_LINK_PAGE, html)
for link in links:
linklist.append("http://sexuria.com/v1/" + link)
elif re.match(self.PATTERN_SUPPORTED_REDIRECT, url):
# Processing direct redirect link (out.php), redirecting to main page
id = re.search(self.PATTERN_SUPPORTED_REDIRECT, url).group('ID')
if id:
linklist.append("http://sexuria.com/v1/Pornos_Kostenlos_liebe_%s.html" % id)
elif re.match(self.PATTERN_SUPPORTED_CRYPT, url):
# Extract info from main file
id = re.search(self.PATTERN_SUPPORTED_CRYPT, url).group('ID')
html = self.load("http://sexuria.com/v1/Pornos_Kostenlos_info_%s.html" % id, decode=True)
title = re.search(self.PATTERN_TITLE, html).group('TITLE').strip()
if title:
name = folder = title
self.logDebug("Package info found, name [%s] and folder [%s]" % (name, folder))
pwd = re.search(self.PATTERN_PASSWORD, html).group('PWD')
if pwd:
password = pwd.strip()
self.logDebug("Password info [%s] found" % password)
# Process link (dl_link)
html = self.load(url)
links = re.findall(self.PATTERN_REDIRECT_LINKS, html)
if len(links) == 0:
self.LogError("Broken for link %s" % link)
else:
for link in links:
link = link.replace("http://sexuria.com/", "http://www.sexuria.com/")
finallink = self.load(link, just_header=True)['location']
if not finallink or "sexuria.com/" in finallink:
self.LogError("Broken for link %s" % link)
else:
linklist.append(finallink)
# Debug log
self.logDebug("%d supported links" % len(linklist))
for i, link in enumerate(linklist):
self.logDebug("Supported link %d, %s" % (i + 1, link))
return name, linklist, folder, password
| ardi69/pyload-0.4.10 | pyload/plugin/crypter/SexuriaCom.py | Python | gpl-3.0 | 4,207 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
dependencies = [
('delivery', '0050_auto_20160505_2144'),
]
operations = [
migrations.AlterField(
model_name='delivery',
name='name',
field=models.CharField(default=b'2016-05-05T21:46:46.069778', max_length=128, null=True, verbose_name='\u0418\u043c\u044f \u0440\u0430\u0441\u0441\u044b\u043b\u043a\u0438', blank=True),
),
migrations.AlterField(
model_name='mailaccount',
name='auto_active_datetime',
field=models.DateTimeField(default=datetime.datetime(2016, 5, 5, 21, 46, 46, 62255), verbose_name='\u0414\u0430\u0442\u0430 \u0437\u0430\u043a\u0440\u044b\u0442\u0438\u044f \u0430\u043a\u043a\u0430\u0443\u043d\u0442\u0430'),
),
]
| AlexStarov/Shop | applications/delivery/migrations/0051_auto_20160505_2146.py | Python | apache-2.0 | 924 |
from .catalogue import remove_detector, register_detector, detector_catalogue
from .base import Detector, RegexDetector, RegionLocalisedRegexDetector
from .credential import CredentialDetector
from .credit_card import CreditCardDetector
from .date_of_birth import DateOfBirthDetector
from .drivers_licence import DriversLicenceDetector
from .email import EmailDetector
from .phone import PhoneDetector
from .postalcode import PostalCodeDetector
from .skype import SkypeDetector
from .tagged import TaggedEvaluationFilthDetector
from .text_blob import TextBlobNameDetector
from .twitter import TwitterDetector
from .url import UrlDetector
from .user_supplied import UserSuppliedFilthDetector
from .vehicle_licence_plate import VehicleLicencePlateDetector
from . import en_GB
from . import en_US
| datascopeanalytics/scrubadub | scrubadub/detectors/__init__.py | Python | mit | 796 |
from Child import Child
from Node import Node # noqa: I201
EXPR_NODES = [
# An inout expression.
# &x
Node('InOutExpr', kind='Expr',
children=[
Child('Ampersand', kind='AmpersandToken'),
Child('Identifier', kind='IdentifierToken'),
]),
# A #column expression.
Node('PoundColumnExpr', kind='Expr',
children=[
Child('PoundColumn', kind='PoundColumnToken'),
]),
Node('FunctionCallArgumentList', kind='SyntaxCollection',
element='FunctionCallArgument'),
# The try operator.
# try foo()
# try? foo()
# try! foo()
Node('TryOperator', kind='Syntax',
children=[
Child('TryKeyword', kind='TryToken'),
Child('QuestionOrExclamationMark', kind='Token',
is_optional=True,
token_choices=[
'PostfixQuestionMarkToken',
'ExclamationMarkToken',
]),
]),
# A #line expression.
Node('PoundLineExpr', kind='Expr',
children=[
Child('PoundLine', kind='PoundLineToken'),
]),
# A #file expression.
Node('PoundFileExpr', kind='Expr',
children=[
Child('PoundFile', kind='PoundFileToken'),
]),
# A #function expression.
Node('PoundFunctionExpr', kind='Expr',
children=[
Child('PoundFunction', kind='PoundFunctionToken'),
]),
# symbolic-reference-expression -> identifier generic-argument-clause?
Node('SymbolicReferenceExpr', kind='Expr',
children=[
Child('Identifier', kind='IdentifierToken'),
Child('GenericArgumentClause', kind='GenericArgumentClause',
is_optional=True),
]),
# A prefix operator expression.
# -x
# !true
Node('PrefixOperatorExpr', kind='Expr',
children=[
Child('OperatorToken', kind='PrefixOperatorToken',
is_optional=True),
Child('PostfixExpression', kind='Expr'),
]),
# A floating-point literal
# 4.0
# -3.9
# +4e20
Node('FloatLiteralExpr', kind='Expr',
children=[
Child('Sign', kind='PrefixOperatorToken',
is_optional=True),
Child('FloatingDigits', kind='FloatingLiteralToken'),
]),
Node('FunctionCallExpr', kind='Expr',
children=[
Child('CalledExpression', kind='Expr'),
Child('LeftParen', kind='LeftParenToken'),
Child('ArgumentList', kind='FunctionCallArgumentList'),
Child('RightParen', kind='RightParenToken'),
]),
# function-call-argument -> label? ':'? expression ','?
Node('FunctionCallArgument', kind='Syntax',
children=[
Child('Label', kind='IdentifierToken',
is_optional=True),
Child('Colon', kind='ColonToken',
is_optional=True),
Child('Expression', kind='Expr'),
Child('TrailingComma', kind='CommaToken',
is_optional=True),
]),
# An integer literal.
# 3
# +3_400
# +0x4f
Node('IntegerLiteralExpr', kind='Expr',
children=[
Child('Sign', kind='PrefixOperatorToken',
is_optional=True),
Child('Digits', kind='IntegerLiteralToken'),
]),
Node('StringLiteralExpr', kind='Expr',
children=[
Child("StringLiteral", kind='StringLiteralToken')
])
]
| return/swift | utils/gyb_syntax_support/ExprNodes.py | Python | apache-2.0 | 3,611 |
def create_tree(parent, tree, refs=()):
"""Creates tree in database by given list of nested lists of tree nodes.
This functions accepts list of nested lists [1] and creates database
records, maintaining given tree structure.
Parameters:
- parent: treebeard.models.Node, parent node for the tree to build on.
- tree: list of lists with treebeard.models.Node instances.
- refs: tuple, used for internaly for protection against endless recursion.
Returns: None
[1] https://docs.djangoproject.com/en/1.8/ref/templates/builtins/#unordered-list
"""
child = None
refs += (id(tree),)
for node in tree:
if isinstance(node, list):
if id(node) not in refs:
create_tree(child, node, refs=refs)
else:
child = parent.add_child(instance=node)
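# A minimal usage sketch for create_tree (assumes a treebeard-backed model;
# the Page class and slugs below are illustrative, not taken from this project):
#
#     create_tree(root_page, [
#         Page(slug='a'),
#         [Page(slug='a1'), Page(slug='a2')],
#         Page(slug='b'),
#     ])
#
# 'a1' and 'a2' become children of 'a'; 'a' and 'b' become children of root_page.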
def grow_tree_iter(tree, dfs_tree):
"""Iterator over dfs_tree.
Usually you should use this generator like this:
>>> from wagtail.wagtailcore.models import Page
>>> dfs_tree = [
... Page(slug='a', numchild=2),
... Page(slug='b', numchild=0),
... Page(slug='c', numchild=1),
... Page(slug='d', numchild=0),
... Page(slug='e', numchild=0),
... Page(slug='f', numchild=1),
... Page(slug='g', numchild=0),
... ]
>>> tree = []
>>> for children, node in grow_tree_iter(tree, dfs_tree):
... children.append(node.slug)
>>> tree
['a', ['b', 'c', ['d']], 'e', 'f', ['g']]
You can get dfs_tree using treebeard.models.Node.get_descendants method.
Parameters:
- tree: empty list, this list will be populated with nodes.
- dfs_tree: list of nodes ordered as DFS (Depth-first search) [1].
Returns: generator
[1] http://en.wikipedia.org/wiki/Depth-first_search
"""
stack = [(tree, 0)]
for node in dfs_tree:
        children, _ = stack[-1]
children_count = node.get_children_count()
yield children, node
if children_count > 0:
children.append([])
stack.append((children[-1], children_count))
else:
while children_count <= 1 and stack:
children, children_count = stack.pop()
stack.append((children, children_count - 1))
def grow_tree(dfs_tree, callback=(lambda node: node)):
"""Takes flat tree and grows tree composed of nested lists.
Parameters:
    - dfs_tree: list of nodes ordered as DFS (Depth-first search) [1].
    - callback: function applied to each node before it is added to the tree.
Returns: list of nested lists [2] containing flat tree nodes.
[1] http://en.wikipedia.org/wiki/Depth-first_search
[2] https://docs.djangoproject.com/en/1.8/ref/templates/builtins/#unordered-list
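    For the dfs_tree built in the grow_tree_iter doctest above, calling
    grow_tree(dfs_tree, callback=lambda n: n.slug) evaluates to
    ['a', ['b', 'c', ['d']], 'e', 'f', ['g']] (illustrative only).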
"""
tree = []
for children, node in grow_tree_iter(tree, dfs_tree):
children.append(callback(node))
return tree
def transform(tree):
"""Transform list of lists to list of tuples containing item and children.
It actually transforms ``[a, [b]]`` to ``[(a, [(b, [])])]``. This
    transformed structure is more convenient to use in recursive functions,
    while the input tree is more convenient to write.
Parameters:
- tree: list of lists
Returns: lists of tuples containing item and children.
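    Illustrative doctest (plain strings as nodes, added for clarity):
    >>> transform(['a', ['b', 'c'], 'd'])
    [('a', [('b', []), ('c', [])]), ('d', [])]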
"""
result = []
for node in tree:
if isinstance(node, list):
result[-1] = (result[-1][0], transform(node))
else:
result.append((node, []))
return result
| python-dirbtuves/akl.lt | akllt/common/treeutils.py | Python | agpl-3.0 | 3,590 |
# -*- coding: utf-8 -*-
{
'name': 'Mail Tests',
'version': '1.0',
'category': 'Hidden',
'sequence': 9876,
'summary': 'Mail Tests: performances and tests specific to mail',
'description': """This module contains tests related to mail. Those are
present in a separate module as it contains models used only to perform
tests independently to functional aspects of other models. """,
'depends': ['test_performance', 'mail'],
'data': [
'security/ir.model.access.csv',
'security/test_mail_security.xml',
'data/data.xml',
'data/subtype_data.xml',
'data/template_data.xml',
],
'demo': [
],
'installable': True,
'application': False,
}
| rven/odoo | addons/test_mail/__manifest__.py | Python | agpl-3.0 | 720 |
from networkx.algorithms.assortativity import *
from networkx.algorithms.boundary import *
from networkx.algorithms.bridges import *
from networkx.algorithms.chains import *
from networkx.algorithms.centrality import *
from networkx.algorithms.chordal import *
from networkx.algorithms.cluster import *
from networkx.algorithms.clique import *
from networkx.algorithms.communicability_alg import *
from networkx.algorithms.components import *
from networkx.algorithms.coloring import *
from networkx.algorithms.core import *
from networkx.algorithms.covering import *
from networkx.algorithms.cycles import *
from networkx.algorithms.cuts import *
from networkx.algorithms.dag import *
from networkx.algorithms.distance_measures import *
from networkx.algorithms.distance_regular import *
from networkx.algorithms.dominance import *
from networkx.algorithms.dominating import *
from networkx.algorithms.efficiency import *
from networkx.algorithms.euler import *
from networkx.algorithms.graphical import *
from networkx.algorithms.hierarchy import *
from networkx.algorithms.hybrid import *
from networkx.algorithms.link_analysis import *
from networkx.algorithms.link_prediction import *
from networkx.algorithms.lowest_common_ancestors import *
from networkx.algorithms.isolate import *
from networkx.algorithms.matching import *
from networkx.algorithms.minors import *
from networkx.algorithms.mis import *
from networkx.algorithms.operators import *
from networkx.algorithms.planarity import *
from networkx.algorithms.reciprocity import *
from networkx.algorithms.richclub import *
from networkx.algorithms.shortest_paths import *
from networkx.algorithms.similarity import *
from networkx.algorithms.simple_paths import *
from networkx.algorithms.smallworld import *
from networkx.algorithms.smetric import *
from networkx.algorithms.structuralholes import *
from networkx.algorithms.sparsifiers import *
from networkx.algorithms.swap import *
from networkx.algorithms.traversal import *
from networkx.algorithms.triads import *
from networkx.algorithms.vitality import *
from networkx.algorithms.voronoi import *
from networkx.algorithms.wiener import *
# Make certain subpackages available to the user as direct imports from
# the `networkx` namespace.
import networkx.algorithms.assortativity
import networkx.algorithms.bipartite
import networkx.algorithms.node_classification
import networkx.algorithms.centrality
import networkx.algorithms.chordal
import networkx.algorithms.cluster
import networkx.algorithms.clique
import networkx.algorithms.components
import networkx.algorithms.connectivity
import networkx.algorithms.community
import networkx.algorithms.coloring
import networkx.algorithms.flow
import networkx.algorithms.isomorphism
import networkx.algorithms.link_analysis
import networkx.algorithms.lowest_common_ancestors
import networkx.algorithms.operators
import networkx.algorithms.shortest_paths
import networkx.algorithms.tournament
import networkx.algorithms.traversal
import networkx.algorithms.tree
# Make certain functions from some of the previous subpackages available
# to the user as direct imports from the `networkx` namespace.
from networkx.algorithms.bipartite import complete_bipartite_graph
from networkx.algorithms.bipartite import is_bipartite
from networkx.algorithms.bipartite import project
from networkx.algorithms.bipartite import projected_graph
from networkx.algorithms.connectivity import all_pairs_node_connectivity
from networkx.algorithms.connectivity import all_node_cuts
from networkx.algorithms.connectivity import average_node_connectivity
from networkx.algorithms.connectivity import edge_connectivity
from networkx.algorithms.connectivity import edge_disjoint_paths
from networkx.algorithms.connectivity import k_components
from networkx.algorithms.connectivity import k_edge_components
from networkx.algorithms.connectivity import k_edge_subgraphs
from networkx.algorithms.connectivity import k_edge_augmentation
from networkx.algorithms.connectivity import is_k_edge_connected
from networkx.algorithms.connectivity import minimum_edge_cut
from networkx.algorithms.connectivity import minimum_node_cut
from networkx.algorithms.connectivity import node_connectivity
from networkx.algorithms.connectivity import node_disjoint_paths
from networkx.algorithms.connectivity import stoer_wagner
from networkx.algorithms.flow import capacity_scaling
from networkx.algorithms.flow import cost_of_flow
from networkx.algorithms.flow import gomory_hu_tree
from networkx.algorithms.flow import max_flow_min_cost
from networkx.algorithms.flow import maximum_flow
from networkx.algorithms.flow import maximum_flow_value
from networkx.algorithms.flow import min_cost_flow
from networkx.algorithms.flow import min_cost_flow_cost
from networkx.algorithms.flow import minimum_cut
from networkx.algorithms.flow import minimum_cut_value
from networkx.algorithms.flow import network_simplex
from networkx.algorithms.isomorphism import could_be_isomorphic
from networkx.algorithms.isomorphism import fast_could_be_isomorphic
from networkx.algorithms.isomorphism import faster_could_be_isomorphic
from networkx.algorithms.isomorphism import is_isomorphic
from networkx.algorithms.tree.branchings import maximum_branching
from networkx.algorithms.tree.branchings import maximum_spanning_arborescence
from networkx.algorithms.tree.branchings import minimum_branching
from networkx.algorithms.tree.branchings import minimum_spanning_arborescence
from networkx.algorithms.tree.coding import *
from networkx.algorithms.tree.operations import *
from networkx.algorithms.tree.recognition import *
from networkx.algorithms.tree.mst import *
| kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/networkx/algorithms/__init__.py | Python | gpl-3.0 | 5,673 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: junos_netconf
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Configures the Junos Netconf system service
description:
- This module provides an abstraction that enables and configures
the netconf system service running on Junos devices. This module
can be used to easily enable the Netconf API. Netconf provides
a programmatic interface for working with configuration and state
resources as defined in RFC 6242.
extends_documentation_fragment: junos
options:
netconf_port:
description:
- This argument specifies the port the netconf service should
listen on for SSH connections. The default port as defined
in RFC 6242 is 830.
required: false
default: 830
aliases: ['listens_on']
version_added: "2.2"
state:
description:
- Specifies the state of the C(junos_netconf) resource on
the remote device. If the I(state) argument is set to
I(present) the netconf service will be configured. If the
I(state) argument is set to I(absent) the netconf service
will be removed from the configuration.
required: false
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: enable netconf service on port 830
junos_netconf:
listens_on: 830
state: present
- name: disable netconf service
junos_netconf:
state: absent
"""
RETURN = """
commands:
description: Returns the command sent to the remote device
returned: when changed is True
type: str
sample: 'set system services netconf ssh port 830'
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import exec_command
from ansible.module_utils.junos import junos_argument_spec, check_args
from ansible.module_utils.junos import commit_configuration, discard_changes
from ansible.module_utils.network_common import to_list
from ansible.module_utils.six import iteritems
USE_PERSISTENT_CONNECTION = True
def map_obj_to_commands(updates, module):
want, have = updates
commands = list()
if want['state'] == 'present' and have['state'] == 'absent':
commands.append(
'set system services netconf ssh port %s' % want['netconf_port']
)
elif want['state'] == 'absent' and have['state'] == 'present':
commands.append('delete system services netconf')
elif want['state'] == 'present':
if want['netconf_port'] != have.get('netconf_port'):
commands.append(
'set system services netconf ssh port %s' % want['netconf_port']
)
return commands
def parse_port(config):
match = re.search(r'port (\d+)', config)
if match:
return int(match.group(1))
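# Illustrative behaviour of parse_port (hypothetical config text):
#   parse_port('ssh { port 830; }')  -> 830
#   parse_port('')                   -> None (no match)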
def map_config_to_obj(module):
cmd = 'show configuration system services netconf'
rc, out, err = exec_command(module, cmd)
if rc != 0:
module.fail_json(msg='unable to retrieve current config', stderr=err)
config = str(out).strip()
obj = {'state': 'absent'}
if config:
obj.update({
'state': 'present',
'netconf_port': parse_port(config)
})
return obj
def validate_netconf_port(value, module):
if not 1 <= value <= 65535:
module.fail_json(msg='netconf_port must be between 1 and 65535')
def map_params_to_obj(module):
obj = {
'netconf_port': module.params['netconf_port'],
'state': module.params['state']
}
for key, value in iteritems(obj):
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if callable(validator):
validator(value, module)
return obj
def load_config(module, config, commit=False):
exec_command(module, 'configure')
for item in to_list(config):
rc, out, err = exec_command(module, item)
if rc != 0:
module.fail_json(msg=str(err))
exec_command(module, 'top')
rc, diff, err = exec_command(module, 'show | compare')
if diff:
if commit:
exec_command(module, 'commit and-quit')
else:
for cmd in ['rollback 0', 'exit']:
exec_command(module, cmd)
return str(diff).strip()
def main():
"""main entry point for module execution
"""
argument_spec = dict(
netconf_port=dict(type='int', default=830, aliases=['listens_on']),
state=dict(default='present', choices=['present', 'absent']),
)
argument_spec.update(junos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
diff = load_config(module, commands, commit=commit)
if diff:
if module._diff:
result['diff'] = {'prepared': diff}
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| fernandezcuesta/ansible | lib/ansible/modules/network/junos/junos_netconf.py | Python | gpl-3.0 | 6,106 |
# -*- coding: utf-8 -*-
"""
Downloader Plugin for deviantart.com.
"""
from bs4 import BeautifulSoup
import common
import os
import requests
import scandir
import sys
import time
from yapsy.IPlugin import IPlugin
WEBSITE_NAME = "http://www.deviantart.com"
#WEBSITE_BASE_URL = "%s/photos/" % WEBSITE_NAME
plugin_name = "deviantart"
class PluginOne(IPlugin):
"""
Actual Plugin for the Downloader package.
    This contains all the customizable content for the deviantart website.
"""
def __init__(self):
self.session = requests.session()
self.root_checker = {}
#self.safari_cookies = returnBinaryCookies.main()
# self.extracted_cookie = {"www.deviantart.com":urllib.unquote(
# self.safari_cookies["www.hypnopics-collective.net"])}
# print self.extracted_hp_cookie
def parser_options(self, parser):
"""
Add the parser options for this plugin
"""
parser.add_option("--da2", "--deviantart",
action="store_true",
dest="deviantart",
default=False,
help="Download from deviantart.com with Folders",)
parser.add_option("--user",
action="store",
dest="username",
default="",
help="UserName to Login with",)
parser.add_option("--password",
action="store",
dest="password",
default="",
help="Password to Login with",)
return parser
def print_name(self):
"""
Example function from yapsy-example.
"""
print plugin_name
#
# Verify Login
#
def login(self, options):
"""
        Log in to deviantart.com with the username and password given in options.
"""
# http://stackoverflow.com/questions/17226080
# /parsing-html-forms-input-tag-with-beautiful-soup
print "\nTrying to login to Deviantart\n"
# This is the form data that the page sends when logging in
# <input type="hidden" name="validate_token"
# value="2f1810a29dc923636245" autocomplete="off">
# <input type="hidden" name="validate_key"
# value="1376387679" autocomplete="off">
soup = BeautifulSoup(common.fetch_webpage(
session=self.session,
url="https://www.deviantart.com/users/login",
timeout=45,
binary=False))
data = soup.find_all("input", type="hidden")
validate_token = None
validate_key = None
search_data = soup.find_all('input',
{'type': 'hidden',
'name': 'validate_token'})
validate_token = search_data[0]["value"]
search_data = soup.find_all('input',
{'type': 'hidden',
'name': 'validate_key'})
validate_key = search_data[0]["value"]
form_data = {
'username': options.username,
'password': options.password,
'remember_me': '1',
'submit': 'Login',
'ref': '',
'validate_token': validate_token,
'validate_key': validate_key
}
data = common.post_webpage(session=self.session,
url='https://www.deviantart.com/users/login',
data=form_data)
data = common.fetch_webpage(session=self.session,
url='http://www.deviantart.com/')
if data.find('"loggedIn":true') == -1:
print "\nUnable to Login! Aborting. \n"
print data
sys.exit(1)
elif data.find('<span class="field_error" rel="password">') != -1:
# elif data.find('<span class="field_error" rel="password">The
# password you entered was incorrect.</span>') != -1:
print "\nBad DeviantArt Password. Aborting. \n"
sys.exit(1)
else:
print "\nLogin to DeviantArt successful!\n"
#
# Find Folders in Gallery Page
#
def search_for_folders(self, soup_bowl):
"""
Takes the contents of a BeautifulSoup container, and
checks for known gallery / folder markers.
        This will return a list of tuples:
            (Folder Name, Folder URL)
"""
found_urls = []
folder_count = 1
#
# div's with Class tv150
#
tv150_tags = soup_bowl.find_all('div', {'class': 'tv150'})
for tgx in tv150_tags:
folder_name = tgx.find_all("div", {"class": "tv150-tag"})[0].text
folder_url = tgx.find_all("a", {'class': 'tv150-cover'})[0]["href"]
found_urls.append((folder_name, folder_url))
# http://nirufe.deviantart.com/gallery/
# <a href="http://yayacosplay.deviantart.com/gallery/33798593"
# class="tv150-cover"></a>
#
# div's with class rs-customicon-cont
#
customicons = soup_bowl.find_all(
'div', {'class': 'rs-customicon-cont'})
for c_icon in customicons:
folder_url = c_icon.find_all("a",\
{"class": "rs-customicon-link"})[0]["href"]
try:
folder_name = c_icon.find_all("a", {'class': ''})[0].text
except IndexError:
print "\t\tFailure to find description"
folder_name = "FolderName %s" % folder_count
folder_count += 1
found_urls.append((folder_name, folder_url))
# <a href="http://ulorinvex.deviantart.com/gallery/977878"
# class="rs-customicon-link">
#<img src="http://a.deviantart.net/gallerythumbs/8/
# 7/000977878.jpg?3"
# alt="">
# </a>
#
# div with class of gl-text
#
class_a_tags = soup_bowl.find_all('div', {'class': 'gl-text'})
for at_x in class_a_tags:
# Class A tags require a filter, since those custom setups
# seem to use the class="a" on all hyperlinks, not just for
# the gallery folders.
#
# So check to see if the href has the server name.
folder_name = at_x.find_all("a", {"class": "a"})[0].text
folder_url = at_x.find_all("a", {"class": "a"})[0]["href"]
found_urls.append((folder_name, folder_url))
#<div style="line-height:1.3em" class="gl-text"
# collect_rid="20:32170436">
#<a class="a"
# href="http://arconius.deviantart.com/gallery/32170436">
# Ponies</a>
#</div>
return found_urls
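    # Illustrative shape of the returned list (folder name and URL taken from
    # the sample markup in the comments above, otherwise hypothetical):
    #   [("Ponies", "http://arconius.deviantart.com/gallery/32170436"), ...]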
def download(self, options):
"""
Start the download process, meta manager.
Grab the folder list from DA, and process each folder
"""
self.session = common.setup_requests(self.session, WEBSITE_NAME)
if options.username:
#
# Use login information, if provided.
#
self.login(options)
status = common.status()
status = self.download_gallery(options.url_to_fetch,
options.download_folder,
options,
status,
root=True)
return status.return_counts()
def download_gallery(self,
gallery_url,
download_path,
options,
status,
root=False):
"""
Download an complete gallery, calls download_gallery_images
for the actual image download.
This creates the folder structure, and walks through it
calling download_gallery_images to download the images.
"""
current_webpage = common.fetch_webpage(session=self.session,
url=gallery_url,
timeout=45)
soup = BeautifulSoup(current_webpage)
#
# Grab the main web page from the URL to fetch
#
# Search for folders
folder_list = self.search_for_folders(soup_bowl=soup)
for (subgallery_name, subgallery_url) in folder_list:
#
# Process the folder list, and download
# the images for the subfolders
#
if options.downloadlimit > 0 and \
status.return_downloads() >= options.downloadlimit:
print "X",
return status
if subgallery_name != None:
subgallery_dl_path = download_path + os.sep +\
common.clean_filename(subgallery_name) + os.sep
if subgallery_url != gallery_url:
#
# Clubs typically have the featured gallery which points to
# itself and can cause a recursion loop
#
status = self.download_gallery(subgallery_url,
subgallery_dl_path,
options,
status,
root=False)
time.sleep(1)
gallery_name = soup.title.text
gallery_name = gallery_name[0:gallery_name.find(" by ")].strip()
        if root:
            # Use a separate loop variable so the directory walk does not
            # clobber the boolean 'root' flag that is passed along below.
            for walk_root, dirnames, filenames in scandir.walk(download_path):
                for filename in filenames:
                    self.root_checker[filename.lower().strip()] = True
status = self.download_gallery_images(gallery_url,
download_path,
options,
status,
root=root)
return status
#
# Download Gallery
#
def download_gallery_images(self,
gallery_url,
download_path,
options,
status,
root=False):
"""
Download images from a deviantart gallery
"""
#
# Download and process the webpage
current_skips = 0
subfolder_data = common.fetch_webpage(session=self.session,
url=gallery_url, timeout=60)
subfolder = BeautifulSoup(subfolder_data)
if gallery_url.find("?offset") == -1:
print "\n\tProcessing Gallery - %30s" % (gallery_url),
else:
print "R",
links = subfolder.find_all('a', {'class': 'thumb',
'data-super-img': True})
for xlink in links:
if options.downloadlimit > 0 and \
status.return_downloads() >= options.downloadlimit:
print "X"
return status
image_file = xlink["data-super-img"]
file_to_download = image_file.replace("-t", "").strip()
file_to_download = common.clean_filename(
file_to_download,
max_length=240)
#
# Does directory exist? If not create it
#
if not os.path.exists(
download_path):
os.makedirs(download_path)
#
# Check for file already existing,
# if so, don't download
#
if root and os.path.split(file_to_download)[1].lower().strip() in\
self.root_checker:
status.add_skipped(filename=file_to_download,
options=options)
current_skips += 1
if options.skiplimit != 0 and \
current_skips >= options.skiplimit:
print "S"
return status
continue
if os.path.exists(
download_path + # + gallery_name + os.sep +
os.path.split(file_to_download)[1]):
status.add_skipped(filename=file_to_download,
options=options)
current_skips += 1
if options.skiplimit != 0 and \
current_skips >= options.skiplimit:
print "S"
return status
else:
if common.download_file(
session=self.session,
url=file_to_download,
filename=os.path.split(file_to_download)[1],
download_folder=download_path,
timeout=45):
status.add_download(filename=file_to_download,
options=options)
else:
status.add_error(filename=file_to_download,
options=options)
time.sleep(.10)
next_data = subfolder.find_all('li', {'class': 'next'})
if next_data:
next_data = next_data[0].find("a", {"class": "away"})
if next_data != None:
next_data = next_data.get("href")
next_gallery_url = \
gallery_url[0:(gallery_url.find(r"/gallery"))]\
+ next_data
time.sleep(.5)
status = self.download_gallery_images(next_gallery_url,
download_path,
options,
status,
root=root)
return status
def plugin_run(self):
"""
meta function for plugin
"""
pass
#
# Add empty folder support
#
# e.g. http://amazonmandy.deviantart.com/gallery/41236523
#
| bschollnick/downloader | plugins/deviantart.py | Python | mpl-2.0 | 14,502 |
import view
import wx
import wx.gizmos as gizmos
import wx.lib.mixins.listctrl as listmix
import sys
from odict import OrderedDict
from controls import DictListCtrl
from util import rgb, ArtListMixin, has_icon, bidict, button
from functools import partial
import gdb
import os
class VariableEditor(wx.Panel):
def __init__(self, parent, widget=None):
wx.Panel.__init__(self, parent, id=wx.ID_ANY)
sizer = wx.BoxSizer(wx.HORIZONTAL)
self.widget=widget
if widget:
self.widget = widget
widget.Reparent(self)
sizer.AddStretchSpacer(1)
sizer.Add(widget, flag=wx.EXPAND)
self.SetSizer(sizer)
def DoGetBestSize(self):
return wx.Size(100,100)
class RuntimeTree(gizmos.TreeListCtrl, ArtListMixin):
def __init__(self, parent):
super(RuntimeTree, self).__init__(id=-1, parent=parent, style=wx.TR_DEFAULT_STYLE | wx.TR_FULL_ROW_HIGHLIGHT | wx.TR_HIDE_ROOT | wx.TR_HAS_BUTTONS | wx.TR_LINES_AT_ROOT)
ArtListMixin.__init__(self)
self.SetFont(wx.Font(8, wx.FONTFAMILY_MODERN, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL))
self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.on_expanding)
self.Bind(wx.EVT_TREE_ITEM_GETTOOLTIP, self.on_get_tooltip)
self.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.on_begin_label_edit)
self.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.on_end_label_edit)
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.on_select_item)
self.model = None
self.AddColumn('Context')
self.AddColumn('Value')
self.SetColumnEditable(1, True)
self.SetColumnAlignment(1, wx.ALIGN_RIGHT)
self.clear()
def on_begin_label_edit(self, evt):
pass
def on_select_item(self, evt):
item = evt.GetItem()
if item in self.vars:
print self.vars[item]
def on_end_label_edit(self, evt):
item = evt.GetItem()
data = self.GetPyData(item)
if data in self.vars and data in self.model.vars:
new_var_value = evt.GetLabel()
self.model.var_assign(data, new_var_value)
evt.Veto()
def on_get_tooltip(self, evt):
item = evt.GetItem()
if self.model and item:
if item == self.stack_item:
evt.SetToolTip(wx.ToolTip("Stack Depth: %d frames" % self.model.stack.depth))
data = self.GetPyData(item)
if hasattr(data, 'file'): # This is a stack frame
evt.SetToolTip(wx.ToolTip("Stack frame %s() at 0x%x %s" % (data.func, data.addr, "in file %s" % data.file if data.file else "")))
elif data in self.vars:
evt.SetToolTip(wx.ToolTip(self.model.vars[data].expression))
def on_expanding(self, evt):
item=evt.GetItem()
item_data=self.GetPyData(item)
if hasattr(item_data, 'level'): #item_data is a stack frame, and we wish to list its locals
self.model.stack_list_locals(frame=item_data.level, callback=partial(self.on_listed_locals, item_data))
elif item_data in self.vars:
self.model.var_list_children(item_data, callback=partial(self.on_listed_children, item))
def on_listed_children(self, parent, result):
if hasattr(result, 'children'):
for child in result.children:
varname= child['child']['name']
self.pending_vars[varname] = parent
def on_listed_locals(self, frame, result):
if result.cls != 'error':
if hasattr(result, 'locals') and frame.key in self.frames and frame.key not in self.expanded_frames:
                self.expanded_frames.add(frame.key)
for item in result.locals:
varname = self.model.var_create(item['name'], frame=frame.level, callback=partial(self.on_created_framevar, frame))
def on_created_framevar(self, frame, result):
if hasattr(result, 'name') and frame.key in self.frames:
self.pending_vars[result.name] = self.frames[frame.key]
self.model.var_update(result.name)
#wx.CallAfter(self.add_var_item, self.frames[frame], result.name)
def add_var_item(self, parent, name):
var = self.model.vars[name]
var_item = self.AppendItem(parent, var.expression)
self.SetItemPyData(var_item, name)
if var.children:
self.SetItemHasChildren(var_item, bool(var.children))
else:
self.SetItemHasChildren(var_item, False)
self.SetItemText(var_item, var.data, 1)
icon_name = var.type.icon_name
if has_icon(icon_name):
self.set_item_art(var_item, icon_name)
self.vars[name] = var_item
def update_var_item(self, name):
var = self.model.vars[name]
if name in self.vars:
var_item = self.vars[name]
self.SetItemText(var_item, var.expression, 0)
self.SetItemText(var_item, str(var.data), 1)
if var.children:
self.SetItemHasChildren(var_item, True)
else:
self.SetItemHasChildren(var_item, False)
def delete_var_item(self, name):
if name in self.vars:
var_item = self.vars[name]
self.Delete(var_item)
self.vars.pop(name)
def add_frame_item(self, frame):
item = self.AppendItem(self.stack_item, frame.func + "( )")
self.set_item_art(item, 'frame.png' if frame.level != 0 else 'frame_active.png')
self.SetItemHasChildren(item)
self.SetItemBold(item, True)
self.SetPyData(item, frame)
self.frames[frame.key] = item
def __get_evt_item(self, evt):
pt = evt.GetPosition()
item, flags = self.HitTest(pt)
return item
def set_model(self, model):
self.model = model
self.model.Bind(gdb.EVT_GDB_UPDATE_VARS, self.on_var_update)
self.model.Bind(gdb.EVT_GDB_UPDATE_STACK, self.on_stack_update)
self.model.Bind(gdb.EVT_GDB_UPDATE_BREAKPOINTS, self.on_breakpoint_update)
def set_item_art(self, item, name, style=wx.TreeItemIcon_Normal):
if name not in self.art:
self.add_art(name)
self.SetItemImage(item, self.art[name], style)
def on_var_update(self, evt):
names = evt.data
for name in names:
if name in self.model.vars:
if name in self.pending_vars: # Vars that are waiting to get added to the tree
parent = self.pending_vars.pop(name)
wx.CallAfter(self.add_var_item, parent, name)
elif name in self.vars: # Vars that are already in the tree
wx.CallAfter(self.update_var_item, name)
else:
if name in self.pending_vars: self.pending_vars.pop(name)
if name in self.vars: wx.CallAfter(self.delete_var_item,name)
def on_stack_update(self, evt):
if self.model:
stack = self.model.stack
stack_keys = set([frame.key for frame in stack])
items_to_remove = set()
for frame_key, frame_item in self.frames.iteritems():
if not stack.has_key(frame_key):
self.Delete(frame_item)
items_to_remove.add(frame_key)
for frame_key in items_to_remove: self.frames.pop(frame_key)
item = None
for frame in reversed(list(stack)):
if frame.key not in self.frames:
self.add_frame_item(frame)
if item:
self.set_item_art(item, 'frame_active.png')
def on_breakpoint_update(self, evt):
if self.model:
breakpoints = self.model.breakpoints
#self.DeletChildren(self.breakpoints_item)
self.DeleteChildren(self.breakpoints_item)
print breakpoints
for bp in breakpoints:
if bp.fullname:
name = os.path.split(os.path.abspath(bp.fullname))[1]
else:
name = '0x%x' % bp.address
item = self.AppendItem(self.breakpoints_item, name)
self.SetPyData(item, bp)
self.SetItemText(item, str(bp.line), 1)
self.set_item_art(item, 'stop.png' if bp.enabled else 'stop_disabled.png')
def update(self):
pass
def clear(self):
self.DeleteAllItems()
root_item = self.AddRoot('root')
self.root_item = root_item
self.stack_item = self.AppendItem(root_item,'Call Stack')
self.breakpoints_item = self.AppendItem(root_item, 'Breakpoints')
self.registers_item = self.AppendItem(root_item, 'Registers')
self.set_item_art(self.registers_item, 'chip.png')
self.set_item_art(self.stack_item, 'stack.png')
self.set_item_art(self.breakpoints_item, 'breakpoint.png')
self.frames = bidict({})
self.vars = bidict({})
self.pending_vars = {}
self.expanded_frames = set()
class DataView(view.View):
def __init__(self, *args, **kwargs):
super(DataView, self).__init__(*args, **kwargs)
self.tree = RuntimeTree(self)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(self.tree, 1, wx.EXPAND)
self.SetSizer(sizer)
def set_model(self, model):
self.tree.set_model(model)
def update(self, stack):
self.tree.update() | ryansturmer/cuttlebug | cuttlebug/ui/views/data_view.py | Python | mit | 9,919 |
# Initialize variable
if 'old_max_temp' not in locals():
old_max_temp = heat_storage.target_temperature
if (env.get_day_of_year() > 120) and (env.get_day_of_year() < 273):
# In summer the heat_storage can cool down to min_temperature if
# target_temperature is reached
if abs(heat_storage.target_temperature - heat_storage.get_temperature()) < 5:
if old_max_temp == heat_storage.target_temperature:
old_max_temp = heat_storage.target_temperature
heat_storage.target_temperature = heat_storage.min_temperature
# min_temperature is reached and heating should start
else:
heat_storage.target_temperature = old_max_temp
# Set back in winter
elif old_max_temp != heat_storage.target_temperature:
heat_storage.target_temperature = old_max_temp
# 2000 € more expensive (longer bhkw up-time) but 9000 power on's less of bhkw and 1000 more of plb
| SEC-i/ecoControl | snippets/hs_summer_max_temp.py | Python | mit | 923 |
from time import sleep
import pyupm_servo as s
import pyupm_grove as g
servo = s.ES08A(5)
servo.setAngle(30)
temp = g.GroveTemp(0)
print 'Setting servo at home position'
sleep(1)
if __name__ == '__main__':
    while True:
        # Linear mapping from temperature (degrees C) to servo angle:
        # roughly 23.75 C maps to 0 degrees and 31.25 C maps to 180 degrees.
        angle = (temp.value() * 24) - 570
        if angle > 180 or angle < 0:
            # Out-of-range readings send the servo back to the home position.
            servo.setAngle(30)
        else:
            servo.setAngle(angle)
sleep(1.5)
del servo
| TheIoTLearningInitiative/CodeLabs | Sandbox/GroveExamples/projects/temperature-monitor/temperature-gauge.py | Python | apache-2.0 | 435 |
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.Model import Model
logger = logging.getLogger(__name__)
class OverrideableCpe2IdRefType(Model):
MODEL_MAP = {
'attributes': {
'override': {'type': 'BooleanType', 'default': False},
}
}
| cjaymes/pyscap | src/scap/model/xccdf_1_2/OverrideableCpe2IdRefType.py | Python | gpl-3.0 | 929 |
from twisted.internet.defer import Deferred
def out(s): print s
d = Deferred()
d.addCallbacks(out, out)
d.errback(Exception('First error'))
d.callback('First result')
print 'Finished'
#[Failure instance: Traceback (failure with no frames): <type 'exceptions.Exception'>: First error
#]
#Traceback (most recent call last):
# File "defer-7.py", line 6, in <module>
# d.callback('First result')
# File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 459, in callback
# self._startRunCallbacks(result)
# File "/usr/local/lib/python2.7/site-packages/twisted/internet/defer.py", line 560, in _startRunCallbacks
# raise AlreadyCalledError
#twisted.internet.defer.AlreadyCalledError
| tidalmelon/twisted-intro | twisted-deferred/defer-7.py | Python | mit | 714 |
#!/usr/bin/env python
import six
import os
from lxml import etree
from io import StringIO, BytesIO, IOBase
from . import utils
from . import elements
from . import dtd
def create(root_tag, dtd_url=None, dtd_str=None):
"""Create a python object for the given root_tag
:param root_tag: The root tag to create
:param dtd_url: The dtd url
:param dtd_str: The dtd as string
"""
url = dtd_url if dtd_url else StringIO(dtd_str)
dtd_obj = dtd.DTD(url)
dic = dtd_obj.parse()
if root_tag not in dic:
raise Exception('Bad root_tag %s, '
'it\'s not supported by the dtd' % root_tag)
obj = dic[root_tag]()
obj.dtd_url = dtd_url
obj.encoding = elements.DEFAULT_ENCODING
return obj
def load(filename, validate=True):
"""Generate a python object
:param filename: XML filename or Byte like object we should load
:param validate: validate the XML before generating the python object.
:type filename: str
:type validate: bool
:return: the generated python object
:rtype: :class:`Element`
"""
parser = etree.XMLParser(strip_cdata=False)
tree = etree.parse(filename, parser=parser)
dtd_url = tree.docinfo.system_url
path = (os.path.dirname(filename)
if not isinstance(filename, BytesIO) else None)
dtd_obj = dtd.DTD(dtd_url, path)
if validate:
dtd_obj.validate_xml(tree)
dic = dtd_obj.parse()
root = tree.getroot()
obj = dic[root.tag]()
obj.load_from_xml(root)
obj.filename = filename
obj.dtd_url = dtd_url
obj.encoding = tree.docinfo.encoding
return obj
def load_string(xml_str, validate=True):
"""Generate a python object
:param xml_str: the XML file as string
:type xml_str: str
:param validate: validate the XML before generating the python object.
:type validate: bool
:return: the generated python object
:rtype: :class:`Element`
"""
if not isinstance(xml_str, BytesIO):
# TODO: Get encoding from the dtd file (xml tag).
xml_str = BytesIO(xml_str.encode('utf-8'))
return load(xml_str, validate)
def generate_form(filename, form_action=None, form_filename=None, validate=True):
"""Generate the HTML form for the given filename.
:param filename: the XML filename we should load
:type filename: str
:param form_action: the action to put on the HTML form
:type form_action: str
:param validate: validate the XML before generating the form.
:type validate: bool
:return: the generated HTML form
:rtype: str
"""
if not form_filename:
form_filename = filename
obj = load(filename, validate)
return generate_form_from_obj(obj, form_action, form_filename, validate)
def generate_form_from_obj(obj, form_action=None, form_filename=None,
validate=True, form_attrs=None):
hidden_inputs = (
'<input type="hidden" name="_xml_filename" '
'id="_xml_filename" value="%s" />'
'<input type="hidden" name="_xml_dtd_url" '
'id="_xml_dtd_url" value="%s" />'
'<input type="hidden" name="_xml_encoding" '
'id="_xml_encoding" value="%s" />'
) % (
form_filename or '',
obj.dtd_url,
obj.encoding or elements.DEFAULT_ENCODING,
)
attrs = {}
if form_attrs:
attrs = form_attrs.copy()
if 'id' not in attrs:
attrs['id'] = 'xmltool-form'
if form_action:
attrs['action'] = form_action
attrs_str = ' '.join(sorted(['%s="%s"' % tple for tple in attrs.items()]))
html = ['<form method="POST" %s>' % attrs_str]
html += [hidden_inputs]
html += [obj._to_html()]
html += ['</form>']
return ''.join(html)
def update(filename, data, validate=True, transform=None):
"""Update the file named filename with data.
:param filename: the XML filename we should update
:param data: the result of the submitted data.
:param validate: validate the updated XML before writing it.
:type filename: str
:type data: dict style like: dict, webob.MultiDict, ...
:type validate: bool
:param transform: function to transform the XML string just before
writing it.
:type transform: function
:return: the object generated from the data
:rtype: :class:`Element`
"""
data = utils.unflatten_params(data)
encoding = data.pop('_xml_encoding')
dtd_url = data.pop('_xml_dtd_url')
if len(data) != 1:
raise Exception('Bad data')
root_tag = list(data.keys())[0]
dic = dtd.DTD(dtd_url, path=os.path.dirname(filename)).parse()
obj = dic[root_tag]()
obj.load_from_dict(data)
obj.write(filename, encoding, dtd_url=dtd_url, validate=validate,
transform=transform)
return obj
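# Illustrative usage sketch (added for clarity; not part of the original
# module). The file name, form action and ``submitted_data`` variable below
# are hypothetical:
#
#     obj = load('book.xml')                       # parse and validate a file
#     html = generate_form('book.xml',
#                          form_action='/save')    # build the editing form
#     # once the form has been submitted, write the changes back:
#     obj = update('book.xml', submitted_data)
#
# ``submitted_data`` is the flat dict-like result of the form submission and
# must still contain the special _xml_encoding and _xml_dtd_url fields that
# the generated form embeds as hidden inputs.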
def new(dtd_url, root_tag, form_action=None, form_attrs=None):
dic = dtd.DTD(dtd_url).parse()
obj = dic[root_tag]()
# Merge the following line with the function which generate the form!
hidden_inputs = (
'<input type="hidden" name="_xml_filename" '
'id="_xml_filename" value="" />'
'<input type="hidden" name="_xml_dtd_url" '
'id="_xml_dtd_url" value="%s" />'
'<input type="hidden" name="_xml_encoding" '
'id="_xml_encoding" value="%s" />'
) % (
dtd_url,
elements.DEFAULT_ENCODING,
)
attrs = {}
if form_attrs:
attrs = form_attrs.copy()
if 'id' not in attrs:
attrs['id'] = 'xmltool-form'
if form_action:
attrs['action'] = form_action
attrs_str = ' '.join(sorted(['%s="%s"' % tple for tple in attrs.items()]))
html = ['<form method="POST" %s>' % attrs_str]
html += [hidden_inputs]
html += [obj._to_html()]
html += ['</form>']
return ''.join(html)
def getElementData(elt_id, data):
"""Get the dic from data to load last element of elt_id
"""
data = utils.unflatten_params(data)
lis = elt_id.split(':')
tagname = lis[-1]
for v in lis:
try:
if isinstance(data, list):
v = int(v)
data = data[v]
except (KeyError, IndexError):
data = {}
break
return {tagname: data}
def _get_obj_from_str_id(str_id, dtd_url=None, dtd_str=None, data=None):
"""Load object according to the given str_id
    .. note:: If data is passed, load the data into this object
"""
url = dtd_url if dtd_url else StringIO(dtd_str)
dic = dtd.DTD(url).parse()
splitted = str_id.split(':')
s = splitted.pop(0)
cls = dic[s]
obj = cls()
if data:
obj.load_from_dict(data)
index = None
while splitted:
s = splitted.pop(0)
obj = obj.get_or_add(s, index=index)
if len(splitted) > 0:
index = None
if isinstance(obj, list):
index = int(splitted.pop(0))
if isinstance(obj, elements.TextElement) and obj.text is None:
obj.set_text('')
return obj
def _get_parent_to_add_obj(elt_id, tagname, data, dtd_url=None, dtd_str=None):
"""Create element from data and elt_id and determine if tagname can be
added to it or its parent.
"""
target_obj = _get_obj_from_str_id(elt_id, dtd_url=dtd_url, dtd_str=dtd_str,
data=data)
if target_obj.is_addable(tagname):
return target_obj, 0
if target_obj._parent_obj and target_obj._parent_obj.is_addable(tagname):
index = target_obj.position
if index is not None:
index += 1
else:
index = None
return target_obj._parent_obj, index
return None, None
def _get_data_for_html_display(obj):
"""Returns the data need to display the object in HTML.
"""
return {
'jstree_data': obj.to_jstree_dict(),
'previous': obj.get_previous_js_selectors(),
'html': obj.to_html(),
'elt_id': ':'.join(obj.prefixes),
}
def _add_new_element_from_id(elt_id, data, clipboard_data, dtd_url=None,
dtd_str=None, skip_extra=False):
"""Create an element from data and elt_id. This function should be used to
make some copy/paste.
:param skip_extra: If True we don't load the attributes nor the comments
:type skip_extra: bool
"""
keys = list(clipboard_data.keys())
assert(len(keys) == 1)
tagname = keys[0]
parentobj, index = _get_parent_to_add_obj(
elt_id, tagname, data, dtd_url=dtd_url, dtd_str=dtd_str)
if not parentobj:
return None
obj = parentobj.add(tagname, index=index)
obj.load_from_dict(clipboard_data, skip_extra=skip_extra)
return obj
def get_new_element_data_for_html_display(*args, **kw):
"""Create new sub object according to the given params and returns the data
to display it.
"""
html_renderer = kw.pop('html_renderer', None)
obj = _add_new_element_from_id(*args, **kw)
if not obj:
return None
obj.root.html_renderer = html_renderer
return _get_data_for_html_display(obj)
def get_data_from_str_id_for_html_display(str_id, dtd_url=None, dtd_str=None,
html_renderer=None):
"""Get the sub object corresponding to the given str_id and returns the
data to display it.
"""
obj = _get_obj_from_str_id(str_id, dtd_url, dtd_str)
if not obj:
return None
obj.root.html_renderer = html_renderer
return _get_data_for_html_display(obj)
| LeResKP/xmltool | xmltool/factory.py | Python | mit | 9,492 |
import theano
from utils import srng
def dropout(input, dropout_rate=0):
    if dropout_rate > 0:
        retain = 1 - dropout_rate
        # Inverted dropout: draw a 0/1 binomial mask that keeps each unit
        # with probability `retain`, and scale the kept activations by
        # 1 / retain so no rescaling is needed at prediction time.
        d_output = (input / retain) * srng.binomial(input.shape, p=retain,
                dtype='int32').astype('float32')
else:
d_output = input
return d_output
| ozanarkancan/KuConnect | kuconnect/dropout.py | Python | mit | 309 |
import numpy
import time
import sys
import subprocess
import os
import random
from rnn_slu.rnn.jordan import model
from rnn_slu.metrics.accuracy import conlleval
from rnn_slu.utils.tools import shuffle, minibatch, contextwin
if __name__ == '__main__':
if (len(sys.argv) < 7):
print "Usage: " + sys.argv[0] + " <model_directory> <test_file> <word_dictionary> <label_dictionary> <config_file> <output_file>"
sys.exit(0)
#Read word dict
words2idx={}
word_dict = open(sys.argv[3], "r")
for line in word_dict:
key,val=line.split()
words2idx[key] = int(val)
idx2word = dict((k,v) for v,k in words2idx.iteritems())
#Read label dict
labels2idx={}
label_dict = open(sys.argv[4], "r")
for line in label_dict:
key,val=line.split()
labels2idx[key] = int(val)
idx2label = dict((k,v) for v,k in labels2idx.iteritems())
#Read Test Data
test_lex=[]
test_y = []
temp_lex = []
temp_y = []
test_file = open(sys.argv[2], "r")
for line in test_file:
if line in ['\n', '\r\n']:
if len(temp_lex) > 0:
test_lex.append(temp_lex)
test_y.append(temp_y)
temp_lex=[]
temp_y=[]
continue
token,label=line.split()
        if token in words2idx:
temp_lex.append(words2idx[token])
else:
temp_lex.append(words2idx["<UNK>"])
        if label in labels2idx:
temp_y.append(labels2idx[label])
else:
temp_y.append(labels2idx["<UNK>"])
folder = os.path.basename(sys.argv[1])
s = {'lr':0.0627142536696559,
'verbose':1,
'decay':False, # decay on the learning rate if improvement stops
'win':7, # number of words in the context window
'bs':10, # mini-batch size
'nhidden':100, # number of hidden units
'seed':345,
'emb_dimension':100, # dimension of word embedding
'nepochs':10}
config_file = open(sys.argv[5], "r")
for line in config_file:
param,val=line.split()
if param == "lr:":
s['lr'] = float(val)
elif param == "win:":
s['win'] = int(val)
elif param == "bs:":
s['bs'] = int(val)
elif param == "nhidden:":
s['nhidden'] = int(val)
elif param == "seed:":
s['seed'] = int(val)
elif param == "emb_dimension:":
s['emb_dimension'] = int(val)
elif param == "nepochs:":
s['nepochs'] = int(val)
vocsize = len(words2idx)
nclasses = len(labels2idx)
nsentences = len(test_lex)
numpy.random.seed(s['seed'])
random.seed(s['seed'])
rnn = model( nh = s['nhidden'],
nc = nclasses,
ne = vocsize,
de = s['emb_dimension'],
cs = s['win'] )
rnn.load(folder)
predictions_test = [ map(lambda x: idx2label[x], \
rnn.classify(numpy.asarray(contextwin(x, s['win'])).astype('int32')))\
for x in test_lex ]
groundtruth_test = [ map(lambda x: idx2label[x], y) for y in test_y ]
words_test = [ map(lambda x: idx2word[x], w) for w in test_lex]
res_test = conlleval(predictions_test, groundtruth_test, words_test, sys.argv[6])
print 'Test set performance -- F1: ', res_test['f1'], ' '*20
| marcomanciniunitn/Final-LUS-project | RNN/rnn/lus_rnn_lab/rnn_slu/lus/rnn_jordan_test.py | Python | gpl-3.0 | 3,463 |
"""
Learning python3
"""
def document_it(func):
'''
    decorator for func: prints the decorated function's name, arguments and result.
'''
def new_function(*args, **kwargs):
'''
        internal wrapper that prints the function's parameters and result.
'''
print('Running functions:', func.__name__)
print('Positional arguments:', args)
print('Keyword arguments:', kwargs)
result = func(*args, **kwargs)
print('Result:', result)
return result
return new_function
@document_it
def add_ints0(add_a, add_b):
'''
    add with decorator @document_it.
'''
return add_a + add_b
def square_it(func):
'''
    decorator for func: returns the square of func's return value.
'''
def new_function(*args, **kwargs):
'''
        internal wrapper that returns the square of func's result.
'''
result = func(*args, **kwargs)
return result * result
return new_function
@document_it
@square_it
def add_ints1(add_a, add_b):
'''
    add with decorators @document_it and @square_it (square_it is the inner wrapper).
'''
return add_a + add_b
@square_it
@document_it
def add_ints2(add_a, add_b):
'''
    add with decorators @square_it and @document_it (document_it is the inner wrapper).
'''
return add_a + add_b
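# Minimal demo added for illustration; the expected values follow from the
# decorator definitions above and are not part of the original file.
#   add_ints1 is document_it(square_it(add_ints1)): the logging wrapper sees
#   the squared result (25 for 2 + 3).
#   add_ints2 is square_it(document_it(add_ints2)): the log shows the raw sum
#   (5) while the value actually returned is still squared (25).
if __name__ == '__main__':
    print(add_ints0(1, 2))   # logs the call details, then prints 3
    print(add_ints1(2, 3))   # log reports 'Result: 25'; prints 25
    print(add_ints2(2, 3))   # log reports 'Result: 5'; prints 25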
| llv22/python3_learning | chapter4/sample.py | Python | apache-2.0 | 1,306 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015-2018 by ExopyHqcLegacy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Tasks to used to load a file in memory.
"""
import os
import numpy as np
from atom.api import (Bool, Unicode, List, set_default)
from past.builtins import basestring
from exopy.tasks.api import SimpleTask, InterfaceableTaskMixin, TaskInterface
def _make_array(names, dtypes='f8'):
if isinstance(dtypes, basestring):
dtypes = [dtypes for i in range(len(names))]
dtype = {'names': names, 'formats': dtypes}
return np.ones((5,), dtype=dtype)
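# Reference note (added comment, not in the original file): with the default
# dtype above, _make_array(['var1', 'var2']) returns a length-5 structured
# numpy array whose 'var1' and 'var2' fields are float64 columns of ones.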
class LoadArrayTask(InterfaceableTaskMixin, SimpleTask):
""" Load an array from the disc into the database.
"""
#: Folder from which to load the data.
folder = Unicode().tag(pref=True, fmt=True)
#: Name of the file from which to load the data.
filename = Unicode().tag(pref=True, fmt=True)
#: Kind of file to load.
selected_format = Unicode().tag(pref=True)
database_entries = set_default({'array': _make_array(['var1', 'var2'])})
def check(self, *args, **kwargs):
"""Check that the provided path and filename make sense.
"""
test, traceback = super(LoadArrayTask, self).check(*args, **kwargs)
err_path = self.get_error_path()
if not test:
return test, traceback
full_folder_path = self.format_string(self.folder)
filename = self.format_string(self.filename)
full_path = os.path.join(full_folder_path, filename)
if not os.path.isfile(full_path):
msg = ('File does not exist, be sure that your measure will '
'create it before this task is executed.')
traceback[err_path + '-file'] = msg
return test, traceback
class CSVLoadInterface(TaskInterface):
"""Interface used to load CSV files.
"""
#: Delimiter used in the file to load.
delimiter = Unicode('\t').tag(pref=True)
#: Character used to signal a comment.
comments = Unicode('#').tag(pref=True)
#: Flag indicating whether or not to use the first row as column names.
names = Bool(True).tag(pref=True)
#: The users can provide the names which will be available in its file
#: if the file cannot be found when checks are run.
c_names = List(Unicode()).tag(pref=True)
#: Class attr used in the UI.
file_formats = ['CSV']
def perform(self):
"""Load a file stored in csv format.
"""
task = self.task
folder = task.format_string(task.folder)
filename = task.format_string(task.filename)
full_path = os.path.join(folder, filename)
comment_lines = 0
with open(full_path) as f:
while True:
if f.readline().startswith(self.comments):
comment_lines += 1
else:
break
data = np.genfromtxt(full_path, comments=self.comments,
delimiter=self.delimiter, names=self.names,
skip_header=comment_lines)
task.write_in_database('array', data)
def check(self, *args, **kwargs):
"""Try to find the names of the columns to add the array in the
database.
"""
task = self.task
if self.c_names:
return True, {}
try:
full_folder_path = task.format_string(task.folder)
filename = task.format_string(task.filename)
except Exception:
return True, {}
full_path = os.path.join(full_folder_path, filename)
if os.path.isfile(full_path):
with open(full_path) as f:
while True:
line = f.readline()
if not line.startswith(self.comments):
names = line.split(self.delimiter)
names = [n.strip() for n in names if n]
self.task.write_in_database('array',
_make_array(names))
break
return True, {}
def _post_setattr_c_names(self, old, new):
"""Keep the c_names in sync with the array in the database.
"""
if new:
self.task.write_in_database('array', _make_array(new))
| Ecpy/ecpy_hqc_legacy | exopy_hqc_legacy/tasks/tasks/util/load_tasks.py | Python | bsd-3-clause | 4,610 |
#!/usr/bin/env python
from setuptools import setup
setup(
name='django-changuito',
version='1.2',
description='A fork of django-cart with the same simplicity but updated',
maintainer='Angel Velasquez',
maintainer_email='[email protected]',
license="LGPL v3",
url='https://github.com/angvp/django-changuito',
packages=['changuito'],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| angvp/django-changuito | setup.py | Python | lgpl-3.0 | 725 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "FastReading.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| vaquer/FastReadingPy | manage.py | Python | gpl-2.0 | 254 |
# Copyright 2013 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from euca2ools.commands.autoscaling import AutoScalingRequest
from requestbuilder import Arg
from requestbuilder.mixins import TabifyingMixin
from requestbuilder.response import PaginatedResponse
class DescribeLaunchConfigurations(AutoScalingRequest, TabifyingMixin):
DESCRIPTION = 'Describe auto-scaling instance launch configurations'
ARGS = [Arg('LaunchConfigurationNames.member', metavar='LAUNCHCONFIG',
nargs='*',
help='limit results to specific launch configurations'),
Arg('--show-long', action='store_true', route_to=None,
help="show all of the launch configurations' info")]
LIST_TAGS = ['LaunchConfigurations', 'SecurityGroups',
'BlockDeviceMappings']
def main(self):
return PaginatedResponse(self, (None,), ('LaunchConfigurations',))
def prepare_for_page(self, page):
# Pages are defined by NextToken
self.params['NextToken'] = page
def get_next_page(self, response):
return response.get('NextToken') or None
def print_result(self, result):
for config in result.get('LaunchConfigurations', []):
bits = ['LAUNCH-CONFIG',
config.get('LaunchConfigurationName'),
config.get('ImageId'),
config.get('InstanceType')]
if self.args['show_long']:
bits.append(config.get('KeyName'))
bits.append(config.get('KernelId'))
bits.append(config.get('RamdiskId'))
block_maps = [convert_block_mapping_to_str(mapping) for mapping
in config.get('BlockDeviceMappings', [])]
if len(block_maps) > 0:
bits.append('{' + ','.join(block_maps) + '}')
else:
bits.append(None)
bits.append(','.join(config.get('SecurityGroups', [])) or None)
bits.append(config.get('CreatedTime'))
bits.append(config.get('InstanceMonitoring', {}).get(
'Enabled'))
bits.append(config.get('LaunchConfigurationARN'))
bits.append(config.get('SpotPrice'))
bits.append(config.get('IamInstanceProfile'))
if self.args['show_long']:
bits.append(config.get('EbsOptimized'))
print self.tabify(bits)
def convert_block_mapping_to_str(mapping):
if mapping.get('Ebs'):
mapped = ':'.join((mapping['Ebs'].get('SnapshotId') or '',
mapping['Ebs'].get('VolumeSize') or ''))
elif mapping.get('VirtualName'):
mapped = mapping['VirtualName']
else:
raise ValueError('unexpected block device mapping: {0}'.format(
mapping))
return mapping['DeviceName'] + '=' + mapped
| Juniper/euca2ools | euca2ools/commands/autoscaling/describelaunchconfigurations.py | Python | bsd-2-clause | 4,176 |
# -*- coding: utf-8 -*-
"""
This module provide structures for data representation of received model.
"""
from collections import namedtuple
__all__ = ('FieldInfo', 'RelationInfo')
FieldInfo = namedtuple('FieldResult', [
'pk', # Model field instance
'fields', # Dict of field name -> model field instance
'forward_relations', # Dict of field name -> RelationInfo
'reverse_relations', # Dict of field name -> RelationInfo
'fields_and_pk', # Shortcut for 'pk' + 'fields'
'relations' # Shortcut for 'forward_relations' + 'reverse_relations'
])
RelationInfo = namedtuple('RelationInfo', [
'model_field',
'related_model',
'to_many',
'to_field',
'has_through_model'
])
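# Illustrative only (added; not from the original module): both structures are
# plain namedtuples, so instances are built positionally or by keyword, e.g.
#   RelationInfo(model_field=None, related_model=SomeModel, to_many=True,
#                to_field='id', has_through_model=False)
# where SomeModel stands in for whichever model class is being inspected.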
| Relrin/aiorest-ws | aiorest_ws/utils/structures.py | Python | bsd-3-clause | 717 |
from django.conf import settings
from hashlib import sha1
from mediagenerator.base import Generator
from mediagenerator.utils import get_media_dirs, find_file, prepare_patterns
from mimetypes import guess_type
import os
COPY_MEDIA_FILETYPES = getattr(settings, 'COPY_MEDIA_FILETYPES',
('gif', 'jpg', 'jpeg', 'png', 'svg', 'svgz', 'ico', 'swf', 'ttf', 'otf',
'eot', 'woff'))
IGNORE_PATTERN = prepare_patterns(getattr(settings,
'IGNORE_MEDIA_COPY_PATTERNS', ()), 'IGNORE_MEDIA_COPY_PATTERNS')
class CopyFiles(Generator):
def get_dev_output(self, name):
path = find_file(name)
fp = open(path, 'rb')
content = fp.read()
fp.close()
mimetype = guess_type(path)[0]
return content, mimetype
def get_dev_output_names(self):
media_files = {}
for root in get_media_dirs():
self.collect_copyable_files(media_files, root)
for name, source in media_files.items():
fp = open(source, 'rb')
hash = sha1(fp.read()).hexdigest()
fp.close()
yield name, name, hash
def collect_copyable_files(self, media_files, root):
for root_path, dirs, files in os.walk(root):
for file in files:
ext = os.path.splitext(file)[1].lstrip('.')
path = os.path.join(root_path, file)
media_path = path[len(root) + 1:].replace(os.sep, '/')
if ext in COPY_MEDIA_FILETYPES and \
not IGNORE_PATTERN.match(media_path):
media_files[media_path] = path
| umitproject/tease-o-matic | mediagenerator/generators/copyfiles.py | Python | bsd-3-clause | 1,592 |
"""
Simple enumeration class and metaclass that can be used in other
Biopython modules (or even outside Biopython).
"""
__all__ = ["Enum"]
__author__ = "Tamas Nepusz"
__email__ = "[email protected]"
__copyright__ = "Copyright (c) 2010, Tamas Nepusz"
def first(iter):
"""Helper function that takes an iterable and returns the
first element. No big deal, but it makes the code readable
in some cases. It is typically useful when `iter` is (or
can be) a generator expression as you can use the indexing
operator for ordinary lists and tuples."""
for item in iter:
return item
raise ValueError("iterable is empty")
class EnumMeta(type):
"""Metaclass for enum classes.
Do not use this class directly, there's no need to do that.
Derive your enum class from `Enum` instead.
"""
def __init__(cls, name, bases, attrs):
# Call the super-metaclass constructor
super(EnumMeta, cls).__init__(name, bases, attrs)
# This dict will contain the enum values
enum_values = {}
# For all the base classes, fetch the inherited items
for base in bases:
if hasattr(base, "__enum__"):
enum_values.update(base.__enum__)
# Extend enum_values with the items directly declared here
for key, value in attrs.iteritems():
# Skip internal methods, properties and callables
if key[:2] != "__" and not callable(value) \
and not isinstance(value, property):
inst = cls(key, value, override=True)
enum_values[key] = inst
super(EnumMeta, cls).__setattr__(key, inst)
# Store enum_values in the class
cls.__enum__ = enum_values
def __setattr__(self, name, value):
"""Raises an `AttributeError` to prevent users from messing
around with enum values"""
if name == "__enum__" or name == "__doc__" \
or not self._finalized:
return super(EnumMeta, self).__setattr__(name, value)
raise AttributeError("Enum attributes are read-only")
    def __delattr__(self, name):
"""Raises an `AttributeError` to prevent users from messing
around with enum values"""
raise AttributeError("Enum attributes cannot be deleted")
def __repr__(self):
return "<Enum '%s'>" % self.__name__
def __len__(self):
return len(self.__enum__)
def __iter__(self):
return iter(self.__enum__)
def __contains__(self, key):
return key in self.__enum__
def from_name(self, name):
"""Constructs an instance of this enum from its name"""
try:
return self.__enum__[name]
except KeyError:
raise NameError("no enum item with the given name: %r" % name)
def from_value(self, value):
"""Constructs an instance of this enum from its value"""
try:
return first(val for val in self.__enum__.itervalues() \
if val.value == value)
except ValueError:
raise ValueError("no enum item with the given value: %r" % value)
def has_key(self, key):
"""Returns whether this enum has the given key or not"""
return self.__enum__.has_key(key)
def iteritems(self):
"""Returns an iterator over key-value pairs of this enum"""
return self.__enum__.iteritems()
def itervalues(self):
"""Returns an iterator over key-value pairs of this enum"""
return self.__enum__.itervalues()
def keys(self):
"""Returns the keys in this enum"""
return self.__enum__.keys()
def values(self):
"""Returns the values in this enum"""
return self.__enum__.values()
class Enum(object):
"""An instance of an enumeration value and a class representing a
whole enum.
This is mainly used as a superclass for enumerations. There is
a clear distinction between using the class itself or using one
of its instances. Using the class means that you are referring to
the enum as a whole (with all its possible keys and values).
Using one of the instances means that you are using a single
key-value pair from the enum. Instances should never be created
directly, as all the valid instances are accessible as attributes
of the class itself.
Usage example::
class GOEvidenceCode(Enum):
EXP = "Inferred from Experiment"
IDA = "Inferred from Direct Assay"
IPI = "Inferred from Physical Interaction"
IMP = "Inferred from Mutant Phenotype"
[...]
>>> GOEvidenceCode.EXP
GOEvidenceCode.EXP
>>> GOEvidenceCode.EXP.value
'Inferred from Experiment'
Think about enums as Python dictionaries that map symbolic keys to
values. Enums even provide methods similar to the non-mutating methods
of Python dictionaries::
        >>> GOEvidenceCode.values()
[GOEvidenceCode.EXP, GOEvidenceCode.IDA, ...]
"""
__metaclass__ = EnumMeta
__slots__ = ("_key", "_value")
def __init__(self, key, value, **kwds):
if "override" not in kwds:
raise TypeError("Enums should not be instantiated directly")
self._key = key
self._value = value
def __repr__(self):
"""Returns an executable representation of this instance.
Strictly speaking, this method should return a string that allows
one to construct this instance, but we don't want anyone to
start instantiating `Enum`s by hand, so we return an expression
that refers to this very same instance instead.
"""
return "%s.%s" % (self.__class__.__name__, self._key)
def __str__(self):
return "%s.%s" % (self.__class__.__name__, self._key)
| marco-mariotti/selenoprofiles | libraries/annotations/Enum.py | Python | gpl-2.0 | 5,881 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from unittest import TestCase, main
import time
import os
from pulsar import Client, MessageId, \
CompressionType, ConsumerType, PartitionsRoutingMode, \
AuthenticationTLS, Authentication, AuthenticationToken
from _pulsar import ProducerConfiguration, ConsumerConfiguration
from schema_test import *
try:
# For Python 3.0 and later
from urllib.request import urlopen, Request
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen, Request
def doHttpPost(url, data):
req = Request(url, data.encode())
req.add_header('Content-Type', 'application/json')
urlopen(req)
def doHttpPut(url, data):
try:
req = Request(url, data.encode())
req.add_header('Content-Type', 'application/json')
req.get_method = lambda: 'PUT'
urlopen(req)
except Exception as ex:
# ignore conflicts exception to have test idempotency
if '409' in str(ex):
pass
else:
raise ex
def doHttpGet(url):
req = Request(url)
req.add_header('Accept', 'application/json')
return urlopen(req).read()
class PulsarTest(TestCase):
serviceUrl = 'pulsar://localhost:6650'
adminUrl = 'http://localhost:8080'
serviceUrlTls = 'pulsar+ssl://localhost:6651'
def test_producer_config(self):
conf = ProducerConfiguration()
conf.send_timeout_millis(12)
self.assertEqual(conf.send_timeout_millis(), 12)
self.assertEqual(conf.compression_type(), CompressionType.NONE)
conf.compression_type(CompressionType.LZ4)
self.assertEqual(conf.compression_type(), CompressionType.LZ4)
conf.max_pending_messages(120)
self.assertEqual(conf.max_pending_messages(), 120)
def test_consumer_config(self):
conf = ConsumerConfiguration()
self.assertEqual(conf.consumer_type(), ConsumerType.Exclusive)
conf.consumer_type(ConsumerType.Shared)
self.assertEqual(conf.consumer_type(), ConsumerType.Shared)
self.assertEqual(conf.consumer_name(), '')
conf.consumer_name("my-name")
self.assertEqual(conf.consumer_name(), "my-name")
def test_simple_producer(self):
client = Client(self.serviceUrl)
producer = client.create_producer('my-python-topic')
producer.send(b'hello')
producer.close()
client.close()
def test_producer_send_async(self):
client = Client(self.serviceUrl)
producer = client.create_producer('my-python-topic')
sent_messages = []
def send_callback(producer, msg):
sent_messages.append(msg)
producer.send_async(b'hello', send_callback)
producer.send_async(b'hello', send_callback)
producer.send_async(b'hello', send_callback)
i = 0
while len(sent_messages) < 3 and i < 100:
time.sleep(0.1)
i += 1
self.assertEqual(len(sent_messages), 3)
client.close()
def test_producer_consumer(self):
client = Client(self.serviceUrl)
consumer = client.subscribe('my-python-topic-producer-consumer',
'my-sub',
consumer_type=ConsumerType.Shared)
producer = client.create_producer('my-python-topic-producer-consumer')
producer.send(b'hello')
msg = consumer.receive(1000)
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello')
try:
msg = consumer.receive(100)
self.assertTrue(False) # Should not reach this point
except:
pass # Exception is expected
consumer.unsubscribe()
client.close()
def test_message_properties(self):
client = Client(self.serviceUrl)
topic = 'my-python-test-message-properties'
consumer = client.subscribe(topic=topic,
subscription_name='my-subscription',
schema=pulsar.schema.StringSchema())
producer = client.create_producer(topic=topic,
schema=StringSchema())
producer.send('hello',
properties={
'a': '1',
'b': '2'
})
msg = consumer.receive()
self.assertTrue(msg)
self.assertEqual(msg.value(), 'hello')
self.assertEqual(msg.properties(), {
'a': '1',
'b': '2'
})
consumer.unsubscribe()
client.close()
def test_tls_auth(self):
certs_dir = '/pulsar/pulsar-broker/src/test/resources/authentication/tls/'
if not os.path.exists(certs_dir):
certs_dir = "../../pulsar-broker/src/test/resources/authentication/tls/"
client = Client(self.serviceUrlTls,
tls_trust_certs_file_path=certs_dir + 'cacert.pem',
tls_allow_insecure_connection=False,
authentication=AuthenticationTLS(certs_dir + 'client-cert.pem', certs_dir + 'client-key.pem'))
consumer = client.subscribe('my-python-topic-producer-consumer',
'my-sub',
consumer_type=ConsumerType.Shared)
producer = client.create_producer('my-python-topic-producer-consumer')
producer.send(b'hello')
msg = consumer.receive(1000)
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello')
try:
msg = consumer.receive(100)
self.assertTrue(False) # Should not reach this point
except:
pass # Exception is expected
client.close()
def test_tls_auth2(self):
certs_dir = '/pulsar/pulsar-broker/src/test/resources/authentication/tls/'
if not os.path.exists(certs_dir):
certs_dir = "../../pulsar-broker/src/test/resources/authentication/tls/"
authPlugin = "org.apache.pulsar.client.impl.auth.AuthenticationTls"
authParams = "tlsCertFile:%s/client-cert.pem,tlsKeyFile:%s/client-key.pem" % (certs_dir, certs_dir)
client = Client(self.serviceUrlTls,
tls_trust_certs_file_path=certs_dir + 'cacert.pem',
tls_allow_insecure_connection=False,
authentication=Authentication(authPlugin, authParams))
consumer = client.subscribe('my-python-topic-producer-consumer',
'my-sub',
consumer_type=ConsumerType.Shared)
producer = client.create_producer('my-python-topic-producer-consumer')
producer.send(b'hello')
msg = consumer.receive(1000)
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello')
try:
msg = consumer.receive(100)
self.assertTrue(False) # Should not reach this point
except:
pass # Exception is expected
client.close()
def test_tls_auth3(self):
certs_dir = '/pulsar/pulsar-broker/src/test/resources/authentication/tls/'
if not os.path.exists(certs_dir):
certs_dir = "../../pulsar-broker/src/test/resources/authentication/tls/"
authPlugin = "tls"
authParams = "tlsCertFile:%s/client-cert.pem,tlsKeyFile:%s/client-key.pem" % (certs_dir, certs_dir)
client = Client(self.serviceUrlTls,
tls_trust_certs_file_path=certs_dir + 'cacert.pem',
tls_allow_insecure_connection=False,
authentication=Authentication(authPlugin, authParams))
consumer = client.subscribe('my-python-topic-producer-consumer',
'my-sub',
consumer_type=ConsumerType.Shared)
producer = client.create_producer('my-python-topic-producer-consumer')
producer.send(b'hello')
msg = consumer.receive(1000)
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello')
try:
msg = consumer.receive(100)
self.assertTrue(False) # Should not reach this point
except:
pass # Exception is expected
client.close()
def test_auth_junk_params(self):
certs_dir = '/pulsar/pulsar-broker/src/test/resources/authentication/tls/'
if not os.path.exists(certs_dir):
certs_dir = "../../pulsar-broker/src/test/resources/authentication/tls/"
authPlugin = "someoldjunk.so"
authParams = "blah"
client = Client(self.serviceUrlTls,
tls_trust_certs_file_path=certs_dir + 'cacert.pem',
tls_allow_insecure_connection=False,
authentication=Authentication(authPlugin, authParams))
try:
client.subscribe('my-python-topic-producer-consumer',
'my-sub',
consumer_type=ConsumerType.Shared)
except:
pass # Exception is expected
def test_message_listener(self):
client = Client(self.serviceUrl)
received_messages = []
def listener(consumer, msg):
print("Got message: %s" % msg)
received_messages.append(msg)
consumer.acknowledge(msg)
client.subscribe('my-python-topic-listener',
'my-sub',
consumer_type=ConsumerType.Exclusive,
message_listener=listener)
producer = client.create_producer('my-python-topic-listener')
producer.send(b'hello-1')
producer.send(b'hello-2')
producer.send(b'hello-3')
time.sleep(0.1)
self.assertEqual(len(received_messages), 3)
self.assertEqual(received_messages[0].data(), b"hello-1")
self.assertEqual(received_messages[1].data(), b"hello-2")
self.assertEqual(received_messages[2].data(), b"hello-3")
client.close()
def test_reader_simple(self):
client = Client(self.serviceUrl)
reader = client.create_reader('my-python-topic-reader-simple',
MessageId.earliest)
producer = client.create_producer('my-python-topic-reader-simple')
producer.send(b'hello')
msg = reader.read_next()
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello')
try:
msg = reader.read_next(100)
self.assertTrue(False) # Should not reach this point
except:
pass # Exception is expected
reader.close()
client.close()
def test_reader_on_last_message(self):
client = Client(self.serviceUrl)
producer = client.create_producer('my-python-topic-reader-on-last-message')
for i in range(10):
producer.send(b'hello-%d' % i)
reader = client.create_reader('my-python-topic-reader-on-last-message',
MessageId.latest)
for i in range(10, 20):
producer.send(b'hello-%d' % i)
for i in range(10, 20):
msg = reader.read_next()
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello-%d' % i)
reader.close()
client.close()
def test_reader_on_specific_message(self):
client = Client(self.serviceUrl)
producer = client.create_producer(
'my-python-topic-reader-on-specific-message')
for i in range(10):
producer.send(b'hello-%d' % i)
reader1 = client.create_reader(
'my-python-topic-reader-on-specific-message',
MessageId.earliest)
for i in range(5):
msg = reader1.read_next()
last_msg_id = msg.message_id()
reader2 = client.create_reader(
'my-python-topic-reader-on-specific-message',
last_msg_id)
for i in range(5, 10):
msg = reader2.read_next()
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello-%d' % i)
reader1.close()
reader2.close()
client.close()
def test_reader_on_specific_message_with_batches(self):
client = Client(self.serviceUrl)
producer = client.create_producer(
'my-python-topic-reader-on-specific-message-with-batches',
batching_enabled=True,
batching_max_publish_delay_ms=1000)
for i in range(10):
producer.send_async(b'hello-%d' % i, None)
# Send one sync message to make sure everything was published
producer.send(b'hello-10')
reader1 = client.create_reader(
'my-python-topic-reader-on-specific-message-with-batches',
MessageId.earliest)
for i in range(5):
msg = reader1.read_next()
last_msg_id = msg.message_id()
reader2 = client.create_reader(
'my-python-topic-reader-on-specific-message-with-batches',
last_msg_id)
for i in range(5, 11):
msg = reader2.read_next()
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello-%d' % i)
reader1.close()
reader2.close()
client.close()
def test_producer_sequence_after_reconnection(self):
# Enable deduplication on namespace
doHttpPost(self.adminUrl + '/admin/v2/namespaces/public/default/deduplication',
'true')
client = Client(self.serviceUrl)
topic = 'my-python-test-producer-sequence-after-reconnection-' \
+ str(time.time())
producer = client.create_producer(topic, producer_name='my-producer-name')
self.assertEqual(producer.last_sequence_id(), -1)
for i in range(10):
producer.send(b'hello-%d' % i)
self.assertEqual(producer.last_sequence_id(), i)
producer.close()
producer = client.create_producer(topic, producer_name='my-producer-name')
self.assertEqual(producer.last_sequence_id(), 9)
for i in range(10, 20):
producer.send(b'hello-%d' % i)
self.assertEqual(producer.last_sequence_id(), i)
doHttpPost(self.adminUrl + '/admin/v2/namespaces/public/default/deduplication',
'false')
def test_producer_deduplication(self):
# Enable deduplication on namespace
doHttpPost(self.adminUrl + '/admin/v2/namespaces/public/default/deduplication',
'true')
client = Client(self.serviceUrl)
topic = 'my-python-test-producer-deduplication-' + str(time.time())
producer = client.create_producer(topic, producer_name='my-producer-name')
self.assertEqual(producer.last_sequence_id(), -1)
consumer = client.subscribe(topic, 'my-sub')
producer.send(b'hello-0', sequence_id=0)
producer.send(b'hello-1', sequence_id=1)
producer.send(b'hello-2', sequence_id=2)
self.assertEqual(producer.last_sequence_id(), 2)
# Repeat the messages and verify they're not received by consumer
producer.send(b'hello-1', sequence_id=1)
producer.send(b'hello-2', sequence_id=2)
self.assertEqual(producer.last_sequence_id(), 2)
for i in range(3):
msg = consumer.receive()
self.assertEqual(msg.data(), b'hello-%d' % i)
consumer.acknowledge(msg)
try:
# No other messages should be received
consumer.receive(timeout_millis=1000)
self.assertTrue(False)
except:
# Exception is expected
pass
producer.close()
producer = client.create_producer(topic, producer_name='my-producer-name')
self.assertEqual(producer.last_sequence_id(), 2)
# Repeat the messages and verify they're not received by consumer
producer.send(b'hello-1', sequence_id=1)
producer.send(b'hello-2', sequence_id=2)
self.assertEqual(producer.last_sequence_id(), 2)
try:
# No other messages should be received
consumer.receive(timeout_millis=1000)
self.assertTrue(False)
except:
# Exception is expected
pass
doHttpPost(self.adminUrl + '/admin/v2/namespaces/public/default/deduplication',
'false')
def test_producer_routing_mode(self):
client = Client(self.serviceUrl)
producer = client.create_producer('my-python-test-producer',
message_routing_mode=PartitionsRoutingMode.UseSinglePartition)
producer.send(b'test')
client.close()
def test_message_argument_errors(self):
client = Client(self.serviceUrl)
topic = 'my-python-test-producer'
producer = client.create_producer(topic)
content = 'test'.encode('utf-8')
self._check_type_error(lambda: producer.send(5))
self._check_value_error(lambda: producer.send(content, properties='test'))
self._check_value_error(lambda: producer.send(content, partition_key=5))
self._check_value_error(lambda: producer.send(content, sequence_id='test'))
self._check_value_error(lambda: producer.send(content, replication_clusters=5))
self._check_value_error(lambda: producer.send(content, disable_replication='test'))
self._check_value_error(lambda: producer.send(content, event_timestamp='test'))
client.close()
def test_client_argument_errors(self):
self._check_value_error(lambda: Client(None))
self._check_value_error(lambda: Client(self.serviceUrl, authentication="test"))
self._check_value_error(lambda: Client(self.serviceUrl, operation_timeout_seconds="test"))
self._check_value_error(lambda: Client(self.serviceUrl, io_threads="test"))
self._check_value_error(lambda: Client(self.serviceUrl, message_listener_threads="test"))
self._check_value_error(lambda: Client(self.serviceUrl, concurrent_lookup_requests="test"))
self._check_value_error(lambda: Client(self.serviceUrl, log_conf_file_path=5))
self._check_value_error(lambda: Client(self.serviceUrl, use_tls="test"))
self._check_value_error(lambda: Client(self.serviceUrl, tls_trust_certs_file_path=5))
self._check_value_error(lambda: Client(self.serviceUrl, tls_allow_insecure_connection="test"))
def test_producer_argument_errors(self):
client = Client(self.serviceUrl)
self._check_value_error(lambda: client.create_producer(None))
topic = 'my-python-test-producer'
self._check_value_error(lambda: client.create_producer(topic, producer_name=5))
self._check_value_error(lambda: client.create_producer(topic, initial_sequence_id='test'))
self._check_value_error(lambda: client.create_producer(topic, send_timeout_millis='test'))
self._check_value_error(lambda: client.create_producer(topic, compression_type=None))
self._check_value_error(lambda: client.create_producer(topic, max_pending_messages='test'))
self._check_value_error(lambda: client.create_producer(topic, block_if_queue_full='test'))
self._check_value_error(lambda: client.create_producer(topic, batching_enabled='test'))
self._check_value_error(lambda: client.create_producer(topic, batching_max_allowed_size_in_bytes='test'))
self._check_value_error(lambda: client.create_producer(topic, batching_max_publish_delay_ms='test'))
client.close()
def test_consumer_argument_errors(self):
client = Client(self.serviceUrl)
topic = 'my-python-test-producer'
sub_name = 'my-sub-name'
self._check_value_error(lambda: client.subscribe(None, sub_name))
self._check_value_error(lambda: client.subscribe(topic, None))
self._check_value_error(lambda: client.subscribe(topic, sub_name, consumer_type=None))
self._check_value_error(lambda: client.subscribe(topic, sub_name, receiver_queue_size='test'))
self._check_value_error(lambda: client.subscribe(topic, sub_name, consumer_name=5))
self._check_value_error(lambda: client.subscribe(topic, sub_name, unacked_messages_timeout_ms='test'))
self._check_value_error(lambda: client.subscribe(topic, sub_name, broker_consumer_stats_cache_time_ms='test'))
client.close()
def test_reader_argument_errors(self):
client = Client(self.serviceUrl)
topic = 'my-python-test-producer'
        # This should not raise an exception
client.create_reader(topic, MessageId.earliest)
self._check_value_error(lambda: client.create_reader(None, MessageId.earliest))
self._check_value_error(lambda: client.create_reader(topic, None))
self._check_value_error(lambda: client.create_reader(topic, MessageId.earliest, receiver_queue_size='test'))
self._check_value_error(lambda: client.create_reader(topic, MessageId.earliest, reader_name=5))
client.close()
def test_publish_compact_and_consume(self):
client = Client(self.serviceUrl)
topic = 'my-python-test_publish_compact_and_consume'
producer = client.create_producer(topic, producer_name='my-producer-name', batching_enabled=False)
self.assertEqual(producer.last_sequence_id(), -1)
consumer = client.subscribe(topic, 'my-sub1', is_read_compacted=True)
consumer.close()
consumer2 = client.subscribe(topic, 'my-sub2', is_read_compacted=False)
        # The producer creates 2 messages with the same key.
producer.send(b'hello-0', partition_key='key0')
producer.send(b'hello-1', partition_key='key0')
producer.close()
        # Issue the compact command and wait for it to complete successfully
url=self.adminUrl + '/admin/v2/persistent/public/default/my-python-test_publish_compact_and_consume/compaction'
doHttpPut(url, '')
while True:
s=doHttpGet(url).decode('utf-8')
if 'RUNNING' in s:
print("Compact still running")
print(s)
time.sleep(0.2)
else:
self.assertTrue('SUCCESS' in s)
print("Compact Complete now")
print(s)
break
        # After compaction, a consumer with `is_read_compacted=True` should read only the latest message for the key.
consumer1 = client.subscribe(topic, 'my-sub1', is_read_compacted=True)
msg0 = consumer1.receive()
self.assertEqual(msg0.data(), b'hello-1')
consumer1.acknowledge(msg0)
consumer1.close()
        # After compaction, a consumer with `is_read_compacted=False` should still read both messages for the key.
msg0 = consumer2.receive()
self.assertEqual(msg0.data(), b'hello-0')
consumer2.acknowledge(msg0)
msg1 = consumer2.receive()
self.assertEqual(msg1.data(), b'hello-1')
consumer2.acknowledge(msg1)
consumer2.close()
client.close()
def test_reader_has_message_available(self):
# create client, producer, reader
client = Client(self.serviceUrl)
producer = client.create_producer('my-python-topic-reader-has-message-available')
reader = client.create_reader('my-python-topic-reader-has-message-available',
MessageId.latest)
        # Before any data is produced, no message should be available
        self.assertFalse(reader.has_message_available())
for i in range(10):
producer.send(b'hello-%d' % i)
        # After producing data, a message should be available
        self.assertTrue(reader.has_message_available())
for i in range(10):
msg = reader.read_next()
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello-%d' % i)
        # After reading all data, no message should be available
        self.assertFalse(reader.has_message_available())
for i in range(10, 20):
producer.send(b'hello-%d' % i)
        # After producing more data, a message should be available again
        self.assertTrue(reader.has_message_available())
reader.close()
producer.close()
client.close()
def test_seek(self):
client = Client(self.serviceUrl)
consumer = client.subscribe('my-python-topic-seek',
'my-sub',
consumer_type=ConsumerType.Shared)
producer = client.create_producer('my-python-topic-seek')
for i in range(100):
producer.send(b'hello-%d' % i)
for i in range(100):
msg = consumer.receive()
self.assertEqual(msg.data(), b'hello-%d' % i)
consumer.acknowledge(msg)
        # After seeking to the earliest message and reconnecting, the first message should be received again.
consumer.seek(MessageId.earliest)
time.sleep(0.5)
msg = consumer.receive()
self.assertEqual(msg.data(), b'hello-0')
client.close()
def test_v2_topics(self):
self._v2_topics(self.serviceUrl)
def test_v2_topics_http(self):
self._v2_topics(self.adminUrl)
def _v2_topics(self, url):
client = Client(url)
consumer = client.subscribe('my-v2-topic-producer-consumer',
'my-sub',
consumer_type=ConsumerType.Shared)
producer = client.create_producer('my-v2-topic-producer-consumer')
producer.send(b'hello')
msg = consumer.receive(1000)
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello')
consumer.acknowledge(msg)
try:
msg = consumer.receive(100)
self.assertTrue(False) # Should not reach this point
except:
pass # Exception is expected
client.close()
def test_topics_consumer(self):
client = Client(self.serviceUrl)
topic1 = 'persistent://public/default/my-python-topics-consumer-1'
topic2 = 'persistent://public/default/my-python-topics-consumer-2'
topic3 = 'persistent://public/default/my-python-topics-consumer-3'
topics = [topic1, topic2, topic3]
url1 = self.adminUrl + '/admin/v2/persistent/public/default/my-python-topics-consumer-1/partitions'
url2 = self.adminUrl + '/admin/v2/persistent/public/default/my-python-topics-consumer-2/partitions'
url3 = self.adminUrl + '/admin/v2/persistent/public/default/my-python-topics-consumer-3/partitions'
doHttpPut(url1, '2')
doHttpPut(url2, '3')
doHttpPut(url3, '4')
producer1 = client.create_producer(topic1)
producer2 = client.create_producer(topic2)
producer3 = client.create_producer(topic3)
consumer = client.subscribe(topics,
'my-topics-consumer-sub',
consumer_type=ConsumerType.Shared,
receiver_queue_size=10
)
for i in range(100):
producer1.send(b'hello-1-%d' % i)
for i in range(100):
producer2.send(b'hello-2-%d' % i)
for i in range(100):
producer3.send(b'hello-3-%d' % i)
for i in range(300):
msg = consumer.receive()
consumer.acknowledge(msg)
try:
# No other messages should be received
consumer.receive(timeout_millis=500)
self.assertTrue(False)
except:
# Exception is expected
pass
client.close()
def test_topics_pattern_consumer(self):
import re
client = Client(self.serviceUrl)
topics_pattern = 'persistent://public/default/my-python-pattern-consumer.*'
topic1 = 'persistent://public/default/my-python-pattern-consumer-1'
topic2 = 'persistent://public/default/my-python-pattern-consumer-2'
topic3 = 'persistent://public/default/my-python-pattern-consumer-3'
url1 = self.adminUrl + '/admin/v2/persistent/public/default/my-python-pattern-consumer-1/partitions'
url2 = self.adminUrl + '/admin/v2/persistent/public/default/my-python-pattern-consumer-2/partitions'
url3 = self.adminUrl + '/admin/v2/persistent/public/default/my-python-pattern-consumer-3/partitions'
doHttpPut(url1, '2')
doHttpPut(url2, '3')
doHttpPut(url3, '4')
producer1 = client.create_producer(topic1)
producer2 = client.create_producer(topic2)
producer3 = client.create_producer(topic3)
consumer = client.subscribe(re.compile(topics_pattern),
'my-pattern-consumer-sub',
consumer_type = ConsumerType.Shared,
receiver_queue_size = 10,
pattern_auto_discovery_period = 1
)
# wait enough time to trigger auto discovery
time.sleep(2)
for i in range(100):
producer1.send(b'hello-1-%d' % i)
for i in range(100):
producer2.send(b'hello-2-%d' % i)
for i in range(100):
producer3.send(b'hello-3-%d' % i)
for i in range(300):
msg = consumer.receive()
consumer.acknowledge(msg)
try:
# No other messages should be received
consumer.receive(timeout_millis=500)
self.assertTrue(False)
except:
# Exception is expected
pass
client.close()
def test_message_id(self):
s = MessageId.earliest.serialize()
self.assertEqual(MessageId.deserialize(s), MessageId.earliest)
s = MessageId.latest.serialize()
self.assertEqual(MessageId.deserialize(s), MessageId.latest)
def test_get_topics_partitions(self):
client = Client(self.serviceUrl)
topic_partitioned = 'persistent://public/default/test_get_topics_partitions'
topic_non_partitioned = 'persistent://public/default/test_get_topics_not-partitioned'
url1 = self.adminUrl + '/admin/v2/persistent/public/default/test_get_topics_partitions/partitions'
doHttpPut(url1, '3')
self.assertEqual(client.get_topic_partitions(topic_partitioned),
['persistent://public/default/test_get_topics_partitions-partition-0',
'persistent://public/default/test_get_topics_partitions-partition-1',
'persistent://public/default/test_get_topics_partitions-partition-2'])
self.assertEqual(client.get_topic_partitions(topic_non_partitioned),
[topic_non_partitioned])
client.close()
def test_token_auth(self):
with open('/tmp/pulsar-test-data/tokens/token.txt') as tf:
token = tf.read().strip()
# Use adminUrl to test both HTTP request and binary protocol
client = Client(self.adminUrl,
authentication=AuthenticationToken(token))
consumer = client.subscribe('persistent://private/auth/my-python-topic-token-auth',
'my-sub',
consumer_type=ConsumerType.Shared)
producer = client.create_producer('persistent://private/auth/my-python-topic-token-auth')
producer.send(b'hello')
msg = consumer.receive(1000)
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello')
client.close()
def test_token_auth_supplier(self):
def read_token():
with open('/tmp/pulsar-test-data/tokens/token.txt') as tf:
return tf.read().strip()
client = Client(self.serviceUrl,
authentication=AuthenticationToken(read_token))
consumer = client.subscribe('persistent://private/auth/my-python-topic-token-auth',
'my-sub',
consumer_type=ConsumerType.Shared)
producer = client.create_producer('persistent://private/auth/my-python-topic-token-auth')
producer.send(b'hello')
msg = consumer.receive(1000)
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello')
client.close()
def test_producer_consumer_zstd(self):
client = Client(self.serviceUrl)
consumer = client.subscribe('my-python-topic-producer-consumer-zstd',
'my-sub',
consumer_type=ConsumerType.Shared)
producer = client.create_producer('my-python-topic-producer-consumer-zstd',
compression_type=CompressionType.ZSTD)
producer.send(b'hello')
msg = consumer.receive(1000)
self.assertTrue(msg)
self.assertEqual(msg.data(), b'hello')
try:
msg = consumer.receive(100)
self.assertTrue(False) # Should not reach this point
except:
pass # Exception is expected
consumer.unsubscribe()
client.close()
#####
def test_get_topic_name(self):
client = Client(self.serviceUrl)
consumer = client.subscribe('persistent://public/default/topic_name_test',
'topic_name_test_sub',
consumer_type=ConsumerType.Shared)
producer = client.create_producer('persistent://public/default/topic_name_test')
producer.send(b'hello')
msg = consumer.receive(1000)
self.assertEqual(msg.topic_name(), 'persistent://public/default/topic_name_test')
client.close()
def test_get_partitioned_topic_name(self):
client = Client(self.serviceUrl)
url1 = self.adminUrl + '/admin/v2/persistent/public/default/partitioned_topic_name_test/partitions'
doHttpPut(url1, '3')
partitions = ['persistent://public/default/partitioned_topic_name_test-partition-0',
'persistent://public/default/partitioned_topic_name_test-partition-1',
'persistent://public/default/partitioned_topic_name_test-partition-2']
self.assertEqual(client.get_topic_partitions('persistent://public/default/partitioned_topic_name_test'),
partitions)
consumer = client.subscribe('persistent://public/default/partitioned_topic_name_test',
'partitioned_topic_name_test_sub',
consumer_type=ConsumerType.Shared)
producer = client.create_producer('persistent://public/default/partitioned_topic_name_test')
producer.send(b'hello')
msg = consumer.receive(1000)
self.assertTrue(msg.topic_name() in partitions)
client.close()
def _check_value_error(self, fun):
try:
fun()
# Should throw exception
self.assertTrue(False)
except ValueError:
pass # Expected
def _check_type_error(self, fun):
try:
fun()
# Should throw exception
self.assertTrue(False)
except TypeError:
pass # Expected
if __name__ == '__main__':
main()
| nkurihar/pulsar | pulsar-client-cpp/python/pulsar_test.py | Python | apache-2.0 | 36,453 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0003_auto_20150510_1707'),
]
operations = [
migrations.RemoveField(
model_name='job',
name='reviewers_comment',
),
migrations.RemoveField(
model_name='meetup',
name='reviewers_comment',
),
migrations.AddField(
model_name='job',
name='internal_comment',
field=models.TextField(null=True, blank=True, help_text="Write you comments here. They won't be sent to the company/organisation."),
),
migrations.AddField(
model_name='job',
name='message_to_organisation',
field=models.TextField(null=True, blank=True, help_text='Write your message to the company/organisation here.'),
),
migrations.AddField(
model_name='meetup',
name='internal_comment',
field=models.TextField(null=True, blank=True, help_text="Write you comments here. They won't be sent to the company/organisation."),
),
migrations.AddField(
model_name='meetup',
name='message_to_organisation',
field=models.TextField(null=True, blank=True, help_text='Write your message to the company/organisation here.'),
),
migrations.AlterField(
model_name='job',
name='expiration_date',
            field=models.DateField(null=True, blank=True, help_text='Automatically set to 60 days from posting. You can override this.'),
),
migrations.AlterField(
model_name='job',
name='review_status',
field=models.CharField(choices=[('OPN', 'Open'), ('URE', 'Under review'), ('RTP', 'Ready to publish'), ('REJ', 'Rejected'), ('PUB', 'Published')], max_length=3, default='OPN'),
),
migrations.AlterField(
model_name='job',
name='website',
field=models.URLField(null=True, blank=True, help_text='Link to your offer or company website.'),
),
migrations.AlterField(
model_name='meetup',
name='expiration_date',
            field=models.DateField(null=True, blank=True, help_text='Automatically set to 60 days from posting. You can override this.'),
),
migrations.AlterField(
model_name='meetup',
name='meetup_end_date',
field=models.DateTimeField(null=True, blank=True, help_text='Date format: YYYY-MM-DD'),
),
migrations.AlterField(
model_name='meetup',
name='meetup_start_date',
field=models.DateTimeField(null=True, help_text='If this is a recurring meetup/event, please enter a start date. Date format: YYYY-MM-DD'),
),
migrations.AlterField(
model_name='meetup',
name='meetup_type',
field=models.CharField(choices=[('MEET', 'meetup'), ('CONF', 'conference'), ('WORK', 'workshop')], max_length=4, default='MEET'),
),
migrations.AlterField(
model_name='meetup',
name='recurrence',
field=models.CharField(null=True, blank=True, max_length=255, help_text='Provide details of recurrence if applicable.'),
),
migrations.AlterField(
model_name='meetup',
name='review_status',
field=models.CharField(choices=[('OPN', 'Open'), ('URE', 'Under review'), ('RTP', 'Ready to publish'), ('REJ', 'Rejected'), ('PUB', 'Published')], max_length=3, default='OPN'),
),
migrations.AlterField(
model_name='meetup',
name='website',
field=models.URLField(null=True, blank=True, help_text='Link to your meetup or organisation website.'),
),
]
| patjouk/djangogirls | jobs/migrations/0004_auto_20150712_1803.py | Python | bsd-3-clause | 3,951 |
"""
The 'instances' column will be deleted later; it has to be nullable during the transition.
Revision ID: 266658781c00
Revises: 204aae05372a
Create Date: 2019-04-15 16:27:22.362244
"""
# revision identifiers, used by Alembic.
revision = '266658781c00'
down_revision = '204aae05372a'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.alter_column('equipments_provider', 'instances', existing_type=postgresql.ARRAY(sa.TEXT()), nullable=True)
def downgrade():
op.alter_column(
'equipments_provider', 'instances', existing_type=postgresql.ARRAY(sa.TEXT()), nullable=False
)
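# Hedged note (not part of the original migration): a revision like this is normally
# applied with the Alembic CLI, e.g. `alembic upgrade head`, and reverted with
# `alembic downgrade -1`.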
| pbougue/navitia | source/tyr/migrations/versions/266658781c00_instances_nullable_in_equipments_provider.py | Python | agpl-3.0 | 643 |
from .wikiapi import *
# Explicit imports for the SPARQL helpers used in getCoord_byCity below; these are
# assumed to come from the standard SPARQLWrapper package (the wildcard import above
# may already provide them).
from SPARQLWrapper import SPARQLWrapper, JSON
class Auxiliar_Functions(object):
    def getClubShieldImage(self, response_xml):
        """Extract the first image URL found in the infobox of the given Wikipedia page HTML (the club shield)."""
wikiapi = WikiApi({ 'locale' : 'en'})
response_xml = wikiapi.replace(response_xml)
ind_i = wikiapi.getIndex_substring("infobox",response_xml)
ind_f = wikiapi.getIndex_substring("scope",response_xml)
response_xml = response_xml[ind_i:ind_f]
ind_i = wikiapi.getIndex_substring("src=\"",response_xml)
ind_f = wikiapi.getIndex_substring("\" width",response_xml)
return "https:"+response_xml[ind_i+len("src=\""):ind_f]
    def getStadiumByClub(self, club, hash_stadium_club):
        """Return the stadium mapped to the given club in the club-to-stadium dictionary."""
for key, value in hash_stadium_club.items():
if key == club:
return value
    def get_river_dynamic_points(self, river_name):
        """Read the stored coordinate points for the given river from static/river_points/<river_name>.txt."""
data_points = []
with open("static/river_points/"+str(river_name)+".txt", 'r+') as file:
for line in file:
data_points.append(line.splitlines())
return data_points
    def do_data_list(self, hash_data):
        """Collect the host city, motto, nations, athletes, events, dates and stadium from the scraped infobox dict into a fixed-order list."""
for key in hash_data:
if "city" in key:
host_city = hash_data[key]
if "Motto" in key:
lema = hash_data[key]
if "Nations" in key:
num_nations = hash_data[key]
if len(num_nations)>3:
num_nations = num_nations.split(" ")[0]
if "Athletes" in key:
num_athletes = hash_data[key]
if len(num_athletes)>6:
num_athletes = num_athletes.split(" (")[0]
if "Events" in key:
num_events = hash_data[key].split(" in")[0]
if "Opening" in key:
opening_date = hash_data[key]
if "Closing" in key:
closing_date = hash_data[key]
if "Stadium" in key:
stadium = hash_data[key]
data_list = [host_city,lema,num_nations,num_athletes,num_events,opening_date,closing_date,stadium]
return data_list
    def getCoord_byCity(self, city):
        """Query Wikidata via SPARQL for the coordinates (P625) of the first city whose label matches the given name."""
city = "\""+city+"\""
sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
sparql.setReturnFormat(JSON)
sparql.setQuery("""SELECT DISTINCT ?coord
WHERE
{
?city wdt:P31/wdt:P279* wd:Q515 .
?city wdt:P625 ?coord .
?city rdfs:label ?cityname .
FILTER (regex(?cityname,"""+city+"""))
FILTER (lang(?cityname) = "en")
SERVICE wikibase:label {
bd:serviceParam wikibase:language "en" .
}
}
LIMIT 1""")
queryResults = sparql.query().convert()
for result in queryResults["results"]["bindings"]:
coord = result["coord"]["value"]
return coord
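# Hedged usage sketch (the input values below are assumptions, not part of this module):
#
#   aux = Auxiliar_Functions()
#   coord = aux.getCoord_byCity("Barcelona")        # WKT literal, e.g. "Point(2.17 41.38)"
#   shield_url = aux.getClubShieldImage(page_html)  # page_html: Wikipedia article HTML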
| gbarbosa4/WikimediaDataProject | WDLG/utils/auxiliar_functions.py | Python | mit | 3,029 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitsOperations:
"""ExpressRouteCircuitsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuit":
"""Gets information about the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuit, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuit
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
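    # Hedged example (resource names are assumptions): a plain GET is a simple awaitable,
    # no poller is involved.
    #
    #   circuit = await client.express_route_circuits.get("my-rg", "my-circuit")
    #   print(circuit.name, circuit.provisioning_state)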
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
parameters: "_models.ExpressRouteCircuit",
**kwargs: Any
) -> "_models.ExpressRouteCircuit":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ExpressRouteCircuit')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
parameters: "_models.ExpressRouteCircuit",
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuit"]:
"""Creates or updates an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param parameters: Parameters supplied to the create or update express route circuit operation.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuit
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuit or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuit]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuit"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
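    # Hedged example of the long-running-operation pattern shared by the begin_* methods
    # (resource names and `circuit_parameters` are assumptions):
    #
    #   poller = await client.express_route_circuits.begin_create_or_update(
    #       "my-rg", "my-circuit", circuit_parameters)
    #   circuit = await poller.result()  # waits until provisioning completes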
async def update_tags(
self,
resource_group_name: str,
circuit_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ExpressRouteCircuit":
"""Updates an express route circuit tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:param parameters: Parameters supplied to update express route circuit tags.
:type parameters: ~azure.mgmt.network.v2020_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuit, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuit
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuit"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuit', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}'} # type: ignore
async def _list_arp_table_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> Optional["_models.ExpressRouteCircuitsArpTableListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsArpTableListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._list_arp_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_arp_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
async def begin_list_arp_table(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitsArpTableListResult"]:
"""Gets the currently advertised ARP table associated with the express route circuit in a resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitsArpTableListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitsArpTableListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitsArpTableListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_arp_table_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
async def _list_routes_table_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._list_routes_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_routes_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
async def begin_list_routes_table(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitsRoutesTableListResult"]:
"""Gets the currently advertised routes table associated with the express route circuit in a
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitsRoutesTableListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitsRoutesTableListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitsRoutesTableListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_routes_table_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
async def _list_routes_table_summary_initial(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> Optional["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._list_routes_table_summary_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_routes_table_summary_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
async def begin_list_routes_table_summary(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> AsyncLROPoller["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]:
"""Gets the currently advertised routes table summary associated with the express route circuit in
a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param device_path: The path of the device.
:type device_path: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitsRoutesTableSummaryListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitsRoutesTableSummaryListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitsRoutesTableSummaryListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._list_routes_table_summary_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
peering_name=peering_name,
device_path=device_path,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableSummaryListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
async def get_stats(
self,
resource_group_name: str,
circuit_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitStats":
"""Gets all the stats from an express route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitStats, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitStats
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitStats"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get_stats.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitStats', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/stats'} # type: ignore
async def get_peering_stats(
self,
resource_group_name: str,
circuit_name: str,
peering_name: str,
**kwargs: Any
) -> "_models.ExpressRouteCircuitStats":
"""Gets all stats from an express route circuit in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitStats, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitStats
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitStats"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self.get_peering_stats.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitStats', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_peering_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/peerings/{peeringName}/stats'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRouteCircuitListResult"]:
"""Gets all the express route circuits in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitListResult"]
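        # Illustrative iteration over the pager (a sketch; `client` and the
        # resource group name are assumptions, not defined in this module):
        #   async for circuit in client.express_route_circuits.list("my_rg"):
        #       print(circuit.name)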
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits'} # type: ignore
def list_all(
self,
**kwargs: Any
) -> AsyncIterable["_models.ExpressRouteCircuitListResult"]:
"""Gets all the express route circuits in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCircuitListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_04_01.models.ExpressRouteCircuitListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCircuits'} # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations/_express_route_circuits_operations.py | Python | mit | 53,747 |
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
#! /usr/bin/python
# example script to compute and plot onsetdetection related descriptors
from essentia.extractor.onsetdetection import compute
def parse_args():
from optparse import OptionParser
import sys
usage = "usage: %s [-v] <-i input_soundfile> [-g ground_truth_file]" % sys.argv[0]
parser = OptionParser(usage=usage)
parser.add_option("-v","--verbose",
action="store_true", dest="verbose", default=False,
help="verbose mode")
parser.add_option("-i","--input",
action="store", dest="input_file", type="string",
help="input file")
parser.add_option("-w","--wave-output", default=None,
action="store", dest="wave_output", type="string",
help="wave output filename")
(options, args) = parser.parse_args()
if options.input_file is None:
print usage
sys.exit(1)
return options, args
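# Example invocation (illustrative; file names are placeholders):
#   python onsetdetection_test.py -v -i input.wav -w onsets_marked.wav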
if __name__ == '__main__':
import sys, os.path, essentia
options, args = parse_args()
input_file = options.input_file
# load audio file
audio_file = essentia.AudioFileInput(filename = input_file)
audio = audio_file()
sampleRate = 44100.
pool = essentia.Pool(input_file)
compute(audio, pool, sampleRate = sampleRate, verbose = options.verbose)
onsets = list(pool.descriptors['rhythm_onsets']['values'][0])
if (options.verbose): print onsets
while ((onsets[-1] + 0.020) * sampleRate) > len(audio):
onsets.pop(len(onsets)-1)
if len(onsets) > 0 and options.wave_output != None:
#audio *= 0.# only ticks
tick_length = 0.020 # in seconds
for tick in onsets:
for sample in range( int(round(tick*sampleRate)), int(round( (tick + tick_length ) *sampleRate )) ):
audio[sample] += 0.4 * ( sample % 200. - 100. )
output_file = essentia.WaveFileOutput(filename = options.wave_output)
output_file(audio)
| ChristianFrisson/essentia | test/src/pythontests/onsetdetection_test.py | Python | agpl-3.0 | 2,659 |
import SocketServer
class ProtoHandler(SocketServer.BaseRequestHandler):
def handle(self):
msg = self.request.recv(1024)
a = msg.split(" ",2)
if len(a) >1 and a[0] == "GET":
a = a[1].split("/")
a =[i for i in a if i != '']
if len(a) == 0:
self.request.sendall(self.server.ret)
else:
self.server.data=a
print a
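# Illustrative interaction (an assumption, not part of the original code):
# a GET request for "/" returns the default page loaded at start-up, while a
# GET for e.g. "/led/on" stores ["led", "on"] in server.data for later use.
#   curl http://192.168.1.253:6661/
#   curl http://192.168.1.253:6661/led/on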
class ProtoServer(SocketServer.TCPServer):
def __init__(self,hostport,default):
self.allow_reuse_address = True
SocketServer.TCPServer.__init__(self,hostport, ProtoHandler)
with open (default, "r") as myfile:
self.ret=myfile.read()
if __name__ == "__main__":
s = ProtoServer(("192.168.1.253", 6661),"index.html")
s.serve_forever()
| wizgrav/protobot | server.py | Python | bsd-3-clause | 861 |
# -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# Copyright 2020 PT. Simetri Sinergi Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from dateutil.relativedelta import relativedelta
from openerp import _, api, fields, models
from openerp.exceptions import Warning as UserError
class HrAttendance(models.Model):
_inherit = "hr.attendance"
@api.multi
@api.depends("name")
def _compute_schedule(self):
obj_schedule = self.env["hr.timesheet_attendance_schedule"]
for attn in self:
company = attn.employee_id.company_id
early_buffer = fields.Datetime.from_string(attn.name) + relativedelta(
hours=company.early_attendance_buffer
)
early_buffer = fields.Datetime.to_string(early_buffer)
late_buffer = fields.Datetime.from_string(attn.name) + relativedelta(
hours=-company.late_attendance_buffer
)
late_buffer = fields.Datetime.to_string(late_buffer)
criteria = [
("employee_id", "=", attn.employee_id.id),
("date_start", "<=", early_buffer),
("date_end", ">=", late_buffer),
]
schedules = obj_schedule.search(criteria, limit=1)
attn.schedule_id = schedules[0].id if len(schedules) > 0 else False
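    # Worked example (illustrative numbers): with early_attendance_buffer = 2.0
    # and late_attendance_buffer = 1.0 on the company, an attendance at 07:00
    # matches any schedule with date_start <= 09:00 and date_end >= 06:00,
    # i.e. a sign in up to 2 hours before the schedule starts or up to 1 hour
    # after it ends is still linked to that schedule.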
schedule_id = fields.Many2one(
string="Attendance Schedule",
comodel_name="hr.timesheet_attendance_schedule",
compute="_compute_schedule",
store=True,
)
@api.constrains(
"schedule_id",
)
def _check_att_sign_in_out(self):
for document in self:
company = document.employee_id.company_id
if not document.schedule_id:
continue
if company.max_att_sign_in > 0:
len_att_sign_in = len(
document.schedule_id.attendance_ids.filtered(
lambda x: x.action == "sign_in"
)
)
if len_att_sign_in > company.max_att_sign_in:
msg = _(
"Total Sign In has reached maximum " "attempts per schedule"
)
raise UserError(msg)
if company.max_att_sign_out > 0:
len_att_sign_out = len(
document.schedule_id.attendance_ids.filtered(
lambda x: x.action == "sign_out"
)
)
if len_att_sign_out > company.max_att_sign_out:
msg = _(
"Total Sign Out has reached maximum " "attempts per schedule"
)
raise UserError(msg)
| open-synergy/opnsynid-hr | hr_timesheet_attendance_schedule/models/hr_attendance.py | Python | agpl-3.0 | 2,790 |
import pytest
import logging
import os
import subprocess
import tempfile
from dtest import Tester
from shutil import rmtree
since = pytest.mark.since
logger = logging.getLogger(__name__)
@since('4.0')
class TestFQLTool(Tester):
"""
Makes sure fqltool replay and fqltool compare work
@jira_ticket CASSANDRA-14690
"""
def test_replay(self):
"""
Generates a full query log, wipes the nodes and replays the
query log, then makes sure that the data is correct.
@jira_ticket CASSANDRA-14690
"""
self.cluster.populate(2).start(wait_for_binary_proto=True)
node1, node2 = self.cluster.nodelist()
with tempfile.TemporaryDirectory() as temp_dir:
tmpdir = tempfile.mkdtemp(dir=temp_dir)
tmpdir2 = tempfile.mkdtemp(dir=temp_dir)
node1.nodetool("enablefullquerylog --path={}".format(tmpdir))
node2.nodetool("enablefullquerylog --path={}".format(tmpdir2))
node1.stress(['write', 'n=1000'])
node1.flush()
node2.flush()
node1.nodetool("disablefullquerylog")
node2.nodetool("disablefullquerylog")
node1.stop(wait_other_notice=True)
node2.stop(wait_other_notice=True)
node1.clear()
node2.clear()
node1.start(wait_for_binary_proto=True)
node2.start(wait_for_binary_proto=True, wait_other_notice=True)
# make sure the node is empty:
got_exception = False
try:
node1.stress(['read', 'n=1000'])
except Exception:
got_exception = True
assert got_exception
# replay the log files
self._run_fqltool_replay(node1, [tmpdir, tmpdir2], "127.0.0.1", None, None)
# and verify the data is there
node1.stress(['read', 'n=1000'])
def test_compare(self):
"""
uses fqltool replay to compare two runs of the same query log and makes
sure that the results match
@jira_ticket CASSANDRA-14690
"""
self.cluster.populate(1).start(wait_for_binary_proto=True)
node1 = self.cluster.nodelist()[0]
with tempfile.TemporaryDirectory() as temp_dir:
results1 = tempfile.mkdtemp(dir=temp_dir)
queries1 = tempfile.mkdtemp(dir=temp_dir)
results2 = tempfile.mkdtemp(dir=temp_dir)
queries2 = tempfile.mkdtemp(dir=temp_dir)
fqldir = tempfile.mkdtemp(dir=temp_dir)
node1.stress(['write', 'n=1000'])
node1.flush()
node1.nodetool("enablefullquerylog --path={}".format(fqldir))
node1.stress(['read', 'n=1000'])
node1.nodetool("disablefullquerylog")
self._run_fqltool_replay(node1, [fqldir], "127.0.0.1", queries1, results1)
self._run_fqltool_replay(node1, [fqldir], "127.0.0.1", queries2, results2)
output = self._run_fqltool_compare(node1, queries1, [results1, results2])
assert b"MISMATCH" not in output # running the same reads against the same data
def test_compare_mismatch(self):
"""
generates two fql log files with different data (seq is different when running stress)
then asserts that the replays of each generates a mismatch
@jira_ticket CASSANDRA-14690
"""
self.cluster.populate(1).start(wait_for_binary_proto=True)
node1 = self.cluster.nodelist()[0]
with tempfile.TemporaryDirectory() as temp_dir:
fqldir1 = tempfile.mkdtemp(dir=temp_dir)
fqldir2 = tempfile.mkdtemp(dir=temp_dir)
results1 = tempfile.mkdtemp(dir=temp_dir)
queries1 = tempfile.mkdtemp(dir=temp_dir)
results2 = tempfile.mkdtemp(dir=temp_dir)
queries2 = tempfile.mkdtemp(dir=temp_dir)
node1.nodetool("enablefullquerylog --path={}".format(fqldir1))
node1.stress(['write', 'n=1000'])
node1.flush()
node1.stress(['read', 'n=1000'])
node1.nodetool("disablefullquerylog")
node1.stop()
for d in node1.data_directories():
rmtree(d)
os.mkdir(d)
node1.start(wait_for_binary_proto=True)
node1.nodetool("enablefullquerylog --path={}".format(fqldir2))
node1.stress(['write', 'n=1000', '-pop', 'seq=1000..2000'])
node1.flush()
node1.stress(['read', 'n=1000', '-pop', 'seq=1000..2000'])
node1.nodetool("disablefullquerylog")
node1.stop()
for d in node1.data_directories():
rmtree(d)
os.mkdir(d)
node1.start(wait_for_binary_proto=True)
self._run_fqltool_replay(node1, [fqldir1], "127.0.0.1", queries1, results1)
node1.stop()
for d in node1.data_directories():
rmtree(d)
os.mkdir(d)
node1.start(wait_for_binary_proto=True)
self._run_fqltool_replay(node1, [fqldir2], "127.0.0.1", queries2, results2)
output = self._run_fqltool_compare(node1, queries1, [results1, results2])
assert b"MISMATCH" in output # compares two different stress runs, should mismatch
def _run_fqltool_replay(self, node, logdirs, target, queries, results):
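        # Builds and invokes a command of roughly this shape (directories are
        # placeholders):
        #   fqltool replay --target 127.0.0.1 --store-queries <qdir> \
        #       --results <rdir> <fql_log_dir_1> [<fql_log_dir_2> ...]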
fqltool = self.fqltool(node)
args = [fqltool, "replay", "--target {}".format(target)]
if queries is not None:
args.append("--store-queries {}".format(queries))
if results is not None:
args.append("--results {}".format(results))
args.extend(logdirs)
rc = subprocess.call(args)
assert rc == 0
def _run_fqltool_compare(self, node, queries, results):
fqltool = self.fqltool(node)
args = [fqltool, "compare", "--queries {}".format(queries)]
args.extend([os.path.join(r, "127.0.0.1") for r in results])
logger.info(args)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
logger.info(stdout)
return stdout
def fqltool(self, node):
cdir = node.get_install_dir()
fqltool = os.path.join(cdir, 'tools', 'bin', 'fqltool')
return fqltool
| bdeggleston/cassandra-dtest | fqltool_test.py | Python | apache-2.0 | 6,364 |
# Lint as: python3
# ==============================================================================
# Copyright 2020 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements support for auto-quantization."""
import collections
import json
import os
import re
import copy
from absl import logging
import keras_tuner as kt
from keras_tuner import HyperModel
from keras_tuner import BayesianOptimization
from keras_tuner import Hyperband
from keras_tuner import RandomSearch
import numpy as np
import six
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.metrics import binary_accuracy
from tensorflow.keras.metrics import categorical_accuracy
from tensorflow.keras.metrics import sparse_categorical_accuracy
from qkeras.autoqkeras.forgiving_metrics import forgiving_factor # pylint: disable=line-too-long
from qkeras.autoqkeras.forgiving_metrics import ForgivingFactor # pylint: disable=line-too-long
from qkeras.autoqkeras.quantization_config import default_quantization_config # pylint: disable=line-too-long
from qkeras.autoqkeras.utils import print_qmodel_summary
from qkeras.utils import clone_model
from qkeras.utils import model_quantize
# AutoQKHyperModel is implemented on top of keras_tuner
# It basically creates a quantized model based on some rules
# and it computes an acc_delta that boosts the accuracy when
# choosing smaller models.
# Boosting function behaves like this.
# We use the following formula to compute the decrease factor:
# reference_size: number of parameters + activations of the model,
# assuming an 8-bit implementation.
# trial_size: number of parameters + activations of trial.
#
# 1) First, we compute how many times we decreased/increased the model
# i = log(reference_size / trial_size) / log(rate)
#
# 2) Then, we use delta_p / delta_n if model is smaller/bigger
# than reference model.
#
# delta = i * (
# (i < 0) * delta_n + (i >= 0) * delta_p
# )
#
# 3) the accuracy of the model (score) is adjusted by acc * delta
#
# The delta "boosts" the accuracy to allow worse models to be
# chosen by the hypermodel tuner.
#
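# Worked example (illustrative numbers only): with reference_size = 1e6,
# trial_size = 2.5e5, rate = 2.0 and delta_p = 8.0 (a percentage):
#   i = log(1e6 / 2.5e5) / log(2.0) = 2.0
#   delta = i * delta_p = 16 (percent), since i >= 0
# i.e. a trial four times smaller than the reference gets its accuracy boosted
# by roughly 16%, while a larger-than-reference trial is penalized via delta_n.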
REGISTERED_LAYERS = ["Dense", "Conv1D", "Conv2D", "DepthwiseConv2D",
"SimpleRNN", "LSTM", "GRU", "Bidirectional",
"Conv2DTranspose", "SeparableConv1D", "SeparableConv2D"]
Q_LAYERS = list(map(lambda x : 'Q' + x, REGISTERED_LAYERS))
SEQUENCE_LAYERS = ["SimpleRNN", "LSTM", "GRU", "Bidirectional"]
class AutoQKHyperModel(HyperModel):
"""Creates an hypermodel to attempt to quantize a reference model.
Arguments:
model: Model to be quantized.
metrics: List of metrics to be used.
custom_objects: Custom objects used by Keras during quantization.
target: Secondary metric to chase during search ("bits" or "energy").
transfer_weights: if true, transfer weights from unquantized model.
    frozen_layers: list of layer names that will not be quantized but
      will have their weights transferred from the original model.
activation_bits: parameter to be used by 'model_quantize'.
limit: limit the number of bits in quantizers, specified as dictionary.
tune_filters: one of "block", "layer", "none" for tuning entire
network, each layer separately, or no tuning.
tune_filters_exceptions: name of layers that will not be tuned.
layer_indexes: we only quantize layers whose ids are in layer_indexes.
learning_rate_optimizer: if true, we optimize learning rate along with
other parameters.
    head_name: specify which head to calculate score/trial-size from in
autoqkeras
quantization_config: dictionary containing configuration of
quantizers for kernel, bias and activation.
extend_model_metrics: If to append the trial size and score metrics to
model metrics, which are used for AutoQKeras to determine the quality
of a model.
Returns:
quantized model in trial and boosted accuracy function compiled
into quantized model.
"""
def __init__(
self, model, metrics, custom_objects=None, target=None,
transfer_weights=False, frozen_layers=None, activation_bits=4, limit=None,
tune_filters="none", tune_filters_exceptions=None,
layer_indexes=None, learning_rate_optimizer=False,
head_name=None, quantization_config=None, extend_model_metrics=True,
):
self.model = model
self.metrics = metrics
self.custom_objects = custom_objects if custom_objects else {}
self.target = target
self.reference_size = self.target.get_reference(model)
self.transfer_weights = transfer_weights
self.frozen_layers = frozen_layers if frozen_layers else []
self.activation_bits = activation_bits
self.head_name = head_name
self.extend_model_metrics = extend_model_metrics
# make sure we have at least 3 elements in list
    # first one for kernel, second one for bias and third one for activations.
#
# limit is in the format, where default replaces missing values:
# '{
# "Conv2D":[weight,bias,activation],
# "RNN":[weight,bias,recurrent,activation],
# "Dense":[weight,bias,activation],
# "Activation":[activation]
# "default": value
# }'
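    # A concrete (purely illustrative) limit could therefore look like:
    #   {"Dense": [4, 8, 4], "Conv2D": [4, 8, 4], "default": 8}
    # i.e. 4-bit kernels, 8-bit biases and 4-bit activations for Dense and
    # Conv2D layers, and at most 8 bits everywhere else.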
if limit is None:
self.limit = {}
else:
self.limit = limit
self.groups = {}
assert isinstance(self.limit, dict)
if self.limit.get("default", None) is None:
default = 8
else:
default = self.limit["default"]
# make sure we have entries for every type of layer we process
self._adjust_limit(default)
print("Limit configuration:" + json.dumps(self.limit))
assert tune_filters in ["block", "layer", "none"]
self.tune_filters = tune_filters
self.tune_filters_exceptions = re.compile(tune_filters_exceptions)
self.layer_indexes = layer_indexes
self.learning_rate_optimizer = learning_rate_optimizer
# load quantizer types for each type of quantizer
if quantization_config is None:
self.quantization_config = default_quantization_config
else:
self.quantization_config = quantization_config
def _adjust_limit(self, default):
"""Makes sure limit has all the fields required."""
if isinstance(default, list):
assert 3 <= len(default) <= 4
else:
default = [default] * 3
# we consider that if name is not there, we will ignore the layer
for name in REGISTERED_LAYERS:
if name in self.limit:
length = len(self.limit[name])
if length < 4 and name in SEQUENCE_LAYERS:
assert len(default) == 4
self.limit[name] = self.limit[name] + default[length:]
elif length < 3:
# No recurrent limit needed for non recurrent layers
self.limit[name] = self.limit[name] + default[length:2] + default[-1:]
def _n(self, name, s_list):
"""Creates a unique name for the tuner."""
return name + "_".join([str(v) for v in s_list])
def _get_quantizer(self, hp, head, layer_name, layer_class_name,
i_list=None, is_kernel=True, is_linear=False):
"""Gets a quantizer randomly for kernels/bias/activations."""
# first pick up which group we belong to.
if not i_list:
i_list = []
if is_linear:
# linear quantizers
field_name = "linear"
kq = self.quantization_config["linear"]
index = 0
q_list = list(kq.keys())
q_dict = kq
elif "kernel" in head:
# kernel quantizers
field_name = "kernel"
kq = self.quantization_config["kernel"]
index = 0
q_list = list(kq.keys())
q_dict = kq
elif "bias" in head:
# bias quantizers
field_name = "bias"
bq = self.quantization_config["bias"]
index = 1
q_list = list(bq.keys())
q_dict = bq
elif "pointwise_kernel" in head: # limit is same as kernel
# pointwise kernel quantizers
field_name = "pointwise_kernel"
kq = self.quantization_config["pointwise_kernel"]
index = 2
q_list = list(kq.keys())
q_dict = kq
elif "recurrent_kernel" in head: # limit is same as kernel
# recurrent kernel quantizers
field_name = "recurrent_kernel"
kq = self.quantization_config["recurrent_kernel"]
index = 2
q_list = list(kq.keys())
q_dict = kq
elif "recurrent_activation" in head: # limit is same as kernel
# recurrent activation quantizers
field_name = "recurrent_activation"
raq = self.quantization_config["recurrent_activation"]
index = -1
q_list = list(raq.keys())
q_dict = raq
else:
# activation quantizers
field_name = "activation"
aq = self.quantization_config["activation"]
index = -1
q_list = list(aq.keys())
q_dict = aq
    # we first search for the layer name. If it is not there, we switch to
# layer class name.
found_pattern = False
name = layer_class_name
count = -1
for i, pattern in enumerate(self.limit):
if re.match(pattern, layer_name):
found_pattern = True
name = pattern
count = i
break
# for partially quantized networks we may not have
# the layer class name in the set.
if name == layer_class_name and name not in self.limit:
return None, -1
# groups is a dictionary that contains dictionary of the
# patterns so that we can group everything together
if found_pattern:
if name in self.groups and index in self.groups[name]:
return self.groups[name][index]
# not there, let's use a different name for
# the head and field
head = "qk_group_" + str(count) + "_" + field_name
head = name + "_" + field_name
# limit group can be a list of quantizers or a
# number that tells us maximum number of bits
if isinstance(self.limit[name][index], list):
# we assume this is a subset of the q_keys
# entry in quantization_config will be like:
# "Conv2D": [ ["q1", "q2", "q3"], ... ]
#
# we always assume this list is a subset of
# the original list or we will raise an
# error.
q_list = self.limit[name][index]
q_dict = {
key: q_dict[key] for key in q_list
}
else:
q_dict = {
key: value for (key, value) in q_dict.items()
if value <= self.limit[name][index]
}
q_list = list(q_dict.keys())
    # didn't find a match in groups, so create one.
if len(q_list) == 1:
q_name = hp.Fixed(self._n(head + "_quantizer", i_list), q_list[0])
else:
q_name = hp.Choice(self._n(head + "_quantizer", i_list), q_list)
if found_pattern:
if name not in self.groups:
self.groups[name] = {index: (q_name, q_dict[q_name])}
else:
self.groups[name][index] = (q_name, q_dict[q_name])
return (q_name, q_dict[q_name])
def quantize_model(self, hp):
"""Quantize model by hyperparameter search and extracting size schema."""
# configuration for quantization.
q_dict = {}
model = clone_model(self.model, self.custom_objects)
fanin = []
filter_range = [0.5, 0.75, 1.0, 1.5, 2.0]
# network_filters=hp.Choice(...) should only be defined if we are sure
    # the current block has any layer that needs filter sweep.
    # Otherwise, when no layer needs filter sweep and a hp variable is defined,
    # there will be ineffective trials that loop around the network
    # filter range, even though the filter sweep was never applied to
    # any layers. Therefore, we use filter_sweep_enabled to mark if any layer
# in current block needs filter sweep.
kernel_quantizer_dict = {}
filter_sweep_enabled = False
for layer in model.layers:
if layer.__class__.__name__ in REGISTERED_LAYERS:
kernel_quantizer, bits = self._get_quantizer(
hp, layer.name + "_kernel", layer.name, layer.__class__.__name__,
is_kernel=True)
kernel_quantizer_dict[layer.name] = (kernel_quantizer, bits)
# kernel_quantizer is not None -> layer in the current block need
# to be quantized
if kernel_quantizer:
if (
not filter_sweep_enabled and self.tune_filters in
["layer", "block"]
and not self.tune_filters_exceptions.search(layer.name) and
layer.__class__.__name__ in
["Dense", "Conv1D", "Conv2D", "Conv2DTranspose"]
):
filter_sweep_enabled = True
if layer.__class__.__name__ in SEQUENCE_LAYERS:
recurrent_quantizer, _ = self._get_quantizer(
hp, layer.name + "_recurrent_kernel", layer.name, layer.__class__.__name__,
is_kernel=True)
if layer.__class__.__name__ in ["SeparableConv1D", "SeparableConv2D"]:
pointwise_quantizer, _ = self._get_quantizer(
hp, layer.name + "_pointwise_kernel", layer.name, layer.__class__.__name__,
is_kernel=True)
if self.tune_filters == "block" and filter_sweep_enabled:
network_filters = hp.Choice(
"network_filters",
values=filter_range,
default=1.0
)
else:
network_filters = 1.0
for layer_id, layer in enumerate(model.layers):
# we can use these indexes to disable some layers, like the last
# layer
if self.layer_indexes is not None and layer_id not in self.layer_indexes:
continue
layer_d = {}
if layer.__class__.__name__ in Q_LAYERS:
weights = layer.get_weights()[0]
if (
layer.get_quantizers()[0] and
hasattr(layer.get_quantizers()[0], "bits")
):
bits = layer.get_quantizers()[0].bits
else:
bits = 8
fanin.append(np.prod(weights.shape[:-1]) * (8. - bits) / 8.)
if layer.__class__.__name__ in REGISTERED_LAYERS:
# difference between depthwise and the rest is just the name
# of the kernel.
if layer.__class__.__name__ in [
"DepthwiseConv2D", "SeparableConv1D", "SeparableConv2D"
]:
kernel_name = "depthwise_quantizer"
else:
kernel_name = "kernel_quantizer"
# sample kernel quantizer.
(kernel_quantizer, bits) = kernel_quantizer_dict[layer.name]
if not kernel_quantizer:
continue
# process fanin here
if bits < 8:
weights = layer.get_weights()[0]
fanin.append(np.prod(weights.shape[:-1]) * (8. - bits) / 8.)
# we only want to do that if we are going to quantize layer
if (
self.tune_filters in ["layer", "block"] and
not self.tune_filters_exceptions.search(layer.name) and
layer.__class__.__name__ in [
"Dense", "Conv1D", "Conv2D", "Conv2DTranspose",
"SeparableConv1D", "SeparableConv2D"
]
):
if self.tune_filters == "layer":
layer_filters = hp.Choice(
"network_filters_" + layer.name,
values=filter_range,
default=1.0
)
else:
layer_filters = network_filters
if layer.__class__.__name__ == "Dense":
layer.units = max(int(layer.units * layer_filters), 1)
elif layer.__class__.__name__ in [
"Conv1D", "Conv2D", "Conv2DTranspose",
"SeparableConv1D", "SeparableConv2D"
]:
layer.filters = max(int(layer.filters * layer_filters), 1)
layer_d[kernel_name] = kernel_quantizer
if layer.__class__.__name__ in SEQUENCE_LAYERS:
layer_d['recurrent_quantizer'] = recurrent_quantizer
if layer.__class__.__name__ in ["SeparableConv1D", "SeparableConv2D"]:
layer_d['pointwise_quantizer'] = pointwise_quantizer
if layer.__class__.__name__ in ["LSTM", "GRU", "Bidirectional"]:
layer_d['recurrent_activation'], _ = self._get_quantizer(
hp, layer.name + "_recurrent_activation", layer.name,
layer.__class__.__name__, is_kernel=False)
# if we use bias, sample quantizer.
if layer.__class__.__name__ == "Bidirectional":
layer_d["bias_quantizer"], bits = self._get_quantizer(
hp, layer.name + "_bias", layer.name, layer.__class__.__name__,
is_kernel=False)
layer_d["activation"], bits = self._get_quantizer(
hp, layer.name + "_activation", layer.name,
layer.__class__.__name__, is_kernel=False)
q_dict[layer.name] = layer_d
else:
if layer.use_bias:
layer_d["bias_quantizer"], bits = self._get_quantizer(
hp, layer.name + "_bias", layer.name, layer.__class__.__name__,
is_kernel=False)
# if activation is not linear/softmax we need to process it.
if layer.activation is None:
is_softmax = False
is_linear = False
else:
if isinstance(layer.activation, six.string_types):
is_softmax = layer.activation == "softmax"
is_linear = layer.activation == "linear"
else:
is_softmax = layer.activation.__name__ == "softmax"
is_linear = layer.activation.__name__ == "linear"
if not is_softmax and not is_linear:
layer_d["activation"], bits = self._get_quantizer(
hp, layer.name + "_activation", layer.name,
layer.__class__.__name__, is_kernel=False)
q_dict[layer.name] = layer_d
elif layer.__class__.__name__ in ["Reshape"]:
# we cannot handle fine tuning filters per layer right now.
assert self.tune_filters in ["none", "block"]
        # we need to make sure this pattern exists; this should only occur for
        # "scheduler", so the name will be complete and not a pattern.
if (
self.tune_filters == "none" or
layer.name not in self.limit or
self.tune_filters_exceptions.search(layer.name)
):
continue
if K.image_data_format() == "channels_last":
layer.target_shape = layer.target_shape[:-1] + (
min(int(layer.target_shape[-1] * network_filters), 1),)
else:
layer.target_shape = (int(layer.target_shape[0] * network_filters),
) + layer.target_shape[1:]
elif layer.__class__.__name__ in ["Activation"]:
if isinstance(layer.activation, six.string_types):
is_linear = layer.activation == "linear"
is_softmax = layer.activation == "softmax"
else:
is_linear = layer.activation.__name__ == "linear"
is_softmax = layer.activation.__name__ == "softmax"
# if it is a linear activation, we will notify the
# quantizer we are searching for linear type of
# quantizers
if not is_softmax:
activation, bits = self._get_quantizer(
hp, layer.name + "_activation", layer.name,
layer.__class__.__name__, is_kernel=False,
is_linear=is_linear)
if not activation:
continue
# look at documentation on model_quantize
q_dict[layer.name] = activation
elif layer.__class__.__name__ in self.limit:
# mark it for conversion
q_dict[layer.name] = {}
else:
for pattern in self.limit:
if re.match(pattern, layer.name):
q_dict[layer.name] = {}
break
q_model = model_quantize(
model, q_dict, self.activation_bits,
custom_objects=self.custom_objects,
transfer_weights=self.transfer_weights)
return q_model, fanin
def build(self, hp):
"""Builds hyperparameterized quantized model."""
self.groups = {}
# we are not using the fanin right now.
q_model, _ = self.quantize_model(hp)
# transfer weights from previous run as we know we will not
if self.learning_rate_optimizer:
# if learning_rate_optimizer, we try to transfer weights from previous run
print("... freezing layers {}.".format(", ".join(self.frozen_layers)))
for layer_name in self.frozen_layers:
o_weights = self.model.get_layer(layer_name).get_weights()
layer = q_model.get_layer(layer_name)
# don't know if setting trainable to False is good or not yet
# try to do "soft-freeze" by transferring weights. More experiments
# needed before we decide what to do.
# layer.trainable = False
weights = layer.get_weights()
# because we can be changing number of layers, we do not know
# if we can really use some of the weights or not.
equal_layer = True
for w in range(len(o_weights)):
if o_weights[w].shape != weights[w].shape:
equal_layer = False
break
if equal_layer:
layer.set_weights(o_weights)
self.trial_size = self.target.get_trial(q_model)
# we will use a boosted accuracy computation
delta = self.target.delta()
# by default, we use the first metric specified by the
# user to be the target metric.
if not self.metrics:
score_metric = None
elif isinstance(self.metrics, dict):
if not self.head_name:
# if head_name not provided, find the first metric from the dict
score_key = list(self.metrics.keys())[0]
else:
        # find the metric associated with the head_name
score_key = self.head_name
score_metric = self.metrics[score_key]
if isinstance(score_metric, list):
score_metric = score_metric[0]
elif isinstance(self.metrics, list):
score_metric = self.metrics[0]
self.score = AutoQKHyperModel.adjusted_score(
self, delta, score_metric)
# some papers suggest that we use learning_rate * sqrt(fanin) / layer
# we cannot do that right now, but we can definitely do that
# if we are quantizing one layer at a time
#
# https://arxiv.org/pdf/1511.00363.pdf
# we use the magic number to smooth out the average
total_factor = self.target.get_total_factor()
delta_lr = 1.0 + (total_factor < 0) * total_factor
# we assume model has been compiled at least.
lr = float(self.model.optimizer.lr.numpy())
# we assume that delta_lr can lower lr to accommodate
# for more quantization
#
    # if a learning rate scheduler is used, we assume the callback manages the
    # learning rate. Just set it to a constant.
if self.learning_rate_optimizer:
lr_range = list(lr * np.linspace(delta_lr, 1.1, 5))
lr_choice = hp.Choice("learning_rate", lr_range)
self.model.optimizer.learning_rate = lr_choice
else:
lr_choice = lr
print("learning_rate: {}".format(lr))
optimizer = self.model.optimizer
q_model.summary()
metrics = self.metrics
# extend metrics by including score and trial_size metrics
if self.extend_model_metrics:
ext_metrics = copy.deepcopy(metrics)
if isinstance(ext_metrics, dict):
# for dict, add trial_size_metric and score metric to target output
if not self.head_name:
# if head_name not provided, find the first metric from the dict
score_key = list(ext_metrics.keys())[0]
else:
          # find the metric associated with the head_name
score_key = self.head_name
score_metric = ext_metrics[score_key]
if isinstance(score_metric, list):
score_metric += [self.trial_size_metric(self.trial_size), self.score]
else:
score_metric = [score_metric]
score_metric += [self.trial_size_metric(self.trial_size), self.score]
ext_metrics[score_key] = score_metric
else:
ext_metrics += [
self.trial_size_metric(self.trial_size),
self.score]
metrics = ext_metrics
q_model.compile(
optimizer=optimizer,
loss=self.model.loss,
metrics=metrics
)
self.q_model = q_model
# this just prints a summary of the quantization for debugging
# purposes
self.target.print_stats()
print_qmodel_summary(q_model)
return q_model
@staticmethod
def adjusted_score(hyper_model, delta, metric_function=None):
def score(y_true, y_pred):
y_t_rank = len(y_true.shape.as_list())
y_p_rank = len(y_pred.shape.as_list())
y_t_last_dim = y_true.shape.as_list()[-1]
y_p_last_dim = y_pred.shape.as_list()[-1]
is_binary = y_p_last_dim == 1
is_sparse_categorical = (
y_t_rank < y_p_rank or y_t_last_dim == 1 and y_p_last_dim > 1)
if isinstance(metric_function, six.string_types):
if metric_function in ["accuracy", "acc"]:
if is_binary:
metric = binary_accuracy(y_true, y_pred)
elif is_sparse_categorical:
metric = sparse_categorical_accuracy(y_true, y_pred)
else:
metric = categorical_accuracy(y_true, y_pred)
else:
metric = categorical_accuracy(y_true, y_pred)
else:
metric = metric_function(y_true, y_pred)
return K.cast(metric * (1.0 + delta), K.floatx())
if not metric_function:
metric_function = "accuracy"
return score
@staticmethod
def trial_size_metric(trial_size):
def trial(y_true, y_pred): # pylint: disable=unused-argument
return K.cast(trial_size, K.floatx())
return trial
class AutoQKeras:
"""Performs autoquantization in Keras model.
Arguments:
model: Model to be quantized.
metrics: List of metrics to be used.
custom_objects: Custom objects used by Keras during quantization.
goal: Metric to compute secondary goal of search (bits or energy)
output_dir: name of output directory to store results.
mode: random, hyperband or bayesian used by keras_tuner.
custom_tuner: The Keras Tuner class to use to search hyperparams
transfer_weights: if true, transfer weights from unquantized model.
    frozen_layers: list of layer names that will not be quantized but
      will have their weights transferred from the original model.
activation_bits: parameter to be used by 'model_quantize'.
limit: limit the number of bits in quantizers specified as a dictionary.
tune_filters: one of "block", "layer", "none" for tuning entire
network, each layer separately, or no tuning.
tune_filters_exceptions: name of layers that will not be tuned.
layer_indexes: indexes of layers we will quantize.
learning_rate_optimizer: if true, user will provide lr scheduler
callback.
quantization_config: file name of dictionary containing configuration of
quantizers for kernel, bias and activation.
    head_name: specify which head to calculate score/trial-size from in
autoqkeras
score_metric: Str. Optional metric name to use to evaluate the trials.
Defaults to val_score
tuner_kwargs: parameters for keras_tuner depending on whether
      mode is random, hyperband or bayesian. Please refer to the
      documentation of keras_tuner Tuners.
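  Example (an illustrative sketch, not a prescribed workflow; assumes a
  compiled Keras model and numpy training data):
    autoqk = AutoQKeras(model, metrics=["acc"], output_dir="autoqk_results",
                        mode="random", max_trials=20)
    autoqk.fit(x_train, y_train, validation_split=0.1, epochs=10)
    qmodel = autoqk.get_best_model()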
"""
def __init__(
self, model, metrics=None, custom_objects=None, goal=None,
output_dir="result", mode="random", custom_tuner=None,
transfer_weights=False, frozen_layers=None, activation_bits=4,
limit=None, tune_filters="none",
tune_filters_exceptions=None, learning_rate_optimizer=False,
layer_indexes=None, quantization_config=None, overwrite=True,
head_name=None, score_metric=None, **tuner_kwargs):
# Collect input arguments to AutoQKeras for usage by custom tuner
autoqkeras_input_args = locals()
if not metrics:
metrics = []
if not custom_objects:
custom_objects = {}
# goal: { "type": ["bits", "energy"], "params": {...} } or ForgivingFactor
# type
# For type == "bits":
# delta_p: increment (in %) of the accuracy if trial is smaller.
# delta_n: decrement (in %) of the accuracy if trial is bigger.
# rate: rate of decrease/increase in model size in terms of bits.
# input_bits; size of input tensors.
# output_bits; size of output tensors.
# stress: parameter to reduce reference size to force tuner to
# choose smaller models.
# config: configuration on what to compute for each layer
# minimum configuration is { "default": ["parameters", "activations"] }
# use simplest one - number of bits
if not goal:
goal = {
"type": "bits",
"params": {
"delta_p": 8.0,
"delta_n": 8.0,
"rate": 2.0,
"stress": 1.0,
"input_bits": 8,
"output_bits": 8,
"ref_bits": 8,
"config": {
"default": ["parameters", "activations"]
}
}
}
self.overwrite = overwrite
    # for a multi-head model, we need to specify which head (output) the
    # score and trial metrics are calculated from
self.head_name = head_name
# if we have not created it already, create new one.
if not isinstance(goal, ForgivingFactor):
target = forgiving_factor[goal["type"]](**goal["params"])
else:
target = goal
# if no metrics were specified, we want to make sure we monitor at least
# accuracy.
if not metrics:
metrics = ["acc"]
self.hypermodel = AutoQKHyperModel(
model, metrics, custom_objects, target,
transfer_weights=transfer_weights,
frozen_layers=frozen_layers,
activation_bits=activation_bits,
limit=limit,
tune_filters=tune_filters,
tune_filters_exceptions=tune_filters_exceptions,
layer_indexes=layer_indexes,
learning_rate_optimizer=learning_rate_optimizer,
head_name=head_name,
quantization_config=quantization_config
)
# right now we create unique results directory
idx = 0
name = output_dir
if self.overwrite:
while os.path.exists(name):
idx += 1
name = output_dir + "_" + str(idx)
output_dir = name
self.output_dir = output_dir
if score_metric is None:
if self.head_name:
score_metric = "val_" + self.head_name + "_score"
else:
score_metric = "val_score"
assert mode in ["random", "bayesian", "hyperband"]
if custom_tuner is not None:
self.tuner = custom_tuner(
self.hypermodel,
autoqkeras_config=autoqkeras_input_args,
objective=kt.Objective(score_metric, "max"),
project_name=output_dir,
**tuner_kwargs)
elif mode == "random":
self.tuner = RandomSearch(
self.hypermodel,
objective=kt.Objective(score_metric, "max"),
project_name=output_dir,
**tuner_kwargs)
elif mode == "bayesian":
self.tuner = BayesianOptimization(
self.hypermodel,
objective=kt.Objective(score_metric, "max"),
project_name=output_dir,
**tuner_kwargs)
elif mode == "hyperband":
self.tuner = Hyperband(
self.hypermodel,
objective=kt.Objective(score_metric, "max"),
project_name=output_dir,
**tuner_kwargs)
else:
pass
self.tuner.search_space_summary()
def _has_earlystopping(self, callbacks):
"""Check if EarlyStopping has been defined or not."""
if callbacks is None:
return False
for callback in callbacks:
if isinstance(callback, tf.keras.callbacks.EarlyStopping):
return True
return False
def history(self, number_of_trials=-1):
"""Returns the history of the model search."""
trials = self.tuner.oracle.get_best_trials(number_of_trials)
state = [trial.get_state() for trial in trials]
result = {}
result["score"] = [
state[i]["score"] for i in range(len(state))
if trials[i].score is not None
]
for i in range(len(state)):
if trials[i].score is not None:
keys = state[i]["metrics"]["metrics"].keys()
for key in keys:
if key != "score" and not key.startswith(
"val_") and key != "loss" and key != "trial":
cur_accuracy = state[i]["metrics"]["metrics"][key][
"observations"][0]["value"][0]
if "val_" + key in state[i]["metrics"]["metrics"].keys():
cur_val_accuracy = state[i]["metrics"]["metrics"]["val_" + key][
"observations"][0]["value"][0]
else:
cur_val_accuracy = None
# only update result if both key and val_key exist
if cur_val_accuracy:
if key not in result.keys():
result[key] = [cur_accuracy]
result["val_" + key] = [cur_val_accuracy]
else:
result[key].append(cur_accuracy)
result["val_" + key].append(cur_val_accuracy)
if self.head_name:
trial_from_output = self.head_name + "_trial"
else:
trial_from_output = "trial"
result["trial_size"] = [
state[i]["metrics"]["metrics"][trial_from_output]["observations"][0]
["value"][0] for i in range(len(state)) if trials[i].score is not None
]
return result
def fit(self, *fit_args, **fit_kwargs):
"""Invokes tuner fit algorithm."""
callbacks = fit_kwargs.get("callbacks", None)
if callbacks is None:
callbacks = []
epochs = fit_kwargs.get("epochs", None)
if epochs is None:
epochs = 10
if not self._has_earlystopping(callbacks):
callbacks = callbacks + [
tf.keras.callbacks.EarlyStopping(
"val_loss", patience=min(20, epochs//5))
]
fit_kwargs["callbacks"] = callbacks
self.tuner.search(*fit_args, **fit_kwargs)
@staticmethod
def get_best_lr(qmodel):
"""Extracts best lr of model."""
return qmodel.optimizer.lr.numpy()
def get_best_model(self):
params = self.tuner.get_best_hyperparameters()[0]
q_model = self.tuner.hypermodel.build(params)
self.learning_rate = q_model.optimizer.lr.numpy()
return q_model
def get_learning_rate(self):
return self.learning_rate
class AutoQKerasScheduler:
"""Performs autoquantization one layer/group at a time.
Arguments:
model: Model to be quantized.
metrics: List of metrics to be monitored.
custom_objects: Custom objects used by Keras during quantization.
goal: Metric to compute secondary goal of search (bits or energy)
output_dir: name of output directory to store results.
mode: random, hyperband or bayesian used by keras_tuner.
transfer_weights: if true, transfer weights from unquantized model.
activation_bits: parameter to be used by 'model_quantize'.
limit: limit the number of bits in quantizers specified as a dictionary.
tune_filters: one of "block", "layer", "none" for tuning entire
network, each layer separately, or no tuning.
tune_filters_exceptions: name of layers that will not be tuned.
layer_indexes: indexes of layer to be quantized.
learning_rate_optimizer: if true, user will provide lr scheduler
callback.
    blocks: list of re patterns specifying group configuration for layers.
schedule_block: "sequential" or "cost". Schedule blocks using the
order of the groups or decreasing cost (energy or bits).
quantization_config: file name of dictionary containing configuration of
quantizers for kernel, bias and activation.
debug: if True, fit will just print the groups for debugging purposes.
    head_name: specify which head to calculate score/trial-size from in
autoqkeras
tuner_kwargs: parameters for keras_tuner depending on whether
      mode is random, hyperband or bayesian. Please refer to the
      documentation of keras_tuner Tuners.
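  Example (an illustrative sketch; the model, data, block patterns and limits
  are assumptions, not taken from this file):
    scheduler = AutoQKerasScheduler(
        model, metrics=["acc"], output_dir="autoqk_schedule",
        limit={"Dense": [4, 8, 4], "Conv2D": [4, 8, 4], "default": 8},
        blocks=["^conv.*", "^dense.*"], schedule_block="cost", max_trials=20)
    scheduler.fit(x_train, y_train, validation_split=0.1, epochs=10)
    qmodel = scheduler.get_best_model()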
"""
def __init__(
self, model, metrics=None, custom_objects=None, goal=None,
output_dir="result", mode="random", transfer_weights=False,
activation_bits=4, limit=None, tune_filters="none",
tune_filters_exceptions=None, layer_indexes=None,
learning_rate_optimizer=False, blocks=None, schedule_block="sequential",
quantization_config=None, overwrite=True, debug=False, head_name=None,
**tuner_kwargs):
if not metrics:
metrics = []
if not custom_objects:
custom_objects = {}
# goal: { "type": ["bits", "energy"], "params": {...} }
# For type == "bits":
# delta_p: increment (in %) of the accuracy if trial is smaller.
# delta_n: decrement (in %) of the accuracy if trial is bigger.
# rate: rate of decrease/increase in model size in terms of bits.
# input_bits; size of input tensors.
# output_bits; size of output tensors.
# stress: parameter to reduce reference size to force tuner to
# choose smaller models.
# config: configuration on what to compute for each layer
# minimum configuration is { "default": ["parameters", "activations"] }
# use simplest one - number of bits
if not goal:
goal = {
"type": "bits",
"params": {
"delta_p": 8.0,
"delta_n": 8.0,
"rate": 2.0,
"stress": 1.0,
"input_bits": 8,
"output_bits": 8,
"ref_bits": 8,
"config": {
"default": ["parameters", "activations"]
}
}
}
self.target = forgiving_factor[goal["type"]](**goal["params"])
self.model = model
self.metrics = metrics
self.custom_objects = custom_objects
self.mode = mode
self.transfer_weights = transfer_weights
self.activation_bits = activation_bits
self.limit = limit
self.tune_filters = tune_filters
self.tune_filters_exceptions = tune_filters_exceptions
self.layer_indexes = layer_indexes
self.learning_rate_optimizer = learning_rate_optimizer
self.blocks = blocks
self.schedule_block = schedule_block
self.quantization_config = quantization_config
self.tuner_kwargs = tuner_kwargs
self.debug = debug
self.head_name = head_name
self.autoqk = None
self.learning_rate = model.optimizer.lr.numpy()
self.overwrite = overwrite
assert self.schedule_block in ["sequential", "cost"]
# right now we create unique results directory
idx = 0
name = output_dir
if self.overwrite:
while os.path.exists(name):
idx += 1
name = output_dir + "_" + str(idx)
output_dir = name
self.output_dir = output_dir
self.next_block = self.get_next_block(overwrite)
if self.next_block > 0:
strategy = self.tuner_kwargs.get("distribution_strategy", None)
if strategy:
with strategy.scope():
self.model = tf.keras.models.load_model(
os.path.join(
self.output_dir, "model_block_" + str(self.next_block - 1)),
custom_objects=self.custom_objects)
else:
self.model = tf.keras.models.load_model(
os.path.join(
self.output_dir, "model_block_" + str(self.next_block - 1)),
custom_objects=self.custom_objects)
print("Load model completed")
def get_next_block(self, overwrite):
"""Get the next block id to be worked on."""
if overwrite:
return 0
else:
try:
with tf.io.gfile.GFile(os.path.join(self.output_dir, "scheduler.json"),
"r") as f:
scheduler_json = f.read()
scheduler = json.loads(scheduler_json)
return scheduler["next_block"]
except: # pylint: disable=bare-except
return 0
def get_limit(self, model, pattern):
"""Apply patterned group to limit to obtain new limit set."""
limit = self.limit
new_limit = {}
new_pattern = collections.defaultdict(list)
for layer_name in self.grouped_patterns[pattern]:
layer = model.get_layer(layer_name)
layer_class_name = layer.__class__.__name__
target_quantizers = limit.get(layer_class_name, -1)
for limit_pattern in limit:
if re.match(limit_pattern, layer_name):
target_quantizers = limit[limit_pattern]
new_pattern[limit_pattern].append(layer_name)
layer_name = limit_pattern
break
if target_quantizers != -1:
new_limit[layer_name] = target_quantizers
for key in new_pattern:
# grouped pattern in regex need to be ^(word1|word2|...)$ instead of
# ^word1|word2|...$; otherwise it cause non-exact match,
# e.g., fc.*_0 and fc.*_0_relu were miss-matched
new_key = "^" + "(" + "|".join(new_pattern[key]) + ")" + "$"
new_limit[new_key] = new_limit[key]
if new_key != key:
del new_limit[key]
return new_limit
def fit(self, *fit_args, **fit_kwargs):
"""Invokes tuner fit algorithm."""
self.history = []
self.compute_block_costs(self.blocks, self.model)
if self.tuner_kwargs.get("max_trials", None):
max_trials = float(self.tuner_kwargs["max_trials"])
lr = self.model.optimizer.lr.numpy()
model = self.model
frozen_layers = []
for i, (pattern, cost) in enumerate(self.retrieve_max_block()):
# now create new limit pattern
if not self.overwrite:
if i < self.next_block:
print("Resume tuning. Skipping block ", i)
continue
print("... block cost: {:.0f} / {:.0f}".format(cost, self.reference_size))
if self.tuner_kwargs.get("max_trials", None):
self.tuner_kwargs["max_trials"] = int(
max(10, max_trials * cost / self.reference_size))
print("... adjusting max_trials for this block to {}".format(
self.tuner_kwargs["max_trials"]))
limit = self.get_limit(model, pattern)
new_frozen_layers = self.grouped_patterns[pattern]
# if dictionary is empty we did not match anything.
# we have a bug in the patterns specified by the
# user.
assert limit
print("Pattern {} is : {}".format(i, limit))
if self.debug:
frozen_layers = frozen_layers + new_frozen_layers
continue
self.autoqk = AutoQKeras(
model, self.metrics,
custom_objects=self.custom_objects,
goal=self.target,
output_dir=self.output_dir + "/" + str(i),
mode=self.mode,
transfer_weights=self.transfer_weights,
frozen_layers=frozen_layers,
activation_bits=self.activation_bits,
limit=limit,
tune_filters=self.tune_filters,
tune_filters_exceptions=self.tune_filters_exceptions,
layer_indexes=self.layer_indexes,
learning_rate_optimizer=self.learning_rate_optimizer,
quantization_config=self.quantization_config,
overwrite=self.overwrite,
head_name=self.head_name,
**self.tuner_kwargs)
self.autoqk.fit(*fit_args, **fit_kwargs)
self.autoqk.tuner.results_summary()
self.history.append(self.autoqk.history())
model = self.autoqk.get_best_model()
self.learning_rate = model.optimizer.lr.numpy()
# restore learning rate
# this is just a placeholder for the optimizer.
model.compile(
model.optimizer,
loss=self.model.loss,
metrics=self.model.metrics)
frozen_layers = frozen_layers + new_frozen_layers
filename = self.output_dir + "/model_block_" + str(i)
model.save(filename)
self.next_block = i + 1
# update scheduler json
with tf.io.gfile.GFile(os.path.join(self.output_dir, "scheduler.json"),
"w") as f:
f.write(json.dumps({"next_block": self.next_block}))
if self.debug:
return
self.best_model = model
# make all layers trainable again
for layer_name in frozen_layers:
layer = model.get_layer(layer_name)
layer.trainable = True
def compute_block_costs(self, patterns, model):
"""Computes costs for each block."""
# get block cost for original model
self.reference_size = self.target.get_reference(model)
self.model_size = self.target.get_reference_stats()
# first group layers into the patterns
groups = {pattern: [] for pattern in patterns}
for layer_id, layer in enumerate(model.layers):
if (
self.layer_indexes is not None and
layer_id not in self.layer_indexes
):
continue
for pattern in groups:
if re.match(pattern, layer.name):
groups[pattern].append(layer.name)
self.grouped_patterns = groups
# now compute cost for each group
self.costs = []
for pattern in patterns: # self.grouped_patterns:
total = 0
for layer in self.grouped_patterns[pattern]:
if layer in self.model_size:
total += self.model_size[layer]["total"]
self.costs.append((pattern, total))
# the costs will be sorted by the total cost of the group
if self.schedule_block == "cost":
self.costs = sorted(self.costs, key=lambda cost_tuple: -cost_tuple[1])
def retrieve_max_block(self):
for cost in self.costs:
yield cost
def get_history(self):
"""Returns the history of the model search."""
return self.history
def get_best_model(self):
"""Returns the best model."""
# check if we have run fit first.
if not self.autoqk:
return None
self.autoqk.hypermodel.target.print_stats()
print_qmodel_summary(self.best_model)
return self.best_model
def get_learning_rate(self):
return self.learning_rate
| google/qkeras | qkeras/autoqkeras/autoqkeras_internal.py | Python | apache-2.0 | 46,533 |
import tempfile
import os
from sinz.Util import Util
from sinz.cli.PluginManager import PluginManager
from sinz.cli.Registry import Registry
from sinz.cli.CLI import CLI
class TestProject(object):
def __init__(self,initCmd = None):
self.basepath = tempfile.mkdtemp()
self.projectpath = os.path.join(self.basepath,"project")
Util.runCmd("git clone . %s"%(self.projectpath,))
self.initCmd = initCmd
self.keep = False
def reloadPlugins(self):
PluginManager.setForceReload(True)
Registry.INSTANCE = None
self.cli = CLI()
PluginManager.setForceReload(False)
def __enter__(self):
self.oldcwd = os.getcwd()
os.chdir(self.projectpath)
if self.initCmd:
Util.runCmd(self.initCmd)
self.reloadPlugins()
return self
def __exit__(self, tipe, value, traceback):
os.chdir(self.oldcwd)
self.reloadPlugins()
if self.keep:
print("keeping %s"%(self.basepath,))
else:
cmd = "rm -rf %s" % (self.basepath, )
Util.runCmd(cmd)
| magwas/sinz | tests/tests/TestProject.py | Python | gpl-2.0 | 1,119 |
import numpy as np
import mxnet as mx
from import_msgpack import import_params
# define encoder model
def encoder(data):
conv11 = mx.symbol.Convolution(name = 'l1_conv', data = data, kernel = (4, 4), stride = (2, 2), pad = (1, 1), num_filter = 64)
relu11 = mx.symbol.LeakyReLU(name = 'l2', data = conv11, slope = 0.2, act_type = 'leaky')
conv12 = mx.symbol.Convolution(name = 'l3_conv', data = relu11, kernel = (4, 4), stride = (2, 2), pad = (1, 1), num_filter = 64)
bn12 = mx.symbol.BatchNorm(name='l4_bn', data = conv12, eps = 1e-05, momentum = 0.1, fix_gamma = False)
relu12 = mx.symbol.LeakyReLU(name = 'l5', data = bn12, slope = 0.2, act_type = 'leaky')
conv13 = mx.symbol.Convolution(name = 'l6_conv', data = relu12, kernel = (4, 4), stride = (2, 2), pad = (1, 1), num_filter = 128)
bn13 = mx.symbol.BatchNorm(name='l7_bn', data = conv13, eps = 1e-05, momentum = 0.1, fix_gamma = False)
relu13 = mx.symbol.LeakyReLU(name = 'l8', data = bn13, slope = 0.2, act_type = 'leaky')
conv14 = mx.symbol.Convolution(name = 'l9_conv', data = relu13, kernel = (4, 4), stride = (2, 2), pad = (1, 1), num_filter = 256)
bn14 = mx.symbol.BatchNorm(name='l10_bn', data = conv14, eps = 1e-05, momentum = 0.1, fix_gamma = False)
relu14 = mx.symbol.LeakyReLU(name = 'l11', data = bn14, slope = 0.2, act_type = 'leaky')
conv15 = mx.symbol.Convolution(name = 'l12_conv', data = relu14, kernel = (4, 4), stride = (2, 2), pad = (1, 1), num_filter =512)
bn15 = mx.symbol.BatchNorm(name='l13_bn', data = conv15, eps = 1e-05, momentum = 0.1, fix_gamma = False)
relu15 = mx.symbol.LeakyReLU(name = 'l14', data = bn15, slope = 0.2, act_type = 'leaky')
conv16 = mx.symbol.Convolution(name = 'l15_conv', data = relu15, kernel = (4, 4), stride = (1, 1), pad = (0, 0), num_filter =4000)
return conv16
#define inpainting model
def inpaintModel(data):
data = encoder(data)
bn21 = mx.symbol.BatchNorm(name='l16_bn', data = data, eps = 1e-05, momentum = 0.1, fix_gamma = False)
relu21 = mx.symbol.LeakyReLU(name = 'l17', data = bn21, slope = 0.2, act_type = 'leaky')
convt21 = mx.symbol.Deconvolution(name = 'l18_conv', data = relu21, kernel = (4, 4), stride = (1, 1), pad = (0, 0), num_filter = 512)
bn22 = mx.symbol.BatchNorm(name='l19_bn', data = convt21, eps = 1e-05, momentum = 0.1, fix_gamma = False)
relu22 = mx.symbol.Activation(name = 'l20', data = bn22, act_type = 'relu')
convt22 = mx.symbol.Deconvolution(name = 'l21_conv', data = relu22, kernel = (4, 4), stride = (2, 2), pad = (1, 1), num_filter = 256)
bn23 = mx.symbol.BatchNorm(name='l22_bn', data = convt22, eps = 1e-05, momentum = 0.1, fix_gamma = False)
relu23 = mx.symbol.Activation(name = 'l23', data = bn23, act_type = 'relu')
convt23 = mx.symbol.Deconvolution(name = 'l24_conv', data = relu23, kernel = (4, 4), stride = (2, 2), pad = (1, 1), num_filter = 128)
bn24 = mx.symbol.BatchNorm(name='l25_bn', data = convt23, eps = 1e-05, momentum = 0.1, fix_gamma = False)
relu24 = mx.symbol.Activation(name = 'l26', data = bn24, act_type = 'relu')
convt24 = mx.symbol.Deconvolution(name = 'l27_conv', data = relu24, kernel = (4, 4), stride = (2, 2), pad = (1, 1), num_filter = 64)
bn25 = mx.symbol.BatchNorm(name='l28_bn', data = convt24, eps = 1e-05, momentum = 0.1, fix_gamma = False)
relu25 = mx.symbol.Activation(name = 'l29', data = bn25, act_type = 'relu')
convt25 = mx.symbol.Deconvolution(name = 'l30_conv', data = relu25, kernel = (4, 4), stride = (2, 2), pad = (1, 1), num_filter = 3)
tanh21 = mx.symbol.Activation(name = 'l31', data = convt25, act_type = 'tanh')
return tanh21
save_path = './msgpack/'
arg_params, aux_params = import_params(save_path)
print('Successfully imported %d argument parameters and %d auxiliary states' % (len(arg_params), len(aux_params)) )
print(arg_params)
print(aux_params)
data = mx.symbol.Variable('data')
sym = inpaintModel(data)
mod_mx = mx.mod.Module(symbol = sym, context = mx.cpu())
mod_mx.bind(for_training = False, data_shapes = [('data', (1, 3, 128, 128))])
mod_mx.set_params(arg_params, aux_params)
mod_mx.save_checkpoint('ip_mxnet', 0)
print('ok')
| June01/torch2mxnet_msgpack | build_model_mx.py | Python | mit | 4,192 |
####################################
# This file was created by Bohrium.
# It allows you to run NumPy code (cells) as Bohrium, by using the magic command
# `%%bohrium` in your cells, e.g.:
#
# %%bohrium
# print(numpy)
# print(numpy.arange(10))
####################################
from IPython.core.magic import register_cell_magic
try:
import bohrium
have_bohrium = True
@bohrium.replace_numpy
def execute(__code):
exec(__code, globals(), locals())
__excludes = set(["__excludes", "__code", "np", "bohrium"])
try:
# Python 2.x
for key, value in locals().iteritems():
if key not in __excludes:
globals()[key] = value
except:
# Python 3.x
for key, value in locals().items():
if key not in __excludes:
globals()[key] = value
except ImportError:
    warning_shown = False  # Warning about missing bohrium has been shown
def execute(__code):
global warning_shown
if not warning_shown:
print("WARNING: Module bohrium could not be imported.\n"
" The magic command '%%bohrium' will have no effect.")
warning_shown = True
exec(__code, globals())
@register_cell_magic
def bohrium(line, cell):
# Code must end with \n
code = cell if cell.endswith("\n") else cell + "\n"
execute(code)
return
| madsbk/bohrium | ipython-magic.py | Python | apache-2.0 | 1,459 |
from ztag.transform import ZGrabTransform, ZMapTransformOutput
from ztag import protocols, errors
from ztag.transform import Transformable
class FTPTransform(ZGrabTransform):
name = "ftp/generic"
port = 21
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
def __init__(self, *args, **kwargs):
super(FTPTransform, self).__init__(*args, **kwargs)
def _transform_object(self, obj):
ftp_banner = obj
ftp = Transformable(obj)
zout = ZMapTransformOutput()
error = ftp['error'].resolve()
if error is not None:
raise errors.IgnoreObject("Error")
out = dict()
banner = ftp['data']['banner'].resolve()
if banner is not None:
out['banner'] = self.clean_banner(banner)
if len(out) == 0:
raise errors.IgnoreObject("Empty output dict")
out['ip_address'] = obj['ip']
out['timestamp'] = obj['timestamp']
zout.transformed = out
return zout
| zmap/ztag | ztag/transforms/ftp.py | Python | apache-2.0 | 1,011 |
import urlparse
from functools import wraps
from django.db.models import Q
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.decorators import available_attrs, method_decorator
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from django.contrib.auth import REDIRECT_FIELD_NAME
from privileges.forms import GrantForm
from privileges.models import Grant
def owner_required(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if request.user.is_authenticated():
if request.user.username == kwargs["username"] or \
request.user.is_superuser:
return view_func(request, *args, **kwargs)
path = request.build_absolute_uri()
login_scheme, login_netloc = urlparse.urlparse(settings.LOGIN_URL)[:2]
current_scheme, current_netloc = urlparse.urlparse(path)[:2]
if (
(not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)
):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(path, redirect_field_name=REDIRECT_FIELD_NAME)
return _wrapped_view
def cbv_decorator(decorator):
def _decorator(cls):
cls.dispatch = method_decorator(decorator)(cls.dispatch)
return cls
return _decorator
class UsernameContextMixin(object):
def get_context_data(self, **kwargs):
context = super(UsernameContextMixin, self).get_context_data(**kwargs)
context.update({
"username": self.kwargs.get("username")
})
return context
@cbv_decorator(owner_required)
class GrantListView(UsernameContextMixin, ListView):
model = Grant
def get_queryset(self):
username = self.kwargs["username"]
return super(GrantListView, self).get_queryset().filter(
Q(grantor__username=username) | Q(grantee__username=username)
)
@cbv_decorator(owner_required)
class GrantCreateView(UsernameContextMixin, CreateView):
model = Grant
form_class = GrantForm
def get_form_kwargs(self):
kwargs = super(GrantCreateView, self).get_form_kwargs()
kwargs.update({
"user": self.request.user
})
return kwargs
def get_success_url(self):
return reverse(
"privileges_grant_list",
kwargs={"username": self.request.user.username}
)
@cbv_decorator(owner_required)
class GrantUpdateView(UsernameContextMixin, UpdateView):
model = Grant
form_class = GrantForm
def get_form_kwargs(self):
kwargs = super(GrantUpdateView, self).get_form_kwargs()
kwargs.update({
"user": self.request.user
})
return kwargs
def get_success_url(self):
return reverse(
"privileges_grant_list",
kwargs={"username": self.request.user.username}
)
@cbv_decorator(owner_required)
class GrantDeleteView(UsernameContextMixin, DeleteView):
model = Grant
def get_success_url(self):
return reverse(
"privileges_grant_list",
kwargs={"username": self.request.user.username}
)
| jacobwegner/privileges | privileges/views.py | Python | bsd-3-clause | 3,352 |
#!/usr/bin/env python
from mininet.topo import Topo
class TowerTopo( Topo ):
"""Create a tower topology"""
def build( self, k=4, h=6 ):
spines = []
leaves = []
hosts = []
# Create the two spine switches
spines.append(self.addSwitch('s1'))
spines.append(self.addSwitch('s2'))
# Create two links between the spine switches
self.addLink(spines[0], spines[1])
#TODO add second link between spines when multi-link topos are supported
#self.addLink(spines[0], spines[1])
# Now create the leaf switches, their hosts and connect them together
i = 1
c = 0
while i <= k:
leaves.append(self.addSwitch('s1%d' % i))
for spine in spines:
self.addLink(leaves[i-1], spine)
j = 1
while j <= h:
hosts.append(self.addHost('h%d%d' % (i, j)))
self.addLink(hosts[c], leaves[i-1])
j+=1
c+=1
i+=1
topos = { 'tower': TowerTopo }
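# Hedged usage note (illustration only): the `topos` dict above is the hook the
# Mininet CLI looks for, so this topology can also be loaded directly with
# something like `sudo mn --custom topo.py --topo tower` (the file name and the
# sudo invocation are assumptions about the local setup).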
if __name__ == '__main__':
from onosnet import run
run( TowerTopo() )
| gunine/onos-byon | topos/topo.py | Python | apache-2.0 | 1,161 |
import threading
import traceback
import warnings
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
from _pydev_bundle.pydev_imports import xmlrpclib, _queue
Queue = _queue.Queue
from _pydevd_bundle.pydevd_constants import *
#This may happen in IronPython (in Python it shouldn't happen as there are
#'fast' replacements that are used in xmlrpclib.py)
warnings.filterwarnings(
'ignore', 'The xmllib module is obsolete.*', DeprecationWarning)
file_system_encoding = getfilesystemencoding()
#=======================================================================================================================
# _ServerHolder
#=======================================================================================================================
class _ServerHolder:
'''
Helper so that we don't have to use a global here.
'''
SERVER = None
#=======================================================================================================================
# set_server
#=======================================================================================================================
def set_server(server):
_ServerHolder.SERVER = server
#=======================================================================================================================
# ParallelNotification
#=======================================================================================================================
class ParallelNotification(object):
def __init__(self, method, args):
self.method = method
self.args = args
def to_tuple(self):
return self.method, self.args
#=======================================================================================================================
# KillServer
#=======================================================================================================================
class KillServer(object):
pass
#=======================================================================================================================
# ServerFacade
#=======================================================================================================================
class ServerFacade(object):
def __init__(self, notifications_queue):
self.notifications_queue = notifications_queue
def notifyTestsCollected(self, *args):
self.notifications_queue.put_nowait(ParallelNotification('notifyTestsCollected', args))
def notifyConnected(self, *args):
self.notifications_queue.put_nowait(ParallelNotification('notifyConnected', args))
def notifyTestRunFinished(self, *args):
self.notifications_queue.put_nowait(ParallelNotification('notifyTestRunFinished', args))
def notifyStartTest(self, *args):
self.notifications_queue.put_nowait(ParallelNotification('notifyStartTest', args))
def notifyTest(self, *args):
new_args = []
for arg in args:
new_args.append(_encode_if_needed(arg))
args = tuple(new_args)
self.notifications_queue.put_nowait(ParallelNotification('notifyTest', args))
#=======================================================================================================================
# ServerComm
#=======================================================================================================================
class ServerComm(threading.Thread):
def __init__(self, notifications_queue, port, daemon=False):
threading.Thread.__init__(self)
self.setDaemon(daemon) # If False, wait for all the notifications to be passed before exiting!
self.finished = False
self.notifications_queue = notifications_queue
from _pydev_bundle import pydev_localhost
        # It is necessary to specify an encoding that matches
        # the encoding of all byte strings passed into an
        # XMLRPC call: "All 8-bit strings in the data structure are assumed to use the
        # packet encoding. Unicode strings are automatically converted,
        # where necessary."
        # Byte strings most likely come from file names.
encoding = file_system_encoding
if encoding == "mbcs":
            # "mbcs" is the Windows symbolic name for the system encoding CP_ACP.
            # We need to convert it into an encoding that is recognized by Java.
            # Unfortunately this is not always possible. You could use
            # GetCPInfoEx and get a name similar to "windows-1251", but then
            # you would need a table to translate on a best-effort basis. Much too complicated.
            # ISO-8859-1 is good enough.
            encoding = "ISO-8859-1"
self.server = xmlrpclib.Server('http://%s:%s' % (pydev_localhost.get_localhost(), port),
encoding=encoding)
def run(self):
while True:
kill_found = False
commands = []
command = self.notifications_queue.get(block=True)
if isinstance(command, KillServer):
kill_found = True
else:
assert isinstance(command, ParallelNotification)
commands.append(command.to_tuple())
try:
while True:
command = self.notifications_queue.get(block=False) #No block to create a batch.
if isinstance(command, KillServer):
kill_found = True
else:
assert isinstance(command, ParallelNotification)
commands.append(command.to_tuple())
except:
pass #That's OK, we're getting it until it becomes empty so that we notify multiple at once.
if commands:
try:
self.server.notifyCommands(commands)
except:
traceback.print_exc()
if kill_found:
self.finished = True
return
#=======================================================================================================================
# initialize_server
#=======================================================================================================================
def initialize_server(port, daemon=False):
if _ServerHolder.SERVER is None:
if port is not None:
notifications_queue = Queue()
_ServerHolder.SERVER = ServerFacade(notifications_queue)
_ServerHolder.SERVER_COMM = ServerComm(notifications_queue, port, daemon)
_ServerHolder.SERVER_COMM.start()
else:
#Create a null server, so that we keep the interface even without any connection.
_ServerHolder.SERVER = Null()
_ServerHolder.SERVER_COMM = Null()
try:
_ServerHolder.SERVER.notifyConnected()
except:
traceback.print_exc()
#=======================================================================================================================
# notifyTest
#=======================================================================================================================
def notifyTestsCollected(tests_count):
assert tests_count is not None
try:
_ServerHolder.SERVER.notifyTestsCollected(tests_count)
except:
traceback.print_exc()
#=======================================================================================================================
# notifyStartTest
#=======================================================================================================================
def notifyStartTest(file, test):
'''
@param file: the tests file (c:/temp/test.py)
@param test: the test ran (i.e.: TestCase.test1)
'''
assert file is not None
if test is None:
test = '' #Could happen if we have an import error importing module.
try:
_ServerHolder.SERVER.notifyStartTest(file, test)
except:
traceback.print_exc()
def _encode_if_needed(obj):
# In the java side we expect strings to be ISO-8859-1 (org.python.pydev.debug.pyunit.PyUnitServer.initializeDispatches().new Dispatch() {...}.getAsStr(Object))
if not IS_PY3K:
if isinstance(obj, str):
try:
return xmlrpclib.Binary(obj.decode(sys.stdin.encoding).encode('ISO-8859-1', 'xmlcharrefreplace'))
except:
return xmlrpclib.Binary(obj)
elif isinstance(obj, unicode):
return xmlrpclib.Binary(obj.encode('ISO-8859-1', 'xmlcharrefreplace'))
else:
if isinstance(obj, str): # Unicode in py3
return xmlrpclib.Binary(obj.encode('ISO-8859-1', 'xmlcharrefreplace'))
elif isinstance(obj, bytes):
try:
return xmlrpclib.Binary(obj.decode(sys.stdin.encoding).encode('ISO-8859-1', 'xmlcharrefreplace'))
except:
return xmlrpclib.Binary(obj) #bytes already
return obj
#=======================================================================================================================
# notifyTest
#=======================================================================================================================
def notifyTest(cond, captured_output, error_contents, file, test, time):
'''
@param cond: ok, fail, error
@param captured_output: output captured from stdout
    @param error_contents: output captured from stderr
@param file: the tests file (c:/temp/test.py)
@param test: the test ran (i.e.: TestCase.test1)
@param time: float with the number of seconds elapsed
'''
assert cond is not None
assert captured_output is not None
assert error_contents is not None
assert file is not None
if test is None:
test = '' #Could happen if we have an import error importing module.
assert time is not None
try:
captured_output = _encode_if_needed(captured_output)
error_contents = _encode_if_needed(error_contents)
_ServerHolder.SERVER.notifyTest(cond, captured_output, error_contents, file, test, time)
except:
traceback.print_exc()
#=======================================================================================================================
# notifyTestRunFinished
#=======================================================================================================================
def notifyTestRunFinished(total_time):
assert total_time is not None
try:
_ServerHolder.SERVER.notifyTestRunFinished(total_time)
except:
traceback.print_exc()
#=======================================================================================================================
# force_server_kill
#=======================================================================================================================
def force_server_kill():
_ServerHolder.SERVER_COMM.notifications_queue.put_nowait(KillServer())
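if __name__ == '__main__':
    # Hedged usage sketch (illustration only, not part of the original module):
    # passing port=None makes initialize_server() install the Null stand-ins,
    # so the notification sequence below runs as a harmless no-op without a
    # listening pydev server. The file and test names are assumptions.
    initialize_server(None)
    notifyTestsCollected(1)
    notifyStartTest('c:/temp/test.py', 'TestCase.test1')
    notifyTest('ok', '', '', 'c:/temp/test.py', 'TestCase.test1', 0.01)
    notifyTestRunFinished(0.01)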
| asedunov/intellij-community | python/helpers/pydev/_pydev_runfiles/pydev_runfiles_xml_rpc.py | Python | apache-2.0 | 11,010 |
from alleles_fixation import *
| lorix-lpan/alleles-fixation | alleles_fixation/__init__.py | Python | gpl-3.0 | 31 |
from getpass import getpass
from pycuc.japi import UnityAPI
import yaml
class Config:
def __init__(self, filename):
with open(filename, 'r') as stream:
            config = yaml.safe_load(stream)
self.unity_url = config['unity_url']
def get_input(message):
return input(message)
def get_username_password():
"""
Uses get_input function to collect username and password
:return: Tuple of username and password
"""
username = get_input("Please enter your username for unity: ")
password = getpass(prompt="Password: ")
return username, password
def find_users_without(api):
"""
Wires together the required API Calls to Unity to find users without SingleInbox
:param api: UnityAPI Object
:return: List of users without SingleInbox or Empty List if None
"""
users = api.get_list_of_users()
no_single_inbox = []
for user in users:
service_uri = user["ExternalServiceAccountsURI"]
service_accounts = api.get(service_uri)
if service_accounts['@total'] == "0":
no_single_inbox.append(user['Alias'])
return no_single_inbox
def ldap_user(api, alias):
"""
Checks the UnityAPI to determine if user is synced to ActiveDirectory. Returns Boolean Result
:param api: UnityAPI Object
:param alias: Alias to search for
:return: Boolean
"""
user_config = api.get_user_config(alias)
if user_config["LdapType"] == "3":
return True
else:
return False
def main():
script_config = Config('config.yml')
un, pw = get_username_password()
api = UnityAPI(username=un, password=pw, url=script_config.unity_url)
no_single_inbox = find_users_without(api)
filtered_users = []
for alias in no_single_inbox:
if ldap_user(api, alias):
filtered_users.append(alias)
print(filtered_users)
if __name__ == "__main__":
main()
| shepherdjay/cisco-unified-scripts | singleinbox.py | Python | mit | 1,928 |
import glob
import inspect
import io
import logging
import os
import pandas as pd
import shutil
from typing import Any, Dict, Union
import ray
import ray.cloudpickle as pickle
from ray.tune.registry import _ParameterRegistry
from ray.tune.utils import detect_checkpoint_function
from ray.util import placement_group
from six import string_types
logger = logging.getLogger(__name__)
class TrainableUtil:
@staticmethod
def process_checkpoint(
checkpoint: Union[Dict, str], parent_dir: str, trainable_state: Dict
) -> str:
"""Creates checkpoint file structure and writes metadata
under `parent_dir`.
The file structure could either look like:
- checkpoint_00000 (returned path)
-- .is_checkpoint
-- .tune_metadata
-- xxx.pkl (or whatever user specifies in their Trainable)
Or,
- checkpoint_00000
-- .is_checkpoint
-- checkpoint (returned path)
-- checkpoint.tune_metadata
"""
saved_as_dict = False
if isinstance(checkpoint, string_types):
if not checkpoint.startswith(parent_dir):
raise ValueError(
"The returned checkpoint path must be within the "
"given checkpoint dir {}: {}".format(parent_dir, checkpoint)
)
checkpoint_path = checkpoint
if os.path.isdir(checkpoint_path):
# Add trailing slash to prevent tune metadata from
# being written outside the directory.
checkpoint_path = os.path.join(checkpoint_path, "")
elif isinstance(checkpoint, dict):
saved_as_dict = True
checkpoint_path = os.path.join(parent_dir, "checkpoint")
with open(checkpoint_path, "wb") as f:
pickle.dump(checkpoint, f)
else:
raise ValueError(
"Returned unexpected type {}. "
"Expected str or dict.".format(type(checkpoint))
)
with open(checkpoint_path + ".tune_metadata", "wb") as f:
trainable_state["saved_as_dict"] = saved_as_dict
pickle.dump(trainable_state, f)
return checkpoint_path
@staticmethod
def pickle_checkpoint(checkpoint_path):
"""Pickles checkpoint data."""
checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path)
data = {}
for basedir, _, file_names in os.walk(checkpoint_dir):
for file_name in file_names:
path = os.path.join(basedir, file_name)
with open(path, "rb") as f:
data[os.path.relpath(path, checkpoint_dir)] = f.read()
# Use normpath so that a directory path isn't mapped to empty string.
name = os.path.relpath(os.path.normpath(checkpoint_path), checkpoint_dir)
name += os.path.sep if os.path.isdir(checkpoint_path) else ""
data_dict = pickle.dumps(
{
"checkpoint_name": name,
"data": data,
}
)
return data_dict
@staticmethod
def checkpoint_to_object(checkpoint_path):
data_dict = TrainableUtil.pickle_checkpoint(checkpoint_path)
out = io.BytesIO()
if len(data_dict) > 10e6: # getting pretty large
logger.info("Checkpoint size is {} bytes".format(len(data_dict)))
out.write(data_dict)
return out.getvalue()
@staticmethod
def find_checkpoint_dir(checkpoint_path):
"""Returns the directory containing the checkpoint path.
Raises:
FileNotFoundError if the directory is not found.
"""
if not os.path.exists(checkpoint_path):
raise FileNotFoundError("Path does not exist", checkpoint_path)
if os.path.isdir(checkpoint_path):
checkpoint_dir = checkpoint_path
else:
checkpoint_dir = os.path.dirname(checkpoint_path)
while checkpoint_dir != os.path.dirname(checkpoint_dir):
if os.path.exists(os.path.join(checkpoint_dir, ".is_checkpoint")):
break
checkpoint_dir = os.path.dirname(checkpoint_dir)
else:
raise FileNotFoundError(
"Checkpoint directory not found for {}".format(checkpoint_path)
)
return os.path.normpath(checkpoint_dir)
@staticmethod
def find_rel_checkpoint_dir(logdir, checkpoint_path):
"""Returns the (relative) directory name of the checkpoint.
Note, the assumption here is `logdir` should be the prefix of
`checkpoint_path`.
For example, returns `checkpoint00000/`.
"""
assert checkpoint_path.startswith(
logdir
), "expecting `logdir` to be a prefix of `checkpoint_path`"
rel_path = os.path.relpath(checkpoint_path, logdir)
tokens = rel_path.split(os.sep)
return os.path.join(tokens[0], "")
@staticmethod
def make_checkpoint_dir(checkpoint_dir, index, override=False):
"""Creates a checkpoint directory within the provided path.
Args:
checkpoint_dir (str): Path to checkpoint directory.
index (int|str): A subdirectory will be created
at the checkpoint directory named 'checkpoint_{index}'.
override (bool): Deletes checkpoint_dir before creating
a new one.
"""
suffix = "checkpoint"
if index is not None:
suffix += f"_{index:06d}" if isinstance(index, int) else f"_{index}"
checkpoint_dir = os.path.join(checkpoint_dir, suffix)
if override and os.path.exists(checkpoint_dir):
shutil.rmtree(checkpoint_dir)
os.makedirs(checkpoint_dir, exist_ok=True)
# Drop marker in directory to identify it as a checkpoint dir.
open(os.path.join(checkpoint_dir, ".is_checkpoint"), "a").close()
return checkpoint_dir
@staticmethod
def create_from_pickle(obj, tmpdir):
info = pickle.loads(obj)
data = info["data"]
checkpoint_path = os.path.join(tmpdir, info["checkpoint_name"])
for relpath_name, file_contents in data.items():
path = os.path.join(tmpdir, relpath_name)
# This may be a subdirectory, hence not just using tmpdir
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as f:
f.write(file_contents)
return checkpoint_path
@staticmethod
def get_checkpoints_paths(logdir):
"""Finds the checkpoints within a specific folder.
Returns a pandas DataFrame of training iterations and checkpoint
paths within a specific folder.
Raises:
FileNotFoundError if the directory is not found.
"""
marker_paths = glob.glob(
os.path.join(glob.escape(logdir), "checkpoint_*/.is_checkpoint")
)
iter_chkpt_pairs = []
for marker_path in marker_paths:
chkpt_dir = os.path.dirname(marker_path)
metadata_file = glob.glob(
os.path.join(glob.escape(chkpt_dir), "*.tune_metadata")
)
# glob.glob: filenames starting with a dot are special cases
# that are not matched by '*' and '?' patterns.
metadata_file += glob.glob(
os.path.join(glob.escape(chkpt_dir), ".tune_metadata")
)
metadata_file = list(set(metadata_file)) # avoid duplication
if len(metadata_file) != 1:
raise ValueError(
"{} has zero or more than one tune_metadata.".format(chkpt_dir)
)
chkpt_path = metadata_file[0][: -len(".tune_metadata")]
chkpt_iter = int(chkpt_dir[chkpt_dir.rfind("_") + 1 :])
iter_chkpt_pairs.append([chkpt_iter, chkpt_path])
chkpt_df = pd.DataFrame(
iter_chkpt_pairs, columns=["training_iteration", "chkpt_path"]
)
return chkpt_df
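def _example_checkpoint_roundtrip(root_dir: str) -> str:
    """Hedged usage sketch (illustration only, not part of the Tune API).

    Creates a checkpoint directory under ``root_dir``, drops a dummy file into
    it, and resolves the checkpoint directory back from that file path. The
    ``model.pkl`` file name is an assumption made for illustration.
    """
    chkpt_dir = TrainableUtil.make_checkpoint_dir(root_dir, index=0)
    dummy_file = os.path.join(chkpt_dir, "model.pkl")
    open(dummy_file, "wb").close()
    # The .is_checkpoint marker written by make_checkpoint_dir() is what lets
    # find_checkpoint_dir() walk back up to the checkpoint root.
    assert TrainableUtil.find_checkpoint_dir(dummy_file) == os.path.normpath(chkpt_dir)
    return chkpt_dir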
class PlacementGroupUtil:
@staticmethod
def get_remote_worker_options(
num_workers,
num_cpus_per_worker,
num_gpus_per_worker,
num_workers_per_host,
timeout_s,
) -> (Dict[str, Any], placement_group):
"""Returns the option for remote workers.
Args:
num_workers (int): Number of training workers to include in
world.
num_cpus_per_worker (int): Number of CPU resources to reserve
per training worker.
num_gpus_per_worker (int): Number of GPU resources to reserve
per training worker.
            num_workers_per_host (Optional[int]): Number of workers to
                colocate per host.
timeout_s (Optional[int]): Seconds before the torch process group
times out. Useful when machines are unreliable. Defaults
to 60 seconds. This value is also reused for triggering
placement timeouts if forcing colocation.
Returns:
            options (Dict[str, Any]): options dict that contains the CPU/GPU
                count of the remote worker and the placement group information.
            pg (placement_group): a reference to the placement group.
"""
pg = None
options = dict(num_cpus=num_cpus_per_worker, num_gpus=num_gpus_per_worker)
if num_workers_per_host:
num_hosts = int(num_workers / num_workers_per_host)
cpus_per_node = num_cpus_per_worker * num_workers_per_host
gpus_per_node = num_gpus_per_worker * num_workers_per_host
bundle = {"CPU": cpus_per_node, "GPU": gpus_per_node}
all_bundles = [bundle] * num_hosts
pg = placement_group(all_bundles, strategy="STRICT_SPREAD")
logger.debug("Waiting for placement_group to start.")
ray.get(pg.ready(), timeout=timeout_s)
logger.debug("Placement_group started.")
options["placement_group"] = pg
return options, pg
def with_parameters(trainable, **kwargs):
"""Wrapper for trainables to pass arbitrary large data objects.
This wrapper function will store all passed parameters in the Ray
object store and retrieve them when calling the function. It can thus
be used to pass arbitrary data, even datasets, to Tune trainables.
This can also be used as an alternative to ``functools.partial`` to pass
default arguments to trainables.
When used with the function API, the trainable function is called with
the passed parameters as keyword arguments. When used with the class API,
the ``Trainable.setup()`` method is called with the respective kwargs.
If the data already exists in the object store (are instances of
ObjectRef), using ``tune.with_parameters()`` is not necessary. You can
instead pass the object refs to the training function via the ``config``
or use Python partials.
Args:
trainable: Trainable to wrap.
**kwargs: parameters to store in object store.
Function API example:
.. code-block:: python
from ray import tune
def train(config, data=None):
for sample in data:
loss = update_model(sample)
tune.report(loss=loss)
data = HugeDataset(download=True)
tune.run(
tune.with_parameters(train, data=data),
# ...
)
Class API example:
.. code-block:: python
from ray import tune
class MyTrainable(tune.Trainable):
def setup(self, config, data=None):
self.data = data
self.iter = iter(self.data)
self.next_sample = next(self.iter)
def step(self):
loss = update_model(self.next_sample)
try:
self.next_sample = next(self.iter)
except StopIteration:
return {"loss": loss, done: True}
return {"loss": loss}
data = HugeDataset(download=True)
tune.run(
tune.with_parameters(MyTrainable, data=data),
# ...
)
"""
from ray.tune.trainable import Trainable
if not callable(trainable) or (
inspect.isclass(trainable) and not issubclass(trainable, Trainable)
):
raise ValueError(
f"`tune.with_parameters() only works with function trainables "
f"or classes that inherit from `tune.Trainable()`. Got type: "
f"{type(trainable)}."
)
parameter_registry = _ParameterRegistry()
ray.worker._post_init_hooks.append(parameter_registry.flush)
# Objects are moved into the object store
prefix = f"{str(trainable)}_"
for k, v in kwargs.items():
parameter_registry.put(prefix + k, v)
trainable_name = getattr(trainable, "__name__", "tune_with_parameters")
if inspect.isclass(trainable):
# Class trainable
keys = list(kwargs.keys())
class _Inner(trainable):
def setup(self, config):
setup_kwargs = {}
for k in keys:
setup_kwargs[k] = parameter_registry.get(prefix + k)
super(_Inner, self).setup(config, **setup_kwargs)
_Inner.__name__ = trainable_name
return _Inner
else:
# Function trainable
use_checkpoint = detect_checkpoint_function(trainable, partial=True)
keys = list(kwargs.keys())
def inner(config, checkpoint_dir=None):
fn_kwargs = {}
if use_checkpoint:
default = checkpoint_dir
sig = inspect.signature(trainable)
if "checkpoint_dir" in sig.parameters:
default = sig.parameters["checkpoint_dir"].default or default
fn_kwargs["checkpoint_dir"] = default
for k in keys:
fn_kwargs[k] = parameter_registry.get(prefix + k)
trainable(config, **fn_kwargs)
inner.__name__ = trainable_name
# Use correct function signature if no `checkpoint_dir` parameter
# is set
if not use_checkpoint:
def _inner(config):
inner(config, checkpoint_dir=None)
_inner.__name__ = trainable_name
if hasattr(trainable, "__mixins__"):
_inner.__mixins__ = trainable.__mixins__
return _inner
if hasattr(trainable, "__mixins__"):
inner.__mixins__ = trainable.__mixins__
return inner
| ray-project/ray | python/ray/tune/utils/trainable.py | Python | apache-2.0 | 14,643 |
#!/usr/bin/python
#import websocket
import websocket
import thread
import ssl  # required for ssl.CERT_NONE passed to run_forever() below
import time
import random
import json
dato_inicial={'com':0,'valorx':0,'valory':0,'valorz':0,'errorx':0,'errory':0,'errorz':0,'control':4}
def on_message(ws, message):
print "MENSAJE RECIBIDO DESDE LA PLATAFORM"
print message
def on_error(ws, error):
print error
def on_close(ws):
print "### closed ###"
def on_open(ws):
def run(*args):
ws.send('{"cliente":"controlador"}')
time.sleep(1)
ws.send(json.dumps(dato_inicial))
for i in range(100):
time.sleep(1)
dato={'com':random.randint(0,100),
'valorx':random.randint(0,100),
'valory':random.randint(0,100),
'valorz':random.randint(0,100),
'errorx':random.randint(0,100),
'errory':random.randint(0,100),
'errorz':random.randint(0,100),
'control':random.randint(0,4)
}
ws.send(json.dumps(dato))
print "DATO ENVIADO DESDE CONTROLADOR CORRECTAMENTE!"
time.sleep(1)
ws.close()
print "thread terminating..."
thread.start_new_thread(run, ())
if __name__ == "__main__":
websocket.enableTrace(True)
ws = websocket.WebSocketApp("ws://192.168.1.8:9300",
on_message = on_message,
on_error = on_error,
on_close = on_close)
ws.on_open = on_open
ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE}) | dexterx17/nodoSocket | clients/controlador.py | Python | mit | 1,468 |
from django.contrib import admin
from badgify.models import Award
from badgify.admin import AwardAdmin
class CustomAwardAdmin(AwardAdmin):
"""Override the default AwardAdmin class."""
list_display = ('user_fullname', 'user_email', 'badge', 'awarded_at')
search_fields = (
'user__username', 'user__email', 'user__first_name', 'user__last_name',
'badge__name', 'badge__description',
)
raw_id_fields = ('user', )
def user_fullname(self, award):
return award.user.get_full_name()
user_fullname.admin_order_field = 'user'
def user_email(self, award):
return award.user.email
user_email.admin_order_field = 'user'
admin.site.unregister(Award)
admin.site.register(Award, CustomAwardAdmin)
| tndatacommons/tndata_backend | tndata_backend/badgify_api/admin.py | Python | mit | 756 |
from core.nav_registry import register
register('sitetracker_nav.html') | evewspace/eve-wspace | evewspace/SiteTracker/nav_entries.py | Python | apache-2.0 | 72 |
"""
PaStA - Patch Stack Analysis
Copyright (c) OTH Regensburg, 2017-2019
Author:
Ralf Ramsauer <[email protected]>
This work is licensed under the terms of the GNU GPL, version 2. See
the COPYING file in the top-level directory.
"""
import argparse
import os
import sys
from logging import getLogger
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pypasta import *
log = getLogger(__name__[-15:])
repo = None
def get_youngest(repo, commits, commit_date):
commits = list(commits)
youngest = commits[0]
oldest = commits[0]
if len(commits) == 1:
return oldest, youngest
if commit_date:
for commit in commits[1:]:
if repo[youngest].committer.date > repo[commit].committer.date:
youngest = commit
if repo[oldest].committer.date < repo[commit].committer.date:
oldest = commit
else:
for commit in commits[1:]:
if repo[youngest].author.date > repo[commit].author.date:
youngest = commit
if repo[oldest].author.date < repo[commit].author.date:
oldest = commit
return oldest, youngest
def upstream_duration_of_group(group):
def ymd(dt):
return dt.strftime('%Y-%m-%d')
untagged, tagged = group
# get youngest mail and youngest upstream commit
oldest_mail, youngest_mail = get_youngest(repo, untagged, False)
_, youngest_upstream = get_youngest(repo, tagged, True)
oldest_mail_date = repo[oldest_mail].author.date
youngest_mail_date = repo[youngest_mail].author.date
youngest_upstream_date = repo[youngest_upstream].committer.date
delta = youngest_upstream_date - youngest_mail_date
delta = delta.days
return youngest_upstream, ymd(youngest_mail_date), ymd(oldest_mail_date), \
ymd(youngest_upstream_date), len(untagged), len(tagged), delta
def upstream_duration(config, argv):
parser = argparse.ArgumentParser(prog='upstream_duration',
description='upstream_time')
args = parser.parse_args(argv)
global repo
repo = config.repo
_, cluster = config.load_cluster()
if config.mode == Config.Mode.MBOX:
config.load_ccache_mbox()
else:
config.load_ccache_stack()
log.info('Starting evaluation.')
pool = Pool(cpu_count())
tagged_only = [(d, u) for d, u in cluster.iter_split() if len(u) and len(d)]
result = list(tqdm(pool.imap(upstream_duration_of_group, tagged_only), total=len(tagged_only)))
pool.close()
pool.join()
log.info(' ↪ done.')
# sort by upstream duration
result.sort(key = lambda x: x[3])
# save raw results
with open(config.f_upstream_duration, 'w') as f:
f.write('rep first_submission last_submission integration num_equiv num_up dur\n')
for line in result:
f.write('%s %s %s %s %d %d %d\n' % line)
| lfd/PaStA | bin/pasta_upstream_duration.py | Python | gpl-2.0 | 3,015 |
from django.conf.urls import patterns, include, url
from django.conf import settings
from .views import HomeView, AvatarlessProfileEditView
urlpatterns = patterns('',
url(r'^$', HomeView.as_view(), name="home"),
url(r'^lekcje/', include('catalogue.urls')),
url(r'^info/(?P<url>.*)$', 'django.contrib.flatpages.views.flatpage',
name="info"),
url(r'^szukaj/', include('haystack.urls')),
url(r'^zglos/', include('contact.urls')),
url(r'^forum/profile/edit/$', AvatarlessProfileEditView.as_view(), name='edit_profile'),
url(r'^forum/', include('forum.urls')),
url(r'^forum/', include('pybb.urls', namespace='pybb')),
url(r'^kompetencje/', include('curriculum.urls')),
url(r'^wtem/', include('wtem.urls')),
)
# Admin stuff, if necessary.
if 'django.contrib.admin' in settings.INSTALLED_APPS:
from django.contrib import admin
admin.autodiscover()
if 'django_cas' in settings.INSTALLED_APPS:
urlpatterns += patterns('',
(r'^admin/logout/$', 'django_cas.views.logout'),
)
urlpatterns += patterns('',
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
)
# Auth stuff, if necessary
if 'django_cas' in settings.INSTALLED_APPS:
urlpatterns += patterns('',
url(r'^accounts/login/$', 'django_cas.views.login', name='login'),
url(r'^accounts/logout/$', 'django_cas.views.logout', name='logout'),
)
if settings.DEBUG:
urlpatterns += patterns('',
url(r'^media/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': settings.MEDIA_ROOT,
}),
)
| fnp/edumed | edumed/urls.py | Python | agpl-3.0 | 1,669 |
import contextvars
import gettext
import os
from telebot.asyncio_handler_backends import BaseMiddleware
try:
from babel.support import LazyProxy
babel_imported = True
except ImportError:
babel_imported = False
class I18N(BaseMiddleware):
"""
This middleware provides high-level tool for internationalization
It is based on gettext util.
"""
context_lang = contextvars.ContextVar('language', default=None)
def __init__(self, translations_path, domain_name: str):
super().__init__()
self.update_types = self.process_update_types()
self.path = translations_path
self.domain = domain_name
self.translations = self.find_translations()
@property
def available_translations(self):
return list(self.translations)
def gettext(self, text: str, lang: str = None):
"""
Singular translations
"""
if lang is None:
lang = self.context_lang.get()
if lang not in self.translations:
return text
translator = self.translations[lang]
return translator.gettext(text)
def ngettext(self, singular: str, plural: str, lang: str = None, n=1):
"""
Plural translations
"""
if lang is None:
lang = self.context_lang.get()
if lang not in self.translations:
if n == 1:
return singular
return plural
translator = self.translations[lang]
return translator.ngettext(singular, plural, n)
def lazy_gettext(self, text: str, lang: str = None):
if not babel_imported:
raise RuntimeError('babel module is not imported. Check that you installed it.')
return LazyProxy(self.gettext, text, lang, enable_cache=False)
def lazy_ngettext(self, singular: str, plural: str, lang: str = None, n=1):
if not babel_imported:
raise RuntimeError('babel module is not imported. Check that you installed it.')
return LazyProxy(self.ngettext, singular, plural, lang, n, enable_cache=False)
async def get_user_language(self, obj):
"""
You need to override this method and return user language
"""
raise NotImplementedError
def process_update_types(self) -> list:
"""
You need to override this method and return any update types which you want to be processed
"""
raise NotImplementedError
async def pre_process(self, message, data):
"""
        The context language variable is set each time an update of one of the
        'process_update_types' arrives; its value is the result of the
        'get_user_language' method.
"""
self.context_lang.set(await self.get_user_language(obj=message))
async def post_process(self, message, data, exception):
pass
def find_translations(self):
"""
Looks for translations with passed 'domain' in passed 'path'
"""
if not os.path.exists(self.path):
raise RuntimeError(f"Translations directory by path: {self.path!r} was not found")
result = {}
for name in os.listdir(self.path):
translations_path = os.path.join(self.path, name, 'LC_MESSAGES')
if not os.path.isdir(translations_path):
continue
po_file = os.path.join(translations_path, self.domain + '.po')
mo_file = po_file[:-2] + 'mo'
if os.path.isfile(po_file) and not os.path.isfile(mo_file):
raise FileNotFoundError(f"Translations for: {name!r} were not compiled!")
with open(mo_file, 'rb') as file:
result[name] = gettext.GNUTranslations(file)
return result
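# ---------------------------------------------------------------------------
# Hedged usage sketch (illustration only, not part of the original middleware).
# It shows the two methods a concrete subclass must override; the 'message'
# update type, the 'locales' path, the 'messages' domain and the bot wiring in
# the trailing comment are assumptions, not requirements of this module.
# ---------------------------------------------------------------------------
class ExampleI18N(I18N):
    def process_update_types(self) -> list:
        # Translate only plain messages in this sketch.
        return ['message']

    async def get_user_language(self, obj):
        # Telegram usually reports the sender's language on the update;
        # fall back to English when it is missing.
        return getattr(obj.from_user, 'language_code', None) or 'en'


# Typical wiring, kept as comments so importing this file has no side effects:
# i18n = ExampleI18N(translations_path='locales', domain_name='messages')
# _ = i18n.gettext                # use _('text') inside handlers
# bot.setup_middleware(i18n)      # assumes an AsyncTeleBot instance named `bot`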
| eternnoir/pyTelegramBotAPI | examples/asynchronous_telebot/middleware/i18n_middleware_example/i18n_base_midddleware.py | Python | gpl-2.0 | 3,751 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""\
A silly little calculator implemented in JPython using
Java components for the UI.
Rony G. Flatscher, 2006-08-08
"""
import java
from java import awt
p = bsf.lookupBean('centerPanel')
p.setLayout ( awt.BorderLayout () )
p.add ("Center", java.awt.Label ("Middle from Jython"))
p.add ("North", java.awt.TextField ("north text from Jython"))
p.add ("South", java.awt.TextField ("south text from Jython"))
p.add ("East", java.awt.Button ("inner east from Jython"))
p.add ("West", java.awt.Button ("inner west from Jython"))
p.setBackground (java.awt.Color.orange)
f = p.getParent ()
f.setTitle ("Hello from Jython (title reset from Jython)")
| apache/commons-bsf | samples/scriptedui/ui.py | Python | apache-2.0 | 1,440 |
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <[email protected]> (c) 2017-2022
# ryanss <[email protected]> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from datetime import date
from holidays.constants import JAN, MAR, MAY, SEP, OCT, DEC
from holidays.holiday_base import HolidayBase
from holidays.utils import _islamic_to_gre
class Uzbekistan(HolidayBase):
"""
https://www.officeholidays.com/countries/uzbekistan
"""
country = "UZ"
def __init__(self, **kwargs):
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
"""Populate the holidays for a given year"""
# New Year's holiday
self[date(year, JAN, 1)] = "New Year"
# Women's Day
self[date(year, MAR, 8)] = "Women's Day"
# Nauryz Holiday
self[date(year, MAR, 21)] = "Nauryz"
# Ramadan Khait
        # The date of observance is announced yearly; this is an estimate.
for hol_date in _islamic_to_gre(year, 10, 1):
self[hol_date] = "Ramadan Khait"
# Memorial Day
self[date(year, MAY, 9)] = "Memorial Day"
# Kurban Khait
        # The date of observance is announced yearly; this is an estimate.
for hol_date in _islamic_to_gre(year, 12, 10):
self[hol_date] = "Kurban Khait"
# Independence Day
self[date(year, SEP, 1)] = "Independence Day"
# Teacher's Day
self[date(year, OCT, 1)] = "Teacher's Day"
# Constitution Day
self[date(year, DEC, 8)] = "Constitution"
class UZ(Uzbekistan):
pass
class UZB(Uzbekistan):
pass
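if __name__ == "__main__":
    # Hedged usage sketch (illustration only, not part of the library module):
    # membership tests behave like any other python-holidays calendar. The
    # dates below are the fixed-date holidays defined in _populate() above.
    uz_holidays = Uzbekistan(years=2022)
    assert date(2022, 9, 1) in uz_holidays       # Independence Day
    assert date(2022, 1, 2) not in uz_holidays   # an ordinary day
    print(uz_holidays.get(date(2022, 12, 8)))    # -> Constitution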
| dr-prodigy/python-holidays | holidays/countries/uzbekistan.py | Python | mit | 1,955 |
# -*- coding: utf-8 -*-
import pyparsing as pp
from config_cwr.accessor import CWRConfiguration
from data_cwr.accessor import CWRTables
"""
Grammar rules for concrete CWR Table/List Lookup fields.
These fields all apply the same kind of constraint: all the values not
contained in a defined collection are rejected.
The only exception is when the field is set as optional. Then an empty string
composed of whitespaces is allowed.
Additionally, the usual constraints of the Alphanumeric field (non lowercase
ASCII) apply.
All the values are read from the lists contained in the library, through the
CWRTables class.
"""
__author__ = 'Bernardo Martínez Garrido'
__license__ = 'MIT'
__status__ = 'Development'
"""
Configuration classes.
"""
# Acquires data sources
_tables = CWRTables()
_config = CWRConfiguration()
"""
Fields.
"""
def char_code(columns, name=None):
"""
Character set code field.
:param name: name for the field
:return: an instance of the Character set code field rules
"""
if name is None:
name = 'Char Code Field (' + str(columns) + ' columns)'
if columns <= 0:
raise BaseException()
char_sets = None
for char_set in _tables.get_data('character_set'):
regex = '[ ]{' + str(15 - len(char_set)) + '}' + char_set
if char_sets is None:
char_sets = regex
else:
char_sets += '|' + regex
# Accepted sets
_character_sets = pp.Regex(char_sets)
_unicode_1_16b = pp.Regex('U\+0[0-8,A-F]{3}[ ]{' + str(columns - 6) + '}')
_unicode_2_21b = pp.Regex('U\+0[0-8,A-F]{4}[ ]{' + str(columns - 7) + '}')
# Basic field
char_code_field = (_character_sets | _unicode_1_16b | _unicode_2_21b)
# Parse action
char_code_field = char_code_field.setParseAction(lambda s: s[0].strip())
# Name
char_code_field.setName(name)
return char_code_field
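if __name__ == '__main__':
    # Hedged usage sketch (illustration only, not part of the grammar module).
    # A 15-column field accepts either a table value or a Unicode code point
    # padded with spaces to the full column width; the sample input below is an
    # assumption, not a value taken from the CWR tables.
    _example_field = char_code(15)
    print(_example_field.parseString('U+0041' + ' ' * 9)[0])  # -> U+0041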
| weso/CWR-DataApi | cwr/grammar/field/table.py | Python | mit | 1,900 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from six import reraise
from tblib import Traceback
import numpy as np
import paddle.fluid as fluid  # used by to_lodtensor() below
def to_lodtensor(data, place):
"""convert tensor to lodtensor
"""
seq_lens = [len(seq) for seq in data]
cur_len = 0
lod = [cur_len]
for l in seq_lens:
cur_len += l
lod.append(cur_len)
    flattened_data = np.concatenate(data, axis=0).astype("int64")
flattened_data = flattened_data.reshape([len(flattened_data), 1])
res = fluid.LoDTensor()
res.set(flattened_data, place)
res.set_lod([lod])
return res
def lodtensor_to_ndarray(lod_tensor):
"""conver lodtensor to ndarray
"""
dims = lod_tensor._get_dims()
ret = np.zeros(shape=dims).astype('float32')
for i in xrange(np.product(dims)):
ret.ravel()[i] = lod_tensor.get_float_element(i)
return ret, lod_tensor.lod()
def split_infer_result(infer_seq, lod):
infer_batch = []
for i in xrange(0, len(lod[0]) - 1):
infer_batch.append(infer_seq[lod[0][i]:lod[0][i + 1]])
return infer_batch
class CriticalException(Exception):
pass
def suppress_signal(signo, stack_frame):
pass
def suppress_complaints(verbose, notify=None):
def decorator_maker(func):
        def suppress_wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except:
et, ev, tb = sys.exc_info()
if notify is not None:
notify(except_type=et, except_value=ev, traceback=tb)
if verbose == 1 or isinstance(ev, CriticalException):
reraise(et, ev, Traceback(tb).as_traceback())
        return suppress_wrapper
return decorator_maker
class ForceExitWrapper(object):
def __init__(self, exit_flag):
self._exit_flag = exit_flag
@suppress_complaints(verbose=0)
def __call__(self, *args, **kwargs):
self._exit_flag.value = True
def __eq__(self, flag):
return self._exit_flag.value == flag
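if __name__ == '__main__':
    # Hedged usage sketch (illustration only): with verbose=0 and no notify
    # callback, suppress_complaints swallows ordinary exceptions; only a
    # CriticalException (or verbose=1) would be re-raised.
    @suppress_complaints(verbose=0)
    def _flaky():
        raise ValueError('silently ignored')

    _flaky()  # returns without raising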
| lcy-seso/models | fluid/DeepASR/data_utils/util.py | Python | apache-2.0 | 2,094 |
#!/usr/bin/env python
import os
from app import create_app
from flask.ext.script import Manager, Server
application = create_app(os.getenv('DM_ENVIRONMENT') or 'development')
manager = Manager(application)
manager.add_command("runserver", Server(port=5001))
@manager.command
def update_index(index_name):
from app.main.services.search_service import create_index
with application.app_context():
message, status = create_index(index_name)
assert status == 200, message
application.logger.info("Created index %s", index_name)
if __name__ == '__main__':
manager.run()
| RichardKnop/digitalmarketplace-search-api | application.py | Python | mit | 599 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for separable convolutional layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class SeparableConv1DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
length = 7
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.SeparableConv1D,
kwargs=kwargs,
input_shape=(num_samples, length, stack_size))
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2}),
('padding_causal', {'padding': 'causal'}),
('strides', {'strides': 2}),
('dilation_rate', {'dilation_rate': 2}),
('depth_multiplier', {'depth_multiplier': 2}),
)
def test_separable_conv1d(self, kwargs):
kwargs['filters'] = 2
kwargs['kernel_size'] = 3
self._run_test(kwargs)
def test_separable_conv1d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'depthwise_regularizer': 'l2',
'pointwise_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.SeparableConv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones((1, 5, 2))))
self.assertEqual(len(layer.losses), 4)
def test_separable_conv1d_constraints(self):
d_constraint = lambda x: x
p_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'pointwise_constraint': p_constraint,
'depthwise_constraint': d_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.SeparableConv1D(**kwargs)
layer.build((None, 5, 2))
self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
self.assertEqual(layer.pointwise_kernel.constraint, p_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
@keras_parameterized.run_all_keras_modes
class SeparableConv2DTest(keras_parameterized.TestCase):
def _run_test(self, kwargs):
num_samples = 2
stack_size = 3
num_row = 7
num_col = 6
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.SeparableConv2D,
kwargs=kwargs,
input_shape=(num_samples, num_row, num_col, stack_size))
@parameterized.named_parameters(
('padding_valid', {'padding': 'valid'}),
('padding_same', {'padding': 'same'}),
('padding_same_dilation_2', {'padding': 'same', 'dilation_rate': 2}),
('strides', {'strides': 2}),
# Only runs on GPU with CUDA, channels_first is not supported on CPU.
# TODO(b/62340061): Support channels_first on CPU.
('data_format', {'data_format': 'channels_first'}),
('dilation_rate', {'dilation_rate': 2}),
('depth_multiplier', {'depth_multiplier': 2}),
)
def test_separable_conv2d(self, kwargs):
kwargs['filters'] = 2
kwargs['kernel_size'] = 3
if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
self._run_test(kwargs)
def test_separable_conv2d_regularizers(self):
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'depthwise_regularizer': 'l2',
'pointwise_regularizer': 'l2',
'bias_regularizer': 'l2',
'activity_regularizer': 'l2',
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.SeparableConv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(len(layer.losses), 3)
layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
self.assertEqual(len(layer.losses), 4)
def test_separable_conv2d_constraints(self):
d_constraint = lambda x: x
p_constraint = lambda x: x
b_constraint = lambda x: x
kwargs = {
'filters': 3,
'kernel_size': 3,
'padding': 'valid',
'pointwise_constraint': p_constraint,
'depthwise_constraint': d_constraint,
'bias_constraint': b_constraint,
'strides': 1
}
with self.cached_session(use_gpu=True):
layer = keras.layers.SeparableConv2D(**kwargs)
layer.build((None, 5, 5, 2))
self.assertEqual(layer.depthwise_kernel.constraint, d_constraint)
self.assertEqual(layer.pointwise_kernel.constraint, p_constraint)
self.assertEqual(layer.bias.constraint, b_constraint)
| kevin-coder/tensorflow-fork | tensorflow/python/keras/layers/separable_convolutional_test.py | Python | apache-2.0 | 5,792 |
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
import contextlib
import os
import re
import sys
import warnings
from io import StringIO
from os.path import abspath, dirname, join
from typing import Iterator, List, TextIO
import pytest
from pylint.lint import Run
HERE = abspath(dirname(__file__))
DATA = join(HERE, "regrtest_data", "duplicate_code")
CLEAN_PATH = re.escape(dirname(dirname(__file__)) + os.path.sep)
@contextlib.contextmanager
def _patch_streams(out: TextIO) -> Iterator:
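    """Temporarily redirect both stdout and stderr to *out* for the duration of the block."""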
sys.stderr = sys.stdout = out
try:
yield
finally:
sys.stderr = sys.__stderr__
sys.stdout = sys.__stdout__
class TestSimilarCodeChecker:
def _runtest(self, args: List[str], code: int) -> None:
"""Runs the tests and sees if output code is as expected."""
out = StringIO()
pylint_code = self._run_pylint(args, out=out)
output = out.getvalue()
msg = f"expected output status {code}, got {pylint_code}"
if output is not None:
msg = f"{msg}. Below pylint output: \n{output}"
assert pylint_code == code, msg
@staticmethod
def _run_pylint(args: List[str], out: TextIO) -> int:
"""Runs pylint with a patched output."""
args = args + ["--persistent=no"]
with _patch_streams(out):
with pytest.raises(SystemExit) as cm:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
Run(args)
return cm.value.code
@staticmethod
def _clean_paths(output: str) -> str:
"""Normalize path to the tests directory."""
output = re.sub(CLEAN_PATH, "", output, flags=re.MULTILINE)
return output.replace("\\", "/")
def _test_output(self, args: List[str], expected_output: str) -> None:
"""Tests if the output of a pylint run is as expected."""
out = StringIO()
self._run_pylint(args, out=out)
actual_output = self._clean_paths(out.getvalue())
expected_output = self._clean_paths(expected_output)
assert expected_output.strip() in actual_output.strip()
def test_duplicate_code_raw_strings_all(self) -> None:
"""Test similar lines in 3 similar files."""
path = join(DATA, "raw_strings_all")
expected_output = "Similar lines in 2 files"
self._test_output(
[path, "--disable=all", "--enable=duplicate-code"],
expected_output=expected_output,
)
def test_duplicate_code_raw_strings_disable_file(self) -> None:
"""Tests disabling duplicate-code at the file level in a single file."""
path = join(DATA, "raw_strings_disable_file")
expected_output = "Similar lines in 2 files"
self._test_output(
[path, "--disable=all", "--enable=duplicate-code"],
expected_output=expected_output,
)
def test_duplicate_code_raw_strings_disable_file_double(self) -> None:
"""Tests disabling duplicate-code at the file level in two files."""
path = join(DATA, "raw_strings_disable_file_double")
self._runtest([path, "--disable=all", "--enable=duplicate-code"], code=0)
def test_duplicate_code_raw_strings_disable_line_two(self) -> None:
"""Tests disabling duplicate-code at a line at the begin of a piece of similar code."""
path = join(DATA, "raw_strings_disable_line_begin")
expected_output = "Similar lines in 2 files"
self._test_output(
[path, "--disable=all", "--enable=duplicate-code"],
expected_output=expected_output,
)
def test_duplicate_code_raw_strings_disable_line_disable_all(self) -> None:
"""Tests disabling duplicate-code with all similar lines disabled per line."""
path = join(DATA, "raw_strings_disable_line_disable_all")
self._runtest([path, "--disable=all", "--enable=duplicate-code"], code=0)
    def test_duplicate_code_raw_strings_disable_line_middle(self) -> None:
"""Tests disabling duplicate-code at a line in the middle of a piece of similar code."""
path = join(DATA, "raw_strings_disable_line_middle")
self._runtest([path, "--disable=all", "--enable=duplicate-code"], code=0)
def test_duplicate_code_raw_strings_disable_line_end(self) -> None:
"""Tests disabling duplicate-code at a line at the end of a piece of similar code."""
path = join(DATA, "raw_strings_disable_line_end")
expected_output = "Similar lines in 2 files"
self._test_output(
[path, "--disable=all", "--enable=duplicate-code"],
expected_output=expected_output,
)
def test_duplicate_code_raw_strings_disable_scope(self) -> None:
"""Tests disabling duplicate-code at an inner scope level."""
path = join(DATA, "raw_strings_disable_scope")
expected_output = "Similar lines in 2 files"
self._test_output(
[path, "--disable=all", "--enable=duplicate-code"],
expected_output=expected_output,
)
def test_duplicate_code_raw_strings_disable_scope_double(self) -> None:
"""Tests disabling duplicate-code at an inner scope level in two files."""
path = join(DATA, "raw_strings_disable_scope_double")
self._runtest([path, "--disable=all", "--enable=duplicate-code"], code=0)
def test_duplicate_code_raw_strings_disable_scope_function(self) -> None:
"""Tests disabling duplicate-code at an inner scope level with another scope with similarity."""
path = join(DATA, "raw_strings_disable_scope_second_function")
expected_output = "Similar lines in 2 files"
self._test_output(
[path, "--disable=all", "--enable=duplicate-code"],
expected_output=expected_output,
)
| PyCQA/pylint | tests/test_similar.py | Python | gpl-2.0 | 5,923 |
#!/usr/bin/env python
from efl import evas
import unittest
class TestBoxBasics(unittest.TestCase):
def setUp(self):
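        # The "buffer" render method gives an offscreen canvas, so these tests need no display.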
self.canvas = evas.Canvas(method="buffer",
size=(400, 500),
viewport=(0, 0, 400, 500))
self.canvas.engine_info_set(self.canvas.engine_info_get())
def tearDown(self):
self.canvas.delete()
def testConstructor(self):
box = evas.Box(self.canvas)
self.assertEqual(type(box), evas.Box)
box.delete()
def testConstructorBaseParameters(self):
size = (20, 30)
pos = (40, 50)
geometry = (60, 70, 80, 90)
color = (110, 120, 130, 140)
# create box using size/pos
box1 = evas.Box(self.canvas, name="box1", color=color, size=size, pos=pos)
self.assertEqual(box1.name, "box1")
self.assertEqual(box1.color, color)
self.assertEqual(box1.size, size)
self.assertEqual(box1.pos, pos)
box1.delete()
        # create box2 using geometry
box2 = evas.Box(self.canvas, name="box2", color=color, geometry=geometry)
self.assertEqual(box2.name, "box2")
self.assertEqual(box2.color, color)
self.assertEqual(box2.geometry, geometry)
box2.delete()
def testRemoveAll(self):
box = evas.Box(self.canvas)
r1 = evas.Rectangle(self.canvas)
r2 = evas.Rectangle(self.canvas)
box.append(r1)
box.append(r2)
box.remove_all(True)
self.assertEqual(r1.is_deleted(), True)
self.assertEqual(r2.is_deleted(), True)
box.delete()
if __name__ == '__main__':
unittest.main(verbosity=2)
evas.shutdown()
| maikodaraine/EnlightenmentUbuntu | bindings/python/python-efl/tests/evas/test_04_object_box.py | Python | unlicense | 1,720 |
import numpy as nm
from sfepy.base.conf import transform_functions
from sfepy.base.testing import TestCommon
def get_vertices(coors, domain=None):
x, z = coors[:,0], coors[:,2]
return nm.where((z < 0.1) & (x < 0.1))[0]
def get_cells(coors, domain=None):
return nm.where(coors[:, 0] < 0)[0]
class Test(TestCommon):
@staticmethod
def from_conf( conf, options ):
from sfepy import data_dir
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete import Functions
mesh = Mesh.from_file(data_dir
+ '/meshes/various_formats/abaqus_tet.inp')
mesh.nodal_bcs['set0'] = [0, 7]
domain = FEDomain('test domain', mesh)
conf_functions = {
'get_vertices' : (get_vertices,),
'get_cells' : (get_cells,),
}
functions = Functions.from_conf(transform_functions(conf_functions))
test = Test(conf=conf, options=options,
domain=domain, functions=functions)
return test
def test_selectors(self):
"""
Test basic region selectors.
"""
selectors = [
['all', 'cell'],
['vertices of surface', 'facet'],
['vertices of group 0', 'facet'],
['vertices of set set0', 'vertex'],
['vertices in (z < 0.1) & (x < 0.1)', 'facet'],
['vertices by get_vertices', 'cell'],
['vertex 0, 1, 2', 'vertex'],
['vertex in r.r6', 'vertex'],
['cells of group 0', 'cell'],
# ['cells of set 0', 'cell'], not implemented...
['cells by get_cells', 'cell'],
['cell 1, 4, 5', 'cell'],
['copy r.r5', 'cell'],
['r.r5', 'cell'],
]
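        # Expected vertex sets, one entry per selector above (same order).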
vertices = [
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[0, 1, 3, 7],
[0, 7],
[1, 2, 3, 4, 5, 9, 11],
[1, 2, 3, 4, 5, 9, 11],
[0, 1, 2],
[0],
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
[0, 1, 2, 3, 4, 5, 6, 9, 10, 11],
[0, 1, 2, 3, 4, 5, 6, 8],
[1, 2, 3, 4, 5, 9, 11],
[1, 2, 3, 4, 5, 9, 11],
]
ok = True
for ii, sel in enumerate(selectors):
self.report('select:', sel)
reg = self.domain.create_region('r%d' % ii, sel[0], kind=sel[1],
functions=self.functions)
_ok = ((len(reg.vertices) == len(vertices[ii]))
and (reg.vertices == vertices[ii]).all())
self.report(' vertices:', _ok)
ok = ok and _ok
return ok
def test_operators(self):
"""
Test operators in region selectors.
"""
ok = True
r1 = self.domain.create_region('r1', 'all')
sel = 'r.r1 -v vertices of group 0'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel, kind='vertex')
av = [2, 4, 5, 6, 8, 9, 10, 11, 12]
_ok = (reg.vertices == nm.array(av)).all()
self.report(' vertices:', _ok)
ok = ok and _ok
sel = 'vertex 0, 1, 2 +v vertices of group 0'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel, kind='vertex')
av = [0, 1, 2, 3, 7]
_ok = (reg.vertices == nm.array(av)).all()
self.report(' vertices:', _ok)
ok = ok and _ok
sel = 'vertex 0, 1, 2 *v vertices of group 0'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel, kind='vertex')
av = [0, 1]
_ok = (reg.vertices == nm.array(av)).all()
self.report(' vertices:', _ok)
ok = ok and _ok
sel = 'r.r1 -c cell 1, 4, 5'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel)
_ok = (nm.setdiff1d(r1.cells[0], [1, 4, 5]) == reg.cells[0]).all()
self.report(' cells:', _ok)
ok = ok and _ok
sel = 'cell 8, 3 +c cell 1, 4, 5'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel)
cells = [1, 3, 4, 5, 8]
_ok = (reg.cells == nm.array(cells)).all()
self.report(' cells:', _ok)
ok = ok and _ok
sel = 'cell 8, 3, 2 *c cell 8, 4, 2, 7'
self.report('select:', sel)
reg = self.domain.create_region('reg', sel)
cells = [2, 8]
_ok = (reg.cells == nm.array(cells)).all()
self.report(' cells:', _ok)
ok = ok and _ok
return ok
| RexFuzzle/sfepy | tests/test_regions.py | Python | bsd-3-clause | 4,744 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.log import get_logger
from knack.util import CLIError
from azure.cli.core.extension import (
ExtensionNotInstalledException, get_extension_modname, get_extension)
from azure.cli.core.extension.operations import (
reload_extension, update_extension, add_extension, add_extension_to_path, check_version_compatibility)
logger = get_logger(__name__)
INTERACTIVE_EXTENSION_NAME = 'interactive'
def start_shell(cmd, update=None, style=None):
from importlib import import_module
try:
get_extension(INTERACTIVE_EXTENSION_NAME)
if update:
logger.warning("Updating the Interactive extension to the latest available...")
update_extension(cmd, INTERACTIVE_EXTENSION_NAME)
reload_extension(INTERACTIVE_EXTENSION_NAME)
except ExtensionNotInstalledException:
logger.warning("Installing the Interactive extension...")
add_extension(cmd, extension_name=INTERACTIVE_EXTENSION_NAME)
ext = get_extension(INTERACTIVE_EXTENSION_NAME)
try:
check_version_compatibility(ext.get_metadata())
except CLIError:
raise CLIError('Run `az interactive --update` and try again.')
add_extension_to_path(INTERACTIVE_EXTENSION_NAME, ext_dir=ext.path)
interactive_module = get_extension_modname(ext_name=INTERACTIVE_EXTENSION_NAME, ext_dir=ext.path)
azext_interactive = import_module(interactive_module)
azext_interactive.start_shell(cmd, style=style)
| yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/interactive/custom.py | Python | mit | 1,810 |
from twisted.trial import unittest
from tipsip.header import Headers
from tipsip.header import Header, AddressHeader, ViaHeader
class HeadersTest(unittest.TestCase):
def test_construct(self):
aq = self.assertEqual
at = self.assertTrue
h = Headers({'Subject': 'lunch'}, f='John', to='abacaba')
h['TO'] = 'Carol'
aq(h['Subject'], 'lunch')
aq(h['from'], 'John')
aq(h['t'], 'Carol')
r = str(h)
for line in r.split('\r\n'):
at(line in ['Subject: lunch', 'From: John', 'To: Carol'])
def test_manipulation(self):
aq = self.assertEqual
at = self.assertTrue
h = Headers()
h['f'] = "from header"
h['to'] = "to header"
at('FROM' in h)
at('To' in h)
to = h.pop('t')
aq(to, "to header")
at(h.has_key('From'))
class HeaderTest(unittest.TestCase):
def test_construct(self):
aq = self.assertEqual
at = self.assertTrue
h = Header('active', params={'expires': '3600'})
aq(str(h), 'active ;expires=3600')
class AddressHeaderTest(unittest.TestCase):
def test_parsing(self):
aq = self.assertEqual
v = AddressHeader.parse('<sips:[email protected]>;expires=60')
aq(str(v.uri), 'sips:[email protected]')
aq(v.params['expires'], '60')
aq(v.display_name, '')
v = AddressHeader.parse('<sip:server10.biloxi.com;lr>')
aq(str(v.uri), 'sip:server10.biloxi.com;lr')
aq(v.params, {})
aq(v.display_name, '')
v = AddressHeader.parse('The Operator <sip:[email protected]>;tag=287447')
aq(str(v.uri), 'sip:[email protected]')
aq(v.display_name, 'The Operator')
aq(v.params, {'tag': '287447'})
v = AddressHeader.parse('sip:[email protected]')
aq(str(v.uri), 'sip:[email protected]')
class ViaHeaderTest(unittest.TestCase):
def test_construct(self):
aq = self.assertEqual
v = ViaHeader(transport='UDP', host='192.168.0.1', port='5060', params={'received': '8.8.8.8'})
aq(str(v), 'SIP/2.0/UDP 192.168.0.1:5060 ;received=8.8.8.8')
def test_parsing(self):
aq = self.assertEqual
at = self.assertTrue
v = ViaHeader.parse('SIP/2.0/UDP 127.0.0.1:21375;branch=z9hG4bK-d8754z-2f9c4f090fc81b1f-1---d8754z-;rport')
aq(v.version, 'SIP/2.0')
aq(v.transport, 'UDP')
aq(v.host, '127.0.0.1')
aq(v.port, '21375')
aq(v.params['branch'], 'z9hG4bK-d8754z-2f9c4f090fc81b1f-1---d8754z-')
at('rport' in v.params)
v = ViaHeader.parse('SIP/2.0/UDP pc33.atlanta.com:5066;branch=z9hG4bK776asdhds')
aq(v.port, '5066')
def test_serialize(self):
aq = self.assertEqual
v = ViaHeader.parse('SIP/2.0/UDP 127.0.0.1:21375;rport')
aq(str(v), 'SIP/2.0/UDP 127.0.0.1:21375 ;rport')
| ivaxer/tipsip | tipsip/tests/test_header.py | Python | isc | 2,901 |
# Copyright (c) 2001-2016, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import
from .tests_mechanism import AbstractTestFixture, dataset
from .check_utils import *
@dataset({"main_stif_test": {}})
class TestStif(AbstractTestFixture):
"""
Test the stif scenario responses
Possible journeys from A to B:
1/ 8h00 ====(line A)====> 10h00
2/ 9h00 ==(line B + C)==> 11h00
3/ 10h00 ====(line A)====> 12h00
"""
def test_stif_simple(self):
"""
Test of simple request :
* we want to make at least 2 journey calls (not only the best journey, but also try next)
* we don't want 2 journeys using the same line and changing at same points
So here we want journeys 1 and 2
"""
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&min_nb_journeys=1&_min_journeys_calls=2&_final_line_filter=true&_max_successive_physical_mode=3"\
.format(from_sp="stopA", to_sp="stopB", datetime="20140614T075500")
response = self.query_region(query)
assert len(response['journeys']) == 2
assert response['journeys'][0]['arrival_date_time'] == '20140614T100000'
assert response['journeys'][1]['arrival_date_time'] == '20140614T110000'
def test_stif_override_min_journeys_calls(self):
"""
        Test of simple request:
        * we only want 1 journey call (no next call)
        So here we only want journey 1
"""
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&min_nb_journeys=1&_min_journeys_calls=1&_final_line_filter=true&_max_successive_physical_mode=3"\
.format(from_sp="stopA", to_sp="stopB", datetime="20140614T075500")
response = self.query_region(query)
assert len(response['journeys']) == 1
assert response['journeys'][0]['arrival_date_time'] == '20140614T100000'
def test_stif_override_final_line_filter(self):
"""
        Test of simple request:
* we want to make at least 2 journey calls (not only the best journey, but also try next)
* we deactivate the filter on journeys using the same line and changing at same points
So here we want journeys 1, 2 and 3
"""
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&min_nb_journeys=1&_min_journeys_calls=2&_final_line_filter=false&_max_successive_physical_mode=3"\
.format(from_sp="stopA", to_sp="stopB", datetime="20140614T075500")
response = self.query_region(query)
assert len(response['journeys']) == 3
assert response['journeys'][0]['arrival_date_time'] == '20140614T100000'
assert response['journeys'][1]['arrival_date_time'] == '20140614T110000'
assert response['journeys'][2]['arrival_date_time'] == '20140614T120000'
def test_stif_max_successive_buses(self):
"""
BUS Bus Bus Bus
stopP ----> stopQ ----> stopR ----> stopS ----> stopT
15:00 16:00 17:00 18:00 19:00
Bus
stopP ----------------------------------------> stopT
15:00 20:00
Test of request with parameter "_max_successive_physical_mode":
* we want to make at least 2 journey calls (not only the best journey, but also try next)
* we don't want the journey using more than 3 Buses
So here we want journey1
"""
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&_max_successive_physical_mode=3&_max_additional_connections=10"\
.format(from_sp="stopP", to_sp="stopT", datetime="20140614T145500")
response = self.query_region(query)
assert len(response['journeys']) == 1
        # As we modify the value of _max_successive_physical_mode to 5 we want two journeys
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&_max_successive_physical_mode=5&_max_additional_connections=10"\
.format(from_sp="stopP", to_sp="stopT", datetime="20140614T145500")
response = self.query_region(query)
assert len(response['journeys']) == 2
def test_stif_max_successive_buses_with_tram_in_between(self):
"""
BUS Bus Bus Bus Tram Bus Bus
stopP ----> stopQ ----> stopR ----> stopS ----> stopT ----> stopU ----> stopV ----> stopW
15:00 16:00 17:00 18:00 19:00 19:30 20:00 20:30
Bus
stopP ----------------------------------------------------------------------------> stopW
15:00 21:00
Test of request with parameter "_max_successive_physical_mode":
* we want to make at least 2 journey calls (not only the best journey, but also try next)
        * we don't want the journey using more than 3 successive Buses
* we have "Bus" and "Tram" as means of transport
"""
        # As there are 4 successive buses to be used from stopP to stopW and _max_successive_physical_mode = 3
        # we have 1 journey
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default"\
"&_max_successive_physical_mode=3&_max_additional_connections=10"\
.format(from_sp="stopP", to_sp="stopW", datetime="20140614T145500")
response = self.query_region(query)
assert len(response['journeys']) == 1
        # As we modify the value of _max_successive_physical_mode to 5 we want two journeys
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&_max_successive_physical_mode=5&_max_additional_connections=10"\
.format(from_sp="stopP", to_sp="stopW", datetime="20140614T145500")
response = self.query_region(query)
assert len(response['journeys']) == 2
# As we modify the value of _max_additional_connections to 2 we delete the second journey because
        # it contains more than nb_connections + 2 connections
query = "journeys?from={from_sp}&to={to_sp}&datetime={datetime}&_override_scenario=new_default" \
"&_max_successive_physical_mode=5&_max_additional_connections=2"\
.format(from_sp="stopP", to_sp="stopW", datetime="20140614T145500")
response = self.query_region(query)
assert len(response['journeys']) == 1
| kadhikari/navitia | source/jormungandr/tests/stif_tests.py | Python | agpl-3.0 | 7,989 |
import json
import platform
from datetime import timedelta
from unittest import SkipTest
from nose.tools import nottest
from functools import wraps
from acouchbase.cluster import (Cluster, get_event_loop,
close_event_loop)
from couchbase_tests.async_base import AsyncioTestCase
from couchbase.exceptions import DocumentNotFoundException, ValueFormatException, DocumentLockedException
from couchbase.transcoder import (JSONTranscoder, RawJSONTranscoder,
RawStringTranscoder, RawBinaryTranscoder, LegacyTranscoder)
from couchbase.collection import (GetOptions, UpsertOptions, InsertOptions, ReplaceOptions,
GetAndTouchOptions, GetAndLockOptions, GetAnyReplicaOptions, GetAllReplicasOptions)
@nottest
def async_test(func):
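    """Run the decorated coroutine test to completion on the test case's event loop."""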
@wraps(func)
def wrapper(self, *args, **kwargs):
return self.loop.run_until_complete(func(self, *args, **kwargs))
return wrapper
class AcouchbaseDefaultTranscoderTestSuite(object):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
@async_test
async def test_default_tc_json_upsert(self):
await self.collection.upsert(self.KEY, self.CONTENT)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(self.CONTENT, result)
@async_test
async def test_default_tc_json_insert(self):
await self.collection.insert(self.KEY, self.CONTENT)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(self.CONTENT, result)
@async_test
async def test_default_tc_json_replace(self):
await self.collection.upsert(self.KEY, self.CONTENT)
new_content = self.CONTENT
new_content["some"] = "new content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(new_content, result)
@async_test
async def test_default_tc_string_upsert(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_default_tc_string_insert(self):
content = "some string content"
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_default_tc_string_replace(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
new_content = "new string content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(new_content, result)
@async_test
async def test_default_tc_binary_upsert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, content)
@async_test
async def test_default_tc_bytearray_upsert(self):
content = bytearray(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, content)
@async_test
async def test_default_tc_binary_insert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, content)
@async_test
async def test_default_tc_binary_replace(self):
content = "Lets to a str first"
await self.collection.upsert(self.KEY, content)
new_content = bytes(json.dumps("Here are some newer bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, new_content)
class AcouchbaseDefaultTranscoderTests(
AsyncioTestCase, AcouchbaseDefaultTranscoderTestSuite):
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseDefaultTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster)
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseDefaultTranscoderTests, cls).tearDownClass()
close_event_loop()
def setUp(self):
super(AcouchbaseDefaultTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
class AcouchbaseDefaultJsonTranscoderTests(AsyncioTestCase, AcouchbaseDefaultTranscoderTestSuite):
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseDefaultJsonTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=JSONTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseDefaultJsonTranscoderTests, cls).tearDownClass()
def setUp(self):
super(AcouchbaseDefaultJsonTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
class AcouchbaseRawJsonTranscoderTests(AsyncioTestCase):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseRawJsonTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=RawJSONTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseRawJsonTranscoderTests, cls).tearDownClass()
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
def setUp(self):
super(AcouchbaseRawJsonTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
@async_test
async def test_raw_json_tc_json_upsert(self):
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, self.CONTENT)
@async_test
async def test_raw_json_tc_json_insert(self):
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, self.CONTENT)
@async_test
async def test_raw_json_tc_json_replace(self):
await self.collection.upsert(self.KEY, "some string content")
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, self.CONTENT)
@async_test
async def test_raw_json_tc_string_upsert(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result.decode("utf-8"))
@async_test
async def test_raw_json_tc_string_insert(self):
content = "some string content"
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result.decode("utf-8"))
@async_test
async def test_raw_json_tc_string_replace(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
new_content = "new string content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(new_content, result.decode("utf-8"))
@async_test
async def test_raw_json_tc_binary_upsert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_raw_json_tc_bytearray_upsert(self):
content = bytearray(json.dumps("Here are some bytes"), "utf-8")
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_raw_json_tc_binary_insert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_raw_json_tc_binary_replace(self):
content = "Lets to a str first"
await self.collection.upsert(self.KEY, content)
new_content = bytes(json.dumps("Here are some newer bytes"), "utf-8")
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(new_content, result)
@async_test
async def test_pass_through(self):
content = json.dumps(self.CONTENT)
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertNotEqual(self.CONTENT, result)
# json.loads expects a string in Python 3.5
if float(platform.python_version()[:3]) <= 3.5:
result = result.decode("utf-8")
decoded = json.loads(result)
self.assertEqual(self.CONTENT, decoded)
class AcouchbaseRawStringTranscoderTests(AsyncioTestCase):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseRawStringTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=RawStringTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseRawStringTranscoderTests, cls).tearDownClass()
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
def setUp(self):
super(AcouchbaseRawStringTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
@async_test
async def test_raw_str_tc_json_upsert(self):
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, self.CONTENT)
@async_test
async def test_raw_str_tc_json_insert(self):
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, self.CONTENT)
@async_test
async def test_raw_str_tc_json_replace(self):
await self.collection.upsert(self.KEY, "some string content")
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, self.CONTENT)
@async_test
async def test_raw_json_tc_string_upsert(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_raw_json_tc_string_insert(self):
content = "some string content"
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_raw_json_tc_string_replace(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
new_content = "new string content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(new_content, result)
@async_test
async def test_raw_str_tc_binary_upsert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, content)
@async_test
async def test_raw_str_tc_bytearray_upsert(self):
content = bytearray(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, content)
@async_test
async def test_raw_str_tc_binary_insert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, content)
@async_test
async def test_raw_str_tc_binary_replace(self):
await self.collection.upsert(self.KEY, "some string content")
content = bytes(json.dumps("Here are some bytes"), "utf-8")
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, content)
class AcouchbaseRawBinaryTranscoderTests(AsyncioTestCase):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseRawBinaryTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=RawBinaryTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseRawBinaryTranscoderTests, cls).tearDownClass()
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
def setUp(self):
super(AcouchbaseRawBinaryTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
@async_test
async def test_raw_bin_tc_json_upsert(self):
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, self.CONTENT)
@async_test
async def test_raw_bin_tc_json_insert(self):
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, self.CONTENT)
@async_test
async def test_raw_bin_tc_json_replace(self):
await self.collection.upsert(self.KEY, bytes("some string content", "utf-8"))
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, self.CONTENT)
@async_test
async def test_raw_bin_tc_str_upsert(self):
with self.assertRaises(ValueFormatException):
await self.collection.upsert(self.KEY, "some string content")
@async_test
async def test_raw_bin_tc_str_insert(self):
with self.assertRaises(ValueFormatException):
await self.collection.insert(self.KEY, "some string content")
@async_test
async def test_raw_bin_tc_str_replace(self):
await self.collection.upsert(self.KEY, bytes("some string content", "utf-8"))
with self.assertRaises(ValueFormatException):
await self.collection.replace(self.KEY, "some new string content")
@async_test
async def test_raw_bin_tc_binary_upsert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_raw_bin_tc_bytearray_upsert(self):
content = bytearray(json.dumps("Here are some bytes"), "utf-8")
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_raw_bin_tc_binary_insert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_raw_bin_tc_binary_replace(self):
await self.collection.upsert(self.KEY, bytes("Lets to a str first", "utf-8"))
new_content = bytes(json.dumps("Here are some newer bytes"), "utf-8")
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(new_content, result)
@nottest
class FakeObject(object):
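    """Simple picklable object used to exercise the legacy (pickle) transcoder tests."""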
PROP = "fake prop"
PROP1 = 12345
class AcouchbaseLegacyTranscoderTests(AsyncioTestCase):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseLegacyTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=LegacyTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseLegacyTranscoderTests, cls).tearDownClass()
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
def setUp(self):
super(AcouchbaseLegacyTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
@async_test
async def test_legacy_tc_json_upsert(self):
await self.collection.upsert(self.KEY, self.CONTENT)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(self.CONTENT, result)
@async_test
async def test_legacy_tc_json_insert(self):
await self.collection.insert(self.KEY, self.CONTENT)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(self.CONTENT, result)
@async_test
async def test_legacy_tc_json_replace(self):
await self.collection.upsert(self.KEY, self.CONTENT)
new_content = self.CONTENT
new_content["some"] = "new content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[dict]
self.assertIsNotNone(result)
self.assertIsInstance(result, dict)
self.assertEqual(new_content, result)
@async_test
async def test_legacy_tc_pickle_upsert(self):
fake_obj = FakeObject()
await self.collection.upsert(self.KEY, fake_obj)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, FakeObject)
self.assertEqual(fake_obj.PROP, result.PROP)
self.assertEqual(fake_obj.PROP1, result.PROP1)
@async_test
async def test_legacy_tc_pickle_insert(self):
fake_obj = FakeObject()
await self.collection.insert(self.KEY, fake_obj)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, FakeObject)
self.assertEqual(fake_obj.PROP, result.PROP)
self.assertEqual(fake_obj.PROP1, result.PROP1)
@async_test
async def test_legacy_tc_pickle_replace(self):
fake_obj = FakeObject()
await self.collection.upsert(self.KEY, self.CONTENT)
await self.collection.replace(self.KEY, fake_obj)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, FakeObject)
self.assertEqual(fake_obj.PROP, result.PROP)
self.assertEqual(fake_obj.PROP1, result.PROP1)
@async_test
async def test_legacy_tc_string_upsert(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_legacy_tc_string_insert(self):
content = "some string content"
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(content, result)
@async_test
async def test_legacy_tc_string_replace(self):
content = "some string content"
await self.collection.upsert(self.KEY, content)
new_content = "new string content"
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content_as[str]
self.assertIsNotNone(result)
self.assertIsInstance(result, str)
self.assertEqual(new_content, result)
@async_test
async def test_legacy_tc_binary_upsert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_legacy_tc_bytearray_upsert(self):
content = bytearray(json.dumps("Here are some bytes"), "utf-8")
await self.collection.upsert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_legacy_tc_binary_insert(self):
content = bytes(json.dumps("Here are some bytes"), "utf-8")
await self.collection.insert(self.KEY, content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_legacy_tc_binary_replace(self):
await self.collection.upsert(self.KEY, bytes("Lets to a str first", "utf-8"))
new_content = bytes(json.dumps("Here are some newer bytes"), "utf-8")
await self.collection.replace(self.KEY, new_content)
resp = await self.collection.get(self.KEY)
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(new_content, result)
class AcouchbaseKeyValueOpTranscoderTests(AsyncioTestCase):
CONTENT = {"some": "content", "num": 1,
"list": [1, 2, 3], "nested": {"a": "b"}}
KEY = "imakey"
@classmethod
def setUpClass(cls) -> None:
super(AcouchbaseKeyValueOpTranscoderTests, cls).setUpClass(
get_event_loop(), cluster_class=Cluster, transcoder=JSONTranscoder())
@classmethod
def tearDownClass(cls) -> None:
super(AcouchbaseKeyValueOpTranscoderTests, cls).tearDownClass()
async def initialize(self):
try:
await self.collection.remove(self.KEY)
except DocumentNotFoundException:
pass
def setUp(self):
super(AcouchbaseKeyValueOpTranscoderTests, self).setUp()
self.loop.run_until_complete(self.initialize())
@async_test
async def test_upsert(self):
content = bytes(json.dumps(self.CONTENT), "utf-8")
        # use RawBinaryTranscoder() so that get() fails as expected
# since get() w/o passing in transcoder uses the default JSONTranscoder()
await self.collection.upsert(self.KEY, content, UpsertOptions(transcoder=RawBinaryTranscoder()))
with self.assertRaises(ValueFormatException):
await self.collection.get(self.KEY)
@async_test
async def test_insert(self):
        # use RawStringTranscoder() so that get() fails as expected
# since get() w/o passing in transcoder uses the default JSONTranscoder()
await self.collection.upsert(self.KEY, "some string content", InsertOptions(transcoder=RawStringTranscoder()))
with self.assertRaises(ValueFormatException):
await self.collection.get(self.KEY)
@async_test
async def test_replace(self):
content = bytes(json.dumps(self.CONTENT), "utf-8")
        # use RawBinaryTranscoder() so that get() fails as expected
# since get() w/o passing in transcoder uses the default JSONTranscoder()
await self.collection.upsert(self.KEY, self.CONTENT)
await self.collection.replace(self.KEY, content, ReplaceOptions(transcoder=RawBinaryTranscoder()))
with self.assertRaises(ValueFormatException):
await self.collection.get(self.KEY)
@async_test
async def test_get(self):
content = bytes(json.dumps(self.CONTENT), "utf-8")
tc = RawBinaryTranscoder()
await self.collection.upsert(self.KEY, content, UpsertOptions(transcoder=tc))
with self.assertRaises(ValueFormatException):
await self.collection.get(self.KEY)
resp = await self.collection.get(self.KEY, GetOptions(transcoder=tc))
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
@async_test
async def test_get_and_touch(self):
content = bytes(json.dumps(self.CONTENT), "utf-8")
tc = RawBinaryTranscoder()
await self.collection.upsert(self.KEY, content, UpsertOptions(transcoder=tc))
with self.assertRaises(ValueFormatException):
await self.collection.get_and_touch(self.KEY, timedelta(seconds=30))
resp = await self.collection.get_and_touch(self.KEY, timedelta(seconds=3), GetAndTouchOptions(transcoder=tc))
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
await self.try_n_times_till_exception_async(
10, 3, self.collection.get, self.KEY, GetOptions(transcoder=tc), DocumentNotFoundException)
@async_test
async def test_get_and_lock(self):
content = bytes(json.dumps(self.CONTENT), "utf-8")
tc = RawBinaryTranscoder()
await self.collection.upsert(self.KEY, content, UpsertOptions(transcoder=tc))
with self.assertRaises(ValueFormatException):
await self.collection.get_and_lock(self.KEY, timedelta(seconds=1))
await self.try_n_times_async(10, 1, self.collection.upsert, self.KEY, content, UpsertOptions(transcoder=tc))
resp = await self.collection.get_and_lock(self.KEY, timedelta(seconds=3), GetAndLockOptions(transcoder=tc))
result = resp.content
self.assertIsNotNone(result)
self.assertIsInstance(result, bytes)
self.assertEqual(content, result)
# upsert should definitely fail
with self.assertRaises(DocumentLockedException):
await self.collection.upsert(self.KEY, self.CONTENT)
# but succeed eventually
await self.try_n_times_async(10, 1, self.collection.upsert, self.KEY, self.CONTENT)
# TODO: replica ops are not available w/ async
# @async_test
# async def test_get_any_replica(self):
# num_replicas = self.bucket.configured_replica_count
# if num_replicas < 2:
# raise SkipTest('Need replicas to test')
# content = bytes(json.dumps(self.CONTENT), "utf-8")
# tc = RawBinaryTranscoder()
# await self.collection.upsert(self.KEY, content, UpsertOptions(transcoder=tc))
# with self.assertRaises(ValueFormatException):
# await self.collection.get_any_replica(self.KEY)
# resp = await self.try_n_times_async(
# 10, 3, self.collection.get_any_replica, self.KEY, GetAnyReplicaOptions(transcoder=tc))
# result = resp.content
# self.assertIsNotNone(result)
# self.assertIsInstance(result, bytes)
# self.assertEqual(content, result)
# @async_test
# async def test_get_all_replicas(self):
# num_replicas = self.bucket.configured_replica_count
# if num_replicas < 2:
# raise SkipTest('Need replicas to test')
# # TODO: is this check needed?
# # kv_results = self.bucket.ping().endpoints.get(ServiceType.KeyValue, None)
# # if not kv_results or len(kv_results) < num_replicas+1:
# # raise SkipTest('Not all replicas are online')
# content = bytes(json.dumps(self.CONTENT), "utf-8")
# tc = RawBinaryTranscoder()
# await self.collection.upsert(self.KEY, content, UpsertOptions(transcoder=tc))
# with self.assertRaises(ValueFormatException):
# await self.collection.get_all_replicas(self.KEY)
# resp = await self.try_n_times_async(
# 10, 3, self.collection.get_all_replicas, self.KEY, GetAllReplicasOptions(transcoder=tc))
# for r in resp:
# result = r.content
# self.assertIsNotNone(result)
# self.assertIsInstance(result, bytes)
# self.assertEqual(content, result)
| couchbase/couchbase-python-client | acouchbase/tests/cases/transcoder_t.py | Python | apache-2.0 | 31,512 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Eduard Trott
# @Date: 2015-09-08 09:23:57
# @Email: [email protected]
# @Last modified by: etrott
# @Last Modified time: 2015-10-19 09:57:53
from __future__ import unicode_literals, absolute_import
import logging
import os
# EXTERNALLY INSTALLED
import yaml
from oauth2client import file, client, tools
# Load logging before anything else
logging.basicConfig(format='>> %(message)s')
logr = logging.getLogger('gfreespace')
''' Load the config file so modules can import and reuse '''
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.abspath(os.path.join(dir_path, '..'))
CONFIG_FILE = os.path.expanduser('~/.gfreespace')
if os.path.exists(CONFIG_FILE):
with open(CONFIG_FILE) as _:
config = yaml.load(_)
else:
config = {}
CLIENT_SECRET_FILE = os.path.expanduser('~/.gdrive_private')
DEFAULT_TOKEN = os.path.expanduser('~/.oauth/calendar.json')
SCOPES = ('https://www.googleapis.com/auth/calendar '
'https://apps-apis.google.com/a/feeds/calendar/resource/')
def get_credentials():
"""
FIXME DOCs
Taken from:
https://developers.google.com/drive/web/quickstart/python
"""
try:
import argparse
flags = argparse.ArgumentParser(
parents=[tools.argparser]).parse_known_args()[0]
except ImportError:
flags = None
logr.error(
'Unable to parse oauth2client args; `pip install argparse`')
store = file.Storage(DEFAULT_TOKEN)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(
CLIENT_SECRET_FILE, SCOPES)
flow.redirect_uri = client.OOB_CALLBACK_URN
if flags:
credentials = tools.run_flow(flow, store, flags)
        else:  # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
logr.info('Storing credentials to ' + DEFAULT_TOKEN)
return credentials
| maybelinot/gfreespace | gfreespace/utils.py | Python | gpl-3.0 | 2,023 |
# -*- coding: utf-8 -*-
"""
This module defines the specialized Evaluator for segmentation applications
All logic except default metrics is delegated to the parent class
"""
from __future__ import absolute_import, division, print_function
from niftynet.evaluation.base_evaluator import CachedSubanalysisEvaluator
class SegmentationEvaluator(CachedSubanalysisEvaluator):
"""
Evaluator for SegmentationApplication
Supports caching of intermediate results which is
important for boundary error calculations
"""
def default_evaluation_list(self):
"""
:return: list of metric names to compute by default
"""
return ['dice', 'jaccard', 'average_distance']
| NifTK/NiftyNet | niftynet/evaluation/segmentation_evaluator.py | Python | apache-2.0 | 710 |
#
# NEPI, a framework to manage network experiments
# Copyright (C) 2014 INRIA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Alina Quereilhac <[email protected]>
from nepi.execution.attribute import Attribute, Flags, Types
from nepi.execution.resource import clsinit_copy, ResourceState
from nepi.resources.ns3.ns3ccndceapplication import NS3BaseCCNDceApplication
@clsinit_copy
class LinuxNS3CCNDceApplication(NS3BaseCCNDceApplication):
_rtype = "linux::ns3::dce::CCNApplication"
@classmethod
def _register_attributes(cls):
sources = Attribute("sources",
"Path to tar.gz file with sources for the application execute in DCE. "
"Sources will be uploaded to ${SRC} and it is the responsibility of "
"the build instructions (in the build attribute) to copy the compiled "
"binaries to the ${BIN_DCE} directory",
flags = Flags.Design)
build = Attribute("build",
"Instructions to compile sources DCE-compatible way. "
"Note that sources will be uploaded to ${SRC} and the "
"build instructions are responsible for copying the "
"binaries to the ${BIN_DCE} directory. ",
flags = Flags.Design)
depends = Attribute("depends",
"Space-separated list of packages required to run the application",
flags = Flags.Design)
files = Attribute("files",
"Semi-colon separated list of 'key=value' pairs to set as "
"DCE files (AddFile). The key should be a path to a local file "
"and the key is the path to be set in DCE for that file" ,
flags = Flags.Design)
stdinfile = Attribute("stdinFile",
"File to set as StdinFile. The value shoudl be either an empty "
"or a path to a local file ",
flags = Flags.Design)
cls._register_attribute(sources)
cls._register_attribute(build)
cls._register_attribute(depends)
cls._register_attribute(files)
cls._register_attribute(stdinfile)
def _instantiate_object(self):
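        """Prepare the node for this DCE application: install dependencies,
        upload and build extra sources, and upload any extra CCN files
        (e.g. a repository), then run the resulting deploy command."""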
command = []
# Install package dependencies required to run the binary
depends = self.get("depends")
if depends:
dcmd = self.simulation.install_dependencies(depends = depends)
if dcmd:
command.append(dcmd)
# Upload sources to generate the binary
sources = self.get("sources")
if sources:
scmd = self.simulation.upload_extra_sources(sources = sources)
if scmd:
command.append(scmd)
# Upload instructions to build the binary
build = self.get("build")
if build:
bcmd = self.simulation.build(build = build)
if bcmd:
command.append(bcmd)
# Upload CCN files (e.g. repo)
files = self.get("files")
if files:
upload = []
for file in map(str.strip, files.split(";")):
localpath, dcepath = files.split("=")
upload.append(localpath)
sources = ";".join(upload)
fcmd = self.simulation.upload_extra_sources(sources = sources,
src_dir = self.simulation.app_home)
if fcmd:
command.append(fcmd)
if command:
deploy_command = ";".join(command)
prefix = "%d_deploy" % self.guid
self.simulation.execute_deploy_command(deploy_command, prefix=prefix)
| phiros/nepi | src/nepi/resources/linux/ns3/ccn/ns3ccndceapplication.py | Python | gpl-3.0 | 4,309 |
#!/bin/python2
import time
from threading import *
import signal
import sys
import netifaces
from sniffer_module import *
from dblib.dbSender import *
# globals
shouldQuit = None
otherThread = None
sender = Sender()
hostname = ""
# signal handler
def sigintHandler(signum, frame):
print("Quitting uncleanly")
sys.exit()
# search the available interfaces until we find a monitor interface
def findIface():
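    """Poll the available network interfaces until one whose name starts
    with 'mon' (a monitor-mode interface) appears, and return its name."""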
chosenIface = None
while chosenIface is None:
ifaces = netifaces.interfaces()
for iface in ifaces:
if iface[:3] == 'mon':
chosenIface = iface
return chosenIface
# gets hostname for computer
def loadHostname():
global hostname
hostnameFile = open("/etc/hostname", "r")
hostname = hostnameFile.readline()
hostnameFile.close()
hostname = hostname[:-1]
# send the contents of the list
def sendData(otherThread):
sender.clear()
# lock for clearing
otherThread.listLock.acquire()
# add each of the packets to the list
for mac in otherThread.devices:
sender.add(mac, hostname, otherThread.devices[mac][0], otherThread.devices[mac][1])
# clear out the device buffer
otherThread.devices = {}
otherThread.listLock.release()
# and send off the data
sender.send()
def main():
    # register the SIGINT handler so Ctrl-C terminates the sniffer
signal.signal(signal.SIGINT, sigintHandler)
# init the processing thread
shouldQuit = False
listLock = Lock()
otherThread = ProcessingThread(listLock, shouldQuit)
    # find a monitor interface
print("Looking for monitor interfaces...")
chosenIface = findIface()
print("Found Interface: " + chosenIface)
# get the hostname
loadHostname()
print("Hostname: " + hostname)
# start the other thread to start sniffing
otherThread.start()
# sit and spin while the other thread does things
while True:
print("sending off data")
sendData(otherThread)
time.sleep(15)
# run main
if __name__ == "__main__":
main()
| DepthDeluxe/dot11sniffer | dot11sniffer.py | Python | mit | 2,073 |
# Copyright (C) 2010 CAMd
# Please see the accompanying LICENSE file for further information.
"""This module provides all the classes and functions associated with the
evaluation of exact exchange with k-point sampling."""
from math import pi, sqrt
import numpy as np
from ase import Atoms
from gpaw.xc import XC
from gpaw.xc.kernel import XCNull
from gpaw.xc.functional import XCFunctional
from gpaw.utilities import hartree, pack, unpack2, packed_index
from gpaw.lfc import LFC
from gpaw.wavefunctions.pw import PWDescriptor
from gpaw.kpt_descriptor import KPointDescriptor
from gpaw.kpoint import KPoint as KPoint0
from gpaw.mpi import world
class KPoint:
def __init__(self, kd, kpt=None):
"""Helper class for parallelizing over k-points.
Placeholder for wave functions, occupation numbers,
projections, and global k-point index."""
self.kd = kd
if kpt is not None:
self.psit_nG = kpt.psit_nG
self.f_n = kpt.f_n
self.P_ani = kpt.P_ani
self.k = kpt.k
self.s = kpt.s
self.requests = []
def next(self):
"""Create empty object.
        Data will be received from another processor."""
kpt = KPoint(self.kd)
        # initialize array for receiving:
kpt.psit_nG = np.empty_like(self.psit_nG)
kpt.f_n = np.empty_like(self.f_n)
# Total number of projector functions:
I = sum([P_ni.shape[1] for P_ni in self.P_ani.values()])
kpt.P_In = np.empty((I, len(kpt.f_n)), complex)
kpt.P_ani = {}
I1 = 0
for a, P_ni in self.P_ani.items():
I2 = I1 + P_ni.shape[1]
kpt.P_ani[a] = kpt.P_In[I1:I2].T
I1 = I2
kpt.k = (self.k + 1) % self.kd.nibzkpts
kpt.s = self.s
return kpt
def start_sending(self, rank):
P_In = np.concatenate([P_ni.T for P_ni in self.P_ani.values()])
self.requests += [
self.kd.comm.send(self.psit_nG, rank, block=False, tag=1),
self.kd.comm.send(self.f_n, rank, block=False, tag=2),
self.kd.comm.send(P_In, rank, block=False, tag=3)]
def start_receiving(self, rank):
self.requests += [
self.kd.comm.receive(self.psit_nG, rank, block=False, tag=1),
self.kd.comm.receive(self.f_n, rank, block=False, tag=2),
self.kd.comm.receive(self.P_In, rank, block=False, tag=3)]
def wait(self):
self.kd.comm.waitall(self.requests)
self.requests = []
class HybridXC(XCFunctional):
orbital_dependent = True
def __init__(self, name, hybrid=None, xc=None, finegrid=False,
alpha=None):
"""Mix standard functionals with exact exchange.
name: str
Name of hybrid functional.
hybrid: float
Fraction of exact exchange.
xc: str or XCFunctional object
Standard DFT functional with scaled down exchange.
finegrid: boolean
Use fine grid for energy functional evaluations?
"""
if name == 'EXX':
assert hybrid is None and xc is None
hybrid = 1.0
xc = XC(XCNull())
elif name == 'PBE0':
assert hybrid is None and xc is None
hybrid = 0.25
xc = XC('HYB_GGA_XC_PBEH')
elif name == 'B3LYP':
assert hybrid is None and xc is None
hybrid = 0.2
xc = XC('HYB_GGA_XC_B3LYP')
if isinstance(xc, str):
xc = XC(xc)
self.hybrid = hybrid
self.xc = xc
self.type = xc.type
self.alpha = alpha
self.exx = 0.0
XCFunctional.__init__(self, name)
def get_setup_name(self):
return 'PBE'
def calculate_radial(self, rgd, n_sLg, Y_L, v_sg,
dndr_sLg=None, rnablaY_Lv=None,
tau_sg=None, dedtau_sg=None):
return self.xc.calculate_radial(rgd, n_sLg, Y_L, v_sg,
dndr_sLg, rnablaY_Lv)
def initialize(self, density, hamiltonian, wfs, occupations):
self.xc.initialize(density, hamiltonian, wfs, occupations)
self.nspins = wfs.nspins
self.setups = wfs.setups
self.density = density
self.kpt_u = wfs.kpt_u
self.gd = density.gd
self.kd = wfs.kd
self.bd = wfs.bd
N_c = self.gd.N_c
N = self.gd.N_c.prod()
vol = self.gd.dv * N
if self.alpha is None:
self.alpha = 6 * vol**(2 / 3.0) / pi**2
self.gamma = (vol / (2 * pi)**2 * sqrt(pi / self.alpha) *
self.kd.nbzkpts)
ecut = 0.5 * pi**2 / (self.gd.h_cv**2).sum(1).max()
if self.kd.N_c is None:
self.bzk_kc = np.zeros((1, 3))
            raise NotImplementedError('Gamma-point-only case not implemented')
else:
n = self.kd.N_c * 2 - 1
bzk_kc = np.indices(n).transpose((1, 2, 3, 0))
bzk_kc.shape = (-1, 3)
bzk_kc -= self.kd.N_c - 1
self.bzk_kc = bzk_kc.astype(float) / self.kd.N_c
self.pwd = PWDescriptor(ecut, self.gd, self.bzk_kc)
n = 0
for k_c, Gpk2_G in zip(self.bzk_kc[:], self.pwd.G2_qG):
if (k_c > -0.5).all() and (k_c <= 0.5).all(): #XXX???
if k_c.any():
self.gamma -= np.dot(np.exp(-self.alpha * Gpk2_G),
Gpk2_G**-1)
else:
self.gamma -= np.dot(np.exp(-self.alpha * Gpk2_G[1:]),
Gpk2_G[1:]**-1)
n += 1
assert n == self.kd.N_c.prod()
self.ghat = LFC(self.gd,
[setup.ghat_l for setup in density.setups],
dtype=complex
)
self.ghat.set_k_points(self.bzk_kc)
self.fullkd = KPointDescriptor(self.kd.bzk_kc, nspins=1)
class S:
id_a = []
def set_symmetry(self, s): pass
self.fullkd.set_symmetry(Atoms(pbc=True), S(), False)
self.fullkd.set_communicator(world)
self.pt = LFC(self.gd, [setup.pt_j for setup in density.setups],
dtype=complex)
self.pt.set_k_points(self.fullkd.ibzk_kc)
self.interpolator = density.interpolator
def set_positions(self, spos_ac):
self.ghat.set_positions(spos_ac)
self.pt.set_positions(spos_ac)
def calculate(self, gd, n_sg, v_sg=None, e_g=None):
# Normal XC contribution:
exc = self.xc.calculate(gd, n_sg, v_sg, e_g)
# Add EXX contribution:
return exc + self.exx
def calculate_exx(self):
"""Non-selfconsistent calculation."""
kd = self.kd
K = self.fullkd.nibzkpts
assert self.nspins == 1
Q = K // world.size
assert Q * world.size == K
parallel = (world.size > self.nspins)
self.exx = 0.0
self.exx_skn = np.zeros((self.nspins, K, self.bd.nbands))
kpt_u = []
for k in range(world.rank * Q, (world.rank + 1) * Q):
k_c = self.fullkd.ibzk_kc[k]
for k1, k1_c in enumerate(kd.bzk_kc):
if abs(k1_c - k_c).max() < 1e-10:
break
# Index of symmetry related point in the irreducible BZ
ik = kd.kibz_k[k1]
kpt = self.kpt_u[ik]
# KPoint from ground-state calculation
phase_cd = np.exp(2j * pi * self.gd.sdisp_cd * k_c[:, np.newaxis])
kpt2 = KPoint0(kpt.weight, kpt.s, k, None, phase_cd)
kpt2.psit_nG = np.empty_like(kpt.psit_nG)
kpt2.f_n = kpt.f_n / kpt.weight / K * 2
for n, psit_G in enumerate(kpt2.psit_nG):
psit_G[:] = kd.transform_wave_function(kpt.psit_nG[n], k1)
kpt2.P_ani = self.pt.dict(len(kpt.psit_nG))
self.pt.integrate(kpt2.psit_nG, kpt2.P_ani, k)
kpt_u.append(kpt2)
for s in range(self.nspins):
kpt1_q = [KPoint(self.fullkd, kpt) for kpt in kpt_u if kpt.s == s]
kpt2_q = kpt1_q[:]
if len(kpt1_q) == 0:
# No s-spins on this CPU:
continue
# Send rank:
srank = self.fullkd.get_rank_and_index(s, (kpt1_q[0].k - 1) % K)[0]
# Receive rank:
rrank = self.fullkd.get_rank_and_index(s, (kpt1_q[-1].k + 1) % K)[0]
# Shift k-points K // 2 times:
for i in range(K // 2 + 1):
if i < K // 2:
if parallel:
kpt = kpt2_q[-1].next()
kpt.start_receiving(rrank)
kpt2_q[0].start_sending(srank)
else:
kpt = kpt2_q[0]
for kpt1, kpt2 in zip(kpt1_q, kpt2_q):
if 2 * i == K:
self.apply(kpt1, kpt2, invert=(kpt1.k > kpt2.k))
else:
self.apply(kpt1, kpt2)
self.apply(kpt1, kpt2, invert=True)
if i < K // 2:
if parallel:
kpt.wait()
kpt2_q[0].wait()
kpt2_q.pop(0)
kpt2_q.append(kpt)
self.exx = world.sum(self.exx)
world.sum(self.exx_skn)
self.exx += self.calculate_paw_correction()
def apply(self, kpt1, kpt2, invert=False):
#print world.rank,kpt1.k,kpt2.k,invert
k1_c = self.fullkd.ibzk_kc[kpt1.k]
k2_c = self.fullkd.ibzk_kc[kpt2.k]
if invert:
k2_c = -k2_c
k12_c = k1_c - k2_c
N_c = self.gd.N_c
eikr_R = np.exp(2j * pi * np.dot(np.indices(N_c).T, k12_c / N_c).T)
for q, k_c in enumerate(self.bzk_kc):
if abs(k_c + k12_c).max() < 1e-9:
q0 = q
break
for q, k_c in enumerate(self.bzk_kc):
if abs(k_c - k12_c).max() < 1e-9:
q00 = q
break
Gpk2_G = self.pwd.G2_qG[q0]
if Gpk2_G[0] == 0:
Gpk2_G = Gpk2_G.copy()
Gpk2_G[0] = 1.0 / self.gamma
N = N_c.prod()
vol = self.gd.dv * N
nspins = self.nspins
same = (kpt1.k == kpt2.k)
for n1, psit1_R in enumerate(kpt1.psit_nG):
f1 = kpt1.f_n[n1]
for n2, psit2_R in enumerate(kpt2.psit_nG):
if same and n2 > n1:
continue
f2 = kpt2.f_n[n2]
nt_R = self.calculate_pair_density(n1, n2, kpt1, kpt2, q0,
invert)
nt_G = self.pwd.fft(nt_R * eikr_R) / N
vt_G = nt_G.copy()
vt_G *= -pi * vol / Gpk2_G
e = np.vdot(nt_G, vt_G).real * nspins * self.hybrid
if same and n1 == n2:
e /= 2
self.exx += e * f1 * f2
self.ekin -= 2 * e * f1 * f2
self.exx_skn[kpt1.s, kpt1.k, n1] += f2 * e
self.exx_skn[kpt2.s, kpt2.k, n2] += f1 * e
calculate_potential = not True
if calculate_potential:
vt_R = self.pwd.ifft(vt_G).conj() * eikr_R * N / vol
if kpt1 is kpt2 and not invert and n1 == n2:
kpt1.vt_nG[n1] = 0.5 * f1 * vt_R
if invert:
kpt1.Htpsit_nG[n1] += \
f2 * nspins * psit2_R.conj() * vt_R
else:
kpt1.Htpsit_nG[n1] += f2 * nspins * psit2_R * vt_R
if kpt1 is not kpt2:
if invert:
kpt2.Htpsit_nG[n2] += (f1 * nspins *
psit1_R.conj() * vt_R)
else:
kpt2.Htpsit_nG[n2] += (f1 * nspins *
psit1_R * vt_R.conj())
def calculate_paw_correction(self):
exx = 0
deg = 2 // self.nspins # spin degeneracy
for a, D_sp in self.density.D_asp.items():
setup = self.setups[a]
for D_p in D_sp:
D_ii = unpack2(D_p)
ni = len(D_ii)
for i1 in range(ni):
for i2 in range(ni):
A = 0.0
for i3 in range(ni):
p13 = packed_index(i1, i3, ni)
for i4 in range(ni):
p24 = packed_index(i2, i4, ni)
A += setup.M_pp[p13, p24] * D_ii[i3, i4]
p12 = packed_index(i1, i2, ni)
exx -= self.hybrid / deg * D_ii[i1, i2] * A
if setup.X_p is not None:
exx -= self.hybrid * np.dot(D_p, setup.X_p)
exx += self.hybrid * setup.ExxC
return exx
def calculate_pair_density(self, n1, n2, kpt1, kpt2, q, invert):
if invert:
nt_G = kpt1.psit_nG[n1].conj() * kpt2.psit_nG[n2].conj()
else:
nt_G = kpt1.psit_nG[n1].conj() * kpt2.psit_nG[n2]
Q_aL = {}
for a, P1_ni in kpt1.P_ani.items():
P1_i = P1_ni[n1]
P2_i = kpt2.P_ani[a][n2]
if invert:
D_ii = np.outer(P1_i.conj(), P2_i.conj())
else:
D_ii = np.outer(P1_i.conj(), P2_i)
D_p = pack(D_ii)
Q_aL[a] = np.dot(D_p, self.setups[a].Delta_pL)
self.ghat.add(nt_G, Q_aL, q)
return nt_G
if __name__ == '__main__':
import sys
from gpaw import GPAW
from gpaw.mpi import serial_comm
calc = GPAW(sys.argv[1], txt=None, communicator=serial_comm)
alpha = 5.0
e = calc.get_potential_energy()
exx = HybridXC('EXX', alpha=alpha)
e2 = calc.get_xc_difference(exx)
print e, e + e2, exx.exx
| qsnake/gpaw | gpaw/xc/hybridk.py | Python | gpl-3.0 | 14,413 |
from __future__ import print_function
__author__ = 'abuddenberg'
from os.path import exists
import sys
from domain import GcisObject
def warning(*objs):
print("WARNING: ", *objs, file=sys.stderr)
#This function is for adding images to existing figures
def move_images_to_gcis(webform_client, gcis_client, webform_url, gcis_id, report_id, subset_images=None):
figure = webform_client.get_webform(webform_url, download_images=True)
#Now identifiers don't need to be matched
figure.identifier = gcis_id
#If a subset of identifiers has been provided, only process those
if subset_images:
images_to_process = [image for image in figure.images if image.identifier in subset_images]
else:
images_to_process = figure.images
for image in images_to_process:
if not exists(image.local_path):
raise Exception('Local file missing ' + image.local_path)
if not gcis_client.image_exists(image.identifier):
print('Creating image: {img}'.format(img=image.identifier))
gcis_client.create_image(image, report_id=report_id, figure_id=figure.identifier)
def sync_dataset_metadata(gcis_client, datasets, skip=[]):
for ds in [ds for ds in datasets if ds.identifier not in skip]:
gcis_client.create_or_update_dataset(ds)
# gcis_client.create_or_update_activity(ds.activity)
def realize_contributors(gcis_client, contributors):
for cont in contributors:
person = cont.person
org = cont.organization
if not person.id:
name_matches = gcis_client.lookup_person(person.first_name + ' ' + person.last_name)
if len(name_matches) == 1:
person.id = name_matches[0][0]
elif len(name_matches) == 0:
warning('No ID found for ' + person.first_name + ' ' + person.last_name)
else:
warning('Ambiguous results for ' + person.first_name + ' ' + person.last_name)
warning(name_matches)
if org and org.identifier in (None, '') and org.name not in (None, ''):
warning('No ID found for ' + org.name)
#Check if we missed any organizations in our hardcoding...
if not all(map(lambda c: c.organization is None or c.organization.identifier is not None, contributors)):
warning('Missing organizations: ', contributors)
def realize_parents(gcis_client, parents):
for parent in parents:
# print parent.publication_type_identifier, parent.label
if parent.url:
print(' '.join(('Using hint for', parent.publication_type_identifier, parent.label)))
continue
parent_matches = gcis_client.lookup_publication(parent.publication_type_identifier, parent.label)
if len(parent_matches) == 1:
matched_id, matched_name = parent_matches[0]
parent.url = '/{type}/{id}'.format(type=parent.publication_type_identifier, id=matched_id)
# Need the ability to dynamically identify and retrieve an instance of the parent publication.
# Here's a generic, for the time being.
parent.publication = GcisObject({'identifier': matched_id})
elif len(parent_matches) == 0:
warning(' '.join(('No ID found for', parent.publication_type_identifier, parent.label)))
else:
warning(' '.join(('Ambiguous results for', parent.publication_type_identifier, parent.label)))
warning(parent_matches)
| USGCRP/gcis-py-client | gcis_clients/sync_utils.py | Python | bsd-3-clause | 3,490 |
import os
import pytest
import pytest_mock
import cachspeak
from test_cachet import cachet_response
from test_utils import load_from_json
from test_teamspeak import TS3ConnectionMock
@pytest.fixture(scope='module')
def saved_status_no_updates():
return load_from_json('saved_status_no_updates.json')
@pytest.fixture(scope='module')
def saved_status_multi_updates():
return load_from_json('saved_status_multi_updates.json')
def create_test_persist_file(data, tmpdir_factory):
file_path = str(tmpdir_factory.mktemp('data').join('persist_file.db'))
with cachspeak.persistence.persistent_storage(file_path) as storage:
storage['last_status'] = data
return file_path
def test_main_no_updates(mocker, tmpdir_factory, saved_status_no_updates, cachet_response):
"""Assert that no messages are sent if no components were updated"""
mock_cachet = mocker.patch('cachetclient.cachet.Components.get')
mock_ts3 = mocker.patch('ts3.query.TS3Connection', new=TS3ConnectionMock)
mock_ts3.sendtextmessage = mocker.Mock()
mock_cachet.return_value = cachet_response
persist_file = create_test_persist_file(data=saved_status_no_updates, tmpdir_factory=tmpdir_factory)
config_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'fixtures', 'cachspeak.ini')
cachspeak.main(config_path=config_path, persist_path=persist_file)
assert mock_ts3.sendtextmessage.call_count == 0
def test_main_with_updates(mocker, tmpdir_factory, saved_status_multi_updates, cachet_response):
"""Assert that messages are sent for proper updated components"""
mock_cachet = mocker.patch('cachetclient.cachet.Components.get')
mock_ts3 = mocker.patch('ts3.query.TS3Connection', new=TS3ConnectionMock)
mock_ts3.sendtextmessage = mocker.Mock()
mock_cachet.return_value = cachet_response
persist_file = create_test_persist_file(data=saved_status_multi_updates, tmpdir_factory=tmpdir_factory)
config_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'fixtures', 'cachspeak.ini')
cachspeak.main(config_path=config_path, persist_path=persist_file, debug=True)
assert mock_ts3.sendtextmessage.call_count == 3
| enricoghdn/cachspeak | tests/test_cachspeak.py | Python | bsd-3-clause | 2,199 |
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('webapp',
url(r'^/?$', 'views.home', name='home'),
url(r'^auth_redirect$', 'views.auth_redirect', name='auth_redirect'),
url(r'^nights$', 'views.night_index', name='night_index'),
url(r'^song$', 'views.song_index', name='song_index'),
url(r'^create_song$', 'views.song_create', name='song_create'),
url(r'^song/(?P<key>[\w\d]+)$', 'views.song', name='song'),
url(r'^song/(?P<key>[\w\d]+).mp3$', 'views.song_mp3', name='song_mp3'),
url(r'^song/(?P<key>[\w\d]+)/edit$', 'views.song_edit', name='song_edit'),
url(r'^song/(?P<key>[\w\d]+)/wait$', 'views.song_wait_finished', name='song_wait_finished'),
url(r'^sign_out$', 'views.sign_out', name='sign_out'),
)
| beddit/sleep-musicalization-web | webapp/urls.py | Python | bsd-2-clause | 806 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import imagekit.models.fields
import brasilcomvc.portal.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='HomeBanner',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('image', imagekit.models.fields.ProcessedImageField(help_text='Imagem de alta resolução; será cortada para 1400x550.', upload_to=brasilcomvc.portal.models.homebanner_video_upload_to)),
('content', models.TextField(help_text='Conteúdo (HTML) para sobrepor a imagem no banner.')),
],
options={
},
bases=(models.Model,),
),
]
| brasilcomvc/brasilcomvc | brasilcomvc/portal/migrations/0001_homebanner.py | Python | apache-2.0 | 877 |
"""Installed backends command."""
# Copyright (c) 2001-2009 ElevenCraft Inc.
# See LICENSE for details.
from textwrap import dedent
from schevo.script.command import Command
from schevo.script import opt
usage = """\
schevo backends
Shows a list of installed backends and the options that each one
accepts."""
def _parser():
p = opt.parser(usage)
return p
class Backends(Command):
name = 'Installed Backends'
description = 'Show a list of installed backends.'
def main(self, arg0, args):
print
print
from schevo.backend import backends
for backend_name, backend_class in sorted(backends.iteritems()):
print backend_name, '-', backend_class.description
print '=' * (len(backend_name) + len(backend_class.description) + 3)
print 'Available options for --backend-args:'
print dedent(backend_class.backend_args_help).strip()
print
start = Backends
| Schevo/schevo | schevo/script/backends.py | Python | mit | 969 |
from django import forms
# from crispy_forms.helper import FormHelper, Layout
# from crispy_forms.layout import Field, Div, Row # , HTML
# from crispy_forms.bootstrap import FormActions # , TabHolder, Tab, \
# # PrependedAppendedText, PrependedText
# from backend_apps.utils.forms import smtSave, btnCancel, btnReset
from ..models.categoria import Categoria
class CategoriaForm(forms.ModelForm):
class Meta:
model = Categoria
fields = ('nombre',)
widgets = {
'nombre': forms.TextInput(
attrs={'class': 'form-control', 'placeholder':
'Ingrese nombre', 'required': 'true'}),
}
# class TipoForm(forms.ModelForm):
# class Meta:
# model = Tipo
# fields = ('nombre',)
# def __init__(self, *args, **kwargs):
# super(TipoForm, self).__init__(*args, **kwargs)
# self.fields['nombre'].help_text = u'<small class="help-error"></small> %s' % (u' ')
# self.fields['nombre'].widget.attrs = {'placeholder': 'Ingrese nombre', }
# self.helper = FormHelper()
# self.helper.form_class = 'js-validate form-vertical'
# self.helper.form_id = 'form'
# self.helper.layout = Layout(
# Field('nombre', css_class='input-required'),
# FormActions(
# smtSave(),
# btnCancel(),
# btnReset(),
# ),
# )
| gitdealdo/serva | apps/recetario/forms/categoria.py | Python | gpl-2.0 | 1,444 |
#!/usr/bin/python
import os
path = "../jwright/Desktop"
flist = os.listdir(path)
for file in flist:
if file.endswith("jpg"):
print file
| jjwright55/code_fragments | frags/python/testlistdir.py | Python | unlicense | 158 |
import numpy as np
from dnfpy.core.map2D import Map2D
import struct
def addressToCoordAEDAT1(address,res):
pol = address & 0x0001
YAddr = address >> 8 & 0x007f
XAddr = address >> 1 & 0x007f
return YAddr,XAddr,pol
def addressToCoordAEDAT2(address,res):
#XAddr = (address >> 17) & 0x00007FFF
#YAddr = (address >> 2) & 0x00007FFF
#pol = (address >> 1) & 0x00000001
xmask = 0x00fe
xshift = 1
ymask = 0x7f00
yshift = 8
pmask = 0x1
pshift = 0
XAddr = (address & xmask) >> xshift
YAddr = (address & ymask) >> yshift
pol = (address & pmask) >> pshift
return YAddr,XAddr,pol
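# Worked example (illustrative values): for the 16-bit AEDAT1 address 0x1234,
# pol = 0x1234 & 0x0001 = 0, YAddr = (0x1234 >> 8) & 0x7f = 18 and
# XAddr = (0x1234 >> 1) & 0x7f = 26, so addressToCoordAEDAT1(0x1234, 128)
# returns (18, 26, 0). addressToCoordAEDAT2 applies the same x/y/polarity
# masks to the low bits of the 32-bit AEDAT2 address word.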
def unpackAEDAT1(f,p):
f.seek(p)
byte = f.read(6)
p+=6
address,timeStamp = struct.unpack(">HI",byte)
return address,timeStamp,p
def unpackAEDAT2(f,p):
f.seek(p)
byte = f.read(8)
p+=8
address,timeStamp = struct.unpack(">II",byte)
#print(timeStamp)
return address,timeStamp,p
class AEDatReader(Map2D):
def __init__(self,name,size,fileName,dt=0.1,tick=10,offset=0):
"""
tick in us
timeStep in us
"""
self.offset = offset
super().__init__(name=name,size=size,fileName=fileName,dt=dt,tick=tick,timeStep=dt/(tick*1e-6))
def detectFormat(self,f):
p = 0 # pointer for byte
line = f.readline()
p += len(line)
print(line[0:2])
if line[0:2] == b'#!':
format = line[2:-2].decode()
line=f.readline()
p += len(line)
while line and line[0:1] == b'#':
line=f.readline()
p += len(line)
else:
format = "AER-DAT1.0"
return format,p
def _compute(self,timeStep,size):
self.dataTmp[...] = 0
address,timeStamp,self.p = self.unpack(self.f,self.p)
YAddr,XAddr,pol = self.addressToCoord(address,128)
if not(self.lastTimeStamp):
self.lastTimeStamp = timeStamp
self.lastTimeStamp = self.lastTimeStamp + timeStep
#print(self.lastTimeStamp/1e6)
while timeStamp < self.lastTimeStamp :
address,timeStamp,self.p = self.unpack(self.f,self.p)
YAddr,XAddr,pol = self.addressToCoord(address,128)
self.dataTmp[-YAddr,-XAddr] += 1
self._data[...] = self.dataTmp[:size,:size]
def reset(self):
super().reset()
fileName = self._init_kwargs['fileName']
self.f = open(fileName,"rb")
self.lastTimeStamp = None
self.dataTmp = np.zeros((128,128))
self.format,self.p = self.detectFormat(self.f)
print(self.offset)
self.p += self.offset
print(self.format,self.p)
if self.format == "AER-DAT1.0":
self.addressToCoord = addressToCoordAEDAT1
self.unpack = unpackAEDAT1
elif self.format == "AER-DAT2.0":
self.addressToCoord = addressToCoordAEDAT2
self.unpack = unpackAEDAT2
else:
print("unknown format " + str(self.format))
| bchappet/dnfpy | src/dnfpyUtils/camera/aedatReader.py | Python | gpl-2.0 | 3,057 |
from lib_nrf24 import * | CarlosPena00/Mobbi | Rasp/nrf/lib_nrf24/__init__.py | Python | mit | 23 |
import config
from flask import Flask
from flask.ext.migrate import Migrate
from flask.ext.sqlalchemy import SQLAlchemy
import os
from flask.ext.login import LoginManager
from flask.ext.openid import OpenID
from config import basedir
app = Flask(__name__)
app.config.from_object(config)
app.config
db = SQLAlchemy(app)
migrate = Migrate(app, db)
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'
oid = OpenID(app, os.path.join(basedir, 'tmp'))
from app import views, models
| Adynatos/egida | app/__init__.py | Python | mit | 491 |
###
# Copyright 2008-2011 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
import time
from diffcalc.util import DiffcalcException
try:
from gda.device.scannable import ScannableBase, ScannaleMotionBase
except ImportError:
class Scannable(object):
pass
class ScannableBase(Scannable):
"""Implementation of a subset of OpenGDA's Scannable interface
"""
level = 5
inputNames = []
extraNames = []
outputFormat = []
def isBusy(self):
raise NotImplementedError()
def rawGetPosition(self):
raise NotImplementedError()
def rawAsynchronousMoveTo(self, newpos):
raise NotImplementedError()
def waitWhileBusy(self):
while self.isBusy():
time.sleep(.1)
def getPosition(self):
return self.rawGetPosition()
def checkPositionValid(self, externalPosition):
raise NotImplementedError()
def asynchronousMoveTo(self, newpos):
self.rawAsynchronousMoveTo(newpos)
def atScanStart(self):
pass
def atScanEnd(self):
pass
def atCommandFailure(self):
pass
###
def __repr__(self):
pos = self.getPosition()
formattedValues = self.formatPositionFields(pos)
if len(tuple(self.getInputNames()) + tuple(self.getExtraNames())) > 1:
result = self.getName() + ': '
else:
result = ''
names = tuple(self.getInputNames()) + tuple(self.getExtraNames())
for name, val in zip(names, formattedValues):
result += ' ' + name + ': ' + val
return result
###
def formatPositionFields(self, pos):
"""Returns position as array of formatted strings"""
# Make sure pos is a tuple or list
if type(pos) not in (tuple, list):
pos = tuple([pos])
# Sanity check
if len(pos) != len(self.getOutputFormat()):
raise Exception(
"In scannable '%s':number of position fields differs from "
"number format strings specified" % self.getName())
result = []
for field, frmt in zip(pos, self.getOutputFormat()):
if field is None:
result.append('???')
else:
s = (frmt % field)
## if width!=None:
## s = s.ljust(width)
result.append(s)
return result
def getName(self):
return self.name
def setName(self, value):
self.name = value
def getLevel(self):
return self.level
def setLevel(self, value):
self.level = value
def getInputNames(self):
return self.inputNames
def setInputNames(self, value):
self.inputNames = value
def getExtraNames(self):
return self.extraNames
def setExtraNames(self, value):
self.extraNames = value
def getOutputFormat(self):
return self.outputFormat
def setOutputFormat(self, value):
if type(value) not in (tuple, list):
raise TypeError(
"%s.setOutputFormat() expects tuple or list; not %s" %
(self.getName(), str(type(value))))
self.outputFormat = value
def __call__(self, newpos=None):
if newpos is None:
return self.getPosition()
self.asynchronousMoveTo(newpos)
class ScannableLimitsComponent(object):
def __init__(self):
# Array of lower limits (one for each input name). Null if no limits set. Any value within array may be null if
# that input has no corresponding limit.
self.internalLowerLim = None
# Array of upper limits (one for each input name). Null if no limits set. Any value within array may be null if
# that input has no corresponding limit.
self.internalUpperLim = None
self.hostScannable = None
self.limitType = "Scannable"
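    # Illustrative example (values assumed): for a two-field scannable,
    # internalLowerLim = [None, -10.0] would leave the first field
    # unconstrained while forbidding the second from going below -10.0
    # (internal/hardware/dial value).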
def getInternalLower(self):
return self.internalLowerLim
def getInternalUpper(self):
return self.internalUpperLim
def checkInternalPosition(self, internalPosition):
# If neither limits are set, return null indicating okay.
if self.internalLowerLim is None and self.internalUpperLim is None:
return None
# Check lower limits if set
if self.internalLowerLim:
for i, (lim, pos) in enumerate(zip(self.internalLowerLim, internalPosition)):
if pos < lim:
fieldName = "{}.{}".format(self.getHostScannable().getName(), self.getHostScannable().getInputNames()[i])
return "{} limit violation on {}: {} < {} (internal/hardware/dial values).".format(
self.limitType, fieldName, pos, lim)
# Check upper limits if set
if self.internalUpperLim:
for i, (lim, pos) in enumerate(zip(self.internalUpperLim, internalPosition)):
if pos > lim:
fieldName = "{}.{}".format(self.getHostScannable().getName(), self.getHostScannable().getInputNames()[i])
return "{} limit violation on {}: {} > {} (internal/hardware/dial values).".format(
self.limitType, fieldName, pos, lim)
return None
def checkPositionLength(self, positionArray):
if len(positionArray) != len(self.getHostScannable().getInputNames()):
raise DiffcalcException(
"Expected position of length {} but got position of length {}".format(len(self.getHostScannable().getInputNames().length),
len(positionArray)))
def setInternalUpper(self, internalUpperLim, index=None, length=None):
if index is not None:
if not self.internalUpperLim:
self.internalUpperLim = [None] * length
self.internalUpperLim[index] = internalUpperLim
else:
if internalUpperLim:
self.checkPositionLength(internalUpperLim)
self.internalUpperLim = internalUpperLim
def setInternalLower(self, internalLowerLim, index=None, length=None):
if index is not None:
if not self.internalLowerLim:
self.internalLowerLim = [None] * length
self.internalLowerLim[index] = internalLowerLim
else:
if internalLowerLim:
self.checkPositionLength(internalLowerLim)
self.internalLowerLim = internalLowerLim
def setHostScannable(self, hostScannable):
self.hostScannable = hostScannable
def getHostScannable(self):
return self.hostScannable
class ScannableMotionBase(ScannableBase):
"""Implementation of a subset of OpenGDA's ScannableMotion interface
"""
def __init__(self):
self.limitsComponent = ScannableLimitsComponent()
self.limitsComponent.setHostScannable(self)
def asynchronousMoveTo(self, newpos):
report = self.checkPositionValid([newpos,])
if report:
raise DiffcalcException(report)
ScannableBase.asynchronousMoveTo(self, newpos)
def checkPositionValid(self, externalPosition):
limitsComponentMsg = self.limitsComponent.checkInternalPosition(externalPosition)
if limitsComponentMsg:
return limitsComponentMsg
return None
def getLowerInnerLimit(self):
limits = self.limitsComponent.getInternalLower()
try:
return limits[0]
except TypeError:
return None
def getUpperInnerLimit(self):
limits = self.limitsComponent.getInternalUpper()
try:
return limits[0]
except TypeError:
return None
class ScannableAdapter(Scannable):
'''Wrap up a Scannable and give it a new name and optionally an offset
    (added to the delegate position when reading and subtracted when setting)
'''
def __init__(self, delegate_scn, name, offset=0):
assert len(delegate_scn.getInputNames()) == 1
assert len(delegate_scn.getExtraNames()) == 0
self.delegate_scn = delegate_scn
self.name = name
self.offset = offset
def __getattr__(self, name):
return getattr(self.delegate_scn, name)
def getName(self):
return self.name
def getInputNames(self):
return [self.name]
def getPosition(self):
return self.delegate_scn.getPosition() + self.offset
def asynchronousMoveTo(self, newpos):
self.delegate_scn.asynchronousMoveTo(newpos - self.offset)
def __repr__(self):
pos = self.getPosition()
formatted_values = self.delegate_scn.formatPositionFields(pos)
return self.name + ': ' + formatted_values[0] + ' ' + self.get_hint()
def get_hint(self):
if self.offset:
offset_hint = ' + ' if self.offset >= 0 else ' - '
offset_hint += str(self.offset)
else:
offset_hint = ''
return '(%s%s)' % (self.delegate_scn.name, offset_hint)
def __call__(self, newpos=None):
if newpos is None:
return self.getPosition()
self.asynchronousMoveTo(newpos)
class SingleFieldDummyScannable(ScannableMotionBase):
def __init__(self, name, initial_position=0.):
ScannableMotionBase.__init__(self)
self.name = name
self.inputNames = [name,]
self.outputFormat = ['% 6.4f',]
self.level = 3
self._current_position = float(initial_position)
def isBusy(self):
return False
def waitWhileBusy(self):
return
def asynchronousMoveTo(self, new_position):
report = self.checkPositionValid([new_position,])
if report:
raise DiffcalcException(report)
self._current_position = float(new_position)
def getPosition(self):
return self._current_position
def setLowerDummyLimit(self, lowLimit):
self.limitsComponent.setInternalLower(lowLimit, 0, len(self.getInputNames()))
def setUpperDummyLimit(self, upperLimit):
self.limitsComponent.setInternalUpper(upperLimit, 0, len(self.getInputNames()))
class DummyPD(SingleFieldDummyScannable):
"""For compatability with the gda's dummy_pd module"""
pass
class MultiInputExtraFieldsDummyScannable(ScannableMotionBase):
'''Multi input Dummy PD Class supporting input and extra fields'''
def __init__(self, name, inputNames, extraNames):
ScannableMotionBase.__init__(self)
self.setName(name)
self.setInputNames(inputNames)
self.setExtraNames(extraNames)
self.setOutputFormat(['%6.4f'] * (len(inputNames) + len(extraNames)))
self.setLevel(3)
self.currentposition = [0.0] * len(inputNames)
def isBusy(self):
return 0
def asynchronousMoveTo(self, new_position):
if type(new_position) == type(1) or type(new_position) == type(1.0):
new_position = [new_position]
msg = "Wrong new_position size"
assert len(new_position) == len(self.currentposition), msg
for i in range(len(new_position)):
if new_position[i] != None:
self.currentposition[i] = float(new_position[i])
def getPosition(self):
extraValues = range(100, 100 + (len(self.getExtraNames())))
return self.currentposition + map(float, extraValues)
class ZeroInputExtraFieldsDummyScannable(ScannableMotionBase):
'''Zero input/extra field dummy pd
'''
def __init__(self, name):
ScannableMotionBase.__init__(self)
self.setName(name)
self.setInputNames([])
self.setOutputFormat([])
def isBusy(self):
return 0
def asynchronousMoveTo(self, new_position):
pass
def getPosition(self):
pass
class ScannableGroup(ScannableBase):
"""wraps up motors. Simulates motors if non given."""
def __init__(self, name, motorList):
self.setName(name)
# Set input format
motorNames = []
for scn in motorList:
motorNames.append(scn.getName())
self.setInputNames(motorNames)
# Set output format
frmt = []
for motor in motorList:
frmt.append(motor.getOutputFormat()[0])
self.setOutputFormat(frmt)
self.__motors = motorList
def asynchronousMoveTo(self, position):
# if input has any Nones, then replace these with the current positions
if None in position:
position = list(position)
current = self.getPosition()
for idx, val in enumerate(position):
if val is None:
position[idx] = current[idx]
for scn, pos in zip(self.__motors, position):
scn.asynchronousMoveTo(pos)
def getPosition(self):
return [scn.getPosition() for scn in self.__motors]
def getGroupMembers(self):
return self.__motors
def getGroupMember(self, name):
for scn in self.__motors:
if scn.getName() == name:
return scn
return None
def isBusy(self):
for scn in self.__motors:
if scn.isBusy():
return True
return False
def configure(self):
pass
def checkPositionValid(self, positionArray):
msg = "Wrong position array size for scannable group"
assert len(self.__motors) == len(positionArray), msg
for motor, pos in zip(self.__motors, positionArray):
reason = motor.checkPositionValid([pos,])
if reason:
return reason
return None
class ScannableMotionWithScannableFieldsBase(ScannableMotionBase):
'''
    This extended version of ScannableMotionBase contains a
    completeInstantiation() method which adds a dictionary of
    MotionScannableParts to an instance. Each part allows one of the
    instance's fields to be interacted with as if it itself were a scannable.
    Fields are dynamically added to the instance linking to these parts,
    allowing dotted access from Jython. They may also be accessed using
    Jython container access methods (via the __getitem__() method). To access
    them from Java use the getComponent(name) method.
    When moving a part (via either a pos or scan command), the part calls
    the parent to perform the actual task. The part's asynchronousMoveTo
    command will call the parent with a list of None values except for the
    field it represents, which will be passed the desired position value.
    The asynchronousMoveTo method in a class that inherits from this base
    class must then handle these Nones. In some cases the method may
    actually be able to move the underlying system associated with one
    field individually from the others. If this is not possible, the best
    behaviour may be to simply not support this behaviour and raise an
    exception, or alternatively to substitute the None values with the
    actual current positions of the parent scannable's associated fields.
    ScannableMotionBaseWithMemory() inherits from this class and provides a
    solution useful for some scenarios: it keeps track of the last position
    moved to, and replaces the Nones in an asynchronousMoveTo request with
    these values. There are a number of dangers associated with this which
    are addressed in that class's documentation, but it provides a way to
    move one axis within a group of non-orthogonal axes while keeping the
    others still.
'''
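    # Illustrative sketch (names assumed, not part of the original code): for
    # a subclass instance 'diff' with input fields 'alpha' and 'delta' that
    # has called completeInstantiation(), each field behaves as a scannable:
    #
    #     diff.alpha.asynchronousMoveTo(5)    # dotted access from Jython
    #     diff['delta'].getPosition()         # container-style access
    #
    # Moving 'alpha' forwards [5, None] to the parent's asynchronousMoveTo,
    # which must decide how to treat the None passed for 'delta'.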
childrenDict = {}
numInputFields = None
numExtraFields = None
def completeInstantiation(self):
'''This method should be called at the end of all user defined
        constructors'''
# self.validate()
self.numInputFields = len(self.getInputNames())
self.numExtraFields = len(self.getExtraNames())
self.addScannableParts()
self.autoCompletePartialMoveToTargets = False
self.positionAtScanStart = None
def setAutoCompletePartialMoveToTargets(self, b):
self.autoCompletePartialMoveToTargets = b
def atScanStart(self):
self.positionAtScanStart = self.getPosition()
def atCommandFailure(self):
self.positionAtScanStart = None
def atScanEnd(self):
self.positionAtScanStart = None
###
def __repr__(self):
pos = self.getPosition()
formattedValues = self.formatPositionFields(pos)
if len(tuple(self.getInputNames()) + tuple(self.getExtraNames())) > 1:
result = self.getName() + ': '
else:
result = ''
names = tuple(self.getInputNames()) + tuple(self.getExtraNames())
for name, val in zip(names, formattedValues):
result += ' ' + name + ': ' + val
return result
###
def formatPositionFields(self, pos):
"""Returns position as array of formatted strings"""
# Make sure pos is a tuple or list
if type(pos) not in (tuple, list):
pos = tuple([pos])
# Sanity check
if len(pos) != len(self.getOutputFormat()):
raise Exception(
"In scannable '%s':number of position fields differs from "
"number format strings specified" % self.getName())
result = []
for field, frmt in zip(pos, self.getOutputFormat()):
if field is None:
result.append('???')
else:
s = (frmt % field)
## if width!=None:
## s = s.ljust(width)
result.append(s)
return result
###
def addScannableParts(self):
'''
Creates an array of MotionScannableParts each of which allows access to
the scannable's fields. See this class's documentation for more info.
'''
self.childrenDict = {}
# Add parts to access the input fields
for index in range(len(self.getInputNames())):
scannableName = self.getInputNames()[index]
self.childrenDict[scannableName] = self.MotionScannablePart(
scannableName, index, self, isInputField=1)
# Add parts to access the extra fields
for index in range(len(self.getExtraNames())):
scannableName = self.getExtraNames()[index]
self.childrenDict[scannableName] = self.MotionScannablePart(
scannableName, index + len(self.getInputNames()),
self, isInputField=0)
def asynchronousMoveTo(self, newpos):
if self.autoCompletePartialMoveToTargets:
newpos = self.completePosition(newpos)
ScannableBase.asynchronousMoveTo(self, newpos)
def completePosition(self, position):
'''
If position contains any null or None values, these are replaced with
        the corresponding fields from the scannable's current position and then
returned.'''
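        # Illustrative example (positions assumed): if the scannable currently
        # sits at [1.0, 2.0, 3.0], completePosition([None, 5.0, None]) returns
        # [1.0, 5.0, 3.0].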
# Just return position if it does not need padding
if None not in position:
return position
if self.positionAtScanStart is not None:
basePosition = self.positionAtScanStart
else:
basePosition = self.getPosition()[:self.numInputFields]
for i in range(self.numInputFields):
if position[i] is None:
position[i] = basePosition[i]
return position
def __getattr__(self, name):
try:
return self.childrenDict[name]
        except KeyError:
raise AttributeError("No child named:" + name)
def __getitem__(self, key):
'''Provides container like access from Jython'''
return self.childrenDict[key]
def getPart(self, name):
        '''Returns the named component scannable'''
return self.childrenDict[name]
class MotionScannablePart(ScannableMotionBase):
'''
A scannable to be placed in the parent's childrenDict that allows
access to the parent's individual fields.'''
def __init__(self, scannableName, index, parentScannable,
isInputField):
ScannableMotionBase.__init__(self)
self.setName(scannableName)
if isInputField:
self.setInputNames([scannableName])
else:
self.setExtraNames([scannableName])
self.index = index
self.parentScannable = parentScannable
self.setOutputFormat(
[self.parentScannable.getOutputFormat()[index]])
def getParent(self):
return self.parentScannable
def isBusy(self):
return self.parentScannable.isBusy()
def asynchronousMoveTo(self, new_position):
if self.parentScannable.isBusy():
raise Exception(
self.parentScannable.getName() + "." + self.getName() +
" cannot be moved because " +
self.parentScannable.getName() + " is already moving")
toMoveTo = [None] * len(self.parentScannable.getInputNames())
toMoveTo[self.index] = new_position
self.parentScannable.asynchronousMoveTo(toMoveTo)
def moveTo(self, new_position):
self.asynchronousMoveTo(new_position)
self.waitWhileBusy()
def getPosition(self):
return self.parentScannable.getPosition()[self.index]
def getLowerGdaLimits(self):
limit = self.parentScannable.getLowerGdaLimits()
return None if limit is None else [limit[self.index]]
def getUpperGdaLimits(self):
limit = self.parentScannable.getUpperGdaLimits()
return None if limit is None else [limit[self.index]]
def setLowerGdaLimits(self, lowerLim):
try:
if len(lowerLim) != 1:
raise DiffcalcException("Could not setLowerGdaLmits() on scannable {} to {}. This scannable has only one field.".format(
self.getName(), str(lowerLim)))
lowerLimValue = lowerLim[0]
except TypeError:
lowerLimValue = lowerLim
limit = self.parentScannable.getLowerGdaLimits()
if not limit:
limit = [None] * len(self.parentScannable.getInputNames())
limit[self.index] = lowerLimValue
self.parentScannable.setLowerGdaLimits(limit)
def setUpperGdaLimits(self, upperLim):
try:
if len(upperLim) != 1:
raise DiffcalcException("Could not setUpperGdaLmits() on scannable {} to {}. This scannable has only one field.".format(
self.getName(), str(upperLim)))
upperLimValue = upperLim[0]
except TypeError:
upperLimValue = upperLim
limit = self.parentScannable.getUpperGdaLimits()
if not limit:
limit = [None] * len(self.parentScannable.getInputNames())
limit[self.index] = upperLimValue
self.parentScannable.setUpperGdaLimits(limit)
def __str__(self):
return self.__repr__()
def __repr__(self):
# Get the name of this field
# (assume its an input field first and correct if wrong)
name = self.getInputNames()[0]
if name == 'value':
name = self.getExtraNames()[0]
parentName = self.parentScannable.getName()
return parentName + "." + name + " : " + str(self.getPosition())
| DiamondLightSource/diffcalc | diffcalc/gdasupport/minigda/scannable.py | Python | gpl-3.0 | 25,322 |
#
# Copyright {{ cookiecutter.author_name }}, {{ cookiecutter.initial_year_to_release }}
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
utils.test_environment
----------------------
Test functions for the executing environment
"""
import unittest
import subprocess
from {{ cookiecutter.package_name }} import __version__
from {{ cookiecutter.package_name }}.utils.environment import {{ cookiecutter.package_name }}_version
from {{ cookiecutter.package_name }}.utils.environment import python_version
def shell_python():
"""
Compare the default python 3 version with a shell call to the actual interpreter.
"""
shellpython_version = subprocess.check_output(["python3", "-V", "/dev/null"])
return str(shellpython_version, 'utf-8')
class TestUtilsEnvironment(unittest.TestCase):
"""
Test utils functions.
- show released program version
- show Python version
"""
def test_program_version(self):
self.assertEqual({{ cookiecutter.package_name }}_version(), __version__)
def test_python_version(self):
self.assertIn(python_version(), shell_python())
def test_python_major_version(self):
self.assertEqual(python_version('major'), '3')
| berrak/cookiecutter-py3starter | {{cookiecutter.github_repo_name}}/{{cookiecutter.package_name}}/utils/test/test_environment.py | Python | apache-2.0 | 1,716 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from mxconsole.lib.native import pywrap_tensorflow_fs as pywrap_tensorflow
__version__ = pywrap_tensorflow.__version__
__git_version__ = pywrap_tensorflow.__git_version__
__compiler_version__ = pywrap_tensorflow.__compiler_version__
VERSION = __version__
GIT_VERSION = __git_version__
COMPILER_VERSION = __compiler_version__
GRAPH_DEF_VERSION = pywrap_tensorflow.GRAPH_DEF_VERSION
GRAPH_DEF_VERSION_MIN_CONSUMER = (
pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_CONSUMER)
GRAPH_DEF_VERSION_MIN_PRODUCER = (
pywrap_tensorflow.GRAPH_DEF_VERSION_MIN_PRODUCER)
__all__ = [
"__version__",
"__git_version__",
"__compiler_version__",
"COMPILER_VERSION",
"GIT_VERSION",
"GRAPH_DEF_VERSION",
"GRAPH_DEF_VERSION_MIN_CONSUMER",
"GRAPH_DEF_VERSION_MIN_PRODUCER",
"VERSION",
]
| bravomikekilo/mxconsole | mxconsole/framework/versions.py | Python | apache-2.0 | 1,634 |
import datetime
import pytest
import virtool.history.db
from aiohttp.test_utils import make_mocked_coro
class TestAdd:
async def test(
self, snapshot, dbi, static_time, test_otu_edit, test_change, tmp_path, config
):
app = {"db": dbi, "config": config}
old, new = test_otu_edit
change = await virtool.history.db.add(
app, "edit", old, new, "Edited {}".format(new["name"]), "test"
)
assert change == snapshot
assert await dbi.history.find_one() == snapshot
async def test_create(
self, snapshot, dbi, static_time, test_otu_edit, test_change, tmp_path, config
):
app = {"db": dbi, "config": config}
        # There is no old document because this is a change document for an otu creation operation.
old = None
new, _ = test_otu_edit
description = "Created {}".format(new["name"])
change = await virtool.history.db.add(
app, "create", old, new, description, "test"
)
assert change == snapshot
assert await dbi.history.find_one() == snapshot
async def test_remove(
self, snapshot, dbi, static_time, test_otu_edit, test_change, tmp_path, config
):
"""
Test that the addition of a change due to otu removal inserts the expected change document.
"""
app = {"db": dbi, "config": config}
# There is no new document because this is a change document for a otu removal operation.
new = None
old, _ = test_otu_edit
description = "Removed {}".format(old["name"])
change = await virtool.history.db.add(
app, "remove", old, new, description, "test"
)
assert change == snapshot
assert await dbi.history.find_one() == snapshot
@pytest.mark.parametrize("file", [True, False])
async def test_get(file, mocker, snapshot, dbi, fake, tmp_path, config):
user = await fake.users.insert()
await dbi.history.insert_one(
{
"_id": "baz.2",
"diff": "file" if file else {"foo": "bar"},
"user": {"id": user["_id"]},
}
)
mocker.patch(
"virtool.history.utils.read_diff_file", make_mocked_coro(return_value="loaded")
)
app = {"db": dbi, "config": config}
assert await virtool.history.db.get(app, "baz.2") == snapshot
@pytest.mark.parametrize("exists", [True, False])
async def test_get_most_recent_change(exists, snapshot, dbi, static_time):
"""
Test that the most recent change document is returned for the given ``otu_id``.
"""
# First change is 3 days before the second
delta = datetime.timedelta(3)
if exists:
await dbi.history.insert_many(
[
{
"_id": "6116cba1.1",
"description": "Description",
"method_name": "update",
"created_at": static_time.datetime - delta,
"user": {"id": "test"},
"otu": {"id": "6116cba1", "name": "Prunus virus F", "version": 1},
"index": {"id": "unbuilt"},
},
{
"_id": "6116cba1.2",
"description": "Description number 2",
"method_name": "update",
"created_at": static_time.datetime,
"user": {"id": "test"},
"otu": {"id": "6116cba1", "name": "Prunus virus F", "version": 2},
"index": {"id": "unbuilt"},
},
]
)
return_value = await virtool.history.db.get_most_recent_change(dbi, "6116cba1")
assert return_value == snapshot
@pytest.mark.parametrize("remove", [True, False])
async def test_patch_to_version(remove, snapshot, dbi, create_mock_history):
await create_mock_history(remove=remove)
app = {"db": dbi}
current, patched, reverted_change_ids = await virtool.history.db.patch_to_version(
app, "6116cba1", 1
)
assert current == snapshot
assert patched == snapshot
assert reverted_change_ids == snapshot
| igboyes/virtool | tests/history/test_db.py | Python | mit | 4,168 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-31 18:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ActiveAdminComments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('namespace', models.CharField(max_length=255)),
('body', models.TextField()),
('resource_id', models.CharField(max_length=255)),
('resource_type', models.CharField(max_length=255)),
('author_id', models.IntegerField()),
('author_type', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'active_admin_comments',
},
),
migrations.CreateModel(
name='CkeditorAssets',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('data_file_name', models.CharField(max_length=255)),
('data_content_type', models.CharField(blank=True, max_length=255, null=True)),
('data_file_size', models.IntegerField(blank=True, null=True)),
('assetable_id', models.IntegerField(blank=True, null=True)),
('assetable_type', models.CharField(blank=True, max_length=30, null=True)),
('type', models.CharField(blank=True, max_length=30, null=True)),
('width', models.IntegerField(blank=True, null=True)),
('height', models.IntegerField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'ckeditor_assets',
},
),
migrations.CreateModel(
name='SchemaMigrations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.CharField(max_length=255, unique=True)),
],
options={
'db_table': 'schema_migrations',
},
),
migrations.CreateModel(
name='UserSessions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'db_table': 'user_sessions',
},
),
]
| LernaProject/Lerna | dbtrash/migrations/0001_initial.py | Python | gpl-2.0 | 3,042 |
import hazelcast
from hazelcast.core import HazelcastJsonValue
from hazelcast.predicate import less_or_equal
from hazelcast.projection import single_attribute, multi_attribute
client = hazelcast.HazelcastClient()
people = client.get_map("people").blocking()
people.put_all(
{
1: HazelcastJsonValue({"name": "Philip", "age": 46}),
2: HazelcastJsonValue({"name": "Elizabeth", "age": 44}),
3: HazelcastJsonValue({"name": "Henry", "age": 13}),
4: HazelcastJsonValue({"name": "Paige", "age": 15}),
}
)
names = people.project(single_attribute("name"))
print("Names of the people are %s." % names)
children_names = people.project(single_attribute("name"), less_or_equal("age", 18))
print("Names of the children are %s." % children_names)
names_and_ages = people.project(multi_attribute("name", "age"))
print("Names and ages of the people are %s." % names_and_ages)
client.shutdown()
| hazelcast/hazelcast-python-client | examples/projections/projections_example.py | Python | apache-2.0 | 924 |