# Lint as: python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for control_flow module."""
# Unfortunately pylint has false positives when nonlocal is present.
# pylint:disable=unused-variable
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import six
from tensorflow.python.autograph.operators import control_flow
from tensorflow.python.autograph.utils import ag_logging
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ForLoopTest(test.TestCase):
def test_tensor(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
control_flow.for_stmt(
constant_op.constant([1, 2, 3, 4]),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(self.evaluate(s), (1234,))
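# Illustrative aside (not part of the original test): for_stmt is AutoGraph's
# functional form of a plain Python for loop, so the call above corresponds
# roughly to the eager sketch below; get_state/set_state exist so the loop
# machinery can capture and restore the `nonlocal` variable when the loop is
# staged as a graph op.
#
#   s = 0
#   for i in [1, 2, 3, 4]:
#     s = s * 10 + i  # 1 -> 12 -> 123 -> 1234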
def test_range_tensor(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
control_flow.for_stmt(
math_ops.range(5),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(self.evaluate(s), (1234,))
def test_range_tensor_explicit_limit_delta(self):
def body(i):
nonlocal s
s = s * 100 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
control_flow.for_stmt(
math_ops.range(-17, -3, 5),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(self.evaluate(s), (-171207,))
def test_range_tensor_explicit_limit_negative_delta(self):
def body(i):
nonlocal s
s = s * 100 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
control_flow.for_stmt(
math_ops.range(17, 3, -5),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(self.evaluate(s), (171207,))
def test_range_tensor_random_delta(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
random_one = random_ops.random_uniform((), 1, 2, dtype=dtypes.int32)
control_flow.for_stmt(
math_ops.range(0, 5, random_one),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(self.evaluate(s), (1234,))
def test_range_tensor_random_negative_delta(self):
def body(i):
nonlocal s
s = s * 100 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
random_neg_five = random_ops.random_uniform((), -5, -4, dtype=dtypes.int32)
control_flow.for_stmt(
math_ops.range(17, 3, random_neg_five),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(self.evaluate(s), (171207,))
def test_tensor_with_extra_test_object_vars(self):
class MutableObject(object):
field_1 = constant_op.constant(0, dtype=dtypes.int32)
field_2 = constant_op.constant(1, dtype=dtypes.int32)
state = MutableObject()
def body(i):
state.field_1 += i
state.field_2 *= i
def get_state():
return state.field_1, state.field_2
def set_state(loop_vars):
state.field_1, state.field_2 = loop_vars
control_flow.for_stmt(
iter_=constant_op.constant([1, 2, 3, 4]),
body=body,
extra_test=lambda: state.field_1 < 6,
get_state=get_state,
set_state=set_state,
symbol_names=('state.field_1', 'state.field_2'),
opts={})
self.assertEqual(self.evaluate((state.field_1, state.field_2)), (6, 6))
def test_python(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
control_flow.for_stmt(
range(5),
extra_test=lambda: True,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(s, 1234)
def test_python_generator_with_extra_test(self):
def new_generator():
for i in range(1, 5):
yield i
gen = new_generator()
def run_loop():
s = 0
c = 0
def body(i):
nonlocal s, c
s = s * 10 + i
c += 1
control_flow.for_stmt(
gen,
extra_test=lambda: c == 0, # Break after first iteration
body=body,
get_state=None,
set_state=None,
symbol_names=('s', 'c'),
opts={})
return s, c
self.assertEqual(run_loop(), (1, 1))
self.assertEqual(run_loop(), (2, 1))
self.assertEqual(run_loop(), (3, 1))
self.assertEqual(next(gen), 4)
def test_python_generator_with_extra_test_no_iterations(self):
def new_generator():
for i in range(5):
yield i
gen = new_generator()
def run_loop():
s = 0
def body(i):
nonlocal s
s = s * 10 + i
control_flow.for_stmt(
gen,
extra_test=lambda: False, # Break before loop
body=body,
get_state=None,
set_state=None,
symbol_names=('s',),
opts={})
return s
self.assertEqual(run_loop(), 0)
self.assertEqual(run_loop(), 0)
self.assertEqual(next(gen), 0)
def test_tf_dataset(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = constant_op.constant(0, dtype=dtypes.int64)
control_flow.for_stmt(
dataset_ops.Dataset.range(5),
extra_test=None,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(self.evaluate(s), (1234,))
def test_dataset_with_extra_test(self):
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = constant_op.constant(0, dtype=dtypes.int64)
control_flow.for_stmt(
dataset_ops.Dataset.range(5),
extra_test=lambda: s < 3,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(self.evaluate(s), (12,))
def test_dataset_with_extra_test_collection_vars(self):
def body(i):
nonlocal s
l[0] += i
s += i
def set_state(loop_vars):
nonlocal s
l[0], s = loop_vars
s = constant_op.constant(0, dtype=dtypes.int64)
l = [constant_op.constant(0, dtype=dtypes.int64)]
control_flow.for_stmt(
dataset_ops.Dataset.range(5),
extra_test=lambda: s < 3,
body=body,
get_state=lambda: (l[0], s),
set_state=set_state,
symbol_names=('l[0]', 's'),
opts={})
self.assertEqual(self.evaluate((l[0], s)), (3, 3))
def test_dataset_with_extra_test_iteration_limiting(self):
def body(it):
nonlocal i
with ops.control_dependencies((control_flow_ops.Assert(i < 3, (i,)),)):
i = it
def set_state(loop_vars):
nonlocal i
i, = loop_vars
i = constant_op.constant(0, dtype=dtypes.int64)
control_flow.for_stmt(
dataset_ops.Dataset.range(5),
extra_test=lambda: i < 3,
body=body,
get_state=lambda: (i,),
set_state=set_state,
symbol_names=('i',),
opts={})
self.assertEqual(self.evaluate(i), (3,))
def test_tf_dataset_no_loop_vars(self):
def body(i):
v.assign(v.read_value() * 10 + i)
v = variables.Variable(0, dtype=dtypes.int64)
self.evaluate(v.initializer)
# tf.function required for the automatic control dependencies, and because
# ops test for its presence.
@def_function.function
def test_fn():
control_flow.for_stmt(
dataset_ops.Dataset.range(5),
extra_test=None,
body=body,
get_state=lambda: (),
set_state=lambda _: None,
symbol_names=(),
opts={})
self.evaluate(test_fn())
self.assertEqual(self.evaluate(v.read_value()), 1234)
def test_tf_iterator(self):
# graph-mode iterators are only supported inside tf.function.
@def_function.function
def test_fn():
def body(i):
nonlocal s
s = s * 10 + i
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = constant_op.constant(0, dtype=dtypes.int64)
control_flow.for_stmt(
iter(dataset_ops.Dataset.range(5)),
extra_test=None,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
return s
self.assertAllEqual(test_fn(), 1234)
def test_tf_iterator_shape_invariants(self):
# graph-mode iterators are only supported inside tf.function.
@def_function.function
def test_fn():
def body(i):
nonlocal s
s = array_ops.concat([s, [i]], 0)
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = constant_op.constant([], dtype=dtypes.int64)
control_flow.for_stmt(
iter(dataset_ops.Dataset.range(5)),
extra_test=None,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={'shape_invariants': [(s, tensor_shape.TensorShape([None]))]})
return s
self.assertAllEqual(test_fn(), [0, 1, 2, 3, 4])
def test_tf_iterator_no_loop_vars(self):
def body(i):
v.assign(v.read_value() * 10 + i)
v = variables.Variable(0, dtype=dtypes.int64)
self.evaluate(v.initializer)
# tf.function required for the automatic control dependencies.
@def_function.function
def test_fn():
control_flow.for_stmt(
iter(dataset_ops.Dataset.range(5)),
extra_test=None,
body=body,
get_state=lambda: (),
set_state=lambda _: None,
symbol_names=(),
opts={})
self.evaluate(test_fn())
self.assertEqual(self.evaluate(v.read_value()), 1234)
def test_tf_ragged_tensor(self):
def body(i):
nonlocal s
s = s * 10 + i[0]
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
control_flow.for_stmt(
ragged_factory_ops.constant([[1], [2, 4], [3]]),
extra_test=None,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(self.evaluate(s), (123,))
def test_tf_ragged_tensor_higher_dimensional(self):
def body(i):
nonlocal s
s = s * 10 + i[0][0]
def set_state(loop_vars):
nonlocal s
s, = loop_vars
s = 0
ragged_3d = [
[[1], [1, 1], [1]],
[[2], [2]],
]
control_flow.for_stmt(
ragged_factory_ops.constant(ragged_3d),
extra_test=None,
body=body,
get_state=lambda: (s,),
set_state=set_state,
symbol_names=('s',),
opts={})
self.assertEqual(self.evaluate(s), (12,))
def test_tf_ragged_tensor_no_loop_vars(self):
v = variables.Variable(0, dtype=dtypes.int32)
self.evaluate(v.initializer)
def body(i):
v.assign(v.read_value() * 10 + i[0])
# tf.function required for the automatic control dependencies.
@def_function.function(autograph=False)
def test_fn():
control_flow.for_stmt(
ragged_factory_ops.constant([[1], [2, 4], [3]]),
extra_test=None,
body=body,
get_state=lambda: (),
set_state=lambda _: None,
symbol_names=(),
opts={})
self.evaluate(test_fn())
# Note: 123 = ((0*10 + 1)*10+2)*10+3 (first element of each row).
self.assertEqual(self.evaluate(v.read_value()), 123)
@test_util.run_all_in_graph_and_eager_modes
class WhileLoopTest(test.TestCase):
def test_tensor(self):
def body():
nonlocal i, s
s = s * 10 + i
i += 1
def set_state(loop_vars):
nonlocal i, s
i, s = loop_vars
i = 0
n = constant_op.constant(5)
s = 0
control_flow.while_stmt(
test=lambda: i < n,
body=body,
get_state=lambda: (i, s),
set_state=set_state,
symbol_names=('i', 's'),
opts={})
self.assertEqual(self.evaluate((i, s)), (5, 1234))
def test_tensor_with_side_effecting_condition(self):
v = variables.Variable(0)
# tf.function required for the automatic control dependencies.
@def_function.function
def test_fn():
def cond():
v.assign(v.read_value() * 10 + i)
return i < n
def body():
nonlocal i
i += 1
def set_state(loop_vars):
nonlocal i
i, = loop_vars
i = 0
n = constant_op.constant(5)
control_flow.while_stmt(
test=cond,
body=body,
get_state=lambda: (i,),
set_state=set_state,
symbol_names=('i',),
opts={})
return i
self.evaluate(v.initializer)
self.assertEqual(self.evaluate(test_fn()), (5,))
self.assertEqual(self.evaluate(v), (12345,))
def test_tensor_with_python_state(self):
class MutableObject(object):
field = constant_op.constant(0, dtype=dtypes.int32)
state = MutableObject()
def body():
nonlocal i
state.field = state.field * 10 + i
i += 1
def set_state(loop_vars):
nonlocal i
i, state.field = loop_vars
i = 0
n = constant_op.constant(5)
control_flow.while_stmt(
test=lambda: i < n,
body=body,
get_state=lambda: (i, state.field),
set_state=set_state,
symbol_names=('i', 'state.field'),
opts={})
self.assertEqual(self.evaluate((i, state.field)), (5, 1234))
def test_python(self):
def body():
nonlocal i, s
s = s * 10 + i
i += 1
i = 0
s = 0
n = 5
control_flow.while_stmt(
test=lambda: i < n,
body=body,
get_state=None,
set_state=None,
symbol_names=('i', 's'),
opts={})
self.assertEqual(s, 1234)
def test_python_with_tensor_state(self):
def body():
nonlocal i, s
s = s * 10 + i
i += 1
i = 0
s = constant_op.constant(0)
n = 5
control_flow.while_stmt(
test=lambda: i < n,
body=body,
get_state=None,
set_state=None,
symbol_names=('i', 's'),
opts={})
self.assertEqual(i, 5)
self.assertEqual(self.evaluate(s), 1234)
def test_python_while_infinite(self):
if not __debug__:
self.skipTest('Feature disabled in optimized mode.')
with test.mock.patch.object(control_flow, 'PYTHON_MAX_ITERATIONS', 100):
with self.assertRaisesRegexp(ValueError, 'iteration limit'):
control_flow.while_stmt(
test=lambda: True,
body=lambda: None,
get_state=None,
set_state=None,
symbol_names=(),
opts={})
def test_python_for_infinite(self):
if not __debug__:
self.skipTest('Feature disabled in optimized mode.')
with test.mock.patch.object(control_flow, 'PYTHON_MAX_ITERATIONS', 100):
with self.assertRaisesRegexp(ValueError, 'iteration limit'):
control_flow.for_stmt(
iter_=range(101),
extra_test=None,
body=lambda i: None,
get_state=None,
set_state=None,
symbol_names=(),
opts={})
def test_python_while_large_unroll_warning(self):
if not __debug__:
self.skipTest('Feature disabled in optimized mode.')
with test.mock.patch.object(
control_flow, 'INEFFICIENT_UNROLL_MIN_ITERATIONS', 10):
with ops.Graph().as_default():
out_capturer = six.StringIO()
with test.mock.patch.object(sys, 'stdout', out_capturer):
with test.mock.patch.object(ag_logging, 'echo_log_to_stdout', True):
def custom_iterator():
for i in range(11):
c = constant_op.constant(i)
yield c
i = 0
control_flow.for_stmt(
iter_=custom_iterator(),
extra_test=None,
body=lambda i: None,
get_state=None,
set_state=None,
symbol_names=(),
opts={})
self.assertTrue(re.match(
r'.* Large unrolled loop.*Const.*', out_capturer.getvalue()))
def test_python_for_large_unroll_warning(self):
if not __debug__:
self.skipTest('Feature disabled in optimized mode.')
with test.mock.patch.object(
control_flow, 'INEFFICIENT_UNROLL_MIN_ITERATIONS', 10):
with ops.Graph().as_default():
out_capturer = six.StringIO()
with test.mock.patch.object(sys, 'stdout', out_capturer):
with test.mock.patch.object(ag_logging, 'echo_log_to_stdout', True):
def body():
nonlocal i
gen_math_ops.add(i, 1)
i += 1
i = 0
control_flow.while_stmt(
test=lambda: i < 100,
body=body,
get_state=None,
set_state=None,
symbol_names=('i',),
opts={})
self.assertTrue(re.match(
r'.* Large unrolled loop.*Add.*', out_capturer.getvalue()))
@test_util.run_all_in_graph_and_eager_modes
class IfStmtTest(test.TestCase):
def test_tensor(self):
def test_fn(cond):
return control_flow.if_stmt(
cond=cond,
body=lambda: constant_op.constant(1),
orelse=lambda: constant_op.constant(-1),
get_state=lambda: (),
set_state=lambda _: None,
basic_symbol_names=('_',),
composite_symbol_names=())
self.assertEqual(1, self.evaluate(test_fn(constant_op.constant(True))))
self.assertEqual(-1, self.evaluate(test_fn(constant_op.constant(False))))
def test_tensor_multiple_returns(self):
def test_fn(cond):
return control_flow.if_stmt(
cond=cond,
body=lambda: (constant_op.constant(1), constant_op.constant(2)),
orelse=lambda: (constant_op.constant(-1), constant_op.constant(-2)),
get_state=lambda: (),
set_state=lambda _: None,
basic_symbol_names=('_',),
composite_symbol_names=())
self.assertEqual((1, 2), self.evaluate(test_fn(constant_op.constant(True))))
self.assertEqual((-1, -2),
self.evaluate(test_fn(constant_op.constant(False))))
def test_python(self):
def test_fn(cond):
return control_flow.if_stmt(
cond=cond,
body=lambda: 1,
orelse=lambda: -1,
get_state=lambda: (),
set_state=lambda _: None,
basic_symbol_names=('_',),
composite_symbol_names=())
self.assertEqual(1, test_fn(True))
self.assertEqual(-1, test_fn(False))
def test_python_multiple_returns(self):
def test_fn(cond):
return control_flow.if_stmt(
cond=cond,
body=lambda: (1, 2),
orelse=lambda: (-1, -2),
get_state=lambda: (),
set_state=lambda _: None,
basic_symbol_names=('_',),
composite_symbol_names=())
self.assertEqual((1, 2), test_fn(True))
self.assertEqual((-1, -2), test_fn(False))
if __name__ == '__main__':
test.main()
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import (
InvalidSignature, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.openssl.utils import _truncate_digest
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import (
AsymmetricSignatureContext, AsymmetricVerificationContext, ec
)
def _truncate_digest_for_ecdsa(ec_key_cdata, digest, backend):
"""
This function truncates digests that are longer than a given elliptic
curve key's length so they can be signed. Since elliptic curve keys are
much shorter than RSA keys, many digests (e.g. SHA-512) may require
truncation.
"""
_lib = backend._lib
_ffi = backend._ffi
group = _lib.EC_KEY_get0_group(ec_key_cdata)
with backend._tmp_bn_ctx() as bn_ctx:
order = _lib.BN_CTX_get(bn_ctx)
assert order != _ffi.NULL
res = _lib.EC_GROUP_get_order(group, order, bn_ctx)
assert res == 1
order_bits = _lib.BN_num_bits(order)
return _truncate_digest(digest, order_bits)
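# Illustrative aside (not taken from this module): ECDSA only consumes the
# leftmost order-bits of the digest, so a hash longer than the curve order is
# cut down before signing. For a curve with a 256-bit order, a SHA-512 digest
# would be reduced roughly like this:
#
#   digest = hashlib.sha512(b"message").digest()  # 64 bytes
#   truncated = digest[:256 // 8]                 # leftmost 32 bytes kept
#
# _truncate_digest additionally handles orders whose bit length is not a
# whole number of bytes.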
def _ec_key_curve_sn(backend, ec_key):
group = backend._lib.EC_KEY_get0_group(ec_key)
assert group != backend._ffi.NULL
nid = backend._lib.EC_GROUP_get_curve_name(group)
# The following check is to find EC keys with unnamed curves and raise
# an error for now.
if nid == backend._lib.NID_undef:
raise NotImplementedError(
"ECDSA certificates with unnamed curves are unsupported "
"at this time"
)
curve_name = backend._lib.OBJ_nid2sn(nid)
assert curve_name != backend._ffi.NULL
sn = backend._ffi.string(curve_name).decode('ascii')
return sn
def _mark_asn1_named_ec_curve(backend, ec_cdata):
"""
Set the named curve flag on the EC_KEY. This causes OpenSSL to
serialize EC keys along with their curve OID which makes
deserialization easier.
"""
backend._lib.EC_KEY_set_asn1_flag(
ec_cdata, backend._lib.OPENSSL_EC_NAMED_CURVE
)
def _sn_to_elliptic_curve(backend, sn):
try:
return ec._CURVE_TYPES[sn]()
except KeyError:
raise UnsupportedAlgorithm(
"{0} is not a supported elliptic curve".format(sn),
_Reasons.UNSUPPORTED_ELLIPTIC_CURVE
)
@utils.register_interface(AsymmetricSignatureContext)
class _ECDSASignatureContext(object):
def __init__(self, backend, private_key, algorithm):
self._backend = backend
self._private_key = private_key
self._digest = hashes.Hash(algorithm, backend)
def update(self, data):
self._digest.update(data)
def finalize(self):
ec_key = self._private_key._ec_key
digest = self._digest.finalize()
digest = _truncate_digest_for_ecdsa(ec_key, digest, self._backend)
max_size = self._backend._lib.ECDSA_size(ec_key)
assert max_size > 0
sigbuf = self._backend._ffi.new("char[]", max_size)
siglen_ptr = self._backend._ffi.new("unsigned int[]", 1)
res = self._backend._lib.ECDSA_sign(
0,
digest,
len(digest),
sigbuf,
siglen_ptr,
ec_key
)
assert res == 1
return self._backend._ffi.buffer(sigbuf)[:siglen_ptr[0]]
@utils.register_interface(AsymmetricVerificationContext)
class _ECDSAVerificationContext(object):
def __init__(self, backend, public_key, signature, algorithm):
self._backend = backend
self._public_key = public_key
self._signature = signature
self._digest = hashes.Hash(algorithm, backend)
def update(self, data):
self._digest.update(data)
def verify(self):
ec_key = self._public_key._ec_key
digest = self._digest.finalize()
digest = _truncate_digest_for_ecdsa(ec_key, digest, self._backend)
res = self._backend._lib.ECDSA_verify(
0,
digest,
len(digest),
self._signature,
len(self._signature),
ec_key
)
if res != 1:
self._backend._consume_errors()
raise InvalidSignature
return True
@utils.register_interface(ec.EllipticCurvePrivateKeyWithSerialization)
class _EllipticCurvePrivateKey(object):
def __init__(self, backend, ec_key_cdata):
self._backend = backend
_mark_asn1_named_ec_curve(backend, ec_key_cdata)
self._ec_key = ec_key_cdata
sn = _ec_key_curve_sn(backend, ec_key_cdata)
self._curve = _sn_to_elliptic_curve(backend, sn)
curve = utils.read_only_property("_curve")
def signer(self, signature_algorithm):
if isinstance(signature_algorithm, ec.ECDSA):
return _ECDSASignatureContext(
self._backend, self, signature_algorithm.algorithm
)
else:
raise UnsupportedAlgorithm(
"Unsupported elliptic curve signature algorithm.",
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
def public_key(self):
group = self._backend._lib.EC_KEY_get0_group(self._ec_key)
assert group != self._backend._ffi.NULL
curve_nid = self._backend._lib.EC_GROUP_get_curve_name(group)
public_ec_key = self._backend._lib.EC_KEY_new_by_curve_name(curve_nid)
assert public_ec_key != self._backend._ffi.NULL
public_ec_key = self._backend._ffi.gc(
public_ec_key, self._backend._lib.EC_KEY_free
)
point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key)
assert point != self._backend._ffi.NULL
res = self._backend._lib.EC_KEY_set_public_key(public_ec_key, point)
assert res == 1
return _EllipticCurvePublicKey(
self._backend, public_ec_key
)
def private_numbers(self):
bn = self._backend._lib.EC_KEY_get0_private_key(self._ec_key)
private_value = self._backend._bn_to_int(bn)
return ec.EllipticCurvePrivateNumbers(
private_value=private_value,
public_numbers=self.public_key().public_numbers()
)
def private_bytes(self, encoding, format, encryption_algorithm):
evp_pkey = self._backend._lib.EVP_PKEY_new()
assert evp_pkey != self._backend._ffi.NULL
evp_pkey = self._backend._ffi.gc(
evp_pkey, self._backend._lib.EVP_PKEY_free
)
res = self._backend._lib.EVP_PKEY_set1_EC_KEY(evp_pkey, self._ec_key)
assert res == 1
return self._backend._private_key_bytes(
encoding,
format,
encryption_algorithm,
self._backend._lib.PEM_write_bio_ECPrivateKey,
evp_pkey,
self._ec_key
)
@utils.register_interface(ec.EllipticCurvePublicKeyWithNumbers)
class _EllipticCurvePublicKey(object):
def __init__(self, backend, ec_key_cdata):
self._backend = backend
_mark_asn1_named_ec_curve(backend, ec_key_cdata)
self._ec_key = ec_key_cdata
sn = _ec_key_curve_sn(backend, ec_key_cdata)
self._curve = _sn_to_elliptic_curve(backend, sn)
curve = utils.read_only_property("_curve")
def verifier(self, signature, signature_algorithm):
if isinstance(signature_algorithm, ec.ECDSA):
return _ECDSAVerificationContext(
self._backend, self, signature, signature_algorithm.algorithm
)
else:
raise UnsupportedAlgorithm(
"Unsupported elliptic curve signature algorithm.",
_Reasons.UNSUPPORTED_PUBLIC_KEY_ALGORITHM)
def public_numbers(self):
set_func, get_func, group = (
self._backend._ec_key_determine_group_get_set_funcs(self._ec_key)
)
point = self._backend._lib.EC_KEY_get0_public_key(self._ec_key)
assert point != self._backend._ffi.NULL
with self._backend._tmp_bn_ctx() as bn_ctx:
bn_x = self._backend._lib.BN_CTX_get(bn_ctx)
bn_y = self._backend._lib.BN_CTX_get(bn_ctx)
res = get_func(group, point, bn_x, bn_y, bn_ctx)
assert res == 1
x = self._backend._bn_to_int(bn_x)
y = self._backend._bn_to_int(bn_y)
return ec.EllipticCurvePublicNumbers(
x=x,
y=y,
curve=self._curve
)
def public_bytes(self, encoding, format):
if format is serialization.PublicFormat.PKCS1:
raise ValueError(
"EC public keys do not support PKCS1 serialization"
)
evp_pkey = self._backend._lib.EVP_PKEY_new()
assert evp_pkey != self._backend._ffi.NULL
evp_pkey = self._backend._ffi.gc(
evp_pkey, self._backend._lib.EVP_PKEY_free
)
res = self._backend._lib.EVP_PKEY_set1_EC_KEY(evp_pkey, self._ec_key)
assert res == 1
return self._backend._public_key_bytes(
encoding,
format,
None,
evp_pkey,
None
)
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import time
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
class HistoryTest(pyauto.PyUITest):
"""TestCase for History."""
def testBasic(self):
url = self.GetFileURLForDataPath('title2.html')
title = 'Title Of Awesomeness'
self.NavigateToURL(url)
history = self.GetHistoryInfo().History()
self.assertEqual(1, len(history))
self.assertEqual(title, history[0]['title'])
self.assertEqual(url, history[0]['url'])
def Debug(self):
"""Test method for experimentation.
This method will not run automatically.
"""
while True:
raw_input('Interact with the browser and hit <enter> to dump history.. ')
print '*' * 20
self.pprint(self.GetHistoryInfo().History())
def testHistoryPersists(self):
"""Verify that history persists after session restart."""
assert not self.GetHistoryInfo().History(), 'Expecting clean history.'
url = self.GetFileURLForDataPath('title2.html')
title = 'Title Of Awesomeness'
self.NavigateToURL(url)
history = self.GetHistoryInfo().History()
self.assertEqual(1, len(history))
self.assertEqual(title, history[0]['title'])
self.assertEqual(url, history[0]['url'])
self.RestartBrowser(clear_profile=False)
# Verify that history persists.
history = self.GetHistoryInfo().History()
self.assertEqual(1, len(history))
self.assertEqual(title, history[0]['title'])
self.assertEqual(url, history[0]['url'])
def testInvalidURLNoHistory(self):
"""Invalid URLs should not go in history."""
assert not self.GetHistoryInfo().History(), 'Expecting clean history.'
urls = [ self.GetFileURLForPath('some_non-existing_path'),
self.GetFileURLForPath('another_non-existing_path'),
]
for url in urls:
if not url.startswith('file://'):
logging.warn('Using %s. Might depend on how dns failures are handled '
'on the network' % url)
self.NavigateToURL(url)
self.assertEqual(0, len(self.GetHistoryInfo().History()))
def testNewTabNoHistory(self):
"""New tab page - chrome://newtab/ should not show up in history."""
assert not self.GetHistoryInfo().History(), 'Expecting clean history.'
self.AppendTab(pyauto.GURL('chrome://newtab/'))
self.assertEqual(0, len(self.GetHistoryInfo().History()))
def testIncognitoNoHistory(self):
"""Incognito browsing should not show up in history."""
assert not self.GetHistoryInfo().History(), 'Expecting clean history.'
url = self.GetFileURLForDataPath('title2.html')
self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
self.NavigateToURL(url, 1, 0)
self.assertEqual(0, len(self.GetHistoryInfo().History()))
def testStarredBookmarkInHistory(self):
"""Verify "starred" URLs in history."""
url = self.GetFileURLForDataPath('title2.html')
title = 'Title Of Awesomeness'
self.NavigateToURL(url)
# Should not be starred in history yet.
history = self.GetHistoryInfo().History()
self.assertEqual(1, len(history))
self.assertFalse(history[0]['starred'])
# Bookmark the URL.
bookmarks = self.GetBookmarkModel()
bar_id = bookmarks.BookmarkBar()['id']
self.AddBookmarkURL(bar_id, 0, title, url)
# Should be starred now.
history = self.GetHistoryInfo().History()
self.assertEqual(1, len(history))
self.assertTrue(history[0]['starred'])
# Remove bookmark.
bookmarks = self.GetBookmarkModel()
node = bookmarks.FindByTitle(title)
self.assertTrue(node)
id = node[0]['id']
self.RemoveBookmark(id)
# Should not be starred anymore.
history = self.GetHistoryInfo().History()
self.assertEqual(1, len(history))
self.assertFalse(history[0]['starred'])
def testNavigateMultiTimes(self):
"""Multiple navigations to the same url should have a single history."""
assert not self.GetHistoryInfo().History(), 'Expecting clean history.'
url = self.GetFileURLForDataPath('title2.html')
for i in range(5):
self.NavigateToURL(url)
self.assertEqual(1, len(self.GetHistoryInfo().History()))
def testMultiTabsWindowsHistory(self):
"""Verify history with multiple windows and tabs."""
assert not self.GetHistoryInfo().History(), 'Expecting clean history.'
urls = []
for name in ['title2.html', 'title1.html', 'title3.html', 'simple.html']:
urls.append(self.GetFileURLForDataPath(name))
num_urls = len(urls)
assert num_urls == 4, 'Need 4 urls'
self.NavigateToURL(urls[0], 0, 0) # window 0, tab 0
self.OpenNewBrowserWindow(True)
self.AppendTab(pyauto.GURL(urls[1]), 0) # window 0, tab 1
self.AppendTab(pyauto.GURL(urls[2]), 1) # window 1
self.AppendTab(pyauto.GURL(urls[3]), 1) # window 1
history = self.GetHistoryInfo().History()
self.assertEqual(num_urls, len(history))
# The history should be ordered most recent first.
for i in range(num_urls):
self.assertEqual(urls[-1 - i], history[i]['url'])
def testDownloadNoHistory(self):
"""Downloaded URLs should not show up in history."""
zip_file = 'a_zip_file.zip'
assert not self.GetHistoryInfo().History(), 'Expecting clean history.'
test_utils.DownloadFileFromDownloadsDataDir(self, zip_file)
test_utils.RemoveDownloadedTestFile(self, zip_file)
# We shouldn't have any history
history = self.GetHistoryInfo().History()
self.assertEqual(0, len(history))
def testRedirectHistory(self):
"""HTTP meta-refresh redirects should have separate history entries."""
assert not self.GetHistoryInfo().History(), 'Expecting clean history.'
file_url = self.GetFileURLForDataPath('History', 'redirector.html')
landing_url = self.GetFileURLForDataPath('History', 'landing.html')
tab = self.GetBrowserWindow(0).GetTab(0)
tab.NavigateToURLBlockUntilNavigationsComplete(pyauto.GURL(file_url), 2)
self.assertEqual(landing_url, self.GetActiveTabURL().spec())
# We should have two history items
history = self.GetHistoryInfo().History()
self.assertEqual(2, len(history))
self.assertEqual(landing_url, history[0]['url'])
def testForge(self):
"""Brief test of forging history items.
Note the history system can tweak values (e.g. lower-case a URL or
append an '/' on it) so be careful with exact comparison.
"""
assert not self.GetHistoryInfo().History(), 'Expecting clean history.'
# Minimal interface
self.AddHistoryItem({'url': 'http://ZOINKS'})
history = self.GetHistoryInfo().History()
self.assertEqual(1, len(history))
self.assertTrue('zoinks' in history[0]['url']) # yes it gets lower-cased.
# Python's time might be slightly off (~10 ms) from Chrome's time (on win).
# time.time() on win counts in 1ms steps whereas it's 1us on linux.
# So give the new history item some time separation, so that we can rely
# on the history ordering.
def _GetTimeLaterThan(tm):
y = time.time()
if y - tm < 0.5:  # 0.5s should be an acceptable separation
return 0.5 + y
return y
new_time = _GetTimeLaterThan(history[0]['time'])
# Full interface (specify both title and url)
self.AddHistoryItem({'title': 'Google',
'url': 'http://www.google.com',
'time': new_time})
# Expect a second item
history = self.GetHistoryInfo().History()
self.assertEqual(2, len(history))
# And make sure our forged item is there.
self.assertEqual('Google', history[0]['title'])
self.assertTrue('google.com' in history[0]['url'])
self.assertTrue(abs(new_time - history[0]['time']) < 1.0)
def testHttpsHistory(self):
"""Verify a site using https protocol shows up within history."""
https_url = 'https://encrypted.google.com/'
url_title = 'Google'
self.NavigateToURL(https_url)
history = self.GetHistoryInfo().History()
self.assertEqual(len(history), 1)
self.assertEqual(url_title, history[0]['title'])
self.assertEqual(https_url, history[0]['url'])
def testFtpHistory(self):
"""Verify a site using ftp protocol shows up within history."""
ftp_server = self.StartFTPServer(os.path.join('chrome', 'test', 'data'))
ftp_title = 'A Small Hello'
ftp_url = self.GetFtpURLForDataPath(ftp_server, 'History', 'landing.html')
self.NavigateToURL(ftp_url)
history = self.GetHistoryInfo().History()
self.assertEqual(len(history), 1)
self.assertEqual(ftp_title, history[0]['title'])
self.StopFTPServer(ftp_server)
def _CheckHistory(self, title, url, length, index=0):
"""Verify that the current history matches expectations.
Verify that history item has the given title and url
and that length of history list is as expected.
Args:
title: Expected title of given web page.
url: Expected address of given web page.
length: Expected length of history list.
index: Position of item we want to check in history list.
"""
history = self.GetHistoryInfo().History()
self.assertEqual(
length, len(history),
msg='History length: expected = %d, actual = %d.'
% (length, len(history)))
self.assertEqual(
title, history[index]['title'],
msg='Title: expected = %s, actual = %s.'
% (title, history[index]['title']))
self.assertEqual(
url, history[index]['url'], msg='URL: expected = %s, actual = %s.'
% (url, history[index]['url']))
def _NavigateAndCheckHistory(self, title, page, length):
"""Navigate to a page, then verify the history.
Args:
title: Title of given web page.
page: Filename of given web page.
length: Length of history list.
"""
url = self.GetFileURLForDataPath(page)
self.NavigateToURL(url)
self._CheckHistory(title, url, length)
def testNavigateBringPageToTop(self):
"""Verify that navigation brings current page to top of history list."""
self._NavigateAndCheckHistory('Title Of Awesomeness', 'title2.html', 1)
self._NavigateAndCheckHistory('Title Of More Awesomeness', 'title3.html',
2)
def testReloadBringPageToTop(self):
"""Verify that reloading a page brings it to top of history list."""
url1 = self.GetFileURLForDataPath('title2.html')
title1 = 'Title Of Awesomeness'
self._NavigateAndCheckHistory(title1, 'title2.html', 1)
url2 = self.GetFileURLForDataPath('title3.html')
title2 = 'Title Of More Awesomeness'
self.AppendTab(pyauto.GURL(url2))
self._CheckHistory(title2, url2, 2)
self.ActivateTab(0)
self.ReloadActiveTab()
self._CheckHistory(title1, url1, 2)
def testBackForwardBringPageToTop(self):
"""Verify that back/forward brings current page to top of history list."""
url1 = self.GetFileURLForDataPath('title2.html')
title1 = 'Title Of Awesomeness'
self._NavigateAndCheckHistory(title1, 'title2.html', 1)
url2 = self.GetFileURLForDataPath('title3.html')
title2 = 'Title Of More Awesomeness'
self._NavigateAndCheckHistory(title2, 'title3.html', 2)
tab = self.GetBrowserWindow(0).GetTab(0)
tab.GoBack()
self._CheckHistory(title1, url1, 2)
tab.GoForward()
self._CheckHistory(title2, url2, 2)
def testAppendTabAddPage(self):
"""Verify that opening a new tab adds that page to history."""
self._NavigateAndCheckHistory('Title Of Awesomeness', 'title2.html', 1)
url2 = self.GetFileURLForDataPath('title3.html')
title2 = 'Title Of More Awesomeness'
self.AppendTab(pyauto.GURL(url2))
self._CheckHistory(title2, url2, 2)
def testOpenWindowAddPage(self):
"""Verify that opening new window to a page adds the page to history."""
self._NavigateAndCheckHistory('Title Of Awesomeness', 'title2.html', 1)
url2 = self.GetFileURLForDataPath('title3.html')
title2 = 'Title Of More Awesomeness'
self.OpenNewBrowserWindow(True)
self.NavigateToURL(url2, 1)
self._CheckHistory(title2, url2, 2)
def testSubmitFormAddsTargetPage(self):
"""Verify that submitting form adds target page to history list."""
url1 = self.GetFileURLForDataPath('History', 'form.html')
self.NavigateToURL(url1)
self.assertTrue(self.SubmitForm('form'))
url2 = self.GetFileURLForDataPath('History', 'target.html')
self.assertEqual(
'SUCCESS',
self.GetDOMValue('document.getElementById("result").innerHTML'))
self._CheckHistory('Target Page', url2, 2)
def testOneHistoryTabPerWindow(self):
"""Verify history shortcut opens only one history tab per window.
Also, make sure that existing history tab is activated.
"""
command_line = self.GetBrowserInfo()['properties']['command_line_string']
history_url = 'chrome://chrome/history'
# Invoke History.
self.RunCommand(pyauto.IDC_SHOW_HISTORY)
# Even when the above command completes, the currently-active tab title
# is 'Loading...' for a brief time while the history page loads.
self.assertTrue(
self.WaitUntil(lambda: 'History' == self.GetActiveTabTitle()),
msg='History page was not opened.')
# Open new tab, invoke History again.
self.RunCommand(pyauto.IDC_NEW_TAB)
self.RunCommand(pyauto.IDC_SHOW_HISTORY)
# Verify there is only one history tab, and that it is activated.
tab0url = self.GetBrowserInfo()['windows'][0]['tabs'][0]['url']
self.assertEqual(
history_url, tab0url, msg='Tab 0: expected = %s, actual = %s.'
% (history_url, tab0url))
tab1url = self.GetBrowserInfo()['windows'][0]['tabs'][1]['url']
self.assertNotEqual(
history_url, tab1url,
msg='Tab 1: History page not expected.')
self.assertEqual('History', self.GetActiveTabTitle(),
msg='History page is not activated.')
if __name__ == '__main__':
pyauto_functional.Main()
from django.contrib.auth.models import User
from django.core.cache import cache
from django.core.mail import send_mail
from django.core.validators import validate_email, RegexValidator
from django.core.validators import MinValueValidator
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.utils.text import slugify
from localflavor.us.models import PhoneNumberField
from stdimage import StdImageField
from mig_main.pdf_field import ContentTypeRestrictedFileField, pdf_types
from mig_main.location_field import LocationField
def resume_file_name(instance, filename):
""" Returns the resume filename for a member.
Fixes serialization loop issue.
"""
return '/'.join([u"resumes", instance.uniqname+u'.pdf'])
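# Example (traced from the code above): for a member whose uniqname is
# 'jdoe', every uploaded resume is stored as 'resumes/jdoe.pdf' regardless of
# the original filename.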
PREFERENCES = [
{
'name': 'google_calendar_add',
'values': ('always', 'never'),
'verbose': ('Do you want to add events signed up for to your '
'personal calendar?'),
'default': 'never',
},
{
'name': 'google_calendar_account',
'values': ('umich', 'alternate'),
'verbose': 'Which email has your personal calendar',
'default': 'umich',
},
]
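# Illustrative note (an assumption about usage, not stated in this file):
# each entry above presumably corresponds to a UserPreference row, e.g. a
# user who opted out of calendar syncing would carry
# preference_type='google_calendar_add', preference_value='never'.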
ALUM_MAIL_FREQ_CHOICES = (
("NO", "None"),
("YR", "Yearly"),
("SM", "Semesterly"),
("MO", "Monthly"),
("WK", "Weekly"),
("AC", "Remain Active Member"),
)
GENDER_CHOICES = (
("F", "Female"),
("M", "Male"),
("O", "Other/Prefer not to respond"),
)
# homepage models
class SlideShowPhoto(models.Model):
""" Photo that can be displayed on the home page.
"""
photo = StdImageField(
upload_to='home_page_photos',
variations={'thumbnail': (1050, 790, True)}
)
active = models.BooleanField(default=False)
title = models.TextField()
text = models.TextField()
link = models.CharField(max_length=256)
# general usage models
class AcademicTerm(models.Model):
""" An individual term, e.g. Fall 2015.
"""
year = models.PositiveSmallIntegerField(
validators=[MinValueValidator(1960)]
)
semester_type = models.ForeignKey('requirements.SemesterType')
@classmethod
def get_rchron_before(cls):
"""
This gets all of the full terms prior to, and including, the current.
While AcademicTerm objects do have a defined sort order, and thus
sorted() could be used, this method is actually faster and allows for
more ready exclusion of summer terms.
"""
current = cls.get_current_term()
query = Q(year__lte=current.year) & ~Q(semester_type__name='Summer')
if current.semester_type.name == 'Winter':
query = (query & ~
(Q(semester_type__name='Fall') &
Q(year=current.year)
)
)
terms = cls.objects.filter(query)
return terms.order_by('-year', '-semester_type')
@classmethod
def get_rchron(cls):
return cls.objects.all().order_by('-year', '-semester_type')
@classmethod
def get_current_term(cls):
current_terms = CurrentTerm.objects.all()
if current_terms.exists():
return current_terms[0].current_term
return None
def get_previous_full_term(self):
new_type = self.semester_type.get_previous_full_type()
if self.semester_type.name == 'Winter':
new_year = self.year - 1
else:
new_year = self.year
if self.__class__.objects.filter(
year=new_year,
semester_type=new_type).exists():
return self.__class__.objects.get(
year=new_year,
semester_type=new_type)
else:
a = self.__class__(year=new_year, semester_type=new_type)
a.save()
return a
def get_next_term(self):
new_type = self.semester_type.get_next_type()
if self.semester_type.name == 'Fall':
new_year = self.year + 1
else:
new_year = self.year
if self.__class__.objects.filter(
year=new_year,
semester_type=new_type).exists():
return self.__class__.objects.get(
year=new_year,
semester_type=new_type)
else:
a = self.__class__(year=new_year, semester_type=new_type)
a.save()
return a
def get_next_full_term(self):
new_type = self.semester_type.get_next_full_type()
if self.semester_type.name == 'Fall':
new_year = self.year + 1
else:
new_year = self.year
if self.__class__.objects.filter(
year=new_year,
semester_type=new_type).exists():
return self.__class__.objects.get(
year=new_year,
semester_type=new_type)
else:
a = self.__class__(year=new_year, semester_type=new_type)
a.save()
return a
def get_abbreviation(self):
return self.semester_type.name[0]+str(self.year)
def __unicode__(self):
return self.semester_type.name + ' ' + unicode(self.year)
def __gt__(self, term2):
if not hasattr(term2, 'year'):
return True
if self.year > term2.year:
return True
if self.year < term2.year:
return False
return self.semester_type > term2.semester_type
def __lt__(self, term2):
if not hasattr(term2, 'year'):
return False
if self.year < term2.year:
return True
if self.year > term2.year:
return False
return self.semester_type < term2.semester_type
def __eq__(self, term2):
if not hasattr(term2, 'year'):
return False
if self.year != term2.year:
return False
return self.semester_type == term2.semester_type
def __ne__(self, term2):
return not self == term2
def __le__(self, term2):
return not self > term2
def __ge__(self, term2):
return not self < term2
def __sub__(self, term2):
if not hasattr(term2, 'year'):
return 0
years_diff = self.year-term2.year
terms_diff = self.semester_type-term2.semester_type
return 3*years_diff+terms_diff
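# Illustrative example (assumes SemesterType subtraction returns the signed
# number of semester slots between the two types, with three slots per year):
#
#   AcademicTerm(Fall 2015) - AcademicTerm(Fall 2014) == 3
#   AcademicTerm(Fall 2015) - AcademicTerm(Winter 2015) == 2  # if Winter < Summer < Fall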
class CurrentTerm(models.Model):
""" The current term.
There can only be one. This is slightly vestigial. An improvement was
attempted and it didn't work, so we stuck with this.
"""
current_term = models.ForeignKey(AcademicTerm)
def __unicode__(self):
return unicode(self.current_term)
def save(self, *args, **kwargs):
if CurrentTerm.objects.count() > 1:
return
if (CurrentTerm.objects.count() == 1 and
not CurrentTerm.objects.get().id == self.id):
return
super(CurrentTerm, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
if CurrentTerm.objects.count() <= 1:
return
super(CurrentTerm, self).delete(*args, **kwargs)
class TBPChapter(models.Model):
""" A TBP Chapter.
This will almost assuredly be incomplete, but it's unlikely to matter.
"""
class Meta:
verbose_name = 'TBP Chapter'
verbose_name_plural = 'TBP Chapters'
state = models.CharField(
max_length=2,
validators=[
RegexValidator(
regex=r'^[A-Z]{2}$',
message=('Must be the state (or territory) 2-letter '
'code, e.g. Michigan is MI')
)
]
)
letter = models.CharField(
max_length=4,
validators=[
RegexValidator(
regex=r'^[A-I,K-U,W-Z]+$',
message=('Greek letter (latin equivalent), e.g. Gamma '
'is G, Theta is Q')
)
]
)
school = models.CharField(max_length=70)
def __unicode__(self):
return self.state+'-'+self.letter
class OfficerPosition(models.Model):
""" One of the officer or chair positions within the chapter.
"""
POSITION_TYPE_CHOICES = (
("O", "Officer"),
("C", "Chair"),
)
name = models.CharField(max_length=45)
description = models.TextField()
email = models.EmailField(max_length=254)
enabled = models.BooleanField(default=True)
display_order = models.PositiveIntegerField(default=0)
position_type = models.CharField(
max_length=1,
choices=POSITION_TYPE_CHOICES,
default='O'
)
is_elected = models.BooleanField(default=True)
TERM_LENGTH_CHOICES = (
('S', 'Semester'),
('A', 'Academic Year'),
('C', 'Calendar year'),
)
term_length = models.CharField(
max_length=1,
choices=TERM_LENGTH_CHOICES,
default='S',
)
@classmethod
def get_current(cls):
return cls.objects.filter(enabled=True).order_by('display_order')
def __unicode__(self):
return self.name
def get_teams_led(self):
return self.team_lead.exclude(
end_term__lte=AcademicTerm.get_current_term()
)
def get_teams(self):
return self.members.exclude(
end_term__lte=AcademicTerm.get_current_term()
)
class OfficerTeam(models.Model):
""" An officer team is a grouping of officer positions around some
functional area.
This is generally not interacted with in terms of the website but is
documented for informational purposes: officers are displayed by team on
the leadership page.
"""
name = models.CharField(max_length=80)
lead = models.ForeignKey(OfficerPosition, related_name='team_lead')
members = models.ManyToManyField(OfficerPosition, related_name='members')
start_term = models.ForeignKey(
AcademicTerm,
related_name='teams_starting_in_term'
)
end_term = models.ForeignKey(
AcademicTerm,
related_name='teams_ending_in_term',
null=True,
blank=True
)
def __unicode__(self):
return self.name
class Committee(models.Model):
""" Used to display committee members, handle permissions.
"""
name = models.CharField(max_length=128)
description = models.TextField()
is_active = models.BooleanField(default=True)
def __unicode__(self):
return self.name
class Standing(models.Model):
""" Alumni, Grad, or Undergrad.
"""
name = models.CharField(max_length=20)
enabled = models.BooleanField(default=True)
def __unicode__(self):
return self.name
class Major(models.Model):
""" Major area of study.
"""
name = models.CharField(max_length=60)
acronym = models.CharField(max_length=10)
standing_type = models.ManyToManyField(Standing)
enabled = models.BooleanField(default=True)
def __unicode__(self):
return self.name+' ('+self.acronym+')'
class Status(models.Model):
""" Active or Electee.
"""
class Meta:
verbose_name_plural = 'Statuses'
name = models.CharField(max_length=20)
enabled = models.BooleanField(default=True)
def __unicode__(self):
return self.name
class ShirtSize(models.Model):
""" Used for compiling member demographics, assisting with T-shirt orders.
"""
name = models.CharField(max_length=35)
acronym = models.CharField(max_length=4)
enabled = models.BooleanField(default=True)
def __unicode__(self):
return self.name
class UserPreference(models.Model):
""" An individual preference key-value pair for a user.
"""
user = models.ForeignKey('mig_main.UserProfile')
preference_type = models.CharField(max_length=64)
preference_value = models.CharField(max_length=64)
def __unicode__(self):
return unicode(self.user)+': '+self.preference_type
class UserProfile(models.Model):
""" A generic user profile.
Serves as the base class for other types of profiles (namely member).
"""
user = models.OneToOneField(User, on_delete=models.PROTECT)
# Name Stuff
nickname = models.CharField(max_length=40, blank=True)
first_name = models.CharField(max_length=40)
middle_name = models.CharField(max_length=40, blank=True)
last_name = models.CharField(max_length=40)
suffix = models.CharField(max_length=15, blank=True)
maiden_name = models.CharField(max_length=40, blank=True, null=True)
title = models.CharField(max_length=20, blank=True)
uniqname = models.CharField(
max_length=8,
primary_key=True,
validators=[RegexValidator(
regex=r'^[a-z]{3,8}$',
message=('Your uniqname must be 3-8 characters, '
'all lowercase.')
)
]
)
# Methods
@classmethod
def get_users(cls):
return cls.objects.all().order_by('last_name',
'first_name',
'uniqname'
)
def get_full_name(self):
name = self.title+" " if self.title else ""
name = name + self.first_name + " "
name = name + self.middle_name+" " if self.middle_name else name
name = name + '('+self.maiden_name+") " if self.maiden_name else name
name = name + self.last_name
name = name + ", "+self.suffix if self.suffix else name
return name
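# Example (traced from the string building above): a profile with
# title='Dr.', first_name='Jane', middle_name='Q.', last_name='Doe',
# suffix='Jr.' and no maiden name yields 'Dr. Jane Q. Doe, Jr.'.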
def __unicode__(self):
return self.get_full_name()+" ("+self.uniqname+")"
def __gt__(self, user2):
if not hasattr(user2, 'last_name'):
return True
if self.last_name > user2.last_name:
return True
if self.last_name < user2.last_name:
return False
if not hasattr(user2, 'first_name'):
return True
if self.first_name > user2.first_name:
return True
if self.first_name < user2.first_name:
return False
if not hasattr(user2, 'middle_name'):
return True
return self.middle_name > user2.middle_name
def __lt__(self, user2):
if not hasattr(user2, 'last_name'):
return False
if self.last_name < user2.last_name:
return True
if self.last_name > user2.last_name:
return False
if not hasattr(user2, 'first_name'):
return False
if self.first_name < user2.first_name:
return True
if self.first_name > user2.first_name:
return False
if not hasattr(user2, 'middle_name'):
return False
return self.middle_name < user2.middle_name
def __le__(self, user2):
return not (self > user2)
def __ge__(self, user2):
return not (self < user2)
def get_casual_name(self):
if len(self.nickname) > 0:
return self.nickname
else:
return self.first_name
def get_firstlast_name(self):
first_name = self.get_casual_name()
if first_name == self.last_name:
first_name = self.first_name
middle = '('+self.maiden_name+") " if self.maiden_name else ''
return first_name+' '+middle+self.last_name
def get_email(self):
return self.uniqname+"@umich.edu"
def is_member(self):
try:
self.memberprofile
return True
except ObjectDoesNotExist:
return False
def is_electee(self):
if not self.is_member():
return False
return self.memberprofile.status.name == 'Electee'
def is_active(self):
if not self.is_member():
return False
return self.memberprofile.status.name == 'Active'
def is_ugrad(self):
if not self.is_member():
return False
return self.memberprofile.standing.name == 'Undergraduate'
def is_grad(self):
if not self.is_member():
return False
return self.memberprofile.standing.name == 'Graduate'
def is_alumni(self):
if not self.is_member():
return False
return self.memberprofile.standing.name == 'Alumni'
class MemberProfile(UserProfile):
""" A profile for a TBP member.
The basic building block of almost everything on the site. Houses a
member's information in terms of major, contact info, etc.
"""
# Preferred Email address
MAIL_PREF_CHOICES = (
("UM", "Umich email"),
("ALT", "Alternate email"),
)
# Classifications
major = models.ManyToManyField(Major)
status = models.ForeignKey(Status, on_delete=models.PROTECT)
UMID = models.CharField(
max_length=8,
validators=[RegexValidator(
regex=r'^[0-9]{8}$',
message="Your UMID must be 8 numbers."
)
]
)
init_chapter = models.ForeignKey(
TBPChapter,
on_delete=models.PROTECT,
verbose_name="Initiating Chapter"
)
standing = models.ForeignKey(Standing, on_delete=models.PROTECT)
alt_email = models.EmailField(
"Alternate email",
max_length=254,
blank=True
)
jobs_email = models.BooleanField(
"Receive corporate emails?",
default=True
)
alum_mail_freq = models.CharField(
"How frequently would you like alumni emails?",
max_length=2,
choices=ALUM_MAIL_FREQ_CHOICES,
default="WK"
)
preferred_email = models.CharField(
max_length=3,
choices=MAIL_PREF_CHOICES,
default="UM"
)
shirt_size = models.ForeignKey(ShirtSize, on_delete=models.PROTECT)
short_bio = models.TextField()
init_term = models.ForeignKey(
AcademicTerm,
on_delete=models.PROTECT,
verbose_name="Initiation term"
)
gender = models.CharField(
max_length=1,
choices=GENDER_CHOICES,
default="O"
)
expect_grad_date = models.DateField("Expected graduation date")
still_electing = models.BooleanField(default=True)
photo = StdImageField(
upload_to='member_photos',
variations={'thumbnail': (555, 775)}
)
resume = ContentTypeRestrictedFileField(
upload_to=resume_file_name,
content_types=pdf_types,
max_upload_size=104857600,
blank=True
)
phone = PhoneNumberField()
location = LocationField(blank=True)
# Methods
@classmethod
def get_members(cls, include_alums=True):
query = Q(status__name='Active') | Q(status__name='Electee',
still_electing=True)
if include_alums:
output = cls.objects.filter(query)
else:
output = cls.objects.filter(query).exclude(standing__name='Alumni')
return output.order_by('last_name',
'first_name',
'uniqname')
@classmethod
def get_actives(cls):
query = Q(status__name='Active')
return cls.objects.filter(query).order_by('last_name',
'first_name',
'uniqname')
@classmethod
def get_electees(cls):
query = Q(status__name='Electee', still_electing=True)
return cls.objects.filter(query).order_by('last_name',
'first_name',
'uniqname')
def get_num_terms_distinction(self, distinction):
distinctions = self.distinction_set.filter(
distinction_type=distinction
)
return distinctions.count()
def get_email(self):
if self.preferred_email == "UM" or not self.alt_email:
return self.uniqname+"@umich.edu"
else:
return self.alt_email
def save(self, *args, **kwargs):
super(MemberProfile, self).save(*args, **kwargs)
cache.delete('active_list_html')
def delete(self, *args, **kwargs):
super(MemberProfile, self).delete(*args, **kwargs)
cache.delete('active_list_html')
def get_resume_name(self):
if not self.resume:
return None
return slugify(self.last_name +
'_' + self.first_name + '_' + self.uniqname) + '.pdf'
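# Example (illustrative): last_name='Doe', first_name='Jane', uniqname='jdoe'
# yields 'doe_jane_jdoe.pdf'; slugify lower-cases the string and keeps
# underscores.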
class TBPraise(models.Model):
""" An object used to send (potentially anonymous) praise to another
member for something good that they've done.
"""
giver = models.ForeignKey(UserProfile, related_name='praise_giver')
recipient = models.ForeignKey(UserProfile, related_name='praise_recipient')
description = models.TextField()
public = models.BooleanField(default=False)
anonymous = models.BooleanField(default=False)
approved = models.BooleanField(default=False)
date_added = models.DateField(auto_now_add=True)
PRAISE_BODY = r'''Hello %(name)s
This is an automated notice that %(sender)s has sent you a personal
affirmation, TBPraise if you will. The details are below:
%(praise)s
%(public_bit)s
If you have any questions, please let the Website Chair know
(tbp-website@umich.edu).
Regards,
The Website
If you'd like, you can pay it forward by sending an affirmation to another
member:
https://tbp.engin.umich.edu%(link)s'''
def is_public(self):
return self.approved and self.public
def email_praise(self):
persons_name = self.recipient.get_casual_name()
if self.anonymous:
sender = 'someone'
subject = '[TBP] You\'ve been sent an affirmation'
else:
sender = self.giver.get_firstlast_name()
subject = '[TBP] %s has sent you an affirmation' % (sender)
if self.public:
rel_link = reverse('member_resources:approve_praise',
args=(self.id,))
link = 'https://tbp.engin.umich.edu%(link)s' % {'link': rel_link}
public_bit = ('Pending your approval, this message will appear on '
'the website so that others know about your '
'awesomeness. To approve the affirmation for posting'
' click here: ')+link
else:
public_bit = ''
public_take_down = ''
body = self.PRAISE_BODY % {
'name': persons_name,
'public_bit': public_bit,
'sender': sender,
'praise': self.description,
'link': reverse('member_resources:submit_praise')
}
send_mail(
subject,
body,
'tbp.mi.g@gmail.com',
[self.recipient.get_email()],
fail_silently=True
)
"""
***************
Graphviz AGraph
***************
Interface to pygraphviz AGraph class.
Examples
--------
>>> G=nx.complete_graph(5)
>>> A=nx.to_agraph(G)
>>> H=nx.from_agraph(A)
See Also
--------
Pygraphviz: http://pygraphviz.github.io/
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import os
import sys
import tempfile
import networkx as nx
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
__all__ = ['from_agraph', 'to_agraph',
'write_dot', 'read_dot',
'graphviz_layout',
'pygraphviz_layout',
'view_pygraphviz']
def from_agraph(A,create_using=None):
"""Return a NetworkX Graph or DiGraph from a PyGraphviz graph.
Parameters
----------
A : PyGraphviz AGraph
A graph created with PyGraphviz
create_using : NetworkX graph class instance
The output is created using the given graph class instance
Examples
--------
>>> K5=nx.complete_graph(5)
>>> A=nx.to_agraph(K5)
>>> G=nx.from_agraph(A)
>>> G=nx.from_agraph(A)
Notes
-----
The Graph G will have a dictionary G.graph_attr containing
the default graphviz attributes for graphs, nodes and edges.
Default node attributes will be in the dictionary G.node_attr
which is keyed by node.
Edge attributes will be returned as edge data in G. With
edge_attr=False the edge data will be the Graphviz edge weight
attribute or the value 1 if no edge weight attribute is found.
"""
if create_using is None:
if A.is_directed():
if A.is_strict():
create_using=nx.DiGraph()
else:
create_using=nx.MultiDiGraph()
else:
if A.is_strict():
create_using=nx.Graph()
else:
create_using=nx.MultiGraph()
# assign defaults
N=nx.empty_graph(0,create_using)
N.name=''
if A.name is not None:
N.name=A.name
# add nodes, attributes to N.node_attr
for n in A.nodes():
str_attr=dict((str(k),v) for k,v in n.attr.items())
N.add_node(str(n),**str_attr)
# add edges, assign edge data as dictionary of attributes
for e in A.edges():
u,v=str(e[0]),str(e[1])
attr=dict(e.attr)
str_attr=dict((str(k),v) for k,v in attr.items())
if not N.is_multigraph():
if e.name is not None:
str_attr['key']=e.name
N.add_edge(u,v,**str_attr)
else:
N.add_edge(u,v,key=e.name,**str_attr)
# add default attributes for graph, nodes, and edges
# hang them on N.graph_attr
N.graph['graph']=dict(A.graph_attr)
N.graph['node']=dict(A.node_attr)
N.graph['edge']=dict(A.edge_attr)
return N
def to_agraph(N):
"""Return a pygraphviz graph from a NetworkX graph N.
Parameters
----------
N : NetworkX graph
A graph created with NetworkX
Examples
--------
>>> K5=nx.complete_graph(5)
>>> A=nx.to_agraph(K5)
Notes
-----
    If N has a dict N.graph_attr, an attempt will be made first
    to copy properties attached to the graph (see from_agraph)
    and then updated with the calling arguments, if any.
"""
try:
import pygraphviz
except ImportError:
raise ImportError('requires pygraphviz ',
'http://pygraphviz.github.io/')
directed=N.is_directed()
strict=N.number_of_selfloops()==0 and not N.is_multigraph()
A=pygraphviz.AGraph(name=N.name,strict=strict,directed=directed)
# default graph attributes
A.graph_attr.update(N.graph.get('graph',{}))
A.node_attr.update(N.graph.get('node',{}))
A.edge_attr.update(N.graph.get('edge',{}))
# add nodes
for n,nodedata in N.nodes(data=True):
A.add_node(n,**nodedata)
# loop over edges
if N.is_multigraph():
for u,v,key,edgedata in N.edges(data=True,keys=True):
str_edgedata=dict((k,str(v)) for k,v in edgedata.items())
A.add_edge(u,v,key=str(key),**str_edgedata)
else:
for u,v,edgedata in N.edges(data=True):
str_edgedata=dict((k,str(v)) for k,v in edgedata.items())
A.add_edge(u,v,**str_edgedata)
return A
def write_dot(G,path):
"""Write NetworkX graph G to Graphviz dot format on path.
Parameters
----------
G : graph
A networkx graph
path : filename
Filename or file handle to write
"""
try:
import pygraphviz
except ImportError:
raise ImportError('requires pygraphviz ',
'http://pygraphviz.github.io/')
A=to_agraph(G)
A.write(path)
A.clear()
return
def read_dot(path):
"""Return a NetworkX graph from a dot file on path.
Parameters
----------
path : file or string
File name or file handle to read.
"""
try:
import pygraphviz
except ImportError:
raise ImportError('read_dot() requires pygraphviz ',
'http://pygraphviz.github.io/')
A=pygraphviz.AGraph(file=path)
return from_agraph(A)
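def _example_dot_roundtrip():
    # Hedged usage sketch (not part of the original module): write a graph to
    # a temporary .dot file with write_dot and read it back with read_dot.
    # Requires pygraphviz; relies on the os/tempfile/nx imports of this module.
    fd, path = tempfile.mkstemp(suffix='.dot')
    os.close(fd)
    G = nx.complete_graph(4)
    write_dot(G, path)
    H = read_dot(path)
    os.remove(path)
    return H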
def graphviz_layout(G,prog='neato',root=None, args=''):
"""Create node positions for G using Graphviz.
Parameters
----------
G : NetworkX graph
A graph created with NetworkX
prog : string
Name of Graphviz layout program
root : string, optional
Root node for twopi layout
args : string, optional
Extra arguments to Graphviz layout program
    Returns
    -------
    dict
       Dictionary of (x, y) positions keyed by node.
Examples
--------
>>> G=nx.petersen_graph()
>>> pos=nx.graphviz_layout(G)
>>> pos=nx.graphviz_layout(G,prog='dot')
Notes
-----
This is a wrapper for pygraphviz_layout.
"""
return pygraphviz_layout(G,prog=prog,root=root,args=args)
def pygraphviz_layout(G,prog='neato',root=None, args=''):
"""Create node positions for G using Graphviz.
Parameters
----------
G : NetworkX graph
A graph created with NetworkX
prog : string
Name of Graphviz layout program
root : string, optional
Root node for twopi layout
args : string, optional
Extra arguments to Graphviz layout program
    Returns
    -------
    dict
       Dictionary of (x, y) positions keyed by node.
Examples
--------
>>> G=nx.petersen_graph()
>>> pos=nx.graphviz_layout(G)
>>> pos=nx.graphviz_layout(G,prog='dot')
"""
try:
import pygraphviz
except ImportError:
raise ImportError('requires pygraphviz ',
'http://pygraphviz.github.io/')
if root is not None:
args+="-Groot=%s"%root
A=to_agraph(G)
A.layout(prog=prog,args=args)
node_pos={}
for n in G:
node=pygraphviz.Node(A,n)
try:
xx,yy=node.attr["pos"].split(',')
node_pos[n]=(float(xx),float(yy))
except:
print("no position for node",n)
node_pos[n]=(0.0,0.0)
return node_pos
@nx.utils.open_file(5, 'w')
def view_pygraphviz(G, edgelabel=None, prog='dot', args='',
suffix='', path=None):
"""Views the graph G using the specified layout algorithm.
Parameters
----------
G : NetworkX graph
        The graph to draw.
edgelabel : str, callable, None
        If a string, then it specifies the edge attribute to be displayed
on the edge labels. If a callable, then it is called for each
edge and it should return the string to be displayed on the edges.
The function signature of `edgelabel` should be edgelabel(data),
where `data` is the edge attribute dictionary.
prog : string
Name of Graphviz layout program.
args : str
Additional arguments to pass to the Graphviz layout program.
suffix : str
        If `path` is None, we save to a temporary file. The value of
`suffix` will appear at the tail end of the temporary filename.
path : str, None
The filename used to save the image. If None, save to a temporary
file. File formats are the same as those from pygraphviz.agraph.draw.
Returns
-------
path : str
The filename of the generated image.
A : PyGraphviz graph
The PyGraphviz graph instance used to generate the image.
Notes
-----
If this function is called in succession too quickly, sometimes the
image is not displayed. So you might consider time.sleep(.5) between
calls if you experience problems.
"""
if not len(G):
raise nx.NetworkXException("An empty graph cannot be drawn.")
import pygraphviz
# If we are providing default values for graphviz, these must be set
# before any nodes or edges are added to the PyGraphviz graph object.
# The reason for this is that default values only affect incoming objects.
# If you change the default values after the objects have been added,
# then they inherit no value and are set only if explicitly set.
# to_agraph() uses these values.
attrs = ['edge', 'node', 'graph']
for attr in attrs:
if attr not in G.graph:
G.graph[attr] = {}
# These are the default values.
edge_attrs = {'fontsize': '10'}
node_attrs = {'style': 'filled',
'fillcolor': '#0000FF40',
'height': '0.75',
'width': '0.75',
'shape': 'circle'}
graph_attrs = {}
    def update_attrs(which, attrs):
        # Update graph attributes. Return list of those which were added.
        added = []
        for k, v in attrs.items():
            if k not in G.graph[which]:
                G.graph[which][k] = v
                added.append(k)
        return added
def clean_attrs(which, added):
# Remove added attributes
for attr in added:
del G.graph[which][attr]
if not G.graph[which]:
del G.graph[which]
# Update all default values
update_attrs('edge', edge_attrs)
update_attrs('node', node_attrs)
update_attrs('graph', graph_attrs)
# Convert to agraph, so we inherit default values
A = to_agraph(G)
# Remove the default values we added to the original graph.
clean_attrs('edge', edge_attrs)
clean_attrs('node', node_attrs)
clean_attrs('graph', graph_attrs)
# If the user passed in an edgelabel, we update the labels for all edges.
if edgelabel is not None:
if not hasattr(edgelabel, '__call__'):
def func(data):
return ''.join([" ", str(data[edgelabel]), " "])
else:
func = edgelabel
# update all the edge labels
if G.is_multigraph():
for u,v,key,data in G.edges(keys=True, data=True):
# PyGraphviz doesn't convert the key to a string. See #339
edge = A.get_edge(u,v,str(key))
edge.attr['label'] = str(func(data))
else:
for u,v,data in G.edges(data=True):
edge = A.get_edge(u,v)
edge.attr['label'] = str(func(data))
if path is None:
ext = 'png'
if suffix:
suffix = '_%s.%s' % (suffix, ext)
else:
suffix = '.%s' % (ext,)
path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
else:
# Assume the decorator worked and it is a file-object.
pass
display_pygraphviz(A, path=path, prog=prog, args=args)
return path.name, A
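def _example_view_weighted_path():
    # Hedged usage sketch (not part of the original module): render a small
    # weighted path graph with view_pygraphviz, labelling each edge with its
    # 'weight' attribute. Requires pygraphviz and a Graphviz installation, and
    # opens the rendered image in the system's default viewer.
    G = nx.path_graph(4)
    for u, v in G.edges():
        G[u][v]['weight'] = u + v
    path, A = view_pygraphviz(G, edgelabel='weight')
    return path, A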
def display_pygraphviz(graph, path, format=None, prog=None, args=''):
"""Internal function to display a graph in OS dependent manner.
Parameters
----------
graph : PyGraphviz graph
A PyGraphviz AGraph instance.
path : file object
An already opened file object that will be closed.
format : str, None
An attempt is made to guess the output format based on the extension
of the filename. If that fails, the value of `format` is used.
prog : string
Name of Graphviz layout program.
args : str
Additional arguments to pass to the Graphviz layout program.
Notes
-----
If this function is called in succession too quickly, sometimes the
image is not displayed. So you might consider time.sleep(.5) between
calls if you experience problems.
"""
if format is None:
filename = path.name
format = os.path.splitext(filename)[1].lower()[1:]
if not format:
# Let the draw() function use its default
format = None
# Save to a file and display in the default viewer.
# We must close the file before viewing it.
graph.draw(path, format, prog, args)
path.close()
nx.utils.default_opener(filename)
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import pygraphviz
except:
raise SkipTest("pygraphviz not available")
|
|
"""Creates a disk volume from a disk offering. This disk volume must still be attached to a virtual machine to make use of it."""
from baseCmd import *
from baseResponse import *
class createVolumeCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "true"
"""the account associated with the disk volume. Must be used with the domainId parameter."""
self.account = None
self.typeInfo['account'] = 'string'
"""an optional field, in case you want to set a custom id to the resource. Allowed to Root Admins only"""
self.customid = None
self.typeInfo['customid'] = 'string'
"""the ID of the disk offering. Either diskOfferingId or snapshotId must be passed in."""
self.diskofferingid = None
self.typeInfo['diskofferingid'] = 'uuid'
"""an optional field, whether to display the volume to the end user or not."""
self.displayvolume = None
self.typeInfo['displayvolume'] = 'boolean'
"""the domain ID associated with the disk offering. If used with the account parameter returns the disk volume associated with the account for the specified domain."""
self.domainid = None
self.typeInfo['domainid'] = 'uuid'
"""max iops"""
self.maxiops = None
self.typeInfo['maxiops'] = 'long'
"""min iops"""
self.miniops = None
self.typeInfo['miniops'] = 'long'
"""the name of the disk volume"""
self.name = None
self.typeInfo['name'] = 'string'
"""the project associated with the volume. Mutually exclusive with account parameter"""
self.projectid = None
self.typeInfo['projectid'] = 'uuid'
"""Arbitrary volume size"""
self.size = None
self.typeInfo['size'] = 'long'
"""the snapshot ID for the disk volume. Either diskOfferingId or snapshotId must be passed in."""
self.snapshotid = None
self.typeInfo['snapshotid'] = 'uuid'
"""the ID of the virtual machine; to be used with snapshot Id, VM to which the volume gets attached after creation"""
self.virtualmachineid = None
self.typeInfo['virtualmachineid'] = 'uuid'
"""the ID of the availability zone"""
self.zoneid = None
self.typeInfo['zoneid'] = 'uuid'
self.required = []
class createVolumeResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""ID of the disk volume"""
self.id = None
self.typeInfo['id'] = 'string'
"""the account associated with the disk volume"""
self.account = None
self.typeInfo['account'] = 'string'
"""the date the volume was attached to a VM instance"""
self.attached = None
self.typeInfo['attached'] = 'date'
"""the chain info of the volume"""
self.chaininfo = None
self.typeInfo['chaininfo'] = 'string'
"""the date the disk volume was created"""
self.created = None
self.typeInfo['created'] = 'date'
"""the boolean state of whether the volume is destroyed or not"""
self.destroyed = None
self.typeInfo['destroyed'] = 'boolean'
"""the ID of the device on user vm the volume is attahed to. This tag is not returned when the volume is detached."""
self.deviceid = None
self.typeInfo['deviceid'] = 'long'
"""bytes read rate of the disk volume"""
self.diskBytesReadRate = None
self.typeInfo['diskBytesReadRate'] = 'long'
"""bytes write rate of the disk volume"""
self.diskBytesWriteRate = None
self.typeInfo['diskBytesWriteRate'] = 'long'
"""io requests read rate of the disk volume"""
self.diskIopsReadRate = None
self.typeInfo['diskIopsReadRate'] = 'long'
"""io requests write rate of the disk volume"""
self.diskIopsWriteRate = None
self.typeInfo['diskIopsWriteRate'] = 'long'
"""the display text of the disk offering"""
self.diskofferingdisplaytext = None
self.typeInfo['diskofferingdisplaytext'] = 'string'
"""ID of the disk offering"""
self.diskofferingid = None
self.typeInfo['diskofferingid'] = 'string'
"""name of the disk offering"""
self.diskofferingname = None
self.typeInfo['diskofferingname'] = 'string'
"""an optional field whether to the display the volume to the end user or not."""
self.displayvolume = None
self.typeInfo['displayvolume'] = 'boolean'
"""the domain associated with the disk volume"""
self.domain = None
self.typeInfo['domain'] = 'string'
"""the ID of the domain associated with the disk volume"""
self.domainid = None
self.typeInfo['domainid'] = 'string'
"""Hypervisor the volume belongs to"""
self.hypervisor = None
self.typeInfo['hypervisor'] = 'string'
"""true if the volume is extractable, false otherwise"""
self.isextractable = None
self.typeInfo['isextractable'] = 'boolean'
"""an alternate display text of the ISO attached to the virtual machine"""
self.isodisplaytext = None
self.typeInfo['isodisplaytext'] = 'string'
"""the ID of the ISO attached to the virtual machine"""
self.isoid = None
self.typeInfo['isoid'] = 'string'
"""the name of the ISO attached to the virtual machine"""
self.isoname = None
self.typeInfo['isoname'] = 'string'
"""max iops of the disk volume"""
self.maxiops = None
self.typeInfo['maxiops'] = 'long'
"""min iops of the disk volume"""
self.miniops = None
self.typeInfo['miniops'] = 'long'
"""name of the disk volume"""
self.name = None
self.typeInfo['name'] = 'string'
"""the path of the volume"""
self.path = None
self.typeInfo['path'] = 'string'
"""the project name of the vpn"""
self.project = None
self.typeInfo['project'] = 'string'
"""the project id of the vpn"""
self.projectid = None
self.typeInfo['projectid'] = 'string'
"""provisioning type used to create volumes."""
self.provisioningtype = None
self.typeInfo['provisioningtype'] = 'string'
"""need quiesce vm or not when taking snapshot"""
self.quiescevm = None
self.typeInfo['quiescevm'] = 'boolean'
"""the display text of the service offering for root disk"""
self.serviceofferingdisplaytext = None
self.typeInfo['serviceofferingdisplaytext'] = 'string'
"""ID of the service offering for root disk"""
self.serviceofferingid = None
self.typeInfo['serviceofferingid'] = 'string'
"""name of the service offering for root disk"""
self.serviceofferingname = None
self.typeInfo['serviceofferingname'] = 'string'
"""size of the disk volume"""
self.size = None
self.typeInfo['size'] = 'long'
"""ID of the snapshot from which this volume was created"""
self.snapshotid = None
self.typeInfo['snapshotid'] = 'string'
"""the state of the disk volume"""
self.state = None
self.typeInfo['state'] = 'string'
"""the status of the volume"""
self.status = None
self.typeInfo['status'] = 'string'
"""name of the primary storage hosting the disk volume"""
self.storage = None
self.typeInfo['storage'] = 'string'
"""id of the primary storage hosting the disk volume; returned to admin user only"""
self.storageid = None
self.typeInfo['storageid'] = 'string'
"""shared or local storage"""
self.storagetype = None
self.typeInfo['storagetype'] = 'string'
"""an alternate display text of the template for the virtual machine"""
self.templatedisplaytext = None
self.typeInfo['templatedisplaytext'] = 'string'
"""the ID of the template for the virtual machine. A -1 is returned if the virtual machine was created from an ISO file."""
self.templateid = None
self.typeInfo['templateid'] = 'string'
"""the name of the template for the virtual machine"""
self.templatename = None
self.typeInfo['templatename'] = 'string'
"""type of the disk volume (ROOT or DATADISK)"""
self.type = None
self.typeInfo['type'] = 'string'
"""id of the virtual machine"""
self.virtualmachineid = None
self.typeInfo['virtualmachineid'] = 'string'
"""display name of the virtual machine"""
self.vmdisplayname = None
self.typeInfo['vmdisplayname'] = 'string'
"""name of the virtual machine"""
self.vmname = None
self.typeInfo['vmname'] = 'string'
"""state of the virtual machine"""
self.vmstate = None
self.typeInfo['vmstate'] = 'string'
"""ID of the availability zone"""
self.zoneid = None
self.typeInfo['zoneid'] = 'string'
"""name of the availability zone"""
self.zonename = None
self.typeInfo['zonename'] = 'string'
"""the list of resource tags associated with volume"""
self.tags = []
"""the ID of the latest async job acting on this object"""
self.jobid = None
self.typeInfo['jobid'] = ''
"""the current status of the latest async job acting on this object"""
self.jobstatus = None
self.typeInfo['jobstatus'] = ''
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
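def _example_build_create_volume_cmd():
    # Hedged usage sketch (not part of the generated module): populate a
    # createVolumeCmd the way a Marvin-style test client would. The UUIDs are
    # placeholders, and the dispatch call is shown only as a comment because
    # no API client object is available here (hypothetical usage).
    cmd = createVolumeCmd()
    cmd.name = 'data-disk-01'
    cmd.diskofferingid = 'REPLACE-WITH-DISKOFFERING-UUID'
    cmd.zoneid = 'REPLACE-WITH-ZONE-UUID'
    cmd.size = 10
    # response = apiClient.createVolume(cmd)  # hypothetical Marvin client call
    return cmd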
|
|
from datetime import datetime
from datetime import timedelta
import http.client
from unittest import mock
import os
import shutil
import tempfile
import unittest
from cumulusci.core.github import get_github_api
import responses
from cumulusci.core.exceptions import GithubApiError
from cumulusci.core.exceptions import GithubApiNotFoundError
from cumulusci.tasks.release_notes.generator import GithubReleaseNotesGenerator
from cumulusci.tasks.release_notes.provider import BaseChangeNotesProvider
from cumulusci.tasks.release_notes.provider import StaticChangeNotesProvider
from cumulusci.tasks.release_notes.provider import DirectoryChangeNotesProvider
from cumulusci.tasks.release_notes.provider import GithubChangeNotesProvider
from cumulusci.tasks.github.tests.util_github_api import GithubApiTestMixin
from cumulusci.tasks.release_notes.tests.utils import MockUtil
__location__ = os.path.split(os.path.realpath(__file__))[0]
date_format = "%Y-%m-%dT%H:%M:%SZ"
PARSER_CONFIG = [
{
"class_path": "cumulusci.tasks.release_notes.parser.GithubLinesParser",
"title": "Critical Changes",
},
{
"class_path": "cumulusci.tasks.release_notes.parser.GithubLinesParser",
"title": "Changes",
},
{
"class_path": "cumulusci.tasks.release_notes.parser.GithubIssuesParser",
"title": "Issues Closed",
},
]
class TestBaseChangeNotesProvider(unittest.TestCase):
def test_init(self):
provider = BaseChangeNotesProvider("test")
assert provider.release_notes_generator == "test"
def test_call_raises_notimplemented(self):
provider = BaseChangeNotesProvider("test")
self.assertRaises(NotImplementedError, provider.__call__)
class TestStaticChangeNotesProvider(unittest.TestCase):
def test_empty_list(self):
provider = StaticChangeNotesProvider("test", [])
assert list(provider()) == []
def test_single_item_list(self):
provider = StaticChangeNotesProvider("test", ["abc"])
assert list(provider()) == ["abc"]
def test_multi_item_list(self):
provider = StaticChangeNotesProvider("test", ["abc", "d", "e"])
assert list(provider()) == ["abc", "d", "e"]
class TestDirectoryChangeNotesProvider(unittest.TestCase):
def get_empty_dir(self):
tempdir = tempfile.mkdtemp()
return os.path.join(tempdir)
def get_dir_content(self, path):
dir_content = []
for item in sorted(os.listdir(path)):
item_path = "{}/{}".format(path, item)
dir_content.append(open(item_path, "r").read())
return dir_content
def test_empty_directory(self):
directory = self.get_empty_dir()
provider = DirectoryChangeNotesProvider("test", directory)
dir_content = self.get_dir_content(directory)
assert list(provider()) == dir_content
shutil.rmtree(directory)
def test_single_item_directory(self):
directory = "{}/change_notes/single/".format(__location__)
provider = DirectoryChangeNotesProvider("test", directory)
dir_content = self.get_dir_content(directory)
assert list(provider()) == dir_content
def test_multi_item_directory(self):
directory = "{}/change_notes/multi/".format(__location__)
provider = DirectoryChangeNotesProvider("test", directory)
dir_content = self.get_dir_content(directory)
assert list(provider()) == dir_content
class TestGithubChangeNotesProvider(unittest.TestCase, GithubApiTestMixin):
def setUp(self):
# Set up the mock release_tag lookup response
self.repo_api_url = "https://api.github.com/repos/TestOwner/TestRepo"
# Tag that does not exist
self.invalid_tag = "release/1.4"
# The current production release
self.current_tag = "release/1.3"
# The previous beta release
self.beta_tag = "beta/1.3-Beta_1"
# The previous production release with no change notes vs 1.3
self.last_tag = "release/1.2"
# The second previous production release with one change note vs 1.3
self.last2_tag = "release/1.1"
# The third previous production release with three change notes vs 1.3
self.last3_tag = "release/1.0"
self.current_tag_sha = self._random_sha()
self.beta_tag_sha = self._random_sha()
self.current_tag_commit_sha = self._random_sha()
self.current_tag_commit_date = datetime.utcnow()
self.last_tag_sha = self._random_sha()
self.last_tag_commit_sha = self._random_sha()
self.last_tag_commit_date = datetime.utcnow() - timedelta(days=1)
self.last2_tag_sha = self._random_sha()
self.gh = get_github_api("TestUser", "TestPass")
self.init_github()
self.mock_util = MockUtil("TestOwner", "TestRepo")
def _create_generator(self, current_tag, last_tag=None):
generator = GithubReleaseNotesGenerator(
self.gh,
self.github_info.copy(),
PARSER_CONFIG,
current_tag,
last_tag=last_tag,
)
return generator
def _mock_current_tag(self):
api_url = "{}/git/tags/{}".format(self.repo_api_url, self.current_tag_sha)
expected_response = self._get_expected_tag(
self.current_tag,
self.current_tag_commit_sha,
self.current_tag_sha,
self.current_tag_commit_date,
)
responses.add(method=responses.GET, url=api_url, json=expected_response)
return expected_response
def _mock_current_tag_commit(self):
api_url = "{}/git/commits/{}".format(
self.repo_api_url, self.current_tag_commit_sha
)
expected_response = {
"author": {
"name": "John Doe",
"email": "john.doe@example.com",
"date": datetime.strftime(self.current_tag_commit_date, date_format),
},
"committer": None,
"message": "",
"parents": [],
"sha": self.current_tag_commit_sha,
"tree": {"sha": "", "url": ""},
"url": "",
"verification": None,
}
responses.add(method=responses.GET, url=api_url, json=expected_response)
def _mock_current_tag_ref(self):
api_url = "{}/git/refs/tags/{}".format(self.repo_api_url, self.current_tag)
expected_response_current_tag_ref = self._get_expected_tag_ref(
self.current_tag, self.current_tag_sha
)
responses.add(
method=responses.GET, url=api_url, json=expected_response_current_tag_ref
)
def _mock_invalid_tag(self):
api_url = "{}/git/refs/tags/{}".format(self.repo_api_url, self.invalid_tag)
expected_response = {
"message": "Not Found",
"documentation_url": "https://developer.github.com/v3",
}
responses.add(
method=responses.GET,
url=api_url,
json=expected_response,
status=http.client.NOT_FOUND,
)
def _mock_last_tag(self):
api_url = "{}/git/tags/{}".format(self.repo_api_url, self.last_tag_sha)
expected_response = self._get_expected_tag(
self.last_tag,
self.last_tag_commit_sha,
self.last_tag_sha,
self.last_tag_commit_date,
)
responses.add(method=responses.GET, url=api_url, json=expected_response)
return expected_response
def _mock_last_tag_commit(self):
api_url = "{}/git/commits/{}".format(
self.repo_api_url, self.last_tag_commit_sha
)
expected_response = {
"author": {
"name": "John Doe",
"date": datetime.strftime(self.last_tag_commit_date, date_format),
},
"committer": None,
"message": "",
"parents": [],
"sha": self.last_tag_commit_sha,
"tree": {"sha": "", "url": ""},
"url": "",
"verification": None,
}
responses.add(method=responses.GET, url=api_url, json=expected_response)
def _mock_last_tag_ref(self):
api_url = "{}/git/refs/tags/{}".format(self.repo_api_url, self.last_tag)
expected_response_last_tag_ref = self._get_expected_tag_ref(
self.last_tag, self.last_tag_sha
)
responses.add(
method=responses.GET, url=api_url, json=expected_response_last_tag_ref
)
def _mock_list_pull_requests_one_in_range(self):
api_url = "{}/pulls".format(self.repo_api_url)
expected_response = [
self._get_expected_pull_request(
1, 101, "pull 1", datetime.utcnow() - timedelta(seconds=60)
),
self._get_expected_pull_request(
2, 102, "pull 2", datetime.utcnow() - timedelta(days=4)
),
self._get_expected_pull_request(
3, 103, "pull 3", datetime.utcnow() - timedelta(days=5)
),
]
responses.add(method=responses.GET, url=api_url, json=expected_response)
def _mock_list_pull_requests_multiple_in_range(self):
api_url = "{}/pulls".format(self.repo_api_url)
expected_response = [
self._get_expected_pull_request(
1, 101, "pull 1", datetime.utcnow() - timedelta(seconds=60)
),
self._get_expected_pull_request(
2, 102, "pull 2", datetime.utcnow() - timedelta(seconds=90)
),
self._get_expected_pull_request(
3, 103, "pull 3", datetime.utcnow() - timedelta(seconds=120)
),
self._get_expected_pull_request(
4, 104, "pull 4", datetime.utcnow() - timedelta(days=4)
),
self._get_expected_pull_request(
5, 105, "pull 5", datetime.utcnow() - timedelta(days=5)
),
self._get_expected_pull_request(6, 106, "pull 6", None),
self._get_expected_pull_request(
7,
107,
"pull 7",
datetime.utcnow() - timedelta(seconds=180),
merge_commit_sha=self.last_tag_commit_sha,
),
self._get_expected_pull_request(
8,
108,
"pull 8",
datetime.utcnow(),
merge_commit_sha=self.current_tag_commit_sha,
),
]
responses.add(method=responses.GET, url=api_url, json=expected_response)
def _mock_list_tags_multiple(self):
api_url = "{}/tags".format(self.repo_api_url)
expected_response = [
self._get_expected_repo_tag(self.current_tag, self.current_tag_sha),
self._get_expected_repo_tag(self.beta_tag, self.beta_tag_sha),
self._get_expected_repo_tag(self.last_tag, self.last_tag_sha),
self._get_expected_repo_tag(self.last2_tag, self.last2_tag_sha),
]
responses.add(method=responses.GET, url=api_url, json=expected_response)
def _mock_list_tags_single(self):
api_url = "{}/tags".format(self.repo_api_url)
expected_response = [
self._get_expected_repo_tag(self.current_tag, self.current_tag_sha)
]
responses.add(method=responses.GET, url=api_url, json=expected_response)
@responses.activate
def test_invalid_current_tag(self):
self.mock_util.mock_get_repo()
self._mock_invalid_tag()
generator = self._create_generator(self.invalid_tag)
provider = GithubChangeNotesProvider(generator, self.invalid_tag)
with self.assertRaises(GithubApiNotFoundError):
provider.current_tag_info
@responses.activate
def test_current_tag_is_lightweight(self):
self.mock_util.mock_get_repo()
tag = "release/lightweight"
generator = self._create_generator(tag)
provider = GithubChangeNotesProvider(generator, tag)
api_url = "{}/git/refs/tags/{}".format(self.repo_api_url, tag)
responses.add(
method=responses.GET,
url=api_url,
json={
"object": {"type": "commit", "url": "", "sha": ""},
"url": "",
"ref": "tags/{}".format(tag),
},
)
with self.assertRaises(GithubApiError):
provider.current_tag_info
@responses.activate
def test_current_tag_without_last(self):
self.mock_util.mock_get_repo()
self._mock_current_tag_ref()
expected_current_tag = self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_last_tag_ref()
expected_last_tag = self._mock_last_tag()
self._mock_last_tag_commit()
self._mock_list_tags_multiple()
generator = self._create_generator(self.current_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag)
current_tag = provider.current_tag_info["tag"]
last_tag = provider.last_tag_info["tag"]
self.assertEqual(current_tag.tag, expected_current_tag["tag"])
self.assertEqual(last_tag.tag, expected_last_tag["tag"])
@responses.activate
def test_current_tag_without_last_no_last_found(self):
self.mock_util.mock_get_repo()
self._mock_current_tag_ref()
self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_list_tags_single()
generator = self._create_generator(self.current_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag)
self.assertEqual(provider.last_tag, None)
self.assertEqual(provider.last_tag_info, None)
@responses.activate
def test_no_pull_requests_in_repo(self):
self.mock_util.mock_get_repo()
# Mock the tag calls
self._mock_current_tag_ref()
self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_last_tag_ref()
self._mock_last_tag()
self._mock_last_tag_commit()
# Mock the list all pull requests call
api_url = "{}/pulls".format(self.repo_api_url)
responses.add(
method=responses.GET, url=api_url, json=[], content_type="application/json"
)
generator = self._create_generator(self.current_tag, self.last_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag, self.last_tag)
self.assertEqual(list(provider()), [])
@responses.activate
def test_no_pull_requests_in_range(self):
self.mock_util.mock_get_repo()
# Mock the tag calls
self._mock_current_tag_ref()
self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_last_tag_ref()
self._mock_last_tag()
self._mock_last_tag_commit()
# Mock the list all pull requests call
api_url = "{}/pulls".format(self.repo_api_url)
expected_pull_request_1 = self._get_expected_pull_request(
pull_id=1,
issue_number=101,
body="pull 1",
merged_date=datetime.utcnow() - timedelta(days=2),
)
expected_response_list_pull_requests = [expected_pull_request_1]
responses.add(
method=responses.GET, url=api_url, json=expected_response_list_pull_requests
)
generator = self._create_generator(self.current_tag, self.last_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag, self.last_tag)
self.assertEqual(list(provider()), [])
@responses.activate
def test_one_pull_request_in_range(self):
self.mock_util.mock_get_repo()
# Mock the tag calls
self._mock_current_tag_ref()
self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_last_tag_ref()
self._mock_last_tag()
self._mock_last_tag_commit()
self._mock_list_pull_requests_one_in_range()
generator = self._create_generator(self.current_tag, self.last_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag, self.last_tag)
provider_list = list(provider())
pr_body_list = ["pull 1"]
self.assertEqual(len(provider_list), len(pr_body_list))
for pr, pr_body in zip(provider_list, pr_body_list):
self.assertEqual(pr.body, pr_body)
@responses.activate
def test_multiple_pull_requests_in_range(self):
self.mock_util.mock_get_repo()
# Mock the tag calls
self._mock_current_tag_ref()
self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_last_tag_ref()
self._mock_last_tag()
self._mock_last_tag_commit()
self._mock_list_pull_requests_multiple_in_range()
generator = self._create_generator(self.current_tag, self.last_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag, self.last_tag)
provider_list = list(provider())
        pr_body_list = ["pull 1", "pull 2", "pull 3", "pull 8"]
self.assertEqual(len(provider_list), len(pr_body_list))
for pr, pr_body in zip(provider_list, pr_body_list):
self.assertEqual(pr.body, pr_body)
@responses.activate
def test_pull_requests_with_no_last_tag(self):
self.mock_util.mock_get_repo()
# Mock the tag calls
self._mock_current_tag_ref()
self._mock_current_tag()
self._mock_current_tag_commit()
self._mock_last_tag_ref()
self._mock_last_tag()
self._mock_last_tag_commit()
self._mock_list_pull_requests_multiple_in_range()
generator = self._create_generator(self.current_tag)
provider = GithubChangeNotesProvider(generator, self.current_tag)
provider._get_last_tag = mock.Mock(return_value=None)
provider_list = list(provider())
pr_body_list = [
"pull 1",
"pull 2",
"pull 3",
"pull 4",
"pull 5",
"pull 7",
"pull 8",
]
self.assertEqual(len(provider_list), len(pr_body_list))
for pr, pr_body in zip(provider_list, pr_body_list):
self.assertEqual(pr.body, pr_body)
@responses.activate
def test_get_version_from_tag(self):
self.mock_util.mock_get_repo()
tag = "beta/1.0-Beta_1"
generator = self._create_generator(tag)
provider = GithubChangeNotesProvider(generator, tag)
self.assertEqual("1.0-Beta_1", provider._get_version_from_tag(tag))
with self.assertRaises(ValueError):
provider._get_version_from_tag("bogus")
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake volume API."""
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
from nova import exception
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('cross_az_attach',
'nova.volume.cinder', group='cinder')
class fake_volume(object):
user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66'
def __init__(self, size, name,
description, volume_id, snapshot,
volume_type, metadata,
availability_zone):
snapshot_id = None
if snapshot is not None:
snapshot_id = snapshot['id']
if volume_id is None:
volume_id = str(uuid.uuid4())
self.vol = {
'created_at': timeutils.utcnow(),
'deleted_at': None,
'updated_at': timeutils.utcnow(),
'uuid': 'WTF',
'deleted': False,
'id': volume_id,
'user_id': self.user_uuid,
'project_id': 'fake-project-id',
'snapshot_id': snapshot_id,
'host': None,
'size': size,
'availability_zone': availability_zone,
'instance_uuid': None,
'mountpoint': None,
'attach_time': timeutils.utcnow(),
'status': 'available',
'attach_status': 'detached',
'scheduled_at': None,
'launched_at': None,
'terminated_at': None,
'display_name': name,
'display_description': description,
'provider_location': 'fake-location',
'provider_auth': 'fake-auth',
'volume_type_id': 99
}
def get(self, key, default=None):
return self.vol[key]
def __setitem__(self, key, value):
self.vol[key] = value
    def __getitem__(self, key):
        return self.vol[key]
class fake_snapshot(object):
user_uuid = '4a3cd440-b9c2-11e1-afa6-0800200c9a66'
instance_uuid = '4a3cd441-b9c2-11e1-afa6-0800200c9a66'
def __init__(self, volume_id, size, name, desc, id=None):
if id is None:
id = str(uuid.uuid4())
self.snap = {
'created_at': timeutils.utcnow(),
'deleted_at': None,
'updated_at': timeutils.utcnow(),
'uuid': 'WTF',
'deleted': False,
'id': str(id),
'volume_id': volume_id,
'status': 'available',
'progress': '100%',
'volume_size': 1,
'display_name': name,
'display_description': desc,
'user_id': self.user_uuid,
'project_id': 'fake-project-id'
}
def get(self, key, default=None):
return self.snap[key]
def __setitem__(self, key, value):
self.snap[key] = value
    def __getitem__(self, key):
        return self.snap[key]
class API(object):
volume_list = []
snapshot_list = []
_instance = None
class Singleton(object):
def __init__(self):
self.API = None
def __init__(self):
if API._instance is None:
API._instance = API.Singleton()
self._EventHandler_instance = API._instance
def create(self, context, size, name, description, snapshot=None,
volume_type=None, metadata=None, availability_zone=None):
v = fake_volume(size, name,
description, None,
snapshot, volume_type,
metadata, availability_zone)
self.volume_list.append(v.vol)
LOG.info('creating volume %s', v.vol['id'])
return v.vol
def create_with_kwargs(self, context, **kwargs):
volume_id = kwargs.get('volume_id', None)
v = fake_volume(kwargs['size'],
kwargs['name'],
kwargs['description'],
str(volume_id),
None,
None,
None,
None)
if kwargs.get('status', None) is not None:
v.vol['status'] = kwargs['status']
if kwargs['host'] is not None:
v.vol['host'] = kwargs['host']
if kwargs['attach_status'] is not None:
v.vol['attach_status'] = kwargs['attach_status']
if kwargs.get('snapshot_id', None) is not None:
v.vol['snapshot_id'] = kwargs['snapshot_id']
self.volume_list.append(v.vol)
return v.vol
def get(self, context, volume_id):
if str(volume_id) == '87654321':
return {'id': volume_id,
'attach_time': '13:56:24',
'attach_status': 'attached',
'status': 'in-use'}
for v in self.volume_list:
if v['id'] == str(volume_id):
return v
raise exception.VolumeNotFound(volume_id=volume_id)
def get_all(self, context):
return self.volume_list
def delete(self, context, volume_id):
LOG.info('deleting volume %s', volume_id)
self.volume_list = [v for v in self.volume_list
if v['id'] != volume_id]
def check_attach(self, context, volume, instance=None):
        if volume['status'] != 'available':
            msg = "status must be available, volume is: %s" % volume
            raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == 'attached':
msg = "already attached"
raise exception.InvalidVolume(reason=msg)
if instance and not CONF.cinder.cross_az_attach:
if instance['availability_zone'] != volume['availability_zone']:
msg = "Instance and volume not in same availability_zone"
raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume):
if volume['status'] == "available":
msg = "already detached"
raise exception.InvalidVolume(reason=msg)
def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
LOG.info('attaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'in-use'
volume['mountpoint'] = mountpoint
volume['attach_status'] = 'attached'
volume['instance_uuid'] = instance_uuid
volume['attach_time'] = timeutils.utcnow()
def fake_set_snapshot_id(self, context, volume, snapshot_id):
volume['snapshot_id'] = snapshot_id
def reset_fake_api(self, context):
del self.volume_list[:]
del self.snapshot_list[:]
def detach(self, context, volume_id):
LOG.info('detaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'available'
volume['mountpoint'] = None
volume['attach_status'] = 'detached'
volume['instance_uuid'] = None
def initialize_connection(self, context, volume_id, connector):
return {'driver_volume_type': 'iscsi', 'data': {}}
def terminate_connection(self, context, volume_id, connector):
return None
def get_snapshot(self, context, snapshot_id):
for snap in self.snapshot_list:
if snap['id'] == str(snapshot_id):
return snap
def get_all_snapshots(self, context):
return self.snapshot_list
def create_snapshot(self, context, volume_id, name, description, id=None):
volume = self.get(context, volume_id)
snapshot = fake_snapshot(volume['id'], volume['size'],
name, description, id)
self.snapshot_list.append(snapshot.snap)
return snapshot.snap
def create_snapshot_with_kwargs(self, context, **kwargs):
snapshot = fake_snapshot(kwargs.get('volume_id'),
kwargs.get('volume_size'),
kwargs.get('name'),
kwargs.get('description'),
kwargs.get('snap_id'))
status = kwargs.get('status', None)
snapshot.snap['status'] = status
self.snapshot_list.append(snapshot.snap)
return snapshot.snap
def create_snapshot_force(self, context, volume_id,
name, description, id=None):
volume = self.get(context, volume_id)
snapshot = fake_snapshot(volume['id'], volume['size'],
name, description, id)
self.snapshot_list.append(snapshot.snap)
return snapshot.snap
def delete_snapshot(self, context, snapshot_id):
self.snapshot_list = [s for s in self.snapshot_list
if s['id'] != snapshot_id]
def reserve_volume(self, context, volume_id):
LOG.info('reserving volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'attaching'
def unreserve_volume(self, context, volume_id):
LOG.info('unreserving volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'available'
def begin_detaching(self, context, volume_id):
        LOG.info('begin detaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'detaching'
def roll_detaching(self, context, volume_id):
LOG.info('roll detaching volume %s', volume_id)
volume = self.get(context, volume_id)
volume['status'] = 'in-use'
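def _example_fake_volume_lifecycle():
    # Hedged usage sketch (not part of the original module): exercise the fake
    # volume API the way unit tests typically would. The fake ignores the
    # context argument, so None is passed here.
    api = API()
    vol = api.create(None, size=1, name='vol1', description='test volume')
    api.attach(None, vol['id'], fake_volume.instance_uuid, '/dev/vdb')
    assert api.get(None, vol['id'])['status'] == 'in-use'
    api.detach(None, vol['id'])
    api.delete(None, vol['id'])
    api.reset_fake_api(None)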
|
|
from __future__ import unicode_literals
import json
import random
import re
import time
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
float_or_none,
int_or_none,
KNOWN_EXTENSIONS,
parse_filesize,
unescapeHTML,
update_url_query,
unified_strdate,
url_or_none,
)
class BandcampIE(InfoExtractor):
_VALID_URL = r'https?://.*?\.bandcamp\.com/track/(?P<title>[^/?#&]+)'
_TESTS = [{
'url': 'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
'md5': 'c557841d5e50261777a6585648adf439',
'info_dict': {
'id': '1812978515',
'ext': 'mp3',
'title': "youtube-dl \"'/\\\u00e4\u21ad - youtube-dl test song \"'/\\\u00e4\u21ad",
'duration': 9.8485,
},
'_skip': 'There is a limit of 200 free downloads / month for the test song'
}, {
'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
'md5': '0369ace6b939f0927e62c67a1a8d9fa7',
'info_dict': {
'id': '2650410135',
'ext': 'aiff',
'title': 'Ben Prunty - Lanius (Battle)',
'uploader': 'Ben Prunty',
},
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = mobj.group('title')
webpage = self._download_webpage(url, title)
thumbnail = self._html_search_meta('og:image', webpage, default=None)
m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
if not m_download:
m_trackinfo = re.search(r'trackinfo: (.+),\s*?\n', webpage)
if m_trackinfo:
json_code = m_trackinfo.group(1)
data = json.loads(json_code)[0]
track_id = compat_str(data['id'])
if not data.get('file'):
raise ExtractorError('Not streamable', video_id=track_id, expected=True)
formats = []
for format_id, format_url in data['file'].items():
ext, abr_str = format_id.split('-', 1)
formats.append({
'format_id': format_id,
'url': self._proto_relative_url(format_url, 'http:'),
'ext': ext,
'vcodec': 'none',
'acodec': ext,
'abr': int_or_none(abr_str),
})
self._sort_formats(formats)
return {
'id': track_id,
'title': data['title'],
'thumbnail': thumbnail,
'formats': formats,
'duration': float_or_none(data.get('duration')),
}
else:
raise ExtractorError('No free songs found')
download_link = m_download.group(1)
video_id = self._search_regex(
r'(?ms)var TralbumData = .*?[{,]\s*id: (?P<id>\d+),?$',
webpage, 'video id')
download_webpage = self._download_webpage(
download_link, video_id, 'Downloading free downloads page')
blob = self._parse_json(
self._search_regex(
r'data-blob=(["\'])(?P<blob>{.+?})\1', download_webpage,
'blob', group='blob'),
video_id, transform_source=unescapeHTML)
info = blob['digital_items'][0]
downloads = info['downloads']
track = info['title']
artist = info.get('artist')
title = '%s - %s' % (artist, track) if artist else track
download_formats = {}
for f in blob['download_formats']:
name, ext = f.get('name'), f.get('file_extension')
if all(isinstance(x, compat_str) for x in (name, ext)):
download_formats[name] = ext.strip('.')
formats = []
for format_id, f in downloads.items():
format_url = f.get('url')
if not format_url:
continue
# Stat URL generation algorithm is reverse engineered from
# download_*_bundle_*.js
stat_url = update_url_query(
format_url.replace('/download/', '/statdownload/'), {
'.rand': int(time.time() * 1000 * random.random()),
})
format_id = f.get('encoding_name') or format_id
stat = self._download_json(
stat_url, video_id, 'Downloading %s JSON' % format_id,
transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1],
fatal=False)
if not stat:
continue
retry_url = url_or_none(stat.get('retry_url'))
if not retry_url:
continue
formats.append({
'url': self._proto_relative_url(retry_url, 'http:'),
'ext': download_formats.get(format_id),
'format_id': format_id,
'format_note': f.get('description'),
'filesize': parse_filesize(f.get('size_mb')),
'vcodec': 'none',
})
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'thumbnail': info.get('thumb_url') or thumbnail,
'uploader': info.get('artist'),
'artist': artist,
'track': track,
'formats': formats,
}
class BandcampAlbumIE(InfoExtractor):
IE_NAME = 'Bandcamp:album'
_VALID_URL = r'https?://(?:(?P<subdomain>[^.]+)\.)?bandcamp\.com(?:/album/(?P<album_id>[^/?#&]+))?'
_TESTS = [{
'url': 'http://blazo.bandcamp.com/album/jazz-format-mixtape-vol-1',
'playlist': [
{
'md5': '39bc1eded3476e927c724321ddf116cf',
'info_dict': {
'id': '1353101989',
'ext': 'mp3',
'title': 'Intro',
}
},
{
'md5': '1a2c32e2691474643e912cc6cd4bffaa',
'info_dict': {
'id': '38097443',
'ext': 'mp3',
'title': 'Kero One - Keep It Alive (Blazo remix)',
}
},
],
'info_dict': {
'title': 'Jazz Format Mixtape vol.1',
'id': 'jazz-format-mixtape-vol-1',
'uploader_id': 'blazo',
},
'params': {
'playlistend': 2
},
'skip': 'Bandcamp imposes download limits.'
}, {
'url': 'http://nightbringer.bandcamp.com/album/hierophany-of-the-open-grave',
'info_dict': {
'title': 'Hierophany of the Open Grave',
'uploader_id': 'nightbringer',
'id': 'hierophany-of-the-open-grave',
},
'playlist_mincount': 9,
}, {
'url': 'http://dotscale.bandcamp.com',
'info_dict': {
'title': 'Loom',
'id': 'dotscale',
'uploader_id': 'dotscale',
},
'playlist_mincount': 7,
}, {
# with escaped quote in title
'url': 'https://jstrecords.bandcamp.com/album/entropy-ep',
'info_dict': {
'title': '"Entropy" EP',
'uploader_id': 'jstrecords',
'id': 'entropy-ep',
},
'playlist_mincount': 3,
}, {
# not all tracks have songs
'url': 'https://insulters.bandcamp.com/album/we-are-the-plague',
'info_dict': {
'id': 'we-are-the-plague',
'title': 'WE ARE THE PLAGUE',
'uploader_id': 'insulters',
},
'playlist_count': 2,
}]
@classmethod
def suitable(cls, url):
return (False
if BandcampWeeklyIE.suitable(url) or BandcampIE.suitable(url)
else super(BandcampAlbumIE, cls).suitable(url))
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
uploader_id = mobj.group('subdomain')
album_id = mobj.group('album_id')
playlist_id = album_id or uploader_id
webpage = self._download_webpage(url, playlist_id)
track_elements = re.findall(
r'(?s)<div[^>]*>(.*?<a[^>]+href="([^"]+?)"[^>]+itemprop="url"[^>]*>.*?)</div>', webpage)
if not track_elements:
raise ExtractorError('The page doesn\'t contain any tracks')
# Only tracks with duration info have songs
entries = [
self.url_result(
compat_urlparse.urljoin(url, t_path),
ie=BandcampIE.ie_key(),
video_title=self._search_regex(
r'<span\b[^>]+\bitemprop=["\']name["\'][^>]*>([^<]+)',
elem_content, 'track title', fatal=False))
for elem_content, t_path in track_elements
if self._html_search_meta('duration', elem_content, default=None)]
title = self._html_search_regex(
r'album_title\s*:\s*"((?:\\.|[^"\\])+?)"',
webpage, 'title', fatal=False)
if title:
title = title.replace(r'\"', '"')
return {
'_type': 'playlist',
'uploader_id': uploader_id,
'id': playlist_id,
'title': title,
'entries': entries,
}
class BandcampWeeklyIE(InfoExtractor):
IE_NAME = 'Bandcamp:weekly'
_VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*?&)?show=(?P<id>\d+)'
_TESTS = [{
'url': 'https://bandcamp.com/?show=224',
'md5': 'b00df799c733cf7e0c567ed187dea0fd',
'info_dict': {
'id': '224',
'ext': 'opus',
'title': 'BC Weekly April 4th 2017 - Magic Moments',
'description': 'md5:5d48150916e8e02d030623a48512c874',
'duration': 5829.77,
'release_date': '20170404',
'series': 'Bandcamp Weekly',
'episode': 'Magic Moments',
'episode_number': 208,
'episode_id': '224',
}
}, {
'url': 'https://bandcamp.com/?blah/blah@&show=228',
'only_matching': True
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
blob = self._parse_json(
self._search_regex(
r'data-blob=(["\'])(?P<blob>{.+?})\1', webpage,
'blob', group='blob'),
video_id, transform_source=unescapeHTML)
show = blob['bcw_show']
# This is desired because any invalid show id redirects to `bandcamp.com`
# which happens to expose the latest Bandcamp Weekly episode.
show_id = int_or_none(show.get('show_id')) or int_or_none(video_id)
formats = []
for format_id, format_url in show['audio_stream'].items():
if not url_or_none(format_url):
continue
for known_ext in KNOWN_EXTENSIONS:
if known_ext in format_id:
ext = known_ext
break
else:
ext = None
formats.append({
'format_id': format_id,
'url': format_url,
'ext': ext,
'vcodec': 'none',
})
self._sort_formats(formats)
title = show.get('audio_title') or 'Bandcamp Weekly'
subtitle = show.get('subtitle')
if subtitle:
title += ' - %s' % subtitle
episode_number = None
seq = blob.get('bcw_seq')
if seq and isinstance(seq, list):
try:
episode_number = next(
int_or_none(e.get('episode_number'))
for e in seq
if isinstance(e, dict) and int_or_none(e.get('id')) == show_id)
except StopIteration:
pass
return {
'id': video_id,
'title': title,
'description': show.get('desc') or show.get('short_desc'),
'duration': float_or_none(show.get('audio_duration')),
'is_live': False,
'release_date': unified_strdate(show.get('published_date')),
'series': 'Bandcamp Weekly',
'episode': show.get('subtitle'),
'episode_number': episode_number,
'episode_id': compat_str(video_id),
'formats': formats
}
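def _example_extract_bandcamp_track():
    # Hedged usage sketch (not part of the extractor module): drive BandcampIE
    # through YoutubeDL without downloading anything. Assumes youtube_dl is
    # importable and network access is available; the URL is the test track
    # already listed in BandcampIE._TESTS.
    import youtube_dl
    ydl = youtube_dl.YoutubeDL({'quiet': True, 'skip_download': True})
    info = ydl.extract_info(
        'http://youtube-dl.bandcamp.com/track/youtube-dl-test-song',
        download=False)
    return info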
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from abc import ABCMeta
from BaseHTTPServer import BaseHTTPRequestHandler
from Cookie import CookieError, BaseCookie, SimpleCookie
import cgi
from datetime import datetime
from hashlib import md5
import new
import mimetypes
import os
import re
import socket
from StringIO import StringIO
import sys
import urlparse
from genshi.builder import Fragment
from trac.core import Interface, TracBaseError, TracError
from trac.util import as_bool, as_int, get_last_traceback, lazy, unquote
from trac.util.datefmt import http_date, localtz
from trac.util.html import tag
from trac.util.text import empty, exception_to_unicode, to_unicode
from trac.util.translation import _, N_, tag_
from trac.web.href import Href
from trac.web.wsgi import _FileWrapper, is_client_disconnect_exception
class IAuthenticator(Interface):
"""Extension point interface for components that can provide the name
of the remote user."""
def authenticate(req):
"""Return the name of the remote user, or `None` if the identity of the
user is unknown."""
class IRequestHandler(Interface):
"""Decide which `trac.core.Component` handles which `Request`, and how.
The boolean property `is_valid_default_handler` determines whether the
    `IRequestHandler` can be used as a `default_handler` and defaults to
    `True`. To be suitable as a `default_handler`, an `IRequestHandler` must
return an HTML document and `data` dictionary for rendering the document,
and must not require that `match_request` be called prior to
`process_request`.
The boolean property `jquery_noconflict` determines whether jQuery's
`noConflict` mode will be activated by the handler, and defaults to
`False`.
"""
def match_request(req):
"""Return whether the handler wants to process the given request."""
def process_request(req):
"""Process the request.
Return a `(template_name, data, content_type)` tuple,
where `data` is a dictionary of substitutions for the Genshi template.
"text/html" is assumed if `content_type` is `None`.
Note that if template processing should not occur, this method can
simply send the response itself and not return anything.
:Since 1.0: Clearsilver templates are no longer supported.
:Since 1.1.2: the rendering `method` (xml, xhtml or text) may be
returned as a fourth parameter in the tuple, but if not specified
it will be inferred from the `content_type` when rendering the
template.
"""
def is_valid_default_handler(handler):
"""Returns `True` if the `handler` is a valid default handler, as
described in the `IRequestHandler` interface documentation.
"""
return handler and getattr(handler, 'is_valid_default_handler', True)
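def _example_hello_world_handler():
    # Hedged sketch (not part of the original module): a minimal
    # IRequestHandler implementation, assuming the usual Component/implements
    # machinery from trac.core. Wrapped in a function so the example component
    # is only defined (and registered) when explicitly requested.
    from trac.core import Component, implements

    class HelloWorldHandler(Component):
        implements(IRequestHandler)

        def match_request(self, req):
            return req.path_info == '/helloworld'

        def process_request(self, req):
            data = {'greeting': 'Hello, world!'}
            return 'helloworld.html', data, None

    return HelloWorldHandler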
class IRequestFilter(Interface):
"""Enable components to interfere with the processing done by the
main handler, either before and/or after it enters in action.
"""
def pre_process_request(req, handler):
"""Called after initial handler selection, and can be used to change
the selected handler or redirect request.
Always returns the request handler, even if unchanged.
"""
def post_process_request(req, template, data, content_type, method=None):
"""Do any post-processing the request might need; typically adding
values to the template `data` dictionary, or changing the Genshi
template or mime type.
`data` may be updated in place.
Always returns a tuple of (template, data, content_type), even if
unchanged.
Note that `template`, `data`, `content_type` will be `None` if:
- called when processing an error page
- the default request handler did not return any result
:Since 0.11: there's a `data` argument for supporting Genshi templates;
this introduced a difference in arity which made it possible to
distinguish between the IRequestFilter components still targeted
at ClearSilver templates and the newer ones targeted at Genshi
templates.
:Since 1.0: Clearsilver templates are no longer supported.
:Since 1.1.2: the rendering `method` will be passed if it is returned
by the request handler, otherwise `method` will be `None`. For
backward compatibility, the parameter is optional in the
implementation's signature.
"""
class ITemplateStreamFilter(Interface):
"""Transform the generated content by filtering the Genshi event stream
generated by the template, prior to its serialization.
"""
def filter_stream(req, method, filename, stream, data):
"""Return a filtered Genshi event stream, or the original unfiltered
stream if no match.
`req` is the current request object, `method` is the Genshi render
method (xml, xhtml or text), `filename` is the filename of the template
to be rendered, `stream` is the event stream and `data` is the data for
the current template.
See the Genshi_ documentation for more information.
.. _Genshi: http://genshi.edgewall.org/wiki/Documentation/filters.html
"""
class TracNotImplementedError(TracError, NotImplementedError):
"""Raised when a `NotImplementedError` is trapped.
This exception is for internal use and should not be raised by
plugins. Plugins should raise `NotImplementedError`.
:since: 1.0.11
"""
title = N_("Not Implemented Error")
HTTP_STATUS = dict([(code, reason.title()) for code, (reason, description)
in BaseHTTPRequestHandler.responses.items()])
class HTTPException(TracBaseError):
__metaclass__ = ABCMeta
def __init__(self, detail, *args):
"""Factory for HTTPException classes."""
if isinstance(detail, TracBaseError):
self.detail = detail.message
self.reason = detail.title
else:
self.detail = detail
if args:
self.detail = self.detail % args
super(HTTPException, self).__init__('%s %s (%s)' % (self.code,
self.reason,
self.detail))
@property
def message(self):
# The message is based on the e.detail, which can be an Exception
# object, but not a TracError one: when creating HTTPException,
# a TracError.message is directly assigned to e.detail
if isinstance(self.detail, Exception): # not a TracBaseError
message = exception_to_unicode(self.detail)
elif isinstance(self.detail, Fragment): # TracBaseError markup
message = self.detail
else:
message = to_unicode(self.detail)
return message
@property
def title(self):
try:
# We first try to get localized error messages here, but we
# should ignore secondary errors if the main error was also
# due to i18n issues
title = _("Error")
if self.reason:
if title.lower() in self.reason.lower():
title = self.reason
else:
title = _("Error: %(message)s", message=self.reason)
except Exception:
title = "Error"
return title
@classmethod
def subclass(cls, name, code):
"""Create a new Exception class representing a HTTP status code."""
reason = HTTP_STATUS.get(code, 'Unknown')
new_class = new.classobj(name, (HTTPException,), {
'__doc__': 'Exception for HTTP %d %s' % (code, reason)
})
new_class.code = code
new_class.reason = reason
return new_class
_HTTPException_subclass_names = []
for code in [code for code in HTTP_STATUS if code >= 400]:
exc_name = HTTP_STATUS[code].replace(' ', '').replace('-', '')
# 2.5 compatibility hack:
if exc_name == 'InternalServerError':
exc_name = 'InternalError'
if exc_name.lower().startswith('http'):
exc_name = exc_name[4:]
exc_name = 'HTTP' + exc_name
setattr(sys.modules[__name__], exc_name,
HTTPException.subclass(exc_name, code))
_HTTPException_subclass_names.append(exc_name)
del code, exc_name
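# The loop above generates one module-level exception class per 4xx/5xx status
# code, e.g. `HTTPBadRequest`, `HTTPNotFound` or `HTTPInternalError`. A typical
# use, as elsewhere in this module, is simply:
#
#     raise HTTPNotFound(_("File %(path)s not found", path=path))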
class _FieldStorage(cgi.FieldStorage):
"""Our own version of cgi.FieldStorage, with tweaks."""
def read_multi(self, *args, **kwargs):
try:
cgi.FieldStorage.read_multi(self, *args, **kwargs)
except ValueError:
# Most likely "Invalid boundary in multipart form",
# possibly an upload of a .mht file? See #9880.
self.read_single()
class _RequestArgs(dict):
"""Dictionary subclass that provides convenient access to request
parameters that may contain multiple values."""
def as_int(self, name, default=None, min=None, max=None):
"""Return the value as an integer. Return `default` if
if an exception is raised while converting the value to an
integer.
:param name: the name of the request parameter
:keyword default: the value to return if the parameter is not
specified or an exception occurs converting
the value to an integer.
:keyword min: lower bound to which the value is limited
:keyword max: upper bound to which the value is limited
:since: 1.2
"""
if name not in self:
return default
return as_int(self.getfirst(name), default, min, max)
def as_bool(self, name, default=None):
"""Return the value as a boolean. Return `default` if
if an exception is raised while converting the value to a
boolean.
:param name: the name of the request parameter
:keyword default: the value to return if the parameter is not
specified or an exception occurs converting
the value to a boolean.
:since: 1.2
"""
if name not in self:
return default
return as_bool(self.getfirst(name), default)
def getbool(self, name, default=None):
"""Return the value as a boolean. Raise an `HTTPBadRequest`
exception if an exception occurs while converting the value to
a boolean.
:param name: the name of the request parameter
:keyword default: the value to return if the parameter is not
specified.
:since: 1.2
"""
if name not in self:
return default
value = self[name]
if isinstance(value, list):
raise HTTPBadRequest(tag_("Invalid value for request argument "
"%(name)s.", name=tag.em(name)))
value = as_bool(value, None)
if value is None:
raise HTTPBadRequest(tag_("Invalid value for request argument "
"%(name)s.", name=tag.em(name)))
return value
def getint(self, name, default=None, min=None, max=None):
"""Return the value as an integer. Raise an `HTTPBadRequest`
exception if an exception occurs while converting the value
to an integer.
:param name: the name of the request parameter
:keyword default: the value to return if the parameter is not
specified
:keyword min: lower bound to which the value is limited
:keyword max: upper bound to which the value is limited
:since: 1.2
"""
if name not in self:
return default
value = as_int(self[name], None, min, max)
if value is None:
raise HTTPBadRequest(tag_("Invalid value for request argument "
"%(name)s.", name=tag.em(name)))
return value
def getfirst(self, name, default=None):
"""Return the first value for the specified parameter, or `default` if
the parameter was not provided.
"""
if name not in self:
return default
val = self[name]
if isinstance(val, list):
val = val[0]
return val
def getlist(self, name):
"""Return a list of values for the specified parameter, even if only
one value was provided.
"""
if name not in self:
return []
val = self[name]
if not isinstance(val, list):
val = [val]
return val
def require(self, name):
"""Raise an `HTTPBadRequest` exception if the parameter is
not in the request.
:param name: the name of the request parameter
:since: 1.2
"""
if name not in self:
raise HTTPBadRequest(
tag_("Missing request argument. The %(name)s argument "
"must be included in the request.", name=tag.em(name)))
def parse_arg_list(query_string):
"""Parse a query string into a list of `(name, value)` tuples.
:Since 1.1.2: a leading `?` is stripped from `query_string`."""
args = []
if not query_string:
return args
query_string = query_string.lstrip('?')
for arg in query_string.split('&'):
nv = arg.split('=', 1)
if len(nv) == 2:
(name, value) = nv
else:
(name, value) = (nv[0], empty)
name = unquote(name.replace('+', ' '))
if isinstance(name, str):
name = unicode(name, 'utf-8')
value = unquote(value.replace('+', ' '))
if isinstance(value, str):
value = unicode(value, 'utf-8')
args.append((name, value))
return args
def arg_list_to_args(arg_list):
"""Convert a list of `(name, value)` tuples into into a `_RequestArgs`."""
args = _RequestArgs()
for name, value in arg_list:
if name in args:
if isinstance(args[name], list):
args[name].append(value)
else:
args[name] = [args[name], value]
else:
args[name] = value
return args
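# Taken together, a query string is processed as in this sketch (values are
# illustrative only):
#
#     arg_list = parse_arg_list('?action=diff&version=1&version=2')
#     # -> [(u'action', u'diff'), (u'version', u'1'), (u'version', u'2')]
#     args = arg_list_to_args(arg_list)
#     # repeated names are collapsed into lists:
#     #   args['action'] == u'diff', args['version'] == [u'1', u'2']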
class RequestDone(TracBaseError):
"""Marker exception that indicates whether request processing has completed
and a response was sent.
"""
iterable = None
def __init__(self, iterable=None):
self.iterable = iterable
class Cookie(SimpleCookie):
def load(self, rawdata, ignore_parse_errors=False):
if ignore_parse_errors:
self.bad_cookies = []
self._BaseCookie__set = self._loose_set
SimpleCookie.load(self, rawdata)
if ignore_parse_errors:
self._BaseCookie__set = self._strict_set
for key in self.bad_cookies:
del self[key]
_strict_set = BaseCookie._BaseCookie__set
def _loose_set(self, key, real_value, coded_value):
# If a key appears multiple times, the first occurrence has the
# narrowest scope, keep that
if key in self:
return
try:
self._strict_set(key, real_value, coded_value)
except CookieError:
self.bad_cookies.append(key)
dict.__setitem__(self, key, None)
class Request(object):
"""Represents a HTTP request/response pair.
This class provides a convenience API over WSGI.
"""
def __init__(self, environ, start_response):
"""Create the request wrapper.
:param environ: The WSGI environment dict
:param start_response: The WSGI callback for starting the response
:param callbacks: A dictionary of functions that are used to lazily
evaluate attribute lookups
"""
self.environ = environ
self._start_response = start_response
self._write = None
self._status = '200 OK'
self._response = None
self._outheaders = []
self._outcharset = None
self.outcookie = Cookie()
self.callbacks = {
'arg_list': Request._parse_arg_list,
'args': lambda req: arg_list_to_args(req.arg_list),
'languages': Request._parse_languages,
'incookie': Request._parse_cookies,
'_inheaders': Request._parse_headers
}
self.redirect_listeners = []
self.base_url = self.environ.get('trac.base_url')
if not self.base_url:
self.base_url = self._reconstruct_url()
self.href = Href(self.base_path)
self.abs_href = Href(self.base_url)
def __getattr__(self, name):
"""Performs lazy attribute lookup by delegating to the functions in the
callbacks dictionary."""
if name in self.callbacks:
value = self.callbacks[name](self)
setattr(self, name, value)
return value
raise AttributeError(name)
def __repr__(self):
uri = self.environ.get('PATH_INFO', '')
qs = self.query_string
if qs:
uri += '?' + qs
return '<%s "%s %r">' % (self.__class__.__name__, self.method, uri)
# Public API
@lazy
def is_xhr(self):
"""Returns `True` if the request is an `XMLHttpRequest`.
:since: 1.1.6
"""
return self.get_header('X-Requested-With') == 'XMLHttpRequest'
@property
def method(self):
"""The HTTP method of the request"""
return self.environ['REQUEST_METHOD']
@property
def path_info(self):
"""Path inside the application"""
path_info = self.environ.get('PATH_INFO', '')
try:
return unicode(path_info, 'utf-8')
except UnicodeDecodeError:
raise HTTPNotFound(_("Invalid URL encoding (was %(path_info)r)",
path_info=path_info))
@property
def query_string(self):
"""Query part of the request"""
return self.environ.get('QUERY_STRING', '')
@property
def remote_addr(self):
"""IP address of the remote user"""
return self.environ.get('REMOTE_ADDR')
@property
def remote_user(self):
""" Name of the remote user.
Will be `None` if the user has not logged in using HTTP authentication.
"""
user = self.environ.get('REMOTE_USER')
if user is not None:
return to_unicode(user)
@property
def scheme(self):
"""The scheme of the request URL"""
return self.environ['wsgi.url_scheme']
@property
def base_path(self):
"""The root path of the application"""
return self.environ.get('SCRIPT_NAME', '')
@property
def server_name(self):
"""Name of the server"""
return self.environ['SERVER_NAME']
@property
def server_port(self):
"""Port number the server is bound to"""
return int(self.environ['SERVER_PORT'])
def add_redirect_listener(self, listener):
"""Add a callable to be called prior to executing a redirect.
The callable is passed the arguments to the `redirect()` call.
"""
self.redirect_listeners.append(listener)
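    # A sketch of a redirect listener (hypothetical): listeners receive the
    # request itself plus the arguments of the `redirect()` call.
    #
    #     def log_redirect(req, url, permanent):
    #         req.environ['wsgi.errors'].write('redirecting to %s\n' % url)
    #
    #     req.add_redirect_listener(log_redirect)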
def get_header(self, name):
"""Return the value of the specified HTTP header, or `None` if there's
no such header in the request.
"""
name = name.lower()
for key, value in self._inheaders:
if key == name:
return value
return None
def send_response(self, code=200):
"""Set the status code of the response."""
self._status = '%s %s' % (code, HTTP_STATUS.get(code, 'Unknown'))
def send_header(self, name, value):
"""Send the response header with the specified name and value.
        `value` must either be a `unicode` string or be convertible to one
        (e.g. numbers, ...)
"""
lower_name = name.lower()
if lower_name == 'content-type':
ctpos = value.find('charset=')
if ctpos >= 0:
self._outcharset = value[ctpos + 8:].strip()
elif lower_name == 'content-length':
self._content_length = int(value)
self._outheaders.append((name, unicode(value).encode('utf-8')))
def end_headers(self):
"""Must be called after all headers have been sent and before the
actual content is written.
"""
self._send_cookie_headers()
self._write = self._start_response(self._status, self._outheaders)
def check_modified(self, datetime, extra=''):
"""Check the request "If-None-Match" header against an entity tag.
The entity tag is generated from the specified last modified time
(`datetime`), optionally appending an `extra` string to
indicate variants of the requested resource.
That `extra` parameter can also be a list, in which case the MD5 sum
of the list content will be used.
If the generated tag matches the "If-None-Match" header of the request,
this method sends a "304 Not Modified" response to the client.
Otherwise, it adds the entity tag as an "ETag" header to the response
so that consecutive requests can be cached.
"""
if isinstance(extra, list):
m = md5()
for elt in extra:
m.update(repr(elt))
extra = m.hexdigest()
etag = 'W/"%s/%s/%s"' % (self.authname, http_date(datetime), extra)
inm = self.get_header('If-None-Match')
if not inm or inm != etag:
self.send_header('ETag', etag)
else:
self.send_response(304)
self.send_header('Content-Length', 0)
self.end_headers()
raise RequestDone
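    # Typical call site (hypothetical handler code): pass the resource's last
    # modification time plus any inputs that affect the rendering, so the ETag
    # covers them as well.
    #
    #     req.check_modified(page.time, [page.author, page.version])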
_trident_re = re.compile(r' Trident/([0-9]+)')
def redirect(self, url, permanent=False):
"""Send a redirect to the client, forwarding to the specified URL.
The `url` may be relative or absolute, relative URLs will be translated
appropriately.
"""
for listener in self.redirect_listeners:
listener(self, url, permanent)
if permanent:
status = 301 # 'Moved Permanently'
elif self.method == 'POST':
status = 303 # 'See Other' -- safe to use in response to a POST
else:
status = 302 # 'Found' -- normal temporary redirect
self.send_response(status)
if not url.startswith(('http://', 'https://')):
# Make sure the URL is absolute
scheme, host = urlparse.urlparse(self.base_url)[:2]
url = urlparse.urlunparse((scheme, host, url, None, None, None))
# Workaround #10382, IE6-IE9 bug when post and redirect with hash
if status == 303 and '#' in url:
user_agent = self.environ.get('HTTP_USER_AGENT', '')
match_trident = self._trident_re.search(user_agent)
if ' MSIE ' in user_agent and \
(not match_trident or int(match_trident.group(1)) < 6):
url = url.replace('#', '#__msie303:')
self.send_header('Location', url)
self.send_header('Content-Type', 'text/plain')
self.send_header('Content-Length', 0)
self.send_header('Pragma', 'no-cache')
self.send_header('Cache-Control', 'no-cache')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.end_headers()
raise RequestDone
def send(self, content, content_type='text/html', status=200):
self.send_response(status)
self.send_header('Cache-Control', 'must-revalidate')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.send_header('Content-Type', content_type + ';charset=utf-8')
if isinstance(content, basestring):
self.send_header('Content-Length', len(content))
self.end_headers()
if self.method != 'HEAD':
self.write(content)
raise RequestDone
def send_error(self, exc_info, template='error.html',
content_type='text/html', status=500, env=None, data={}):
try:
if template.endswith('.html'):
if env:
from trac.web.chrome import Chrome, add_stylesheet
add_stylesheet(self, 'common/css/code.css')
try:
data = Chrome(env).render_template(self, template,
data, 'text/html')
except Exception:
# second chance rendering, in "safe" mode
data['trac_error_rendering'] = True
data = Chrome(env).render_template(self, template,
data, 'text/html')
else:
content_type = 'text/plain'
data = '%s\n\n%s: %s' % (data.get('title'),
data.get('type'),
data.get('message'))
except Exception: # failed to render
data = get_last_traceback()
content_type = 'text/plain'
if isinstance(data, unicode):
data = data.encode('utf-8')
self.send_response(status)
self._outheaders = []
self.send_header('Cache-Control', 'must-revalidate')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.send_header('Content-Type', content_type + ';charset=utf-8')
self.send_header('Content-Length', len(data))
self._send_cookie_headers()
self._write = self._start_response(self._status, self._outheaders,
exc_info)
if self.method != 'HEAD':
self.write(data)
raise RequestDone
def send_no_content(self):
self.send_response(204)
self.send_header('Content-Length', 0)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
raise RequestDone
def send_file(self, path, mimetype=None):
"""Send a local file to the browser.
This method includes the "Last-Modified", "Content-Type" and
"Content-Length" headers in the response, corresponding to the file
attributes. It also checks the last modification time of the local file
against the "If-Modified-Since" provided by the user agent, and sends a
"304 Not Modified" response if it matches.
"""
if not os.path.isfile(path):
raise HTTPNotFound(_("File %(path)s not found", path=path))
stat = os.stat(path)
mtime = datetime.fromtimestamp(stat.st_mtime, localtz)
last_modified = http_date(mtime)
if last_modified == self.get_header('If-Modified-Since'):
self.send_response(304)
self.send_header('Content-Length', 0)
self.end_headers()
raise RequestDone
if not mimetype:
mimetype = mimetypes.guess_type(path)[0] or \
'application/octet-stream'
self.send_response(200)
self.send_header('Content-Type', mimetype)
self.send_header('Content-Length', stat.st_size)
self.send_header('Last-Modified', last_modified)
use_xsendfile = getattr(self, 'use_xsendfile', False)
if use_xsendfile:
xsendfile_header = getattr(self, 'xsendfile_header', None)
if xsendfile_header:
self.send_header(xsendfile_header, os.path.abspath(path))
else:
use_xsendfile = False
self.end_headers()
if not use_xsendfile and self.method != 'HEAD':
fileobj = open(path, 'rb')
file_wrapper = self.environ.get('wsgi.file_wrapper', _FileWrapper)
self._response = file_wrapper(fileobj, 4096)
raise RequestDone
def read(self, size=None):
"""Read the specified number of bytes from the request body."""
fileobj = self.environ['wsgi.input']
if size is None:
size = self.get_header('Content-Length')
if size is None:
size = -1
else:
size = int(size)
data = fileobj.read(size)
return data
CHUNK_SIZE = 4096
def write(self, data):
"""Write the given data to the response body.
        *data* **must** be a `str` string or an iterable of `str`
        strings, encoded with the charset specified in the
        ``'Content-Type'`` header, or UTF-8 otherwise.
Note that when the ``'Content-Length'`` header is specified,
its value either corresponds to the length of *data*, or, if
there are multiple calls to `write`, to the cumulative length
of the *data* arguments.
"""
if not self._write:
self.end_headers()
try:
chunk_size = self.CHUNK_SIZE
bufsize = 0
buf = []
buf_append = buf.append
if isinstance(data, basestring):
data = [data]
for chunk in data:
if isinstance(chunk, unicode):
raise ValueError("Can't send unicode content")
if not chunk:
continue
bufsize += len(chunk)
buf_append(chunk)
if bufsize >= chunk_size:
self._write(''.join(buf))
bufsize = 0
buf[:] = ()
if bufsize > 0:
self._write(''.join(buf))
except (IOError, socket.error) as e:
if self._is_client_disconnected(e):
raise RequestDone
raise
# Internal methods
def _parse_arg_list(self):
"""Parse the supplied request parameters into a list of
`(name, value)` tuples.
"""
fp = self.environ['wsgi.input']
# Avoid letting cgi.FieldStorage consume the input stream when the
# request does not contain form data
ctype = self.get_header('Content-Type')
if ctype:
ctype, options = cgi.parse_header(ctype)
if ctype not in ('application/x-www-form-urlencoded',
'multipart/form-data'):
fp = StringIO('')
# Python 2.6 introduced a backwards incompatible change for
# FieldStorage where QUERY_STRING is no longer ignored for POST
# requests. We'll keep the pre 2.6 behaviour for now...
if self.method == 'POST':
qs_on_post = self.environ.pop('QUERY_STRING', '')
try:
fs = _FieldStorage(fp, environ=self.environ,
keep_blank_values=True)
        except (IOError, socket.error) as e:
if self._is_client_disconnected(e):
raise HTTPBadRequest(
_("Exception caught while reading request: %(msg)s",
msg=exception_to_unicode(e)))
raise
if self.method == 'POST':
self.environ['QUERY_STRING'] = qs_on_post
def raise_if_null_bytes(value):
if value and '\x00' in value:
raise HTTPBadRequest(_("Invalid request arguments."))
args = []
for value in fs.list or ():
name = value.name
raise_if_null_bytes(name)
try:
if name is not None:
name = unicode(name, 'utf-8')
if value.filename:
raise_if_null_bytes(value.filename)
else:
value = value.value
raise_if_null_bytes(value)
value = unicode(value, 'utf-8')
except UnicodeDecodeError as e:
raise HTTPBadRequest(
_("Invalid encoding in form data: %(msg)s",
msg=exception_to_unicode(e)))
args.append((name, value))
return args
def _parse_cookies(self):
cookies = Cookie()
header = self.get_header('Cookie')
if header:
cookies.load(header, ignore_parse_errors=True)
return cookies
def _parse_headers(self):
headers = [(name[5:].replace('_', '-').lower(), value)
for name, value in self.environ.items()
if name.startswith('HTTP_')]
if 'CONTENT_LENGTH' in self.environ:
headers.append(('content-length', self.environ['CONTENT_LENGTH']))
if 'CONTENT_TYPE' in self.environ:
headers.append(('content-type', self.environ['CONTENT_TYPE']))
return headers
def _parse_languages(self):
"""The list of languages preferred by the remote user, taken from the
``Accept-Language`` header.
"""
header = self.get_header('Accept-Language') or 'en-us'
langs = []
for i, lang in enumerate(header.split(',')):
code, params = cgi.parse_header(lang)
q = 1
if 'q' in params:
try:
q = float(params['q'])
except ValueError:
q = 0
langs.append((-q, i, code))
langs.sort()
return [code for q, i, code in langs]
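    # For example (illustrative header), 'Accept-Language: de;q=0.8, en-us, fr;q=0.5'
    # yields ['en-us', 'de', 'fr']: entries are sorted by descending q value,
    # with the original header order breaking ties.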
def _reconstruct_url(self):
"""Reconstruct the absolute base URL of the application."""
host = self.get_header('Host')
if not host:
# Missing host header, so reconstruct the host from the
# server name and port
default_port = {'http': 80, 'https': 443}
if self.server_port and self.server_port != \
default_port[self.scheme]:
host = '%s:%d' % (self.server_name, self.server_port)
else:
host = self.server_name
return urlparse.urlunparse((self.scheme, host, self.base_path, None,
None, None))
def _send_cookie_headers(self):
for name in self.outcookie.keys():
path = self.outcookie[name].get('path')
if path:
path = path.replace(' ', '%20') \
.replace(';', '%3B') \
                           .replace(',', '%2C')
self.outcookie[name]['path'] = path
cookies = to_unicode(self.outcookie.output(header='')).encode('utf-8')
for cookie in cookies.splitlines():
self._outheaders.append(('Set-Cookie', cookie.strip()))
def _is_client_disconnected(self, e):
if is_client_disconnect_exception(e):
return True
# Note that mod_wsgi raises an IOError with only a message
# if the client disconnects
if 'mod_wsgi.version' in self.environ:
return e.args[0] in ('failed to write data',
'client connection closed',
'request data read error')
return False
__no_apidoc__ = _HTTPException_subclass_names
|
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow NMT model implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import random
import sys
# import matplotlib.image as mpimg
import numpy as np
import six
from six.moves import range
import tensorflow.compat.v1 as tf
# copybara:strip_begin
from REDACTED import REDACTED
from REDACTED.tensorflow.contrib import training as contrib_training
# copybara:strip_end
from REDACTED.tensorflow.contrib.training.python.training import evaluation
from REDACTED.tensorflow.python.ops import control_flow_util
from REDACTED.mlp_log import mlp_log
from REDACTED.nmt import estimator
from REDACTED.nmt.utils import iterator_utils
from REDACTED.nmt.utils import misc_utils as utils
from REDACTED.nmt.utils import vocab_utils
utils.check_tensorflow_version()
FLAGS = None
def add_arguments(parser):
"""Build ArgumentParser."""
parser.register("type", "bool", lambda v: v.lower() == "true")
# network
parser.add_argument(
"--num_units", type=int, default=1024, help="Network size.")
parser.add_argument(
"--num_layers", type=int, default=4, help="Network depth.")
parser.add_argument("--num_encoder_layers", type=int, default=None,
help="Encoder depth, equal to num_layers if None.")
parser.add_argument("--num_decoder_layers", type=int, default=None,
help="Decoder depth, equal to num_layers if None.")
parser.add_argument("--num_embeddings_partitions", type=int, default=0,
help="Number of partitions for embedding vars.")
# optimizer
parser.add_argument(
"--optimizer", type=str, default="adam", help="sgd | adam")
parser.add_argument(
"--learning_rate",
type=float,
default=0.001,
help="Learning rate. Adam: 0.001 | 0.0001")
parser.add_argument(
"--warmup_steps",
type=int,
default=200,
help="How many steps we inverse-decay learning.")
parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\
How to warmup learning rates. Options include:
t2t: Tensor2Tensor's way, start with lr 100 times smaller, then
exponentiate until the specified lr.\
""")
parser.add_argument(
"--decay_start", type=int, default=3000, help="step to start decay")
parser.add_argument(
"--decay_interval",
type=int,
default=400,
help="interval steps between 2 decays")
parser.add_argument(
"--decay_steps", type=int, default=5, help="number of decays")
parser.add_argument(
"--decay_factor", type=float, default=0.66, help="decay rate")
parser.add_argument(
"--max_train_epochs", type=int, default=8,
help="Maximum number of training epochs.")
parser.add_argument("--num_examples_per_epoch", type=int, default=3442299,
help="Number of examples in one epoch")
parser.add_argument("--label_smoothing", type=float, default=0.1,
help=("If nonzero, smooth the labels towards "
"1/num_classes."))
# initializer
parser.add_argument("--init_op", type=str, default="uniform",
help="uniform | glorot_normal | glorot_uniform")
parser.add_argument("--init_weight", type=float, default=0.1,
help=("for uniform init_op, initialize weights "
"between [-this, this]."))
# data
parser.add_argument(
"--src", type=str, default="en", help="Source suffix, e.g., en.")
parser.add_argument(
"--tgt", type=str, default="de", help="Target suffix, e.g., de.")
parser.add_argument(
"--data_dir", type=str, default="", help="Training/eval data directory.")
parser.add_argument(
"--train_prefix",
type=str,
default="train.tok.clean.bpe.32000",
help="Train prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--test_prefix",
type=str,
default="newstest2014",
help="Test prefix, expect files with src/tgt suffixes.")
parser.add_argument(
"--use_preprocessed_data",
type="bool",
default=True,
help="Whether to use preprocessed training data.")
parser.add_argument(
"--out_dir", type=str, default=None, help="Store log/model files.")
# Vocab
parser.add_argument(
"--vocab_prefix",
type=str,
default="vocab.bpe.32000",
help="""\
Vocab prefix, expect files with src/tgt suffixes.\
""")
parser.add_argument("--check_special_token", type="bool", default=True,
help="""\
Whether check special sos, eos, unk tokens exist in the
vocab files.\
""")
# Sequence lengths
parser.add_argument(
"--src_max_len",
type=int,
default=48,
help="Max length of src sequences during training.")
parser.add_argument(
"--tgt_max_len",
type=int,
default=48,
help="Max length of tgt sequences during training.")
parser.add_argument(
"--src_max_len_infer",
type=int,
default=160,
help="Max length of src sequences during inference.")
parser.add_argument(
"--tgt_max_len_infer",
type=int,
default=160,
help="""\
Max length of tgt sequences during inference. Also use to restrict the
maximum decoding length.\
""")
# Default settings works well (rarely need to change)
parser.add_argument("--forget_bias", type=float, default=0.0,
help="Forget bias for BasicLSTMCell.")
parser.add_argument("--dropout", type=float, default=0.2,
help="Dropout rate (not keep_prob)")
parser.add_argument("--max_gradient_norm", type=float, default=5.0,
help="Clip gradients to this norm.")
parser.add_argument("--batch_size", type=int, default=512, help="Batch size.")
parser.add_argument("--steps_per_stats", type=int, default=5,
help=("How many training steps to do per stats logging."
"Save checkpoint every 10x steps_per_stats"))
parser.add_argument(
"--num_buckets",
type=int,
default=5,
help="Put data into similar-length buckets.")
parser.add_argument(
"--choose_buckets",
type=int,
default=1,
help="Choose from this number of length buckets per training step.")
# SPM
parser.add_argument("--subword_option", type=str, default="bpe",
choices=["", "bpe", "spm"],
help="""\
Set to bpe or spm to activate subword desegmentation.\
""")
# Misc
parser.add_argument(
"--num_shards", type=int,
default=8, help="Number of shards (TPU cores).")
parser.add_argument(
"--num_shards_per_host", type=int,
default=8, help="Number of shards (TPU cores) per host.")
parser.add_argument(
"--num_gpus", type=int, default=4, help="Number of gpus in each worker.")
parser.add_argument(
"--num_infeed_workers",
type=int,
default=1,
help="Number of TPU workers used for input generation.")
parser.add_argument(
"--num_tpu_workers",
type=int,
default=1,
help="Number of TPU workers; if set, uses the distributed-sync pipeline.")
parser.add_argument("--hparams_path", type=str, default=None,
help=("Path to standard hparams json file that overrides"
"hparams values from FLAGS."))
parser.add_argument(
"--random_seed",
type=int,
default=None,
help="Random seed (>0, set a specific seed).")
# Inference
parser.add_argument("--ckpt", type=str, default="",
help="Checkpoint file to load a model for inference.")
parser.add_argument(
"--infer_batch_size",
type=int,
default=512,
help="Batch size for inference mode.")
parser.add_argument(
"--examples_to_infer",
type=int,
default=3003,
help="Number of examples to infer.")
parser.add_argument("--detokenizer_file", type=str,
default="mosesdecoder/scripts/tokenizer/detokenizer.perl",
help=("""Detokenizer script file."""))
parser.add_argument("--use_REDACTED", type=bool, default=False)
parser.add_argument(
"--target_bleu", type=float, default=24.0, help="Target accuracy.")
# Advanced inference arguments
parser.add_argument("--infer_mode", type=str, default="beam_search",
choices=["greedy", "sample", "beam_search"],
help="Which type of decoder to use during inference.")
parser.add_argument("--beam_width", type=int, default=5,
help=("""\
      beam width when using beam search decoder. If 0, use standard
decoder with greedy helper.\
"""))
parser.add_argument(
"--length_penalty_weight",
type=float,
default=0.6,
help="Length penalty for beam search.")
parser.add_argument(
"--coverage_penalty_weight",
type=float,
default=0.1,
help="Coverage penalty for beam search.")
# Job info
parser.add_argument("--jobid", type=int, default=0,
help="Task id of the worker.")
# TPU
parser.add_argument("--use_tpu", type=bool, default=True)
parser.add_argument("--master", type=str, default="",
help=("Address of the master. Either --master or "
"--tpu_name must be specified."))
parser.add_argument("--tpu_name", type=str, default=None,
help=("Name of the TPU for Cluster Resolvers. Either "
"--tpu_name or --master must be specified."))
parser.add_argument("--use_dynamic_rnn", type=bool, default=False)
parser.add_argument("--use_synthetic_data", type=bool, default=False)
parser.add_argument(
"--mode",
type=str,
default="train_and_eval",
choices=["train", "train_and_eval", "infer", "preprocess"])
parser.add_argument(
"--activation_dtype",
type=str,
default="bfloat16",
choices=["float32", "bfloat16"])
parser.add_argument("--tpu_job_name", type=str, default=None)
# copybara:strip_begin
# Vizier
parser.add_argument("--client_handle", type=str, default="",
help=("Client_handle for the tuner."))
parser.add_argument("--study_name", type=str, default=None,
help=("Name of Vizier hparams tuning study."))
parser.add_argument("--REDACTED", type=int,
default=REDACTED.StudyConfig.RANDOM_SEARCH,
help=("Vizier search algorithm to use."))
# copybara:strip_end
def create_hparams(flags):
"""Create training hparams."""
return contrib_training.HParams(
# Data
src=flags.src,
tgt=flags.tgt,
train_prefix=flags.data_dir + flags.train_prefix,
test_prefix=flags.data_dir + flags.test_prefix,
vocab_prefix=flags.data_dir + flags.vocab_prefix,
out_dir=flags.out_dir,
# Networks
num_units=flags.num_units,
num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),
num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),
dropout=flags.dropout,
num_embeddings_partitions=flags.num_embeddings_partitions,
# Train
optimizer=flags.optimizer,
max_train_epochs=flags.max_train_epochs,
num_examples_per_epoch=flags.num_examples_per_epoch,
batch_size=flags.batch_size,
num_train_steps=int(flags.num_examples_per_epoch / flags.batch_size *
flags.max_train_epochs),
init_op=flags.init_op,
init_weight=flags.init_weight,
max_gradient_norm=flags.max_gradient_norm,
learning_rate=flags.learning_rate,
label_smoothing=flags.label_smoothing,
warmup_steps=flags.warmup_steps,
warmup_scheme=flags.warmup_scheme,
decay_start=flags.decay_start,
decay_interval=flags.decay_interval,
decay_steps=flags.decay_steps,
decay_factor=flags.decay_factor,
# Data constraints
num_buckets=flags.num_buckets,
choose_buckets=flags.choose_buckets,
src_max_len=flags.src_max_len,
tgt_max_len=flags.tgt_max_len,
use_preprocessed_data=flags.use_preprocessed_data,
# Inference
src_max_len_infer=flags.src_max_len_infer,
tgt_max_len_infer=flags.tgt_max_len_infer,
infer_batch_size=flags.infer_batch_size,
examples_to_infer=flags.examples_to_infer,
detokenizer_file=flags.data_dir + flags.detokenizer_file,
use_REDACTED=flags.use_REDACTED,
target_bleu=flags.target_bleu,
# Advanced inference arguments
infer_mode=flags.infer_mode,
beam_width=flags.beam_width,
length_penalty_weight=flags.length_penalty_weight,
coverage_penalty_weight=flags.coverage_penalty_weight,
# Vocab
sos=vocab_utils.SOS,
eos=vocab_utils.EOS,
subword_option=flags.subword_option,
check_special_token=flags.check_special_token,
# Misc
forget_bias=flags.forget_bias,
num_shards=flags.num_shards,
num_shards_per_host=flags.num_shards_per_host,
num_gpus=flags.num_gpus,
num_infeed_workers=flags.num_infeed_workers,
epoch_step=0, # record where we were within an epoch.
steps_per_stats=flags.steps_per_stats,
random_seed=flags.random_seed,
# TPU
use_tpu=flags.use_tpu,
master=flags.master,
tpu_name=flags.tpu_name,
use_dynamic_rnn=flags.use_dynamic_rnn,
use_synthetic_data=flags.use_synthetic_data,
mode=flags.mode,
activation_dtype=flags.activation_dtype,
tpu_job_name=flags.tpu_job_name)
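# With the default flags above, num_train_steps works out to roughly
# int(3442299 / 512 * 8) == 53785, i.e. the number of global batches needed to
# cover max_train_epochs epochs of the training data.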
def _add_argument(hparams, key, value, update=True):
"""Add an argument to hparams; if exists, change the value if update==True."""
if hasattr(hparams, key):
if update:
setattr(hparams, key, value)
else:
hparams.add_hparam(key, value)
def extend_hparams(hparams):
"""Add new arguments to hparams."""
# Sanity checks
if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]:
raise ValueError("subword option must be either spm, or bpe")
if hparams.infer_mode == "beam_search" and hparams.beam_width <= 0:
raise ValueError("beam_width must greater than 0 when using beam_search"
"decoder.")
# Different number of encoder / decoder layers
assert hparams.num_encoder_layers == hparams.num_decoder_layers
# The first unidirectional layer (after the bi-directional layer) in
# the GNMT encoder can't have residual connection due to the input is
# the concatenation of fw_cell and bw_cell's outputs.
num_encoder_residual_layers = hparams.num_encoder_layers - 2
num_decoder_residual_layers = num_encoder_residual_layers
_add_argument(hparams, "num_encoder_residual_layers",
num_encoder_residual_layers)
_add_argument(hparams, "num_decoder_residual_layers",
num_decoder_residual_layers)
## Vocab
# Get vocab file names first
if hparams.vocab_prefix:
src_vocab_file = six.ensure_str(
hparams.vocab_prefix) + "." + six.ensure_str(hparams.src)
tgt_vocab_file = six.ensure_str(
hparams.vocab_prefix) + "." + six.ensure_str(hparams.tgt)
else:
raise ValueError("hparams.vocab_prefix must be provided.")
# Source vocab
src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
src_vocab_file,
hparams.out_dir,
check_special_token=hparams.check_special_token,
sos=hparams.sos,
eos=hparams.eos,
unk=vocab_utils.UNK)
# Target vocab
utils.print_out(" using source vocab for target")
tgt_vocab_file = src_vocab_file
tgt_vocab_size = src_vocab_size
_add_argument(hparams, "src_vocab_size", src_vocab_size)
_add_argument(hparams, "tgt_vocab_size", tgt_vocab_size)
_add_argument(hparams, "src_vocab_file", src_vocab_file)
_add_argument(hparams, "tgt_vocab_file", tgt_vocab_file)
# Num embedding partitions
_add_argument(
hparams, "num_enc_emb_partitions", hparams.num_embeddings_partitions)
_add_argument(
hparams, "num_dec_emb_partitions", hparams.num_embeddings_partitions)
# Pretrained Embeddings
_add_argument(hparams, "src_embed_file", "")
_add_argument(hparams, "tgt_embed_file", "")
return hparams
def create_or_load_hparams(default_hparams, hparams_path):
"""Create hparams or load hparams from out_dir."""
hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path)
hparams = extend_hparams(hparams)
# Print HParams
utils.print_hparams(hparams)
return hparams
def prepare_dataset(flags):
"""Generate the preprocessed dataset."""
src_file = "%s.%s" % (flags.data_dir + flags.train_prefix, flags.src)
tgt_file = "%s.%s" % (flags.data_dir + flags.train_prefix, flags.tgt)
vocab_file = flags.data_dir + flags.vocab_prefix
_, vocab_file = vocab_utils.check_vocab(vocab_file, flags.out_dir)
out_file = six.ensure_str(flags.out_dir) + "preprocessed_dataset"
src_vocab_table, tgt_vocab_table = vocab_utils.create_vocab_tables(vocab_file)
src_dataset = tf.data.TextLineDataset(src_file)
tgt_dataset = tf.data.TextLineDataset(tgt_file)
iterator = iterator_utils.get_iterator(
src_dataset,
tgt_dataset,
src_vocab_table,
tgt_vocab_table,
batch_size=1,
global_batch_size=1,
sos=vocab_utils.SOS,
eos=vocab_utils.EOS,
random_seed=1,
num_buckets=flags.num_buckets,
src_max_len=flags.src_max_len,
tgt_max_len=flags.tgt_max_len,
filter_oversized_sequences=True,
return_raw=True).make_initializable_iterator()
with tf.Session() as sess:
sess.run(tf.tables_initializer())
sess.run(iterator.initializer)
try:
i = 0
while True:
with open(out_file + "_%d" % i, "wb") as f:
i += 1
for _ in range(100):
for j in sess.run(iterator.get_next()):
tf.logging.info(j)
f.write(bytearray(j))
except tf.errors.OutOfRangeError:
pass
def run_main(flags, default_hparams, estimator_fn):
"""Run main."""
# Job
jobid = flags.jobid
utils.print_out("# Job id %d" % jobid)
# Random
random_seed = flags.random_seed
if random_seed is not None and random_seed > 0:
utils.print_out("# Set random seed to %d" % random_seed)
random.seed(random_seed + jobid)
np.random.seed(random_seed + jobid)
tf.set_random_seed(random_seed)
mlp_log.mlperf_print("cache_clear", True)
mlp_log.mlperf_print("init_start", None)
mlp_log.mlperf_print("submission_benchmark", "resnet")
mlp_log.mlperf_print("submission_division", "closed")
mlp_log.mlperf_print("submission_org", "google")
mlp_log.mlperf_print("submission_platform", "tpu-v3-%d" % FLAGS.num_shards)
mlp_log.mlperf_print("submission_status", "research")
mlp_log.mlperf_print("global_batch_size", FLAGS.batch_size)
mlp_log.mlperf_print("opt_learning_rate_alt_decay_func", "True")
mlp_log.mlperf_print("opt_base_learning_rate", FLAGS.learning_rate)
mlp_log.mlperf_print("opt_learning_rate_decay_interval", FLAGS.decay_interval)
mlp_log.mlperf_print("opt_learning_rate_decay_factor", FLAGS.decay_factor)
mlp_log.mlperf_print("opt_learning_rate_decay_steps", FLAGS.decay_steps)
mlp_log.mlperf_print("opt_learning_rate_remain_steps", FLAGS.decay_start)
mlp_log.mlperf_print("opt_learning_rate_alt_warmup_func", FLAGS.warmup_scheme)
mlp_log.mlperf_print("opt_learning_rate_warmup_steps", FLAGS.warmup_steps)
mlp_log.mlperf_print(
"max_sequence_length", FLAGS.src_max_len, metadata={"method": "discard"})
mlp_log.mlperf_print("train_samples", FLAGS.num_examples_per_epoch)
mlp_log.mlperf_print("eval_samples", FLAGS.examples_to_infer)
# Model output directory
out_dir = flags.out_dir
if out_dir and not tf.gfile.Exists(out_dir):
utils.print_out("# Creating output directory %s ..." % out_dir)
tf.gfile.MakeDirs(out_dir)
# Load hparams.
hparams = create_or_load_hparams(default_hparams, flags.hparams_path)
# Train or Evaluation
return estimator_fn(hparams)
def main(unused_argv):
# pylint: disable=g-long-lambda
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
if FLAGS.mode == "preprocess":
prepare_dataset(FLAGS)
elif FLAGS.mode == "train":
print("Running training mode.")
default_hparams = create_hparams(FLAGS)
run_main(FLAGS, default_hparams, estimator.train_fn)
elif FLAGS.mode == "train_and_eval":
print("Running training and evaluation mode.")
default_hparams = create_hparams(FLAGS)
run_main(FLAGS, default_hparams,
estimator.train_and_eval_with_low_level_api)
else:
print("Running inference mode.")
default_hparams = create_hparams(FLAGS)
current_epoch = 0
last_step = 0
# Run evaluation when there's a new checkpoint
for ckpt in evaluation.checkpoints_iterator(FLAGS.out_dir):
# Terminate eval job once target score is reached
current_step = int(six.ensure_str(os.path.basename(ckpt)).split("-")[1])
if current_step <= last_step:
continue
last_step = current_step
tf.logging.info("Starting to evaluate...%s", ckpt)
try:
score = run_main(FLAGS, default_hparams, estimator.eval_fn)
current_epoch += 1
if score > FLAGS.target_bleu:
tf.logging.info(
"Evaluation finished after training step %d" % current_step)
break
# Terminate eval job when final checkpoint is reached
max_steps = default_hparams.num_train_steps
if current_step >= max_steps:
tf.logging.info(
"Evaluation finished but failed to reach target score.")
break
except tf.errors.NotFoundError:
tf.logging.info(
"Checkpoint %s no longer exists, skipping checkpoint" % ckpt)
if __name__ == "__main__":
tf.logging.set_verbosity(tf.logging.INFO)
nmt_parser = argparse.ArgumentParser()
add_arguments(nmt_parser)
FLAGS, unparsed = nmt_parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Software License Agreement (BSD License)
__license__ = 'BSD'
from threading import Thread, Lock
import sys
import rospy
import time
from dynamixel_driver.dynamixel_serial_proxy import SerialProxy
from dynamixel_driver.dynamixel_io import DynamixelIO
from dynamixel_msgs.msg import CustomHand
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
from dynamixel_msgs.msg import Service
from sensor_msgs.msg import JointState
from dynamixel_controllers.srv import StartController, StartControllerResponse, StopController, StopControllerResponse
from dynamixel_controllers.srv import RestartController, RestartControllerResponse
from dynamixel_controllers.srv import Actions, TorqueEnable
#import Adafruit_GPIO.SPI as SPI
#import Adafruit_MCP3008
# Software SPI configuration for ADC1and ADC2
#CLK = 18
#MISO_1 = 23
#MOSI_1 = 24
#CS_1 = 25
#MISO_2 = 22
#MOSI_2 = 27
#CS_2 = 10
#mcp_1= Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS_1, miso=MISO_1, mosi=MOSI_1)
#mcp_2 = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS_2, miso=MISO_2, mosi=MOSI_2)
class ControllerManager:
def __init__(self):
rospy.init_node('dynamixel_controller_manager', anonymous=True)
rospy.on_shutdown(self.on_shutdown)
self.waiting_meta_controllers = []
self.controllers = {}
self.serial_proxies = {}
self.diagnostics_rate = rospy.get_param('~diagnostics_rate', 1)
self.start_controller_lock = Lock()
self.stop_controller_lock = Lock()
manager_namespace = rospy.get_param('~namespace')
serial_ports = rospy.get_param('~serial_ports')
self.bhand_node_name = 'rqt_gui'
self.finger_names = ['joint_1', 'joint_2', 'joint_3']
self.motor_ids = [1, 2, 3]
data = JointState()
for port_namespace,port_config in serial_ports.items():
port_name = port_config['port_name']
baud_rate = port_config['baud_rate']
readback_echo = port_config['readback_echo'] if 'readback_echo' in port_config else False
min_motor_id = port_config['min_motor_id'] if 'min_motor_id' in port_config else 0
max_motor_id = port_config['max_motor_id'] if 'max_motor_id' in port_config else 253
update_rate = port_config['update_rate'] if 'update_rate' in port_config else 5
error_level_temp = 75
warn_level_temp = 70
if 'diagnostics' in port_config:
if 'error_level_temp' in port_config['diagnostics']:
error_level_temp = port_config['diagnostics']['error_level_temp']
if 'warn_level_temp' in port_config['diagnostics']:
warn_level_temp = port_config['diagnostics']['warn_level_temp']
serial_proxy = SerialProxy(port_name,
port_namespace,
baud_rate,
min_motor_id,
max_motor_id,
update_rate,
self.diagnostics_rate,
error_level_temp,
warn_level_temp,
readback_echo)
serial_proxy.connect()
rospy.Service('/actions', Actions, self.handActions)
rospy.Service('/torque_disable', TorqueEnable, self.torque_disable)
self.serial_proxies[port_namespace] = serial_proxy
#Publishers
self.diagnostics_pub = rospy.Publisher('/pressure', CustomHand, queue_size=1)
if self.diagnostics_rate > 0: Thread(target=self.diagnostics_processor).start()
#Subscribers
self._command_topic = '/command'#%self.bhand_node_name
self._subscriber_command = rospy.Subscriber(self._command_topic, JointState, self.receive_joints_data)
        # read_sensor() runs its own publishing loop, so start it in a background
        # thread (the bare attribute reference here was a no-op) and keep
        # forwarding joint data in the foreground.
        Thread(target=self.read_sensor).start()
        while not rospy.is_shutdown():
            self.receive_joints_data(data)
def on_shutdown(self):
for serial_proxy in self.serial_proxies.values():
serial_proxy.disconnect()
def diagnostics_processor(self):
diag_msg = DiagnosticArray()
rate = rospy.Rate(self.diagnostics_rate)
while not rospy.is_shutdown():
            diag_msg.status = []
diag_msg.header.stamp = rospy.Time.now()
for controller in self.controllers.values():
try:
joint_state = controller.joint_state
temps = joint_state.motor_temps
max_temp = max(temps)
status = DiagnosticStatus()
status.name = 'Joint Controller (%DMT_HAND)'
status.hardware_id = 'Robotis Dynamixel %s on port %s' % (str(joint_state.motor_ids), controller.port_namespace)
status.values.append(KeyValue('Goal', str(joint_state.goal_pos)))
status.values.append(KeyValue('Position', str(joint_state.current_pos)))
status.values.append(KeyValue('Error', str(joint_state.error)))
status.values.append(KeyValue('Velocity', str(joint_state.velocity)))
status.values.append(KeyValue('Load', str(joint_state.load)))
status.values.append(KeyValue('Moving', str(joint_state.is_moving)))
status.values.append(KeyValue('Temperature', str(max_temp)))
status.level = DiagnosticStatus.OK
status.message = 'OK'
diag_msg.status.append(status)
except:
pass
#self.states_pub.publish(status)
rate.sleep()
def check_deps(self):
controllers_still_waiting = []
for i,(controller_name,deps,kls) in enumerate(self.waiting_meta_controllers):
if not set(deps).issubset(self.controllers.keys()):
controllers_still_waiting.append(self.waiting_meta_controllers[i])
rospy.logwarn('[%s] not all dependencies started, still waiting for %s...' % (controller_name, str(list(set(deps).difference(self.controllers.keys())))))
else:
dependencies = [self.controllers[dep_name] for dep_name in deps]
controller = kls(controller_name, dependencies)
if controller.initialize():
controller.start()
self.controllers[controller_name] = controller
self.waiting_meta_controllers = controllers_still_waiting[:]
def start_controller(self, req):
port_name = req.port_name
package_path = req.package_path
module_name = req.module_name
class_name = req.class_name
controller_name = req.controller_name
self.start_controller_lock.acquire()
if controller_name in self.controllers:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Controller [%s] already started. If you want to restart it, call restart.' % controller_name)
try:
if module_name not in sys.modules:
# import if module not previously imported
package_module = __import__(package_path, globals(), locals(), [module_name], -1)
else:
# reload module if previously imported
package_module = reload(sys.modules[package_path])
controller_module = getattr(package_module, module_name)
        except ImportError as ie:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Cannot find controller module. Unable to start controller %s\n%s' % (module_name, str(ie)))
        except SyntaxError as se:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Syntax error in controller module. Unable to start controller %s\n%s' % (module_name, str(se)))
        except Exception as e:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Unknown error has occured. Unable to start controller %s\n%s' % (module_name, str(e)))
kls = getattr(controller_module, class_name)
if port_name == 'meta':
self.waiting_meta_controllers.append((controller_name,req.dependencies,kls))
self.check_deps()
self.start_controller_lock.release()
return StartControllerResponse(True, '')
if port_name != 'meta' and (port_name not in self.serial_proxies):
self.start_controller_lock.release()
return StartControllerResponse(False, 'Specified port [%s] not found, available ports are %s. Unable to start controller %s' % (port_name, str(self.serial_proxies.keys()), controller_name))
controller = kls(self.serial_proxies[port_name].dxl_io, controller_name, port_name)
if controller.initialize():
controller.start()
self.controllers[controller_name] = controller
self.check_deps()
self.start_controller_lock.release()
return StartControllerResponse(True, 'Controller %s successfully started.' % controller_name)
else:
self.start_controller_lock.release()
return StartControllerResponse(False, 'Initialization failed. Unable to start controller %s' % controller_name)
def stop_controller(self, req):
controller_name = req.controller_name
self.stop_controller_lock.acquire()
if controller_name in self.controllers:
self.controllers[controller_name].stop()
del self.controllers[controller_name]
self.stop_controller_lock.release()
return StopControllerResponse(True, 'controller %s successfully stopped.' % controller_name)
else:
            self.stop_controller_lock.release()
return StopControllerResponse(False, 'controller %s was not running.' % controller_name)
def restart_controller(self, req):
response1 = self.stop_controller(StopController(req.controller_name))
response2 = self.start_controller(req)
return RestartControllerResponse(response1.success and response2.success, '%s\n%s' % (response1.reason, response2.reason))
def receive_joints_data(self, data):
self.joint_state=data
self.position_control()
self.torque_control()
self.speed_control()
rospy.sleep(0.01)
def position_control(self):
for i in range(len(self.joint_state.name)):
if self.joint_state.name[i]=='joint_1':
self.position = int(self.joint_state.position[i])
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(1, self.position)
if self.joint_state.name[i]=='joint_2':
self.position = int(self.joint_state.position[i])
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(2, self.position)
if self.joint_state.name[i]=='joint_3':
self.position = int(self.joint_state.position[i])
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(3, self.position)
def speed_control(self):
for i in range(len(self.joint_state.name)):
if self.joint_state.name[i]=='joint_1':
self.speed = int(self.joint_state.velocity[i])
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_speed(1, self.speed)
if self.joint_state.name[i]=='joint_2':
self.speed = int(self.joint_state.velocity[i])
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_speed(2, self.speed)
if self.joint_state.name[i]=='joint_3':
self.speed = int(self.joint_state.velocity[i])
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_speed(3, self.speed)
def torque_control(self):
for i in range(len(self.joint_state.name)):
if self.joint_state.name[i]=='joint_1':
self.torque = int(self.joint_state.velocity[i])
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_goal_torque(1, self.torque)
if self.joint_state.name[i]=='joint_2':
self.torque = int(self.joint_state.velocity[i])
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_goal_torque(2, self.torque)
if self.joint_state.name[i]=='joint_3':
self.torque = int(self.joint_state.velocity[i])
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_goal_torque(3, self.torque)
def torque_disable(self, req):
"""
Sets the value of the torque enabled register to 1 or 0. When the
torque is disabled the servo can be moved manually while the motor is
still powered.
"""
if req.torque_enable==False:
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_torque_enabled(1, 0)
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_torque_enabled(2, 0)
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_torque_enabled(3, 0)
return
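    # The service registered above as '/torque_disable' uses the TorqueEnable
    # request type, so (assuming the standard srv definition with a single
    # `torque_enable` bool) the hand can be made back-drivable from the command
    # line with something like:
    #
    #     rosservice call /torque_disable "torque_enable: false"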
def read_sensor(self):
self.status = CustomHand()
rate = rospy.Rate(5)
while not rospy.is_shutdown():
try:
t = rospy.Time.now()
self.status.header.stamp =t
self.status.header.frame_id ='DENIRO/'
self.status.finger1=[0,0,0]
self.status.finger2=[0,0,0]
self.status.finger3=[0,0,0]
self.status.palm=[0,0]
self.status.finger1[0] = 1023-mcp_1.read_adc(5)
self.status.finger1[1] = 1023-mcp_1.read_adc(4)
self.status.finger1[2] = 1023-mcp_1.read_adc(3)
self.status.finger2[0] = 1023-mcp_1.read_adc(2)
self.status.finger2[1] = 1023-mcp_1.read_adc(1)
self.status.finger2[2] = 1023-mcp_1.read_adc(0)
self.status.finger3[0] = mcp_2.read_adc(7)
self.status.finger3[1] = mcp_2.read_adc(6)
self.status.finger3[2] = mcp_2.read_adc(5)
self.status.palm[0] = mcp_2.read_adc(4)
self.status.palm[1] = mcp_2.read_adc(3)
                rospy.loginfo(self.status)
except:
pass
self.diagnostics_pub.publish(self.status)
rate.sleep()
def handActions(self, req):
if req.action == Service.GRAB_GRASP:
try: self.grab()
except rospy.ServiceException: return False
if req.action == Service.OPEN_GRASP:
try: self.open_hand()
except rospy.ServiceException: return False
if req.action == Service.POINT_GRASP:
try: self.point()
except rospy.ServiceException: return False
if req.action == Service.SQUEEZE_GRASP:
try: self.squeeze()
except rospy.ServiceException: return False
def grab(self):
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(1, 2000)
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(2, 4000)
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(3, 1800)
return True
def open_hand(self):
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(1, 4000)
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(2, 2300)
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(3, 4000)
return True
def point(self):
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(1, 4000)
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(2, 2300)
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(3, 3100)
return True
def squeeze(self):
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(1, 1100)
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(2, 4000)
DynamixelIO('/dev/ttyACM0',57600,readback_echo=False).set_position(3, 1100)
return True
if __name__ == '__main__':
try:
manager = ControllerManager()
rospy.spin()
except rospy.ROSInterruptException: pass
|
|
try:
import capstone
except ImportError as e:
capstone = None
import pytest
import windows.native_exec.simple_x86 as x86
from windows.native_exec.simple_x86 import *
del Test # Prevent pytest warning
from windows.pycompat import int_types
VERBOSE = False
if capstone:
disassembleur = capstone.Cs(capstone.CS_ARCH_X86, capstone.CS_MODE_32)
disassembleur.detail = True
@pytest.fixture
def need_capstone():
if capstone is None:
raise pytest.skip("Capstone is not installed")
return True
pytestmark = pytest.mark.usefixtures("need_capstone")
def disas(x):
return list(disassembleur.disasm(x, 0))
class CheckInstr(object):
def __init__(self, instr_to_test, immediat_accepted=None, expected_result=None, debug=False):
self.instr_to_test = instr_to_test
self.expected_result = expected_result
self.immediat_accepted = immediat_accepted
self.debug = debug
def __call__(self, *args):
if self.debug:
import pdb;pdb.set_trace()
pdb.DONE = True
res = bytes(self.instr_to_test(*args).get_code())
capres_list = disas(res)
if len(capres_list) != 1:
raise AssertionError("Trying to disas an instruction resulted in multiple disassembled instrs")
capres = capres_list[0]
print("{0} {1}".format(capres.mnemonic, capres.op_str))
if len(res) != len(capres.bytes):
raise AssertionError("Not all bytes have been used by the disassembler")
if VERBOSE:
print(" * [CODE] {0}".format(repr(res)))
print(" * [DISAS] {0} {1}".format(capres.mnemonic, capres.op_str))
if self.expected_result is not None:
if "{0} {1}".format(capres.mnemonic, capres.op_str) == self.expected_result:
return True
else:
raise AssertionError("Expected result <{0}> got <{1}>".format(self.expected_result, "{0} {1}".format(capres.mnemonic, capres.op_str)))
self.compare_mnemo(capres)
self.compare_args(args, capres)
def compare_mnemo(self, capres):
expected = self.instr_to_test.__name__.lower()
if expected != str(capres.mnemonic):
raise AssertionError("Expected menmo {0} got {1}".format(expected, str(capres.mnemonic)))
return True
def compare_args(self, args, capres):
capres_op = list(capres.operands)
if len(args) != len(capres_op):
raise AssertionError("Expected {0} operands got {1}".format(len(args), len(capres_op)))
for op_args, cap_op in zip(args, capres_op):
if isinstance(op_args, str): # Register
if cap_op.type != capstone.x86.X86_OP_REG:
raise AssertionError("Expected args {0} operands got {1}".format(op_args, capres_op))
if op_args.lower() != capres.reg_name(cap_op.reg).lower():
raise AssertionError("Expected register <{0}> got {1}".format(op_args.lower(), capres.reg_name(cap_op.reg).lower()))
elif isinstance(op_args, int_types):
if (op_args != cap_op.imm) and not (self.immediat_accepted and self.immediat_accepted == cap_op.imm):
raise AssertionError("Expected Immediat <{0}> got {1}".format(op_args, cap_op.imm))
elif isinstance(op_args, mem_access):
self.compare_mem_access(op_args, capres, cap_op)
else:
raise ValueError("Unknow argument {0} of type {1}".format(op_args, type(op_args)))
def compare_mem_access(self, memaccess, capres, cap_op):
if cap_op.type != capstone.x86.X86_OP_MEM:
raise AssertionError("Expected Memaccess <{0}> got {1}".format(memaccess, cap_op))
if memaccess.prefix is not None and capres.prefix[1] != x86_segment_selectors[memaccess.prefix].PREFIX_VALUE:
try:
get_prefix = [n for n, x in x86_segment_selectors.items() if x.PREFIX_VALUE == capres.prefix[1]][0]
except IndexError:
get_prefix = None
raise AssertionError("Expected Segment overide <{0}> got {1}".format(memaccess.prefix, get_prefix))
cap_mem = cap_op.mem
if memaccess.base is None and cap_mem.base != capstone.x86.X86_REG_INVALID:
raise AssertionError("Unexpected memaccess base <{0}>".format(capres.reg_name(cap_mem.base)))
if memaccess.base is not None and capres.reg_name(cap_mem.base) != memaccess.base.lower():
raise AssertionError("Expected mem.base {0} got {1}".format(memaccess.base.lower(), capres.reg_name(cap_mem.base)))
if memaccess.index is None and cap_mem.index != capstone.x86.X86_REG_INVALID:
raise AssertionError("Unexpected memaccess index <{0}>".format(capres.reg_name(cap_mem.base)))
if memaccess.index is not None and capres.reg_name(cap_mem.index) != memaccess.index.lower():
raise AssertionError("Expected mem.index {0} got {1}".format(memaccess.index.lower(), capres.reg_name(cap_mem.index)))
if memaccess.scale != cap_mem.scale and not (memaccess.scale is None and cap_mem.scale == 1):
raise AssertionError("Expected mem.scale {0} got {1}".format(memaccess.scale, cap_mem.scale))
if memaccess.disp & 0xffffffff != cap_mem.disp & 0xffffffff:
raise AssertionError("Expected mem.disp {0} got {1}".format(memaccess.disp, cap_mem.disp))
def test_assembler():
CheckInstr(Mov)('EAX', 'CR3')
CheckInstr(Mov)('EDX', 'CR0')
CheckInstr(Mov)('EDI', 'CR7')
CheckInstr(Mov)('CR3', 'EAX')
CheckInstr(Mov)('CR0', 'EDX')
CheckInstr(Mov)('CR7', 'EDI')
# Registers
CheckInstr(Pushad, expected_result="pushal ")()
CheckInstr(Pushfd)()
CheckInstr(Popad, expected_result="popal ")()
CheckInstr(Popfd)()
CheckInstr(Mov)('EAX', 'ESP')
CheckInstr(Mov)('ECX', mem('[EAX]'))
CheckInstr(Mov)('EDX', mem('[ECX + 0x10]'))
CheckInstr(Mov)('EDX', mem('[EDI * 8 + 0xffff]'))
CheckInstr(Mov)('EDX', mem('[0x11223344]'))
CheckInstr(Mov)('EDX', mem('[ESP + EBP * 2 + 0x223344]'))
CheckInstr(Mov)(mem('[EBP + EBP * 2 + 0x223344]'), 'ESP')
CheckInstr(Mov)('ESI', mem('[ESI + EDI * 1]'))
CheckInstr(Mov)('EAX', mem('fs:[0x30]'))
CheckInstr(Mov)('EDI', mem('gs:[EAX + ECX * 4]'))
CheckInstr(Mov)('AX', 'AX')
CheckInstr(Mov)('SI', 'DI')
CheckInstr(Mov)('AX', 'AX')
CheckInstr(Mov)('AX', mem('fs:[0x30]'))
CheckInstr(Mov)('AX', mem('fs:[EAX + 0x30]'))
CheckInstr(Mov)('AX', mem('fs:[EAX + ECX * 4+0x30]'))
# Segment selector
CheckInstr(Mov)('SS', 'ECX')
CheckInstr(Mov)('ECX', 'SS')
CheckInstr(Mov)('EDX', 'es')
CheckInstr(Mov)('EDX', 'cs')
CheckInstr(Mov)('EDX', 'ds')
CheckInstr(Mov)('EDX', 'fs')
CheckInstr(Mov)('fs', 'eax')
CheckInstr(Mov)('fs', 'eax')
CheckInstr(Add)('EAX', 8)
CheckInstr(Add)('EAX', 0xffffffff)
CheckInstr(Add)("ECX", mem("[EAX + 0xff]"))
CheckInstr(Add)("ECX", mem("[EAX + 0xffffffff]"))
CheckInstr(Add)(mem('[EAX]'), 10)
CheckInstr(Mov)('EAX', mem('fs:[0xfffc]'))
CheckInstr(Mov)(mem('fs:[0xfffc]'), 0)
CheckInstr(Push)('ECX')
CheckInstr(Push)(mem('[ECX + 8]'))
CheckInstr(Sub)('ECX', 'ESP')
CheckInstr(Sub)('ECX', mem('[ESP]'))
CheckInstr(Inc)('EAX')
CheckInstr(Inc)(mem('[0x42424242]'))
CheckInstr(Lea)('EAX', mem('[EAX + 1]'))
CheckInstr(Lea)('ECX', mem('[EDI + -0xff]'))
CheckInstr(Call)('EAX')
CheckInstr(Call)(mem('[EAX + ECX * 8]'))
CheckInstr(Cpuid)()
CheckInstr(Movsb, expected_result='movsb byte ptr es:[edi], byte ptr [esi]')()
CheckInstr(Movsd, expected_result='movsd dword ptr es:[edi], dword ptr [esi]')()
CheckInstr(Xchg)('EAX', 'ESP')
CheckInstr(Rol)('EAX', 7)
CheckInstr(Rol)('ECX', 0)
CheckInstr(Ror)('ECX', 0)
CheckInstr(Ror)('EDI', 7)
CheckInstr(Ror)('EDI', -128)
CheckInstr(Cmp, immediat_accepted=0xffffffff)('EAX', -1)
CheckInstr(Cmp)('EAX', 0xffffffff)
CheckInstr(And)('ECX', 'EBX')
CheckInstr(And)('EAX', 0x11223344)
CheckInstr(And)('EAX', mem('[EAX + 1]'))
CheckInstr(And)(mem('[EAX + EAX]'), 'EDX')
CheckInstr(Or)('ECX', 'EBX')
CheckInstr(Or)('EAX', 0x11223344)
CheckInstr(Or)('EAX', mem('[EAX + 1]'))
CheckInstr(Or)(mem('[EAX + EAX]'), 'EDX')
CheckInstr(Shr)('EAX', 8)
CheckInstr(Shr)('EDX', 0x12)
CheckInstr(Shl)('EAX', 8)
CheckInstr(Shl)('EDX', 0x12)
CheckInstr(Not)('EAX')
CheckInstr(Not)(mem('[EAX]'))
CheckInstr(Int3)()
CheckInstr(Int)(0)
CheckInstr(Int)(3)
CheckInstr(Int)(0xff)
CheckInstr(ScasB, expected_result="scasb al, byte ptr es:[edi]")()
CheckInstr(ScasW, expected_result="scasw ax, word ptr es:[edi]")()
CheckInstr(ScasD, expected_result="scasd eax, dword ptr es:[edi]")()
CheckInstr(CmpsB, expected_result="cmpsb byte ptr [esi], byte ptr es:[edi]")()
CheckInstr(CmpsW, expected_result="cmpsw word ptr [esi], word ptr es:[edi]")()
CheckInstr(CmpsD, expected_result="cmpsd dword ptr [esi], dword ptr es:[edi]")()
CheckInstr(x86.Test)('EAX', 'EAX')
CheckInstr(x86.Test, expected_result="test edi, ecx")('ECX', 'EDI')
CheckInstr(x86.Test)(mem('[ECX + 0x100]'), 'ECX')
CheckInstr(x86.Test)('EAX', 0x11223344)
CheckInstr(x86.Test, immediat_accepted=-1)('EAX', 0xffffffff)
CheckInstr(x86.Test)('ECX', 0x42)
assert x86.Test(mem('[ECX + 0x100]'), 'ECX').get_code() == x86.Test('ECX', mem('[ECX + 0x100]')).get_code()
assert Xchg('EAX', 'ECX').get_code() == Xchg('ECX', 'EAX').get_code()
code = MultipleInstr()
code += Nop()
code += Rep + Nop()
code += Ret()
print(repr(code.get_code()))
assert code.get_code() == b"\x90\xf3\x90\xc3"
def test_simple_x64_raw_instruction():
# Test the fake instruction "raw"
# By emitting a multi-byte nop manually
CheckInstr(Raw, expected_result="nop word ptr [eax + eax]")("66 0F 1F 84 00 00 00 00 00")
def test_x86_multiple_instr_add_instr_and_str():
res = x86.MultipleInstr()
res += x86.Nop()
res += "ret; ret; label :offset_3; ret"
res += x86.Nop()
res += x86.Label(":offset_5")
assert res.get_code() == b"\x90\xc3\xc3\xc3\x90"
assert res.labels == {":offset_3": 3, ":offset_5": 5}
def test_x86_instr_multiply():
res = x86.MultipleInstr()
res += (x86.Nop() * 5)
res += x86.Ret()
assert res.get_code() == b"\x90\x90\x90\x90\x90\xc3"
import threading
threads_error = []
def test_x86_multithread_target():
try:
assert x86.Mov("ECX", "SS").get_code() == b"\x8c\xd1"
assert x86.Mov("SS", "ECX").get_code() == b"\x8e\xd1"
assert x86.Ret().get_code() == b"\xc3"
res = x86.MultipleInstr()
res += x86.Mov("ECX", "SS")
res += x86.Mov("SS", "ECX")
res += x86.Ret()
assert res.get_code() == b"\x8c\xd1\x8e\xd1\xc3"
except Exception as e:
threads_error.append(e)
raise
return True
def test_x86_multithread():
all_threads = []
for tnb in range(10):
t = threading.Thread(target=test_x86_multithread_target)
all_threads.append(t)
# import pdb; pdb.set_trace()
for t in all_threads:
t.start()
for t in all_threads:
t.join()
assert not threads_error, "x86 assembler output is inconsistent when used from multiple threads"
if capstone is None:
test_assembler = pytest.mark.skip("Capstone not installed")(test_assembler)
if __name__ == "__main__":
test_assembler()
|
|
import os
from front_base.config import ConfigBase
import simple_http_client
import utils
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
data_path = os.path.abspath(os.path.join(root_path, os.pardir, os.pardir, 'data'))
module_data_path = os.path.join(data_path, 'gae_proxy')
headers = {"connection": "close"}
fqrouter = simple_http_client.request("GET", "http://127.0.0.1:2515/ping", headers=headers, timeout=0.5)
mobile = fqrouter and "PONG" in fqrouter.text
del headers, fqrouter
class Config(ConfigBase):
def __init__(self, fn):
super(Config, self).__init__(fn)
# global setting level
# passive < conservative < normal < radical < extreme
self.set_var("setting_level", "normal")
# proxy
self.set_var("listen_ip", "127.0.0.1")
self.set_var("listen_port", 8087)
# auto range
self.set_var("AUTORANGE_THREADS", 10)
self.set_var("AUTORANGE_MAXSIZE", 512 * 1024)
if mobile:
self.set_var("AUTORANGE_MAXBUFFERSIZE", 10 * 1024 * 1024 / 8)
else:
self.set_var("AUTORANGE_MAXBUFFERSIZE", 20 * 1024 * 1024)
self.set_var("JS_MAXSIZE", 0)
# gae
self.set_var("GAE_PASSWORD", "")
self.set_var("GAE_VALIDATE", 0)
# host rules
self.set_var("hosts_direct", [
#b"docs.google.com",
#"play.google.com",
#b"scholar.google.com",
#"scholar.google.com.hk",
#b"appengine.google.com"
])
self.set_var("hosts_direct_endswith", [
#b".gvt1.com",
b".appspot.com"
])
self.set_var("hosts_gae", [
b"accounts.google.com",
b"mail.google.com"
])
self.set_var("hosts_gae_endswith", [
b".googleapis.com"
])
# sites using br
self.set_var("BR_SITES", [
b"webcache.googleusercontent.com",
b"www.google.com",
b"www.google.com.hk",
b"www.google.com.cn",
b"fonts.googleapis.com"
])
self.set_var("BR_SITES_ENDSWITH", [
b".youtube.com",
b".facebook.com",
b".googlevideo.com"
])
# some unsupported requests (e.g. URL length > 2048) will go direct
self.set_var("google_endswith", [
b".youtube.com",
b".googleapis.com",
b".google.com",
b".googleusercontent.com",
b".ytimg.com",
b".doubleclick.net",
b".google-analytics.com",
b".googlegroups.com",
b".googlesource.com",
b".gstatic.com",
b".appspot.com",
b".gvt1.com",
b".android.com",
b".ggpht.com",
b".googleadservices.com",
b".googlesyndication.com",
b".2mdn.net"
])
# front
self.set_var("front_continue_fail_num", 10)
self.set_var("front_continue_fail_block", 0)
# http_dispatcher
self.set_var("dispather_min_idle_workers", 3)
self.set_var("dispather_work_min_idle_time", 0)
self.set_var("dispather_work_max_score", 1000)
self.set_var("dispather_min_workers", 20)
self.set_var("dispather_max_workers", 50)
self.set_var("dispather_max_idle_workers", 15)
self.set_var("max_task_num", 80)
# http 1 worker
self.set_var("http1_first_ping_wait", 5)
self.set_var("http1_idle_time", 200)
self.set_var("http1_ping_interval", 0)
# http 2 worker
self.set_var("http2_max_concurrent", 20)
self.set_var("http2_target_concurrent", 1)
self.set_var("http2_max_timeout_tasks", 1)
self.set_var("http2_timeout_active", 0)
self.set_var("http2_ping_min_interval", 0)
# connect_manager
self.set_var("https_max_connect_thread", 10)
self.set_var("ssl_first_use_timeout", 5)
self.set_var("connection_pool_min", 1)
self.set_var("https_connection_pool_min", 0)
self.set_var("https_connection_pool_max", 10)
self.set_var("https_new_connect_num", 3)
self.set_var("https_keep_alive", 10)
# check_ip
self.set_var("check_ip_host", "xxnet-1.appspot.com")
self.set_var("check_ip_accept_status", [200, 503])
self.set_var("check_ip_content", b"GoAgent")
# host_manager
self.set_var("GAE_APPIDS", [])
# connect_creator
self.set_var("check_pkp", [
# https://pki.google.com/GIAG2.crt has expired
# https://pki.goog/gsr2/GIAG3.crt
# https://pki.goog/gsr2/GTSGIAG3.crt
b'''\
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAylJL6h7/ziRrqNpyGGjV
Vl0OSFotNQl2Ws+kyByxqf5TifutNP+IW5+75+gAAdw1c3UDrbOxuaR9KyZ5zhVA
Cu9RuJ8yjHxwhlJLFv5qJ2vmNnpiUNjfmonMCSnrTykUiIALjzgegGoYfB29lzt4
fUVJNk9BzaLgdlc8aDF5ZMlu11EeZsOiZCx5wOdlw1aEU1pDbcuaAiDS7xpp0bCd
c6LgKmBlUDHP+7MvvxGIQC61SRAPCm7cl/q/LJ8FOQtYVK8GlujFjgEWvKgaTUHF
k5GiHqGL8v7BiCRJo0dLxRMB3adXEmliK+v+IO9p+zql8H4p7u2WFvexH6DkkCXg
MwIDAQAB
-----END PUBLIC KEY-----
''',
# https://pki.goog/gsr4/GIAG3ECC.crt
b'''\
-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEG4ANKJrwlpAPXThRcA3Z4XbkwQvW
hj5J/kicXpbBQclS4uyuQ5iSOGKcuCRt8ralqREJXuRsnLZo0sIT680+VQ==
-----END PUBLIC KEY-----
''',
# https://pki.goog/gsr2/giag4.crt
b'''\
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvSw7AnhsoyYa5z/crKtt
B52X+R0ld3UdQBU4Yc/4wmF66cpHeEOMSmhdaY5RzYrowZ6kG1xXLrSoVUuudUPR
fg/zjRqv/AAVDJFqc8OnhghzaWZU9zlhtRgY4lx4Z6pDosTuR5imCcKvwqiDztOJ
r4YKHuk23p3cxu1zDnUsuN+cm4TkVtI1SsuSc9t1uErBvFIcW6v3dLcjrPkmwE61
udZQlBDHJzCFwrhXLtXLlmuSA5/9pOuWJ+U3rSgS7ICSfa83vkBe00ymjIZT6ogD
XWuFsu4edue27nG8g9gO1YozIUCV7+zExG0G5kxTovis+FJpy9hIIxSFrRIKM4DX
aQIDAQAB
-----END PUBLIC KEY-----
''',
# https://pki.goog/gsr4/giag4ecc.crt
b'''\
-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEWgDxDsTP7Od9rB8TPUltMacYCHYI
NthcDjlPu3wP0Csmy6Drit3ghqaTqFecqcgks5RwcKQkT9rbY3e8lHuuAw==
-----END PUBLIC KEY-----
''',
# https://pki.goog/gsr2/GTS1O1.crt
b'''\
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0BjPRdSLzdOc5EDvfrTd
aSEbyc88jkx1uQ8xGYQ9njwp71ANEJNvBYCAnyqgvRJLAuE9n1gWJP4wnwt0d1WT
HUv3TeGSghD2UawMw7IilA80a5gQSecLnYM53SDGHC3v0RhhZecjgyCoIxL/0iR/
1C/nRGpbTddQZrCvnkJjBfvgHMRjYa+fajP/Ype9SNnTfBRn3HXcLmno+G14adC3
EAW48THCOyT9GjN0+CPg7GsZihbG482kzQvbs6RZYDiIO60ducaMp1Mb/LzZpKu8
3Txh15MVmO6BvY/iZEcgQAZO16yX6LnAWRKhSSUj5O1wNCyltGN8+aM9g9HNbSSs
BwIDAQAB
-----END PUBLIC KEY-----
''',
# https://pki.goog/gsr2/GTS1D2.crt
b'''\
-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAstl74eHXPxyRcv/5EM2H
FXl0tz5Hi7JhVf0MNsZ+d0I6svpSWwtxgdZN1ekrJE0jXosrcl8hVbUp70TL64JS
qz4npJJJQUreqN0x4DzfbXpNLdZtCbAO42Hysv6QbFp7EGRJtAs8CPLqeQxsphqJ
alYyoCmiMIKPgVEM86K52XW5Ip4nFLpKLyxjWIfxXRDmX5G7uVvMR+IedbaMj8x1
XVcF54LGhA50cirLO1X1bnDrZmnDJLs4kzWbaGEvm9aupndyfHFIWDMQr+mAgh21
B0Ab9j3soq1HnbSUKTSzjC/NJQNYNcAlpFVf4bMHVj3I0GO4IPuMHUMs+Pmp1exv
lwIDAQAB
-----END PUBLIC KEY-----
'''
])
#self.set_var("check_commonname", "Google")
self.set_var("min_intermediate_CA", 2)
self.set_var("support_http2", 1)
# ip_manager
self.set_var("max_scan_ip_thread_num", 10)
self.set_var("max_good_ip_num", 100)
self.set_var("target_handshake_time", 600)
# ip source
self.set_var("use_ipv6", "auto") #force_ipv4/force_ipv6/auto
self.set_var("ipv6_scan_ratio", 90) # 0 - 100
# Check local network
self.set_var("check_local_network_rules", "normal") # normal, force_ok, force_fail
self.load()
def load(self):
super(Config, self).load()
need_save = 0
if not os.path.isfile(self.config_path):
for fn in [
os.path.join(module_data_path, "config.ini"),
os.path.join(module_data_path, "manual.ini")
]:
need_save += self.load_old_config(fn)
self.HOSTS_GAE = tuple(utils.to_bytes(self.hosts_gae))
self.HOSTS_DIRECT = tuple(utils.to_bytes(self.hosts_direct))
self.HOSTS_GAE_ENDSWITH = tuple(utils.to_bytes(self.hosts_gae_endswith))
self.HOSTS_DIRECT_ENDSWITH = tuple(utils.to_bytes(self.hosts_direct_endswith))
self.GOOGLE_ENDSWITH = tuple(utils.to_bytes(self.google_endswith))
self.br_sites = tuple(utils.to_bytes(self.BR_SITES))
self.br_endswith = tuple(utils.to_bytes(self.BR_SITES_ENDSWITH))
# there are only hundreds of GAE IPs, so we don't need a large number of scan threads
self.max_scan_ip_thread_num = min(self.max_scan_ip_thread_num, 200)
if need_save:
self.save()
def load_old_config(self, fn):
if not os.path.isfile(fn):
return 0
need_save = 0
with open(fn, "r") as fd:
for line in fd.readlines():
if line.startswith("appid"):
try:
appid_str = line.split("=")[1]
appids = []
for appid in appid_str.split("|"):
appid = appid.strip()
appids.append(appid)
self.GAE_APPIDS = appids
need_save += 1
except Exception as e:
pass
elif line.startswith("password"):
password = line.split("=")[1].strip()
self.GAE_PASSWORD = password
need_save += 1
return need_save
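# Sketch of the legacy ini lines that load_old_config() understands (values
# here are made up for illustration):
#   appid = my-appid-1|my-appid-2   -> self.GAE_APPIDS = ["my-appid-1", "my-appid-2"]
#   password = secret               -> self.GAE_PASSWORD = "secret"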
def set_level(self, level=None):
if level is None:
level = self.setting_level
elif level in ["passive", "conservative", "normal", "radical", "extreme"]:
self.setting_level = level
if level == "passive":
self.dispather_min_idle_workers = 0
self.dispather_work_min_idle_time = 0
self.dispather_work_max_score = 1000
self.dispather_min_workers = 5
self.dispather_max_workers = 30
self.dispather_max_idle_workers = 5
self.max_task_num = 50
self.https_max_connect_thread = 10
self.https_keep_alive = 5
self.https_connection_pool_min = 0
self.https_connection_pool_max = 10
self.max_scan_ip_thread_num = 10
self.max_good_ip_num = 60
self.target_handshake_time = 600
elif level == "conservative":
self.dispather_min_idle_workers = 1
self.dispather_work_min_idle_time = 0
self.dispather_work_max_score = 1000
self.dispather_min_workers = 10
self.dispather_max_workers = 30
self.dispather_max_idle_workers = 10
self.max_task_num = 50
self.https_max_connect_thread = 10
self.https_keep_alive = 15
self.https_connection_pool_min = 0
self.https_connection_pool_max = 10
self.max_scan_ip_thread_num = 10
self.max_good_ip_num = 100
self.target_handshake_time = 600
elif level == "normal":
self.dispather_min_idle_workers = 3
self.dispather_work_min_idle_time = 0
self.dispather_work_max_score = 1000
self.dispather_min_workers = 20
self.dispather_max_workers = 50
self.dispather_max_idle_workers = 15
self.max_task_num = 80
self.https_max_connect_thread = 10
self.https_keep_alive = 15
self.https_connection_pool_min = 0
self.https_connection_pool_max = 10
self.max_scan_ip_thread_num = 10
self.max_good_ip_num = 100
self.target_handshake_time = 600
elif level == "radical":
self.dispather_min_idle_workers = 3
self.dispather_work_min_idle_time = 1
self.dispather_work_max_score = 1000
self.dispather_min_workers = 30
self.dispather_max_workers = 70
self.dispather_max_idle_workers = 25
self.max_task_num = 100
self.https_max_connect_thread = 15
self.https_keep_alive = 15
self.https_connection_pool_min = 1
self.https_connection_pool_max = 15
self.max_scan_ip_thread_num = 20
self.max_good_ip_num = 100
self.target_handshake_time = 1200
elif level == "extreme":
self.dispather_min_idle_workers = 5
self.dispather_work_min_idle_time = 5
self.dispather_work_max_score = 1000
self.dispather_min_workers = 45
self.dispather_max_workers = 100
self.dispather_max_idle_workers = 40
self.max_task_num = 130
self.https_max_connect_thread = 20
self.https_keep_alive = 15
self.https_connection_pool_min = 2
self.https_connection_pool_max = 20
self.max_scan_ip_thread_num = 30
self.max_good_ip_num = 200
self.target_handshake_time = 1500
self.save()
self.load()
class DirectConfig(object):
def __init__(self, config):
self._config = config
self.set_default()
def __getattr__(self, attr):
return getattr(self._config, attr)
def dummy(*args, **kwargs):
pass
set_var = save = load = dummy
def set_level(self, level=None):
if level is None:
level = self.setting_level
if level == "passive":
self.dispather_min_idle_workers = 0
self.dispather_work_min_idle_time = 0
self.dispather_work_max_score = 1000
self.dispather_min_workers = 0
self.dispather_max_workers = 8
self.dispather_max_idle_workers = 0
self.max_task_num = 16
self.https_max_connect_thread = 4
self.https_connection_pool_min = 0
self.https_connection_pool_max = 6
elif level == "conservative":
self.dispather_min_idle_workers = 1
self.dispather_work_min_idle_time = 0
self.dispather_work_max_score = 1000
self.dispather_min_workers = 1
self.dispather_max_workers = 8
self.dispather_max_idle_workers = 2
self.max_task_num = 16
self.https_max_connect_thread = 5
self.https_connection_pool_min = 0
self.https_connection_pool_max = 8
elif level == "normal":
self.dispather_min_idle_workers = 2
self.dispather_work_min_idle_time = 0
self.dispather_work_max_score = 1000
self.dispather_min_workers = 3
self.dispather_max_workers = 8
self.dispather_max_idle_workers = 3
self.max_task_num = 16
self.https_max_connect_thread = 6
self.https_connection_pool_min = 0
self.https_connection_pool_max = 10
elif level == "radical":
self.dispather_min_idle_workers = 3
self.dispather_work_min_idle_time = 1
self.dispather_work_max_score = 1000
self.dispather_min_workers = 5
self.dispather_max_workers = 10
self.dispather_max_idle_workers = 5
self.max_task_num = 20
self.https_max_connect_thread = 6
self.https_connection_pool_min = 1
self.https_connection_pool_max = 10
elif level == "extreme":
self.dispather_min_idle_workers = 5
self.dispather_work_min_idle_time = 5
self.dispather_work_max_score = 1000
self.dispather_min_workers = 5
self.dispather_max_workers = 15
self.dispather_max_idle_workers = 5
self.max_task_num = 30
self.https_max_connect_thread = 10
self.https_connection_pool_min = 1
self.https_connection_pool_max = 10
set_default = set_level
config_path = os.path.join(module_data_path, "config.json")
config = Config(config_path)
direct_config = DirectConfig(config)
|
|
# Python 3
from __future__ import print_function
import csv
from pathlib import Path
import sys
import functools
import multiprocessing
import itertools
import axelrod as axl
import pandas as pd
from axelrod import ApproximateMoranProcess, Pdf
from generate_cache import read_csv
# For tests
import collections
import tempfile
import unittest
def output_players(players, outfilename="players.csv"):
"""Cache players to disk for later retrieval."""
rows = [(i, str(player), player.classifier["stochastic"]) for (i, player) in enumerate(players)]
path = Path("../data") / outfilename
with path.open('w') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(rows)
def build_population(players, i, j, weights):
"""Return the population of strategies according to a given weights"""
sub_players = players[i], players[j]
population = []
for player, weight in zip(sub_players, weights):
for _ in range(weight):
population.append(player.clone())
return population
def obtain_current_count(filename):
"""Count the number of repetitions for a given strategy pair"""
df = pd.read_csv(filename, header=None, names=["Strategy 1 index",
"Strategy 2 index",
"Winner index",
"Count"])
counts = {pair: f["Count"].sum()
for pair, f in df.groupby(["Strategy 1 index",
"Strategy 2 index"])}
return counts
def write_winner(outfilename, names_inv,
N, i, j, repetitions, n=1):
"""
Write the winner of a Moran process to file
"""
initial_population = build_population(players, i, j, [n, N-n])
s1 = str(players[i].clone())
s2 = str(players[j].clone())
# Pull out just the interaction we need
outcomes = dict()
for pair in [(s1, s1), (s1, s2), (s2, s1), (s2, s2)]:
outcomes[pair] = match_outcomes[pair]
mp = ApproximateMoranProcess(initial_population, cached_outcomes=outcomes)
data = {i: 0, j: 0}
for seed in range(repetitions):
axl.seed(seed)
mp.reset()
mp.play()
winner_name = mp.winning_strategy_name
data[names_inv[winner_name]] += 1
path = Path("../data")
path = path / outfilename
with path.open('a') as f:
outputfile = csv.writer(f)
for winner, count in data.items():
outputfile.writerow([i, j, winner, count])
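# Each row appended by write_winner() has the form
# [strategy_i_index, strategy_j_index, winner_index, count], which is the
# column layout that obtain_current_count() expects when resuming a run.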
def run_simulations(N=2, repetitions=1000, outfilename=None,
processes=None, count=False, n=1):
"""This function conducts many moran processes to empirically estimate
fixation probabilities. For each pair of strategies, the population consists
of n player of the first type and N-n players of the second type."""
if not outfilename:
outfilename = "sims_{N}.csv".format(N=N)
# Obtain the current count of already-completed repetitions
if count is True:
try:
counts = obtain_current_count("../data/" + outfilename)
except OSError:
# If file does not exist then don't count
count = False
# Cache names to reverse winners to ids later
names_inv = dict(zip([str(p) for p in players], range(len(players))))
player_indices = range(len(players))
if processes is None:
for i in player_indices:
for j in player_indices:
if i != j:
if count is True:
reps = repetitions - counts.get((i, j), 0)
else:
reps = repetitions
if reps > 0:
write_winner(outfilename, names_inv, N, i, j, reps)
else:
if processes == 0:
processes = multiprocessing.cpu_count()
func = functools.partial(write_winner, outfilename,
names_inv, N)
p = multiprocessing.Pool(processes)
player_index_pairs = [(i, j)
for i, j in itertools.product(player_indices,
player_indices)
if i != j]
if count is True:
reps = [repetitions - counts.get(pair, 0)
for pair in player_index_pairs]
else:
reps = [repetitions for pair in player_index_pairs]
args = (pair + (reps[i], n) for i, pair in enumerate(player_index_pairs)
if reps[i] > 0)
p.starmap(func, args)
def main():
N = int(sys.argv[1]) # Population size
try:
n = int(sys.argv[2]) # Initial population (n, N - n)
except IndexError:
n = 1
try:
outfilename = sys.argv[4]
except IndexError:
outfilename = None
repetitions = 1000
# Make sure the data folder exists
path = Path("../data")
path.mkdir(exist_ok=True)
output_players(players)
run_simulations(N=N, repetitions=repetitions, processes=0, count=True,
outfilename=outfilename, n=n)
if __name__ == "__main__":
# match_outcomes and players are global
# Run with `python moran.py <N> <n> <outcome_file> <filename>`
try:
match_outcomes_file = sys.argv[3]
except IndexError:
match_outcomes_file = "../data/outcomes.csv"
match_outcomes = read_csv(match_outcomes_file)
for k, v in match_outcomes.items():
match_outcomes[k] = Pdf(v)
# players are global
from players import selected_players
players = selected_players()
main()
#########
# Tests #
#########
class Test_output_players(unittest.TestCase):
"""Test the output players function"""
def test_output(self):
outfile = tempfile.NamedTemporaryFile('w')
players = [s() for s in axl.demo_strategies]
output_players(players, outfile.name)
with open(outfile.name, 'r') as outfile:
test_output = [row for row in csv.reader(outfile)]
expected_output = [['0', 'Cooperator', 'False'],
['1', 'Defector', 'False'],
['2', 'Tit For Tat', 'False'],
['3', 'Grudger', 'False'],
['4', 'Random: 0.5', 'True']]
self.assertEqual(test_output, expected_output)
class Test_build_population(unittest.TestCase):
"""Test the output players function"""
def test_build_pop(self):
players = [axl.Cooperator(), axl.Defector()]
for weights in [(1, 1), (1, 5), (5, 3), (0, 0), (4, 12)]:
population = build_population(players, 0, 1, weights)
str_population = [str(p) for p in population]
self.assertEqual(str_population, ['Cooperator'] * weights[0] +
['Defector'] * weights[1])
class Test_obtain_current_count(unittest.TestCase):
"""Test the obtain current count function"""
def test_obtain_current_count(self):
data = [(0, 1, 0, 5), (0, 1, 1, 1), (0, 2, 0, 1), (0, 2, 1, 3)]
df = pd.DataFrame(data)
temp_file = tempfile.NamedTemporaryFile("w")
df.to_csv(temp_file.name, header=False)
current_count = obtain_current_count(temp_file.name)
for pair, count in [((0, 1), 6), ((0, 2), 4)]:
self.assertEqual(current_count[pair], count)
temp_file.close()
class Test_write_winner(unittest.TestCase):
"""Test that the output of a given simulation is as expected"""
global players
players = [axl.Cooperator(), axl.Defector()]
global match_outcomes
match_outcomes = {}
counter = collections.Counter([(5, 0)])
pdf = Pdf(counter)
match_outcomes[('Defector', 'Cooperator')] = pdf
counter = collections.Counter([(0, 5)])
pdf = Pdf(counter)
match_outcomes[('Cooperator', 'Defector')] = pdf
counter = collections.Counter([(3, 3)])
pdf = Pdf(counter)
match_outcomes[('Cooperator', 'Cooperator')] = pdf
counter = collections.Counter([(1, 1)])
pdf = Pdf(counter)
match_outcomes[('Defector', 'Defector')] = pdf
temp_file = tempfile.NamedTemporaryFile()
names_inv = {"Cooperator": 0, "Defector": 1}
def test_write_winner(self):
write_winner(self.temp_file.name, self.names_inv, 2, 0, 1, 10)
df = pd.read_csv(self.temp_file.name, header=None)
self.assertEqual(list(df.iloc[:, 3]), [0, 10])
self.temp_file.close()
|
|
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova import utils
from nova.virt import hardware
@base.NovaObjectRegistry.register
class ImageMeta(base.NovaObject):
VERSION = '1.0'
# These are driven by what the image client API returns
# to Nova from Glance. This is defined in the glance
# code glance/api/v2/images.py get_base_properties()
# method. A few things are currently left out:
# self, file, schema - Nova does not appear to ever use
# these fields; locations - modelling the arbitrary
# data in the 'metadata' subfield is non-trivial as
# there's no clear spec
#
fields = {
'id': fields.UUIDField(),
'name': fields.StringField(),
'status': fields.StringField(),
'visibility': fields.StringField(),
'protected': fields.FlexibleBooleanField(),
'checksum': fields.StringField(),
'owner': fields.StringField(),
'size': fields.IntegerField(),
'virtual_size': fields.IntegerField(),
'container_format': fields.StringField(),
'disk_format': fields.StringField(),
'created_at': fields.DateTimeField(nullable=True),
'updated_at': fields.DateTimeField(nullable=True),
'tags': fields.ListOfStringsField(),
'direct_url': fields.StringField(),
'min_ram': fields.IntegerField(),
'min_disk': fields.IntegerField(),
'properties': fields.ObjectField('ImageMetaProps'),
}
obj_relationships = {
'properties': [('1.0', '1.0')],
}
@classmethod
def from_dict(cls, image_meta):
"""Create instance from image metadata dict
:param image_meta: image metadata dictionary
Creates a new object instance, initializing from the
properties associated with the image metadata instance
:returns: an ImageMeta instance
"""
if image_meta is None:
image_meta = {}
# We must turn 'properties' key dict into an object
# so copy image_meta to avoid changing original
image_meta = copy.deepcopy(image_meta)
image_meta["properties"] = \
objects.ImageMetaProps.from_dict(
image_meta.get("properties", {}))
return cls(**image_meta)
@classmethod
def from_instance(cls, instance):
"""Create instance from instance system metadata
:param instance: Instance object
Creates a new object instance, initializing from the
system metadata "image_" properties associated with
instance
:returns: an ImageMeta instance
"""
sysmeta = utils.instance_sys_meta(instance)
image_meta = utils.get_image_from_system_metadata(sysmeta)
return cls.from_dict(image_meta)
@base.NovaObjectRegistry.register
class ImageMetaProps(base.NovaObject):
VERSION = ImageMeta.VERSION
# 'hw_' - settings affecting the guest virtual machine hardware
# 'img_' - settings affecting the use of images by the compute node
# 'os_' - settings affecting the guest operating system setup
fields = {
# name of guest hardware architecture eg i686, x86_64, ppc64
'hw_architecture': fields.ArchitectureField(),
# used to decide to expand root disk partition and fs to full size of
# root disk
'hw_auto_disk_config': fields.StringField(),
# whether to display BIOS boot device menu
'hw_boot_menu': fields.FlexibleBooleanField(),
# name of the CDROM bus to use eg virtio, scsi, ide
'hw_cdrom_bus': fields.DiskBusField(),
# preferred number of CPU cores per socket
'hw_cpu_cores': fields.IntegerField(),
# preferred number of CPU sockets
'hw_cpu_sockets': fields.IntegerField(),
# maximum number of CPU cores per socket
'hw_cpu_max_cores': fields.IntegerField(),
# maximum number of CPU sockets
'hw_cpu_max_sockets': fields.IntegerField(),
# maximum number of CPU threads per core
'hw_cpu_max_threads': fields.IntegerField(),
# CPU thread allocation policy
'hw_cpu_policy': fields.CPUAllocationPolicyField(),
# preferred number of CPU threads per core
'hw_cpu_threads': fields.IntegerField(),
# guest ABI version for guest xentools either 1 or 2 (or 3 - depends on
# Citrix PV tools version installed in image)
'hw_device_id': fields.IntegerField(),
# name of the hard disk bus to use eg virtio, scsi, ide
'hw_disk_bus': fields.DiskBusField(),
# allocation mode eg 'preallocated'
'hw_disk_type': fields.StringField(),
# name of the floppy disk bus to use eg fd, scsi, ide
'hw_floppy_bus': fields.DiskBusField(),
# boolean - used to trigger code to inject networking when booting a CD
# image with a network boot image
'hw_ipxe_boot': fields.FlexibleBooleanField(),
# There are sooooooooooo many possible machine types in
# QEMU - several new ones with each new release - that it
# is not practical to enumerate them all. So we use a free
# form string
'hw_machine_type': fields.StringField(),
# One of the magic strings 'small', 'any', 'large'
# or an explicit page size in KB (eg 4, 2048, ...)
'hw_mem_page_size': fields.StringField(),
# Number of guest NUMA nodes
'hw_numa_nodes': fields.IntegerField(),
# Each list entry corresponds to a guest NUMA node and the
# set members indicate CPUs for that node
'hw_numa_cpus': fields.ListOfSetsOfIntegersField(),
# Each list entry corresponds to a guest NUMA node and the
# list value indicates the memory size of that node.
'hw_numa_mem': fields.ListOfIntegersField(),
# boolean 'yes' or 'no' to enable QEMU guest agent
'hw_qemu_guest_agent': fields.FlexibleBooleanField(),
# name of the RNG device type eg virtio
'hw_rng_model': fields.RNGModelField(),
# number of serial ports to create
'hw_serial_port_count': fields.IntegerField(),
# name of the SCSI bus controller eg 'virtio-scsi', 'lsilogic', etc
'hw_scsi_model': fields.SCSIModelField(),
# name of the video adapter model to use, eg cirrus, vga, xen, qxl
'hw_video_model': fields.VideoModelField(),
# MB of video RAM to provide eg 64
'hw_video_ram': fields.IntegerField(),
# name of a NIC device model eg virtio, e1000, rtl8139
'hw_vif_model': fields.VIFModelField(),
# "xen" vs "hvm"
'hw_vm_mode': fields.VMModeField(),
# action to take when watchdog device fires eg reset, poweroff, pause,
# none
'hw_watchdog_action': fields.WatchdogActionField(),
# if true download using bittorrent
'img_bittorrent': fields.FlexibleBooleanField(),
# Which data format the 'img_block_device_mapping' field is
# using to represent the block device mapping
'img_bdm_v2': fields.FlexibleBooleanField(),
# Block device mapping - the data may be in one of two completely
# different formats. The 'img_bdm_v2' field determines whether
# it is in legacy format, or the new current format. Ideally
# we would have a formal data type for this field instead of a
# dict, but with 2 different formats to represent this is hard.
# See nova/block_device.py from_legacy_mapping() for the complex
# conversion code. So for now leave it as a dict and continue
# to use existing code that is able to convert dict into the
# desired internal BDM formats
'img_block_device_mapping':
fields.ListOfDictOfNullableStringsField(),
# boolean - if True, and image cache set to "some" decides if image
# should be cached on host when server is booted on that host
'img_cache_in_nova': fields.FlexibleBooleanField(),
# Compression level for images. (1-9)
'img_compression_level': fields.IntegerField(),
# boolean flag to set space-saving or performance behavior on the
# Datastore
'img_linked_clone': fields.FlexibleBooleanField(),
# Image mappings - related to Block device mapping data - mapping
# of virtual image names to device names. This could be represented
# as a formal data type, but is left as dict for the same reason as
# img_block_device_mapping field. It would arguably make sense for
# the two to be combined into a single field and data type in the
# future.
'img_mappings': fields.ListOfDictOfNullableStringsField(),
# image project id (set on upload)
'img_owner_id': fields.StringField(),
# root device name, used in snapshotting eg /dev/<blah>
'img_root_device_name': fields.StringField(),
# boolean - if false don't talk to nova agent
'img_use_agent': fields.FlexibleBooleanField(),
# integer value 1
'img_version': fields.IntegerField(),
# string of boot time command line arguments for the guest kernel
'os_command_line': fields.StringField(),
# the name of the specific guest operating system distro. This
# is not done as an Enum since the list of operating systems is
# growing incredibly fast, and valid values can be arbitrarily
# user defined. Nova has no real need for strict validation so
# leave it freeform
'os_distro': fields.StringField(),
# boolean - if using agent don't inject files, assume someone else is
# doing that (cloud-init)
'os_skip_agent_inject_files_at_boot': fields.FlexibleBooleanField(),
# boolean - if using agent don't try inject ssh key, assume someone
# else is doing that (cloud-init)
'os_skip_agent_inject_ssh': fields.FlexibleBooleanField(),
# The guest operating system family such as 'linux', 'windows' - this
# is a fairly generic type. For a detailed type consider os_distro
# instead
'os_type': fields.OSTypeField(),
}
# The keys are the legacy property names and
# the values are the current preferred names
_legacy_property_map = {
'architecture': 'hw_architecture',
'owner_id': 'img_owner_id',
'vmware_adaptertype': 'hw_scsi_model',
'vmware_disktype': 'hw_disk_type',
'vmware_image_version': 'img_version',
'vmware_ostype': 'os_distro',
'auto_disk_config': 'hw_auto_disk_config',
'ipxe_boot': 'hw_ipxe_boot',
'xenapi_device_id': 'hw_device_id',
'xenapi_image_compression_level': 'img_compression_level',
'vmware_linked_clone': 'img_linked_clone',
'xenapi_use_agent': 'img_use_agent',
'xenapi_skip_agent_inject_ssh': 'os_skip_agent_inject_ssh',
'xenapi_skip_agent_inject_files_at_boot':
'os_skip_agent_inject_files_at_boot',
'cache_in_nova': 'img_cache_in_nova',
'vm_mode': 'hw_vm_mode',
'bittorrent': 'img_bittorrent',
'mappings': 'img_mappings',
'block_device_mapping': 'img_block_device_mapping',
'bdm_v2': 'img_bdm_v2',
'root_device_name': 'img_root_device_name',
}
# TODO(berrange): Need to run this from a data migration
# at some point so we can eventually kill off the compat
def _set_attr_from_legacy_names(self, image_props):
for legacy_key in self._legacy_property_map:
new_key = self._legacy_property_map[legacy_key]
if legacy_key not in image_props:
continue
setattr(self, new_key, image_props[legacy_key])
def _set_numa_mem(self, image_props):
nodes = int(image_props.get("hw_numa_nodes", "1"))
hw_numa_mem = [None for i in range(nodes)]
hw_numa_mem_set = False
for cellid in range(nodes):
memprop = "hw_numa_mem.%d" % cellid
if memprop in image_props:
hw_numa_mem[cellid] = int(image_props[memprop])
hw_numa_mem_set = True
del image_props[memprop]
if hw_numa_mem_set:
self.hw_numa_mem = hw_numa_mem
def _set_numa_cpus(self, image_props):
nodes = int(image_props.get("hw_numa_nodes", "1"))
hw_numa_cpus = [None for i in range(nodes)]
hw_numa_cpus_set = False
for cellid in range(nodes):
cpuprop = "hw_numa_cpus.%d" % cellid
if cpuprop in image_props:
hw_numa_cpus[cellid] = \
hardware.parse_cpu_spec(image_props[cpuprop])
hw_numa_cpus_set = True
del image_props[cpuprop]
if hw_numa_cpus_set:
self.hw_numa_cpus = hw_numa_cpus
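    # Illustrative example (hypothetical image properties, not from the Glance
    # docs): given {"hw_numa_nodes": "2", "hw_numa_mem.0": "512",
    # "hw_numa_mem.1": "512", "hw_numa_cpus.0": "0-1", "hw_numa_cpus.1": "2-3"},
    # the two helpers above set hw_numa_mem = [512, 512] and
    # hw_numa_cpus = [{0, 1}, {2, 3}], deleting the per-cell keys as they go.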
def _set_attr_from_current_names(self, image_props):
for key in self.fields:
# The two NUMA fields need special handling to
# un-stringify them correctly
if key == "hw_numa_mem":
self._set_numa_mem(image_props)
elif key == "hw_numa_cpus":
self._set_numa_cpus(image_props)
else:
if key not in image_props:
continue
setattr(self, key, image_props[key])
@classmethod
def from_dict(cls, image_props):
"""Create instance from image properties dict
:param image_props: dictionary of image metadata properties
Creates a new object instance, initializing from a
dictionary of image metadata properties
:returns: an ImageMetaProps instance
"""
obj = cls()
# We look to see if the dict has entries for any
# of the legacy property names first. Then we use
# the current property names. That way if both the
# current and legacy names are set, the value
# associated with the current name takes priority
obj._set_attr_from_legacy_names(image_props)
obj._set_attr_from_current_names(image_props)
return obj
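    # Illustrative example (hypothetical values): from_dict(
    #     {"architecture": "i686", "hw_architecture": "x86_64"})
    # first applies the legacy key, setting hw_architecture = "i686", then the
    # current key overwrites it, so the instance ends up with "x86_64".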
def get(self, name, defvalue=None):
"""Get the value of an attribute
:param name: the attribute to request
:param defvalue: the default value if not set
This returns the value of an attribute if it is currently
set, otherwise it returns the supplied default value.
This differs from accessing props.attrname, because that
will raise an exception if the attribute has no value set.
So instead of
if image_meta.properties.obj_attr_is_set("some_attr"):
val = image_meta.properties.some_attr
else:
val = None
Callers can rely on unconditional access
val = image_meta.properties.get("some_attr")
:returns: the attribute value or None
"""
if not self.obj_attr_is_set(name):
return defvalue
return getattr(self, name)
|
|
"""
Code to manage the creation and SQL rendering of 'where' constraints.
"""
from django.core.exceptions import EmptyResultSet
from django.utils import tree
from django.utils.functional import cached_property
# Connection types
AND = 'AND'
OR = 'OR'
class WhereNode(tree.Node):
"""
Used to represent the SQL where-clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
A child is usually an expression producing boolean values. Most likely the
expression is a Lookup instance.
However, a child could also be any class with an as_sql() method, either
a relabeled_clone() method or relabel_aliases() and clone() methods, and a
contains_aggregate attribute.
"""
default = AND
def split_having(self, negated=False):
"""
Returns two possibly None nodes: one for those parts of self that
should be included in the WHERE clause and one for those parts of
self that must be included in the HAVING clause.
"""
if not self.contains_aggregate:
return self, None
in_negated = negated ^ self.negated
# If the effective connector is OR and this node contains an aggregate,
# then we need to push the whole branch to HAVING clause.
may_need_split = (
(in_negated and self.connector == AND) or
(not in_negated and self.connector == OR))
if may_need_split and self.contains_aggregate:
return None, self
where_parts = []
having_parts = []
for c in self.children:
if hasattr(c, 'split_having'):
where_part, having_part = c.split_having(in_negated)
if where_part is not None:
where_parts.append(where_part)
if having_part is not None:
having_parts.append(having_part)
elif c.contains_aggregate:
having_parts.append(c)
else:
where_parts.append(c)
having_node = self.__class__(having_parts, self.connector, self.negated) if having_parts else None
where_node = self.__class__(where_parts, self.connector, self.negated) if where_parts else None
return where_node, having_node
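    # Illustrative sketch (assumed children, not real Django lookup objects):
    # for a non-negated AND node whose children are a plain lookup such as
    # "price > 10" and an aggregate lookup such as "SUM(qty) > 5", split_having()
    # returns a WHERE node holding the plain lookup and a HAVING node holding
    # the aggregate one.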
def as_sql(self, compiler, connection):
"""
Returns the SQL version of the where clause and the value to be
substituted in. Returns '', [] if this node matches everything,
None, [] if this node is empty, and raises EmptyResultSet if this
node can't match anything.
"""
result = []
result_params = []
if self.connector == AND:
full_needed, empty_needed = len(self.children), 1
else:
full_needed, empty_needed = 1, len(self.children)
for child in self.children:
try:
sql, params = compiler.compile(child)
except EmptyResultSet:
empty_needed -= 1
else:
if sql:
result.append(sql)
result_params.extend(params)
else:
full_needed -= 1
# Check whether this node matches nothing or everything, using the
# counts of full and empty children collected above.
if empty_needed == 0:
if self.negated:
return '', []
else:
raise EmptyResultSet
if full_needed == 0:
if self.negated:
raise EmptyResultSet
else:
return '', []
conn = ' %s ' % self.connector
sql_string = conn.join(result)
if sql_string:
if self.negated:
# Some backends (Oracle at least) need parentheses
# around the inner SQL in the negated case, even if the
# inner SQL contains just a single expression.
sql_string = 'NOT (%s)' % sql_string
elif len(result) > 1:
sql_string = '(%s)' % sql_string
return sql_string, result_params
def get_group_by_cols(self):
cols = []
for child in self.children:
cols.extend(child.get_group_by_cols())
return cols
def get_source_expressions(self):
return self.children[:]
def set_source_expressions(self, children):
assert len(children) == len(self.children)
self.children = children
def relabel_aliases(self, change_map):
"""
Relabels the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
for pos, child in enumerate(self.children):
if hasattr(child, 'relabel_aliases'):
# For example another WhereNode
child.relabel_aliases(change_map)
elif hasattr(child, 'relabeled_clone'):
self.children[pos] = child.relabeled_clone(change_map)
def clone(self):
"""
Creates a clone of the tree. Must only be called on root nodes (nodes
with empty subtree_parents). Children must be either (Constraint, lookup,
value) tuples, or objects supporting .clone().
"""
clone = self.__class__._new_instance(
children=[], connector=self.connector, negated=self.negated)
for child in self.children:
if hasattr(child, 'clone'):
clone.children.append(child.clone())
else:
clone.children.append(child)
return clone
def relabeled_clone(self, change_map):
clone = self.clone()
clone.relabel_aliases(change_map)
return clone
@classmethod
def _contains_aggregate(cls, obj):
if isinstance(obj, tree.Node):
return any(cls._contains_aggregate(c) for c in obj.children)
return obj.contains_aggregate
@cached_property
def contains_aggregate(self):
return self._contains_aggregate(self)
@property
def is_summary(self):
return any(child.is_summary for child in self.children)
class NothingNode(object):
"""
A node that matches nothing.
"""
contains_aggregate = False
def as_sql(self, compiler=None, connection=None):
raise EmptyResultSet
class ExtraWhere(object):
# The contents are a black box - assume no aggregates are used.
contains_aggregate = False
def __init__(self, sqls, params):
self.sqls = sqls
self.params = params
def as_sql(self, compiler=None, connection=None):
sqls = ["(%s)" % sql for sql in self.sqls]
return " AND ".join(sqls), list(self.params or ())
class SubqueryConstraint(object):
# Even if aggregates would be used in a subquery, the outer query isn't
# interested in those.
contains_aggregate = False
def __init__(self, alias, columns, targets, query_object):
self.alias = alias
self.columns = columns
self.targets = targets
self.query_object = query_object
def as_sql(self, compiler, connection):
query = self.query_object
query.set_values(self.targets)
query_compiler = query.get_compiler(connection=connection)
return query_compiler.as_subquery_condition(self.alias, self.columns, compiler)
|
|
#!/usr/bin/env python
# coding=utf-8
"""
color multi universe pattern.
generates a test pattern:
history:
see git commits
todo:
~ all fine :-)
"""
import sys
import array
# import colorsys
import pattern
##########################################
# globals
##########################################
# functions
##########################################
# classes
class ColorsMultiuninverse(pattern.Pattern):
"""ColorsMultiuninverse Pattern Class."""
def __init__(self, config, config_global):
"""Init pattern."""
self.config_defaults = {
'update_interval': 5000,
'colors': {},
}
# python3 syntax
# super().__init__()
# python2 syntax
# super(Pattern, self).__init__()
# explicit call
pattern.Pattern.__init__(self, config, config_global)
# inits for this pattern
self.strobe_state = False
self.colors = self.config['colors']
self.colors_rgb_high = {}
self.colors_rgb_low = {}
def _update_colors(self):
# print("_update_colors")
start_universe = self.config_global['universe']['output']
universe_list = range(
start_universe,
start_universe + self.config_global['universe']['count']
)
# print("universe_list:{}".format(universe_list))
hue_step = 1.0 / self.config_global['universe']['count']
# print("hue_step:{}".format(hue_step))
for universe in universe_list:
# print("universe:{}".format(universe))
hue_section = (universe - start_universe)
# hue = hue_step * hue_section
hue = (hue_step * hue_section) + (0.5 * (hue_section % 2))
if hue > 1:
hue = hue - 1
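            # e.g. with 4 universes this yields hues 0.0, 0.75, 0.5, 0.25, so
            # neighbouring universes get clearly different colors.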
# hue = random.random(0, 1)
saturation = 1
value_high = pattern.map_16bit_to_01(self.values['high'])
value_low = pattern.map_16bit_to_01(self.values['low'])
# self.colors[universe] = {
# 'hue': hue,
# 'saturation': saturation,
# 'value': value,
# }
# print("hue:{}".format(hue))
# print(
# "hue:{}, "
# "saturation:{}, "
# "value_high:{}".format(
# hue,
# saturation,
# value_high
# )
# )
self.colors_rgb_high[universe] = self._hsv_01_to_rgb_16bit(
hue, saturation, value_high
)
self.colors_rgb_low[universe] = self._hsv_01_to_rgb_16bit(
hue, saturation, value_low
)
# debug output
# print(
# "resulting arrays:\n"
# " colors_rgb_high:{}\n"
# " colors_rgb_low:{}".format(
# self.colors_rgb_high,
# self.colors_rgb_low
# )
# )
def _calculate_step(self, universe):
"""Calculate single step."""
# pattern.Pattern._calculate_step(self)
# available attributes:
# global things (readonly)
# self.channel_count
# self.pixel_count
# self.repeat_count
# self.repeat_snake
# self.color_channels
# self.update_interval
# self.mode_16bit
# self.values['off']
# self.values['low']
# self.values['high']
# self.config_global[]
#
# self.colors_rgb_high
# self.colors_rgb_low
if not universe:
universe = self.config_global['universe']['output']
if universe == self.config_global['universe']['output']:
self.update_config()
self._update_colors()
# prepare temp array
data_output = array.array('B')
# data_output.append(0)
# # multiply so we have a array with total_channel_count zeros in it:
# # this is much faster than a for loop!
# data_output *= self.total_channel_count
# fill array with meaningful data according to the pattern :-)
# .....
# color = {
# 'red': {
# 'high': 0,
# 'low': 0,
# },
# 'green': {
# 'high': 0,
# 'low': 0,
# },
# 'blue': {
# 'high': 0,
# 'low': 0,
# },
# }
# if universe in self.colors_rgb_high:
color = self.colors_rgb_high[universe]
if not self.strobe_state:
color = self.colors_rgb_low[universe]
if self.mode_16bit:
data_output.append(color['red']['high'])
data_output.append(color['red']['low'])
data_output.append(color['green']['high'])
data_output.append(color['green']['low'])
data_output.append(color['blue']['high'])
data_output.append(color['blue']['low'])
else:
data_output.append(color['red']['high'])
data_output.append(color['green']['high'])
data_output.append(color['blue']['high'])
# copy for all pixels
data_output *= self.pixel_count
# toggle strobe_state after last universe
if (
universe ==
self.config_global['universe']['output'] +
self.config_global['universe']['count'] - 1
):
self.strobe_state = not self.strobe_state
return data_output
##########################################
if __name__ == '__main__':
print(42 * '*')
print('Python Version: ' + sys.version)
print(42 * '*')
print(__doc__)
print(42 * '*')
print("This Module has now stand alone functionality.")
print(42 * '*')
##########################################
|
|
# -*- coding: utf-8 -*-
"""
legit.cli
~~~~~~~~~
This module provides the CLI interface to legit.
"""
import os
import click
from clint import resources
from clint.textui import columns
import crayons
from .core import __version__
from .scm import SCMRepo
from .settings import legit_settings
from .utils import black, format_help, order_manually, output_aliases, status_log, verbose_echo
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
pass_scm = click.make_pass_decorator(SCMRepo)
class LegitGroup(click.Group):
"""Custom Group class with specially sorted command list"""
command_aliases = {
'pub': 'publish',
'sw': 'switch',
'sy': 'sync',
'unp': 'unpublish',
'un': 'undo',
}
def list_commands(self, ctx):
"""Override for showing commands in particular order"""
commands = super(LegitGroup, self).list_commands(ctx)
return [cmd for cmd in order_manually(commands)]
def get_command(self, ctx, cmd_name):
"""Override to handle command aliases"""
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
cmd_name = self.command_aliases.get(cmd_name, "")
return click.Group.get_command(self, ctx, cmd_name)
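    # For example, "legit sw feature-x" resolves "sw" via command_aliases and
    # dispatches to the "switch" command defined below.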
def get_help_option(self, ctx):
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.obj:
# legit main help
click.echo(format_help(ctx.get_help()))
else:
# legit sub-command help
click.echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return click.Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.')
@click.group(cls=LegitGroup, invoke_without_command=True, context_settings=CONTEXT_SETTINGS)
@click.version_option(prog_name=black('legit', bold=True), version=__version__)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--install', is_flag=True, help='Install legit git aliases.')
@click.option('--uninstall', is_flag=True, help='Uninstall legit git aliases.')
@click.option('--config', is_flag=True, help='Edit legit configuration file.')
@click.pass_context
def cli(ctx, verbose, fake, install, uninstall, config):
"""legit command line interface"""
# Create a repo object and remember it as the context object. From
# this point onwards other commands can refer to it by using the
# @pass_scm decorator.
ctx.obj = SCMRepo()
ctx.obj.fake = fake
ctx.obj.verbose = fake or verbose
if install:
do_install(ctx, verbose, fake)
ctx.exit()
elif uninstall:
do_uninstall(ctx, verbose, fake)
ctx.exit()
elif config:
do_edit_settings(fake)
ctx.exit()
else:
if ctx.invoked_subcommand is None:
# Display help to user if no commands were passed.
click.echo(format_help(ctx.get_help()))
@cli.command(short_help='Switches to specified branch.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def switch(scm, to_branch, verbose, fake):
"""Switches from one branch to another, safely stashing and restoring local changes.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
if to_branch is None:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to switch to')
scm.stash_log()
status_log(scm.checkout_branch, 'Switching to {0}.'.format(
crayons.yellow(to_branch)), to_branch)
scm.unstash_log()
@cli.command(short_help='Synchronizes the given branch with remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
@click.pass_context
def sync(ctx, scm, to_branch, verbose, fake):
"""Stashes unstaged changes, Fetches remote data, Performs smart
pull+merge, Pushes local commits up, and Unstashes changes.
Defaults to current branch.
"""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
if to_branch:
# Optional branch specifier.
branch = scm.fuzzy_match_branch(to_branch)
if branch:
is_external = True
original_branch = scm.get_current_branch_name()
else:
raise click.BadArgumentUsage(
"Branch {0} does not exist. Use an existing branch."
.format(crayons.yellow(to_branch)))
else:
# Sync current branch.
branch = scm.get_current_branch_name()
is_external = False
if branch in scm.get_branch_names(local=False):
if is_external:
ctx.invoke(switch, to_branch=branch, verbose=verbose, fake=fake)
scm.stash_log(sync=True)
status_log(scm.smart_pull, 'Pulling commits from the server.')
status_log(scm.push, 'Pushing commits to the server.', branch)
scm.unstash_log(sync=True)
if is_external:
ctx.invoke(switch, to_branch=original_branch, verbose=verbose, fake=fake)
else:
raise click.BadArgumentUsage(
"Branch {0} is not published. Publish before syncing."
.format(crayons.yellow(branch)))
@cli.command(short_help='Publishes specified branch to the remote.')
@click.argument('to_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def publish(scm, to_branch, verbose, fake):
"""Pushes an unpublished branch to a remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(to_branch)
if not branch:
branch = scm.get_current_branch_name()
scm.display_available_branches()
if to_branch is None:
click.echo("Using current branch {0}".format(crayons.yellow(branch)))
else:
click.echo(
"Branch {0} not found, using current branch {1}"
.format(crayons.red(to_branch), crayons.yellow(branch)))
branch_names = scm.get_branch_names(local=False)
if branch in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is already published. Use a branch that is not published."
.format(crayons.yellow(branch)))
status_log(scm.publish_branch, 'Publishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command(short_help='Removes specified branch from the remote.')
@click.argument('published_branch', required=False)
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@pass_scm
def unpublish(scm, published_branch, verbose, fake):
"""Removes a published branch from the remote repository."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check(require_remote=True)
branch = scm.fuzzy_match_branch(published_branch)
if not branch:
scm.display_available_branches()
raise click.BadArgumentUsage('Please specify a branch to unpublish')
branch_names = scm.get_branch_names(local=False)
if branch not in branch_names:
raise click.BadArgumentUsage(
"Branch {0} is not published. Use a branch that is published."
.format(crayons.yellow(branch)))
status_log(scm.unpublish_branch, 'Unpublishing {0}.'.format(
crayons.yellow(branch)), branch)
@cli.command()
@click.option('--verbose', is_flag=True, help='Enables verbose mode.')
@click.option('--fake', is_flag=True, help='Show but do not invoke git commands.')
@click.option('--hard', is_flag=True, help='Discard local changes.')
@pass_scm
def undo(scm, verbose, fake, hard):
"""Removes the last commit from history."""
scm.fake = fake
scm.verbose = fake or verbose
scm.repo_check()
status_log(scm.undo, 'Last commit removed from history.', hard)
@cli.command()
@pass_scm
def branches(scm):
"""Displays a list of branches."""
scm.repo_check()
scm.display_available_branches()
def do_install(ctx, verbose, fake):
"""Installs legit git aliases."""
click.echo('The following git aliases will be installed:\n')
aliases = cli.list_commands(ctx)
output_aliases(aliases)
if click.confirm('\n{}Install aliases above?'.format('FAKE ' if fake else ''), default=fake):
for alias in aliases:
cmd = '!legit ' + alias
system_command = 'git config --global --replace-all alias.{0} "{1}"'.format(alias, cmd)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo("\nAliases installed.")
else:
click.echo("\nAliases will not be installed.")
def do_uninstall(ctx, verbose, fake):
"""Uninstalls legit git aliases, including deprecated legit sub-commands."""
aliases = cli.list_commands(ctx)
# Add deprecated aliases
aliases.extend(['graft', 'harvest', 'sprout', 'resync', 'settings', 'install', 'uninstall'])
for alias in aliases:
system_command = 'git config --global --unset-all alias.{0}'.format(alias)
verbose_echo(system_command, verbose, fake)
if not fake:
os.system(system_command)
if not fake:
click.echo('\nThe following git aliases are uninstalled:\n')
output_aliases(aliases)
def do_edit_settings(fake):
"""Opens legit settings in editor."""
path = resources.user.open('config.ini').name
click.echo('Legit Settings:\n')
for (option, _, description) in legit_settings.config_defaults:
click.echo(columns([crayons.yellow(option), 25], [description, None]))
click.echo("") # separate settings info from os output
if fake:
click.echo(crayons.red('Faked! >>> edit {}'.format(path)))
else:
click.edit(path)
def handle_abort(aborted, type=None):
click.echo('{0} {1}'.format(crayons.red('Error:'), aborted.message))
click.echo(str(aborted.log))
if type == 'merge':
click.echo('Unfortunately, there was a merge conflict.'
' It has to be merged manually.')
elif type == 'unpublish':
click.echo(
'''It seems that the remote branch is deleted.
If `legit branches` still shows it as published,
then probably the branch has been deleted at the remote by someone else.
You can run `git fetch --prune` to update remote information.
''')
raise click.Abort
legit_settings.abort_handler = handle_abort
|
|
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core import serializers
from django.core.paginator import Paginator, InvalidPage
import json as simplejson
from django.http import (
HttpResponseNotAllowed,
# HttpResponseForbidden,
HttpResponse,
Http404,
HttpResponseBadRequest
)
from django.shortcuts import redirect
SIZE = getattr(settings, "OBJECTS_PAGE", 15)
DEBUG = getattr(settings, "DEBUG")
MAX_SIZE = getattr(settings, "MAX_OBJECTS_PAGE", 12)
def redirect_if_admin(func):
"""
    Redirects to the admin menu if the logged-in user is an administrator.
"""
def decorator(request, *args, **kwargs):
if request.user.is_authenticated() and request.user.type is not None:
return func(request, *args, **kwargs)
elif request.user.is_authenticated():
return redirect('admin:index')
else:
return func(request, *args, **kwargs)
return decorator
def requires_ceelat_ally(func):
"""
    Redirects to the dashboard if the logged-in user is not a CEELAT ally.
"""
def decorator(request, *args, **kwargs):
if request.user.is_authenticated() and not request.user.is_ceelat_ally():
return redirect('dashboard')
else:
return func(request, *args, **kwargs)
return decorator
def requires_company_user(func):
"""
    Redirects to the dashboard if the logged-in user is not a company user.
"""
def decorator(request, *args, **kwargs):
if request.user.is_authenticated() and not request.user.is_company_user():
return redirect('dashboard')
else:
return func(request, *args, **kwargs)
return decorator
def requires_entrepreneur(func):
"""
    Redirects to the dashboard if the logged-in user is not an entrepreneur.
"""
def decorator(request, *args, **kwargs):
if request.user.is_authenticated() and not request.user.is_entrepreneur():
return redirect('dashboard')
else:
return func(request, *args, **kwargs)
return decorator
def requires_post(func):
"""
    Returns a 405 error if request.method is not POST (skipped when DEBUG is on).
"""
def decorator(request, *args, **kwargs):
if DEBUG or request.method == 'POST':
return func(request, *args, **kwargs)
return HttpResponseNotAllowed(['POST'])
return decorator
def requires_get(func):
"""
    Returns a 405 error if request.method is not GET (skipped when DEBUG is on).
"""
def decorator(request, *args, **kwargs):
if DEBUG or request.method == 'GET':
return func(request, *args, **kwargs)
return HttpResponseNotAllowed(['GET'])
return decorator
from django.contrib.auth.decorators import login_required
requires_login = login_required
def json_response(func):
"""
    Converts the decorated function's return value to JSON using
    the json library (imported as simplejson).
"""
def decorator(request, *args, **kwargs):
objects = func(request, *args, **kwargs)
if isinstance(objects, HttpResponse):
return objects
try:
data = simplejson.dumps(objects)
if 'callback' in request.GET:
data = '%s(%s);' % (request.GET['callback'], data)
        except Exception:
data = simplejson.dumps(str(objects))
if 'just_the_json_plz' in kwargs:
return data
if 'just_the_data_plz' in kwargs:
return objects
if 'callback' in request.GET or 'callback' in request.POST:
#jsonp
return HttpResponse(data, "text/javascript")
else:
#json
return HttpResponse(data, "application/json")
return decorator
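# Illustrative usage sketch (not part of the original module); 'ping_view' is a
# hypothetical view. A decorated view can return any JSON-serializable object
# and json_response picks the JSON or JSONP content type for the response.
@requires_get
@json_response
def ping_view(request):
    # HttpResponse instances would be passed through untouched.
    return {'status': 'ok'}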
def paginator(func):
"""
    Decorates a function that returns an iterable and paginates it using the
    'page' variable from the POST (or GET) data. Returns the requested page,
    or the first page if 'page' is missing; the 'size' variable (capped at
    MAX_SIZE) determines the number of items per page.
"""
def decorator(request, *args, **kwargs):
objects = func(request, *args, **kwargs)
try:
if request.method == "POST":
page = int(request.POST['page'] or 1)
else:
page = int(request.GET['page'] or 1)
except Exception:
page = 1
try:
if request.method == "POST":
size = int(request.POST['size'] or SIZE)
else:
size = int(request.GET['size'] or SIZE)
except Exception:
size = SIZE
if size > MAX_SIZE:
size = MAX_SIZE
paginatorr = Paginator(objects, size, orphans=0, allow_empty_first_page=True)
try:
page_obj = paginatorr.page(page)
except InvalidPage:
raise Http404
return page_obj
return decorator
def paginated_for_json_response(func):
"""
    Decorates a function that returns a page of objects and uses each object's
    to_json_dict method (when available) to build its dictionary.
    The resulting JSON has the following shape:
{
total_number:int,
page_number:int,
next:boolean,
previous:boolean,
objects:[object]
}
"""
def decorator(request, *args, **kwargs):
page = func(request, *args, **kwargs)
data = {}
data['total_number'] = page.paginator.count
data['page_number'] = page.number
data['total_pages'] = page.paginator.num_pages
data['next'] = page.has_next()
data['previous'] = page.has_previous()
objects = []
for o in page.object_list:
if hasattr(o, 'to_json_dict'):
try:
objects.append(o.to_json_dict())
                except Exception:
pass
else:
try:
objects.append(o)
                except Exception:
pass
if not objects:
objects = serializers.serialize("python", [o for o in page.object_list])
data['objects'] = objects
return data
return decorator
def paginated_search_for_json_response(func):
"""
    Decorates a function that returns a page of search results and uses each
    result object's to_json_dict method (when available) to build its dictionary.
    The resulting JSON has the following shape:
{
total_number:int,
page_number:int,
next:boolean,
previous:boolean,
objects:[object]
}
"""
def decorator(request, *args, **kwargs):
page = func(request, *args, **kwargs)
data = {}
data['total_number'] = page.paginator.count
data['page_number'] = page.number
data['total_pages'] = page.paginator.num_pages
data['next'] = page.has_next()
data['previous'] = page.has_previous()
try:
data['objects'] = [(a.object.to_json_dict(request)) for a in page.object_list]
        except Exception:
try:
data['objects'] = [(a.object.to_json_dict()) for a in page.object_list]
            except Exception:
data['objects'] = serializers.serialize("python", [a.object for a in page.object_list])
return data
return decorator
def paginated_json_response(func):
"""
    Combines the paginator, paginated_for_json_response and json_response
    decorators to return a paginated JSON response for an iterable of elements.
"""
@json_response
@paginated_for_json_response
@paginator
def decorator(request, *args, **kwargs):
objects = func(request, *args, **kwargs)
return objects
return decorator
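# Illustrative usage sketch (not part of the original module); 'list_users_view'
# is a hypothetical view. The view only returns an iterable: pagination (via the
# 'page'/'size' request variables) and JSON encoding are handled by the decorator.
@paginated_json_response
def list_users_view(request):
    from django.contrib.auth.models import User
    return User.objects.all()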
def paginated_search_json_response(func):
"""
    Combines the paginator, paginated_search_for_json_response and json_response
    decorators to return a paginated JSON response for search results.
"""
@json_response
@paginated_search_for_json_response
@paginator
def decorator(request, *args, **kwargs):
objects = func(request, *args, **kwargs)
return objects
return decorator
def http_var_required(parameter_name):
"""
    Checks that a GET or POST parameter named parameter_name exists;
    if not, returns HttpResponseBadRequest.
"""
def wrap(func):
def decorator(request, *args, **kwargs):
if not (parameter_name in request.POST or parameter_name in request.GET):
return HttpResponseBadRequest('Please define GET or POST parameter '+parameter_name)
return func(request, *args, **kwargs)
return decorator
return wrap
def add_http_var(parameter_name, required=True):
"""
    Extracts the variable from POST or GET and passes it to the function as a
    keyword argument; returns HttpResponseBadRequest if it is missing and it
    has not been explicitly declared optional (required=False).
"""
def wrap(func):
def decorator(request, *args, **kwargs):
if parameter_name in request.POST:
kwargs[parameter_name] = request.POST[parameter_name]
elif parameter_name in request.GET:
kwargs[parameter_name] = request.GET[parameter_name]
elif required:
return HttpResponseBadRequest('Please define GET or POST parameter '+parameter_name)
else:
pass
return func(request, *args, **kwargs)
return decorator
return wrap
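# Illustrative usage sketch (not part of the original module); 'search_view' is a
# hypothetical view. add_http_var('q') injects the 'q' request variable as a
# keyword argument (or returns a 400 response when it is missing), and
# json_response serializes the returned dict.
@json_response
@add_http_var('q')
def search_view(request, q=None):
    return {'query': q}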
|
|
from flask import abort, flash, redirect, render_template, request, url_for
from flask.ext.login import (current_user, login_required, login_user,
logout_user)
from flask.ext.rq import get_queue
from . import account
from .. import db
from ..email import send_email
from ..models import User
from .forms import (ChangeAccountInfoForm, ChangeEmailForm, ChangePasswordForm,
CreatePasswordForm, LoginForm, RegistrationForm,
RequestResetPasswordForm, ResetPasswordForm)
@account.route('/login', methods=['GET', 'POST'])
def login():
"""Log in an existing user."""
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.password_hash is not None and \
user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
flash('You are now logged in. Welcome back!', 'success')
return redirect(request.args.get('next') or url_for('admin.index'))
else:
flash('Invalid email or password.', 'form-error')
return render_template('account/login.html', form=form)
@account.route('/register', methods=['GET', 'POST'])
def register():
"""Register a new user, and send them a confirmation email."""
form = RegistrationForm()
if form.validate_on_submit():
user = User(
first_name=form.first_name.data,
last_name=form.last_name.data,
email=form.email.data,
password=form.password.data)
db.session.add(user)
db.session.commit()
token = user.generate_confirmation_token()
confirm_link = url_for('account.confirm', token=token, _external=True)
get_queue().enqueue(
send_email,
recipient=user.email,
subject='Confirm Your Account',
template='account/email/confirm',
user=user,
confirm_link=confirm_link)
flash('A confirmation link has been sent to {}.'.format(user.email),
'warning')
return redirect(url_for('main.index'))
return render_template('account/register.html', form=form)
@account.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.', 'info')
return redirect(url_for('main.index'))
@account.route('/manage', methods=['GET', 'POST'])
@account.route('/manage/info', methods=['GET', 'POST'])
@login_required
def manage():
"""Display a user's account information."""
return render_template('account/manage.html', user=current_user, form=None)
@account.route('/reset-password', methods=['GET', 'POST'])
def reset_password_request():
"""Respond to existing user's request to reset their password."""
if not current_user.is_anonymous():
return redirect(url_for('main.index'))
form = RequestResetPasswordForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_password_reset_token()
reset_link = url_for(
'account.reset_password', token=token, _external=True)
get_queue().enqueue(
send_email,
recipient=user.email,
subject='Reset Your Password',
template='account/email/reset_password',
user=user,
reset_link=reset_link,
next=request.args.get('next'))
flash('A password reset link has been sent to {}.'
.format(form.email.data), 'warning')
return redirect(url_for('account.login'))
return render_template('account/reset_password.html', form=form)
@account.route('/reset-password/<token>', methods=['GET', 'POST'])
def reset_password(token):
"""Reset an existing user's password."""
if not current_user.is_anonymous():
return redirect(url_for('main.index'))
form = ResetPasswordForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
flash('Invalid email address.', 'form-error')
return redirect(url_for('main.index'))
if user.reset_password(token, form.new_password.data):
flash('Your password has been updated.', 'form-success')
return redirect(url_for('account.login'))
else:
flash('The password reset link is invalid or has expired.',
'form-error')
return redirect(url_for('main.index'))
return render_template('account/reset_password.html', form=form)
@account.route('/manage/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
"""Change an existing user's password."""
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.new_password.data
db.session.add(current_user)
db.session.commit()
flash('Your password has been updated.', 'form-success')
return redirect(url_for('main.index'))
else:
flash('Original password is invalid.', 'form-error')
return render_template('account/manage.html', form=form)
@account.route('/manage/change-account-info', methods=['GET', 'POST'])
@login_required
def change_account_info():
"""
Change an existing user's account information (excluding email and
password).
"""
form = ChangeAccountInfoForm()
if form.validate_on_submit():
current_user.first_name = form.first_name.data
current_user.last_name = form.last_name.data
db.session.add(current_user)
db.session.commit()
# When form is first displayed, pre-populate its fields with the user's
# current information.
else:
form.first_name.data = current_user.first_name
form.last_name.data = current_user.last_name
return render_template('account/manage.html', form=form)
@account.route('/manage/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
"""Respond to existing user's request to change their email."""
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
new_email = form.email.data
token = current_user.generate_email_change_token(new_email)
change_email_link = url_for(
'account.change_email', token=token, _external=True)
get_queue().enqueue(
send_email,
recipient=new_email,
subject='Confirm Your New Email',
template='account/email/change_email',
# current_user is a LocalProxy, we want the underlying user
# object
user=current_user._get_current_object(),
change_email_link=change_email_link)
flash('A confirmation link has been sent to {}.'.format(new_email),
'warning')
return redirect(url_for('main.index'))
else:
flash('Invalid email or password.', 'form-error')
return render_template('account/manage.html', form=form)
@account.route('/manage/change-email/<token>', methods=['GET', 'POST'])
@login_required
def change_email(token):
"""Change existing user's email with provided token."""
if current_user.change_email(token):
flash('Your email address has been updated.', 'success')
else:
flash('The confirmation link is invalid or has expired.', 'error')
return redirect(url_for('main.index'))
@account.route('/confirm-account')
@login_required
def confirm_request():
"""Respond to new user's request to confirm their account."""
token = current_user.generate_confirmation_token()
confirm_link = url_for('account.confirm', token=token, _external=True)
get_queue().enqueue(
send_email,
recipient=current_user.email,
subject='Confirm Your Account',
template='account/email/confirm',
# current_user is a LocalProxy, we want the underlying user object
user=current_user._get_current_object(),
confirm_link=confirm_link)
flash('A new confirmation link has been sent to {}.'.format(
current_user.email), 'warning')
return redirect(url_for('main.index'))
@account.route('/confirm-account/<token>')
@login_required
def confirm(token):
"""Confirm new user's account with provided token."""
if current_user.confirmed:
return redirect(url_for('main.index'))
if current_user.confirm_account(token):
flash('Your account has been confirmed.', 'success')
else:
flash('The confirmation link is invalid or has expired.', 'error')
return redirect(url_for('main.index'))
@account.route(
'/join-from-invite/<int:user_id>/<token>', methods=['GET', 'POST'])
def join_from_invite(user_id, token):
"""
Confirm new user's account with provided token and prompt them to set
a password.
"""
if current_user is not None and current_user.is_authenticated():
flash('You are already logged in.', 'error')
return redirect(url_for('main.index'))
new_user = User.query.get(user_id)
if new_user is None:
        abort(404)
if new_user.password_hash is not None:
flash('You have already joined.', 'error')
return redirect(url_for('main.index'))
if new_user.confirm_account(token):
form = CreatePasswordForm()
if form.validate_on_submit():
new_user.password = form.password.data
db.session.add(new_user)
db.session.commit()
flash('Your password has been set. After you log in, you can '
'go to the "Your Account" page to review your account '
'information and settings.', 'success')
return redirect(url_for('account.login'))
return render_template('account/join_invite.html', form=form)
else:
flash('The confirmation link is invalid or has expired. Another '
'invite email with a new link has been sent to you.', 'error')
token = new_user.generate_confirmation_token()
invite_link = url_for(
'account.join_from_invite',
user_id=user_id,
token=token,
_external=True)
get_queue().enqueue(
send_email,
recipient=new_user.email,
subject='You Are Invited To Join',
template='account/email/invite',
user=new_user,
invite_link=invite_link)
return redirect(url_for('main.index'))
@account.before_app_request
def before_request():
"""Force user to confirm email before accessing login-required routes."""
if current_user.is_authenticated() \
and not current_user.confirmed \
and request.endpoint[:8] != 'account.' \
and request.endpoint != 'static':
return redirect(url_for('account.unconfirmed'))
@account.route('/unconfirmed')
def unconfirmed():
"""Catch users with unconfirmed emails."""
if current_user.is_anonymous() or current_user.confirmed:
return redirect(url_for('main.index'))
return render_template('account/unconfirmed.html')
|
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class MaintenanceWindow(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
MaintenanceWindow - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'created_at': 'int',
'creator_user_id': 'str',
'customer_id': 'str',
'end_time_in_seconds': 'int',
'event_name': 'str',
'reason': 'str',
'relevant_customer_tags': 'list[str]',
'relevant_host_names': 'list[str]',
'relevant_host_tags': 'list[str]',
'start_time_in_seconds': 'int',
'title': 'str'
}
self.attribute_map = {
'created_at': 'createdAt',
'creator_user_id': 'creatorUserId',
'customer_id': 'customerId',
'end_time_in_seconds': 'endTimeInSeconds',
'event_name': 'eventName',
'reason': 'reason',
'relevant_customer_tags': 'relevantCustomerTags',
'relevant_host_names': 'relevantHostNames',
'relevant_host_tags': 'relevantHostTags',
'start_time_in_seconds': 'startTimeInSeconds',
'title': 'title'
}
self._created_at = None
self._creator_user_id = None
self._customer_id = None
self._end_time_in_seconds = None
self._event_name = None
self._reason = None
self._relevant_customer_tags = None
self._relevant_host_names = None
self._relevant_host_tags = None
self._start_time_in_seconds = None
self._title = None
@property
def created_at(self):
"""
Gets the created_at of this MaintenanceWindow.
The creation time for this maintenance window in milliseconds. Used as the id
:return: The created_at of this MaintenanceWindow.
:rtype: int
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""
Sets the created_at of this MaintenanceWindow.
The creation time for this maintenance window in milliseconds. Used as the id
:param created_at: The created_at of this MaintenanceWindow.
:type: int
"""
self._created_at = created_at
@property
def creator_user_id(self):
"""
Gets the creator_user_id of this MaintenanceWindow.
Id of the user who created this maintenance window
:return: The creator_user_id of this MaintenanceWindow.
:rtype: str
"""
return self._creator_user_id
@creator_user_id.setter
def creator_user_id(self, creator_user_id):
"""
Sets the creator_user_id of this MaintenanceWindow.
Id of the user who created this maintenance window
:param creator_user_id: The creator_user_id of this MaintenanceWindow.
:type: str
"""
self._creator_user_id = creator_user_id
@property
def customer_id(self):
"""
Gets the customer_id of this MaintenanceWindow.
:return: The customer_id of this MaintenanceWindow.
:rtype: str
"""
return self._customer_id
@customer_id.setter
def customer_id(self, customer_id):
"""
Sets the customer_id of this MaintenanceWindow.
:param customer_id: The customer_id of this MaintenanceWindow.
:type: str
"""
self._customer_id = customer_id
@property
def end_time_in_seconds(self):
"""
Gets the end_time_in_seconds of this MaintenanceWindow.
The time in seconds for when this maintenance window will end
:return: The end_time_in_seconds of this MaintenanceWindow.
:rtype: int
"""
return self._end_time_in_seconds
@end_time_in_seconds.setter
def end_time_in_seconds(self, end_time_in_seconds):
"""
Sets the end_time_in_seconds of this MaintenanceWindow.
The time in seconds for when this maintenance window will end
:param end_time_in_seconds: The end_time_in_seconds of this MaintenanceWindow.
:type: int
"""
self._end_time_in_seconds = end_time_in_seconds
@property
def event_name(self):
"""
Gets the event_name of this MaintenanceWindow.
:return: The event_name of this MaintenanceWindow.
:rtype: str
"""
return self._event_name
@event_name.setter
def event_name(self, event_name):
"""
Sets the event_name of this MaintenanceWindow.
:param event_name: The event_name of this MaintenanceWindow.
:type: str
"""
self._event_name = event_name
@property
def reason(self):
"""
Gets the reason of this MaintenanceWindow.
Description on the purpose of this maintenance window
:return: The reason of this MaintenanceWindow.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""
Sets the reason of this MaintenanceWindow.
Description on the purpose of this maintenance window
:param reason: The reason of this MaintenanceWindow.
:type: str
"""
self._reason = reason
@property
def relevant_customer_tags(self):
"""
Gets the relevant_customer_tags of this MaintenanceWindow.
List of shared alert tags that will be put into maintenance because of this maintenance window
:return: The relevant_customer_tags of this MaintenanceWindow.
:rtype: list[str]
"""
return self._relevant_customer_tags
@relevant_customer_tags.setter
def relevant_customer_tags(self, relevant_customer_tags):
"""
Sets the relevant_customer_tags of this MaintenanceWindow.
List of shared alert tags that will be put into maintenance because of this maintenance window
:param relevant_customer_tags: The relevant_customer_tags of this MaintenanceWindow.
:type: list[str]
"""
self._relevant_customer_tags = relevant_customer_tags
@property
def relevant_host_names(self):
"""
Gets the relevant_host_names of this MaintenanceWindow.
List of the specific hosts that will be put into maintenance because of this maintenance window
:return: The relevant_host_names of this MaintenanceWindow.
:rtype: list[str]
"""
return self._relevant_host_names
@relevant_host_names.setter
def relevant_host_names(self, relevant_host_names):
"""
Sets the relevant_host_names of this MaintenanceWindow.
List of the specific hosts that will be put into maintenance because of this maintenance window
:param relevant_host_names: The relevant_host_names of this MaintenanceWindow.
:type: list[str]
"""
self._relevant_host_names = relevant_host_names
@property
def relevant_host_tags(self):
"""
Gets the relevant_host_tags of this MaintenanceWindow.
List of host tags whose matching hosts will be put into maintenance because of this maintenance window
:return: The relevant_host_tags of this MaintenanceWindow.
:rtype: list[str]
"""
return self._relevant_host_tags
@relevant_host_tags.setter
def relevant_host_tags(self, relevant_host_tags):
"""
Sets the relevant_host_tags of this MaintenanceWindow.
List of host tags whose matching hosts will be put into maintenance because of this maintenance window
:param relevant_host_tags: The relevant_host_tags of this MaintenanceWindow.
:type: list[str]
"""
self._relevant_host_tags = relevant_host_tags
@property
def start_time_in_seconds(self):
"""
Gets the start_time_in_seconds of this MaintenanceWindow.
The time in seconds for when this maintenance window will start
:return: The start_time_in_seconds of this MaintenanceWindow.
:rtype: int
"""
return self._start_time_in_seconds
@start_time_in_seconds.setter
def start_time_in_seconds(self, start_time_in_seconds):
"""
Sets the start_time_in_seconds of this MaintenanceWindow.
The time in seconds for when this maintenance window will start
:param start_time_in_seconds: The start_time_in_seconds of this MaintenanceWindow.
:type: int
"""
self._start_time_in_seconds = start_time_in_seconds
@property
def title(self):
"""
Gets the title of this MaintenanceWindow.
Title text
:return: The title of this MaintenanceWindow.
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""
Sets the title of this MaintenanceWindow.
Title text
:param title: The title of this MaintenanceWindow.
:type: str
"""
self._title = title
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
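# Illustrative usage sketch (not produced by swagger-codegen): attributes are
# plain properties, so a window can be filled in field by field and rendered
# with to_dict()/to_str(); the field values below are made up.
if __name__ == '__main__':
    window = MaintenanceWindow()
    window.title = 'Weekly patching'
    window.reason = 'OS updates on the web tier'
    window.relevant_host_tags = ['web', 'staging']
    window.start_time_in_seconds = 1462000000
    window.end_time_in_seconds = 1462003600
    print(window.to_str())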
|
|
#!/usr/bin/python
# encoding:utf-8
# Copyright (c) 2014 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Liu, xin <xinx.liu@intel.com>
# Li, Hao <haox.li@intel.com>
import os
import csv
import re
import sys
import platform
import logging
import logging.handlers
from xml.etree import ElementTree
LOG = None
LOG_LEVEL = logging.DEBUG
class Set():
set_name = ""
set_type = ""
ui_auto = ""
testcase = []
    def __init__(self, setname, settype, uiauto, testcase=None):
        self.set_name = setname
        self.set_type = settype
        self.ui_auto = uiauto
        self.testcase = testcase if testcase is not None else []
class TestCase():
case_id = ""
purpose = ""
component = ""
priority = ""
execution_type = ""
status = ""
case_type = ""
onload_delay = ""
subcase = ""
pre_condition = ""
post_condition = ""
steps = []
test_script_entry = ""
refer_test_script_entry = ""
bdd_test_script_entry = ""
spec_category = ""
spec_section = ""
spec_specification = ""
spec_interface = ""
spec_element_name = ""
spec_element_type = ""
spec_url = ""
spec_statement = ""
def __init__(self, caseid, purpose, component, priority, executiontype, status, casetype,\
onloaddelay, subcase, precondition, postcondition, steps, testscriptentry,\
refertestscriptentry, bddtestscriptentry, speccategory, specsection,\
specification, specinterface, specelementname, specelementtype, specurl):
self.case_id = caseid
self.purpose = purpose
self.component = component
self.priority = priority
self.execution_type = executiontype
self.status = status
self.case_type = casetype
self.onload_delay = onloaddelay
self.subcase = subcase
self.pre_condition = precondition
self.post_condition = postcondition
self.steps = steps
self.test_script_entry = testscriptentry
self.refer_test_script_entry = refertestscriptentry
self.bdd_test_script_entry = bddtestscriptentry
self.spec_category = speccategory
self.spec_section = specsection
self.spec_specification = specification
self.spec_interface = specinterface
self.spec_element_name = specelementname
self.spec_element_type = specelementtype
self.spec_url = specurl
class ColorFormatter(logging.Formatter):
def __init__(self, msg):
logging.Formatter.__init__(self, msg)
def format(self, record):
red, green, yellow, blue = range(4)
colors = {'INFO': green, 'DEBUG': blue,
'WARNING': yellow, 'ERROR': red}
msg = record.msg
if msg[0] == "+":
msg = "\33[01m" + msg[1:] + "\033[0m"
elif msg[0] == "=":
msg = "\33[07m" + msg + "\033[0m"
levelname = record.levelname
if levelname in colors:
msg_color = "\033[0;%dm" % (
31 + colors[levelname]) + msg + "\033[0m"
record.msg = msg_color
return logging.Formatter.format(self, record)
def csv2full(csv_path, split_sign):
if not os.path.isfile(csv_path):
print '%s is not a file' % csv_path
return
name, ext = os.path.splitext(csv_path)
if not ext == '.csv':
print '%s is not a csv' % csv_path
return
LOG.info("+Convert csv to test.full.xml start ...")
    csv_file = open(csv_path, 'rb')
csv_file.readline()
reader = csv.reader(csv_file)
test_suite = {}
for line in reader:
if test_suite.get(line[0]) is None:
testset = Set(line[0], line[1], line[2], [])
test_suite[line[0]] = testset
testcase = TestCase(line[3], line[4], line[5], line[8], line[7], line[17], line[22],\
str(line[6]), str(line[21]), line[23], line[24], line[25], line[18], line[19],\
line[20], line[14], line[13], line[12], line[11], line[10], line[9], line[15])
test_suite[line[0]].testcase.append(testcase)
csv_file.close()
suite_name = test_suite.values()[0].testcase[0].test_script_entry.split('/')[2]
category_name = test_suite.values()[0].testcase[0].component.split('/')[0]
folder = os.path.dirname(csv_path)
full_test_path = '%s%s%s-tests.full.xml' % (folder, split_sign, suite_name)
make_full_test(
test_suite,
full_test_path,
suite_name,
category_name)
LOG.info('General %s' % full_test_path)
def make_full_test(test_suite, full_test_name, suite_name, category_name):
full_test_file = open(full_test_name, 'w')
content = '<?xml version="1.0" encoding="UTF-8"?>\n'\
+ '<?xml-stylesheet type="text/xsl" href="./testcase.xsl"?>\n'\
+ '<test_definition>\n'\
+ ' <suite category="%s" name="%s">\n' % (category_name, suite_name)
for testset in test_suite.values():
set_ui_auto = ""
        if testset.ui_auto != "":
set_ui_auto = ' ui-auto="%s"' % testset.ui_auto
content += ' <set name="%s" type="%s"%s>\n' % (testset.set_name, testset.set_type, set_ui_auto)
testcasestr = ""
for testcase in testset.testcase:
            onload_delay = ' onload_delay="%s"' % testcase.onload_delay if testcase.onload_delay != "" else ""
            subcase = ' subcase="%s"' % testcase.subcase if testcase.subcase != "" else ""
pre_condition = '\
<pre_condition>\n\
%s\n\
        </pre_condition>\n' % testcase.pre_condition if testcase.pre_condition != "" else ""
post_condition = '\
<post_condition>\n\
%s\n\
        </post_condition>\n' % testcase.post_condition if testcase.post_condition != "" else ""
refer_test_script_entry = " <refer_test_script_entry>%s</refer_test_script_entry>\n" \
% testcase.post_condition if testcase.post_condition is not "" else ""
bdd_test_script_entry = " <bdd_test_script_entry>%s</bdd_test_script_entry>\n" \
                % testcase.bdd_test_script_entry if testcase.bdd_test_script_entry != "" else ""
testcasestr += '\
<testcase purpose="%s" component="%s" type="%s" status="%s" execution_type="%s" priority="%s" id="%s"%s%s>\n\
<description>\n%s%s\
<test_script_entry>%s</test_script_entry>\n%s%s\
</description>\n\
<specs>\n\
<spec>\n\
<spec_assertion element_type="%s" element_name="%s" interface="%s" specification="%s" section="%s" category="%s"/>\n\
<spec_url>%s</spec_url>\n\
<spec_statement/>\n\
</spec>\n\
</specs>\n\
</testcase>\n' % (testcase.purpose, testcase.component, testcase.case_type, testcase.status, testcase.execution_type,\
testcase.priority, testcase.case_id, onload_delay, subcase, pre_condition, post_condition,\
testcase.test_script_entry, refer_test_script_entry, bdd_test_script_entry, testcase.spec_element_type,\
testcase.spec_element_name, testcase.spec_interface, testcase.spec_specification, testcase.spec_section,\
testcase.spec_category, testcase.spec_url)
content += testcasestr\
+ ' </set>\n'
content += ' </suite>\n</test_definition>'
full_test_file.seek(0)
full_test_file.truncate()
full_test_file.write(content)
full_test_file.close()
def echo_about():
"""
This function will print the user guide and stop toolkit.
"""
about = 'csv2xml V1.0\n-c <path> | Convert csv file to tests.full.xml and tests.xml\n'
print about
sys.exit()
def main():
"""
main function will call different functions according to the command line argvs followed the toolkit.
"""
global LOG
LOG = logging.getLogger("pack-tool")
LOG.setLevel(LOG_LEVEL)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(LOG_LEVEL)
stream_formatter = ColorFormatter("[%(asctime)s] %(message)s")
stream_handler.setFormatter(stream_formatter)
LOG.addHandler(stream_handler)
sys_name = platform.system()
    if sys_name == 'Windows':
        split_sign = '\\'
    else:
        # Linux, Mac OS and other POSIX systems all use '/'.
        split_sign = '/'
if len(sys.argv) != 3:
print 'Error: No enough argv!'
echo_about()
else:
{'-c': lambda: csv2full(sys.argv[2], split_sign)}[sys.argv[1]]()
if __name__ == '__main__':
main()
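# Example invocation (illustrative; the script and CSV file names are hypothetical):
#
#   python csv2xml.py -c /path/to/suite-cases.csv
#
# The CSV columns must follow the order consumed by csv2full() above; the tool
# writes <suite>-tests.full.xml next to the input file.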
|
|
""" This module gets used by :class:`ircutils.client.SimpleClient` and
:class:`ircutils.bot.SimpleBot` for event handling and management.
Each line sent from the IRC server represents its own event. This information
is parsed to fill in the values for the event object. In some cases, these
single-line events are combined together to build more complex events that span
multiple lines of data from the server. This information
is parsed to fill in the values for the event object.
"""
import bisect
import collections
import traceback
from . import protocol
class EventDispatcher(object):
""" The event dispatcher is in charge of three major tasks. (1) Registering
listeners to the dispatcher, (2) providing a way to interact with the
listeners, and (3) dispatching events.
"""
def __init__(self):
self._listeners = {}
def register_listener(self, name, listener):
""" Adds a listener to the dispatcher. """
self._listeners[name] = listener
def __setitem__(self, name, listener):
self.register_listener(name, listener)
def __getitem__(self, name):
return self._listeners[name]
def __iter__(self):
return iter(self._listeners.keys())
def dispatch(self, client, event):
""" Notifies all of the listeners that an event is available.
Any listener which analyses the event and finds it to have what
the listener is looking for will then activate its event handlers.
"""
for name, listener in self._listeners.items():
if listener.handlers != []:
listener.notify(client, event)
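# Illustrative usage sketch ('my_handler' is a hypothetical handler taking
# (client, event)); listeners such as PingListener are defined further below:
#
#   dispatcher = EventDispatcher()
#   dispatcher["ping"] = PingListener()        # same as register_listener()
#   dispatcher["ping"].add_handler(my_handler)
#   dispatcher.dispatch(client, event)         # notifies every registered listener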
# ------------------------------------------------------------------------------
# > BEGIN EVENT OBJECTS
# ------------------------------------------------------------------------------
#
class Event(object):
pass
class ConnectionEvent(Event):
""" Handles events for connecting and disconnecting. Currently, the only useful data in
the event object is the command. It will either be CONN_CONNECT or CONN_DISCONNECT.
"""
def __init__(self, command):
self.command = command
self.source = None
self.target = None
self.params = []
class StandardEvent(Event):
""" Represents a standard event. """
def __init__(self, prefix, command, params):
self.command = command
self.prefix = prefix
self.source, self.user, self.host = protocol.parse_prefix(prefix)
if len(params) > 0:
if command not in protocol.commands_with_no_target:
self.target = params[0]
self.params = params[1:]
else:
self.target = None
self.params = params
else:
self.target = None
self.params = []
def __str__(self):
return '<StandardEvent '+str(self.command)+' prefix="'+str(self.prefix)+'" target="'+str(self.target)+'" params="'+str(self.params)+'">'
class MessageEvent(StandardEvent):
""" MessageEvent has all of the attributes as
:class:`ircutils.events.StandardEvent` with the added attribute ``message``
which holds the message data.
::
from ircutils import bot
class PrinterBot(bot.SimpleBot):
def on_message(self, event):
print "<{0}> {1}".format(event.source, event.message)
"""
def __init__(self, prefix, command, params):
StandardEvent.__init__(self, prefix, command, params)
self.message = params[-1]
class CTCPEvent(StandardEvent):
""" Represents a Client-To-Client Protocol (CTCP) event. """
def __init__(self):
self.source = None
self.target = None
self.command = None
self.params = []
# ------------------------------------------------------------------------------
# > BEGIN EventListener AND HELPER CODE
# ------------------------------------------------------------------------------
class EventListener(object):
""" This class is a simple event listener designed to be subclassed. Each
event listener is in charge of activating its handlers.
"""
def __init__(self):
self.handlers = []
def add_handler(self, handler, priority=0):
""" Add a handler to the event listener. It will be called when the
listener decides it's time. It will place it in order depending
on the priority specified. The default is 0.
Event handlers take the form of::
def my_handler(client, event):
# Do stuff with the client and event here
# Example:
client.send_message(event.target, "Hi!")
If :class:`ircutils.bot.SimpleBot` is being used, you do not need to
use this method as handlers are automatically added.
"""
self.handlers += [(priority, handler)]
self.handlers.sort(key=lambda r: r[0])
def remove_handler(self, handler):
""" This removes all handlers that are equal to the ``handler`` which
are bound to the event listener. This isn't too efficient since
it is ``O(n^2)``.
"""
        for p, l in list(self.handlers):
if l == handler:
self.handlers.remove((p,l))
def activate_handlers(self, *args):
""" This activates each handler that's bound to the listener. It works
in order, so handlers with a higher priority will be activated
before all others. The ``args`` sent to this will be sent to each
handler. It's a good idea to always make sure to send in the client
and the event.
"""
        for p, handler in list(self.handlers):
try:
handler(*args)
except Exception as ex:
print(ex)
self.handlers.remove((p, handler))
def notify(self, client, event):
""" This is to be overridden when subclassed. It gets called after each
event generated by the system. If the event listener decides to, it
should run its handlers from here.
"""
raise NotImplementedError("notify() must be overridden.")
class _CustomListener(EventListener):
def __init__(self, command, target, source):
EventListener.__init__(self)
self.command = command
self.target = target
self.source = source
def notify(self, client, event):
if self.command in (None, event.command) and \
self.target in (None, event.target) and \
self.source in (None, event.source):
self.activate_handlers(client, event)
def create_listener(command=None, target=None, source=None):
""" Create a listener on-the-fly. This is the simplest way of creating event
listeners, but also very limited. Examples::
# Creates a listener that looks for events where the command is PRIVMSG
msg_listener = events.create_listener(command="PRIVMSG")
# Listens for events from the NickServ service
    ns_listener = events.create_listener(source="NickServ")
# Listens for events that are messages to a specific channel
example = events.create_listener(command="PRIVMSG", target="#channel")
"""
return _CustomListener(command, target, source)
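# Illustrative sketch extending the docstring above ('greet' is a hypothetical
# handler); a created listener still needs handlers and must be registered on
# the client's events dispatcher:
#
#   def greet(client, event):
#       client.send_message(event.target, "Hello, " + event.source + "!")
#
#   hello = create_listener(command="PRIVMSG", target="#example")
#   hello.add_handler(greet)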
# ------------------------------------------------------------------------------
# > BEGIN BUILT-IN EVENT LISTENERS
# ------------------------------------------------------------------------------
class ConnectListener(EventListener):
def notify(self, client, event):
if event.command == "CONN_CONNECT":
self.activate_handlers(client, event)
class DisconnectListener(EventListener):
def notify(self, client, event):
if event.command == "CONN_DISCONNECT":
self.activate_handlers(client, event)
connection = {
"connect": ConnectListener,
"disconnect": DisconnectListener
}
class AnyListener(EventListener):
def notify(self, client, event):
self.activate_handlers(client, event)
class WelcomeListener(EventListener):
def notify(self, client, event):
if event.command == "RPL_WELCOME":
self.activate_handlers(client, event)
class NickChangeListener(EventListener):
def notify(self, client, event):
if event.command == "NICK":
self.activate_handlers(client, event)
class PingListener(EventListener):
def notify(self, client, event):
if event.command == "PING":
self.activate_handlers(client, event)
class InviteListener(EventListener):
def notify(self, client, event):
if event.command == "INVITE":
self.activate_handlers(client, event)
class KickListener(EventListener):
def notify(self, client, event):
if event.command == "KICK":
self.activate_handlers(client, event)
class JoinListener(EventListener):
def notify(self, client, event):
if event.command == "JOIN":
self.activate_handlers(client, event)
class QuitListener(EventListener):
def notify(self, client, event):
if event.command == "QUIT":
self.activate_handlers(client, event)
class PartListener(EventListener):
def notify(self, client, event):
if event.command == "PART":
self.activate_handlers(client, event)
class ErrorListener(EventListener):
def notify(self, client, event):
if event.command == "ERROR":
self.activate_handlers(client, event)
standard = {
"any": AnyListener,
"welcome": WelcomeListener,
"ping": PingListener,
"invite": InviteListener,
"kick": KickListener,
"join": JoinListener,
"quit": QuitListener,
"part": PartListener,
"nick_change": NickChangeListener,
"error": ErrorListener
}
class MessageListener(EventListener):
def notify(self, client, event):
if event.command == "PRIVMSG":
self.activate_handlers(client, event)
class PrivateMessageListener(MessageListener):
def notify(self, client, event):
if event.command == "PRIVMSG":
if not protocol.is_channel(event.target):
self.activate_handlers(client, event)
class ChannelMessageListener(MessageListener):
def notify(self, client, event):
if event.command == "PRIVMSG":
if protocol.is_channel(event.target):
self.activate_handlers(client, event)
class NoticeListener(MessageListener):
def notify(self, client, event):
if event.command == "NOTICE":
self.activate_handlers(client, event)
class PrivateNoticeListener(NoticeListener):
def notify(self, client, event):
if event.command == "NOTICE":
if not protocol.is_channel(event.target):
self.activate_handlers(client, event)
class ChannelNoticeListener(NoticeListener):
def notify(self, client, event):
if event.command == "NOTICE":
if protocol.is_channel(event.target):
self.activate_handlers(client, event)
messages = {
"message": MessageListener,
"channel_message": ChannelMessageListener,
"private_message": PrivateMessageListener,
"notice": NoticeListener,
"channel_notice": ChannelNoticeListener,
"private_notice": PrivateNoticeListener
}
class CTCPListener(EventListener):
def notify(self, client, event):
if event.command.startswith("CTCP_"):
self.activate_handlers(client, event)
class CTCPActionListener(CTCPListener):
def notify(self, client, event):
if event.command == "CTCP_ACTION":
self.activate_handlers(client, event)
class CTCPUserInfoListener(CTCPListener):
def notify(self, client, event):
if event.command == "CTCP_USERINFO":
self.activate_handlers(client, event)
class CTCPClientInfoListener(CTCPListener):
def notify(self, client, event):
return event.command == "CTCP_CLIENTINFO"
class CTCPVersionListener(CTCPListener):
def notify(self, client, event):
if event.command == "CTCP_VERSION":
self.activate_handlers(client, event)
class CTCPPingListener(CTCPListener):
def notify(self, client, event):
if event.command == "CTCP_PING":
self.activate_handlers(client, event)
class CTCPErrorListener(CTCPListener):
def notify(self, client, event):
if event.command == "CTCP_ERROR":
self.activate_handlers(client, event)
class CTCPTimeListener(CTCPListener):
def notify(self, client, event):
if event.command == "CTCP_TIME":
self.activate_handlers(client, event)
class DCCListener(CTCPListener):
def notify(self, client, event):
if event.command.startswith("CTCP_DCC"):
self.activate_handlers(client, event)
ctcp = {
"ctcp": CTCPListener,
"ctcp_action": CTCPActionListener,
"ctcp_userinfo": CTCPUserInfoListener,
"ctcp_clientinfo": CTCPClientInfoListener,
"ctcp_version": CTCPVersionListener,
"ctcp_ping": CTCPPingListener,
"ctcp_error": CTCPErrorListener,
"ctcp_time": CTCPTimeListener,
"dcc": DCCListener
}
class ReplyListener(EventListener):
def notify(self, client, event):
if event.command.startswith("RPL_"):
self.activate_handlers(client, event)
class NameReplyListener(ReplyListener):
class NameReplyEvent(Event):
def __init__(self):
self.channel = None
self.name_list = []
def __init__(self):
ReplyListener.__init__(self)
self._name_lists = collections.defaultdict(self.NameReplyEvent)
def notify(self, client, event):
if event.command == "RPL_NAMREPLY":
# "( "=" / "*" / "@" ) <channel>
# :[ "@" / "+" ] <nick> *( " " [ "@" / "+" ] <nick> )
#
# - "@" is used for secret channels, "*" for private
# channels, and "=" for others (public channels).
channel = event.params[1].lower()
names = event.params[2].strip().split(" ")
            # TODO: stripping the name symbols below discards the "@"/"+"
            # prefixes, so op/voice information is lost.
names = map(protocol.strip_name_symbol, names)
self._name_lists[channel].name_list.extend(names)
elif event.command == "RPL_ENDOFNAMES":
# <channel> :End of NAMES list
channel_name = event.params[0]
name_event = self._name_lists[channel_name]
name_event.channel = channel_name
self.activate_handlers(client, name_event)
del self._name_lists[channel_name]
class ListReplyListener(ReplyListener):
class ListReplyEvent(Event):
def __init__(self, channel_list):
self.channel_list = channel_list
def __init__(self):
ReplyListener.__init__(self)
self.channel_list = []
def notify(self, client, event):
if event.command == "RPL_LIST":
# <channel> <# visible> :<topic>
channel_data = (event.params[0].lower(), event.params[1], event.params[2])
self.channel_list.append(channel_data)
elif event.command == "RPL_LISTEND":
# :End of LIST
list_event = self.ListReplyEvent(self.channel_list)
self.activate_handlers(client, list_event)
self.channel_list = []
class WhoisReplyListener(ReplyListener):
""" http://tools.ietf.org/html/rfc1459#section-4.5.2 """
class WhoisReplyEvent(Event):
def __init__(self):
self.nick = None
self.user = None
self.host = None
self.real_name = None
self.channels = []
self.server = None
self.is_operator = False
self.idle_time = 0 # seconds
def __init__(self):
ReplyListener.__init__(self)
self._whois_replies = collections.defaultdict(self.WhoisReplyEvent)
def notify(self, client, event):
if event.command == "RPL_WHOISUSER":
# <nick> <user> <host> * :<real name>
reply = self._whois_replies[event.params[1]]
reply.nick = event.params[0]
reply.user = event.params[1]
reply.host = event.params[2]
reply.real_name = event.params[4]
elif event.command == "RPL_WHOISCHANNELS":
# <nick> :*( ( "@" / "+" ) <channel> " " )
channels = event.params[1].strip().split()
channels = map(protocol.strip_name_symbol, channels)
self._whois_replies[event.params[0]].channels.extend(channels)
elif event.command == "RPL_WHOISSERVER":
# <nick> <server> :<server info>
self._whois_replies[event.params[0]].server = event.params[1]
elif event.command == "RPL_WHOISIDLE":
# <nick> <integer> :seconds idle
self._whois_replies[event.params[0]].idle_time = event.params[1]
elif event.command == "RPL_WHOISOPERATOR":
# <nick> :is an IRC operator
self._whois_replies[event.params[0]].is_operator = True
elif event.command == "RPL_ENDOFWHOIS":
# <nick> :End of WHOIS list
self.activate_handlers(client, self._whois_replies[event.params[0]])
del self._whois_replies[event.params[0]]
class WhoReplyListener(ReplyListener):
""" http://tools.ietf.org/html/rfc1459#section-4.5.2 """
class WhoReplyEvent(Event):
def __init__(self):
self.channel_name = None
self.user_list = []
def __init__(self):
ReplyListener.__init__(self)
self._who_replies = collections.defaultdict(self.WhoReplyEvent)
def notify(self, client, event):
if event.command == "RPL_WHOREPLY":
channel = event.params[0].lower()
user = protocol.User()
user.user = event.params[1]
user.host = event.params[2]
user.server = event.params[3]
user.nick = event.params[4]
user.real_name = event.params[6].split()[1]
self._who_replies[channel].user_list.append(user)
elif event.command == "RPL_ENDOFWHO":
channel = event.params[0].lower()
self._who_replies[channel].channel_name = channel
self.activate_handlers(client, self._who_replies[channel])
class ErrorReplyListener(ReplyListener):
def notify(self, client, event):
if event.command.startswith("ERR_"):
self.activate_handlers(client, event)
replies = {
"reply": ReplyListener,
"name_reply": NameReplyListener,
"list_reply": ListReplyListener,
"whois_reply": WhoisReplyListener,
"who_reply": WhoReplyListener,
"error_reply": ErrorReplyListener
}
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import logging
import urlparse
from integration_tests import chrome_proxy_metrics as metrics
from metrics import loading
from telemetry.core import util
from telemetry.page import page_test
class ChromeProxyLatency(page_test.PageTest):
"""Chrome proxy latency measurement."""
def __init__(self, *args, **kwargs):
super(ChromeProxyLatency, self).__init__(*args, **kwargs)
def WillNavigateToPage(self, page, tab):
tab.ClearCache(force=True)
def ValidateAndMeasurePage(self, page, tab, results):
# Wait for the load event.
tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 300)
loading.LoadingMetric().AddResults(tab, results)
class ChromeProxyDataSaving(page_test.PageTest):
"""Chrome proxy data daving measurement."""
def __init__(self, *args, **kwargs):
super(ChromeProxyDataSaving, self).__init__(*args, **kwargs)
self._metrics = metrics.ChromeProxyMetric()
def WillNavigateToPage(self, page, tab):
tab.ClearCache(force=True)
self._metrics.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
# Wait for the load event.
tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 300)
self._metrics.Stop(page, tab)
self._metrics.AddResultsForDataSaving(tab, results)
class ChromeProxyValidation(page_test.PageTest):
"""Base class for all chrome proxy correctness measurements."""
def __init__(self, restart_after_each_page=False):
super(ChromeProxyValidation, self).__init__(
needs_browser_restart_after_each_page=restart_after_each_page)
self._metrics = metrics.ChromeProxyMetric()
self._page = None
# Whether a timeout exception is expected during the test.
self._expect_timeout = False
def CustomizeBrowserOptions(self, options):
# Enable the chrome proxy (data reduction proxy).
options.AppendExtraBrowserArgs('--enable-spdy-proxy-auth')
def WillNavigateToPage(self, page, tab):
tab.ClearCache(force=True)
assert self._metrics
self._metrics.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
self._page = page
# Wait for the load event.
tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 300)
assert self._metrics
self._metrics.Stop(page, tab)
self.AddResults(tab, results)
def AddResults(self, tab, results):
raise NotImplementedError
def StopBrowserAfterPage(self, browser, page): # pylint: disable=W0613
if hasattr(page, 'restart_after') and page.restart_after:
return True
return False
def RunNavigateSteps(self, page, tab):
# The redirect from safebrowsing causes a timeout. Ignore that.
try:
super(ChromeProxyValidation, self).RunNavigateSteps(page, tab)
    except util.TimeoutException as e:
if self._expect_timeout:
logging.warning('Navigation timeout on page %s',
page.name if page.name else page.url)
else:
raise e
class ChromeProxyHeaders(ChromeProxyValidation):
"""Correctness measurement for response headers."""
def __init__(self):
super(ChromeProxyHeaders, self).__init__(restart_after_each_page=True)
def AddResults(self, tab, results):
self._metrics.AddResultsForHeaderValidation(tab, results)
class ChromeProxyBypass(ChromeProxyValidation):
"""Correctness measurement for bypass responses."""
def __init__(self):
super(ChromeProxyBypass, self).__init__(restart_after_each_page=True)
def AddResults(self, tab, results):
self._metrics.AddResultsForBypass(tab, results)
class ChromeProxySafebrowsing(ChromeProxyValidation):
"""Correctness measurement for safebrowsing."""
def __init__(self):
super(ChromeProxySafebrowsing, self).__init__()
def WillNavigateToPage(self, page, tab):
super(ChromeProxySafebrowsing, self).WillNavigateToPage(page, tab)
self._expect_timeout = True
def AddResults(self, tab, results):
self._metrics.AddResultsForSafebrowsing(tab, results)
_FAKE_PROXY_AUTH_VALUE = 'aabbccdd3b7579186c1b0620614fdb1f0000ffff'
_TEST_SERVER = 'chromeproxy-test.appspot.com'
_TEST_SERVER_DEFAULT_URL = 'http://' + _TEST_SERVER + '/default'
# We rely on the chromeproxy-test server to facilitate some of the tests.
# The test server code is at <TBD location> and runs at _TEST_SERVER
#
# The test server allows requests to override the response status, headers, and
# body through query parameters. See GetResponseOverrideURL.
def GetResponseOverrideURL(url, respStatus=0, respHeader="", respBody=""):
""" Compose the request URL with query parameters to override
the chromeproxy-test server response.
"""
queries = []
if respStatus > 0:
queries.append('respStatus=%d' % respStatus)
if respHeader:
queries.append('respHeader=%s' % base64.b64encode(respHeader))
if respBody:
queries.append('respBody=%s' % base64.b64encode(respBody))
if len(queries) == 0:
return url
"&".join(queries)
# url has query already
if urlparse.urlparse(url).query:
return url + '&' + "&".join(queries)
else:
return url + '?' + "&".join(queries)
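# Illustrative usage (a sketch, not part of the original measurement code; the
# URLs are placeholders):
#
#   GetResponseOverrideURL('http://test.com/page', respStatus=404)
#     -> 'http://test.com/page?respStatus=404'
#   GetResponseOverrideURL('http://test.com/page?x=1', respBody='not OK')
#     -> 'http://test.com/page?x=1&respBody=bm90IE9L'   # base64.b64encode('not OK')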
class ChromeProxyHTTPFallbackProbeURL(ChromeProxyValidation):
"""Correctness measurement for proxy fallback.
In this test, the probe URL does not return 'OK'. Chrome is expected
to use the fallback proxy.
"""
def __init__(self):
super(ChromeProxyHTTPFallbackProbeURL, self).__init__()
def CustomizeBrowserOptions(self, options):
super(ChromeProxyHTTPFallbackProbeURL,
self).CustomizeBrowserOptions(options)
# Use the test server probe URL which returns the response
# body as specified by respBody.
probe_url = GetResponseOverrideURL(
_TEST_SERVER_DEFAULT_URL,
respBody='not OK')
options.AppendExtraBrowserArgs(
'--data-reduction-proxy-probe-url=%s' % probe_url)
def AddResults(self, tab, results):
self._metrics.AddResultsForHTTPFallback(tab, results)
# Depends on the fix of http://crbug.com/330342.
class ChromeProxyHTTPFallbackViaHeader(ChromeProxyValidation):
"""Correctness measurement for proxy fallback.
In this test, the configured proxy is the chromeproxy-test server which
will send back a response without the expected Via header. Chrome is
expected to use the fallback proxy and add the configured proxy to the
bad proxy list.
"""
def __init__(self):
super(ChromeProxyHTTPFallbackViaHeader, self).__init__()
def CustomizeBrowserOptions(self, options):
super(ChromeProxyHTTPFallbackViaHeader,
self).CustomizeBrowserOptions(options)
options.AppendExtraBrowserArgs('--ignore-certificate-errors')
options.AppendExtraBrowserArgs(
'--spdy-proxy-auth-origin=http://%s' % _TEST_SERVER)
options.AppendExtraBrowserArgs(
'--spdy-proxy-auth-value=%s' % _FAKE_PROXY_AUTH_VALUE)
def AddResults(self, tab, results):
proxies = [
_TEST_SERVER + ":80",
self._metrics.effective_proxies['fallback'],
self._metrics.effective_proxies['direct']]
bad_proxies = [_TEST_SERVER + ":80"]
self._metrics.AddResultsForHTTPFallback(tab, results, proxies, bad_proxies)
class ChromeProxySmoke(ChromeProxyValidation):
"""Smoke measurement for basic chrome proxy correctness."""
def __init__(self):
super(ChromeProxySmoke, self).__init__()
def WillNavigateToPage(self, page, tab):
super(ChromeProxySmoke, self).WillNavigateToPage(page, tab)
if page.name == 'safebrowsing':
self._expect_timeout = True
def AddResults(self, tab, results):
# Map a page name to its AddResults func.
page_to_metrics = {
'header validation': [self._metrics.AddResultsForHeaderValidation],
'compression: image': [
self._metrics.AddResultsForHeaderValidation,
self._metrics.AddResultsForDataSaving,
],
'compression: javascript': [
self._metrics.AddResultsForHeaderValidation,
self._metrics.AddResultsForDataSaving,
],
'compression: css': [
self._metrics.AddResultsForHeaderValidation,
self._metrics.AddResultsForDataSaving,
],
'bypass': [self._metrics.AddResultsForBypass],
'safebrowsing': [self._metrics.AddResultsForSafebrowsing],
}
if self._page.name not in page_to_metrics:
raise page_test.MeasurementFailure(
'Invalid page name (%s) in smoke. Page name must be one of:\n%s' % (
self._page.name, page_to_metrics.keys()))
for add_result in page_to_metrics[self._page.name]:
add_result(tab, results)
|
|
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Parts of this file are based upon xmlrpclib.py, the XML-RPC client
# interface included in the Python distribution.
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
A fake XenAPI SDK.
"""
import base64
import pickle
import random
from xml.sax import saxutils
import zlib
from os_xenapi.client import session as xenapi_session
from os_xenapi.client import XenAPI
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
from nova import exception
from nova.i18n import _
_CLASSES = ['host', 'network', 'session', 'pool', 'SR', 'VBD',
'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task']
_after_create_functions = {}
_destroy_functions = {}
_db_content = {}
LOG = logging.getLogger(__name__)
def add_to_dict(functions):
"""A decorator that adds a function to dictionary."""
def decorator(func):
functions[func.__name__] = func
return func
return decorator
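# Illustration (not in the original module): the decorator registers each hook
# under its own function name, so the generic _create()/_destroy() handlers in
# SessionBase below can look hooks up by string, e.g.:
#
#   _after_create_functions['after_VM_create']  -> after_VM_create
#   _destroy_functions['destroy_vm']            -> destroy_vm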
def reset():
for c in _CLASSES:
_db_content[c] = {}
create_host('fake')
create_vm('fake dom 0',
'Running',
is_a_template=False,
is_control_domain=True,
domid='0')
def reset_table(table):
if table not in _CLASSES:
return
_db_content[table] = {}
def _create_pool(name_label):
return _create_object('pool',
{'name_label': name_label})
def create_host(name_label, hostname='fake_name', address='fake_addr'):
host_ref = _create_object('host',
{'name_label': name_label,
'hostname': hostname,
'address': address})
host_default_sr_ref = _create_local_srs(host_ref)
_create_local_pif(host_ref)
# Create a pool if we don't have one already
if len(_db_content['pool']) == 0:
pool_ref = _create_pool('')
_db_content['pool'][pool_ref]['master'] = host_ref
_db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref
_db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref
def create_network(name_label, bridge):
return _create_object('network',
{'name_label': name_label,
'bridge': bridge})
def create_vm(name_label, status, **kwargs):
if status == 'Running':
domid = "%d" % random.randrange(1, 1 << 16)
resident_on = list(_db_content['host'])[0]
else:
domid = "-1"
resident_on = ''
vm_rec = {'name_label': name_label,
'domid': domid,
'power_state': status,
'blocked_operations': {},
'resident_on': resident_on}
vm_rec.update(kwargs.copy())
vm_ref = _create_object('VM', vm_rec)
after_VM_create(vm_ref, vm_rec)
return vm_ref
@add_to_dict(_destroy_functions)
def destroy_vm(vm_ref):
vm_rec = _db_content['VM'][vm_ref]
vbd_refs = vm_rec['VBDs']
# NOTE(johannes): Shallow copy since destroy_vbd will remove itself
# from the list
for vbd_ref in vbd_refs[:]:
destroy_vbd(vbd_ref)
del _db_content['VM'][vm_ref]
@add_to_dict(_destroy_functions)
def destroy_vbd(vbd_ref):
vbd_rec = _db_content['VBD'][vbd_ref]
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'].remove(vbd_ref)
vdi_ref = vbd_rec['VDI']
vdi_rec = _db_content['VDI'][vdi_ref]
vdi_rec['VBDs'].remove(vbd_ref)
del _db_content['VBD'][vbd_ref]
@add_to_dict(_destroy_functions)
def destroy_vdi(vdi_ref):
vdi_rec = _db_content['VDI'][vdi_ref]
vbd_refs = vdi_rec['VBDs']
# NOTE(johannes): Shallow copy since destroy_vbd will remove itself
# from the list
for vbd_ref in vbd_refs[:]:
destroy_vbd(vbd_ref)
del _db_content['VDI'][vdi_ref]
def create_vdi(name_label, sr_ref, **kwargs):
vdi_rec = {
'SR': sr_ref,
'read_only': False,
'type': '',
'name_label': name_label,
'name_description': '',
'sharable': False,
'other_config': {},
'location': '',
'xenstore_data': {},
'sm_config': {'vhd-parent': None},
'physical_utilisation': '123',
'managed': True,
}
vdi_rec.update(kwargs)
vdi_ref = _create_object('VDI', vdi_rec)
after_VDI_create(vdi_ref, vdi_rec)
return vdi_ref
@add_to_dict(_after_create_functions)
def after_VDI_create(vdi_ref, vdi_rec):
vdi_rec.setdefault('VBDs', [])
def create_vbd(vm_ref, vdi_ref, userdevice=0, other_config=None):
if other_config is None:
other_config = {}
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': str(userdevice),
'currently_attached': False,
'other_config': other_config}
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
@add_to_dict(_after_create_functions)
def after_VBD_create(vbd_ref, vbd_rec):
"""Create read-only fields and backref from VM and VDI to VBD when VBD
is created.
"""
vbd_rec['currently_attached'] = False
vbd_rec['device'] = ''
vbd_rec.setdefault('other_config', {})
vm_ref = vbd_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VBDs'].append(vbd_ref)
vm_name_label = _db_content['VM'][vm_ref]['name_label']
vbd_rec['vm_name_label'] = vm_name_label
vdi_ref = vbd_rec['VDI']
if vdi_ref and vdi_ref != "OpaqueRef:NULL":
vdi_rec = _db_content['VDI'][vdi_ref]
vdi_rec['VBDs'].append(vbd_ref)
@add_to_dict(_after_create_functions)
def after_VIF_create(vif_ref, vif_rec):
"""Create backref from VM to VIF when VIF is created.
"""
vm_ref = vif_rec['VM']
vm_rec = _db_content['VM'][vm_ref]
vm_rec['VIFs'].append(vif_ref)
@add_to_dict(_after_create_functions)
def after_VM_create(vm_ref, vm_rec):
"""Create read-only fields in the VM record."""
vm_rec.setdefault('domid', "-1")
vm_rec.setdefault('is_control_domain', False)
vm_rec.setdefault('is_a_template', False)
vm_rec.setdefault('memory_static_max', str(8 * units.Gi))
vm_rec.setdefault('memory_dynamic_max', str(8 * units.Gi))
vm_rec.setdefault('VCPUs_max', str(4))
vm_rec.setdefault('VBDs', [])
vm_rec.setdefault('VIFs', [])
vm_rec.setdefault('resident_on', '')
def create_pbd(host_ref, sr_ref, attached):
config = {'path': '/var/run/sr-mount/%s' % sr_ref}
return _create_object('PBD',
{'device_config': config,
'host': host_ref,
'SR': sr_ref,
'currently_attached': attached})
def create_task(name_label):
return _create_object('task',
{'name_label': name_label,
'status': 'pending'})
def _create_local_srs(host_ref):
"""Create an SR that looks like the one created on the local disk by
default by the XenServer installer. Also, fake the installation of
an ISO SR.
"""
create_sr(name_label='Local storage ISO',
type='iso',
other_config={'i18n-original-value-name_label':
'Local storage ISO',
'i18n-key': 'local-storage-iso'},
physical_size=80000,
physical_utilisation=40000,
virtual_allocation=80000,
host_ref=host_ref)
return create_sr(name_label='Local storage',
type='ext',
other_config={'i18n-original-value-name_label':
'Local storage',
'i18n-key': 'local-storage'},
physical_size=40000,
physical_utilisation=20000,
virtual_allocation=10000,
host_ref=host_ref)
def create_sr(**kwargs):
sr_ref = _create_object(
'SR',
{'name_label': kwargs.get('name_label'),
'type': kwargs.get('type'),
'content_type': kwargs.get('type', 'user'),
'shared': kwargs.get('shared', False),
'physical_size': kwargs.get('physical_size', str(1 << 30)),
'physical_utilisation': str(
kwargs.get('physical_utilisation', 0)),
'virtual_allocation': str(kwargs.get('virtual_allocation', 0)),
'other_config': kwargs.get('other_config', {}),
'VDIs': kwargs.get('VDIs', [])})
pbd_ref = create_pbd(kwargs.get('host_ref'), sr_ref, True)
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
return sr_ref
def _create_local_pif(host_ref):
pif_ref = _create_object('PIF',
{'name-label': 'Fake PIF',
'MAC': '00:11:22:33:44:55',
'physical': True,
'VLAN': -1,
'device': 'fake0',
'host_uuid': host_ref,
'network': '',
'IP': '10.1.1.1',
'IPv6': '',
'uuid': '',
'management': 'true'})
_db_content['PIF'][pif_ref]['uuid'] = pif_ref
return pif_ref
def _create_object(table, obj):
ref = uuidutils.generate_uuid()
obj['uuid'] = uuidutils.generate_uuid()
_db_content[table][ref] = obj
return ref
def _create_sr(table, obj):
sr_type = obj[6]
# Forces fake to support iscsi only
if sr_type != 'iscsi' and sr_type != 'nfs':
raise XenAPI.Failure(['SR_UNKNOWN_DRIVER', sr_type])
host_ref = list(_db_content['host'])[0]
sr_ref = _create_object(table, obj[2])
if sr_type == 'iscsi':
vdi_ref = create_vdi('', sr_ref)
pbd_ref = create_pbd(host_ref, sr_ref, True)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
_db_content['PBD'][pbd_ref]['SR'] = sr_ref
return sr_ref
def _create_vlan(pif_ref, vlan_num, network_ref):
pif_rec = get_record('PIF', pif_ref)
vlan_pif_ref = _create_object('PIF',
{'name-label': 'Fake VLAN PIF',
'MAC': '00:11:22:33:44:55',
'physical': True,
'VLAN': vlan_num,
'device': pif_rec['device'],
'host_uuid': pif_rec['host_uuid']})
return _create_object('VLAN',
{'tagged-pif': pif_ref,
'untagged-pif': vlan_pif_ref,
'tag': vlan_num})
def get_all(table):
return list(_db_content[table].keys())
def get_all_records(table):
return _db_content[table]
def _query_matches(record, query):
# Simple support for the XenServer query language:
# 'field "host"="<uuid>" and field "SR"="<sr uuid>"'
# Tested through existing tests (e.g. calls to find_network_with_bridge)
and_clauses = query.split(" and ")
if len(and_clauses) > 1:
matches = True
for clause in and_clauses:
matches = matches and _query_matches(record, clause)
return matches
or_clauses = query.split(" or ")
if len(or_clauses) > 1:
matches = False
for clause in or_clauses:
matches = matches or _query_matches(record, clause)
return matches
if query.startswith('not '):
return not _query_matches(record, query[4:])
# Now it must be a single field - bad queries never match
if not query.startswith('field'):
return False
(field, value) = query[6:].split('=', 1)
# Some fields (e.g. name_label, memory_overhead) have double
# underscores in the DB, but only single underscores when querying
field = field.replace("__", "_").strip(" \"'")
value = value.strip(" \"'")
# Strings should be directly compared
if isinstance(record[field], str):
return record[field] == value
# But for all other value-checks, convert to a string first
# (Notably used for booleans - which can be lower or camel
# case and are interpreted/sanitised by XAPI)
return str(record[field]).lower() == value.lower()
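# Example queries accepted by _query_matches (illustrative, based on the grammar
# comment above; the refs are placeholders):
#
#   'field "host"="%s" and field "SR"="%s"' % (host_ref, sr_ref)
#   'field "currently_attached"="true" or field "currently_attached"="True"'
#   'not field "name__label"=""'    # double underscores collapse to single ones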
def get_all_records_where(table_name, query):
matching_records = {}
table = _db_content[table_name]
for record in table:
if _query_matches(table[record], query):
matching_records[record] = table[record]
return matching_records
def get_record(table, ref):
if ref in _db_content[table]:
return _db_content[table].get(ref)
else:
raise XenAPI.Failure(['HANDLE_INVALID', table, ref])
def check_for_session_leaks():
if len(_db_content['session']) > 0:
raise exception.NovaException('Sessions have leaked: %s' %
_db_content['session'])
def as_value(s):
"""Helper function for simulating XenAPI plugin responses. It
escapes and wraps the given argument.
"""
return '<value>%s</value>' % saxutils.escape(s)
def as_json(*args, **kwargs):
"""Helper function for simulating XenAPI plugin responses for those
that are returning JSON. If this function is given plain arguments,
then these are rendered as a JSON list. If it's given keyword
arguments then these are rendered as a JSON dict.
"""
arg = args or kwargs
return jsonutils.dumps(arg)
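# A quick sketch of the two shapes as_json produces (illustrative only):
#
#   as_json('0', 'done')                        -> '["0", "done"]'
#   as_json(returncode='0', message='success')  -> '{"returncode": "0", "message": "success"}'
#                                                  (key order may vary)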
class Failure(Exception):
def __init__(self, details):
self.details = details
def __str__(self):
try:
return str(self.details)
except Exception:
return "XenAPI Fake Failure: %s" % str(self.details)
def _details_map(self):
return {str(i): self.details[i] for i in range(len(self.details))}
class SessionBase(object):
"""Base class for Fake Sessions."""
def __init__(self, uri):
self._session = None
xenapi_session.apply_session_helpers(self)
def pool_get_default_SR(self, _1, pool_ref):
return list(_db_content['pool'].values())[0]['default-SR']
def VBD_insert(self, _1, vbd_ref, vdi_ref):
vbd_rec = get_record('VBD', vbd_ref)
get_record('VDI', vdi_ref)
vbd_rec['empty'] = False
vbd_rec['VDI'] = vdi_ref
def VBD_plug(self, _1, ref):
rec = get_record('VBD', ref)
if rec['currently_attached']:
raise XenAPI.Failure(['DEVICE_ALREADY_ATTACHED', ref])
rec['currently_attached'] = True
rec['device'] = 'fakedev'
def VBD_unplug(self, _1, ref):
rec = get_record('VBD', ref)
if not rec['currently_attached']:
raise XenAPI.Failure(['DEVICE_ALREADY_DETACHED', ref])
rec['currently_attached'] = False
rec['device'] = ''
def VBD_add_to_other_config(self, _1, vbd_ref, key, value):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise XenAPI.Failure(
['MAP_DUPLICATE_KEY', 'VBD', 'other_config', vbd_ref, key])
db_ref['other_config'][key] = value
def VBD_get_other_config(self, _1, vbd_ref):
db_ref = _db_content['VBD'][vbd_ref]
if 'other_config' not in db_ref:
return {}
return db_ref['other_config']
def PBD_create(self, _1, pbd_rec):
pbd_ref = _create_object('PBD', pbd_rec)
_db_content['PBD'][pbd_ref]['currently_attached'] = False
return pbd_ref
def PBD_plug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if rec['currently_attached']:
raise XenAPI.Failure(['DEVICE_ALREADY_ATTACHED', rec])
rec['currently_attached'] = True
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
def PBD_unplug(self, _1, pbd_ref):
rec = get_record('PBD', pbd_ref)
if not rec['currently_attached']:
raise XenAPI.Failure(['DEVICE_ALREADY_DETACHED', rec])
rec['currently_attached'] = False
sr_ref = rec['SR']
_db_content['SR'][sr_ref]['PBDs'].remove(pbd_ref)
def SR_introduce(self, _1, sr_uuid, label, desc, type, content_type,
shared, sm_config):
for ref, rec in _db_content['SR'].items():
if rec.get('uuid') == sr_uuid:
# make forgotten = 0 and return ref
_db_content['SR'][ref]['forgotten'] = 0
return ref
# SR not found in db, so we create one
params = {'sr_uuid': sr_uuid,
'label': label,
'desc': desc,
'type': type,
'content_type': content_type,
'shared': shared,
'sm_config': sm_config}
sr_ref = _create_object('SR', params)
_db_content['SR'][sr_ref]['uuid'] = sr_uuid
_db_content['SR'][sr_ref]['forgotten'] = 0
vdi_per_lun = False
if type == 'iscsi':
# Just to be clear
vdi_per_lun = True
if vdi_per_lun:
# we need to create a vdi because this introduce
# is likely meant for a single vdi
vdi_ref = create_vdi('', sr_ref)
_db_content['SR'][sr_ref]['VDIs'] = [vdi_ref]
_db_content['VDI'][vdi_ref]['SR'] = sr_ref
return sr_ref
def SR_forget(self, _1, sr_ref):
_db_content['SR'][sr_ref]['forgotten'] = 1
def SR_scan(self, _1, sr_ref):
return
def VM_get_xenstore_data(self, _1, vm_ref):
return _db_content['VM'][vm_ref].get('xenstore_data', {})
def VM_remove_from_xenstore_data(self, _1, vm_ref, key):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
return
if key in db_ref['xenstore_data']:
del db_ref['xenstore_data'][key]
def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
db_ref = _db_content['VM'][vm_ref]
if 'xenstore_data' not in db_ref:
db_ref['xenstore_data'] = {}
db_ref['xenstore_data'][key] = value
def VM_pool_migrate(self, _1, vm_ref, host_ref, options):
pass
def VDI_remove_from_other_config(self, _1, vdi_ref, key):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
return
if key in db_ref['other_config']:
del db_ref['other_config'][key]
def VDI_add_to_other_config(self, _1, vdi_ref, key, value):
db_ref = _db_content['VDI'][vdi_ref]
if 'other_config' not in db_ref:
db_ref['other_config'] = {}
if key in db_ref['other_config']:
raise XenAPI.Failure(
['MAP_DUPLICATE_KEY', 'VDI', 'other_config', vdi_ref, key])
db_ref['other_config'][key] = value
def VDI_copy(self, _1, vdi_to_copy_ref, sr_ref):
db_ref = _db_content['VDI'][vdi_to_copy_ref]
name_label = db_ref['name_label']
read_only = db_ref['read_only']
sharable = db_ref['sharable']
other_config = db_ref['other_config'].copy()
return create_vdi(name_label, sr_ref, sharable=sharable,
read_only=read_only, other_config=other_config)
def VDI_clone(self, _1, vdi_to_clone_ref):
db_ref = _db_content['VDI'][vdi_to_clone_ref]
sr_ref = db_ref['SR']
return self.VDI_copy(_1, vdi_to_clone_ref, sr_ref)
def host_compute_free_memory(self, _1, ref):
# Always return 12GB available
return 12 * units.Gi
def _plugin_agent_version(self, method, args):
return as_json(returncode='0', message='1.0\\r\\n')
def _plugin_agent_key_init(self, method, args):
return as_json(returncode='D0', message='1')
def _plugin_agent_password(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_inject_file(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_resetnetwork(self, method, args):
return as_json(returncode='0', message='success')
def _plugin_agent_agentupdate(self, method, args):
url = args["url"]
md5 = args["md5sum"]
message = "success with %(url)s and hash:%(md5)s" % dict(url=url,
md5=md5)
return as_json(returncode='0', message=message)
def _plugin_noop(self, method, args):
return ''
def _plugin_pickle_noop(self, method, args):
return pickle.dumps(None)
def _plugin_migration_transfer_vhd(self, method, args):
kwargs = pickle.loads(args['params'])['kwargs']
vdi_ref = self.xenapi_request('VDI.get_by_uuid',
(kwargs['vdi_uuid'], ))
assert vdi_ref
return pickle.dumps(None)
_plugin_glance_upload_vhd2 = _plugin_pickle_noop
_plugin_kernel_copy_vdi = _plugin_noop
_plugin_kernel_create_kernel_ramdisk = _plugin_noop
_plugin_kernel_remove_kernel_ramdisk = _plugin_noop
_plugin_migration_move_vhds_into_sr = _plugin_noop
def _plugin_xenhost_host_data(self, method, args):
return jsonutils.dumps({
'host_memory': {'total': 10,
'overhead': 20,
'free': 30,
'free-computed': 40},
'host_uuid': 'fb97583b-baa1-452d-850e-819d95285def',
'host_name-label': 'fake-xenhost',
'host_name-description': 'Default install of XenServer',
'host_hostname': 'fake-xenhost',
'host_ip_address': '10.219.10.24',
'enabled': 'true',
'host_capabilities': ['xen-3.0-x86_64',
'xen-3.0-x86_32p',
'hvm-3.0-x86_32',
'hvm-3.0-x86_32p',
'hvm-3.0-x86_64'],
'host_other-config': {
'agent_start_time': '1412774967.',
'iscsi_iqn': 'iqn.2014-10.org.example:39fa9ee3',
'boot_time': '1412774885.',
},
'host_cpu_info': {
'physical_features': '0098e3fd-bfebfbff-00000001-28100800',
'modelname': 'Intel(R) Xeon(R) CPU X3430 @ 2.40GHz',
'vendor': 'GenuineIntel',
'features': '0098e3fd-bfebfbff-00000001-28100800',
'family': 6,
'maskable': 'full',
'cpu_count': 4,
'socket_count': '1',
'flags': 'fpu de tsc msr pae mce cx8 apic sep mtrr mca '
'cmov pat clflush acpi mmx fxsr sse sse2 ss ht '
'nx constant_tsc nonstop_tsc aperfmperf pni vmx '
'est ssse3 sse4_1 sse4_2 popcnt hypervisor ida '
'tpr_shadow vnmi flexpriority ept vpid',
'stepping': 5,
'model': 30,
'features_after_reboot': '0098e3fd-bfebfbff-00000001-28100800',
'speed': '2394.086'
},
})
def _plugin_poweraction(self, method, args):
return jsonutils.dumps({"power_action": method[5:]})
_plugin_xenhost_host_reboot = _plugin_poweraction
_plugin_xenhost_host_startup = _plugin_poweraction
_plugin_xenhost_host_shutdown = _plugin_poweraction
def _plugin_xenhost_set_host_enabled(self, method, args):
enabled = 'enabled' if args.get('enabled') == 'true' else 'disabled'
return jsonutils.dumps({"status": enabled})
def _plugin_xenhost_host_uptime(self, method, args):
return jsonutils.dumps({"uptime": "fake uptime"})
def _plugin_xenhost_get_pci_device_details(self, method, args):
"""Simulate the ouput of three pci devices.
Both of those devices are available for pci passtrough but
only one will match with the pci whitelist used in the
method test_pci_passthrough_devices_*().
Return a single list.
"""
# Driver is not pciback
dev_bad1 = ["Slot:\t0000:86:10.0", "Class:\t0604", "Vendor:\t10b5",
"Device:\t8747", "Rev:\tba", "Driver:\tpcieport", "\n"]
# Driver is pciback but vendor and device are bad
dev_bad2 = ["Slot:\t0000:88:00.0", "Class:\t0300", "Vendor:\t0bad",
"Device:\tcafe", "SVendor:\t10de", "SDevice:\t100d",
"Rev:\ta1", "Driver:\tpciback", "\n"]
# Driver is pciback and vendor, device are used for matching
dev_good = ["Slot:\t0000:87:00.0", "Class:\t0300", "Vendor:\t10de",
"Device:\t11bf", "SVendor:\t10de", "SDevice:\t100d",
"Rev:\ta1", "Driver:\tpciback", "\n"]
lspci_output = "\n".join(dev_bad1 + dev_bad2 + dev_good)
return pickle.dumps(lspci_output)
def _plugin_xenhost_get_pci_type(self, method, args):
return pickle.dumps("type-PCI")
def _plugin_console_get_console_log(self, method, args):
dom_id = args["dom_id"]
if dom_id == 0:
raise XenAPI.Failure('Guest does not have a console')
return base64.b64encode(
zlib.compress(("dom_id: %s" % dom_id).encode('utf-8')))
def _plugin_dom0_plugin_version_get_version(self, method, args):
return pickle.dumps("2.0")
def _plugin_xenhost_query_gc(self, method, args):
return pickle.dumps("False")
def _plugin_partition_utils_make_partition(self, method, args):
return pickle.dumps(None)
def host_call_plugin(self, _1, _2, plugin, method, args):
plugin = plugin.rstrip('.py')
func = getattr(self, '_plugin_%s_%s' % (plugin, method), None)
if not func:
raise Exception('No simulation in host_call_plugin for %s,%s' %
(plugin, method))
return func(method, args)
def VDI_get_virtual_size(self, *args):
return 1 * units.Gi
def VDI_resize_online(self, *args):
return 'derp'
VDI_resize = VDI_resize_online
def _VM_reboot(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
if db_ref['power_state'] != 'Running':
raise XenAPI.Failure(['VM_BAD_POWER_STATE', 'fake-opaque-ref',
db_ref['power_state'].lower(), 'halted'])
db_ref['power_state'] = 'Running'
db_ref['domid'] = '%d' % (random.randrange(1, 1 << 16))
def VM_clean_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_reboot(self, session, vm_ref):
return self._VM_reboot(session, vm_ref)
def VM_hard_shutdown(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Halted'
db_ref['domid'] = "-1"
VM_clean_shutdown = VM_hard_shutdown
def VM_suspend(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Suspended'
def VM_pause(self, session, vm_ref):
db_ref = _db_content['VM'][vm_ref]
db_ref['power_state'] = 'Paused'
def pool_eject(self, session, host_ref):
pass
def pool_join(self, session, hostname, username, password):
pass
def pool_set_name_label(self, session, pool_ref, name):
pass
def host_migrate_receive(self, session, destref, nwref, options):
return {"value": "fake_migrate_data"}
def VM_assert_can_migrate(self, session, vmref, migrate_data, live,
vdi_map, vif_map, options):
pass
def VM_migrate_send(self, session, mref, migrate_data, live, vdi_map,
vif_map, options):
pass
def VM_remove_from_blocked_operations(self, session, vm_ref, key):
# operation is idempotent, XenServer doesn't care if the key exists
_db_content['VM'][vm_ref]['blocked_operations'].pop(key, None)
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
return None
elif methodname == 'logout' or methodname == 'session.logout':
self._logout()
return None
else:
full_params = (self._session,) + params
meth = getattr(self, methodname, None)
if meth is None:
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s') %
methodname)
return meth(*full_params)
def _login(self, method, params):
self._session = uuidutils.generate_uuid()
_session_info = {'uuid': uuidutils.generate_uuid(),
'this_host': list(_db_content['host'])[0]}
_db_content['session'][self._session] = _session_info
def _logout(self):
s = self._session
self._session = None
if s not in _db_content['session']:
raise exception.NovaException(
"Logging out a session that is invalid or already logged "
"out: %s" % s)
del _db_content['session'][s]
def __getattr__(self, name):
if name == 'handle':
return self._session
elif name == 'xenapi':
return _Dispatcher(self.xenapi_request, None)
elif name.startswith('login') or name.startswith('slave_local'):
return lambda *params: self._login(name, params)
elif name.startswith('Async'):
return lambda *params: self._async(name, params)
elif '.' in name:
impl = getattr(self, name.replace('.', '_'))
if impl is not None:
def callit(*params):
LOG.debug('Calling %(name)s %(impl)s',
{'name': name, 'impl': impl})
self._check_session(params)
return impl(*params)
return callit
if self._is_gettersetter(name, True):
LOG.debug('Calling getter %s', name)
return lambda *params: self._getter(name, params)
elif self._is_gettersetter(name, False):
LOG.debug('Calling setter %s', name)
return lambda *params: self._setter(name, params)
elif self._is_create(name):
return lambda *params: self._create(name, params)
elif self._is_destroy(name):
return lambda *params: self._destroy(name, params)
elif name == 'XenAPI':
return FakeXenAPI()
else:
return None
def _is_gettersetter(self, name, getter):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1].startswith(getter and 'get_' or 'set_'))
def _is_create(self, name):
return self._is_method(name, 'create')
def _is_destroy(self, name):
return self._is_method(name, 'destroy')
def _is_method(self, name, meth):
bits = name.split('.')
return (len(bits) == 2 and
bits[0] in _CLASSES and
bits[1] == meth)
def _getter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
if func == 'get_all':
self._check_arg_count(params, 1)
return get_all(cls)
if func == 'get_all_records':
self._check_arg_count(params, 1)
return get_all_records(cls)
if func == 'get_all_records_where':
self._check_arg_count(params, 2)
return get_all_records_where(cls, params[1])
if func == 'get_record':
self._check_arg_count(params, 2)
return get_record(cls, params[1])
if func in ('get_by_name_label', 'get_by_uuid'):
self._check_arg_count(params, 2)
return_singleton = (func == 'get_by_uuid')
return self._get_by_field(
_db_content[cls], func[len('get_by_'):], params[1],
return_singleton=return_singleton)
if len(params) == 2:
field = func[len('get_'):]
ref = params[1]
if (ref in _db_content[cls]):
if (field in _db_content[cls][ref]):
return _db_content[cls][ref][field]
else:
raise XenAPI.Failure(['HANDLE_INVALID', cls, ref])
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)
def _setter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
if len(params) == 3:
field = func[len('set_'):]
ref = params[1]
val = params[2]
if (ref in _db_content[cls] and
field in _db_content[cls][ref]):
_db_content[cls][ref][field] = val
return
LOG.debug('Raising NotImplemented')
raise NotImplementedError(
'xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments or the database '
'is missing that field' % name)
def _create(self, name, params):
self._check_session(params)
is_sr_create = name == 'SR.create'
is_vlan_create = name == 'VLAN.create'
# Storage Repositories have a different API
expected = is_sr_create and 10 or is_vlan_create and 4 or 2
self._check_arg_count(params, expected)
(cls, _) = name.split('.')
ref = (is_sr_create and
_create_sr(cls, params) or
is_vlan_create and
_create_vlan(params[1], params[2], params[3]) or
_create_object(cls, params[1]))
# Call hook to provide any fixups needed (ex. creating backrefs)
after_hook = 'after_%s_create' % cls
try:
func = _after_create_functions[after_hook]
except KeyError:
pass
else:
func(ref, params[1])
obj = get_record(cls, ref)
# Add RO fields
if cls == 'VM':
obj['power_state'] = 'Halted'
return ref
def _destroy(self, name, params):
self._check_session(params)
self._check_arg_count(params, 2)
table = name.split('.')[0]
ref = params[1]
if ref not in _db_content[table]:
raise XenAPI.Failure(['HANDLE_INVALID', table, ref])
# Call destroy function (if exists)
destroy_func = _destroy_functions.get('destroy_%s' % table.lower())
if destroy_func:
destroy_func(ref)
else:
del _db_content[table][ref]
def _async(self, name, params):
task_ref = create_task(name)
task = _db_content['task'][task_ref]
func = name[len('Async.'):]
try:
result = self.xenapi_request(func, params[1:])
if result:
result = as_value(result)
task['result'] = result
task['status'] = 'success'
except XenAPI.Failure as exc:
task['error_info'] = exc.details
task['status'] = 'failed'
task['finished'] = timeutils.utcnow()
return task_ref
def _check_session(self, params):
if (self._session is None or
self._session not in _db_content['session']):
raise XenAPI.Failure(
['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
LOG.debug('Raising NotImplemented')
raise NotImplementedError('Call to XenAPI without using .xenapi')
def _check_arg_count(self, params, expected):
actual = len(params)
if actual != expected:
raise XenAPI.Failure(
['MESSAGE_PARAMETER_COUNT_MISMATCH', expected, actual])
def _get_by_field(self, recs, k, v, return_singleton):
result = []
for ref, rec in recs.items():
if rec.get(k) == v:
result.append(ref)
if return_singleton:
try:
return result[0]
except IndexError:
raise XenAPI.Failure(['UUID_INVALID', v, result, recs, k])
return result
class FakeXenAPI(object):
def __init__(self):
self.Failure = XenAPI.Failure
# Based upon _Method from xmlrpclib.
class _Dispatcher(object):
def __init__(self, send, name):
self.__send = send
self.__name = name
def __repr__(self):
if self.__name:
return '<xenapi.fake._Dispatcher for %s>' % self.__name
else:
return '<xenapi.fake._Dispatcher>'
def __getattr__(self, name):
if self.__name is None:
return _Dispatcher(self.__send, name)
else:
return _Dispatcher(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
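# Illustrative call flow (a sketch, not part of the original module; the URI and
# credentials are placeholders): attribute access on SessionBase.xenapi builds
# dotted method names through _Dispatcher, which are then routed back through
# SessionBase.xenapi_request:
#
#   reset()                                   # populate the fake in-memory db
#   session = SessionBase('http://localhost')
#   session.xenapi.login_with_password('user', 'pass')
#   vm_refs = session.xenapi.VM.get_all()     # dispatched as xenapi_request('VM.get_all', ...)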
|
|
#!/usr/bin/env python
"""
Trolley syncs issues between CSV, Github, and Buffer with Trello.
"""
import csv
import datetime
import os
import random
import click
import click_config
import github3
from buffpy.api import API as BufferAPI
from buffpy.managers.profiles import Profiles
from buffpy.managers.updates import Updates
from trello import TrelloClient
__author__ = 'Jeff Triplett'
__copyright__ = 'Copyright 2015, Jeff Triplett'
__license__ = 'BSD'
__version__ = '0.1.6'
# hold auth state
_buffer_auth = None
_github_auth = None
_trello_auth = None
BUFFER_CLIENT_ID = os.environ.get('BUFFER_CLIENT_ID')
BUFFER_CLIENT_SECRET = os.environ.get('BUFFER_CLIENT_SECRET')
BUFFER_ACCESS_TOKEN = os.environ.get('BUFFER_ACCESS_TOKEN')
GITHUB_USERNAME = os.environ.get('GITHUB_USERNAME')
GITHUB_PASSWORD = os.environ.get('GITHUB_PASSWORD')
GITHUB_ORG = os.environ.get('GITHUB_ORG')
GITHUB_REPO = os.environ.get('GITHUB_REPO')
GITHUB_SCOPES = ['user', 'repo']
TRELLO_APP_KEY = os.environ.get('TRELLO_APP_KEY')
TRELLO_APP_SECRET = os.environ.get('TRELLO_APP_SECRET')
TRELLO_AUTH_TOKEN = os.environ.get('TRELLO_AUTH_TOKEN')
TRELLO_BOARD_ID = os.environ.get('TRELLO_BOARD_ID')
TRELLO_DEFAULT_LIST = os.environ.get('TRELLO_DEFAULT_LIST', 'Uncategorized')
# might migrate to:
# http://click.pocoo.org/4/options/#values-from-environment-variables
class config(object):
class buffer(object):
client_id = BUFFER_CLIENT_ID
client_secret = BUFFER_CLIENT_SECRET
access_token = BUFFER_ACCESS_TOKEN
class github(object):
username = GITHUB_USERNAME
password = GITHUB_PASSWORD
org = GITHUB_ORG
repo = GITHUB_REPO
class trello(object):
app_key = TRELLO_APP_KEY
app_secret = TRELLO_APP_SECRET
auth_token = TRELLO_AUTH_TOKEN
board_id = TRELLO_BOARD_ID
default_list = TRELLO_DEFAULT_LIST
# utils
def csv_to_dict_list(filename):
"""Open a CSV file and return a list of dict objects."""
with open(filename) as f:
values = list(csv.DictReader(f))
return values
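# Illustration (inferred from how rows are consumed below, not from the actual
# CSV files): csv.DictReader keys each row by the header row, so a row from
# etc/default_github_issues.csv would look roughly like
#
#   {'title': 'Set up CI', 'body': 'Add a build pipeline', 'labels': 'infra,ci'}
#
# where 'labels' may hold a single label or a comma-separated list.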
def get_random_color():
filename = 'etc/color-blind-safe.csv'
colors = csv_to_dict_list(filename)
index = random.randint(0, len(colors) - 1)
return colors[index]['color']
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
click.echo('version {}'.format(__version__))
ctx.exit()
# github utils
def get_github_auth(github_config):
"""Log me into github and return an object."""
global _github_auth
if _github_auth:
return _github_auth
assert github_config.username
assert github_config.password
_github_auth = github3.login(
github_config.username,
github_config.password)
return _github_auth
def get_github_repository(config, github_org, github_repo):
"""Return a repository object and log me in."""
github = get_github_auth(config.github)
repository = github.repository(github_org, github_repo)
return repository
def get_existing_github_issues(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
existing_issues = [str(item.title) for item in repository.iter_issues()]
return existing_issues
def get_existing_github_labels(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
existing_labels = [str(item.name) for item in repository.iter_labels()]
return existing_labels
def get_existing_github_milestones(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
existing_milestones = [str(item.title) for item in repository.iter_milestones()]
return existing_milestones
# github core
def close_existing_github_issues(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
issues = [str(issue.title) for issue in repository.iter_issues()]
click.echo('closing {} issues'.format(len(issues)))
for issue in repository.iter_issues():
click.echo('closing issue "{}"'.format(issue.title))
issue.close()
def create_github_issues(config, github_org, github_repo,
filename='etc/default_github_issues.csv'):
issues = csv_to_dict_list(filename)
repository = get_github_repository(config, github_org, github_repo)
existing_issues = get_existing_github_issues(config, github_org, github_repo)
click.echo('creating {} issues'.format(len(issues)))
for issue in issues:
title = str(issue['title'])
body = str(issue['body'])
labels = issue['labels']
if labels:
if ',' in labels:
labels = labels.split(',')
else:
labels = [labels]
if title not in existing_issues:
click.echo('creating issue "{}"'.format(title))
repository.create_issue(title, body, labels=labels)
else:
click.echo('issue "{}" already exists'.format(title))
def create_github_labels(config, github_org, github_repo,
filename='etc/default_github_labels.csv'):
labels = csv_to_dict_list(filename)
repository = get_github_repository(config, github_org, github_repo)
existing_labels = get_existing_github_labels(config, github_org, github_repo)
click.echo('creating {} labels'.format(len(labels)))
for label in labels:
name = str(label['name'])
color = str(label['color'])
if name not in existing_labels:
click.echo('creating label "{}"'.format(name))
if not len(color):
color = get_random_color()
repository.create_label(name, color)
else:
click.echo('label "{}" already exists'.format(name))
def create_github_milestones(config, github_org, github_repo,
filename='etc/default_github_milestones.csv'):
milestones = csv_to_dict_list(filename)
repository = get_github_repository(config, github_org, github_repo)
existing_milestones = get_existing_github_milestones(config, github_org, github_repo)
click.echo('creating {} milestones'.format(len(milestones)))
for milestone in milestones:
title = str(milestone['title'])
if title not in existing_milestones:
click.echo('creating milestone "{}"'.format(title))
repository.create_milestone(title)
else:
click.echo('milestone "{}" already exists'.format(title))
def delete_existing_github_labels(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
labels = [str(label.name) for label in repository.iter_labels()]
click.echo('removing {} labels'.format(len(labels)))
for label in labels:
click.echo('removing label "{}"'.format(label))
repository.label(label).delete()
def delete_existing_github_milestones(config, github_org, github_repo):
repository = get_github_repository(config, github_org, github_repo)
milestones = repository.iter_milestones()
click.echo('removing {} milestones'.format(len(list(milestones))))
for milestone in milestones:
click.echo('removing milestone "{}"'.format(milestone.title))
milestone.delete()
# trello utils
def get_trello_auth(trello_config):
"""Log me into trello and return an object."""
global _trello_auth
if _trello_auth:
return _trello_auth
assert trello_config.app_key
assert trello_config.app_secret
assert trello_config.auth_token
_trello_auth = TrelloClient(
api_key=trello_config.app_key,
api_secret=trello_config.app_secret,
token=trello_config.auth_token,
# token_secret=str(trello_config.auth_token),
)
return _trello_auth
def get_existing_trello_boards(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
boards = [str(board.name) for board in board.get_cards()]
return boards
def get_existing_trello_cards(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
cards = board.get_cards()
cards = [str(card.name) for card in cards]
return cards
def get_existing_trello_labels(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
labels = board.get_labels()
labels = [label for label in labels]
return labels
def get_existing_trello_lists(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
all_lists = board.all_lists()
all_lists = [item.name for item in all_lists]
return all_lists
def get_trello_list_lookup(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(trello_board_id)
all_lists = board.all_lists()
list_lookup = {}
for item in all_lists:
id = item.id
name = item.name
list_lookup[name] = id
list_lookup[id] = name
default_list = config.trello.default_list
if default_list not in list_lookup:
new_list = board.add_list(default_list)
new_list_id = new_list.id
list_lookup[default_list] = new_list_id
list_lookup[new_list_id] = default_list
return list_lookup
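# Sketch of the returned structure (not part of the original code; the id is a
# placeholder): the lookup maps both directions, so either a list name or a
# list id resolves through the same dict, and the configured default list is
# created on the board if it is missing:
#
#   {'Uncategorized': '<list id>', '<list id>': 'Uncategorized', ...}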
# trello core
def create_trello_cards(config, trello_board_id,
filename='etc/default_trello_cards.csv'):
cards = csv_to_dict_list(filename)
trello = get_trello_auth(config.trello)
existing_cards = get_existing_trello_cards(config, trello_board_id)
board_lookup = get_trello_list_lookup(config, trello_board_id)
category = board_lookup[config.trello.default_list]
board = trello.get_board(trello_board_id)
click.echo('creating {} cards'.format(len(cards)))
for card in cards:
name = str(card.get('title', ''))
description = str(card.get('body', ''))
labels = card.get('labels', [])
if labels:
if ',' in labels:
labels = labels.split(',')
else:
labels = [labels]
if name not in existing_cards:
click.echo('creating issue "{}"'.format(name))
list_item = board.get_list(category)
new_card = list_item.add_card(name, description, labels=labels)
'''
# currently labels are broken in the trello python client :/
if len(labels):
for label in labels:
trello.cards.new_label(new_card['id'], label)
'''
else:
click.echo('issue "{}" already exists'.format(name))
def create_trello_labels(config, trello_board_id,
filename='etc/default_trello_labels.csv'):
labels = csv_to_dict_list(filename)
existing_labels = get_existing_trello_labels(config, trello_board_id)
click.echo('creating {} labels'.format(len(labels)))
for label in labels:
name = str(label['name'])
color = str(label['color'])
if name not in existing_labels:
click.echo('creating label "{}"'.format(name))
if not len(color):
color = get_random_color()
# TODO: Create Trello label via API
#repository.create_label(name, color)
else:
click.echo('label "{}" already exists'.format(name))
def create_trello_lists(config, trello_board_id,
filename='etc/default_trello_lists.csv'):
lists = csv_to_dict_list(filename)
trello = get_trello_auth(config.trello)
existing_lists = get_existing_trello_lists(config, trello_board_id)
click.echo('creating {} lists'.format(len(lists)))
for item in lists:
title = str(item['title'])
if title not in existing_lists:
click.echo('creating list "{}"'.format(title))
trello.boards.new_list(trello_board_id, title)
else:
click.echo('list "{}" already exists'.format(title))
def list_trello_boards(config):
trello = get_trello_auth(config.trello)
boards = trello.list_boards()
for board in boards:
click.echo('{0}: {1}{2}'.format(
board.id,
board.name,
' (closed)' if board.closed else ''
))
def list_trello_organizations(config):
trello = get_trello_auth(config.trello)
organizations = trello.list_organizations()
for organization in organizations:
click.echo('{0}: {1}'.format(
organization.id,
organization.name
))
# sync github and trello
def sync_github_issues_to_trello_cards(config, github_org, github_repo,
trello_board_id):
trello = get_trello_auth(config.trello)
board_lookup = get_trello_list_lookup(config, trello_board_id)
existing_trello_cards = get_existing_trello_cards(config, trello_board_id)
repository = get_github_repository(config, github_org, github_repo)
issues = repository.iter_issues()
#click.echo('creating {} issues'.format(issues.count))
for issue in issues:
title = issue.title
desc = issue.body
category = board_lookup[config.trello.default_list]
if title not in existing_trello_cards:
click.echo('creating issue "{}"'.format(title))
trello.cards.new(title, category, desc=desc)
else:
click.echo('issue "{}" already exists'.format(title))
def sync_trello_cards_to_github_issues(config, trello_board_id, github_org, github_repo):
trello = get_trello_auth(config.trello)
existing_github_issues = get_existing_github_issues(config, github_org, github_repo)
repository = get_github_repository(config, github_org, github_repo)
board = trello.get_board(trello_board_id)
cards = board.all_cards()
click.echo('creating {} cards'.format(len(cards)))
for card in cards:
name = card.name
# id = card['id']
# list_id = card['idList']
description = card.description
labels = card.labels
if name not in existing_github_issues:
click.echo('creating card "{}"'.format(name))
repository.create_issue(name, description, labels=labels)
else:
click.echo('card "{}" already exists'.format(name))
def list_trello_cards(config, trello_board_id):
trello = get_trello_auth(config.trello)
board = trello.get_board(config.trello.board_id)
cards = [card for card in board.open_cards()]
for card in cards:
name = card.name
card_id = card.id
description = card.description
click.echo('{0}: {1}'.format(card_id, name))
if len(description):
click.echo(description)
def get_buffer_auth(buffer_config):
"""Log me into buffer and return an object."""
global _buffer_auth
if _buffer_auth:
return _buffer_auth
assert buffer_config.client_id
assert buffer_config.client_secret
assert buffer_config.access_token
_buffer_auth = BufferAPI(
client_id=buffer_config.client_id,
client_secret=buffer_config.client_secret,
access_token=buffer_config.access_token,
)
return _buffer_auth
def test_buffer(config):
client = get_buffer_auth(config.buffer)
profiles = Profiles(api=client).filter(service='twitter')
if not len(profiles):
raise Exception('Your twitter account is not configured')
profile = profiles[0]
click.echo(profile)
click.echo('')
pending = profile.updates.pending
for item in pending:
click.echo(item)
click.echo(item.id)
click.echo(item.text)
click.echo(item.scheduled_at)
click.echo(datetime.datetime.fromtimestamp(item.scheduled_at))
# cli methods we are exposing to be used via terminal
@click.group()
@click_config.wrap(module=config, sections=('github', 'trello'))
@click.option('--version', is_flag=True, callback=print_version,
expose_value=False, is_eager=True)
def cli():
assert config.buffer
pass
@cli.command('bootstrap')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_bootstrap(github_org, github_repo):
"""Sets up github with some sensible defaults."""
delete_existing_github_labels(
config,
github_org or config.github.org,
github_repo or config.github.repo)
create_github_labels(
config,
github_org or config.github.org,
github_repo or config.github.repo)
create_github_issues(
config,
github_org or config.github.org,
github_repo or config.github.repo)
create_github_milestones(
config,
github_org or config.github.org,
github_repo or config.github.repo)
@cli.command('close_existing_github_issues')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_close_existing_github_issues(force, github_org, github_repo):
"""Close all existing GitHub issues."""
message = 'Do you really want to close all of your existing GitHub issues?'
if force or click.confirm(message):
close_existing_github_issues(
config,
github_org or config.github.org,
github_repo or config.github.repo)
else:
click.echo('Action aborted')
@cli.command('create_github_issues')
@click.option('--filename', default='etc/default_github_issues.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_issues(filename, github_org, github_repo):
"""Create GitHub issues from a CSV file."""
create_github_issues(
config,
github_org or config.github.org,
github_repo or config.github.repo,
filename)
@cli.command('create_github_labels')
@click.option('--filename', default='etc/default_github_labels.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_labels(filename, github_org, github_repo):
"""Create GitHub labels from a CSV file."""
create_github_labels(
config,
github_org or config.github.org,
github_repo or config.github.repo,
filename)
@cli.command('create_github_milestones')
@click.option('--filename', default='etc/default_github_milestones.csv')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_create_github_milestones(filename, github_org, github_repo):
"""Create GitHub milestones from a CSV file."""
create_github_milestones(
config,
github_org or config.github.org,
github_repo or config.github.repo,
filename)
@cli.command('create_trello_cards')
@click.option('--filename', default='etc/default_trello_cards.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_cards(filename, trello_board):
"""Create Trello cards from a CSV file."""
create_trello_cards(
config,
trello_board or config.trello.board_id,
filename)
@cli.command('create_trello_labels')
@click.option('--filename', default='etc/default_trello_labels.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_labels(filename, trello_board):
"""Create Trello labels from a CSV file."""
create_trello_labels(
config,
trello_board or config.trello.board_id,
filename)
@cli.command('create_trello_lists')
@click.option('--filename', default='etc/default_trello_lists.csv')
@click.option('--trello-board', type=str)
def cli_create_trello_lists(filename, trello_board):
"""Create Trello lists from a CSV file."""
create_trello_lists(
config,
trello_board or config.trello.board_id,
filename)
@cli.command('delete_existing_github_labels')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_delete_existing_github_labels(force, github_org, github_repo):
"""Delete labels from GitHub repo."""
message = 'Do you really want to delete all of the existing GitHub labels?'
if force or click.confirm(message):
delete_existing_github_labels(
config,
github_org or config.github.org,
github_repo or config.github.repo)
else:
click.echo('Action aborted')
@cli.command('delete_existing_github_milestones')
@click.option('--force/--no-force', default=False)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_delete_existing_github_milestones(force, github_org, github_repo):
"""Delete milestones from GitHub repo."""
message = 'Do you really want to delete all of the existing GitHub milestones?'
if force or click.confirm(message):
delete_existing_github_milestones(
config,
github_org or config.github.org,
github_repo or config.github.repo)
else:
click.echo('Action aborted')
@cli.command('sync_github_issues_to_trello_cards')
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
@click.option('--trello-board', type=str)
def cli_sync_github_issues_to_trello_cards(github_org, github_repo, trello_board):
"""Convert your GitHub issues to Trello cards."""
sync_github_issues_to_trello_cards(
config,
github_org or config.github.org,
github_repo or config.github.repo,
trello_board or config.trello.board_id)
@cli.command('sync_trello_cards_to_github_issues')
@click.option('--trello-board', type=str)
@click.option('--github-org', type=str)
@click.option('--github-repo', type=str)
def cli_sync_trello_cards_to_github_issues(trello_board, github_org, github_repo):
"""Convert your Trello cards to GitHub issues."""
sync_trello_cards_to_github_issues(
config,
trello_board or config.trello.board_id,
github_org or config.github.org,
github_repo or config.github.repo)
@cli.command('list_trello_boards')
def cli_list_trello_boards():
"""List your Trello boards."""
list_trello_boards(config)
@cli.command('list_trello_cards')
@click.option('--trello-board', type=str)
def cli_list_trello_cards(trello_board):
"""List your Trello cards for a given board."""
list_trello_cards(
config,
trello_board or config.trello.board_id)
@cli.command('list_trello_organizations')
def cli_list_trello_organizations():
"""List your Trello organizations."""
list_trello_organizations(config)
@cli.command('test_buffer')
def cli_test_buffer():
"""Convert your Trello cards to GitHub issues."""
try:
test_buffer(config)
except Exception as e:
click.echo(e)
if __name__ == '__main__':
cli()
|
|
from __future__ import unicode_literals
import hashlib
import json
import time
import warnings
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import (
authenticate,
get_backends,
login as django_login,
logout as django_logout,
)
from django.contrib.auth.models import AbstractUser
from django.contrib.auth.password_validation import validate_password
from django.contrib.sites.shortcuts import get_current_site
from django.core.cache import cache
from django.core.mail import EmailMessage, EmailMultiAlternatives
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import resolve_url
from django.template import TemplateDoesNotExist
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from allauth.compat import force_str, ugettext_lazy as _
from ..utils import (
build_absolute_uri,
email_address_exists,
generate_unique_username,
get_user_model,
import_attribute,
)
from . import app_settings
class DefaultAccountAdapter(object):
error_messages = {
'username_blacklisted':
_('Username can not be used. Please use other username.'),
'username_taken':
AbstractUser._meta.get_field('username').error_messages['unique'],
'too_many_login_attempts':
_('Too many failed login attempts. Try again later.'),
'email_taken':
_("A user is already registered with this e-mail address."),
}
def __init__(self, request=None):
self.request = request
def stash_verified_email(self, request, email):
request.session['account_verified_email'] = email
def unstash_verified_email(self, request):
ret = request.session.get('account_verified_email')
request.session['account_verified_email'] = None
return ret
def stash_user(self, request, user):
request.session['account_user'] = user
def unstash_user(self, request):
return request.session.pop('account_user', None)
def is_email_verified(self, request, email):
"""
Checks whether or not the email address is already verified
beyond allauth scope, for example, by having accepted an
invitation before signing up.
"""
ret = False
verified_email = request.session.get('account_verified_email')
if verified_email:
ret = verified_email.lower() == email.lower()
return ret
def format_email_subject(self, subject):
prefix = app_settings.EMAIL_SUBJECT_PREFIX
if prefix is None:
site = get_current_site(self.request)
prefix = "[{name}] ".format(name=site.name)
return prefix + force_str(subject)
def get_from_email(self):
"""
        This is a hook that can be overridden to programmatically
set the 'from' email address for sending emails
"""
return settings.DEFAULT_FROM_EMAIL
def render_mail(self, template_prefix, email, context):
"""
Renders an e-mail to `email`. `template_prefix` identifies the
e-mail that is to be sent, e.g. "account/email/email_confirmation"
"""
subject = render_to_string('{0}_subject.txt'.format(template_prefix),
context)
# remove superfluous line breaks
subject = " ".join(subject.splitlines()).strip()
subject = self.format_email_subject(subject)
from_email = self.get_from_email()
bodies = {}
for ext in ['html', 'txt']:
try:
template_name = '{0}_message.{1}'.format(template_prefix, ext)
bodies[ext] = render_to_string(template_name,
context).strip()
except TemplateDoesNotExist:
if ext == 'txt' and not bodies:
# We need at least one body
raise
if 'txt' in bodies:
msg = EmailMultiAlternatives(subject,
bodies['txt'],
from_email,
[email])
if 'html' in bodies:
msg.attach_alternative(bodies['html'], 'text/html')
else:
msg = EmailMessage(subject,
bodies['html'],
from_email,
[email])
msg.content_subtype = 'html' # Main content is now text/html
return msg
def send_mail(self, template_prefix, email, context):
msg = self.render_mail(template_prefix, email, context)
msg.send()
def get_login_redirect_url(self, request):
"""
Returns the default URL to redirect to after logging in. Note
that URLs passed explicitly (e.g. by passing along a `next`
GET parameter) take precedence over the value returned here.
"""
assert request.user.is_authenticated
url = getattr(settings, "LOGIN_REDIRECT_URLNAME", None)
if url:
warnings.warn("LOGIN_REDIRECT_URLNAME is deprecated, simply"
" use LOGIN_REDIRECT_URL with a URL name",
DeprecationWarning)
else:
url = settings.LOGIN_REDIRECT_URL
return resolve_url(url)
def get_logout_redirect_url(self, request):
"""
Returns the URL to redirect to after the user logs out. Note that
        this method is also invoked if you attempt to log out while no user
is logged in. Therefore, request.user is not guaranteed to be an
authenticated user.
"""
return resolve_url(app_settings.LOGOUT_REDIRECT_URL)
def get_email_confirmation_redirect_url(self, request):
"""
The URL to return to after successful e-mail confirmation.
"""
if request.user.is_authenticated:
if app_settings.EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL:
return \
app_settings.EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL
else:
return self.get_login_redirect_url(request)
else:
return app_settings.EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL
def is_open_for_signup(self, request):
"""
Checks whether or not the site is open for signups.
        Besides simply returning True/False, you can also intervene in the
regular flow by raising an ImmediateHttpResponse
"""
return True
def new_user(self, request):
"""
Instantiates a new User instance.
"""
user = get_user_model()()
return user
def populate_username(self, request, user):
"""
Fills in a valid username, if required and missing. If the
username is already present it is assumed to be valid
(unique).
"""
from .utils import user_username, user_email, user_field
first_name = user_field(user, 'first_name')
last_name = user_field(user, 'last_name')
email = user_email(user)
username = user_username(user)
if app_settings.USER_MODEL_USERNAME_FIELD:
user_username(
user,
username or self.generate_unique_username([
first_name,
last_name,
email,
username,
'user']))
def generate_unique_username(self, txts, regex=None):
return generate_unique_username(txts, regex)
def save_user(self, request, user, form, commit=True):
"""
Saves a new `User` instance using information provided in the
signup form.
"""
from .utils import user_username, user_email, user_field
data = form.cleaned_data
first_name = data.get('first_name')
last_name = data.get('last_name')
email = data.get('email')
username = data.get('username')
user_email(user, email)
user_username(user, username)
if first_name:
user_field(user, 'first_name', first_name)
if last_name:
user_field(user, 'last_name', last_name)
if 'password1' in data:
user.set_password(data["password1"])
else:
user.set_unusable_password()
self.populate_username(request, user)
if commit:
            # The ability not to commit makes it easier to derive from this
            # adapter and add extra processing before saving.
user.save()
return user
def clean_username(self, username, shallow=False):
"""
Validates the username. You can hook into this if you want to
(dynamically) restrict what usernames can be chosen.
"""
for validator in app_settings.USERNAME_VALIDATORS:
validator(username)
# TODO: Add regexp support to USERNAME_BLACKLIST
username_blacklist_lower = [ub.lower()
for ub in app_settings.USERNAME_BLACKLIST]
if username.lower() in username_blacklist_lower:
raise forms.ValidationError(
self.error_messages['username_blacklisted'])
# Skipping database lookups when shallow is True, needed for unique
# username generation.
if not shallow:
from .utils import filter_users_by_username
if filter_users_by_username(username).exists():
user_model = get_user_model()
username_field = app_settings.USER_MODEL_USERNAME_FIELD
error_message = user_model._meta.get_field(
username_field).error_messages.get('unique')
if not error_message:
error_message = self.error_messages['username_taken']
raise forms.ValidationError(
error_message,
params={
'model_name': user_model.__name__,
'field_label': username_field,
}
)
return username
def clean_email(self, email):
"""
Validates an email value. You can hook into this if you want to
(dynamically) restrict what email addresses can be chosen.
"""
return email
def clean_password(self, password, user=None):
"""
Validates a password. You can hook into this if you want to
        restrict the allowed password choices.
"""
min_length = app_settings.PASSWORD_MIN_LENGTH
if min_length and len(password) < min_length:
raise forms.ValidationError(_("Password must be a minimum of {0} "
"characters.").format(min_length))
validate_password(password, user)
return password
def validate_unique_email(self, email):
if email_address_exists(email):
raise forms.ValidationError(self.error_messages['email_taken'])
return email
def add_message(self, request, level, message_template,
message_context=None, extra_tags=''):
"""
        Wrapper of `django.contrib.messages.add_message` that reads
the message text from a template.
"""
if 'django.contrib.messages' in settings.INSTALLED_APPS:
try:
if message_context is None:
message_context = {}
message = render_to_string(message_template,
message_context).strip()
if message:
messages.add_message(request, level, message,
extra_tags=extra_tags)
except TemplateDoesNotExist:
pass
def ajax_response(self, request, response, redirect_to=None, form=None,
data=None):
resp = {}
status = response.status_code
if redirect_to:
status = 200
resp['location'] = redirect_to
if form:
if request.method == 'POST':
if form.is_valid():
status = 200
else:
status = 400
else:
status = 200
resp['form'] = self.ajax_response_form(form)
if hasattr(response, 'render'):
response.render()
resp['html'] = response.content.decode('utf8')
if data is not None:
resp['data'] = data
return HttpResponse(json.dumps(resp),
status=status,
content_type='application/json')
def ajax_response_form(self, form):
form_spec = {
'fields': {},
'field_order': [],
'errors': form.non_field_errors()
}
for field in form:
field_spec = {
'label': force_str(field.label),
'value': field.value(),
'help_text': force_str(field.help_text),
'errors': [
force_str(e) for e in field.errors
],
'widget': {
'attrs': {
k: force_str(v)
for k, v in field.field.widget.attrs.items()
}
}
}
form_spec['fields'][field.html_name] = field_spec
form_spec['field_order'].append(field.html_name)
return form_spec
def login(self, request, user):
# HACK: This is not nice. The proper Django way is to use an
# authentication backend
if not hasattr(user, 'backend'):
from .auth_backends import AuthenticationBackend
backends = get_backends()
backend = None
for b in backends:
if isinstance(b, AuthenticationBackend):
# prefer our own backend
backend = b
break
elif not backend and hasattr(b, 'get_user'):
                    # Pick the first valid one
backend = b
backend_path = '.'.join([backend.__module__,
backend.__class__.__name__])
user.backend = backend_path
django_login(request, user)
def logout(self, request):
django_logout(request)
def confirm_email(self, request, email_address):
"""
Marks the email address as confirmed on the db
"""
email_address.verified = True
email_address.set_as_primary(conditional=True)
email_address.save()
def set_password(self, user, password):
user.set_password(password)
user.save()
def get_user_search_fields(self):
user = get_user_model()()
return filter(lambda a: a and hasattr(user, a),
[app_settings.USER_MODEL_USERNAME_FIELD,
'first_name', 'last_name', 'email'])
def is_safe_url(self, url):
from django.utils.http import is_safe_url
return is_safe_url(url, allowed_hosts=None)
def get_email_confirmation_url(self, request, emailconfirmation):
"""Constructs the email confirmation (activation) url.
Note that if you have architected your system such that email
confirmations are sent outside of the request context `request`
can be `None` here.
"""
url = reverse(
"account_confirm_email",
args=[emailconfirmation.key])
ret = build_absolute_uri(
request,
url)
return ret
def send_confirmation_mail(self, request, emailconfirmation, signup):
current_site = get_current_site(request)
activate_url = self.get_email_confirmation_url(
request,
emailconfirmation)
ctx = {
"user": emailconfirmation.email_address.user,
"activate_url": activate_url,
"current_site": current_site,
"key": emailconfirmation.key,
}
if signup:
email_template = 'account/email/email_confirmation_signup'
else:
email_template = 'account/email/email_confirmation'
self.send_mail(email_template,
emailconfirmation.email_address.email,
ctx)
def respond_user_inactive(self, request, user):
return HttpResponseRedirect(
reverse('account_inactive'))
def respond_email_verification_sent(self, request, user):
return HttpResponseRedirect(
reverse('account_email_verification_sent'))
def _get_login_attempts_cache_key(self, request, **credentials):
site = get_current_site(request)
login = credentials.get('email', credentials.get('username', ''))
login_key = hashlib.sha256(login.encode('utf8')).hexdigest()
return 'allauth/login_attempts@{site_id}:{login}'.format(
site_id=site.pk,
login=login_key)
def pre_authenticate(self, request, **credentials):
if app_settings.LOGIN_ATTEMPTS_LIMIT:
cache_key = self._get_login_attempts_cache_key(
request, **credentials)
login_data = cache.get(cache_key, None)
if login_data:
dt = timezone.now()
current_attempt_time = time.mktime(dt.timetuple())
if (len(login_data) >= app_settings.LOGIN_ATTEMPTS_LIMIT and
current_attempt_time < (
login_data[-1] +
app_settings.LOGIN_ATTEMPTS_TIMEOUT)):
raise forms.ValidationError(
self.error_messages['too_many_login_attempts'])
def authenticate(self, request, **credentials):
"""Only authenticates, does not actually login. See `login`"""
from allauth.account.auth_backends import AuthenticationBackend
self.pre_authenticate(request, **credentials)
AuthenticationBackend.unstash_authenticated_user()
user = authenticate(request, **credentials)
alt_user = AuthenticationBackend.unstash_authenticated_user()
user = user or alt_user
if user and app_settings.LOGIN_ATTEMPTS_LIMIT:
cache_key = self._get_login_attempts_cache_key(
request, **credentials)
cache.delete(cache_key)
else:
self.authentication_failed(request, **credentials)
return user
def authentication_failed(self, request, **credentials):
if app_settings.LOGIN_ATTEMPTS_LIMIT:
cache_key = self._get_login_attempts_cache_key(
request, **credentials
)
data = cache.get(cache_key, [])
dt = timezone.now()
data.append(time.mktime(dt.timetuple()))
cache.set(cache_key, data, app_settings.LOGIN_ATTEMPTS_TIMEOUT)
def is_ajax(self, request):
return request.is_ajax()
def get_adapter(request=None):
return import_attribute(app_settings.ADAPTER)(request)
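# Illustrative sketch, not part of allauth itself: a minimal custom adapter
# overriding two of the hooks documented above. The class name is hypothetical;
# a project would normally define it in its own module and point the
# ACCOUNT_ADAPTER setting at it so that get_adapter() picks it up.
class ExampleClosedSignupAdapter(DefaultAccountAdapter):
    """Hypothetical adapter that closes signups and pins the e-mail subject prefix."""

    def is_open_for_signup(self, request):
        # Reject every signup attempt, regardless of the request contents.
        return False

    def format_email_subject(self, subject):
        # Use a fixed prefix instead of deriving one from the current Site.
        return '[example.com] ' + force_str(subject)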
|
|
"""
The MIT License
Copyright (c) 2007 Leah Culver
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import cgi
import urllib
import time
import random
import urlparse
import hmac
import binascii
VERSION = '1.0' # Hi Blaine!
HTTP_METHOD = 'GET'
SIGNATURE_METHOD = 'PLAINTEXT'
class OAuthError(RuntimeError):
"""Generic exception class."""
def __str__(self):
if not self.args:
message = ''
else:
message = self.args[0]
return '%s(%s)' % (self.__class__.__name__, message)
def build_authenticate_header(realm=''):
"""Optional WWW-Authenticate header (401 error)"""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def escape(s):
"""Escape a URL including any /."""
return urllib.quote(s, safe='~&')
def _utf8_str(s):
"""Convert unicode to utf-8."""
if isinstance(s, unicode):
return s.encode("utf-8")
else:
return str(s)
def generate_timestamp():
"""Get seconds since epoch (UTC)."""
return int(time.time())
def generate_nonce(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
def generate_verifier(length=8):
"""Generate pseudorandom number."""
return ''.join([str(random.randint(0, 9)) for i in range(length)])
class OAuthConsumer(object):
"""Consumer of OAuth authentication.
OAuthConsumer is a data type that represents the identity of the Consumer
via its shared secret with the Service Provider.
"""
key = None
secret = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
class OAuthToken(object):
"""OAuthToken is a data type that represents an End User via either an access
or request token.
key -- the token
secret -- the token secret
"""
key = None
secret = None
callback = None
callback_confirmed = None
verifier = None
def __init__(self, key, secret):
self.key = key
self.secret = secret
def set_callback(self, callback):
self.callback = callback
self.callback_confirmed = 'true'
def set_verifier(self, verifier=None):
if verifier is not None:
self.verifier = verifier
else:
self.verifier = generate_verifier()
def get_callback_url(self):
if self.callback and self.verifier:
# Append the oauth_verifier.
parts = urlparse.urlparse(self.callback)
scheme, netloc, path, params, query, fragment = parts[:6]
if query:
query = '%s&oauth_verifier=%s' % (query, self.verifier)
else:
query = 'oauth_verifier=%s' % self.verifier
return urlparse.urlunparse((scheme, netloc, path, params,
query, fragment))
return self.callback
def to_string(self):
data = {
'oauth_token': self.key,
'oauth_token_secret': self.secret,
}
if self.callback_confirmed is not None:
data['oauth_callback_confirmed'] = self.callback_confirmed
return urllib.urlencode(data)
def from_string(s):
""" Returns a token from something like:
oauth_token_secret=xxx&oauth_token=xxx
"""
params = cgi.parse_qs(s, keep_blank_values=False)
key = params['oauth_token'][0]
secret = params['oauth_token_secret'][0]
token = OAuthToken(key, secret)
try:
token.callback_confirmed = params['oauth_callback_confirmed'][0]
except KeyError:
pass # 1.0, no callback confirmed.
return token
from_string = staticmethod(from_string)
def __str__(self):
return self.to_string()
class OAuthRequest(object):
"""OAuthRequest represents the request and can be serialized.
OAuth parameters:
- oauth_consumer_key
- oauth_token
- oauth_signature_method
- oauth_signature
- oauth_timestamp
- oauth_nonce
- oauth_version
- oauth_verifier
... any additional parameters, as defined by the Service Provider.
"""
parameters = None # OAuth parameters.
http_method = HTTP_METHOD
http_url = None
version = VERSION
def __init__(self, http_method=HTTP_METHOD, http_url=None, parameters=None):
self.http_method = http_method
self.http_url = http_url
self.parameters = parameters or {}
def set_parameter(self, parameter, value):
self.parameters[parameter] = value
def get_parameter(self, parameter):
try:
return self.parameters[parameter]
except:
raise OAuthError('Parameter not found: %s' % parameter)
def _get_timestamp_nonce(self):
return self.get_parameter('oauth_timestamp'), self.get_parameter(
'oauth_nonce')
def get_nonoauth_parameters(self):
"""Get any non-OAuth parameters."""
parameters = {}
for k, v in self.parameters.iteritems():
# Ignore oauth parameters.
if k.find('oauth_') < 0:
parameters[k] = v
return parameters
def to_header(self, realm=''):
"""Serialize as a header for an HTTPAuth request."""
auth_header = 'OAuth realm="%s"' % realm
# Add the oauth parameters.
if self.parameters:
for k, v in self.parameters.iteritems():
if k[:6] == 'oauth_':
auth_header += ', %s="%s"' % (k, escape(str(v)))
return {'Authorization': auth_header}
def to_postdata(self):
"""Serialize as post data for a POST request."""
return '&'.join(['%s=%s' % (escape(str(k)), escape(str(v))) \
for k, v in self.parameters.iteritems()])
def to_url(self):
"""Serialize as a URL for a GET request."""
return '%s?%s' % (self.get_normalized_http_url(), self.to_postdata())
def get_normalized_parameters(self):
"""Return a string that contains the parameters that must be signed."""
params = self.parameters
try:
# Exclude the signature if it exists.
del params['oauth_signature']
except:
pass
# Escape key values before sorting.
key_values = [(escape(_utf8_str(k)), escape(_utf8_str(v))) \
for k,v in params.items()]
# Sort lexicographically, first after key, then after value.
key_values.sort()
# Combine key value pairs into a string.
return '&'.join(['%s=%s' % (k, v) for k, v in key_values])
def get_normalized_http_method(self):
"""Uppercases the http method."""
return self.http_method.upper()
def get_normalized_http_url(self):
"""Parses the URL and rebuilds it to be scheme://host/path."""
parts = urlparse.urlparse(self.http_url)
scheme, netloc, path = parts[:3]
# Exclude default port numbers.
if scheme == 'http' and netloc[-3:] == ':80':
netloc = netloc[:-3]
elif scheme == 'https' and netloc[-4:] == ':443':
netloc = netloc[:-4]
return '%s://%s%s' % (scheme, netloc, path)
def sign_request(self, signature_method, consumer, token):
"""Set the signature parameter to the result of build_signature."""
# Set the signature method.
self.set_parameter('oauth_signature_method',
signature_method.get_name())
# Set the signature.
self.set_parameter('oauth_signature',
self.build_signature(signature_method, consumer, token))
def build_signature(self, signature_method, consumer, token):
"""Calls the build signature method within the signature method."""
return signature_method.build_signature(self, consumer, token)
def from_request(http_method, http_url, headers=None, parameters=None,
query_string=None):
"""Combines multiple parameter sources."""
if parameters is None:
parameters = {}
# Headers
if headers and 'Authorization' in headers:
auth_header = headers['Authorization']
# Check that the authorization header is OAuth.
if auth_header[:6] == 'OAuth ':
auth_header = auth_header[6:]
try:
# Get the parameters from the header.
header_params = OAuthRequest._split_header(auth_header)
parameters.update(header_params)
except:
raise OAuthError('Unable to parse OAuth parameters from '
'Authorization header.')
# GET or POST query string.
if query_string:
query_params = OAuthRequest._split_url_string(query_string)
parameters.update(query_params)
# URL parameters.
param_str = urlparse.urlparse(http_url)[4] # query
url_params = OAuthRequest._split_url_string(param_str)
parameters.update(url_params)
if parameters:
return OAuthRequest(http_method, http_url, parameters)
return None
from_request = staticmethod(from_request)
def from_consumer_and_token(oauth_consumer, token=None,
callback=None, verifier=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
defaults = {
'oauth_consumer_key': oauth_consumer.key,
'oauth_timestamp': generate_timestamp(),
'oauth_nonce': generate_nonce(),
'oauth_version': OAuthRequest.version,
}
defaults.update(parameters)
parameters = defaults
if token:
parameters['oauth_token'] = token.key
if token.callback:
parameters['oauth_callback'] = token.callback
# 1.0a support for verifier.
if verifier:
parameters['oauth_verifier'] = verifier
elif callback:
# 1.0a support for callback in the request token request.
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_consumer_and_token = staticmethod(from_consumer_and_token)
def from_token_and_callback(token, callback=None, http_method=HTTP_METHOD,
http_url=None, parameters=None):
if not parameters:
parameters = {}
parameters['oauth_token'] = token.key
if callback:
parameters['oauth_callback'] = callback
return OAuthRequest(http_method, http_url, parameters)
from_token_and_callback = staticmethod(from_token_and_callback)
def _split_header(header):
"""Turn Authorization: header into parameters."""
params = {}
parts = header.split(',')
for param in parts:
# Ignore realm parameter.
if param.find('realm') > -1:
continue
# Remove whitespace.
param = param.strip()
# Split key-value.
param_parts = param.split('=', 1)
# Remove quotes and unescape the value.
params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"'))
return params
_split_header = staticmethod(_split_header)
def _split_url_string(param_str):
"""Turn URL string into parameters."""
parameters = cgi.parse_qs(param_str, keep_blank_values=False)
for k, v in parameters.iteritems():
parameters[k] = urllib.unquote(v[0])
return parameters
_split_url_string = staticmethod(_split_url_string)
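# Illustrative sketch, not part of the original library: on the server side an
# incoming request is usually reconstructed with from_request() before being
# handed to OAuthServer.verify_request(). The header and URL below are
# placeholders; the helper is defined but never called here.
def _example_parse_incoming_request():
    headers = {'Authorization': 'OAuth realm="http://example.com/", '
                                'oauth_consumer_key="example-consumer-key"'}
    return OAuthRequest.from_request(
        'GET', 'http://example.com/photos?file=vacation.jpg', headers=headers)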
class OAuthServer(object):
"""A worker to check the validity of a request against a data store."""
timestamp_threshold = 300 # In seconds, five minutes.
version = VERSION
signature_methods = None
data_store = None
def __init__(self, data_store=None, signature_methods=None):
self.data_store = data_store
self.signature_methods = signature_methods or {}
def set_data_store(self, data_store):
self.data_store = data_store
def get_data_store(self):
return self.data_store
def add_signature_method(self, signature_method):
self.signature_methods[signature_method.get_name()] = signature_method
return self.signature_methods
def fetch_request_token(self, oauth_request):
"""Processes a request_token request and returns the
request token on success.
"""
try:
# Get the request token for authorization.
token = self._get_token(oauth_request, 'request')
except OAuthError:
# No token required for the initial token request.
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
callback = self.get_callback(oauth_request)
except OAuthError:
callback = None # 1.0, no callback specified.
self._check_signature(oauth_request, consumer, None)
# Fetch a new token.
token = self.data_store.fetch_request_token(consumer, callback)
return token
def fetch_access_token(self, oauth_request):
"""Processes an access_token request and returns the
access token on success.
"""
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
try:
verifier = self._get_verifier(oauth_request)
except OAuthError:
verifier = None
# Get the request token.
token = self._get_token(oauth_request, 'request')
self._check_signature(oauth_request, consumer, token)
new_token = self.data_store.fetch_access_token(consumer, token, verifier)
return new_token
def verify_request(self, oauth_request):
"""Verifies an api call and checks all the parameters."""
# -> consumer and token
version = self._get_version(oauth_request)
consumer = self._get_consumer(oauth_request)
# Get the access token.
token = self._get_token(oauth_request, 'access')
self._check_signature(oauth_request, consumer, token)
parameters = oauth_request.get_nonoauth_parameters()
return consumer, token, parameters
def authorize_token(self, token, user):
"""Authorize a request token."""
return self.data_store.authorize_request_token(token, user)
def get_callback(self, oauth_request):
"""Get the callback URL."""
return oauth_request.get_parameter('oauth_callback')
def build_authenticate_header(self, realm=''):
"""Optional support for the authenticate header."""
return {'WWW-Authenticate': 'OAuth realm="%s"' % realm}
def _get_version(self, oauth_request):
"""Verify the correct version request for this server."""
try:
version = oauth_request.get_parameter('oauth_version')
except:
version = VERSION
if version and version != self.version:
raise OAuthError('OAuth version %s not supported.' % str(version))
return version
def _get_signature_method(self, oauth_request):
"""Figure out the signature with some defaults."""
try:
signature_method = oauth_request.get_parameter(
'oauth_signature_method')
except:
signature_method = SIGNATURE_METHOD
try:
# Get the signature method object.
signature_method = self.signature_methods[signature_method]
except:
signature_method_names = ', '.join(self.signature_methods.keys())
raise OAuthError('Signature method %s not supported try one of the '
'following: %s' % (signature_method, signature_method_names))
return signature_method
def _get_consumer(self, oauth_request):
consumer_key = oauth_request.get_parameter('oauth_consumer_key')
consumer = self.data_store.lookup_consumer(consumer_key)
if not consumer:
raise OAuthError('Invalid consumer.')
return consumer
def _get_token(self, oauth_request, token_type='access'):
"""Try to find the token for the provided request token key."""
token_field = oauth_request.get_parameter('oauth_token')
token = self.data_store.lookup_token(token_type, token_field)
if not token:
raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
return token
def _get_verifier(self, oauth_request):
return oauth_request.get_parameter('oauth_verifier')
def _check_signature(self, oauth_request, consumer, token):
timestamp, nonce = oauth_request._get_timestamp_nonce()
self._check_timestamp(timestamp)
self._check_nonce(consumer, token, nonce, timestamp)
signature_method = self._get_signature_method(oauth_request)
try:
signature = oauth_request.get_parameter('oauth_signature')
except:
raise OAuthError('Missing signature.')
# Validate the signature.
valid_sig = signature_method.check_signature(oauth_request, consumer,
token, signature)
if not valid_sig:
key, base = signature_method.build_signature_base_string(
oauth_request, consumer, token)
raise OAuthError('Invalid signature. Expected signature base '
'string: %s' % base)
built = signature_method.build_signature(oauth_request, consumer, token)
def _check_timestamp(self, timestamp):
"""Verify that timestamp is recentish."""
timestamp = int(timestamp)
now = int(time.time())
lapsed = now - timestamp
if lapsed > self.timestamp_threshold:
raise OAuthError('Expired timestamp: given %d and now %s has a '
'greater difference than threshold %d' %
(timestamp, now, self.timestamp_threshold))
def _check_nonce(self, consumer, token, nonce, timestamp):
"""Verify that the nonce is uniqueish."""
nonce = self.data_store.lookup_nonce(consumer, token, nonce, timestamp)
if nonce:
raise OAuthError('Nonce already used: %s' % str(nonce))
class OAuthClient(object):
"""OAuthClient is a worker to attempt to execute a request."""
consumer = None
token = None
def __init__(self, oauth_consumer, oauth_token):
self.consumer = oauth_consumer
self.token = oauth_token
def get_consumer(self):
return self.consumer
def get_token(self):
return self.token
def fetch_request_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_request):
"""-> OAuthToken."""
raise NotImplementedError
def access_resource(self, oauth_request):
"""-> Some protected resource."""
raise NotImplementedError
class OAuthDataStore(object):
"""A database abstraction used to lookup consumers and tokens."""
def lookup_consumer(self, key):
"""-> OAuthConsumer."""
raise NotImplementedError
def lookup_token(self, oauth_consumer, token_type, token_token):
"""-> OAuthToken."""
raise NotImplementedError
    def lookup_nonce(self, oauth_consumer, oauth_token, nonce, timestamp):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_request_token(self, oauth_consumer, oauth_callback):
"""-> OAuthToken."""
raise NotImplementedError
def fetch_access_token(self, oauth_consumer, oauth_token, oauth_verifier):
"""-> OAuthToken."""
raise NotImplementedError
def authorize_request_token(self, oauth_token, user):
"""-> OAuthToken."""
raise NotImplementedError
class OAuthSignatureMethod(object):
"""A strategy class that implements a signature method."""
def get_name(self):
"""-> str."""
raise NotImplementedError
def build_signature_base_string(self, oauth_request, oauth_consumer, oauth_token):
"""-> str key, str raw."""
raise NotImplementedError
def build_signature(self, oauth_request, oauth_consumer, oauth_token):
"""-> str."""
raise NotImplementedError
def check_signature(self, oauth_request, consumer, token, signature):
built = self.build_signature(oauth_request, consumer, token)
return built == signature
class OAuthSignatureMethod_HMAC_SHA1(OAuthSignatureMethod):
def get_name(self):
return 'HMAC-SHA1'
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
escape(oauth_request.get_normalized_http_method()),
escape(oauth_request.get_normalized_http_url()),
escape(oauth_request.get_normalized_parameters()),
)
key = '%s&' % escape(consumer.secret)
if token:
key += escape(token.secret)
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
"""Builds the base signature string."""
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
# HMAC object.
try:
import hashlib # 2.5
hashed = hmac.new(key, raw, hashlib.sha1)
except:
import sha # Deprecated
hashed = hmac.new(key, raw, sha)
# Calculate the digest base 64.
return binascii.b2a_base64(hashed.digest())[:-1]
class OAuthSignatureMethod_PLAINTEXT(OAuthSignatureMethod):
def get_name(self):
return 'PLAINTEXT'
def build_signature_base_string(self, oauth_request, consumer, token):
"""Concatenates the consumer key and secret."""
sig = '%s&' % escape(consumer.secret)
if token:
sig = sig + escape(token.secret)
return sig, sig
def build_signature(self, oauth_request, consumer, token):
key, raw = self.build_signature_base_string(oauth_request, consumer,
token)
return key
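# Illustrative sketch, not part of the original library: how the classes above
# are typically combined on the client side. The keys, secrets and URL are
# placeholders; the helper is defined but never called here.
def _example_sign_request():
    consumer = OAuthConsumer('example-consumer-key', 'example-consumer-secret')
    token = OAuthToken('example-token', 'example-token-secret')
    request = OAuthRequest.from_consumer_and_token(
        consumer,
        token=token,
        http_method='GET',
        http_url='http://example.com/photos',
        parameters={'file': 'vacation.jpg'})
    # sign_request() fills in oauth_signature_method and oauth_signature.
    request.sign_request(OAuthSignatureMethod_HMAC_SHA1(), consumer, token)
    # The signed request can then be serialized as a header, URL or post body.
    return request.to_header(realm='http://example.com/')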
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utilities for dealing with file modes.
from past.builtins import basestring
from builtins import object
import bz2
import os
import posixpath
import sys
import tarfile
import tempfile
from desktop.lib.exceptions_renderable import PopupException
from filebrowser.conf import ARCHIVE_UPLOAD_TEMPDIR
from zipfile import ZipFile
if sys.version_info[0] > 2:
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
__all__ = ['archive_factory']
class Archive(object):
"""
  Archive interface.
"""
def extract(self, path):
"""
Extract an Archive.
Should return a directory where the extracted contents live.
"""
    raise NotImplementedError(_("Must implement 'extract' method."))
def _create_dirs(self, basepath, dirs=[]):
"""
Creates all directories passed at the given basepath.
"""
for directory in dirs:
      # Reject directories whose path is absolute or contains a '..' component
if os.path.isabs(directory) or '..' in directory:
raise IllegalPathException()
directory = os.path.join(basepath, directory)
try:
os.makedirs(directory)
except OSError:
pass
class ZipArchive(Archive):
"""
Acts on a zip file in memory or in a temporary location.
Python's ZipFile class inherently buffers all reading.
"""
def __init__(self, file):
if sys.version_info[0] > 2:
self.file = isinstance(file, basestring) and file
else:
self.file = isinstance(file, basestring) and open(file) or file
self.zfh = ZipFile(self.file)
def extract(self):
"""
Extracts a zip file.
If a 'file' ends with '/', then it is a directory and we must create it.
    Else, open a file for writing and pipe the contents of the zipfile member into the new file.
"""
# Store all extracted files in a temporary directory.
if ARCHIVE_UPLOAD_TEMPDIR.get():
directory = tempfile.mkdtemp(dir=ARCHIVE_UPLOAD_TEMPDIR.get())
else:
directory = tempfile.mkdtemp()
dirs, files = self._filenames()
self._create_dirs(directory, dirs)
self._create_files(directory, files)
return directory
def _filenames(self):
"""
List all dirs and files by reading the table of contents of the Zipfile.
"""
dirs = []
files = []
for name in self.zfh.namelist():
if name.endswith(posixpath.sep):
dirs.append(name)
else:
files.append(name)
# self.zfh.namelist() sometimes doesn't return all the directories
        # Go up the path one directory at a time
parent = os.path.dirname(name)
while parent != '' and parent not in dirs:
dirs.append(parent)
parent = os.path.dirname(parent)
return (dirs, files)
def _create_files(self, basepath, files=[]):
"""
Extract files to their rightful place.
Files are written to a temporary directory immediately after being decompressed.
"""
for f in files:
new_path = os.path.join(basepath, f)
new_file = open(new_path, 'w')
zdata = self.zfh.read(f)
if not isinstance(zdata, str):
zdata = zdata.decode('utf-8')
new_file.write(zdata)
new_file.close()
class TarballArchive(Archive):
"""
Acts on a tarball (tar.gz) file in memory or in a temporary location.
  Python's tarfile module is used to read the archive.
"""
def __init__(self, file):
if isinstance(file, basestring):
self.path = file
else:
f = tempfile.NamedTemporaryFile(delete=False)
f.write(file.read())
self.path = f.name
f.close()
self.fh = tarfile.open(self.path)
def extract(self):
"""
    Extracts a tarball.
    If a member ends with '/', then it is a directory and we must create it.
    Else, open a file for writing and pipe the contents of the tarfile member into the new file.
"""
# Store all extracted files in a temporary directory.
directory = tempfile.mkdtemp()
dirs, files = self._filenames()
self._create_dirs(directory, dirs)
self._create_files(directory, files)
return directory
def _filenames(self):
"""
    List all dirs and files by reading the table of contents of the tarball.
"""
dirs = []
files = []
for tarinfo in self.fh.getmembers():
if tarinfo.isdir():
dirs.append(tarinfo.name)
else:
files.append(tarinfo.name)
parent = os.path.dirname(tarinfo.path)
# getmembers() sometimes doesn't return all the directories
        # Go up the path one directory at a time
while parent != '' and parent not in dirs:
dirs.append(parent)
parent = os.path.dirname(parent)
return (dirs, files)
def _create_files(self, basepath, files=[]):
"""
Extract files to their rightful place.
Files are written to a temporary directory immediately after being decompressed.
"""
for f in files:
new_path = os.path.join(basepath, f)
new_file = open(new_path, 'w')
extracted_data = self.fh.extractfile(f).read()
if not isinstance(extracted_data, str):
extracted_data = extracted_data.decode('utf-8')
new_file.write(extracted_data)
new_file.close()
class BZ2Archive(Archive):
"""
Acts on a bzip2 file in memory or in a temporary location.
Python's BZ2File class inherently buffers all reading.
"""
def __init__(self, file):
    # bzip2 only compresses single files and there is no direct method in the
    # bz2 library to get the original file name.
self.name = file.name[:-6] if file.name.lower().endswith('.bzip2') else file.name[:-4]
if isinstance(file, basestring):
self.path = file
else:
f = tempfile.NamedTemporaryFile(delete=False)
f.write(file.read())
self.path = f.name
f.close()
self.fh = bz2.BZ2File(self.path)
def extract(self):
"""
Extracts a bz2 file.
    Opens the file for writing and pipes the contents of the bz2 file into the new file.
"""
# Store all extracted files in a temporary directory.
if ARCHIVE_UPLOAD_TEMPDIR.get():
directory = tempfile.mkdtemp(dir=ARCHIVE_UPLOAD_TEMPDIR.get())
else:
directory = tempfile.mkdtemp()
files = [self.name]
self._create_files(directory, files)
return directory
def _create_files(self, basepath, files=[]):
"""
Files are written to a temporary directory immediately after being decompressed.
"""
for f in files:
new_path = os.path.join(basepath, f)
new_file = open(new_path, 'w')
new_file.write(self.fh.read())
new_file.close()
def archive_factory(path, archive_type='zip'):
if archive_type == 'zip':
return ZipArchive(path)
elif archive_type == 'tarball' or archive_type == 'tar.gz' or archive_type == 'tgz':
return TarballArchive(path)
elif archive_type == 'bz2' or archive_type == 'bzip2':
return BZ2Archive(path)
class IllegalPathException(PopupException):
def __init__(self):
super(IllegalPathException, self).__init__('''Archive path cannot be absolute or contain '..' ''')
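# Illustrative sketch, not part of the original module: archive_factory() maps
# the archive_type string to one of the Archive subclasses above, and extract()
# returns the temporary directory the contents were unpacked into. The path is
# a placeholder; the helper is defined but never called here.
def _example_extract_upload(upload_path='/tmp/upload.tar.gz'):
  archive = archive_factory(upload_path, archive_type='tgz')
  return archive.extract()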
|
|
import abc
import os
import time
import lockfile
import numpy as np
from ParamSklearn.classification import ParamSklearnClassifier
from ParamSklearn.regression import ParamSklearnRegressor
from autosklearn.scores import libscores
from autosklearn.constants import *
try:
import cPickle as pickle
except:
import pickle
def calculate_score(solution, prediction, task_type, metric, num_classes,
all_scoring_functions=False):
if task_type == MULTICLASS_CLASSIFICATION:
solution_binary = np.zeros((prediction.shape[0], num_classes))
for i in range(solution_binary.shape[0]):
label = solution[i]
solution_binary[i, label] = 1
solution = solution_binary
elif task_type in [BINARY_CLASSIFICATION, REGRESSION]:
if len(solution.shape) == 1:
solution = solution.reshape((-1, 1))
if task_type not in TASK_TYPES:
raise NotImplementedError(task_type)
scoring_func = getattr(libscores, metric)
if solution.shape != prediction.shape:
raise ValueError("Solution shape %s != prediction shape %s" %
(solution.shape, prediction.shape))
if all_scoring_functions:
score = dict()
if task_type in REGRESSION_TASKS:
cprediction = libscores.sanitize_array(prediction)
score["a_metric"] = libscores.a_metric(solution, cprediction,
task=task_type)
score["r2_metric"] = libscores.r2_metric(solution, cprediction,
task=task_type)
else:
csolution, cprediction = libscores.normalize_array(
solution, prediction)
score["bac_metric"] = libscores.bac_metric(csolution, cprediction,
task=task_type)
score["auc_metric"] = libscores.auc_metric(csolution, cprediction,
task=task_type)
score["f1_metric"] = libscores.f1_metric(csolution, cprediction,
task=task_type)
score["pac_metric"] = libscores.pac_metric(csolution, cprediction,
task=task_type)
score["acc_metric"] = libscores.acc_metric(csolution, cprediction,
task=task_type)
else:
if task_type in REGRESSION_TASKS:
cprediction = libscores.sanitize_array(prediction)
score = scoring_func(solution, cprediction, task=task_type)
else:
csolution, cprediction = libscores.normalize_array(
solution, prediction)
score = scoring_func(csolution, cprediction, task=task_type)
return score
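# Illustrative sketch, not part of the original module: calculate_score() first
# expands a multiclass label vector into a one-hot matrix before scoring. The
# snippet below reproduces that binarization step on made-up labels; it is
# defined but never called here.
def _example_binarize_solution():
    num_classes = 3
    solution = np.array([0, 2, 1, 2])
    solution_binary = np.zeros((solution.shape[0], num_classes))
    for i in range(solution_binary.shape[0]):
        solution_binary[i, solution[i]] = 1
    return solution_binary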
def get_new_run_num():
seed = os.environ.get("AUTOSKLEARN_SEED")
counter_file = "num_run"
if seed is not None:
counter_file = counter_file + ("_%s" % seed)
counter_file = os.path.join(os.getcwd(), counter_file)
lock = lockfile.LockFile(counter_file)
with lock:
if not os.path.exists(counter_file):
with open(counter_file, "w") as fh:
fh.write("0")
num = 0
else:
with open(counter_file, "r") as fh:
num = int(fh.read())
num += 1
with open(counter_file, "w") as fh:
fh.write(str(num).zfill(4))
return num
class Evaluator(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self, Datamanager, configuration, with_predictions=False,
all_scoring_functions=False, seed=1, output_dir=None,
output_y_test=False, num_run=None):
self.starttime = time.time()
self.configuration = configuration
self.D = Datamanager
self.X_valid = Datamanager.data.get('X_valid')
self.X_test = Datamanager.data.get('X_test')
self.metric = Datamanager.info['metric']
self.task_type = Datamanager.info['task']
self.seed = seed
if output_dir is None:
self.output_dir = os.getcwd()
else:
self.output_dir = output_dir
self.output_y_test = output_y_test
self.with_predictions = with_predictions
self.all_scoring_functions = all_scoring_functions
if self.task_type in REGRESSION_TASKS:
self.model_class = ParamSklearnRegressor
self.predict_function = self.predict_regression
else:
self.model_class = ParamSklearnClassifier
self.predict_function = self.predict_proba
if num_run is None:
num_run = get_new_run_num()
self.num_run = num_run
@abc.abstractmethod
def fit(self):
pass
@abc.abstractmethod
def predict(self):
pass
# This function does everything necessary after the fitting is done:
# predicting
# saving the files for the ensembles_statistics
    #        generating output for SMAC
    # We use it as the signal handler so we can recycle the code for the
    # normal use case and for when the runsolver kills us.
def finish_up(self):
try:
self.duration = time.time() - self.starttime
result, additional_run_info = self.file_output()
print "Result for ParamILS: %s, %f, 1, %f, %d, %s" % ("SAT", abs(self.duration), result, self.seed, additional_run_info)
except:
self.duration = time.time() - self.starttime
import traceback
            print(traceback.format_exc())
            print("")
            print("Result for ParamILS: %s, %f, 1, %f, %d, %s" % (
                "TIMEOUT", abs(self.duration), 1.0, self.seed,
                "No results were produced! Probably the training was not "
                "finished and no valid model was generated!"))
def file_output(self):
seed = os.environ.get("AUTOSKLEARN_SEED")
errs, Y_optimization_pred, Y_valid_pred, Y_test_pred = self.predict()
num_run = str(self.num_run).zfill(5)
pred_dump_name_template = os.path.join(self.output_dir,
"predictions_%s_%s", self.D.basename + '_predictions_%s_' +
num_run + '.npy')
if self.output_y_test:
try:
os.makedirs(self.output_dir)
except OSError:
pass
y_test_filename = os.path.join(self.output_dir,
"y_optimization.npy")
with lockfile.LockFile(y_test_filename + '.lock'):
with open(y_test_filename, "w") as fh:
pickle.dump(self.Y_optimization.astype(np.float32), fh, -1)
ensemble_output_dir = os.path.join(self.output_dir,
"predictions_ensemble_%s" % seed)
if not os.path.exists(ensemble_output_dir):
os.makedirs(ensemble_output_dir)
with open(pred_dump_name_template % ("ensemble", seed, "ensemble"), "w") as fh:
pickle.dump(Y_optimization_pred.astype(np.float32), fh, -1)
if Y_valid_pred is not None:
valid_output_dir = os.path.join(self.output_dir,
"predictions_valid_%s" % seed)
if not os.path.exists(valid_output_dir):
os.makedirs(valid_output_dir)
with open(pred_dump_name_template % ("valid", seed, "valid"), "w") as fh:
pickle.dump(Y_valid_pred.astype(np.float32), fh, -1)
if Y_test_pred is not None:
test_output_dir = os.path.join(self.output_dir,
"predictions_test_%s" % seed)
if not os.path.exists(test_output_dir):
os.makedirs(test_output_dir)
with open(pred_dump_name_template % ("test", seed, "test"), "w") as fh:
pickle.dump(Y_test_pred.astype(np.float32), fh, -1)
self.duration = time.time() - self.starttime
err = errs[self.D.info['metric']]
additional_run_info = ";".join(["%s: %s" % (metric, value)
for metric, value in errs.items()])
additional_run_info += ";" + "duration: " + str(self.duration)
additional_run_info += ";" + "num_run:" + num_run
# print "Saved predictions with shapes %s, %s, %s for num_run %s" % \
# (Y_optimization_pred.shape, Y_valid_pred.shape,
# Y_test_pred.shape, num_run)
return err, additional_run_info
def predict_proba(self, X, model, task_type, Y_train=None):
Y_pred = model.predict_proba(X, batch_size=1000)
if task_type == MULTILABEL_CLASSIFICATION:
Y_pred = np.hstack(
[Y_pred[i][:, -1].reshape((-1, 1))
for i in range(len(Y_pred))])
elif task_type == BINARY_CLASSIFICATION:
if len(Y_pred.shape) != 1:
Y_pred = Y_pred[:, 1].reshape(-1, 1)
elif task_type == MULTICLASS_CLASSIFICATION:
pass
Y_pred = self._ensure_prediction_array_sizes(Y_pred, Y_train)
return Y_pred
def predict_regression(self, X, model, task_type, Y_train=None):
Y_pred = model.predict(X, batch_size=1000)
if len(Y_pred.shape) == 1:
Y_pred = Y_pred.reshape((-1, 1))
return Y_pred
def _ensure_prediction_array_sizes(self, prediction, Y_train):
num_classes = self.D.info["target_num"]
if self.task_type == MULTICLASS_CLASSIFICATION and \
prediction.shape[1] < num_classes:
classes = list(np.unique(self.D.data["Y_train"]))
if num_classes == prediction.shape[1]:
return prediction
if Y_train is not None:
classes = list(np.unique(Y_train))
mapping = dict()
for class_number in range(num_classes):
if class_number in classes:
index = classes.index(class_number)
mapping[index] = class_number
new_predictions = np.zeros((prediction.shape[0], num_classes))
for index in mapping:
class_index = mapping[index]
new_predictions[:, class_index] = prediction[:, index]
return new_predictions
return prediction
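# Illustrative sketch, not part of the original module: when a multiclass model
# was fit on a subset of the classes, _ensure_prediction_array_sizes() scatters
# the predicted columns back into a full-width array. The toy example below
# reproduces that mapping for 4 classes of which only classes 0 and 2 were seen
# during training (values are made up); it is defined but never called here.
def _example_expand_prediction_columns():
    num_classes = 4
    classes_seen = [0, 2]                    # classes present in Y_train
    prediction = np.array([[0.7, 0.3],
                           [0.1, 0.9]])      # one column per seen class
    new_predictions = np.zeros((prediction.shape[0], num_classes))
    for index, class_number in enumerate(classes_seen):
        new_predictions[:, class_number] = prediction[:, index]
    return new_predictions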
|
|
# This file creates all the data structures that relab needs.
# The user chooses between training and testing mode.
import numpy as np
import os
import xml.etree.ElementTree as ET
from numpy import linalg as LA
import matplotlib.pyplot as plt
import cv2
from faster_rcnn import network
from faster_rcnn.faster_rcnn import FasterRCNN
from faster_rcnn.utils.timer import Timer
import pickle
from numpy import unravel_index
from test2 import *
from faster_rcnn.nms.py_cpu_nms import py_cpu_nms
repo_of_ground_truth = '/home/revan/VOCdevkit/VOC2007/Annotations'
repo_of_images = '/home/revan/VOCdevkit/VOC2007/JPEGImages'
train_set = '/home/revan/VOCdevkit/VOC2007/ImageSets/Main/trainval.txt'
test_set = '/home/revan/VOCdevkit/VOC2007/ImageSets/Main/test.txt'
classes_dict = {'__background__': 0, 'aeroplane': 1, 'bicycle': 2, 'bird': 3, 'boat': 4, 'bottle': 5, 'bus': 6, 'car': 7,
'cat': 8, 'chair': 9, 'cow': 10, 'diningtable': 11, 'dog': 12, 'horse': 13, 'motorbike': 14,
'person': 15, 'pottedplant': 16, 'sheep': 17, 'sofa': 18, 'train': 19, 'tvmonitor': 20}
imdb_name = 'voc_2007_test'
cfg_file = 'experiments/cfgs/faster_rcnn_end2end.yml'
trained_model = 'VGGnet_fast_rcnn_iter_70000.h5'
rand_seed = 1024
save_name = 'faster_rcnn_100000'
max_per_image = 300
thresh = 0.05
vis = False
def main(train=1, serialize=1):
    distribution_mode = 'softmax'  # the mode can be 'peak', 'k-peak' or 'softmax'
if train:
if serialize:
content = get_filenames_of_set(train_set)
p, gt, width_and_height, all_boxes_all_images, p_binary = create_p_and_gt(content, repo_of_images, classes_dict, distribution_mode)
# dump data structures into pickle files
with open('p_trainval.pickle', 'wb') as f:
pickle.dump(p, f, pickle.HIGHEST_PROTOCOL)
with open('gt_trainval.pickle', 'wb') as f:
pickle.dump(gt, f, pickle.HIGHEST_PROTOCOL)
with open('width_and_height_traival.pickle', 'wb') as f:
pickle.dump(width_and_height, f, pickle.HIGHEST_PROTOCOL)
with open('all_boxes_all_images_trainval.pickle', 'wb') as f:
pickle.dump(all_boxes_all_images, f, pickle.HIGHEST_PROTOCOL)
with open('p_binary_trainval.pickle', 'wb') as f:
pickle.dump(p_binary, f, pickle.HIGHEST_PROTOCOL)
# load the pickle files
with open('p_trainval.pickle', 'rb') as f:
p = pickle.load(f)
with open('gt_trainval.pickle', 'rb') as f:
gt = pickle.load(f)
with open('width_and_height_traival.pickle', 'rb') as f:
width_and_height = pickle.load(f)
with open('all_boxes_all_images_trainval.pickle', 'rb') as f:
all_boxes_all_images = pickle.load(f)
with open('p_binary_trainval.pickle', 'rb') as f:
p_binary = pickle.load(f)
if serialize:
# do the matching between rcnn results and the ground truth
info_all_images = postprocess_all_images(p, gt, width_and_height, all_boxes_all_images, p_binary, false_positives=False, false_negatives=False)
# pickle the final data structure
with open('train_k_peak_softmax_nofp_nofn_including_binary_thresholded_iou.pickle', 'wb') as f:
pickle.dump(info_all_images, f, pickle.HIGHEST_PROTOCOL)
# load the final data structure
with open('train_k_peak_softmax_nofp_nofn_including_binary_thresholded_iou.pickle', 'rb') as f:
info_all_images = pickle.load(f)
else:
if serialize:
content = get_filenames_of_set(test_set)
p, gt, width_and_height, all_boxes_all_images, p_binary = create_p_and_gt(content, repo_of_images, classes_dict, distribution_mode)
# dump data structures into pickle files
with open('p_test.pickle', 'wb') as f:
pickle.dump(p, f, pickle.HIGHEST_PROTOCOL)
with open('gt_test.pickle', 'wb') as f:
pickle.dump(gt, f, pickle.HIGHEST_PROTOCOL)
with open('width_and_height_test.pickle', 'wb') as f:
pickle.dump(width_and_height, f, pickle.HIGHEST_PROTOCOL)
with open('all_boxes_all_images_test.pickle', 'wb') as f:
pickle.dump(all_boxes_all_images, f, pickle.HIGHEST_PROTOCOL)
with open('p_binary_test.pickle', 'wb') as f:
pickle.dump(p_binary, f, pickle.HIGHEST_PROTOCOL)
# load the pickle files
with open('p_test.pickle', 'rb') as f:
p = pickle.load(f)
with open('gt_test.pickle', 'rb') as f:
gt = pickle.load(f)
with open('width_and_height_test.pickle', 'rb') as f:
width_and_height = pickle.load(f)
with open('all_boxes_all_images_test.pickle', 'rb') as f:
all_boxes_all_images = pickle.load(f)
with open('p_binary_test.pickle', 'rb') as f:
p_binary = pickle.load(f)
if serialize:
info_all_images = []
len_p = len(p)
for i in xrange(len_p):
new_data = []
                if len(p[i]) > 0:
new_p = p[i][:, :21]
new_rect_p = p[i][:, 21:]
else:
new_p = []
new_rect_p = []
new_gt = gt[i][:, :21]
new_rect_gt = gt[i][:, 21:]
new_width_and_height = width_and_height[i]
new_data.append(new_p)
new_data.append(new_gt)
new_data.append(new_rect_p)
new_data.append(new_rect_gt)
new_data.append(new_width_and_height)
new_data.append(all_boxes_all_images[i])
new_data.append(p_binary[i])
info_all_images.append(new_data)
with open('info_all_images_test_thresholded_iou.pickle', 'wb') as f:
pickle.dump(info_all_images, f, pickle.HIGHEST_PROTOCOL)
# load the final data structure
with open('info_all_images_test_thresholded_iou.pickle', 'rb') as f:
info_all_images = pickle.load(f)
print("Done")
def remove_background(p):
"""
This function removes the background class, by spreading its probability over all other classes
:param p: representation of the class as a probability function (final 4 elements are spatial dimensions)
:return:
new_p: representation of the class as a probability function with background removed
new_rect: the bounding box
"""
new_p = []
new_rect = []
for i in xrange(len(p)):
if len(p[i]) > 0:
probs = p[i][:, 1:21]
probs /= np.sum(probs, axis=1)[:, np.newaxis]
rects = p[i][:, 21:]
new_p.append(probs)
new_rect.append(rects)
else:
new_p.append(np.zeros((0, 0)))
new_rect.append(np.zeros((0, 0)))
return new_p, new_rect
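# Illustrative sketch, not part of the original pipeline: remove_background()
# drops the background column and renormalizes the remaining class
# probabilities. The toy row below (one background plus three foreground
# classes, made-up values) shows the same renormalization; it is defined but
# never called here.
def _example_strip_background_row():
    row = np.array([0.4, 0.3, 0.2, 0.1])  # [background, class 1, class 2, class 3]
    foreground = row[1:]
    foreground = foreground / np.sum(foreground)
    return foreground  # sums to 1.0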
def get_filenames_of_set(train_set):
"""
This function reads the names of the files that we are going to use as our training test
:param train_set - the file which contains the names of the training set
    :return: content - a list containing the name of each file
"""
# read the names of the files that we are going to use as our training test
with open(train_set) as f:
content = f.readlines()
content = [x.strip() for x in content]
return content
def find_objects_in_files(filename, dict):
"""
    This function parses the annotation (xml) file of an image
    :param filename: the path to the annotation file
    :param dict: a dictionary mapping class names to class indices
    :return: gt_info - an array with one row per object (class one-hot encoding followed by the
             bounding box coordinates), plus the width and the height of the image
"""
tree = ET.parse(filename)
root = tree.getroot()
gt_info = []
for size in root.iter('size'):
for width in size.iter('width'):
width = float(int(width.text))
for height in size.iter('height'):
height = float(int(height.text))
for object in root.iter('object'):
for name in object.iter('name'):
class_of_object = name.text
gt_info_object = np.zeros(25)
if class_of_object in dict: # skip parts of bodies (head, foot, hand etc)
name_decrypted = dict[class_of_object]
gt_info_object[name_decrypted] = 1.0
for bounding_box in object.iter('bndbox'):
for xmin in bounding_box.iter('xmin'):
# we convert first to float and then to int because for some reasons the bounding boxes can have float
# coordinates like 273.3863
xmin = int(float(xmin.text)) # / width
for ymin in bounding_box.iter('ymin'):
ymin = int(float(ymin.text)) # / height
for xmax in bounding_box.iter('xmax'):
xmax = int(float(xmax.text)) # / width
for ymax in bounding_box.iter('ymax'):
ymax = int(float(ymax.text)) # / height
# fill the spatial positions for each object
gt_info_object[21] = xmin
gt_info_object[22] = ymin
gt_info_object[23] = xmax
gt_info_object[24] = ymax
gt_info.append(gt_info_object)
gt_info = np.asarray(gt_info)
return gt_info, width, height
def test_net(name, net, imdb, im, max_per_image=300, thresh=0.3, vis=False):
"""Test a Fast R-CNN network on an image database."""
scores, boxes = im_detect(net, im)
to_keep = []
all_class_boxes = []
for j in xrange(1, imdb.num_classes):
inds = np.where(scores[:, j] > thresh)[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j * 4:(j + 1) * 4]
cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
.astype(np.float32, copy=False)
keep = nms(cls_dets, cfg.TEST.NMS)
object_class = np.full((cls_dets.shape[0], 1), j, dtype=np.float32)
cls_dets = np.hstack((cls_dets, object_class))
cls_dets = cls_dets[keep, :]
if cls_dets.shape[0] != 0:
for k in xrange(len(cls_dets)):
all_class_boxes.append(cls_dets[k])
to_keep.extend(inds[keep])
all_scores = scores[to_keep]
all_boxes = boxes[to_keep]
class_boxes = np.zeros((len(all_class_boxes), 6))
for whatever in range(len(all_class_boxes)):
class_boxes[whatever] = all_class_boxes[whatever]
return all_scores, class_boxes, all_boxes
def create_p_and_gt(content, repo_of_images, dict, mode='peak'):
"""
This function creates initial p and gt based on the output of the r-cnn and the ground truth
:param content: a list containing the names of the images
:param repo_of_images: the repository of the images
:param dict: a dictionary where as key are names of the classes, and as values are an enumeration of them
:return:
rcnn_output: the representation given by rcnn
ground_truth: the representation gotten from the ground truth
all_width_and_height: width and the height of the image (read from xml files)
all_boxes_all_images: boxes for each possible class, might be needed after relab
"""
rcnn_output = []
ground_truth = []
all_width_and_height = []
all_boxes_all_images = []
rcnn_output_binary = []
for image_name in content:
im = cv2.imread(os.path.join(repo_of_images, image_name + ".jpg"))
xml_file = os.path.join(repo_of_ground_truth, image_name + ".xml")
all_scores, all_class_boxes, all_boxes = test_net(save_name, net, imdb, im, max_per_image, thresh=thresh, vis=0)
all_scores_length = len(all_scores)
if mode == 'peak':
for i in xrange(all_scores_length):
all_scores[i, :] = create_peak_array(all_class_boxes[i, 4], all_class_boxes[i, 5])
elif mode == 'k-peak':
k = 5
for i in xrange(all_scores_length):
all_scores[i, :] = create_peak_k_array(all_scores[i, :], k)
gt_info, width, height = find_objects_in_files(xml_file, dict)
ground_truth.append(gt_info)
if len(all_scores) != 0:
rcnn_output_individual = np.zeros((len(all_scores), 25))
rcnn_output_individual[:, :21] = all_scores
rcnn_output_individual[:, 21:] = all_class_boxes[:, :-2]
rcnn_output_binary_individual = np.zeros((len(all_scores), 2))
rcnn_output_binary_individual[:, 0] = all_class_boxes[:, -2]
rcnn_output_binary_individual[:, 1] = 1. - rcnn_output_binary_individual[:, 0]
rcnn_output.append(rcnn_output_individual)
rcnn_output_binary.append(rcnn_output_binary_individual)
all_width_and_height.append(np.asarray([width, height]))
all_boxes_all_images.append(all_boxes)
else:
rcnn_output.append(np.asarray([])) # we add an empty array just to have everything synchronized
rcnn_output_binary.append(np.asarray([]))
all_width_and_height.append(np.asarray([width, height]))
all_boxes_all_images.append(all_boxes)
return rcnn_output, ground_truth, all_width_and_height, all_boxes_all_images, rcnn_output_binary
def bb_intersection_over_union(boxA, boxB):
"""
This function does intersection over union between two bounding boxes
:param boxA: box x1 represented as [min_x1, min_y1, max_x1, max_y1]
    :param boxB: box x2 represented as [min_x2, min_y2, max_x2, max_y2]
:return: iou: intersection over union - a number between 0 and 1
"""
# determine the (x, y)-coordinates of the intersection rectangle
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
if xA > xB or yA > yB:
return 0
else:
# compute the area of intersection rectangle
interArea = (xB - xA + 1) * (yB - yA + 1)
# compute the area of both the prediction and ground-truth
# rectangles
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = interArea / float(boxAArea + boxBArea - interArea)
# return the intersection over union value
return iou
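# Illustrative sketch (not part of the original script): a minimal, self-contained sanity check of
# bb_intersection_over_union on two hand-picked boxes. The helper is hypothetical and never called;
# run it manually if you want to verify the IoU computation.
def _example_iou_sanity_check():
    box_a = [0, 0, 10, 10]   # [min_x, min_y, max_x, max_y]
    box_b = [5, 5, 15, 15]
    # With the +1 pixel convention used above, the intersection covers 6 * 6 = 36 pixels and each
    # box covers 11 * 11 = 121 pixels, so IoU = 36 / (121 + 121 - 36), which is roughly 0.175.
    iou = bb_intersection_over_union(box_a, box_b)
    assert abs(iou - 36.0 / 206.0) < 1e-9
    return iou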
def postprocess(p, gt, width_and_height, p_binary, false_positives=False, false_negatives=False):
"""
This function does matching and then postprocessing of p's and gt's
    :param p: the objects given by the r-cnn
    :param gt: the objects we get from the ground truth
    :param width_and_height: the width and height of the image
    :param p_binary: binary (object vs. background) scores for the r-cnn objects
    :param false_positives: if True, unmatched r-cnn detections are kept and matched to a background ground truth
    :param false_negatives: if True, unmatched ground-truth objects are added as if the r-cnn had detected them
    :return: info_image: a list which contains the postprocessed p, the postprocessed gt, the rectangles for p, the
    rectangles for gt, the width and height, and the binary versions of p and gt
"""
len_p = len(p)
len_gt = len(gt)
elements_in_p = [i for i in xrange(len_p)]
elements_in_gt = [i for i in xrange(len_gt)]
matching_table = create_matching_table(p, gt)
max_number_of_matches = min(matching_table.shape[0], matching_table.shape[1])
new_p = []
new_gt = []
new_rects_p = []
new_rects_gt = []
new_p_binary = []
new_gt_binary = []
threshold = 0.5
# on this part we create the real matches between p and gt
for _ in xrange(max_number_of_matches):
best_match = unravel_index(matching_table.argmax(), matching_table.shape)
        if matching_table[best_match[0], best_match[1]] > threshold:  # only accept matches whose IoU is above the threshold
matching_table[best_match[0], :] = 0.
matching_table[:, best_match[1]] = 0.
new_p.append(p[best_match[0], :21])
new_p_binary.append(p_binary[best_match[0]])
new_gt_binary.append(np.array([1., 0.]))
new_rects_p.append(p[best_match[0], 21:])
new_gt.append(gt[best_match[1], :21])
new_rects_gt.append(gt[best_match[1], 21:])
elements_in_p.remove(best_match[0])
elements_in_gt.remove(best_match[1])
# here we add the matches of false positives by inserting background class on the given rectangles on the ground
# truth
if false_positives:
for element in elements_in_p:
new_p.append(p[element, :21])
new_p_binary.append(p_binary[element])
new_rects_p.append(p[element, 21:])
new_gt.append(create_background_peak_array())
new_gt_binary.append(np.array([0., 1.])) # 0 - not background; 1 - background
new_rects_gt.append(p[element, 21:])
# here we deal with false negatives, by adding them as r-cnn outputs equal to the ground truth
if false_negatives:
for element in elements_in_gt:
new_p.append(gt[element, :21])
new_p_binary.append(np.array([1., 0.]))
new_rects_p.append(gt[element, 21:])
new_gt.append(gt[element, :21])
new_gt_binary.append((np.array([1., 0.])))
new_rects_gt.append(gt[element, 21:])
# convert all the lists to numpy arrays
new_p = np.asarray(new_p)
new_rects_p = np.asarray(new_rects_p)
new_gt = np.asarray(new_gt)
new_rects_gt = np.asarray(new_rects_gt)
# add all the postprocessed information to a list
info_image = [new_p, new_gt, new_rects_p, new_rects_gt, width_and_height, new_p_binary, new_gt_binary]
return info_image
def create_peak_array(peak, peak_index):
"""
This function creates an array which represents a probability distribution, with value peak in the peak_index'th
entry, and value (1 - peak)/20. in all the other positions
:param peak: the peak value
:param peak_index: the index where we have to put the peak value
    :return: peak_distribution - a 1 by 21 array representing the resulting probability distribution
"""
not_peak = (1. - peak) / 20.
peak_distribution = np.full((1, 21), not_peak, dtype=np.float32)
peak_distribution[0, int(peak_index)] = peak
return peak_distribution
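# Illustrative sketch (not part of the original script): what create_peak_array produces for a
# hypothetical detection scored 0.9 on class index 3. Defined only for inspection, never called.
def _example_peak_array():
    import numpy as np
    peaked = create_peak_array(0.9, 3)
    # peaked has shape (1, 21): 0.9 at index 3 and (1 - 0.9) / 20 = 0.005 everywhere else,
    # so the row still sums to 1.
    assert peaked.shape == (1, 21)
    assert np.isclose(peaked[0, 3], 0.9)
    assert np.isclose(peaked.sum(), 1.0)
    return peaked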
def create_peak_k_array(softmax_array, k):
"""
This function thresholds to 0 all entries which are not in the top k values
:param softmax_array: an array which represents a probability distribution over classes
    :param k: the number of top entries to keep
:return: k_peak_array - the postprocessed softmax array
"""
argsort_array = np.argsort(softmax_array)[::-1] # argsort in reverse order
k_peak_array = np.zeros(21)
for i in xrange(k):
k_peak_array[argsort_array[i]] = softmax_array[argsort_array[i]]
k_peak_array /= np.sum(k_peak_array)
return k_peak_array
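# Illustrative sketch (not part of the original script): how create_peak_k_array keeps only the
# top-k scores and renormalizes them. The input distribution below is made up for the example.
def _example_k_peak_array():
    import numpy as np
    softmax = np.full(21, 0.01)
    softmax[5], softmax[7] = 0.5, 0.31   # two dominant classes, the rest stays at 0.01
    k_peak = create_peak_k_array(softmax, k=2)
    # Only indices 5 and 7 survive, rescaled so the array sums to 1 again (0.5 / 0.81 and 0.31 / 0.81).
    assert np.count_nonzero(k_peak) == 2
    assert np.isclose(k_peak.sum(), 1.0)
    return k_peak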
def create_matching_table(p, gt):
"""
    This function creates a table of size n by m (n is the number of objects in p, m is the number of objects in gt),
    where the entries of the table are the intersection over union values of those objects
    :param p: a numpy array of size n by 25 (the first 21 elements are the prob. distribution, the last 4 elements are
    the spatial dimensions)
    :param gt: same as p, but the probability distribution is taken from the ground truth
    :return: matching_table: the table containing the i_over_u for each pair in the cartesian product of p and gt
"""
len_p = len(p)
len_gt = len(gt)
matching_table = np.zeros((len_p, len_gt))
for i in xrange(len_p):
for j in xrange(len_gt):
matching_table[i, j] = bb_intersection_over_union(p[i, -4:], gt[j, -4:])
return matching_table
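# Illustrative sketch (not part of the original script): the matching table built for one
# hypothetical prediction and one ground-truth object, each stored as a 25-wide row.
def _example_matching_table():
    import numpy as np
    p = np.zeros((1, 25))
    gt = np.zeros((1, 25))
    p[0, -4:] = [0, 0, 10, 10]    # predicted bounding box
    gt[0, -4:] = [5, 5, 15, 15]   # ground-truth bounding box
    table = create_matching_table(p, gt)
    # The table has shape (1, 1) and holds the IoU of the two boxes (roughly 0.175).
    assert table.shape == (1, 1)
    return table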
def create_background_peak_array():
"""
This function simply returns an array with 1 in the background class and 0 in all other classes
:return: the above mentioned array
"""
return np.array([1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
def postprocess_all_images(p, gt, width_and_height, all_boxes, p_binary, false_positives=False, false_negatives=False):
"""
This function iterates over all images, calling postprocess
    :param p: data structure containing the information given by the r-cnn
    :param gt: data structure containing the information taken from the ground truth
    :param width_and_height: width and height info for all images
    :param p_binary: binary (object vs. background) scores for all images
    :param false_positives: passed through to postprocess, see above
    :param false_negatives: passed through to postprocess, see above
    :return: info_all_images: a list containing all the information needed for relab
"""
number_of_images = len(p)
info_all_images = []
for i in xrange(number_of_images):
info_all_images.append(postprocess(p[i], gt[i], width_and_height[i], p_binary[i],
false_positives=false_positives, false_negatives=false_negatives))
return info_all_images
if __name__ == "__main__":
imdb = get_imdb(imdb_name)
print(imdb_name)
imdb.competition_mode(on=True)
# load net
net = FasterRCNN(classes=imdb.classes, debug=False)
network.load_net(trained_model, net)
print('load model successfully!')
net.cuda()
net.eval()
train_mode = 1 # 1 if on train mode, 0 on test mode
serialize = 1 # 0 if you just want to load the data, 1 if you want to process it
main(train=train_mode, serialize=serialize)
|
|
"""Review Bot tool to run FBInfer."""
from __future__ import unicode_literals
import json
import logging
import shlex
from reviewbot.tools import RepositoryTool
from reviewbot.utils.process import execute, is_exe_in_path
logger = logging.getLogger(__name__)
class FBInferTool(RepositoryTool):
"""Review Bot tool to run FBInfer."""
name = 'FBInfer'
version = '1.0'
description = ('Checks code for errors using FBInfer, a tool for static '
'Android, Java, C, C++, and iOS/Objective-C code analysis.')
timeout = 90
options = [
{
'name': 'build_type',
'field_type': 'django.forms.ChoiceField',
'field_options': {
'label': 'Select Build Type',
'help_text': 'Choose how the project will be compiled.',
'choices': (
('./gradlew', 'Android/Gradle with Wrapper'),
('ant', 'Apache Ant'),
('buck build', 'Buck'),
('clang -c', 'C/Objective-C'),
('cmake', 'CMake'),
('gradle', 'Gradle'),
('xcodebuild', 'iOS/XCode'),
('javac', 'Java'),
('make', 'Make'),
('mvn', 'Maven'),
),
},
},
{
'name': 'build_target',
'field_type': 'django.forms.CharField',
'default': '',
'field_options': {
'label': 'Build Target',
'required': False,
'help_text': (
'Include a build target if required to successfully build '
'the project. (e.g. pass in the "build" parameter for a '
'gradle compilation or a target for a buck compilation)'
),
},
},
{
'name': 'xcode_configuration',
'field_type': 'django.forms.CharField',
'default': '',
'field_options': {
'label': 'XCode Configuration',
'required': False,
'help_text': ('Include any additional configuration options '
'for the Xcode -configuration flag.'),
},
},
{
'name': 'sdk',
'field_type': 'django.forms.CharField',
'default': '',
'field_options': {
'label': 'SDK',
'required': False,
'help_text': ('Include an SDK required to build a project '
'such as "iphonesimulator" for iOS.'),
},
},
]
# Tuple of builds that do not require a path.
multi_file_build_types = (
'ant',
'buck build',
'cmake',
'gradle',
'./gradlew',
'make',
'mvn',
'xcodebuild',
)
def check_dependencies(self):
"""Verify the tool's dependencies are installed.
Returns:
bool:
True if all dependencies for the tool are satisfied. If this
returns False, the worker will not listen for this Tool's queue,
and a warning will be logged.
"""
return is_exe_in_path('infer')
def handle_files(self, files, settings):
"""Perform a review of all files.
Args:
files (list of reviewbot.processing.review.File):
The files to process.
settings (dict):
Tool-specific settings.
"""
self.build_type = settings['build_type']
if self.build_type in self.multi_file_build_types:
self.run_multi_file_build(files, settings)
for f in files:
self.report_file(f)
else:
for f in files:
self.run_single_file_build(f)
self.report_file(f)
def report_file(self, f):
"""Report infer results for a given file.
Args:
f (reviewbot.processing.review.File):
The file to process.
"""
path = f.get_patched_file_path()
if not path:
return
try:
with open('infer-out/report.json', 'r') as fp:
json_data = json.load(fp)
for entry in json_data:
if path == entry['file']:
f.comment('Bug Type: %(bug_type_hum)s\n'
'%(severity)s %(qualifier)s'
% entry,
first_line=entry['line'],
rich_text=True)
except Exception as e:
logger.exception('JSON file could not be opened/loaded: %s', e)
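    # Illustrative sketch (not part of the original tool): the minimal shape of one entry in
    # infer-out/report.json that report_file() above relies on. Only the key names are taken
    # from the code; every value below is a made-up example.
    _EXAMPLE_REPORT_ENTRY = {
        'file': 'src/main/java/Example.java',
        'bug_type_hum': 'Null Dereference',
        'severity': 'ERROR',
        'qualifier': 'object `result` last assigned on line 40 could be null.',
        'line': 42,
    }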
def run_multi_file_build(self, files, settings):
"""Perform execution of FBInfer run on a multi-file build
Args:
files (list of reviewbot.processing.review.File):
The files to process.
settings (dict):
Tool-specific settings.
"""
# CMake requires a compilation before analysis.
if self.build_type == 'cmake':
try:
execute([
'infer',
'compile',
'--',
self.build_type,
'.',
])
# After compilation, update build_type from CMake to Make.
# FBInfer uses Make in `infer run` to do its analysis.
self.build_type = 'make'
except Exception as e:
logger.exception('FBInfer compile command failed with build '
'type: %s %s', self.build_type, e)
# Build the infer command to execute.
infer_cmd = [
'infer',
'run',
'--',
]
# Append the build type.
infer_cmd += shlex.split(self.build_type)
# Check for a specified build target.
if settings['build_target'] != '':
if self.build_type == 'xcodebuild':
infer_cmd.append('-target')
infer_cmd.append(settings['build_target'])
# Append necessary configurations and SDKs for Xcode.
if settings['xcode_configuration'] != '':
infer_cmd += [
'-configuration',
settings['xcode_configuration']
]
if settings['sdk'] != '':
infer_cmd += [
'-sdk',
settings['sdk']
]
try:
execute(infer_cmd)
except Exception as e:
logger.exception('FBInfer run command failed with build type: '
'%s %s', self.build_type, e)
def run_single_file_build(self, f):
"""Perform execution of FBInfer run on a single file
Args:
f (reviewbot.processing.review.File):
The file to process.
"""
path = f.get_patched_file_path()
if not path:
return
try:
execute(['infer', 'run', '--'] +
shlex.split(self.build_type) +
[path])
except Exception as e:
logger.exception('FBInfer run command failed with file: '
'%s %s', path, e)
|
|
#!/usr/bin/env python
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Glance
"""
import logging
import logging.config
import logging.handlers
import os
import tempfile
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_policy import policy
from paste import deploy
from glance import i18n
from glance.version import version_info as version
_ = i18n._
paste_deploy_opts = [
cfg.StrOpt('flavor',
help=_('Partial name of a pipeline in your paste configuration '
'file with the service name removed. For example, if '
'your paste section name is '
'[pipeline:glance-api-keystone] use the value '
'"keystone"')),
cfg.StrOpt('config_file',
help=_('Name of the paste configuration file.')),
]
image_format_opts = [
cfg.ListOpt('container_formats',
default=['ami', 'ari', 'aki', 'bare', 'ovf', 'ova'],
help=_("Supported values for the 'container_format' "
"image attribute"),
deprecated_opts=[cfg.DeprecatedOpt('container_formats',
group='DEFAULT')]),
cfg.ListOpt('disk_formats',
default=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2',
'vdi', 'iso'],
help=_("Supported values for the 'disk_format' "
"image attribute"),
deprecated_opts=[cfg.DeprecatedOpt('disk_formats',
group='DEFAULT')]),
]
task_opts = [
cfg.IntOpt('task_time_to_live',
default=48,
help=_("Time in hours for which a task lives after, either "
"succeeding or failing"),
deprecated_opts=[cfg.DeprecatedOpt('task_time_to_live',
group='DEFAULT')]),
cfg.StrOpt('task_executor',
default='taskflow',
help=_("Specifies which task executor to be used to run the "
"task scripts.")),
cfg.StrOpt('work_dir',
default=None,
help=_('Work dir for asynchronous task operations. '
'The directory set here will be used to operate over '
'images - normally before they are imported in the '
'destination store. When providing work dir, make sure '
'enough space is provided for concurrent tasks to run '
'efficiently without running out of space. A rough '
'estimation can be done by multiplying the number of '
'`max_workers` - or the N of workers running - by an '
'average image size (e.g 500MB). The image size '
'estimation should be done based on the average size in '
'your deployment. Note that depending on the tasks '
'running you may need to multiply this number by some '
'factor depending on what the task does. For example, '
'you may want to double the available size if image '
'conversion is enabled. All this being said, remember '
'these are just estimations and you should do them '
'based on the worst case scenario and be prepared to '
'act in case they were wrong.')),
]
common_opts = [
cfg.BoolOpt('allow_additional_image_properties', default=True,
help=_('Whether to allow users to specify image properties '
'beyond what the image schema provides')),
cfg.IntOpt('image_member_quota', default=128,
help=_('Maximum number of image members per image. '
'Negative values evaluate to unlimited.')),
cfg.IntOpt('image_property_quota', default=128,
help=_('Maximum number of properties allowed on an image. '
'Negative values evaluate to unlimited.')),
cfg.IntOpt('image_tag_quota', default=128,
help=_('Maximum number of tags allowed on an image. '
'Negative values evaluate to unlimited.')),
cfg.IntOpt('image_location_quota', default=10,
help=_('Maximum number of locations allowed on an image. '
'Negative values evaluate to unlimited.')),
cfg.StrOpt('data_api', default='glance.db.sqlalchemy.api',
help=_('Python module path of data access API')),
cfg.IntOpt('limit_param_default', default=25,
help=_('Default value for the number of items returned by a '
'request if not specified explicitly in the request')),
cfg.IntOpt('api_limit_max', default=1000,
help=_('Maximum permissible number of items that could be '
'returned by a request')),
cfg.BoolOpt('show_image_direct_url', default=False,
help=_('Whether to include the backend image storage location '
'in image properties. Revealing storage location can '
'be a security risk, so use this setting with '
'caution!')),
cfg.BoolOpt('show_multiple_locations', default=False,
help=_('Whether to include the backend image locations '
'in image properties. '
'For example, if using the file system store a URL of '
'"file:///path/to/image" will be returned to the user '
'in the \'direct_url\' meta-data field. '
'Revealing storage location can '
'be a security risk, so use this setting with '
                       'caution! This overrides show_image_direct_url.')),
cfg.IntOpt('image_size_cap', default=1099511627776,
max=9223372036854775808,
help=_("Maximum size of image a user can upload in bytes. "
"Defaults to 1099511627776 bytes (1 TB)."
"WARNING: this value should only be increased after "
"careful consideration and must be set to a value under "
"8 EB (9223372036854775808).")),
cfg.StrOpt('user_storage_quota', default='0',
help=_("Set a system wide quota for every user. This value is "
"the total capacity that a user can use across "
"all storage systems. A value of 0 means unlimited."
"Optional unit can be specified for the value. Accepted "
"units are B, KB, MB, GB and TB representing "
"Bytes, KiloBytes, MegaBytes, GigaBytes and TeraBytes "
"respectively. If no unit is specified then Bytes is "
"assumed. Note that there should not be any space "
"between value and unit and units are case sensitive.")),
cfg.BoolOpt('enable_v1_api', default=True,
help=_("Deploy the v1 OpenStack Images API.")),
cfg.BoolOpt('enable_v2_api', default=True,
help=_("Deploy the v2 OpenStack Images API.")),
cfg.BoolOpt('enable_v3_api', default=False,
help=_("Deploy the v3 OpenStack Objects API.")),
cfg.BoolOpt('enable_v1_registry', default=True,
help=_("Deploy the v1 OpenStack Registry API.")),
cfg.BoolOpt('enable_v2_registry', default=True,
help=_("Deploy the v2 OpenStack Registry API.")),
cfg.StrOpt('pydev_worker_debug_host',
help=_('The hostname/IP of the pydev process listening for '
'debug connections')),
cfg.IntOpt('pydev_worker_debug_port', default=5678, min=1, max=65535,
help=_('The port on which a pydev process is listening for '
'connections.')),
cfg.StrOpt('metadata_encryption_key', secret=True,
help=_('AES key for encrypting store \'location\' metadata. '
'This includes, if used, Swift or S3 credentials. '
'Should be set to a random string of length 16, 24 or '
'32 bytes')),
cfg.StrOpt('digest_algorithm', default='sha256',
help=_('Digest algorithm which will be used for digital '
'signature. Use the command "openssl list-message-'
'digest-algorithms" to get the available algorithms'
'supported by the version of OpenSSL on the platform.'
' Examples are "sha1", "sha256", "sha512", etc.')),
]
CONF = cfg.CONF
CONF.register_opts(paste_deploy_opts, group='paste_deploy')
CONF.register_opts(image_format_opts, group='image_format')
CONF.register_opts(task_opts, group='task')
CONF.register_opts(common_opts)
policy.Enforcer(CONF)
def parse_args(args=None, usage=None, default_config_files=None):
if "OSLO_LOCK_PATH" not in os.environ:
lockutils.set_defaults(tempfile.gettempdir())
CONF(args=args,
project='glance',
version=version.cached_version_string(),
usage=usage,
default_config_files=default_config_files)
def parse_cache_args(args=None):
config_files = cfg.find_config_files(project='glance', prog='glance-cache')
parse_args(args=args, default_config_files=config_files)
def _get_deployment_flavor(flavor=None):
"""
Retrieve the paste_deploy.flavor config item, formatted appropriately
for appending to the application name.
:param flavor: if specified, use this setting rather than the
paste_deploy.flavor configuration setting
"""
if not flavor:
flavor = CONF.paste_deploy.flavor
return '' if not flavor else ('-' + flavor)
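# Illustrative sketch (not part of the original module): how the deployment flavor computed above
# is combined with an application name. The application name used here is just an example.
def _example_flavored_app_name():
    # With flavor 'keystone', 'glance-api' resolves to the 'glance-api-keystone'
    # paste pipeline; with an empty flavor the name is left untouched.
    return 'glance-api' + _get_deployment_flavor('keystone')  # -> 'glance-api-keystone'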
def _get_paste_config_path():
paste_suffix = '-paste.ini'
conf_suffix = '.conf'
if CONF.config_file:
# Assume paste config is in a paste.ini file corresponding
# to the last config file
path = CONF.config_file[-1].replace(conf_suffix, paste_suffix)
else:
path = CONF.prog + paste_suffix
return CONF.find_file(os.path.basename(path))
def _get_deployment_config_file():
"""
Retrieve the deployment_config_file config item, formatted as an
absolute pathname.
"""
path = CONF.paste_deploy.config_file
if not path:
path = _get_paste_config_path()
if not path:
msg = _("Unable to locate paste config file for %s.") % CONF.prog
raise RuntimeError(msg)
return os.path.abspath(path)
def load_paste_app(app_name, flavor=None, conf_file=None):
"""
Builds and returns a WSGI app from a paste config file.
We assume the last config file specified in the supplied ConfigOpts
object is the paste config file, if conf_file is None.
:param app_name: name of the application to load
:param flavor: name of the variant of the application to load
:param conf_file: path to the paste config file
:raises RuntimeError when config file cannot be located or application
cannot be loaded from config file
"""
# append the deployment flavor to the application name,
# in order to identify the appropriate paste pipeline
app_name += _get_deployment_flavor(flavor)
if not conf_file:
conf_file = _get_deployment_config_file()
try:
logger = logging.getLogger(__name__)
logger.debug("Loading %(app_name)s from %(conf_file)s",
{'conf_file': conf_file, 'app_name': app_name})
app = deploy.loadapp("config:%s" % conf_file, name=app_name)
# Log the options used when starting if we're in debug mode...
if CONF.debug:
CONF.log_opt_values(logger, logging.DEBUG)
return app
except (LookupError, ImportError) as e:
msg = (_("Unable to load %(app_name)s from "
"configuration file %(conf_file)s."
"\nGot: %(e)r") % {'app_name': app_name,
'conf_file': conf_file,
'e': e})
logger.error(msg)
raise RuntimeError(msg)
|
|
# Under MIT License, see LICENSE.txt
import time
import math
from Util import Pose
from Util.constant import IN_PLAY_MIN_DISTANCE, ROBOT_RADIUS
from Util.role import Role
from ai.Algorithm.evaluation_module import closest_players_to_point_except, \
ball_going_toward_player
from ai.GameDomainObjects import Player
from ai.STA.Strategy.graphless_strategy import GraphlessStrategy
from ai.STA.Tactic.go_kick import GoKick
from ai.STA.Tactic.go_to_position import GoToPosition
from ai.STA.Tactic.goalkeeper import GoalKeeper
from ai.STA.Tactic.position_for_pass import PositionForPass
from ai.STA.Tactic.receive_pass import ReceivePass
from ai.STA.Tactic.tactic_constants import Flags
from config.config import Config
TIME_TO_GET_IN_POSITION = 2
# noinspection PyMethodMayBeStatic
class GraphlessFreeKick(GraphlessStrategy):
def __init__(self, p_game_state, can_kick_in_goal):
super().__init__(p_game_state)
self.robots_in_formation = [p for r, p in self.assigned_roles.items() if r != Role.GOALKEEPER]
self.forbidden_areas = [self.game_state.field.free_kick_avoid_area,
self.game_state.field.our_goal_forbidden_area]
initial_position_for_pass_center = {}
working_kicker_roles = []
for role, player in self.assigned_roles.items():
if player.id in Config()["COACH"]["working_kicker_ids"]:
working_kicker_roles.append(role)
if role == Role.GOALKEEPER:
self.roles_to_tactics[role] = GoalKeeper(self.game_state, player)
else:
self.roles_to_tactics[role] = PositionForPass(self.game_state,
player,
robots_in_formation=self.robots_in_formation,
auto_position=True,
forbidden_areas=self.game_state.field.border_limits
+ self.forbidden_areas)
initial_position_for_pass_center[role] = self.roles_to_tactics[role].area.center # Hack
# Find position for ball player closest to ball
self.closest_role = None
ball_position = self.game_state.ball_position
for r, position in initial_position_for_pass_center.items():
if self.closest_role is None or (
(initial_position_for_pass_center[self.closest_role] - ball_position).norm > (position - ball_position).norm and
r in working_kicker_roles):
self.closest_role = r
self.ball_start_position = self.game_state.ball.position
self.next_state = self.get_in_position
self.current_pass_receiver = None
self.can_kick_in_goal = can_kick_in_goal
self.first_passing_player = None
self.start_time = time.time()
def get_in_position(self):
for role, player in self.assigned_roles.items():
if role == Role.GOALKEEPER:
continue
if self.is_closest_not_goalkeeper(player):
self.logger.info(f"Robot {player.id} is closest => go behind ball")
self.first_passing_player = player
their_goal_to_ball = self.game_state.ball_position - self.game_state.field.their_goal
go_behind_position = self.game_state.ball_position + their_goal_to_ball.unit * ROBOT_RADIUS * 2.0
go_behind_orientation = their_goal_to_ball.angle + math.pi
self.roles_to_tactics[role] = GoToPosition(self.game_state,
player,
target=Pose(go_behind_position, go_behind_orientation),
cruise_speed=1)
else:
self.roles_to_tactics[role] = PositionForPass(self.game_state,
player,
auto_position=True,
robots_in_formation=self.robots_in_formation)
self.next_state = self.wait_before_pass
def wait_before_pass(self):
if time.time() - self.start_time > TIME_TO_GET_IN_POSITION:
self.next_state = self.pass_to_receiver
def pass_to_receiver(self):
for role, player in self.assigned_roles.items():
if role == Role.GOALKEEPER:
continue
tactic = self.roles_to_tactics[role]
if isinstance(tactic, GoKick):
if not self.is_closest_not_goalkeeper(player):
self.logger.info(f"Robot {player.id} was not closest. Returning to PositionForPass")
self.roles_to_tactics[role] = PositionForPass(self.game_state,
player,
auto_position=True,
robots_in_formation=self.robots_in_formation)
elif tactic.status_flag == Flags.PASS_TO_PLAYER:
self.logger.info(
f"Robot {player.id} decided to make a pass to Robot {tactic.current_player_target.id}")
self._assign_target_to_receive_pass(tactic.current_player_target, passing_robot=player)
self.logger.info("Switching to receive_pass")
self.next_state = self.receive_pass
                    return  # We don't want to override self.current_pass_receiver
elif self.is_closest_not_goalkeeper(player):
self.logger.info(f"Robot {player.id} is closest! Switching to GoKick")
can_kick_in_goal = self.can_kick_in_goal if self.first_passing_player else True
self.roles_to_tactics[role] = GoKick(self.game_state,
player,
auto_update_target=True,
can_kick_in_goal=can_kick_in_goal,
forbidden_areas=self.game_state.field.border_limits
+ self.forbidden_areas
)
elif ball_going_toward_player(self.game_state, player):
self.logger.info(f"Ball is going toward Robot {player.id}!")
self._assign_target_to_receive_pass(player, passing_robot=None)
self.logger.info("Switching to receive_pass")
self.next_state = self.receive_pass
# Robots must not stay in receive pass if they are not receiving a pass
elif isinstance(tactic, ReceivePass):
self.roles_to_tactics[role] = PositionForPass(self.game_state,
player,
auto_position=True,
robots_in_formation=self.robots_in_formation)
def receive_pass(self):
for role, player in self.assigned_roles.items():
if role == Role.GOALKEEPER:
continue
tactic = self.roles_to_tactics[role]
if isinstance(tactic, GoKick):
gokick_target = tactic.current_player_target
if gokick_target is not None:
if gokick_target != self.current_pass_receiver:
self.logger.info(
f"Robot {player.id} changed its target! Last target: Robot {self.current_pass_receiver.id} -- New target : {gokick_target.id}")
self.logger.info(f"Switching Robot {self.current_pass_receiver.id} tactic to PositionForPass")
last_receiver_role = self.game_state.get_role_by_player_id(self.current_pass_receiver.id)
self.roles_to_tactics[last_receiver_role] = PositionForPass(self.game_state,
self.current_pass_receiver,
auto_position=True,
robots_in_formation=self.robots_in_formation)
self._assign_target_to_receive_pass(gokick_target, player)
if tactic.status_flag == Flags.SUCCESS:
self.logger.info(f"Robot {player.id} has kicked!")
receiver_role = self.game_state.get_role_by_player_id(self.current_pass_receiver.id)
receiver_tactic = self.roles_to_tactics[receiver_role]
assert isinstance(receiver_tactic, ReceivePass)
receiver_tactic.passing_robot_has_kicked = True
elif isinstance(tactic, ReceivePass) and tactic.status_flag == Flags.SUCCESS:
self.logger.info(f"Robot {player.id} has received ball!")
self.logger.info("Switching to go_get_ball")
self.next_state = self.pass_to_receiver
# FIXME
if all(not isinstance(self.roles_to_tactics[role], GoKick) for role, _ in self.assigned_roles.items()):
self.logger.info("No robot is assigned to GoKick! Switching to go_get_ball")
self.next_state = self.pass_to_receiver
def _assign_target_to_receive_pass(self, target: Player, passing_robot):
self.logger.info(f"Switching Robot {target.id} tactic to ReceivePass")
role = self.game_state.get_role_by_player_id(target.id)
self.roles_to_tactics[role] = ReceivePass(self.game_state, target, passing_robot=passing_robot)
self.current_pass_receiver = target
@classmethod
def required_roles(cls):
return [Role.GOALKEEPER,
Role.FIRST_ATTACK,
Role.MIDDLE]
@classmethod
def optional_roles(cls):
return [Role.SECOND_ATTACK,
Role.FIRST_DEFENCE,
Role.SECOND_DEFENCE]
def is_closest_not_goalkeeper(self, player):
ban_players = self.game_state.double_touch_checker.ban_players
if player in ban_players:
return False
role = self.game_state.get_role_by_player_id(player.id)
if (self.ball_start_position - self.game_state.ball.position).norm <= IN_PLAY_MIN_DISTANCE:
return role == self.closest_role
closests = closest_players_to_point_except(self.game_state.ball.position,
except_roles=[Role.GOALKEEPER],
except_players=ban_players)
return len(closests) > 0 and closests[0].player == player
|
|
# Copyright 2020 The gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that obviate explicit stubs and explicit channels."""
import collections
import datetime
import logging
import os
import threading
from typing import (Any, AnyStr, Callable, Dict, Iterator, Optional, Sequence,
Tuple, TypeVar, Union)
import grpc
from grpc.experimental import experimental_api
RequestType = TypeVar('RequestType')
ResponseType = TypeVar('ResponseType')
OptionsType = Sequence[Tuple[str, str]]
CacheKey = Tuple[str, OptionsType, Optional[grpc.ChannelCredentials],
Optional[grpc.Compression]]
_LOGGER = logging.getLogger(__name__)
_EVICTION_PERIOD_KEY = "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS"
if _EVICTION_PERIOD_KEY in os.environ:
_EVICTION_PERIOD = datetime.timedelta(
seconds=float(os.environ[_EVICTION_PERIOD_KEY]))
_LOGGER.debug("Setting managed channel eviction period to %s",
_EVICTION_PERIOD)
else:
_EVICTION_PERIOD = datetime.timedelta(minutes=10)
_MAXIMUM_CHANNELS_KEY = "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM"
if _MAXIMUM_CHANNELS_KEY in os.environ:
_MAXIMUM_CHANNELS = int(os.environ[_MAXIMUM_CHANNELS_KEY])
_LOGGER.debug("Setting maximum managed channels to %d", _MAXIMUM_CHANNELS)
else:
_MAXIMUM_CHANNELS = 2**8
_DEFAULT_TIMEOUT_KEY = "GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS"
if _DEFAULT_TIMEOUT_KEY in os.environ:
_DEFAULT_TIMEOUT = float(os.environ[_DEFAULT_TIMEOUT_KEY])
_LOGGER.debug("Setting default timeout seconds to %f", _DEFAULT_TIMEOUT)
else:
_DEFAULT_TIMEOUT = 60.0
def _create_channel(target: str, options: Sequence[Tuple[str, str]],
channel_credentials: Optional[grpc.ChannelCredentials],
compression: Optional[grpc.Compression]) -> grpc.Channel:
_LOGGER.debug(
f"Creating secure channel with credentials '{channel_credentials}', " +
f"options '{options}' and compression '{compression}'")
return grpc.secure_channel(target,
credentials=channel_credentials,
options=options,
compression=compression)
class ChannelCache:
# NOTE(rbellevi): Untyped due to reference cycle.
_singleton = None
_lock: threading.RLock = threading.RLock()
_condition: threading.Condition = threading.Condition(lock=_lock)
_eviction_ready: threading.Event = threading.Event()
_mapping: Dict[CacheKey, Tuple[grpc.Channel, datetime.datetime]]
_eviction_thread: threading.Thread
def __init__(self):
self._mapping = collections.OrderedDict()
self._eviction_thread = threading.Thread(
target=ChannelCache._perform_evictions, daemon=True)
self._eviction_thread.start()
@staticmethod
def get():
with ChannelCache._lock:
if ChannelCache._singleton is None:
ChannelCache._singleton = ChannelCache()
ChannelCache._eviction_ready.wait()
return ChannelCache._singleton
def _evict_locked(self, key: CacheKey):
channel, _ = self._mapping.pop(key)
_LOGGER.debug("Evicting channel %s with configuration %s.", channel,
key)
channel.close()
del channel
@staticmethod
def _perform_evictions():
while True:
with ChannelCache._lock:
ChannelCache._eviction_ready.set()
if not ChannelCache._singleton._mapping:
ChannelCache._condition.wait()
elif len(ChannelCache._singleton._mapping) > _MAXIMUM_CHANNELS:
key = next(iter(ChannelCache._singleton._mapping.keys()))
ChannelCache._singleton._evict_locked(key)
# And immediately reevaluate.
else:
key, (_, eviction_time) = next(
iter(ChannelCache._singleton._mapping.items()))
now = datetime.datetime.now()
if eviction_time <= now:
ChannelCache._singleton._evict_locked(key)
continue
else:
time_to_eviction = (eviction_time - now).total_seconds()
# NOTE: We aim to *eventually* coalesce to a state in
# which no overdue channels are in the cache and the
                        # length of the cache is no longer than _MAXIMUM_CHANNELS.
# We tolerate momentary states in which these two
# criteria are not met.
ChannelCache._condition.wait(timeout=time_to_eviction)
def get_channel(self, target: str, options: Sequence[Tuple[str, str]],
channel_credentials: Optional[grpc.ChannelCredentials],
insecure: bool,
compression: Optional[grpc.Compression]) -> grpc.Channel:
if insecure and channel_credentials:
raise ValueError("The insecure option is mutually exclusive with " +
"the channel_credentials option. Please use one " +
"or the other.")
if insecure:
channel_credentials = grpc.experimental.insecure_channel_credentials(
)
elif channel_credentials is None:
_LOGGER.debug("Defaulting to SSL channel credentials.")
channel_credentials = grpc.ssl_channel_credentials()
key = (target, options, channel_credentials, compression)
with self._lock:
channel_data = self._mapping.get(key, None)
if channel_data is not None:
channel = channel_data[0]
self._mapping.pop(key)
self._mapping[key] = (channel, datetime.datetime.now() +
_EVICTION_PERIOD)
return channel
else:
channel = _create_channel(target, options, channel_credentials,
compression)
self._mapping[key] = (channel, datetime.datetime.now() +
_EVICTION_PERIOD)
if len(self._mapping) == 1 or len(
self._mapping) >= _MAXIMUM_CHANNELS:
self._condition.notify()
return channel
def _test_only_channel_count(self) -> int:
with self._lock:
return len(self._mapping)
@experimental_api
def unary_unary(
request: RequestType,
target: str,
method: str,
request_serializer: Optional[Callable[[Any], bytes]] = None,
response_deserializer: Optional[Callable[[bytes], Any]] = None,
options: Sequence[Tuple[AnyStr, AnyStr]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
insecure: bool = False,
call_credentials: Optional[grpc.CallCredentials] = None,
compression: Optional[grpc.Compression] = None,
wait_for_ready: Optional[bool] = None,
timeout: Optional[float] = _DEFAULT_TIMEOUT,
metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> ResponseType:
"""Invokes a unary-unary RPC without an explicitly specified channel.
THIS IS AN EXPERIMENTAL API.
This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will also be
evicted if more than a configured maximum accumulate.
The default eviction period is 10 minutes. One may set the environment
variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
The default maximum number of channels is 256. One may set the
environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
this.
Args:
      request: The request value for the RPC.
target: The server address.
method: The name of the RPC method.
request_serializer: Optional :term:`serializer` for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional :term:`deserializer` for deserializing the response
message. Response goes undeserialized in case None is passed.
options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
runtime) to configure the channel.
channel_credentials: A credential applied to the whole channel, e.g. the
return value of grpc.ssl_channel_credentials() or
grpc.insecure_channel_credentials().
insecure: If True, specifies channel_credentials as
:term:`grpc.insecure_channel_credentials()`. This option is mutually
exclusive with the `channel_credentials` option.
call_credentials: A call credential applied to each call individually,
e.g. the output of grpc.metadata_call_credentials() or
grpc.access_token_call_credentials().
compression: An optional value indicating the compression method to be
used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
wait_for_ready: An optional flag indicating whether the RPC should fail
immediately if the connection is not ready at the time the RPC is
invoked, or if it should wait until the connection to the server
becomes ready. When using this option, the user will likely also want
to set a timeout. Defaults to True.
timeout: An optional duration of time in seconds to allow for the RPC,
after which an exception will be raised. If timeout is unspecified,
defaults to a timeout controlled by the
GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is
unset, defaults to 60 seconds. Supply a value of None to indicate that
no timeout should be enforced.
metadata: Optional metadata to send to the server.
Returns:
The response to the RPC.
"""
channel = ChannelCache.get().get_channel(target, options,
channel_credentials, insecure,
compression)
multicallable = channel.unary_unary(method, request_serializer,
response_deserializer)
wait_for_ready = wait_for_ready if wait_for_ready is not None else True
return multicallable(request,
metadata=metadata,
wait_for_ready=wait_for_ready,
credentials=call_credentials,
timeout=timeout)
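# Illustrative sketch (not part of the original module): a minimal caller of the unary_unary
# helper above. The target address, method name and request bytes are hypothetical; with no
# request_serializer/response_deserializer supplied, raw bytes are sent and received as-is.
def _example_unary_unary_call():
    request = b'raw-request-bytes'
    return unary_unary(request,
                       target='localhost:50051',
                       method='/example.EchoService/Echo',
                       insecure=True,
                       timeout=5.0)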
@experimental_api
def unary_stream(
request: RequestType,
target: str,
method: str,
request_serializer: Optional[Callable[[Any], bytes]] = None,
response_deserializer: Optional[Callable[[bytes], Any]] = None,
options: Sequence[Tuple[AnyStr, AnyStr]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
insecure: bool = False,
call_credentials: Optional[grpc.CallCredentials] = None,
compression: Optional[grpc.Compression] = None,
wait_for_ready: Optional[bool] = None,
timeout: Optional[float] = _DEFAULT_TIMEOUT,
metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> Iterator[ResponseType]:
"""Invokes a unary-stream RPC without an explicitly specified channel.
THIS IS AN EXPERIMENTAL API.
This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will also be
evicted if more than a configured maximum accumulate.
The default eviction period is 10 minutes. One may set the environment
variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
The default maximum number of channels is 256. One may set the
environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
this.
Args:
      request: The request value for the RPC.
target: The server address.
method: The name of the RPC method.
request_serializer: Optional :term:`serializer` for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional :term:`deserializer` for deserializing the response
message. Response goes undeserialized in case None is passed.
options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
runtime) to configure the channel.
channel_credentials: A credential applied to the whole channel, e.g. the
return value of grpc.ssl_channel_credentials().
insecure: If True, specifies channel_credentials as
:term:`grpc.insecure_channel_credentials()`. This option is mutually
exclusive with the `channel_credentials` option.
call_credentials: A call credential applied to each call individually,
e.g. the output of grpc.metadata_call_credentials() or
grpc.access_token_call_credentials().
compression: An optional value indicating the compression method to be
used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
wait_for_ready: An optional flag indicating whether the RPC should fail
immediately if the connection is not ready at the time the RPC is
invoked, or if it should wait until the connection to the server
becomes ready. When using this option, the user will likely also want
to set a timeout. Defaults to True.
timeout: An optional duration of time in seconds to allow for the RPC,
after which an exception will be raised. If timeout is unspecified,
defaults to a timeout controlled by the
GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is
unset, defaults to 60 seconds. Supply a value of None to indicate that
no timeout should be enforced.
metadata: Optional metadata to send to the server.
Returns:
An iterator of responses.
"""
channel = ChannelCache.get().get_channel(target, options,
channel_credentials, insecure,
compression)
multicallable = channel.unary_stream(method, request_serializer,
response_deserializer)
wait_for_ready = wait_for_ready if wait_for_ready is not None else True
return multicallable(request,
metadata=metadata,
wait_for_ready=wait_for_ready,
credentials=call_credentials,
timeout=timeout)
@experimental_api
def stream_unary(
request_iterator: Iterator[RequestType],
target: str,
method: str,
request_serializer: Optional[Callable[[Any], bytes]] = None,
response_deserializer: Optional[Callable[[bytes], Any]] = None,
options: Sequence[Tuple[AnyStr, AnyStr]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
insecure: bool = False,
call_credentials: Optional[grpc.CallCredentials] = None,
compression: Optional[grpc.Compression] = None,
wait_for_ready: Optional[bool] = None,
timeout: Optional[float] = _DEFAULT_TIMEOUT,
metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> ResponseType:
"""Invokes a stream-unary RPC without an explicitly specified channel.
THIS IS AN EXPERIMENTAL API.
This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will also be
evicted if more than a configured maximum accumulate.
The default eviction period is 10 minutes. One may set the environment
variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
The default maximum number of channels is 256. One may set the
environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
this.
Args:
request_iterator: An iterator that yields request values for the RPC.
target: The server address.
method: The name of the RPC method.
request_serializer: Optional :term:`serializer` for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional :term:`deserializer` for deserializing the response
message. Response goes undeserialized in case None is passed.
options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
runtime) to configure the channel.
channel_credentials: A credential applied to the whole channel, e.g. the
return value of grpc.ssl_channel_credentials().
call_credentials: A call credential applied to each call individually,
e.g. the output of grpc.metadata_call_credentials() or
grpc.access_token_call_credentials().
insecure: If True, specifies channel_credentials as
:term:`grpc.insecure_channel_credentials()`. This option is mutually
exclusive with the `channel_credentials` option.
compression: An optional value indicating the compression method to be
used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
wait_for_ready: An optional flag indicating whether the RPC should fail
immediately if the connection is not ready at the time the RPC is
invoked, or if it should wait until the connection to the server
becomes ready. When using this option, the user will likely also want
to set a timeout. Defaults to True.
timeout: An optional duration of time in seconds to allow for the RPC,
after which an exception will be raised. If timeout is unspecified,
defaults to a timeout controlled by the
GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is
unset, defaults to 60 seconds. Supply a value of None to indicate that
no timeout should be enforced.
metadata: Optional metadata to send to the server.
Returns:
The response to the RPC.
"""
channel = ChannelCache.get().get_channel(target, options,
channel_credentials, insecure,
compression)
multicallable = channel.stream_unary(method, request_serializer,
response_deserializer)
wait_for_ready = wait_for_ready if wait_for_ready is not None else True
return multicallable(request_iterator,
metadata=metadata,
wait_for_ready=wait_for_ready,
credentials=call_credentials,
timeout=timeout)
@experimental_api
def stream_stream(
request_iterator: Iterator[RequestType],
target: str,
method: str,
request_serializer: Optional[Callable[[Any], bytes]] = None,
response_deserializer: Optional[Callable[[bytes], Any]] = None,
options: Sequence[Tuple[AnyStr, AnyStr]] = (),
channel_credentials: Optional[grpc.ChannelCredentials] = None,
insecure: bool = False,
call_credentials: Optional[grpc.CallCredentials] = None,
compression: Optional[grpc.Compression] = None,
wait_for_ready: Optional[bool] = None,
timeout: Optional[float] = _DEFAULT_TIMEOUT,
metadata: Optional[Sequence[Tuple[str, Union[str, bytes]]]] = None
) -> Iterator[ResponseType]:
"""Invokes a stream-stream RPC without an explicitly specified channel.
THIS IS AN EXPERIMENTAL API.
This is backed by a per-process cache of channels. Channels are evicted
    from the cache after a fixed period by a background thread. Channels will also be
evicted if more than a configured maximum accumulate.
The default eviction period is 10 minutes. One may set the environment
variable "GRPC_PYTHON_MANAGED_CHANNEL_EVICTION_SECONDS" to configure this.
The default maximum number of channels is 256. One may set the
environment variable "GRPC_PYTHON_MANAGED_CHANNEL_MAXIMUM" to configure
this.
Args:
request_iterator: An iterator that yields request values for the RPC.
target: The server address.
method: The name of the RPC method.
request_serializer: Optional :term:`serializer` for serializing the request
message. Request goes unserialized in case None is passed.
response_deserializer: Optional :term:`deserializer` for deserializing the response
message. Response goes undeserialized in case None is passed.
options: An optional list of key-value pairs (:term:`channel_arguments` in gRPC Core
runtime) to configure the channel.
channel_credentials: A credential applied to the whole channel, e.g. the
return value of grpc.ssl_channel_credentials().
call_credentials: A call credential applied to each call individually,
e.g. the output of grpc.metadata_call_credentials() or
grpc.access_token_call_credentials().
insecure: If True, specifies channel_credentials as
:term:`grpc.insecure_channel_credentials()`. This option is mutually
exclusive with the `channel_credentials` option.
compression: An optional value indicating the compression method to be
used over the lifetime of the channel, e.g. grpc.Compression.Gzip.
wait_for_ready: An optional flag indicating whether the RPC should fail
immediately if the connection is not ready at the time the RPC is
invoked, or if it should wait until the connection to the server
becomes ready. When using this option, the user will likely also want
to set a timeout. Defaults to True.
timeout: An optional duration of time in seconds to allow for the RPC,
after which an exception will be raised. If timeout is unspecified,
defaults to a timeout controlled by the
GRPC_PYTHON_DEFAULT_TIMEOUT_SECONDS environment variable. If that is
unset, defaults to 60 seconds. Supply a value of None to indicate that
no timeout should be enforced.
metadata: Optional metadata to send to the server.
Returns:
An iterator of responses.
"""
channel = ChannelCache.get().get_channel(target, options,
channel_credentials, insecure,
compression)
multicallable = channel.stream_stream(method, request_serializer,
response_deserializer)
wait_for_ready = wait_for_ready if wait_for_ready is not None else True
return multicallable(request_iterator,
metadata=metadata,
wait_for_ready=wait_for_ready,
credentials=call_credentials,
timeout=timeout)
|
|
# -*- coding: utf-8
from copy import deepcopy
from django.utils.translation import ugettext_lazy as _
from django.contrib import admin
from django.core.urlresolvers import reverse
from bccf.admin import make_featured, make_unfeatured
from mezzanine.core.admin import DisplayableAdmin
from pybb.models import Category, Forum, Topic, Post, Profile, Attachment, PollAnswer
from pybb import util
username_field = util.get_username_field()
class ForumInlineAdmin(admin.TabularInline):
model = Forum
fields = ['name', 'hidden', 'position']
extra = 0
class CategoryAdmin(admin.ModelAdmin):
list_display = ['name', 'position', 'hidden', 'forum_count']
list_per_page = 20
ordering = ['position']
search_fields = ['name']
list_editable = ['position']
inlines = [ForumInlineAdmin]
class ForumAdmin(admin.ModelAdmin):
list_display = ['name', 'category', 'hidden', 'position', 'topic_count', ]
list_per_page = 20
raw_id_fields = ['moderators']
ordering = ['-category']
search_fields = ['name', 'category__name']
list_editable = ['position', 'hidden']
fieldsets = (
(None, {
'fields': ('category', 'name', 'hidden', 'position', )
}
),
(_('Additional options'), {
'classes': ('collapse',),
'fields': ('updated', 'description', 'headline', 'post_count', 'moderators')
}
),
)
class PollAnswerAdmin(admin.TabularInline):
model = PollAnswer
fields = ['text', ]
extra = 0
class TopicAdmin(admin.ModelAdmin):
list_display = ['name', 'forum', 'created', 'head', 'post_count', 'poll_type',]
list_per_page = 20
raw_id_fields = ['user', 'subscribers']
ordering = ['-created']
date_hierarchy = 'created'
search_fields = ['name']
fieldsets = (
(None, {
'fields': ('forum', 'name', 'user', ('created', 'updated'), 'poll_type',)
}
),
(_('Additional options'), {
'classes': ('collapse',),
'fields': (('views', 'post_count'), ('sticky', 'closed'), 'subscribers')
}
),
)
inlines = [PollAnswerAdmin, ]
class BCCFTopicAdmin(DisplayableAdmin):
actions = [make_featured, make_unfeatured]
list_per_page = 20
raw_id_fields = ['user', 'subscribers']
ordering = ['-created']
date_hierarchy = 'created'
search_fields = ['name']
def __init__(self, *args, **kwargs):
super(BCCFTopicAdmin, self).__init__(*args, **kwargs)
if self.fieldsets == DisplayableAdmin.fieldsets:
self.fieldsets = deepcopy(self.fieldsets)
for field in reversed(['forum',
'name',
'user',
'closed',
'content',
'bccf_topic',
'featured',
'image',
'page_for']):
self.fieldsets[0][1]['fields'].insert(3, field)
if self.list_display == DisplayableAdmin.list_display:
self.list_display = list(deepcopy(self.list_display))
for fieldname in ['head', 'post_count', 'featured']:
self.list_display.insert(-1, fieldname)
if self.list_filter == DisplayableAdmin.list_filter:
self.list_filter = list(deepcopy(self.list_filter))
for fieldname in ['featured']:
self.list_filter.insert(-1, fieldname)
class TopicReadTrackerAdmin(admin.ModelAdmin):
list_display = ['topic', 'user', 'time_stamp']
search_fields = ['user__%s' % username_field]
class ForumReadTrackerAdmin(admin.ModelAdmin):
list_display = ['forum', 'user', 'time_stamp']
search_fields = ['user__%s' % username_field]
class PostAdmin(admin.ModelAdmin):
list_display = ['topic', 'user', 'created', 'updated', 'on_moderation', 'summary']
list_editable = ['on_moderation']
list_per_page = 20
actions = ['make_approve', 'make_unapprove']
raw_id_fields = ['user', 'topic']
ordering = ['-created']
date_hierarchy = 'created'
search_fields = ['body']
fieldsets = (
(None, {
'fields': ('topic', 'user')
}
),
(_('Additional options'), {
'classes': ('collapse',),
'fields' : (('created', 'updated'), 'user_ip')
}
),
(_('Message'), {
'fields': ('body', 'body_html', 'body_text', 'on_moderation')
}
),
)
    def make_approve(self, request, queryset):
        num_rows = queryset.update(on_moderation=False)
        if num_rows == 1:
            message_bit = '1 post approved'
        else:
            message_bit = '%s posts approved' % num_rows
        self.message_user(request, message_bit)  # report the result back to the admin user
    make_approve.short_description = "Approve selected posts"
    def make_unapprove(self, request, queryset):
        num_rows = queryset.update(on_moderation=True)
        if num_rows == 1:
            message_bit = '1 post disapproved'
        else:
            message_bit = '%s posts disapproved' % num_rows
        self.message_user(request, message_bit)  # report the result back to the admin user
    make_unapprove.short_description = "Disapprove selected posts"
class ProfileAdmin(admin.ModelAdmin):
list_display = ['user', 'time_zone', 'language', 'post_count']
list_per_page = 20
ordering = ['-user']
search_fields = ['user__%s' % username_field]
fieldsets = (
(None, {
'fields': ('user', 'time_zone', 'language')
}
),
(_('Additional options'), {
'classes': ('collapse',),
'fields' : ('avatar', 'signature', 'show_signatures')
}
),
)
class AttachmentAdmin(admin.ModelAdmin):
list_display = ['file', 'size', 'admin_view_post', 'admin_edit_post']
def admin_view_post(self, obj):
return u'<a href="%s">view</a>' % obj.post.get_absolute_url()
admin_view_post.allow_tags = True
admin_view_post.short_description = _('View post')
def admin_edit_post(self, obj):
return u'<a href="%s">edit</a>' % reverse('admin:pybb_post_change', args=[obj.post.pk])
admin_edit_post.allow_tags = True
admin_edit_post.short_description = _('Edit post')
#admin.site.register(Category, CategoryAdmin)
admin.site.register(Forum, ForumAdmin)
admin.site.register(Topic, BCCFTopicAdmin)
admin.site.register(Post, PostAdmin)
#admin.site.register(Attachment, AttachmentAdmin)
if util.get_pybb_profile_model() == Profile:
admin.site.register(Profile, ProfileAdmin)
# This can be used to debug read/unread trackers
#admin.site.register(TopicReadTracker, TopicReadTrackerAdmin)
#admin.site.register(ForumReadTracker, ForumReadTrackerAdmin)
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the listtransactions API."""
from decimal import Decimal
from io import BytesIO
from test_framework.messages import COIN, CTransaction
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_array_result,
assert_equal,
hex_str_to_bytes,
)
def tx_from_hex(hexstring):
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(hexstring))
tx.deserialize(f)
return tx
class ListTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].generate(1) # Get out of IBD
self.sync_all()
# Simple send, 0 to 1:
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid},
{"category": "send", "amount": Decimal("-0.1"), "confirmations": 0})
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "amount": Decimal("0.1"), "confirmations": 0})
# mine a block, confirmations should change:
blockhash = self.nodes[0].generate(1)[0]
blockheight = self.nodes[0].getblockheader(blockhash)['height']
self.sync_all()
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid},
{"category": "send", "amount": Decimal("-0.1"), "confirmations": 1, "blockhash": blockhash, "blockheight": blockheight})
assert_array_result(self.nodes[1].listtransactions(),
{"txid": txid},
{"category": "receive", "amount": Decimal("0.1"), "confirmations": 1, "blockhash": blockhash, "blockheight": blockheight})
# send-to-self:
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "send"},
{"amount": Decimal("-0.2")})
assert_array_result(self.nodes[0].listtransactions(),
{"txid": txid, "category": "receive"},
{"amount": Decimal("0.2")})
# sendmany from node1: twice to self, twice to node2:
send_to = {self.nodes[0].getnewaddress(): 0.11,
self.nodes[1].getnewaddress(): 0.22,
self.nodes[0].getnewaddress(): 0.33,
self.nodes[1].getnewaddress(): 0.44}
txid = self.nodes[1].sendmany("", send_to)
self.sync_all()
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.11")},
{"txid": txid})
assert_array_result(self.nodes[0].listtransactions(),
{"category": "receive", "amount": Decimal("0.11")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.22")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "receive", "amount": Decimal("0.22")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.33")},
{"txid": txid})
assert_array_result(self.nodes[0].listtransactions(),
{"category": "receive", "amount": Decimal("0.33")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "send", "amount": Decimal("-0.44")},
{"txid": txid})
assert_array_result(self.nodes[1].listtransactions(),
{"category": "receive", "amount": Decimal("0.44")},
{"txid": txid})
if not self.options.descriptors:
# include_watchonly is a legacy wallet feature, so don't test it for descriptor wallets
pubkey = self.nodes[1].getaddressinfo(self.nodes[1].getnewaddress())['pubkey']
multisig = self.nodes[1].createmultisig(1, [pubkey])
self.nodes[0].importaddress(multisig["redeemScript"], "watchonly", False, True)
txid = self.nodes[1].sendtoaddress(multisig["address"], 0.1)
self.nodes[1].generate(1)
self.sync_all()
assert_equal(len(self.nodes[0].listtransactions(label="watchonly", include_watchonly=True)), 1)
assert_equal(len(self.nodes[0].listtransactions(dummy="watchonly", include_watchonly=True)), 1)
assert len(self.nodes[0].listtransactions(label="watchonly", count=100, include_watchonly=False)) == 0
assert_array_result(self.nodes[0].listtransactions(label="watchonly", count=100, include_watchonly=True),
{"category": "receive", "amount": Decimal("0.1")},
{"txid": txid, "label": "watchonly"})
self.run_rbf_opt_in_test()
# Check that the opt-in-rbf flag works properly, for sent and received
# transactions.
def run_rbf_opt_in_test(self):
# Check whether a transaction signals opt-in RBF itself
def is_opt_in(node, txid):
rawtx = node.getrawtransaction(txid, 1)
for x in rawtx["vin"]:
if x["sequence"] < 0xfffffffe:
return True
return False
# Find an unconfirmed output matching a certain txid
def get_unconfirmed_utxo_entry(node, txid_to_match):
utxo = node.listunspent(0, 0)
for i in utxo:
if i["txid"] == txid_to_match:
return i
return None
# 1. Chain a few transactions that don't opt-in.
txid_1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
assert not is_opt_in(self.nodes[0], txid_1)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
self.sync_mempools()
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_1}, {"bip125-replaceable": "no"})
# Tx2 will build off txid_1, still not opting in to RBF.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_1)
assert_equal(utxo_to_use["safe"], True)
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_1)
assert_equal(utxo_to_use["safe"], False)
# Create tx2 using createrawtransaction
inputs = [{"txid": utxo_to_use["txid"], "vout": utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.999}
tx2 = self.nodes[1].createrawtransaction(inputs, outputs)
tx2_signed = self.nodes[1].signrawtransactionwithwallet(tx2)["hex"]
txid_2 = self.nodes[1].sendrawtransaction(tx2_signed)
# ...and check the result
assert not is_opt_in(self.nodes[1], txid_2)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
self.sync_mempools()
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_2}, {"bip125-replaceable": "no"})
# Tx3 will opt-in to RBF
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[0], txid_2)
inputs = [{"txid": txid_2, "vout": utxo_to_use["vout"]}]
outputs = {self.nodes[1].getnewaddress(): 0.998}
tx3 = self.nodes[0].createrawtransaction(inputs, outputs)
tx3_modified = tx_from_hex(tx3)
tx3_modified.vin[0].nSequence = 0
tx3 = tx3_modified.serialize().hex()
tx3_signed = self.nodes[0].signrawtransactionwithwallet(tx3)['hex']
txid_3 = self.nodes[0].sendrawtransaction(tx3_signed)
assert is_opt_in(self.nodes[0], txid_3)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
self.sync_mempools()
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_3}, {"bip125-replaceable": "yes"})
# Tx4 will chain off tx3. Doesn't signal itself, but depends on one
# that does.
utxo_to_use = get_unconfirmed_utxo_entry(self.nodes[1], txid_3)
inputs = [{"txid": txid_3, "vout": utxo_to_use["vout"]}]
outputs = {self.nodes[0].getnewaddress(): 0.997}
tx4 = self.nodes[1].createrawtransaction(inputs, outputs)
tx4_signed = self.nodes[1].signrawtransactionwithwallet(tx4)["hex"]
txid_4 = self.nodes[1].sendrawtransaction(tx4_signed)
assert not is_opt_in(self.nodes[1], txid_4)
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
self.sync_mempools()
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "yes"})
# Replace tx3, and check that tx4 becomes unknown
tx3_b = tx3_modified
tx3_b.vout[0].nValue -= int(Decimal("0.004") * COIN) # bump the fee
tx3_b = tx3_b.serialize().hex()
tx3_b_signed = self.nodes[0].signrawtransactionwithwallet(tx3_b)['hex']
txid_3b = self.nodes[0].sendrawtransaction(tx3_b_signed, 0)
assert is_opt_in(self.nodes[0], txid_3b)
assert_array_result(self.nodes[0].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
self.sync_mempools()
assert_array_result(self.nodes[1].listtransactions(), {"txid": txid_4}, {"bip125-replaceable": "unknown"})
# Check gettransaction as well:
for n in self.nodes[0:2]:
assert_equal(n.gettransaction(txid_1)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_2)["bip125-replaceable"], "no")
assert_equal(n.gettransaction(txid_3)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_3b)["bip125-replaceable"], "yes")
assert_equal(n.gettransaction(txid_4)["bip125-replaceable"], "unknown")
# After mining a transaction, it's no longer BIP125-replaceable
self.nodes[0].generate(1)
assert txid_3b not in self.nodes[0].getrawmempool()
assert_equal(self.nodes[0].gettransaction(txid_3b)["bip125-replaceable"], "no")
assert_equal(self.nodes[0].gettransaction(txid_4)["bip125-replaceable"], "unknown")
if __name__ == '__main__':
ListTransactionsTest().main()
|
|
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import weakref
from neutron_lib.db import api
from neutron_lib.db import model_base
from neutron_lib import exceptions
from neutron_lib.objects import exceptions as obj_exc
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import excutils
from osprofiler import opts as profiler_opts
import osprofiler.sqlalchemy
from pecan import util as p_util
import six
import sqlalchemy
from sqlalchemy import event # noqa
from sqlalchemy import orm
from sqlalchemy.orm import exc
def set_hook(engine):
if (profiler_opts.is_trace_enabled() and
profiler_opts.is_db_trace_enabled()):
osprofiler.sqlalchemy.add_tracing(sqlalchemy, engine, 'neutron.db')
context_manager = api.get_context_manager()
# TODO(ihrachys) the hook assumes options defined by osprofiler, and the only
# public function that is provided by osprofiler that will register them is
# set_defaults, that's why we call it here even though we don't need to change
# defaults
profiler_opts.set_defaults(cfg.CONF)
context_manager.append_on_engine_create(set_hook)
MAX_RETRIES = 10
LOG = logging.getLogger(__name__)
def is_retriable(e):
if getattr(e, '_RETRY_EXCEEDED', False):
return False
if _is_nested_instance(e, (db_exc.DBDeadlock, exc.StaleDataError,
db_exc.DBConnectionError,
db_exc.DBDuplicateEntry, db_exc.RetryRequest,
obj_exc.NeutronDbObjectDuplicateEntry)):
return True
# look for savepoints mangled by deadlocks. see bug/1590298 for details.
return _is_nested_instance(e, db_exc.DBError) and '1305' in str(e)
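# Illustrative sketch (not part of the original module): is_retriable() is the
# predicate used by the retry machinery. A deadlock is normally retriable, but
# the same exception stops being retriable once it has been flagged by the
# decorator below; this helper exists only to demonstrate that behaviour.
def _example_is_retriable_usage():
    err = db_exc.DBDeadlock()
    assert is_retriable(err)           # deadlocks are retriable by default
    err._RETRY_EXCEEDED = True         # simulate the flag set after retries ran out
    assert not is_retriable(err)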
def _tag_retriables_as_unretriable(f):
"""Puts a flag on retriable exceptions so is_retriable returns False.
This decorator can be used outside of a retry decorator to prevent
decorators higher up from retrying again.
"""
@six.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
with excutils.save_and_reraise_exception():
if is_retriable(e):
setattr(e, '_RETRY_EXCEEDED', True)
return wrapped
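# Illustrative sketch (not part of the original module): the flagging decorator
# is meant to sit outside a retry decorator, so that once the inner retries are
# exhausted any retry decorators further up the call stack give up instead of
# retrying the same failure again. The function name and body are assumptions
# made purely for the example.
@_tag_retriables_as_unretriable
@api.retry_db_errors
def _example_flagged_write(context, obj, value):
    # A DBDeadlock raised here is retried by retry_db_errors; when retries run
    # out, the exception is re-raised with _RETRY_EXCEEDED set, so is_retriable()
    # returns False for outer callers.
    obj.value = value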
def _copy_if_lds(item):
"""Deepcopy lists/dicts/sets, leave everything else alone."""
return copy.deepcopy(item) if isinstance(item, (list, dict, set)) else item
def retry_if_session_inactive(context_var_name='context'):
"""Retries only if the session in the context is inactive.
Calls a retry_db_errors wrapped version of the function if the context's
session passed in is inactive, otherwise it just calls the function
directly. This is useful to avoid retrying things inside of a transaction
which is ineffective for DB races/errors.
This should be used in all cases where retries are desired and the method
accepts a context.
"""
def decorator(f):
try:
# NOTE(kevinbenton): we use pecan's util function here because it
# deals with the horrors of finding args of already decorated
# functions
ctx_arg_index = p_util.getargspec(f).args.index(context_var_name)
except ValueError:
raise RuntimeError("Could not find position of var %s" %
context_var_name)
f_with_retry = api.retry_db_errors(f)
@six.wraps(f)
def wrapped(*args, **kwargs):
# only use retry wrapper if we aren't nested in an active
# transaction
if context_var_name in kwargs:
context = kwargs[context_var_name]
else:
context = args[ctx_arg_index]
method = f if context.session.is_active else f_with_retry
return method(*args, **kwargs)
return wrapped
return decorator
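# Illustrative sketch (not part of the original module): a hypothetical plugin
# method that opts into automatic retries only when it is invoked outside of an
# active transaction; inside a transaction the undecorated function runs
# directly, since retrying mid-transaction is ineffective. The argument names
# other than ``context`` are assumptions for the example.
@retry_if_session_inactive()
def _example_bump_revision(context, record):
    record.revision_number = record.revision_number + 1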
def _is_nested_instance(e, etypes):
"""Check if exception or its inner excepts are an instance of etypes."""
if isinstance(e, etypes):
return True
if isinstance(e, exceptions.MultipleExceptions):
return any(_is_nested_instance(i, etypes) for i in e.inner_exceptions)
if isinstance(e, db_exc.DBError):
return _is_nested_instance(e.inner_exception, etypes)
return False
@event.listens_for(orm.session.Session, "after_flush")
def add_to_rel_load_list(session, flush_context=None):
# keep track of new items to load relationships on during commit
session.info.setdefault('_load_rels', weakref.WeakSet()).update(
session.new)
@event.listens_for(orm.session.Session, "before_commit")
def load_one_to_manys(session):
# TODO(kevinbenton): we should be able to remove this after we
# have eliminated all places where related objects are constructed
# using a key rather than a relationship.
# capture any new objects
if session.new:
session.flush()
if session.transaction.nested:
# wait until final commit
return
for new_object in session.info.pop('_load_rels', []):
if new_object not in session:
# don't load detached objects because that brings them back into
# session
continue
state = sqlalchemy.inspect(new_object)
# set up relationship loading so that we can call lazy
# loaders on the object even though the ".key" is not set up yet
# (this normally happens in after_flush_postexec, but we're trying
# to do this more succinctly). in this context this is only
# setting a simple flag on the object's state.
session.enable_relationship_loading(new_object)
# look for eager relationships and do normal load.
# For relationships where the related object is also
# in the session these lazy loads will pull from the
# identity map and not emit SELECT. Otherwise, we are still
# local in the transaction so a normal SELECT load will work fine.
for relationship_attr in state.mapper.relationships:
if relationship_attr.lazy not in ('joined', 'subquery'):
# we only want to automatically load relationships that would
# automatically load during a lookup operation
continue
if relationship_attr.key not in state.dict:
getattr(new_object, relationship_attr.key)
if relationship_attr.key not in state.dict:
msg = ("Relationship %s attributes must be loaded in db"
" object %s" % (relationship_attr.key, state.dict))
raise AssertionError(msg)
# Expire relationships when foreign key changes.
#
# NOTE(ihrachys) Arguably, it's a sqlalchemy anti-pattern to access child
# models directly and through parent relationships in the same session. But
# since OVO mechanism is built around synthetic fields that assume this mixed
# access is possible, we keep it here until we find a way to migrate OVO
# synthetic fields to better mechanism that would update child models via
# parents. Even with that, there are multiple places in plugin code where we
# mix access when using models directly; those occurrences would need to be
# fixed too to be able to remove this hook and explicit expire() calls.
#
# Adopted from the following recipe:
# https://bitbucket.org/zzzeek/sqlalchemy/wiki/UsageRecipes
# /ExpireRelationshipOnFKChange
#
# ...then massively changed to actually work for all neutron backref cases.
#
# TODO(ihrachys) at some point these event handlers should be extended to also
# automatically refresh values for expired attributes
def expire_for_fk_change(target, fk_value, relationship_prop, column_attr):
"""Expire relationship attributes when a many-to-one column changes."""
sess = orm.object_session(target)
# subnets and network's many-to-one relationship is used as example in the
# comments in this function
if sess is not None:
# optional behavior #1 - expire the "Network.subnets"
# collection on the existing "network" object
if relationship_prop.back_populates and \
relationship_prop.key in target.__dict__:
obj = getattr(target, relationship_prop.key)
if obj is not None and sqlalchemy.inspect(obj).persistent:
sess.expire(obj, [relationship_prop.back_populates])
# optional behavior #2 - expire the "Subnet.network"
if sqlalchemy.inspect(target).persistent:
sess.expire(target, [relationship_prop.key])
# optional behavior #3 - "trick" the ORM by actually
# setting the value ahead of time, then emitting a load
# for the attribute so that the *new* Subnet.network
# is loaded. Then, expire Network.subnets on *that*.
# Other techniques here including looking in the identity
# map for "value", if this is a simple many-to-one get.
if relationship_prop.back_populates:
target.__dict__[column_attr] = fk_value
new = getattr(target, relationship_prop.key)
if new is not None:
if sqlalchemy.inspect(new).persistent:
sess.expire(new, [relationship_prop.back_populates])
else:
# no Session yet, do it later. This path is reached from the 'expire'
# listener setup by '_expire_prop_on_col' below, when a foreign key
# is directly assigned to in the many to one side of a relationship.
# i.e. assigning directly to Subnet.network_id before Subnet is added
# to the session
if target not in _emit_on_pending:
_emit_on_pending[target] = []
_emit_on_pending[target].append(
(fk_value, relationship_prop, column_attr))
_emit_on_pending = weakref.WeakKeyDictionary()
@event.listens_for(orm.session.Session, "pending_to_persistent")
def _pending_callables(session, obj):
"""Expire relationships when a new object w/ a foreign key becomes
persistent
"""
if obj is None:
return
args = _emit_on_pending.pop(obj, [])
for a in args:
if a is not None:
expire_for_fk_change(obj, *a)
@event.listens_for(orm.session.Session, "persistent_to_deleted")
def _persistent_to_deleted(session, obj):
"""Expire relationships when an object w/ a foreign key becomes deleted"""
mapper = sqlalchemy.inspect(obj).mapper
for prop in mapper.relationships:
if prop.direction is orm.interfaces.MANYTOONE:
for col in prop.local_columns:
colkey = mapper.get_property_by_column(col).key
expire_for_fk_change(obj, None, prop, colkey)
@event.listens_for(model_base.BASEV2, "attribute_instrument", propagate=True)
def _listen_for_changes(cls, key, inst):
mapper = sqlalchemy.inspect(cls)
if key not in mapper.relationships:
return
prop = inst.property
if prop.direction is orm.interfaces.MANYTOONE:
for col in prop.local_columns:
colkey = mapper.get_property_by_column(col).key
_expire_prop_on_col(cls, prop, colkey)
elif prop.direction is orm.interfaces.ONETOMANY:
remote_mapper = prop.mapper
# the collection *has* to have a MANYTOONE backref so we
# can look up the parent. so here we make one if it doesn't
# have it already, as is the case in this example
if not prop.back_populates:
name = "_%s_backref" % prop.key
backref_prop = orm.relationship(
prop.parent, back_populates=prop.key)
remote_mapper.add_property(name, backref_prop)
prop.back_populates = name
def _expire_prop_on_col(cls, prop, colkey):
@event.listens_for(getattr(cls, colkey), "set")
def expire(target, value, oldvalue, initiator):
"""Expire relationships when the foreign key attribute on
an object changes
"""
expire_for_fk_change(target, value, prop, colkey)
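# Illustrative sketch (not part of the original module): the net effect of the
# listeners above, using the subnet/network example from the comments. All
# model and attribute names are assumptions made for the example.
def _example_fk_expiry(session, subnet, new_network_id):
    # Re-pointing the foreign key on the child expires the stale relationship
    # caches: the old network's ``subnets`` collection, ``subnet.network``
    # itself, and the new network's ``subnets`` collection are all reloaded on
    # next access instead of serving stale data.
    subnet.network_id = new_network_id
    session.flush()
    return subnet.network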
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ..core.enums import HorizontalLocation, MarkerType, VerticalLocation
from ..core.properties import (
Any,
Auto,
Either,
Enum,
Instance,
Int,
List,
Seq,
String,
Tuple,
)
from ..models import ColumnDataSource, GraphRenderer, Plot, Title, Tool, glyphs, markers
from ..models.tools import Drag, Inspection, Scroll, Tap
from ..transform import linear_cmap
from ..util.options import Options
from ._decorators import glyph_method
from ._graph import get_graph_kwargs
from ._plot import get_range, get_scale, process_axis_and_grid
from ._stack import double_stack, single_stack
from ._tools import process_active_tools, process_tools_arg
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
DEFAULT_TOOLS = "pan,wheel_zoom,box_zoom,save,reset,help"
__all__ = (
'Figure',
'figure',
'markers'
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class Figure(Plot):
''' Create a new Figure for plotting.
A subclass of :class:`~bokeh.models.plots.Plot` that simplifies plot
creation with default axes, grids, tools, etc.
Figure objects have many glyph methods that can be used to draw
vectorized graphical glyphs:
.. hlist::
:columns: 3
* :func:`~bokeh.plotting.figure.Figure.annular_wedge`
* :func:`~bokeh.plotting.figure.Figure.annulus`
* :func:`~bokeh.plotting.figure.Figure.arc`
* :func:`~bokeh.plotting.figure.Figure.asterisk`
* :func:`~bokeh.plotting.figure.Figure.bezier`
* :func:`~bokeh.plotting.figure.Figure.circle`
* :func:`~bokeh.plotting.figure.Figure.circle_cross`
* :func:`~bokeh.plotting.figure.Figure.circle_dot`
* :func:`~bokeh.plotting.figure.Figure.circle_x`
* :func:`~bokeh.plotting.figure.Figure.circle_y`
* :func:`~bokeh.plotting.figure.Figure.cross`
* :func:`~bokeh.plotting.figure.Figure.dash`
* :func:`~bokeh.plotting.figure.Figure.diamond`
* :func:`~bokeh.plotting.figure.Figure.diamond_cross`
* :func:`~bokeh.plotting.figure.Figure.diamond_dot`
* :func:`~bokeh.plotting.figure.Figure.dot`
* :func:`~bokeh.plotting.figure.Figure.ellipse`
* :func:`~bokeh.plotting.figure.Figure.harea`
* :func:`~bokeh.plotting.figure.Figure.hbar`
* :func:`~bokeh.plotting.figure.Figure.hex`
* :func:`~bokeh.plotting.figure.Figure.hex_tile`
* :func:`~bokeh.plotting.figure.Figure.image`
* :func:`~bokeh.plotting.figure.Figure.image_rgba`
* :func:`~bokeh.plotting.figure.Figure.image_url`
* :func:`~bokeh.plotting.figure.Figure.inverted_triangle`
* :func:`~bokeh.plotting.figure.Figure.line`
* :func:`~bokeh.plotting.figure.Figure.multi_line`
* :func:`~bokeh.plotting.figure.Figure.multi_polygons`
* :func:`~bokeh.plotting.figure.Figure.oval`
* :func:`~bokeh.plotting.figure.Figure.patch`
* :func:`~bokeh.plotting.figure.Figure.patches`
* :func:`~bokeh.plotting.figure.Figure.plus`
* :func:`~bokeh.plotting.figure.Figure.quad`
* :func:`~bokeh.plotting.figure.Figure.quadratic`
* :func:`~bokeh.plotting.figure.Figure.ray`
* :func:`~bokeh.plotting.figure.Figure.rect`
* :func:`~bokeh.plotting.figure.Figure.segment`
* :func:`~bokeh.plotting.figure.Figure.square`
* :func:`~bokeh.plotting.figure.Figure.square_cross`
* :func:`~bokeh.plotting.figure.Figure.square_dot`
* :func:`~bokeh.plotting.figure.Figure.square_pin`
* :func:`~bokeh.plotting.figure.Figure.square_x`
* :func:`~bokeh.plotting.figure.Figure.step`
* :func:`~bokeh.plotting.figure.Figure.text`
* :func:`~bokeh.plotting.figure.Figure.triangle`
* :func:`~bokeh.plotting.figure.Figure.triangle_dot`
* :func:`~bokeh.plotting.figure.Figure.triangle_pin`
* :func:`~bokeh.plotting.figure.Figure.varea`
* :func:`~bokeh.plotting.figure.Figure.vbar`
* :func:`~bokeh.plotting.figure.Figure.wedge`
* :func:`~bokeh.plotting.figure.Figure.x`
* :func:`~bokeh.plotting.figure.Figure.y`
There is a scatter function that can be parameterized by marker type:
* :func:`~bokeh.plotting.figure.Figure.scatter`
There are also specialized methods for stacking bars:
* bars: :func:`~bokeh.plotting.figure.Figure.hbar_stack`, :func:`~bokeh.plotting.figure.Figure.vbar_stack`
* lines: :func:`~bokeh.plotting.figure.Figure.hline_stack`, :func:`~bokeh.plotting.figure.Figure.vline_stack`
* areas: :func:`~bokeh.plotting.figure.Figure.harea_stack`, :func:`~bokeh.plotting.figure.Figure.varea_stack`
As well as one specialized method for making simple hexbin plots:
* :func:`~bokeh.plotting.figure.Figure.hexbin`
In addition to all the ``Figure`` property attributes, the following
options are also accepted:
.. bokeh-options:: FigureOptions
:module: bokeh.plotting.figure
'''
__subtype__ = "Figure"
__view_model__ = "Plot"
def __init__(self, *arg, **kw):
if 'plot_width' in kw and 'width' in kw:
raise ValueError("Figure called with both 'plot_width' and 'width' supplied, supply only one")
if 'plot_height' in kw and 'height' in kw:
raise ValueError("Figure called with both 'plot_height' and 'height' supplied, supply only one")
if 'height' in kw:
kw['plot_height'] = kw.pop('height')
if 'width' in kw:
kw['plot_width'] = kw.pop('width')
opts = FigureOptions(kw)
title = kw.get("title", None)
if isinstance(title, str):
kw['title'] = Title(text=title)
super().__init__(*arg, **kw)
self.x_range = get_range(opts.x_range)
self.y_range = get_range(opts.y_range)
self.x_scale = get_scale(self.x_range, opts.x_axis_type)
self.y_scale = get_scale(self.y_range, opts.y_axis_type)
process_axis_and_grid(self, opts.x_axis_type, opts.x_axis_location, opts.x_minor_ticks, opts.x_axis_label, self.x_range, 0)
process_axis_and_grid(self, opts.y_axis_type, opts.y_axis_location, opts.y_minor_ticks, opts.y_axis_label, self.y_range, 1)
tool_objs, tool_map = process_tools_arg(self, opts.tools, opts.tooltips)
self.add_tools(*tool_objs)
process_active_tools(self.toolbar, tool_map, opts.active_drag, opts.active_inspect, opts.active_scroll, opts.active_tap)
@glyph_method(glyphs.AnnularWedge)
def annular_wedge(self, **kwargs):
pass
@glyph_method(glyphs.Annulus)
def annulus(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.annulus(x=[1, 2, 3], y=[1, 2, 3], color="#7FC97F",
inner_radius=0.2, outer_radius=0.5)
show(plot)
"""
@glyph_method(glyphs.Arc)
def arc(self, **kwargs):
pass
@glyph_method(markers.Asterisk)
def asterisk(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.asterisk(x=[1,2,3], y=[1,2,3], size=20, color="#F0027F")
show(plot)
"""
@glyph_method(glyphs.Bezier)
def bezier(self, **kwargs):
pass
@glyph_method(markers.Circle)
def circle(self, **kwargs):
"""
.. note::
Only one of ``size`` or ``radius`` should be provided. Note that ``radius``
defaults to data units.
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.circle(x=[1, 2, 3], y=[1, 2, 3], size=20)
show(plot)
"""
@glyph_method(markers.CircleCross)
def circle_cross(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.circle_cross(x=[1,2,3], y=[4,5,6], size=20,
color="#FB8072", fill_alpha=0.2, line_width=2)
show(plot)
"""
@glyph_method(markers.CircleDot)
def circle_dot(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.circle_dot(x=[1,2,3], y=[4,5,6], size=20,
color="#FB8072", fill_color=None)
show(plot)
"""
@glyph_method(markers.CircleX)
def circle_x(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.circle_x(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#DD1C77", fill_alpha=0.2)
show(plot)
"""
@glyph_method(markers.CircleY)
def circle_y(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.circle_y(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#DD1C77", fill_alpha=0.2)
show(plot)
"""
@glyph_method(markers.Cross)
def cross(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#E6550D", line_width=2)
show(plot)
"""
@glyph_method(markers.Dash)
def dash(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.dash(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
"""
@glyph_method(markers.Diamond)
def diamond(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.diamond(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#1C9099", line_width=2)
show(plot)
"""
@glyph_method(markers.DiamondCross)
def diamond_cross(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.diamond_cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
"""
@glyph_method(markers.DiamondDot)
def diamond_dot(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.diamond_dot(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None)
show(plot)
"""
@glyph_method(markers.Dot)
def dot(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.dot(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#386CB0")
show(plot)
"""
@glyph_method(glyphs.HArea)
def harea(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.harea(x1=[0, 0, 0], x2=[1, 4, 2], y=[1, 2, 3],
fill_color="#99D594")
show(plot)
"""
@glyph_method(glyphs.HBar)
def hbar(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.hbar(y=[1, 2, 3], height=0.5, left=0, right=[1,2,3], color="#CAB2D6")
show(plot)
"""
@glyph_method(glyphs.Ellipse)
def ellipse(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.ellipse(x=[1, 2, 3], y=[1, 2, 3], width=30, height=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
"""
@glyph_method(markers.Hex)
def hex(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.hex(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")
show(plot)
"""
@glyph_method(markers.HexDot)
def hex_dot(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.hex_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30],
color="#74ADD1", fill_color=None)
show(plot)
"""
@glyph_method(glyphs.HexTile)
def hex_tile(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300, match_aspect=True)
plot.hex_tile(r=[0, 0, 1], q=[1, 2, 2], fill_color="#74ADD1")
show(plot)
"""
@glyph_method(glyphs.Image)
def image(self, **kwargs):
"""
.. note::
If both ``palette`` and ``color_mapper`` are passed, a ``ValueError``
exception will be raised. If neither is passed, then the ``Greys9``
palette will be used as a default.
"""
@glyph_method(glyphs.ImageRGBA)
def image_rgba(self, **kwargs):
"""
.. note::
The ``image_rgba`` method accepts images as a two-dimensional array of RGBA
values (encoded as 32-bit integers).
"""
@glyph_method(glyphs.ImageURL)
def image_url(self, **kwargs):
pass
@glyph_method(markers.InvertedTriangle)
def inverted_triangle(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.inverted_triangle(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
"""
@glyph_method(glyphs.Line)
def line(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(title="line", plot_width=300, plot_height=300)
p.line(x=[1, 2, 3, 4, 5], y=[6, 7, 2, 4, 5])
show(p)
"""
@glyph_method(glyphs.MultiLine)
def multi_line(self, **kwargs):
"""
.. note::
For this glyph, the data is not simply an array of scalars, it is an
"array of arrays".
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(plot_width=300, plot_height=300)
p.multi_line(xs=[[1, 2, 3], [2, 3, 4]], ys=[[6, 7, 2], [4, 5, 7]],
color=['red','green'])
show(p)
"""
@glyph_method(glyphs.MultiPolygons)
def multi_polygons(self, **kwargs):
"""
.. note::
For this glyph, the data is not simply an array of scalars, it is a
nested array.
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(plot_width=300, plot_height=300)
p.multi_polygons(xs=[[[[1, 1, 2, 2]]], [[[1, 1, 3], [1.5, 1.5, 2]]]],
ys=[[[[4, 3, 3, 4]]], [[[1, 3, 1], [1.5, 2, 1.5]]]],
color=['red', 'green'])
show(p)
"""
@glyph_method(glyphs.Oval)
def oval(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.oval(x=[1, 2, 3], y=[1, 2, 3], width=0.2, height=0.4,
angle=-0.7, color="#1D91C0")
show(plot)
"""
@glyph_method(glyphs.Patch)
def patch(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(plot_width=300, plot_height=300)
p.patch(x=[1, 2, 3, 2], y=[6, 7, 2, 2], color="#99d8c9")
show(p)
"""
@glyph_method(glyphs.Patches)
def patches(self, **kwargs):
"""
.. note::
For this glyph, the data is not simply an array of scalars, it is an
"array of arrays".
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(plot_width=300, plot_height=300)
p.patches(xs=[[1,2,3],[4,5,6,5]], ys=[[1,2,1],[4,5,5,4]],
color=["#43a2ca", "#a8ddb5"])
show(p)
"""
@glyph_method(markers.Plus)
def plus(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.plus(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
"""
@glyph_method(glyphs.Quad)
def quad(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.quad(top=[2, 3, 4], bottom=[1, 2, 3], left=[1, 2, 3],
right=[1.2, 2.5, 3.7], color="#B3DE69")
show(plot)
"""
@glyph_method(glyphs.Quadratic)
def quadratic(self, **kwargs):
pass
@glyph_method(glyphs.Ray)
def ray(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.ray(x=[1, 2, 3], y=[1, 2, 3], length=45, angle=-0.7, color="#FB8072",
line_width=2)
show(plot)
"""
@glyph_method(glyphs.Rect)
def rect(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.rect(x=[1, 2, 3], y=[1, 2, 3], width=10, height=20, color="#CAB2D6",
width_units="screen", height_units="screen")
show(plot)
"""
@glyph_method(glyphs.Step)
def step(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.step(x=[1, 2, 3, 4, 5], y=[1, 2, 3, 2, 5], color="#FB8072")
show(plot)
"""
@glyph_method(glyphs.Segment)
def segment(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.segment(x0=[1, 2, 3], y0=[1, 2, 3],
x1=[1, 2, 3], y1=[1.2, 2.5, 3.7],
color="#F4A582", line_width=3)
show(plot)
"""
@glyph_method(markers.Square)
def square(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.square(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")
show(plot)
"""
@glyph_method(markers.SquareCross)
def square_cross(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.square_cross(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F",fill_color=None, line_width=2)
show(plot)
"""
@glyph_method(markers.SquareDot)
def square_dot(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.square_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F", fill_color=None)
show(plot)
"""
@glyph_method(markers.SquarePin)
def square_pin(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.square_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F",fill_color=None, line_width=2)
show(plot)
"""
@glyph_method(markers.SquareX)
def square_x(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.square_x(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#FDAE6B",fill_color=None, line_width=2)
show(plot)
"""
@glyph_method(glyphs.Text)
def text(self, **kwargs):
"""
.. note::
The location and angle of the text relative to the ``x``, ``y`` coordinates
is indicated by the alignment and baseline text properties.
"""
@glyph_method(markers.Triangle)
def triangle(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.triangle(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
"""
@glyph_method(markers.TriangleDot)
def triangle_dot(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.triangle_dot(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", fill_color=None)
show(plot)
"""
@glyph_method(markers.TrianglePin)
def triangle_pin(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.triangle_pin(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
"""
@glyph_method(glyphs.VArea)
def varea(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.varea(x=[1, 2, 3], y1=[0, 0, 0], y2=[1, 4, 2],
fill_color="#99D594")
show(plot)
"""
@glyph_method(glyphs.VBar)
def vbar(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.vbar(x=[1, 2, 3], width=0.5, bottom=0, top=[1,2,3], color="#CAB2D6")
show(plot)
"""
@glyph_method(glyphs.Wedge)
def wedge(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.wedge(x=[1, 2, 3], y=[1, 2, 3], radius=15, start_angle=0.6,
end_angle=4.1, radius_units="screen", color="#2b8cbe")
show(plot)
"""
@glyph_method(markers.X)
def x(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.x(x=[1, 2, 3], y=[1, 2, 3], size=[10, 20, 25], color="#fa9fb5")
show(plot)
"""
@glyph_method(markers.Y)
def y(self, **kwargs):
"""
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(plot_width=300, plot_height=300)
plot.y(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
"""
# -------------------------------------------------------------------------
@glyph_method(markers.Scatter)
def _scatter(self, **kwargs):
pass
def scatter(self, *args, **kwargs):
''' Creates a scatter plot of the given x and y items.
Args:
x (str or seq[float]) : values or field names of center x coordinates
y (str or seq[float]) : values or field names of center y coordinates
size (str or list[float]) : values or field names of sizes in screen units
marker (str, or list[str]): values or field names of marker types
color (color value, optional): shorthand to set both fill and line color
source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source.
An attempt will be made to convert the object to :class:`~bokeh.models.sources.ColumnDataSource`
if needed. If none is supplied, one is created for the user automatically.
**kwargs: :ref:`userguide_styling_line_properties` and :ref:`userguide_styling_fill_properties`
Examples:
>>> p.scatter([1,2,3],[4,5,6], marker="square", fill_color="red")
>>> p.scatter("data1", "data2", marker="mtype", source=data_source, ...)
.. note::
When passing ``marker="circle"`` it is also possible to supply a
``radius`` value in data-space units. When configuring marker type
from a data source column, *all* markers including circles may only
be configured with ``size`` in screen units.
'''
marker_type = kwargs.pop("marker", "circle")
if isinstance(marker_type, str) and marker_type in _MARKER_SHORTCUTS:
marker_type = _MARKER_SHORTCUTS[marker_type]
# The original scatter implementation allowed circle scatters to set a
# radius. We will leave this here for compatibility but note that it
# only works when the marker type is "circle" (and not referencing a
# data source column). Consider deprecating in the future.
if marker_type == "circle" and "radius" in kwargs:
return self.circle(*args, **kwargs)
else:
return self._scatter(*args, marker=marker_type, **kwargs)
def hexbin(self, x, y, size, orientation="pointytop", palette="Viridis256", line_color=None, fill_color=None, aspect_scale=1, **kwargs):
''' Perform a simple equal-weight hexagonal binning.
A :class:`~bokeh.models.glyphs.HexTile` glyph will be added to display
the binning. The :class:`~bokeh.models.sources.ColumnDataSource` for
the glyph will have columns ``q``, ``r``, and ``count``, where ``q``
and ``r`` are `axial coordinates`_ for a tile, and ``count`` is the
associated bin count.
It is often useful to set ``match_aspect=True`` on the associated plot,
so that hexagonal tiles are all regular (i.e. not "stretched") in
screen space.
For more sophisticated use-cases, e.g. weighted binning or individually
scaling hex tiles, use :func:`hex_tile` directly, or consider a higher
level library such as HoloViews.
Args:
x (array[float]) :
A NumPy array of x-coordinates to bin into hexagonal tiles.
y (array[float]) :
A NumPy array of y-coordinates to bin into hexagonal tiles
size (float) :
The size of the hexagonal tiling to use. The size is defined as
distance from the center of a hexagon to a corner.
In case the aspect scaling is not 1-1, then specifically `size`
is the distance from the center to the "top" corner with the
`"pointytop"` orientation, and the distance from the center to
a "side" corner with the "flattop" orientation.
orientation ("pointytop" or "flattop", optional) :
Whether the hexagonal tiles should be oriented with a pointed
corner on top, or a flat side on top. (default: "pointytop")
palette (str or seq[color], optional) :
A palette (or palette name) to use to colormap the bins according
to count. (default: 'Viridis256')
If ``fill_color`` is supplied, it overrides this value.
line_color (color, optional) :
The outline color for hex tiles, or None (default: None)
fill_color (color, optional) :
An optional fill color for hex tiles, or None. If None, then
the ``palette`` will be used to color map the tiles by
count. (default: None)
aspect_scale (float) :
Match a plot's aspect ratio scaling.
When working with a plot with ``aspect_scale != 1``, this
parameter can be set to match the plot, in order to draw
regular hexagons (instead of "stretched" ones).
This is roughly equivalent to binning in "screen space", and
it may be better to use axis-aligned rectangular bins when
plot aspect scales are not one.
Any additional keyword arguments are passed to :func:`hex_tile`.
Returns:
(GlyphRenderer, DataFrame)
A tuple with the ``HexTile`` renderer generated to display the
binning, and a Pandas ``DataFrame`` with columns ``q``, ``r``,
and ``count``, where ``q`` and ``r`` are `axial coordinates`_
for a tile, and ``count`` is the associated bin count.
Example:
.. bokeh-plot::
:source-position: above
import numpy as np
from bokeh.models import HoverTool
from bokeh.plotting import figure, show
x = 2 + 2*np.random.standard_normal(500)
y = 2 + 2*np.random.standard_normal(500)
p = figure(match_aspect=True, tools="wheel_zoom,reset")
p.background_fill_color = '#440154'
p.grid.visible = False
p.hexbin(x, y, size=0.5, hover_color="pink", hover_alpha=0.8)
hover = HoverTool(tooltips=[("count", "@c"), ("(q,r)", "(@q, @r)")])
p.add_tools(hover)
show(p)
.. _axial coordinates: https://www.redblobgames.com/grids/hexagons/#coordinates-axial
'''
from ..util.hex import hexbin
bins = hexbin(x, y, size, orientation, aspect_scale=aspect_scale)
if fill_color is None:
fill_color = linear_cmap('c', palette, 0, max(bins.counts))
source = ColumnDataSource(data=dict(q=bins.q, r=bins.r, c=bins.counts))
r = self.hex_tile(q="q", r="r", size=size, orientation=orientation, aspect_scale=aspect_scale,
source=source, line_color=line_color, fill_color=fill_color, **kwargs)
return (r, bins)
def harea_stack(self, stackers, **kw):
''' Generate multiple ``HArea`` renderers for levels stacked left
to right.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``x1`` and ``x2`` harea coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``harea``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``harea_stack`` will
create two ``HArea`` renderers that stack:
.. code-block:: python
p.harea_stack(['2016', '2017'], y='y', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.harea(x1=stack(), x2=stack('2016'), y='y', color='blue', source=source, name='2016')
p.harea(x1=stack('2016'), x2=stack('2016', '2017'), y='y', color='red', source=source, name='2017')
'''
result = []
for kw in double_stack(stackers, "x1", "x2", **kw):
result.append(self.harea(**kw))
return result
def hbar_stack(self, stackers, **kw):
''' Generate multiple ``HBar`` renderers for levels stacked left to right.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``left`` and ``right`` bar coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``hbar``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``hbar_stack`` will
create two ``HBar`` renderers that stack:
.. code-block:: python
p.hbar_stack(['2016', '2017'], x=10, width=0.9, color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.hbar(bottom=stack(), top=stack('2016'), x=10, width=0.9, color='blue', source=source, name='2016')
p.hbar(bottom=stack('2016'), top=stack('2016', '2017'), x=10, width=0.9, color='red', source=source, name='2017')
'''
result = []
for kw in double_stack(stackers, "left", "right", **kw):
result.append(self.hbar(**kw))
return result
def _line_stack(self, x, y, **kw):
''' Generate multiple ``Line`` renderers for lines stacked vertically
or horizontally.
Args:
x (seq[str]) :
y (seq[str]) :
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``line``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``line_stack`` with
stackers for the y-coordinates will create two ``Line``
renderers that stack:
.. code-block:: python
p.line_stack(['2016', '2017'], x='x', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.line(y=stack('2016'), x='x', color='blue', source=source, name='2016')
p.line(y=stack('2016', '2017'), x='x', color='red', source=source, name='2017')
'''
if all(isinstance(val, (list, tuple)) for val in (x,y)):
raise ValueError("Only one of x or y may be a list of stackers")
result = []
if isinstance(y, (list, tuple)):
kw['x'] = x
for kw in single_stack(y, "y", **kw):
result.append(self.line(**kw))
return result
if isinstance(x, (list, tuple)):
kw['y'] = y
for kw in single_stack(x, "x", **kw):
result.append(self.line(**kw))
return result
return [self.line(x, y, **kw)]
def hline_stack(self, stackers, **kw):
''' Generate multiple ``Line`` renderers for lines stacked horizontally.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``x`` line coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``line``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``hline_stack`` with
stackers for the x-coordinates will create two ``Line``
renderers that stack:
.. code-block:: python
p.hline_stack(['2016', '2017'], y='y', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.line(x=stack('2016'), y='y', color='blue', source=source, name='2016')
p.line(x=stack('2016', '2017'), y='y', color='red', source=source, name='2017')
'''
return self._line_stack(x=stackers, **kw)
def varea_stack(self, stackers, **kw):
''' Generate multiple ``VArea`` renderers for levels stacked bottom
to top.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``y1`` and ``y2`` varea coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``varea``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``varea_stack`` will
create two ``VArea`` renderers that stack:
.. code-block:: python
p.varea_stack(['2016', '2017'], x='x', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.varea(y1=stack(), y2=stack('2016'), x='x', color='blue', source=source, name='2016')
p.varea(y1=stack('2016'), y2=stack('2016', '2017'), x='x', color='red', source=source, name='2017')
'''
result = []
for kw in double_stack(stackers, "y1", "y2", **kw):
result.append(self.varea(**kw))
return result
def vbar_stack(self, stackers, **kw):
''' Generate multiple ``VBar`` renderers for levels stacked bottom
to top.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``bottom`` and ``top`` bar coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``vbar``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``vbar_stack`` will
create two ``VBar`` renderers that stack:
.. code-block:: python
p.vbar_stack(['2016', '2017'], x=10, width=0.9, color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.vbar(bottom=stack(), top=stack('2016'), x=10, width=0.9, color='blue', source=source, name='2016')
p.vbar(bottom=stack('2016'), top=stack('2016', '2017'), x=10, width=0.9, color='red', source=source, name='2017')
'''
result = []
for kw in double_stack(stackers, "bottom", "top", **kw):
result.append(self.vbar(**kw))
return result
def vline_stack(self, stackers, **kw):
''' Generate multiple ``Line`` renderers for lines stacked vertically.
Args:
stackers (seq[str]) : a list of data source field names to stack
successively for ``y`` line coordinates.
Additionally, the ``name`` of the renderer will be set to
the value of each successive stacker (this is useful with the
special hover variable ``$name``)
Any additional keyword arguments are passed to each call to ``line``.
If a keyword value is a list or tuple, then each call will get one
value from the sequence.
Returns:
list[GlyphRenderer]
Examples:
Assuming a ``ColumnDataSource`` named ``source`` with columns
*2016* and *2017*, then the following call to ``vline_stack`` with
stackers for the y-coordinates will create two ``Line``
renderers that stack:
.. code-block:: python
p.vline_stack(['2016', '2017'], x='x', color=['blue', 'red'], source=source)
This is equivalent to the following two separate calls:
.. code-block:: python
p.line(y=stack('2016'), x='x', color='blue', source=source, name='2016')
p.line(y=stack('2016', '2017'), x='x', color='red', source=source, name='2017')
'''
return self._line_stack(y=stackers, **kw)
def graph(self, node_source, edge_source, layout_provider, **kwargs):
''' Creates a network graph using the given node, edge and layout provider.
Args:
node_source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source
for the graph nodes. An attempt will be made to convert the object to
:class:`~bokeh.models.sources.ColumnDataSource` if needed. If none is supplied, one is created
for the user automatically.
edge_source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source
for the graph edges. An attempt will be made to convert the object to
:class:`~bokeh.models.sources.ColumnDataSource` if needed. If none is supplied, one is created
for the user automatically.
layout_provider (:class:`~bokeh.models.graphs.LayoutProvider`) : a ``LayoutProvider`` instance to
provide the graph coordinates in Cartesian space.
**kwargs: :ref:`userguide_styling_line_properties` and :ref:`userguide_styling_fill_properties`
'''
kw = get_graph_kwargs(node_source, edge_source, **kwargs)
graph_renderer = GraphRenderer(layout_provider=layout_provider, **kw)
self.renderers.append(graph_renderer)
return graph_renderer
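# Illustrative sketch (not part of the original module): a minimal use of
# Figure.graph() with an explicit static layout. The node/edge data and the
# layout coordinates are arbitrary assumptions; StaticLayoutProvider is
# imported lazily to avoid adding a module-level dependency.
def _example_graph_usage():
    from ..models.graphs import StaticLayoutProvider
    p = figure(plot_width=300, plot_height=300)
    nodes = ColumnDataSource(data=dict(index=[0, 1, 2]))
    edges = ColumnDataSource(data=dict(start=[0, 0], end=[1, 2]))
    layout = StaticLayoutProvider(graph_layout={0: (0, 0), 1: (1, 1), 2: (2, 0)})
    p.graph(nodes, edges, layout)
    return p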
def figure(**kwargs):
return Figure(**kwargs)
figure.__doc__ = Figure.__doc__
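# Illustrative usage sketch (not part of the module): figure() accepts the
# keyword arguments declared on FigureOptions below, so a hover tool can be
# configured directly at construction time, e.g.
#
#     p = figure(tooltips=[("index", "$index"), ("(x, y)", "($x, $y)")])
#
# which creates the figure with a HoverTool whose tooltips are set to the
# given list of (label, field) pairs.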
_MARKER_SHORTCUTS = {
"*" : "asterisk",
"+" : "cross",
"o" : "circle",
"o+" : "circle_cross",
"o." : "circle_dot",
"ox" : "circle_x",
"oy" : "circle_y",
"-" : "dash",
"." : "dot",
"v" : "inverted_triangle",
"^" : "triangle",
"^." : "triangle_dot",
}
def markers():
''' Prints a list of valid marker types for scatter()
Returns:
None
'''
print("Available markers: \n\n - " + "\n - ".join(list(MarkerType)))
print()
print("Shortcuts: \n\n" + "\n".join(" %r: %s" % item for item in _MARKER_SHORTCUTS.items()))
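# Illustrative note: the _MARKER_SHORTCUTS table above is what lets scatter()
# accept compact marker codes, so a call such as
#
#     p.scatter(x, y, marker="o+")
#
# is resolved to the "circle_cross" marker type before the glyph is created.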
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
# This class itself is intentionally undocumented (it is used to generate
# documentation elsewhere)
class FigureOptions(Options):
tools = Either(String, Seq(Either(String, Instance(Tool))), default=DEFAULT_TOOLS, help="""
Tools the plot should start with.
""")
x_range = Any(help="""
Customize the x-range of the plot.
""")
y_range = Any(help="""
Customize the y-range of the plot.
""")
x_minor_ticks = Either(Auto, Int, default="auto", help="""
Number of minor ticks between adjacent x-axis major ticks.
""")
y_minor_ticks = Either(Auto, Int, default="auto", help="""
Number of minor ticks between adjacent y-axis major ticks.
""")
x_axis_location = Enum(VerticalLocation, default="below", help="""
Where the x-axis should be located.
""")
y_axis_location = Enum(HorizontalLocation, default="left", help="""
Where the y-axis should be located.
""")
x_axis_label = String(default="", help="""
A label for the x-axis.
""")
y_axis_label = String(default="", help="""
A label for the y-axis.
""")
active_drag = Either(Auto, String, Instance(Drag), default="auto", help="""
Which drag tool should initially be active.
""")
active_inspect = Either(Auto, String, Instance(Inspection), Seq(Instance(Inspection)), default="auto", help="""
Which inspect tool (or tools) should initially be active.
""")
active_scroll = Either(Auto, String, Instance(Scroll), default="auto", help="""
Which scroll tool should initially be active.
""")
active_tap = Either(Auto, String, Instance(Tap), default="auto", help="""
Which tap tool should initially be active.
""")
x_axis_type = Either(Auto, Enum("linear", "log", "datetime", "mercator"), default="auto", help="""
The type of the x-axis.
""")
y_axis_type = Either(Auto, Enum("linear", "log", "datetime", "mercator"), default="auto", help="""
The type of the y-axis.
""")
tooltips = Either(String, List(Tuple(String, String)), help="""
An optional argument to configure tooltips for the Figure. This argument
accepts the same values as the ``HoverTool.tooltips`` property. If a hover
tool is specified in the ``tools`` argument, this value will override that
hover tool's ``tooltips`` value. If no hover tool is specified in the
``tools`` argument, then passing tooltips here will cause one to be created
and added.
""")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
_color_fields = set(["color", "fill_color", "line_color"])
_alpha_fields = set(["alpha", "fill_alpha", "line_alpha"])
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Supports sending emails when tasks fail.
This module needs some more documentation.
See :doc:`/configuration` for configuration options.
In particular, setting the config option `receiver` tells Luigi where to send emails when tasks fail.
.. code-block:: ini
[email]
receiver=foo@bar.baz
'''
import logging
import socket
import sys
import textwrap
import luigi.task
import luigi.parameter
logger = logging.getLogger("luigi-interface")
DEFAULT_CLIENT_EMAIL = 'luigi-client@%s' % socket.gethostname()
class TestNotificationsTask(luigi.task.Task):
"""
You may invoke this task to quickly check whether you have correctly set up
your notification configuration. You can run:
.. code-block:: console
$ luigi TestNotificationsTask --local-scheduler --email-force-send
And then check your email inbox to see if you got an error email or any
other kind of notifications that you expected.
"""
raise_in_complete = luigi.parameter.BoolParameter(description='If true, fail in complete() instead of run()')
def run(self):
raise ValueError('Testing notifications triggering')
def complete(self):
if self.raise_in_complete:
raise ValueError('Testing notifications triggering')
return False
class email(luigi.Config):
force_send = luigi.parameter.BoolParameter(
default=False,
description='Send e-mail even from a tty')
format = luigi.parameter.ChoiceParameter(
default='plain',
config_path=dict(section='core', name='email-type'),
choices=('plain', 'html', 'none'),
description='Format type for sent e-mails')
method = luigi.parameter.ChoiceParameter(
default='smtp',
config_path=dict(section='email', name='type'),
choices=('smtp', 'sendgrid', 'ses', 'sns'),
description='Method for sending e-mail')
prefix = luigi.parameter.Parameter(
default='',
config_path=dict(section='core', name='email-prefix'),
description='Prefix for subject lines of all e-mails')
receiver = luigi.parameter.Parameter(
default='',
config_path=dict(section='core', name='error-email'),
description='Address to send error e-mails to')
sender = luigi.parameter.Parameter(
default=DEFAULT_CLIENT_EMAIL,
config_path=dict(section='core', name='email-sender'),
description='Address to send e-mails from')
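# Illustrative configuration sketch (placeholder values): the parameters above
# can be set under the [email] section of luigi.cfg, for example:
#
#   [email]
#   method=smtp
#   prefix=[luigi]
#   receiver=alerts@example.com
#   sender=luigi@example.com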
class smtp(luigi.Config):
host = luigi.parameter.Parameter(
default='localhost',
config_path=dict(section='core', name='smtp_host'),
description='Hostname of smtp server')
local_hostname = luigi.parameter.Parameter(
default=None,
config_path=dict(section='core', name='smtp_local_hostname'),
description='If specified, local_hostname is used as the FQDN of the local host in the HELO/EHLO command')
no_tls = luigi.parameter.BoolParameter(
default=False,
config_path=dict(section='core', name='smtp_without_tls'),
description='Do not use TLS in SMTP connections')
password = luigi.parameter.Parameter(
default=None,
config_path=dict(section='core', name='smtp_password'),
description='Password for the SMTP server login')
port = luigi.parameter.IntParameter(
default=0,
config_path=dict(section='core', name='smtp_port'),
description='Port number for smtp server')
ssl = luigi.parameter.BoolParameter(
default=False,
config_path=dict(section='core', name='smtp_ssl'),
description='Use SSL for the SMTP connection.')
timeout = luigi.parameter.FloatParameter(
default=10.0,
config_path=dict(section='core', name='smtp_timeout'),
description='Number of seconds before timing out the smtp connection')
username = luigi.parameter.Parameter(
default=None,
config_path=dict(section='core', name='smtp_login'),
description='Username used to log in to the SMTP host')
class sendgrid(luigi.Config):
username = luigi.parameter.Parameter(
config_path=dict(section='email', name='SENDGRID_USERNAME'),
description='Username for sendgrid login')
password = luigi.parameter.Parameter(
config_path=dict(section='email', name='SENDGRID_PASSWORD'),
description='Password for sendgrid login')
def generate_email(sender, subject, message, recipients, image_png):
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
msg_root = MIMEMultipart('related')
msg_text = MIMEText(message, email().format)
msg_text.set_charset('utf-8')
msg_root.attach(msg_text)
if image_png:
with open(image_png, 'rb') as fp:
msg_image = MIMEImage(fp.read(), 'png')
msg_root.attach(msg_image)
msg_root['Subject'] = subject
msg_root['From'] = sender
msg_root['To'] = ','.join(recipients)
return msg_root
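# Illustrative usage sketch (placeholder addresses): build a message and render
# it to the RFC 2822 text that smtplib.sendmail() expects.
#
#   msg = generate_email('luigi@example.com', 'Task failed', 'details...',
#                        ['ops@example.com'], image_png=None)
#   raw = msg.as_string()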
def wrap_traceback(traceback):
"""
For internal use only (until further notice)
"""
if email().format == 'html':
try:
from pygments import highlight
from pygments.lexers import PythonTracebackLexer
from pygments.formatters import HtmlFormatter
with_pygments = True
except ImportError:
with_pygments = False
if with_pygments:
formatter = HtmlFormatter(noclasses=True)
wrapped = highlight(traceback, PythonTracebackLexer(), formatter)
else:
wrapped = '<pre>%s</pre>' % traceback
else:
wrapped = traceback
return wrapped
def send_email_smtp(sender, subject, message, recipients, image_png):
import smtplib
smtp_config = smtp()
kwargs = dict(
host=smtp_config.host,
port=smtp_config.port,
local_hostname=smtp_config.local_hostname,
)
if smtp_config.timeout:
kwargs['timeout'] = smtp_config.timeout
try:
smtp_conn = smtplib.SMTP_SSL(**kwargs) if smtp_config.ssl else smtplib.SMTP(**kwargs)
smtp_conn.ehlo_or_helo_if_needed()
if smtp_conn.has_extn('starttls') and not smtp_config.no_tls:
smtp_conn.starttls()
if smtp_config.username and smtp_config.password:
smtp_conn.login(smtp_config.username, smtp_config.password)
msg_root = generate_email(sender, subject, message, recipients, image_png)
smtp_conn.sendmail(sender, recipients, msg_root.as_string())
except socket.error:
logger.error("Not able to connect to smtp server")
def send_email_ses(sender, subject, message, recipients, image_png):
"""
Sends notification through AWS SES.
Does not handle access keys. Use either
1/ configuration file
2/ EC2 instance profile
See also https://boto3.readthedocs.io/en/latest/guide/configuration.html.
"""
from boto3 import client as boto3_client
client = boto3_client('ses')
msg_root = generate_email(sender, subject, message, recipients, image_png)
response = client.send_raw_email(Source=sender,
Destinations=recipients,
RawMessage={'Data': msg_root.as_string()})
logger.debug(("Message sent to SES.\nMessageId: {},\nRequestId: {},\n"
"HTTPStatusCode: {}").format(response['MessageId'],
response['ResponseMetadata']['RequestId'],
response['ResponseMetadata']['HTTPStatusCode']))
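# Credential lookup follows the standard boto3 chain; an illustrative
# ~/.aws/credentials file (placeholder values) looks like:
#
#   [default]
#   aws_access_key_id = AKIA...
#   aws_secret_access_key = ...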
def send_email_sendgrid(sender, subject, message, recipients, image_png):
import sendgrid as sendgrid_lib
client = sendgrid_lib.SendGridClient(
sendgrid().username, sendgrid().password, raise_errors=True)
to_send = sendgrid_lib.Mail()
to_send.add_to(recipients)
to_send.set_from(sender)
to_send.set_subject(subject)
if email().format == 'html':
to_send.set_html(message)
else:
to_send.set_text(message)
if image_png:
to_send.add_attachment(image_png)
client.send(to_send)
def _email_disabled_reason():
if email().format == 'none':
return "email format is 'none'"
elif email().force_send:
return None
elif sys.stdout.isatty():
return "running from a tty"
else:
return None
def send_email_sns(sender, subject, message, topic_ARN, image_png):
"""
Sends notification through AWS SNS. Takes Topic ARN from recipients.
Does not handle access keys. Use either
1/ configuration file
2/ EC2 instance profile
See also https://boto3.readthedocs.io/en/latest/guide/configuration.html.
"""
from boto3 import resource as boto3_resource
sns = boto3_resource('sns')
topic = sns.Topic(topic_ARN[0])
# Subject is max 100 chars
if len(subject) > 100:
subject = subject[0:48] + '...' + subject[-49:]
response = topic.publish(Subject=subject, Message=message)
logger.debug(("Message sent to SNS.\nMessageId: {},\nRequestId: {},\n"
"HTTPStatusCode: {}").format(response['MessageId'],
response['ResponseMetadata']['RequestId'],
response['ResponseMetadata']['HTTPStatusCode']))
def send_email(subject, message, sender, recipients, image_png=None):
"""
Decides whether to send a notification. Sending is skipped if there are no
recipients, if the email format is 'none', or if stdout is attached to a tty
(unless force_send is enabled).
Dispatches on config value email.method. Default is 'smtp'.
"""
notifiers = {
'ses': send_email_ses,
'sendgrid': send_email_sendgrid,
'smtp': send_email_smtp,
'sns': send_email_sns,
}
subject = _prefix(subject)
if not recipients or recipients == (None,):
return
if _email_disabled_reason():
logger.info("Not sending email to %r because %s",
recipients, _email_disabled_reason())
return
# Clean the recipients lists to allow multiple email addresses, comma
# separated in luigi.cfg
recipients_tmp = []
for r in recipients:
recipients_tmp.extend([a.strip() for a in r.split(',') if a.strip()])
# Replace original recipients with the clean list
recipients = recipients_tmp
logger.info("Sending email to %r", recipients)
# Get appropriate sender and call it to send the notification
email_sender = notifiers[email().method]
email_sender(sender, subject, message, recipients, image_png)
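# Illustrative call sketch (placeholder addresses): recipients may mix single
# addresses and comma-separated strings; both are flattened before dispatch.
#
#   send_email('Task failed', 'traceback...', 'luigi@example.com',
#              ['ops@example.com', 'a@example.com,b@example.com'])
#
# delivers to ops@, a@ and b@ via the backend selected by email().method.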
def _email_recipients(additional_recipients=None):
receiver = email().receiver
recipients = [receiver] if receiver else []
if additional_recipients:
if isinstance(additional_recipients, str):
recipients.append(additional_recipients)
else:
recipients.extend(additional_recipients)
return recipients
def send_error_email(subject, message, additional_recipients=None):
"""
Sends an email to the configured error email, if it's configured.
"""
recipients = _email_recipients(additional_recipients)
sender = email().sender
send_email(
subject=subject,
message=message,
sender=sender,
recipients=recipients
)
def _prefix(subject):
"""
If the config has a special prefix for emails then this function adds
this prefix.
"""
if email().prefix:
return "{} {}".format(email().prefix, subject)
else:
return subject
def format_task_error(headline, task, command, formatted_exception=None):
"""
Format a message body for an error email related to a luigi.task.Task
:param headline: Summary line for the message
:param task: `luigi.task.Task` instance where this error occurred
:param command: command line used to run the task (shown in the message body)
:param formatted_exception: optional string showing traceback
:return: message body
"""
if formatted_exception:
formatted_exception = wrap_traceback(formatted_exception)
else:
formatted_exception = ""
if email().format == 'html':
msg_template = textwrap.dedent('''
<html>
<body>
<h2>{headline}</h2>
<table style="border-top: 1px solid black; border-bottom: 1px solid black">
<thead>
<tr><th>name</th><td>{name}</td></tr>
</thead>
<tbody>
{param_rows}
</tbody>
</table>
<h2>Command line</h2>
<pre>
{command}
</pre>
<h2>Traceback</h2>
{traceback}
</body>
</html>
''')
str_params = task.to_str_params()
params = '\n'.join('<tr><th>{}</th><td>{}</td></tr>'.format(*items) for items in str_params.items())
body = msg_template.format(headline=headline, name=task.task_family, param_rows=params,
command=command, traceback=formatted_exception)
else:
msg_template = textwrap.dedent('''\
{headline}
Name: {name}
Parameters:
{params}
Command line:
{command}
{traceback}
''')
str_params = task.to_str_params()
max_width = max([0] + [len(x) for x in str_params.keys()])
params = '\n'.join(' {:{width}}: {}'.format(*items, width=max_width) for items in str_params.items())
body = msg_template.format(headline=headline, name=task.task_family, params=params,
command=command, traceback=formatted_exception)
return body
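# Illustrative usage sketch (assumes a concrete Task instance named `task` and
# the standard-library `traceback` module):
#
#   body = format_task_error('Luigi task failed', task,
#                            command='luigi MyTask --local-scheduler',
#                            formatted_exception=traceback.format_exc())
#   send_error_email('Luigi task failed', body)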
|
|
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import gzip
import io
import json
import os
import re
import shutil
import signal
import socket
import sys
import tarfile
import tempfile
import threading
import time
import unittest
import warnings
import random
import docker
import requests
import six
import fake_api
try:
from unittest import mock
except ImportError:
import mock
warnings.simplefilter('error')
create_host_config = docker.utils.create_host_config
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None):
res = requests.Response()
res.status_code = status_code
if not isinstance(content, six.binary_type):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
res.reason = reason
res.elapsed = datetime.timedelta(elapsed)
res.request = request
return res
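# Illustrative sketch: a canned 200 response carrying a JSON body, the shape
# the mocked client methods receive from the fake API below.
#
#   ok = response(status_code=200, content={'Id': 'abc123'},
#                 headers={'Content-Type': 'application/json'})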
def fake_resolve_authconfig(authconfig, registry=None):
return None
def fake_resp(url, data=None, **kwargs):
status_code, content = fake_api.fake_responses[url]()
return response(status_code=status_code, content=content)
fake_request = mock.Mock(side_effect=fake_resp)
url_prefix = 'http+unix://var/run/docker.sock/v{0}/'.format(
docker.client.DEFAULT_DOCKER_API_VERSION)
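# Descriptive note: fake_resp dispatches on the full request URL, looking it up
# in fake_api.fake_responses and wrapping the canned (status_code, content)
# pair in a requests.Response; e.g. fake_resp(url_prefix + 'version') returns
# the fake /version payload. fake_request records the calls the tests assert on.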
class Cleanup(object):
if sys.version_info < (2, 7):
# Provide a basic implementation of addCleanup for Python < 2.7
def __init__(self, *args, **kwargs):
super(Cleanup, self).__init__(*args, **kwargs)
self._cleanups = []
def tearDown(self):
super(Cleanup, self).tearDown()
ok = True
while self._cleanups:
fn, args, kwargs = self._cleanups.pop(-1)
try:
fn(*args, **kwargs)
except KeyboardInterrupt:
raise
except:
ok = False
if not ok:
raise
def addCleanup(self, function, *args, **kwargs):
self._cleanups.append((function, args, kwargs))
@mock.patch.multiple('docker.Client', get=fake_request, post=fake_request,
put=fake_request, delete=fake_request)
class DockerClientTest(Cleanup, unittest.TestCase):
def setUp(self):
self.client = docker.Client()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
def tearDown(self):
self.client.close()
def assertIn(self, object, collection):
if six.PY2 and sys.version_info[1] <= 6:
return self.assertTrue(object in collection)
return super(DockerClientTest, self).assertIn(object, collection)
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
return {"Tty": False, "Image": img, "Cmd": cmd,
"AttachStdin": False, "Memory": 0,
"AttachStderr": True, "AttachStdout": True,
"StdinOnce": False,
"OpenStdin": False, "NetworkDisabled": False,
"MemorySwap": 0
}
def test_ctor(self):
try:
docker.Client(version=1.12)
except Exception as e:
self.assertTrue(isinstance(e, docker.errors.DockerException))
if not six.PY3:
self.assertEqual(
str(e),
'Version parameter must be a string or None. Found float'
)
#########################
# INFORMATION TESTS #
#########################
def test_version(self):
try:
self.client.version()
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'version',
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_retrieve_server_version(self):
client = docker.Client(version="auto")
self.assertTrue(isinstance(client._version, six.string_types))
self.assertFalse(client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
try:
version = self.client._retrieve_server_version()
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
else:
self.assertTrue(isinstance(version, six.string_types))
def test_info(self):
try:
self.client.info()
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'info',
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_search(self):
try:
self.client.search('busybox')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/search',
params={'term': 'busybox'},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_image_viz(self):
try:
self.client.images('busybox', viz=True)
self.fail('Viz output should not be supported!')
except Exception:
pass
def test_events(self):
try:
self.client.events()
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
stream=True
)
def test_events_with_since_until(self):
ts = 1356048000
now = datetime.datetime.fromtimestamp(ts)
since = now - datetime.timedelta(seconds=10)
until = now + datetime.timedelta(seconds=10)
try:
self.client.events(since=since, until=until)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'events',
params={
'since': ts - 10,
'until': ts + 10,
'filters': None
},
stream=True
)
def test_events_with_filters(self):
filters = {'event': ['die', 'stop'],
'container': fake_api.FAKE_CONTAINER_ID}
try:
self.client.events(filters=filters)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
expected_filters = docker.utils.convert_filters(filters)
fake_request.assert_called_with(
url_prefix + 'events',
params={
'since': None,
'until': None,
'filters': expected_filters
},
stream=True
)
###################
# LISTING TESTS #
###################
def test_images(self):
try:
self.client.images(all=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 1},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_images_quiet(self):
try:
self.client.images(all=True, quiet=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 1},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_image_ids(self):
try:
self.client.images(quiet=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 0},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_images_filters(self):
try:
self.client.images(filters={'dangling': True})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 0,
'filters': '{"dangling": ["true"]}'},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_list_containers(self):
try:
self.client.containers(all=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/json',
params={
'all': 1,
'since': None,
'size': 0,
'limit': -1,
'trunc_cmd': 1,
'before': None
},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
#####################
# CONTAINER TESTS #
#####################
def test_create_container(self):
try:
self.client.create_container('busybox', 'true')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false, "Memory": 0,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_binds(self):
mount_dest = '/mnt'
try:
self.client.create_container('busybox', ['ls', mount_dest],
volumes=[mount_dest])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}}, "Memory": 0,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volume_string(self):
mount_dest = '/mnt'
try:
self.client.create_container('busybox', ['ls', mount_dest],
volumes=mount_dest)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}}, "Memory": 0,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_ports(self):
try:
self.client.create_container('busybox', 'ls',
ports=[1111, (2222, 'udp'), (3333,)])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"Memory": 0, "ExposedPorts": {
"1111/tcp": {},
"2222/udp": {},
"3333/tcp": {}
},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_entrypoint(self):
try:
self.client.create_container('busybox', 'hello',
entrypoint='cowsay')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["hello"], "AttachStdin": false,
"Memory": 0,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Entrypoint": "cowsay",
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpu_shares(self):
try:
self.client.create_container('busybox', 'ls',
cpu_shares=5)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"Memory": 0,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"CpuShares": 5,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpuset(self):
try:
self.client.create_container('busybox', 'ls',
cpuset='0,1')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"Memory": 0,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Cpuset": "0,1",
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_working_dir(self):
try:
self.client.create_container('busybox', 'ls',
working_dir='/root')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"Memory": 0,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"WorkingDir": "/root",
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_stdin_open(self):
try:
self.client.create_container('busybox', 'true', stdin_open=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": true, "Memory": 0,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": true,
"OpenStdin": true, "NetworkDisabled": false,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volumes_from(self):
vol_names = ['foo', 'bar']
try:
self.client.create_container('busybox', 'true',
volumes_from=vol_names)
except docker.errors.DockerException as e:
self.assertTrue(
docker.utils.compare_version('1.10', self.client._version) >= 0
)
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['VolumesFrom'],
','.join(vol_names))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_empty_volumes_from(self):
try:
self.client.create_container('busybox', 'true', volumes_from=[])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertTrue('VolumesFrom' not in data)
def test_create_named_container(self):
try:
self.client.create_container('busybox', 'true',
name='marisa-kirisame')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false, "Memory": 0,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false,
"MemorySwap": 0}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(args[1]['params'], {'name': 'marisa-kirisame'})
def test_create_container_with_mem_limit_as_int(self):
try:
self.client.create_container('busybox', 'true',
mem_limit=128.0)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string(self):
try:
self.client.create_container('busybox', 'true',
mem_limit='128')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string_with_k_unit(self):
try:
self.client.create_container('busybox', 'true',
mem_limit='128k')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['Memory'], 128.0 * 1024)
def test_create_container_with_mem_limit_as_string_with_m_unit(self):
try:
self.client.create_container('busybox', 'true',
mem_limit='128m')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['Memory'], 128.0 * 1024 * 1024)
def test_create_container_with_mem_limit_as_string_with_g_unit(self):
try:
self.client.create_container('busybox', 'true',
mem_limit='128g')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['Memory'], 128.0 * 1024 * 1024 * 1024)
def test_create_container_with_mem_limit_as_string_with_wrong_value(self):
self.assertRaises(docker.errors.DockerException,
self.client.create_container,
'busybox', 'true', mem_limit='128p')
self.assertRaises(docker.errors.DockerException,
self.client.create_container,
'busybox', 'true', mem_limit='1f28')
def test_start_container(self):
try:
self.client.start(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {})
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_lxc_conf(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'],
{'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'],
docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_lxc_conf_compat(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(
json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_ro(self):
try:
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": True
}}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:ro"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_rw(self):
try:
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": False
}}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_port_binds(self):
self.maxDiff = None
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
port_bindings = data['HostConfig']['PortBindings']
self.assertTrue('1111/tcp' in port_bindings)
self.assertTrue('2222/tcp' in port_bindings)
self.assertTrue('3333/udp' in port_bindings)
self.assertTrue('4444/tcp' in port_bindings)
self.assertTrue('5555/tcp' in port_bindings)
self.assertTrue('6666/tcp' in port_bindings)
self.assertEqual(
[{"HostPort": "", "HostIp": ""}],
port_bindings['1111/tcp']
)
self.assertEqual(
[{"HostPort": "2222", "HostIp": ""}],
port_bindings['2222/tcp']
)
self.assertEqual(
[{"HostPort": "3333", "HostIp": ""}],
port_bindings['3333/udp']
)
self.assertEqual(
[{"HostPort": "", "HostIp": "127.0.0.1"}],
port_bindings['4444/tcp']
)
self.assertEqual(
[{"HostPort": "5555", "HostIp": "127.0.0.1"}],
port_bindings['5555/tcp']
)
self.assertEqual(len(port_bindings['6666/tcp']), 2)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_mac_address(self):
try:
mac_address_expected = "02:42:ac:11:00:0a"
container = self.client.create_container(
'busybox', ['sleep', '60'], mac_address=mac_address_expected)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
res = self.client.inspect_container(container['Id'])
self.assertEqual(mac_address_expected,
res['NetworkSettings']['MacAddress'])
def test_create_container_with_links(self):
try:
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
links={link_path: alias}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0], url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_multiple_links(self):
try:
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
links={
link_path + '1': alias + '1',
link_path + '2': alias + '2'
}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Links'] = [
'path1:alias1', 'path2:alias2'
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_links_as_list_of_tuples(self):
try:
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
links=[(link_path, alias)]
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_privileged(self):
try:
self.client.create_container(
'busybox', 'true',
host_config=create_host_config(privileged=True)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Privileged'] = True
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_with_lxc_conf(self):
try:
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(
json.loads(args[1]['data']),
{"LxcConf": [{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}]}
)
self.assertEqual(
args[1]['headers'],
{'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'],
docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_with_lxc_conf_compat(self):
try:
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/3cc2351ab11b/start')
self.assertEqual(
json.loads(args[1]['data']),
{"LxcConf": [{"Key": "lxc.conf.k", "Value": "lxc.conf.value"}]}
)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_with_binds_ro(self):
try:
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.start(fake_api.FAKE_CONTAINER_ID,
binds={mount_origin: {
"bind": mount_dest,
"ro": True
}})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/3cc2351ab11b/start')
self.assertEqual(
json.loads(args[1]['data']), {"Binds": ["/tmp:/mnt:ro"]}
)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
docker.client.DEFAULT_TIMEOUT_SECONDS)
def test_start_container_with_binds_rw(self):
try:
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.start(fake_api.FAKE_CONTAINER_ID,
binds={mount_origin: {
"bind": mount_dest, "ro": False}})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/3cc2351ab11b/start')
self.assertEqual(
json.loads(args[1]['data']), {"Binds": ["/tmp:/mnt:rw"]}
)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_with_port_binds(self):
self.maxDiff = None
try:
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix +
'containers/3cc2351ab11b/start')
data = json.loads(args[1]['data'])
self.assertTrue('1111/tcp' in data['PortBindings'])
self.assertTrue('2222/tcp' in data['PortBindings'])
self.assertTrue('3333/udp' in data['PortBindings'])
self.assertTrue('4444/tcp' in data['PortBindings'])
self.assertTrue('5555/tcp' in data['PortBindings'])
self.assertTrue('6666/tcp' in data['PortBindings'])
self.assertEqual(
[{"HostPort": "", "HostIp": ""}],
data['PortBindings']['1111/tcp']
)
self.assertEqual(
[{"HostPort": "2222", "HostIp": ""}],
data['PortBindings']['2222/tcp']
)
self.assertEqual(
[{"HostPort": "3333", "HostIp": ""}],
data['PortBindings']['3333/udp']
)
self.assertEqual(
[{"HostPort": "", "HostIp": "127.0.0.1"}],
data['PortBindings']['4444/tcp']
)
self.assertEqual(
[{"HostPort": "5555", "HostIp": "127.0.0.1"}],
data['PortBindings']['5555/tcp']
)
self.assertEqual(len(data['PortBindings']['6666/tcp']), 2)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_with_links(self):
# one link
try:
link_path = 'path'
alias = 'alias'
self.client.start(fake_api.FAKE_CONTAINER_ID,
links={link_path: alias})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(
json.loads(args[1]['data']), {"Links": ["path:alias"]}
)
self.assertEqual(
args[1]['headers'],
{'Content-Type': 'application/json'}
)
def test_start_container_with_multiple_links(self):
try:
link_path = 'path'
alias = 'alias'
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
link_path + '1': alias + '1',
link_path + '2': alias + '2'
}
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(
json.loads(args[1]['data']),
{"Links": ["path1:alias1", "path2:alias2"]}
)
self.assertEqual(
args[1]['headers'],
{'Content-Type': 'application/json'}
)
def test_start_container_with_links_as_list_of_tuples(self):
# one link
try:
link_path = 'path'
alias = 'alias'
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[(link_path, alias)])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(
json.loads(args[1]['data']), {"Links": ["path:alias"]}
)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_start_container_privileged(self):
try:
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {"Privileged": True})
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_with_dict_instead_of_id(self):
try:
self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {})
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_restart_policy(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
restart_policy={
"Name": "always",
"MaximumRetryCount": 0
}
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['RestartPolicy'] = {
"MaximumRetryCount": 0, "Name": "always"
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_added_capabilities(self):
try:
self.client.create_container(
'busybox', 'true',
host_config=create_host_config(cap_add=['MKNOD'])
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['CapAdd'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_dropped_capabilities(self):
try:
self.client.create_container(
'busybox', 'true',
host_config=create_host_config(cap_drop=['MKNOD'])
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['CapDrop'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_devices(self):
try:
self.client.create_container(
'busybox', 'true', host_config=create_host_config(
devices=['/dev/sda:/dev/xvda:rwm',
'/dev/sdb:/dev/xvdb',
'/dev/sdc']
)
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = create_host_config()
expected_payload['HostConfig']['Devices'] = [
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvda',
'PathOnHost': '/dev/sda'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvdb',
'PathOnHost': '/dev/sdb'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/sdc',
'PathOnHost': '/dev/sdc'}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_dict(self):
labels_dict = {
six.text_type('foo'): six.text_type('1'),
six.text_type('bar'): six.text_type('2'),
}
try:
self.client.create_container(
'busybox', 'true',
labels=labels_dict,
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_list(self):
labels_list = [
six.text_type('foo'),
six.text_type('bar'),
]
labels_dict = {
six.text_type('foo'): six.text_type(),
six.text_type('bar'): six.text_type(),
}
try:
self.client.create_container(
'busybox', 'true',
labels=labels_list,
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_resize_container(self):
try:
self.client.resize(
{'Id': fake_api.FAKE_CONTAINER_ID},
height=15,
width=120
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/resize',
params={'h': 15, 'w': 120},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_rename_container(self):
try:
self.client.rename(
{'Id': fake_api.FAKE_CONTAINER_ID},
name='foobar'
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/rename',
params={'name': 'foobar'},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_wait(self):
try:
self.client.wait(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def test_wait_with_dict_instead_of_id(self):
try:
self.client.wait({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def test_url_compatibility_unix(self):
c = docker.Client(base_url="unix://socket")
assert c.base_url == "http+unix://socket"
def test_url_compatibility_unix_triple_slash(self):
c = docker.Client(base_url="unix:///socket")
assert c.base_url == "http+unix://socket"
def test_url_compatibility_http_unix_triple_slash(self):
c = docker.Client(base_url="http+unix:///socket")
assert c.base_url == "http+unix://socket"
def test_url_compatibility_http(self):
c = docker.Client(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
c = docker.Client(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_logs(self):
try:
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_logs_with_dict_instead_of_id(self):
try:
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_log_streaming(self):
try:
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_log_tail(self):
try:
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False, tail=10)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 10},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_diff(self):
try:
self.client.diff(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_diff_with_dict_instead_of_id(self):
try:
self.client.diff({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_port(self):
try:
self.client.port({'Id': fake_api.FAKE_CONTAINER_ID}, 1111)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/json',
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_stop_container(self):
timeout = 2
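        # The request timeout has to cover the container's own stop timeout,
        # hence DEFAULT_TIMEOUT_SECONDS + timeout in the expected call below.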
try:
self.client.stop(fake_api.FAKE_CONTAINER_ID, timeout=timeout)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(docker.client.DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_stop_container_with_dict_instead_of_id(self):
timeout = 2
try:
self.client.stop({'Id': fake_api.FAKE_CONTAINER_ID},
timeout=timeout)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(docker.client.DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_execute_command(self):
try:
self.client.execute(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(args[0][0],
url_prefix + 'exec/3cc2351ab11b/start')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''{
"Tty": false,
"AttachStderr": true,
"Container": "3cc2351ab11b",
"Cmd": ["ls", "-1"],
"AttachStdin": false,
"User": "",
"Detach": false,
"Privileged": false,
"AttachStdout": true}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_pause_container(self):
try:
self.client.pause(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/pause',
timeout=(docker.client.DEFAULT_TIMEOUT_SECONDS)
)
def test_unpause_container(self):
try:
self.client.unpause(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/unpause',
timeout=(docker.client.DEFAULT_TIMEOUT_SECONDS)
)
def test_kill_container(self):
try:
self.client.kill(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_dict_instead_of_id(self):
try:
self.client.kill({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_signal(self):
try:
self.client.kill(fake_api.FAKE_CONTAINER_ID, signal=signal.SIGTERM)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/kill',
params={'signal': signal.SIGTERM},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container(self):
try:
self.client.restart(fake_api.FAKE_CONTAINER_ID, timeout=2)
except Exception as e:
            self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container_with_dict_instead_of_id(self):
try:
self.client.restart({'Id': fake_api.FAKE_CONTAINER_ID}, timeout=2)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container(self):
try:
self.client.remove_container(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container_with_dict_instead_of_id(self):
try:
self.client.remove_container({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_remove_link(self):
try:
self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': True, 'force': False},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_export(self):
try:
self.client.export(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_export_with_dict_instead_of_id(self):
try:
self.client.export({'Id': fake_api.FAKE_CONTAINER_ID})
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container(self):
try:
self.client.inspect_container(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/json',
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_container_stats(self):
try:
self.client.stats(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'containers/3cc2351ab11b/stats',
timeout=60,
stream=True
)
##################
# IMAGES TESTS #
##################
def test_pull(self):
try:
self.client.pull('joffrey/test001')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertFalse(args[1]['stream'])
def test_pull_stream(self):
try:
self.client.pull('joffrey/test001', stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
args = fake_request.call_args
self.assertEqual(
args[0][0],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertTrue(args[1]['stream'])
def test_commit(self):
try:
self.client.commit(fake_api.FAKE_CONTAINER_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'commit',
data='{}',
headers={'Content-Type': 'application/json'},
params={
'repo': None,
'comment': None,
'tag': None,
'container': '3cc2351ab11b',
'author': None
},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_remove_image(self):
try:
self.client.remove_image(fake_api.FAKE_IMAGE_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128',
params={'force': False, 'noprune': False},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_image_history(self):
try:
self.client.history(fake_api.FAKE_IMAGE_NAME)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/history',
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_import_image(self):
try:
self.client.import_image(
fake_api.FAKE_TARBALL_PATH,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': fake_api.FAKE_TARBALL_PATH
},
data=None,
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_file(self):
buf = tempfile.NamedTemporaryFile(delete=False)
try:
            # pretend the buffer is a file
self.client.import_image(
buf.name,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': '-'
},
data='',
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
buf.close()
os.remove(buf.name)
def test_import_image_from_image(self):
try:
self.client.import_image(
image=fake_api.FAKE_IMAGE_NAME,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromImage': fake_api.FAKE_IMAGE_NAME
},
data=None,
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image(self):
try:
self.client.inspect_image(fake_api.FAKE_IMAGE_NAME)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/json',
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_insert_image(self):
try:
self.client.insert(fake_api.FAKE_IMAGE_NAME,
fake_api.FAKE_URL, fake_api.FAKE_PATH)
except docker.errors.DeprecatedMethod as e:
self.assertTrue(
docker.utils.compare_version('1.12', self.client._version) >= 0
)
return
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/insert',
params={
'url': fake_api.FAKE_URL,
'path': fake_api.FAKE_PATH
},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_push_image(self):
try:
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_with_tag(self):
try:
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/push',
params={
'tag': fake_api.FAKE_TAG_NAME,
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_stream(self):
try:
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=True,
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image(self):
try:
self.client.tag(fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 0
},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_tag(self):
try:
self.client.tag(
fake_api.FAKE_IMAGE_ID,
fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': 'tag',
'repo': 'repo',
'force': 0
},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_force(self):
try:
self.client.tag(
fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME, force=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 1
},
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_get_image(self):
try:
self.client.get_image(fake_api.FAKE_IMAGE_ID)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/e9aa60c60128/get',
stream=True,
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
def test_load_image(self):
try:
self.client.load_image('Byte Stream....')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
fake_request.assert_called_with(
url_prefix + 'images/load',
data='Byte Stream....',
timeout=docker.client.DEFAULT_TIMEOUT_SECONDS
)
#################
# BUILDER TESTS #
#################
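    # The build tests below only assert that no exception is raised; the HTTP
    # layer is stubbed out by fake_request, so the request contents themselves
    # are not inspected here.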
def test_build_container(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
try:
self.client.build(fileobj=script)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_pull(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
try:
self.client.build(fileobj=script, pull=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_stream(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
try:
self.client.build(fileobj=script, stream=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_custom_context(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
context = docker.utils.mkbuildcontext(script)
try:
self.client.build(fileobj=context, custom_context=True)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_custom_context_gzip(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
context = docker.utils.mkbuildcontext(script)
gz_context = gzip.GzipFile(fileobj=context)
try:
self.client.build(
fileobj=gz_context,
custom_context=True,
encoding="gzip"
)
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_remote_with_registry_auth(self):
try:
self.client._auth_configs = {
'https://example.com': {
'user': 'example',
'password': 'example',
'email': 'example@example.com'
}
}
self.client.build(path='https://github.com/docker-library/mongo')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
def test_build_container_with_named_dockerfile(self):
try:
self.client.build('.', dockerfile='nameddockerfile')
except Exception as e:
self.fail('Command should not raise exception: {0}'.format(e))
#######################
# PY SPECIFIC TESTS #
#######################
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
cfg = docker.auth.load_config(folder)
self.assertTrue(cfg is not None)
def test_load_config(self):
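        # Exercises the legacy single-entry .dockercfg format: an "auth" line
        # holding base64("user:password") plus an "email" line, parsed into
        # credentials for the default index.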
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, '.dockercfg')
with open(dockercfg_path, 'w') as f:
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
f.write('auth = {0}\n'.format(auth_))
f.write('email = sakuya@scarlet.net')
cfg = docker.auth.load_config(dockercfg_path)
self.assertTrue(docker.auth.INDEX_URL in cfg)
self.assertNotEqual(cfg[docker.auth.INDEX_URL], None)
cfg = cfg[docker.auth.INDEX_URL]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_with_random_name(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder,
'.{0}.dockercfg'.format(
random.randrange(100000)))
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
with open(dockercfg_path, 'w') as f:
f.write(json.dumps(config))
cfg = docker.auth.load_config(dockercfg_path)
self.assertTrue(registry in cfg)
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_tar_with_excludes(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['test/foo', 'bar']:
os.makedirs(os.path.join(base, d))
            for name in ['a.txt', 'b.py', 'other.png']:
                with open(os.path.join(base, d, name), 'w') as f:
                    f.write("content")
for exclude, names in (
(['*.py'], ['bar', 'bar/a.txt', 'bar/other.png',
'test', 'test/foo', 'test/foo/a.txt',
'test/foo/other.png']),
(['*.png', 'bar'], ['test', 'test/foo', 'test/foo/a.txt',
'test/foo/b.py']),
(['test/foo', 'a.txt'], ['bar', 'bar/a.txt', 'bar/b.py',
'bar/other.png', 'test']),
):
with docker.utils.tar(base, exclude=exclude) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), names)
def test_tar_with_empty_directory(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'foo'])
def test_tar_with_file_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
with open(os.path.join(base, 'foo'), 'w') as f:
f.write("content")
os.makedirs(os.path.join(base, 'bar'))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'bar/foo', 'foo'])
def test_tar_with_directory_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'bar/foo', 'foo'])
#######################
# HOST CONFIG TESTS #
#######################
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = create_host_config(security_opt=security_opt)
self.assertIn('SecurityOpt', result)
self.assertEqual(result['SecurityOpt'], security_opt)
self.assertRaises(
docker.errors.DockerException, create_host_config,
security_opt='wrong'
)
class StreamTest(Cleanup, unittest.TestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, socket_dir)
self.addCleanup(shutil.rmtree, self.build_context)
self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
self.server_socket = self._setup_socket()
self.stop_server = False
server_thread = threading.Thread(target=self.run_server)
server_thread.setDaemon(True)
server_thread.start()
self.response = None
self.request_handler = None
self.addCleanup(server_thread.join)
self.addCleanup(self.stop)
def stop(self):
self.stop_server = True
def _setup_socket(self):
server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.bind(self.socket_file)
# Non-blocking mode so that we can shut the test down easily
server_sock.setblocking(0)
server_sock.listen(5)
return server_sock
def run_server(self):
try:
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
except socket.error:
# Probably no connection to accept yet
time.sleep(0.01)
continue
connection.setblocking(1)
try:
self.request_handler(connection)
finally:
connection.close()
finally:
self.server_socket.close()
def early_response_sending_handler(self, connection):
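        # Send the canned response before reading the request, then drain the
        # request headers and body so the client can finish writing.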
data = b''
headers = None
connection.sendall(self.response)
while not headers:
data += connection.recv(2048)
parts = data.split(b'\r\n\r\n', 1)
if len(parts) == 2:
headers, data = parts
mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
assert mo
content_length = int(mo.group(1))
while True:
if len(data) >= content_length:
break
data += connection.recv(2048)
def test_early_stream_response(self):
self.request_handler = self.early_response_sending_handler
lines = []
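        # Build a chunked transfer-encoding body by hand: each chunk is its
        # length in hex followed by the data, and a zero-length chunk
        # terminates the stream.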
for i in range(0, 50):
line = str(i).encode()
lines += [('%x' % len(line)).encode(), line]
lines.append(b'0')
lines.append(b'')
self.response = (
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
) + b'\r\n'.join(lines)
with docker.Client(base_url="http+unix://" + self.socket_file) \
as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
stream=True
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
self.assertEqual(list(stream), [
str(i).encode() for i in range(50)])
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2014 Montavista Software, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import copy
import ceilometerclient.client as cc
from oslo_log import log as logging
import six
from congress.datasources import datasource_driver
from congress.datasources import datasource_utils as ds_utils
LOG = logging.getLogger(__name__)
def d6service(name, keys, inbox, datapath, args):
"""Create a dataservice instance.
This method is called by d6cage to create a dataservice
instance. There are a couple of parameters we found useful
to add to that call, so we included them here instead of
modifying d6cage (and all the d6cage.createservice calls).
"""
return CeilometerDriver(name, keys, inbox, datapath, args)
# TODO(thinrichs): figure out how to move even more of this boilerplate
# into DataSourceDriver. E.g. change all the classes to Driver instead of
# NeutronDriver, CeilometerDriver, etc. and move the d6instantiate function
# to DataSourceDriver.
class CeilometerDriver(datasource_driver.DataSourceDriver,
datasource_driver.ExecutionDriver):
METERS = "meters"
ALARMS = "alarms"
EVENTS = "events"
EVENT_TRAITS = "events.traits"
ALARM_THRESHOLD_RULE = "alarms.threshold_rule"
STATISTICS = "statistics"
value_trans = {'translation-type': 'VALUE'}
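    # The translators below map ceilometer client objects onto Congress
    # tables: HDICT translators select the listed fields from each object
    # dict, while the nested VDICT/HDICT translators split sub-structures
    # (alarm threshold rules, event traits) into separate tables keyed back
    # to the parent row.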
meters_translator = {
'translation-type': 'HDICT',
'table-name': METERS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'meter_id', 'translator': value_trans},
{'fieldname': 'name', 'translator': value_trans},
{'fieldname': 'type', 'translator': value_trans},
{'fieldname': 'unit', 'translator': value_trans},
{'fieldname': 'source', 'translator': value_trans},
{'fieldname': 'resource_id', 'translator': value_trans},
{'fieldname': 'user_id', 'translator': value_trans},
{'fieldname': 'project_id', 'translator': value_trans})}
alarms_translator = {
'translation-type': 'HDICT',
'table-name': ALARMS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'alarm_id', 'translator': value_trans},
{'fieldname': 'name', 'translator': value_trans},
{'fieldname': 'state', 'translator': value_trans},
{'fieldname': 'enabled', 'translator': value_trans},
{'fieldname': 'threshold_rule', 'col': 'threshold_rule_id',
'translator': {'translation-type': 'VDICT',
'table-name': ALARM_THRESHOLD_RULE,
'id-col': 'threshold_rule_id',
'key-col': 'key', 'val-col': 'value',
'translator': value_trans}},
{'fieldname': 'type', 'translator': value_trans},
{'fieldname': 'description', 'translator': value_trans},
{'fieldname': 'time_constraints', 'translator': value_trans},
{'fieldname': 'user_id', 'translator': value_trans},
{'fieldname': 'project_id', 'translator': value_trans},
{'fieldname': 'alarm_actions', 'translator': value_trans},
{'fieldname': 'ok_actions', 'translator': value_trans},
{'fieldname': 'insufficient_data_actions', 'translator':
value_trans},
{'fieldname': 'repeat_actions', 'translator': value_trans},
{'fieldname': 'timestamp', 'translator': value_trans},
{'fieldname': 'state_timestamp', 'translator': value_trans},
)}
events_translator = {
'translation-type': 'HDICT',
'table-name': EVENTS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'message_id', 'translator': value_trans},
{'fieldname': 'event_type', 'translator': value_trans},
{'fieldname': 'generated', 'translator': value_trans},
{'fieldname': 'traits',
'translator': {'translation-type': 'HDICT',
'table-name': EVENT_TRAITS,
'selector-type': 'DICT_SELECTOR',
'in-list': True,
'parent-key': 'message_id',
'parent-col-name': 'event_message_id',
'field-translators':
({'fieldname': 'name',
'translator': value_trans},
{'fieldname': 'type',
'translator': value_trans},
{'fieldname': 'value',
'translator': value_trans}
)}}
)}
def safe_id(x):
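        # Statistics are grouped by resource, but the groupby value returned
        # by ceilometer may be a plain string or a dict; normalise it to a
        # string id either way.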
if isinstance(x, six.string_types):
return x
try:
return x['resource_id']
except KeyError:
return str(x)
statistics_translator = {
'translation-type': 'HDICT',
'table-name': STATISTICS,
'selector-type': 'DICT_SELECTOR',
'field-translators':
({'fieldname': 'meter_name', 'translator': value_trans},
{'fieldname': 'groupby', 'col': 'resource_id',
'translator': {'translation-type': 'VALUE',
'extract-fn': safe_id}},
{'fieldname': 'avg', 'translator': value_trans},
{'fieldname': 'count', 'translator': value_trans},
{'fieldname': 'duration', 'translator': value_trans},
{'fieldname': 'duration_start', 'translator': value_trans},
{'fieldname': 'duration_end', 'translator': value_trans},
{'fieldname': 'max', 'translator': value_trans},
{'fieldname': 'min', 'translator': value_trans},
{'fieldname': 'period', 'translator': value_trans},
{'fieldname': 'period_end', 'translator': value_trans},
{'fieldname': 'period_start', 'translator': value_trans},
{'fieldname': 'sum', 'translator': value_trans},
{'fieldname': 'unit', 'translator': value_trans})}
TRANSLATORS = [meters_translator, alarms_translator, events_translator,
statistics_translator]
def __init__(self, name='', keys='', inbox=None, datapath=None, args=None):
super(CeilometerDriver, self).__init__(name, keys, inbox,
datapath, args)
datasource_driver.ExecutionDriver.__init__(self)
self.creds = self.get_ceilometer_credentials_v2(args)
self.ceilometer_client = cc.get_client(**self.creds)
self._init_end_start_poll()
@staticmethod
def get_datasource_info():
result = {}
result['id'] = 'ceilometer'
result['description'] = ('Datasource driver that interfaces with '
'ceilometer.')
result['config'] = ds_utils.get_openstack_required_config()
result['secret'] = ['password']
return result
def update_from_datasource(self):
"""Read Data from Ceilometer datasource.
And to fill up the current state of the policy engine.
"""
LOG.debug("Ceilometer grabbing meters")
meters = self.ceilometer_client.meters.list()
self._translate_meters(meters)
LOG.debug("METERS: %s" % str(self.state[self.METERS]))
LOG.debug("Ceilometer grabbing alarms")
alarms = self.ceilometer_client.alarms.list()
self._translate_alarms(alarms)
LOG.debug("ALARMS: %s" % str(self.state[self.ALARMS]))
LOG.debug("THRESHOLD: %s"
% str(self.state[self.ALARM_THRESHOLD_RULE]))
LOG.debug("Ceilometer grabbing events")
events = self.ceilometer_client.events.list()
self._translate_events(events)
LOG.debug("EVENTS: %s" % str(self.state[self.EVENTS]))
LOG.debug("TRAITS: %s" % str(self.state[self.EVENT_TRAITS]))
LOG.debug("Ceilometer grabbing statistics")
statistics = self._get_statistics(meters)
self._translate_statistics(statistics)
LOG.debug("STATISTICS: %s" % str(self.state[self.STATISTICS]))
def _get_statistics(self, meters):
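        """Fetch per-resource statistics for every distinct meter name."""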
statistics = []
names = set()
for m in meters:
LOG.debug("Adding meter %s" % m.name)
names.add(m.name)
for meter_name in names:
LOG.debug("Getting all Resource ID for meter: %s"
% meter_name)
stat_list = self.ceilometer_client.statistics.list(
meter_name, groupby=['resource_id'])
LOG.debug("Statistics List: %s" % stat_list)
if (stat_list):
for temp in stat_list:
temp_dict = copy.copy(temp.to_dict())
temp_dict['meter_name'] = meter_name
statistics.append(temp_dict)
return statistics
def get_ceilometer_credentials_v2(self, creds):
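        """Build the keyword arguments expected by the v2 ceilometer client."""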
d = {}
d['version'] = '2'
d['username'] = creds['username']
d['password'] = creds['password']
d['auth_url'] = creds['auth_url']
d['tenant_name'] = creds['tenant_name']
return d
@ds_utils.update_state_on_changed(METERS)
def _translate_meters(self, obj):
"""Translate the meters represented by OBJ into tables."""
meters = [o.to_dict() for o in obj]
LOG.debug("METERS: %s" % str(meters))
row_data = CeilometerDriver.convert_objs(meters,
self.meters_translator)
return row_data
@ds_utils.update_state_on_changed(ALARMS)
def _translate_alarms(self, obj):
"""Translate the alarms represented by OBJ into tables."""
alarms = [o.to_dict() for o in obj]
LOG.debug("ALARMS: %s" % str(alarms))
row_data = CeilometerDriver.convert_objs(alarms,
self.alarms_translator)
return row_data
@ds_utils.update_state_on_changed(EVENTS)
def _translate_events(self, obj):
"""Translate the events represented by OBJ into tables."""
events = [o.to_dict() for o in obj]
LOG.debug("EVENTS: %s" % str(events))
row_data = CeilometerDriver.convert_objs(events,
self.events_translator)
return row_data
@ds_utils.update_state_on_changed(STATISTICS)
def _translate_statistics(self, obj):
"""Translate the statistics represented by OBJ into tables."""
LOG.debug("STATISTICS: %s" % str(obj))
row_data = CeilometerDriver.convert_objs(obj,
self.statistics_translator)
return row_data
def execute(self, action, action_args):
"""Overwrite ExecutionDriver.execute()."""
# action can be written as a method or an API call.
func = getattr(self, action, None)
if func and self.is_executable(func):
func(action_args)
else:
self._execute_api(self.ceilometer_client, action, action_args)
|
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import datetime
import pytest
import pytz
from cryptography import x509
from cryptography.hazmat.backends.interfaces import (
DSABackend, EllipticCurveBackend, RSABackend, X509Backend
)
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.x509.oid import AuthorityInformationAccessOID, NameOID
from .hazmat.primitives.fixtures_dsa import DSA_KEY_2048
from .hazmat.primitives.fixtures_rsa import RSA_KEY_2048, RSA_KEY_512
from .hazmat.primitives.test_ec import _skip_curve_unsupported
class TestCertificateRevocationListBuilder(object):
def test_issuer_name_invalid(self):
builder = x509.CertificateRevocationListBuilder()
with pytest.raises(TypeError):
builder.issuer_name("notanx509name")
def test_set_issuer_name_twice(self):
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, u'US')])
)
with pytest.raises(ValueError):
builder.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, u'US')])
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_aware_last_update(self, backend):
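        # A timezone-aware last_update should be normalised to UTC in the CRL.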
last_time = datetime.datetime(2012, 1, 16, 22, 43)
tz = pytz.timezone("US/Pacific")
last_time = tz.localize(last_time)
utc_last = datetime.datetime(2012, 1, 17, 6, 43)
next_time = datetime.datetime(2022, 1, 17, 6, 43)
private_key = RSA_KEY_2048.private_key(backend)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"cryptography.io CA")
])
).last_update(last_time).next_update(next_time)
crl = builder.sign(private_key, hashes.SHA256(), backend)
assert crl.last_update == utc_last
def test_last_update_invalid(self):
builder = x509.CertificateRevocationListBuilder()
with pytest.raises(TypeError):
builder.last_update("notadatetime")
def test_last_update_before_unix_epoch(self):
builder = x509.CertificateRevocationListBuilder()
with pytest.raises(ValueError):
builder.last_update(datetime.datetime(1960, 8, 10))
def test_set_last_update_twice(self):
builder = x509.CertificateRevocationListBuilder().last_update(
datetime.datetime(2002, 1, 1, 12, 1)
)
with pytest.raises(ValueError):
builder.last_update(datetime.datetime(2002, 1, 1, 12, 1))
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_aware_next_update(self, backend):
next_time = datetime.datetime(2022, 1, 16, 22, 43)
tz = pytz.timezone("US/Pacific")
next_time = tz.localize(next_time)
utc_next = datetime.datetime(2022, 1, 17, 6, 43)
last_time = datetime.datetime(2012, 1, 17, 6, 43)
private_key = RSA_KEY_2048.private_key(backend)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"cryptography.io CA")
])
).last_update(last_time).next_update(next_time)
crl = builder.sign(private_key, hashes.SHA256(), backend)
assert crl.next_update == utc_next
def test_next_update_invalid(self):
builder = x509.CertificateRevocationListBuilder()
with pytest.raises(TypeError):
builder.next_update("notadatetime")
def test_next_update_before_unix_epoch(self):
builder = x509.CertificateRevocationListBuilder()
with pytest.raises(ValueError):
builder.next_update(datetime.datetime(1960, 8, 10))
def test_set_next_update_twice(self):
builder = x509.CertificateRevocationListBuilder().next_update(
datetime.datetime(2002, 1, 1, 12, 1)
)
with pytest.raises(ValueError):
builder.next_update(datetime.datetime(2002, 1, 1, 12, 1))
def test_last_update_after_next_update(self):
builder = x509.CertificateRevocationListBuilder()
builder = builder.next_update(
datetime.datetime(2002, 1, 1, 12, 1)
)
with pytest.raises(ValueError):
builder.last_update(datetime.datetime(2003, 1, 1, 12, 1))
def test_next_update_after_last_update(self):
builder = x509.CertificateRevocationListBuilder()
builder = builder.last_update(
datetime.datetime(2002, 1, 1, 12, 1)
)
with pytest.raises(ValueError):
builder.next_update(datetime.datetime(2001, 1, 1, 12, 1))
def test_add_extension_checks_for_duplicates(self):
builder = x509.CertificateRevocationListBuilder().add_extension(
x509.CRLNumber(1), False
)
with pytest.raises(ValueError):
builder.add_extension(x509.CRLNumber(2), False)
def test_add_invalid_extension(self):
builder = x509.CertificateRevocationListBuilder()
with pytest.raises(TypeError):
builder.add_extension(
object(), False
)
def test_add_invalid_revoked_certificate(self):
builder = x509.CertificateRevocationListBuilder()
with pytest.raises(TypeError):
builder.add_revoked_certificate(object())
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_no_issuer_name(self, backend):
private_key = RSA_KEY_2048.private_key(backend)
builder = x509.CertificateRevocationListBuilder().last_update(
datetime.datetime(2002, 1, 1, 12, 1)
).next_update(
datetime.datetime(2030, 1, 1, 12, 1)
)
with pytest.raises(ValueError):
builder.sign(private_key, hashes.SHA256(), backend)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_no_last_update(self, backend):
private_key = RSA_KEY_2048.private_key(backend)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, u'US')])
).next_update(
datetime.datetime(2030, 1, 1, 12, 1)
)
with pytest.raises(ValueError):
builder.sign(private_key, hashes.SHA256(), backend)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_no_next_update(self, backend):
private_key = RSA_KEY_2048.private_key(backend)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([x509.NameAttribute(NameOID.COUNTRY_NAME, u'US')])
).last_update(
datetime.datetime(2030, 1, 1, 12, 1)
)
with pytest.raises(ValueError):
builder.sign(private_key, hashes.SHA256(), backend)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_sign_empty_list(self, backend):
private_key = RSA_KEY_2048.private_key(backend)
last_update = datetime.datetime(2002, 1, 1, 12, 1)
next_update = datetime.datetime(2030, 1, 1, 12, 1)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"cryptography.io CA")
])
).last_update(last_update).next_update(next_update)
crl = builder.sign(private_key, hashes.SHA256(), backend)
assert len(crl) == 0
assert crl.last_update == last_update
assert crl.next_update == next_update
@pytest.mark.parametrize(
"extension",
[
x509.CRLNumber(13),
x509.AuthorityKeyIdentifier(
b"\xc3\x9c\xf3\xfc\xd3F\x084\xbb\xceF\x7f\xa0|[\xf3\xe2\x08"
b"\xcbY",
None,
None
),
x509.AuthorityInformationAccess([
x509.AccessDescription(
AuthorityInformationAccessOID.CA_ISSUERS,
x509.DNSName(u"cryptography.io")
)
]),
x509.IssuerAlternativeName([
x509.UniformResourceIdentifier(u"https://cryptography.io"),
])
]
)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_sign_extensions(self, backend, extension):
private_key = RSA_KEY_2048.private_key(backend)
last_update = datetime.datetime(2002, 1, 1, 12, 1)
next_update = datetime.datetime(2030, 1, 1, 12, 1)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"cryptography.io CA")
])
).last_update(
last_update
).next_update(
next_update
).add_extension(
extension, False
)
crl = builder.sign(private_key, hashes.SHA256(), backend)
assert len(crl) == 0
assert len(crl.extensions) == 1
ext = crl.extensions.get_extension_for_class(type(extension))
assert ext.critical is False
assert ext.value == extension
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_sign_multiple_extensions_critical(self, backend):
private_key = RSA_KEY_2048.private_key(backend)
last_update = datetime.datetime(2002, 1, 1, 12, 1)
next_update = datetime.datetime(2030, 1, 1, 12, 1)
ian = x509.IssuerAlternativeName([
x509.UniformResourceIdentifier(u"https://cryptography.io"),
])
crl_number = x509.CRLNumber(13)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"cryptography.io CA")
])
).last_update(
last_update
).next_update(
next_update
).add_extension(
crl_number, False
).add_extension(
ian, True
)
crl = builder.sign(private_key, hashes.SHA256(), backend)
assert len(crl) == 0
assert len(crl.extensions) == 2
ext1 = crl.extensions.get_extension_for_class(x509.CRLNumber)
assert ext1.critical is False
assert ext1.value == crl_number
ext2 = crl.extensions.get_extension_for_class(
x509.IssuerAlternativeName
)
assert ext2.critical is True
assert ext2.value == ian
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_add_unsupported_extension(self, backend):
private_key = RSA_KEY_2048.private_key(backend)
last_update = datetime.datetime(2002, 1, 1, 12, 1)
next_update = datetime.datetime(2030, 1, 1, 12, 1)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"cryptography.io CA")
])
).last_update(
last_update
).next_update(
next_update
).add_extension(
x509.OCSPNoCheck(), False
)
with pytest.raises(NotImplementedError):
builder.sign(private_key, hashes.SHA256(), backend)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_sign_rsa_key_too_small(self, backend):
private_key = RSA_KEY_512.private_key(backend)
last_update = datetime.datetime(2002, 1, 1, 12, 1)
next_update = datetime.datetime(2030, 1, 1, 12, 1)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"cryptography.io CA")
])
).last_update(
last_update
).next_update(
next_update
)
with pytest.raises(ValueError):
builder.sign(private_key, hashes.SHA512(), backend)
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_sign_with_invalid_hash(self, backend):
private_key = RSA_KEY_2048.private_key(backend)
last_update = datetime.datetime(2002, 1, 1, 12, 1)
next_update = datetime.datetime(2030, 1, 1, 12, 1)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"cryptography.io CA")
])
).last_update(
last_update
).next_update(
next_update
)
with pytest.raises(TypeError):
builder.sign(private_key, object(), backend)
@pytest.mark.requires_backend_interface(interface=DSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_sign_dsa_key(self, backend):
if backend._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_101:
pytest.skip("Requires a newer OpenSSL. Must be >= 1.0.1")
private_key = DSA_KEY_2048.private_key(backend)
invalidity_date = x509.InvalidityDate(
datetime.datetime(2002, 1, 1, 0, 0)
)
ian = x509.IssuerAlternativeName([
x509.UniformResourceIdentifier(u"https://cryptography.io"),
])
revoked_cert0 = x509.RevokedCertificateBuilder().serial_number(
2
).revocation_date(
datetime.datetime(2012, 1, 1, 1, 1)
).add_extension(
invalidity_date, False
).build(backend)
last_update = datetime.datetime(2002, 1, 1, 12, 1)
next_update = datetime.datetime(2030, 1, 1, 12, 1)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"cryptography.io CA")
])
).last_update(
last_update
).next_update(
next_update
).add_revoked_certificate(
revoked_cert0
).add_extension(
ian, False
)
crl = builder.sign(private_key, hashes.SHA256(), backend)
assert crl.extensions.get_extension_for_class(
x509.IssuerAlternativeName
).value == ian
assert crl[0].serial_number == revoked_cert0.serial_number
assert crl[0].revocation_date == revoked_cert0.revocation_date
assert len(crl[0].extensions) == 1
ext = crl[0].extensions.get_extension_for_class(x509.InvalidityDate)
assert ext.critical is False
assert ext.value == invalidity_date
@pytest.mark.requires_backend_interface(interface=EllipticCurveBackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_sign_ec_key_unsupported(self, backend):
if backend._lib.CRYPTOGRAPHY_OPENSSL_LESS_THAN_101:
pytest.skip("Requires a newer OpenSSL. Must be >= 1.0.1")
_skip_curve_unsupported(backend, ec.SECP256R1())
private_key = ec.generate_private_key(ec.SECP256R1(), backend)
invalidity_date = x509.InvalidityDate(
datetime.datetime(2002, 1, 1, 0, 0)
)
ian = x509.IssuerAlternativeName([
x509.UniformResourceIdentifier(u"https://cryptography.io"),
])
revoked_cert0 = x509.RevokedCertificateBuilder().serial_number(
2
).revocation_date(
datetime.datetime(2012, 1, 1, 1, 1)
).add_extension(
invalidity_date, False
).build(backend)
last_update = datetime.datetime(2002, 1, 1, 12, 1)
next_update = datetime.datetime(2030, 1, 1, 12, 1)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"cryptography.io CA")
])
).last_update(
last_update
).next_update(
next_update
).add_revoked_certificate(
revoked_cert0
).add_extension(
ian, False
)
crl = builder.sign(private_key, hashes.SHA256(), backend)
assert crl.extensions.get_extension_for_class(
x509.IssuerAlternativeName
).value == ian
assert crl[0].serial_number == revoked_cert0.serial_number
assert crl[0].revocation_date == revoked_cert0.revocation_date
assert len(crl[0].extensions) == 1
ext = crl[0].extensions.get_extension_for_class(x509.InvalidityDate)
assert ext.critical is False
assert ext.value == invalidity_date
@pytest.mark.requires_backend_interface(interface=RSABackend)
@pytest.mark.requires_backend_interface(interface=X509Backend)
def test_sign_with_revoked_certificates(self, backend):
private_key = RSA_KEY_2048.private_key(backend)
last_update = datetime.datetime(2002, 1, 1, 12, 1)
next_update = datetime.datetime(2030, 1, 1, 12, 1)
invalidity_date = x509.InvalidityDate(
datetime.datetime(2002, 1, 1, 0, 0)
)
revoked_cert0 = x509.RevokedCertificateBuilder().serial_number(
38
).revocation_date(
datetime.datetime(2011, 1, 1, 1, 1)
).build(backend)
revoked_cert1 = x509.RevokedCertificateBuilder().serial_number(
2
).revocation_date(
datetime.datetime(2012, 1, 1, 1, 1)
).add_extension(
invalidity_date, False
).build(backend)
builder = x509.CertificateRevocationListBuilder().issuer_name(
x509.Name([
x509.NameAttribute(NameOID.COMMON_NAME, u"cryptography.io CA")
])
).last_update(
last_update
).next_update(
next_update
).add_revoked_certificate(
revoked_cert0
).add_revoked_certificate(
revoked_cert1
)
crl = builder.sign(private_key, hashes.SHA256(), backend)
assert len(crl) == 2
assert crl.last_update == last_update
assert crl.next_update == next_update
assert crl[0].serial_number == revoked_cert0.serial_number
assert crl[0].revocation_date == revoked_cert0.revocation_date
assert len(crl[0].extensions) == 0
assert crl[1].serial_number == revoked_cert1.serial_number
assert crl[1].revocation_date == revoked_cert1.revocation_date
assert len(crl[1].extensions) == 1
ext = crl[1].extensions.get_extension_for_class(x509.InvalidityDate)
assert ext.critical is False
assert ext.value == invalidity_date
|
|
import json, sys, re, hashlib, smtplib, base64, urllib, os, difflib
from auth import *
from django.http import *
from django.shortcuts import render_to_response
from django.views.decorators.csrf import csrf_exempt
from django.core.context_processors import csrf
from django.db.utils import IntegrityError
from utils import *
from models import *
p = os.path.abspath(os.path.dirname(__file__))
if(os.path.abspath(p+"/..") not in sys.path):
sys.path.append(os.path.abspath(p+"/.."))
'''
@author: Anant Bhardwaj
@date: Feb 12, 2012
'''
def home (request):
try:
conferences = Conference.objects.all().values()
login = get_login(request)
return render_to_response('home.html', {
'conferences':conferences,
'login_id': login[0],
'login_name': login[1]
}
)
except:
pass
def team (request):
current_team = json.loads(
open(p+'/fixtures/' + 'team.json').read())
past_collaborators = json.loads(
open(p+'/fixtures/' + 'collaborators.json').read())
login = get_login(request)
return render_to_response(
'team.html', {'current_team': current_team,
'past_collaborators': past_collaborators,
'login_id': login[0],
'login_name': login[1]
}
)
def conf (request, conf):
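    '''Resolve a conference by unique_name or confer_name and redirect to
    its papers page, falling back to the home page on failure.'''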
conf = conf.lower()
try:
request.session[kConf] = conf
Conference.objects.get(unique_name=conf)
return HttpResponseRedirect('/%s/papers' %(conf))
except Conference.DoesNotExist:
try:
c = Conference.objects.get(confer_name=conf)
request.session[kConf] = c.unique_name
return HttpResponseRedirect('/%s/papers' %(c.unique_name))
except:
return HttpResponseRedirect('/')
except:
return HttpResponseRedirect('/')
def papers (request, conf):
conf = conf.lower()
try:
Conference.objects.get(unique_name=conf)
request.session[kConf] = conf
login = get_login(request)
return render_to_response('papers.html', {
'conf':conf,
'login_id': login[0],
'login_name': login[1]
}
)
except:
return HttpResponseRedirect('/')
def schedule (request, conf):
conf = conf.lower()
try:
Conference.objects.get(unique_name=conf)
request.session[kConf] = conf
login = get_login(request)
return render_to_response('schedule.html', {
'conf':conf,
'login_id': login[0],
'login_name': login[1]
}
)
except:
return HttpResponseRedirect('/')
def paper (request, conf):
conf = conf.lower()
try:
request.session[kConf] = conf
login = get_login(request)
return render_to_response('paper.html', {
'conf':conf,
'login_id': login[0],
'login_name': login[1]
}
)
except:
return HttpResponseRedirect('/')
@login_required
def meetups (request, conf):
conf = conf.lower()
try:
similar_people = []
request.session[kConf] = conf
login = get_login(request)
user = User.objects.get(email=login[0])
meetups_enabled = user.meetups_enabled
if meetups_enabled:
similar_people = get_similar_people(login[0], conf)
return render_to_response('meetups.html', {
'conf':conf,
'similar_people': similar_people,
'meetups_enabled': meetups_enabled,
'login_id': login[0],
'login_name': login[1]
}
)
except Exception, e:
print e
return HttpResponseRedirect('/')
'''
AJAX Calls
'''
@csrf_exempt
def data (request):
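    '''Return the logged-in attendee's liked papers as JSON, seeding the
    list from the conference prefs.json (matched against the attendee's
    name) when no likes have been stored yet.'''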
recs = []
likes = []
error = False
msg = 'OK'
login = None
login_name = None
try:
login = request.session[kLogIn]
login_name = request.session[kName]
conf = request.session[kConf]
registration = get_registration(login, conf)
data = None
try:
data = Likes.objects.get(registration = registration)
except:
pass
if not data or not data.likes:
default_likes = []
try:
prefs = json.loads(
open(p+'/../data/%s/prefs.json' %(conf)).read())
name = request.session[kFName] + ' ' + request.session[kLName]
name = name.lower()
if name in prefs:
default_likes = prefs[name]
else:
matches = difflib.get_close_matches(name, prefs.keys())
if len(matches) > 0:
default_likes = prefs[matches[0]]
except Exception, e:
pass
data = Likes(registration = registration, likes=json.dumps(default_likes))
data.save()
likes.extend(json.loads(data.likes))
except Exception, e:
error = True
msg = str(e)
return HttpResponse(json.dumps({
'login_id': login,
'login_name': login_name,
'recs':recs,
'likes':likes,
'error': error,
'msg':msg}), mimetype="application/json")
@csrf_exempt
@login_required
def log (request, action):
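    '''Record a client-side action in the activity log for the current
    registration.'''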
try:
login = request.session[kLogIn]
conf = request.session[kConf]
registration = get_registration(login, conf)
insert_log(registration, action)
return HttpResponse(
json.dumps({'error':False}), mimetype="application/json")
except:
return HttpResponse(
json.dumps({'error':True}), mimetype="application/json")
@csrf_exempt
@login_required
def like (request, like_str):
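    '''Star or unstar the posted paper ids for the current registration and
    return the updated likes list as JSON.'''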
login = request.session[kLogIn]
    likes = []
    recs = []
    l = []
    res = {}
    error = False
    msg = "OK"
try:
papers = json.loads(request.POST["papers"])
conf = request.session[kConf]
registration = get_registration(login, conf)
data = None
insert_log(registration, like_str, papers)
try:
data = Likes.objects.get(registration = registration)
likes.extend(json.loads(data.likes))
except:
data = Likes(registration = registration, likes = json.dumps([]))
data.save()
for paper_id in papers:
if(like_str=='star' and (paper_id not in likes) and paper_id != ''):
likes.append(paper_id)
if(like_str=='unstar' and (paper_id in likes) and paper_id != ''):
likes.remove(paper_id)
l = list(set(likes))
data.likes = json.dumps(l)
data.save()
recs = []
except Exception, e:
error = True
msg = str(e)
return HttpResponse(
json.dumps({
'recs':recs,
'likes':l,
'error':error,
'msg':msg
}
),
mimetype="application/json"
)
|
|
"""Support for interface with an LG webOS Smart TV."""
import asyncio
from contextlib import suppress
from datetime import timedelta
from functools import wraps
import logging
from aiopylgtv import PyLGTVCmdException, PyLGTVPairException, WebOsClient
from websockets.exceptions import ConnectionClosed
from homeassistant import util
from homeassistant.components.media_player import DEVICE_CLASS_TV, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.components.webostv.const import (
ATTR_PAYLOAD,
ATTR_SOUND_OUTPUT,
CONF_ON_ACTION,
CONF_SOURCES,
DOMAIN,
LIVE_TV_APP_ID,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_CUSTOMIZE,
CONF_HOST,
CONF_NAME,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.script import Script
_LOGGER = logging.getLogger(__name__)
SUPPORT_WEBOSTV = (
SUPPORT_TURN_OFF
| SUPPORT_NEXT_TRACK
| SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY_MEDIA
| SUPPORT_PLAY
)
SUPPORT_WEBOSTV_VOLUME = SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_STEP
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
SCAN_INTERVAL = timedelta(seconds=10)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the LG webOS Smart TV platform."""
if discovery_info is None:
return
host = discovery_info[CONF_HOST]
name = discovery_info[CONF_NAME]
customize = discovery_info[CONF_CUSTOMIZE]
turn_on_action = discovery_info.get(CONF_ON_ACTION)
client = hass.data[DOMAIN][host]["client"]
on_script = Script(hass, turn_on_action, name, DOMAIN) if turn_on_action else None
entity = LgWebOSMediaPlayerEntity(client, name, customize, on_script)
async_add_entities([entity], update_before_add=False)
def cmd(func):
"""Catch command exceptions."""
@wraps(func)
async def wrapper(obj, *args, **kwargs):
"""Wrap all command methods."""
try:
await func(obj, *args, **kwargs)
except (
asyncio.TimeoutError,
asyncio.CancelledError,
PyLGTVCmdException,
) as exc:
# If TV is off, we expect calls to fail.
if obj.state == STATE_OFF:
level = logging.INFO
else:
level = logging.ERROR
_LOGGER.log(
level,
"Error calling %s on entity %s: %r",
func.__name__,
obj.entity_id,
exc,
)
return wrapper
class LgWebOSMediaPlayerEntity(MediaPlayerEntity):
"""Representation of a LG webOS Smart TV."""
def __init__(self, client: WebOsClient, name: str, customize, on_script=None):
"""Initialize the webos device."""
self._client = client
self._name = name
self._unique_id = client.client_key
self._customize = customize
self._on_script = on_script
# Assume that the TV is not paused
self._paused = False
self._current_source = None
self._source_list: dict = {}
async def async_added_to_hass(self):
"""Connect and subscribe to dispatcher signals and state updates."""
async_dispatcher_connect(self.hass, DOMAIN, self.async_signal_handler)
await self._client.register_state_update_callback(
self.async_handle_state_update
)
async def async_will_remove_from_hass(self):
"""Call disconnect on removal."""
self._client.unregister_state_update_callback(self.async_handle_state_update)
async def async_signal_handler(self, data):
"""Handle domain-specific signal by calling appropriate method."""
entity_ids = data[ATTR_ENTITY_ID]
if entity_ids == ENTITY_MATCH_NONE:
return
if entity_ids == ENTITY_MATCH_ALL or self.entity_id in entity_ids:
params = {
key: value
for key, value in data.items()
if key not in ["entity_id", "method"]
}
await getattr(self, data["method"])(**params)
async def async_handle_state_update(self):
"""Update state from WebOsClient."""
self.update_sources()
self.async_write_ha_state()
def update_sources(self):
"""Update list of sources from current source, apps, inputs and configured list."""
source_list = self._source_list
self._source_list = {}
conf_sources = self._customize[CONF_SOURCES]
found_live_tv = False
for app in self._client.apps.values():
if app["id"] == LIVE_TV_APP_ID:
found_live_tv = True
if app["id"] == self._client.current_appId:
self._current_source = app["title"]
self._source_list[app["title"]] = app
elif (
not conf_sources
or app["id"] in conf_sources
or any(word in app["title"] for word in conf_sources)
or any(word in app["id"] for word in conf_sources)
):
self._source_list[app["title"]] = app
for source in self._client.inputs.values():
if source["appId"] == LIVE_TV_APP_ID:
found_live_tv = True
if source["appId"] == self._client.current_appId:
self._current_source = source["label"]
self._source_list[source["label"]] = source
elif (
not conf_sources
or source["label"] in conf_sources
or any(source["label"].find(word) != -1 for word in conf_sources)
):
self._source_list[source["label"]] = source
# special handling of live tv since this might not appear in the app or input lists in some cases
if not found_live_tv:
app = {"id": LIVE_TV_APP_ID, "title": "Live TV"}
if LIVE_TV_APP_ID == self._client.current_appId:
self._current_source = app["title"]
self._source_list["Live TV"] = app
elif (
not conf_sources
or app["id"] in conf_sources
or any(word in app["title"] for word in conf_sources)
or any(word in app["id"] for word in conf_sources)
):
self._source_list["Live TV"] = app
if not self._source_list and source_list:
self._source_list = source_list
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
async def async_update(self):
"""Connect."""
if not self._client.is_connected():
with suppress(
OSError,
ConnectionClosed,
ConnectionRefusedError,
asyncio.TimeoutError,
asyncio.CancelledError,
PyLGTVPairException,
PyLGTVCmdException,
):
await self._client.connect()
@property
def unique_id(self):
"""Return the unique id of the device."""
return self._unique_id
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_class(self):
"""Return the device class of the device."""
return DEVICE_CLASS_TV
@property
def state(self):
"""Return the state of the device."""
if self._client.is_on:
return STATE_ON
return STATE_OFF
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._client.muted
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._client.volume is not None:
return self._client.volume / 100.0
return None
@property
def source(self):
"""Return the current input source."""
return self._current_source
@property
def source_list(self):
"""List of available input sources."""
return sorted(self._source_list)
@property
def media_content_type(self):
"""Content type of current playing media."""
if self._client.current_appId == LIVE_TV_APP_ID:
return MEDIA_TYPE_CHANNEL
return None
@property
def media_title(self):
"""Title of current playing media."""
if (self._client.current_appId == LIVE_TV_APP_ID) and (
self._client.current_channel is not None
):
return self._client.current_channel.get("channelName")
return None
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._client.current_appId in self._client.apps:
icon = self._client.apps[self._client.current_appId]["largeIcon"]
if not icon.startswith("http"):
icon = self._client.apps[self._client.current_appId]["icon"]
return icon
return None
@property
def supported_features(self):
"""Flag media player features that are supported."""
supported = SUPPORT_WEBOSTV
if (self._client.sound_output == "external_arc") or (
self._client.sound_output == "external_speaker"
):
supported = supported | SUPPORT_WEBOSTV_VOLUME
elif self._client.sound_output != "lineout":
supported = supported | SUPPORT_WEBOSTV_VOLUME | SUPPORT_VOLUME_SET
if self._on_script:
supported = supported | SUPPORT_TURN_ON
return supported
@property
def extra_state_attributes(self):
"""Return device specific state attributes."""
if self._client.sound_output is None and self.state == STATE_OFF:
return {}
return {ATTR_SOUND_OUTPUT: self._client.sound_output}
@cmd
async def async_turn_off(self):
"""Turn off media player."""
await self._client.power_off()
async def async_turn_on(self):
"""Turn on the media player."""
if self._on_script:
await self._on_script.async_run(context=self._context)
@cmd
async def async_volume_up(self):
"""Volume up the media player."""
await self._client.volume_up()
@cmd
async def async_volume_down(self):
"""Volume down media player."""
await self._client.volume_down()
@cmd
async def async_set_volume_level(self, volume):
"""Set volume level, range 0..1."""
tv_volume = int(round(volume * 100))
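        # Home Assistant expresses volume as 0..1 while webOS expects 0..100,
        # so e.g. 0.35 becomes 35.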
await self._client.set_volume(tv_volume)
@cmd
async def async_mute_volume(self, mute):
"""Send mute command."""
await self._client.set_mute(mute)
@cmd
async def async_select_sound_output(self, sound_output):
"""Select the sound output."""
await self._client.change_sound_output(sound_output)
@cmd
async def async_media_play_pause(self):
"""Simulate play pause media player."""
if self._paused:
await self.async_media_play()
else:
await self.async_media_pause()
@cmd
async def async_select_source(self, source):
"""Select input source."""
source_dict = self._source_list.get(source)
if source_dict is None:
_LOGGER.warning("Source %s not found for %s", source, self.name)
return
if source_dict.get("title"):
await self._client.launch_app(source_dict["id"])
elif source_dict.get("label"):
await self._client.set_input(source_dict["id"])
@cmd
async def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
_LOGGER.debug("Call play media type <%s>, Id <%s>", media_type, media_id)
if media_type == MEDIA_TYPE_CHANNEL:
_LOGGER.debug("Searching channel")
partial_match_channel_id = None
perfect_match_channel_id = None
for channel in self._client.channels:
if media_id == channel["channelNumber"]:
perfect_match_channel_id = channel["channelId"]
continue
if media_id.lower() == channel["channelName"].lower():
perfect_match_channel_id = channel["channelId"]
continue
if media_id.lower() in channel["channelName"].lower():
partial_match_channel_id = channel["channelId"]
if perfect_match_channel_id is not None:
_LOGGER.info(
"Switching to channel <%s> with perfect match",
perfect_match_channel_id,
)
await self._client.set_channel(perfect_match_channel_id)
elif partial_match_channel_id is not None:
_LOGGER.info(
"Switching to channel <%s> with partial match",
partial_match_channel_id,
)
await self._client.set_channel(partial_match_channel_id)
@cmd
async def async_media_play(self):
"""Send play command."""
self._paused = False
await self._client.play()
@cmd
async def async_media_pause(self):
"""Send media pause command to media player."""
self._paused = True
await self._client.pause()
@cmd
async def async_media_stop(self):
"""Send stop command to media player."""
await self._client.stop()
@cmd
async def async_media_next_track(self):
"""Send next track command."""
current_input = self._client.get_input()
if current_input == LIVE_TV_APP_ID:
await self._client.channel_up()
else:
await self._client.fast_forward()
@cmd
async def async_media_previous_track(self):
"""Send the previous track command."""
current_input = self._client.get_input()
if current_input == LIVE_TV_APP_ID:
await self._client.channel_down()
else:
await self._client.rewind()
@cmd
async def async_button(self, button):
"""Send a button press."""
await self._client.button(button)
@cmd
async def async_command(self, command, **kwargs):
"""Send a command."""
await self._client.request(command, payload=kwargs.get(ATTR_PAYLOAD))
|
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_fortiguard_filter
short_description: Filters for FortiCloud in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify log_fortiguard feature and filter category.
      Examples include all parameters; values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
            - Ensures FortiGate certificate is verified by a proper CA.
type: bool
default: true
version_added: 2.9
log_fortiguard_filter:
description:
- Filters for FortiCloud.
default: null
type: dict
suboptions:
anomaly:
description:
- Enable/disable anomaly logging.
type: str
choices:
- enable
- disable
dlp_archive:
description:
- Enable/disable DLP archive logging.
type: str
choices:
- enable
- disable
dns:
description:
- Enable/disable detailed DNS event logging.
type: str
choices:
- enable
- disable
filter:
description:
- FortiCloud log filter.
type: str
filter_type:
description:
- Include/exclude logs that match the filter.
type: str
choices:
- include
- exclude
forward_traffic:
description:
- Enable/disable forward traffic logging.
type: str
choices:
- enable
- disable
gtp:
description:
- Enable/disable GTP messages logging.
type: str
choices:
- enable
- disable
local_traffic:
description:
- Enable/disable local in or out traffic logging.
type: str
choices:
- enable
- disable
multicast_traffic:
description:
- Enable/disable multicast traffic logging.
type: str
choices:
- enable
- disable
netscan_discovery:
description:
- Enable/disable netscan discovery event logging.
type: str
netscan_vulnerability:
description:
- Enable/disable netscan vulnerability event logging.
type: str
severity:
description:
- Lowest severity level to log.
type: str
choices:
- emergency
- alert
- critical
- error
- warning
- notification
- information
- debug
sniffer_traffic:
description:
- Enable/disable sniffer traffic logging.
type: str
choices:
- enable
- disable
ssh:
description:
- Enable/disable SSH logging.
type: str
choices:
- enable
- disable
voip:
description:
- Enable/disable VoIP logging.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Filters for FortiCloud.
fortios_log_fortiguard_filter:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_fortiguard_filter:
anomaly: "enable"
dlp_archive: "enable"
dns: "enable"
filter: "<your_own_value>"
filter_type: "include"
forward_traffic: "enable"
gtp: "enable"
local_traffic: "enable"
multicast_traffic: "enable"
netscan_discovery: "<your_own_value>"
netscan_vulnerability: "<your_own_value>"
severity: "emergency"
sniffer_traffic: "enable"
ssh: "enable"
voip: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_log_fortiguard_filter_data(json):
option_list = ['anomaly', 'dlp_archive', 'dns',
'filter', 'filter_type', 'forward_traffic',
'gtp', 'local_traffic', 'multicast_traffic',
'netscan_discovery', 'netscan_vulnerability', 'severity',
'sniffer_traffic', 'ssh', 'voip']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
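# For example, {'severity': 'information', 'dns': None} is reduced to
# {'severity': 'information'}: unset options are dropped so only explicitly
# configured values are sent to the device.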
def underscore_to_hyphen(data):
if isinstance(data, list):
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
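# For example, {'filter_type': 'include', 'forward_traffic': 'enable'} becomes
# {'filter-type': 'include', 'forward-traffic': 'enable'}, matching the hyphenated
# attribute names used by the FortiOS configuration API.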
def log_fortiguard_filter(data, fos):
vdom = data['vdom']
log_fortiguard_filter_data = data['log_fortiguard_filter']
filtered_data = underscore_to_hyphen(filter_log_fortiguard_filter_data(log_fortiguard_filter_data))
return fos.set('log.fortiguard',
'filter',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
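# A DELETE that comes back with HTTP 404 is also counted as success: the object the
# task asked to remove is already absent, so the resulting state matches the request.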
def fortios_log_fortiguard(data, fos):
if data['log_fortiguard_filter']:
resp = log_fortiguard_filter(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"log_fortiguard_filter": {
"required": False, "type": "dict", "default": None,
"options": {
"anomaly": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dlp_archive": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dns": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"filter": {"required": False, "type": "str"},
"filter_type": {"required": False, "type": "str",
"choices": ["include", "exclude"]},
"forward_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"gtp": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"multicast_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"netscan_discovery": {"required": False, "type": "str"},
"netscan_vulnerability": {"required": False, "type": "str"},
"severity": {"required": False, "type": "str",
"choices": ["emergency", "alert", "critical",
"error", "warning", "notification",
"information", "debug"]},
"sniffer_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ssh": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"voip": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_log_fortiguard(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_log_fortiguard(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
|
import json
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.sites.models import Site
from django.test.utils import override_settings
import mock
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.flagit.models import FlaggedObject
from kitsune.products.tests import ProductFactory
from kitsune.questions.models import (
Question, QuestionVote, AnswerVote, Answer, QuestionLocale)
from kitsune.questions.tests import (
AnswerFactory, QuestionFactory, TestCaseBase, QuestionLocaleFactory)
from kitsune.questions.views import parse_troubleshooting
from kitsune.search.tests.test_es import ElasticTestCase
from kitsune.sumo.templatetags.jinja_helpers import urlparams
from kitsune.sumo.tests import (
get, MobileTestCase, LocalizingClient, eq_msg, set_waffle_flag, template_used)
from kitsune.sumo.urlresolvers import reverse
from kitsune.products.tests import TopicFactory
from kitsune.users.models import Profile
from kitsune.users.tests import UserFactory, add_permission
from kitsune.wiki.tests import DocumentFactory, RevisionFactory
class AAQTests(ElasticTestCase):
client_class = LocalizingClient
def test_bleaching(self):
"""Tests whether summaries are bleached"""
p = ProductFactory(slug=u'firefox')
l = QuestionLocale.objects.get(locale=settings.LANGUAGE_CODE)
p.questions_locales.add(l)
TopicFactory(title='Fix problems', slug='fix-problems', product=p)
QuestionFactory(
product=p,
title=u'CupcakesQuestion cupcakes',
content=u'cupcakes are best with <unbleached>flour</unbleached>')
self.refresh()
url = urlparams(
reverse('questions.aaq_step4', args=['desktop', 'fix-problems']),
search='cupcakes')
response = self.client.get(url, follow=True)
eq_(200, response.status_code)
assert 'CupcakesQuestion' in response.content
assert '<unbleached>' not in response.content
assert 'cupcakes are best with' in response.content
    # TODO: test that when _search_suggestions fails with a handled
    # error, the user can still ask a question.
def test_search_suggestions_questions(self):
"""Verifies the view doesn't kick up an HTTP 500"""
p = ProductFactory(slug=u'firefox')
l = QuestionLocale.objects.get(locale=settings.LANGUAGE_CODE)
p.questions_locales.add(l)
TopicFactory(title='Fix problems', slug='fix-problems', product=p)
q = QuestionFactory(product=p, title=u'CupcakesQuestion cupcakes')
d = DocumentFactory(title=u'CupcakesKB cupcakes', category=10)
d.products.add(p)
RevisionFactory(document=d, is_approved=True)
self.refresh()
url = urlparams(
reverse('questions.aaq_step4', args=['desktop', 'fix-problems']),
search='cupcakes')
response = self.client.get(url, follow=True)
eq_(200, response.status_code)
assert 'CupcakesQuestion' in response.content
assert 'CupcakesKB' in response.content
# Verify that archived articles and questions aren't shown...
# Archive both and they shouldn't appear anymore.
q.is_archived = True
q.save()
d.is_archived = True
d.save()
self.refresh()
response = self.client.get(url, follow=True)
eq_(200, response.status_code)
assert 'CupcakesQuestion' not in response.content
assert 'CupcakesKB' not in response.content
def test_search_suggestion_questions_locale(self):
"""Verifies the right languages show up in search suggestions."""
QuestionLocaleFactory(locale='de')
p = ProductFactory(slug=u'firefox')
for l in QuestionLocale.objects.all():
p.questions_locales.add(l)
TopicFactory(title='Fix problems', slug='fix-problems', product=p)
QuestionFactory(title='question cupcakes?', product=p, locale='en-US')
QuestionFactory(title='question donuts?', product=p, locale='en-US')
QuestionFactory(title='question pies?', product=p, locale='pt-BR')
QuestionFactory(title='question pastries?', product=p, locale='de')
self.refresh()
def sub_test(locale, *titles):
url = urlparams(reverse('questions.aaq_step4',
args=['desktop', 'fix-problems'],
locale=locale),
search='question')
response = self.client.get(url, follow=True)
doc = pq(response.content)
eq_msg(len(doc('.result.question')), len(titles),
'Wrong number of results for {0}'.format(locale))
for substr in titles:
assert substr in doc('.result.question h3 a').text()
sub_test('en-US', 'cupcakes?', 'donuts?')
sub_test('pt-BR', 'cupcakes?', 'donuts?', 'pies?')
sub_test('de', 'cupcakes?', 'donuts?', 'pastries?')
def test_ratelimit(self):
"""Make sure posting new questions is ratelimited"""
data = {'title': 'A test question',
'content': 'I have this question that I hope...',
'sites_affected': 'http://example.com',
'ff_version': '3.6.6',
'os': 'Intel Mac OS X 10.6',
'plugins': '* Shockwave Flash 10.1 r53',
'useragent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X '
'10.6; en-US; rv:1.9.2.6) Gecko/20100625 '
'Firefox/3.6.6'}
p = ProductFactory(slug='firefox')
l = QuestionLocale.objects.get(locale=settings.LANGUAGE_CODE)
p.questions_locales.add(l)
TopicFactory(slug='fix-problems', product=p)
url = urlparams(
reverse('questions.aaq_step5', args=['desktop', 'fix-problems']),
search='A test question')
u = UserFactory()
self.client.login(username=u.username, password='testpass')
for i in range(0, 5):
self.client.post(url, data, follow=True)
response = self.client.post(url, data, follow=True)
eq_(403, response.status_code)
def test_first_step(self):
"""Make sure the first step doesn't blow up
Oddly, none of the other tests cover this simple case.
"""
url = reverse('questions.aaq_step1')
res = self.client.get(url)
eq_(200, res.status_code)
def test_redirect_bad_locales(self):
"""Non-AAQ locales should redirect."""
url_fr = reverse('questions.aaq_step1', locale='fr')
url_en = reverse('questions.aaq_step1', locale='en-US')
res = self.client.get(url_fr)
eq_(302, res.status_code)
# This has some http://... stuff at the beginning. Ignore that.
assert res['location'].endswith(url_en)
@override_settings(WIKI_DEFAULT_LANGUAGE='fr')
def test_no_redirect_english(self):
"""The default language should never redirect, even if it isn't an AAQ language."""
"""Non-AAQ locales should redirect."""
url_fr = reverse('questions.aaq_step1', locale='fr')
res = self.client.get(url_fr)
eq_(200, res.status_code)
def test_redirect_locale_not_enabled(self):
"""AAQ should redirect for products with questions disabled for the
current locale"""
url_fi = reverse('questions.aaq_step1', locale='fi')
res = self.client.get(url_fi)
eq_(200, res.status_code)
p = ProductFactory(slug='firefox')
url_fi = reverse('questions.aaq_step2', locale='fi', args=['desktop'])
url_en = reverse('questions.aaq_step2', locale='en-US',
args=['desktop'])
res = self.client.get(url_fi)
eq_(302, res.status_code)
assert res['location'].endswith(url_en)
l = QuestionLocale.objects.get(locale='fi')
p.questions_locales.add(l)
res = self.client.get(url_fi)
eq_(200, res.status_code)
class MobileAAQTests(MobileTestCase):
client_class = LocalizingClient
data = {'title': 'A test question',
'content': 'I have this question that I hope...',
'sites_affected': 'http://example.com',
'ff_version': '3.6.6',
'os': 'Intel Mac OS X 10.6',
'plugins': '* Shockwave Flash 10.1 r53',
'useragent': 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X '
'10.6; en-US; rv:1.9.2.6) Gecko/20100625 '
'Firefox/3.6.6'}
def _new_question(self, post_it=False):
"""Post a new question and return the response."""
p = ProductFactory(slug='mobile')
l = QuestionLocale.objects.get(locale=settings.LANGUAGE_CODE)
p.questions_locales.add(l)
t = TopicFactory(slug='fix-problems', product=p)
url = urlparams(
reverse('questions.aaq_step5', args=[p.slug, t.slug]),
search='A test question')
if post_it:
return self.client.post(url, self.data, follow=True)
return self.client.get(url, follow=True)
def test_logged_out(self):
"""New question is posted through mobile."""
response = self._new_question()
eq_(200, response.status_code)
assert template_used(response, 'questions/mobile/new_question_login.html')
@mock.patch.object(Site.objects, 'get_current')
def test_logged_in_get(self, get_current):
"""New question is posted through mobile."""
get_current.return_value.domain = 'testserver'
u = UserFactory()
self.client.login(username=u.username, password='testpass')
response = self._new_question()
eq_(200, response.status_code)
assert template_used(response, 'questions/mobile/new_question.html')
@mock.patch.object(Site.objects, 'get_current')
def test_logged_in_post(self, get_current):
"""New question is posted through mobile."""
get_current.return_value.domain = 'testserver'
u = UserFactory()
self.client.login(username=u.username, password='testpass')
response = self._new_question(post_it=True)
eq_(200, response.status_code)
assert Question.objects.filter(title='A test question')
@mock.patch.object(Site.objects, 'get_current')
def test_aaq_new_question_inactive(self, get_current):
"""New question is posted through mobile."""
get_current.return_value.domain = 'testserver'
# Log in first.
u = UserFactory()
self.client.login(username=u.username, password='testpass')
# Then become inactive.
u.is_active = False
u.save()
# Set 'in-aaq' for the session. It isn't already set because this
# test doesn't do a GET of the form first.
s = self.client.session
s['in-aaq'] = True
s.save()
response = self._new_question(post_it=True)
eq_(200, response.status_code)
assert template_used(response, 'questions/mobile/confirm_email.html')
def test_aaq_login_form(self):
"""The AAQ authentication forms contain the identifying fields.
        Added this test because it is hard to debug what happens when these
        fields somehow go missing.
"""
res = self._new_question()
doc = pq(res.content)
eq_(1, len(doc('#login-form input[name=login]')))
eq_(1, len(doc('#register-form input[name=register]')))
@set_waffle_flag('new_aaq')
class ReactAAQTests(TestCaseBase):
def test_waffle_flag(self):
url = reverse('questions.aaq_step1')
response = self.client.get(url, follow=True)
assert template_used(response, 'questions/new_question_react.html')
def test_only_marked_topics(self):
t1 = TopicFactory(in_aaq=True)
TopicFactory(in_aaq=False)
url = reverse('questions.aaq_step1')
response = self.client.get(url, follow=True)
doc = pq(response.content)
topics = json.loads(doc('.data[name=topics]').text())
eq_(len(topics), 1)
eq_(topics[0]['id'], t1.id)
class TestQuestionUpdates(TestCaseBase):
"""Tests that questions are only updated in the right cases."""
client_class = LocalizingClient
date_format = '%Y%M%d%H%m%S'
def setUp(self):
super(TestQuestionUpdates, self).setUp()
self.u = UserFactory(is_superuser=True)
self.client.login(username=self.u.username, password='testpass')
self.q = QuestionFactory(updated=datetime(2012, 7, 9, 9, 0, 0))
self.a = AnswerFactory(question=self.q)
# Get the question from the database so we have a consistent level of
# precision during the test.
self.q = Question.objects.get(pk=self.q.id)
def tearDown(self):
self.client.logout()
self.u.delete()
self.q.delete()
def _request_and_no_update(self, url, req_type='POST', data={}):
updated = self.q.updated
if req_type == 'POST':
self.client.post(url, data, follow=True)
elif req_type == 'GET':
self.client.get(url, data, follow=True)
else:
raise ValueError('req_type must be either "GET" or "POST"')
self.q = Question.objects.get(pk=self.q.id)
eq_(updated.strftime(self.date_format),
self.q.updated.strftime(self.date_format))
def test_no_update_edit(self):
url = urlparams(reverse('questions.edit_question', args=[self.q.id]))
self._request_and_no_update(url, req_type='POST', data={
'title': 'A new title.',
'content': 'Some new content.'
})
def test_no_update_solve(self):
url = urlparams(reverse('questions.solve',
args=[self.q.id, self.a.id]))
self._request_and_no_update(url)
def test_no_update_unsolve(self):
url = urlparams(reverse('questions.unsolve',
args=[self.q.id, self.a.id]))
self._request_and_no_update(url)
def test_no_update_vote(self):
url = urlparams(reverse('questions.vote', args=[self.q.id]))
self._request_and_no_update(url, req_type='POST')
def test_no_update_lock(self):
url = urlparams(reverse('questions.lock', args=[self.q.id]))
self._request_and_no_update(url, req_type='POST')
# Now unlock
self._request_and_no_update(url, req_type='POST')
def test_no_update_tagging(self):
url = urlparams(reverse('questions.add_tag', args=[self.q.id]))
self._request_and_no_update(url, req_type='POST', data={
'tag-name': 'foo'
})
url = urlparams(reverse('questions.remove_tag', args=[self.q.id]))
self._request_and_no_update(url, req_type='POST', data={
'remove-tag-foo': 1
})
class TroubleshootingParsingTests(TestCaseBase):
def test_empty_troubleshooting_info(self):
"""Test a troubleshooting value that is valid JSON, but junk.
This should trigger the parser to return None, which should not
cause a 500.
"""
q = QuestionFactory()
q.add_metadata(troubleshooting='{"foo": "bar"}')
# This case should not raise an error.
response = get(self.client, 'questions.details', args=[q.id])
eq_(200, response.status_code)
def test_weird_list_troubleshooting_info(self):
"""Test the corner case in which 'modifiedPReferences' is in a
list in troubleshooting data. This is weird, but caused a bug."""
q = QuestionFactory()
q.add_metadata(troubleshooting='["modifiedPreferences"]')
# This case should not raise an error.
response = get(self.client, 'questions.details', args=[q.id])
eq_(200, response.status_code)
def test_string_keys_troubleshooting(self):
"""Test something that looks like troubleshooting data, but
isn't formatted quite right. The parser should return None to
indicate that something isn't right."""
troubleshooting = '''{
"accessibility": {
"isActive": true
},
"application": {
"name": "Firefox",
"supportURL": "Some random url.",
"userAgent": "A user agent.",
"version": "42.2"
},
"extensions": [],
"graphics": "This really should not be a string."
"javaScript": {},
"modifiedPreferences": {},
"userJS": {
"exists": False
}
}'''
assert parse_troubleshooting(troubleshooting) is None
def test_troubleshooting_parser(self):
"""Test that the troubleshooting parser likes good data."""
troubleshooting = '''
{
"accessibility": {
"isActive": true
},
"application": {
"name": "Firefox",
"supportURL": "Some random url.",
"userAgent": "A user agent.",
"version": "42.2"
},
"extensions": [],
"graphics": {},
"javaScript": {},
"modifiedPreferences": {},
"userJS": {
"exists": false
}
}'''
assert parse_troubleshooting(troubleshooting) is not None
class TestQuestionList(TestCaseBase):
def test_locale_filter(self):
"""Only questions for the current locale should be shown on the
questions front page for AAQ locales."""
eq_(Question.objects.count(), 0)
p = ProductFactory(slug=u'firefox')
TopicFactory(title='Fix problems', slug='fix-problems', product=p)
QuestionFactory(title='question cupcakes?', product=p, locale='en-US')
QuestionFactory(title='question donuts?', product=p, locale='en-US')
QuestionFactory(title='question pies?', product=p, locale='pt-BR')
QuestionFactory(title='question pastries?', product=p, locale='de')
def sub_test(locale, *titles):
url = urlparams(reverse(
'questions.list', args=['all'], locale=locale))
response = self.client.get(url, follow=True)
doc = pq(response.content)
eq_msg(len(doc('section[id^=question]')), len(titles),
'Wrong number of results for {0}'.format(locale))
for substr in titles:
assert substr in doc('.questions section .content h2 a').text()
# en-US and pt-BR are both in AAQ_LANGUAGES, so should be filtered.
sub_test('en-US', 'cupcakes?', 'donuts?')
sub_test('pt-BR', 'pies?')
# de is not in AAQ_LANGUAGES, so should show en-US, but not pt-BR
sub_test('de', 'cupcakes?', 'donuts?', 'pastries?')
class TestQuestionReply(TestCaseBase):
def setUp(self):
u = UserFactory()
self.client.login(username=u.username, password='testpass')
self.question = QuestionFactory()
def test_reply_to_spam_question(self):
self.question.is_spam = True
self.question.save()
res = self.client.post(
reverse('questions.reply', args=[self.question.id]),
{'content': 'The best reply evar!'})
eq_(res.status_code, 404)
def test_needs_info(self):
eq_(self.question.needs_info, False)
res = self.client.post(
reverse('questions.reply', args=[self.question.id]),
{'content': 'More info please', 'needsinfo': ''})
eq_(res.status_code, 302)
q = Question.objects.get(id=self.question.id)
eq_(q.needs_info, True)
def test_clear_needs_info(self):
self.question.set_needs_info()
eq_(self.question.needs_info, True)
res = self.client.post(
reverse('questions.reply', args=[self.question.id]),
{'content': 'More info please', 'clear_needsinfo': ''})
eq_(res.status_code, 302)
q = Question.objects.get(id=self.question.id)
eq_(q.needs_info, False)
class TestMarkingSolved(TestCaseBase):
def setUp(self):
u = UserFactory()
self.client.login(username=u.username, password='testpass')
self.question = QuestionFactory(creator=u)
self.answer = AnswerFactory(question=self.question)
def test_cannot_mark_spam_answer(self):
self.answer.is_spam = True
self.answer.save()
res = self.client.get(
reverse('questions.solve',
args=[self.question.id, self.answer.id]))
eq_(res.status_code, 404)
def test_cannot_mark_answers_on_spam_question(self):
self.question.is_spam = True
self.question.save()
res = self.client.get(
reverse('questions.solve',
args=[self.question.id, self.answer.id]))
eq_(res.status_code, 404)
class TestVoteAnswers(TestCaseBase):
def setUp(self):
u = UserFactory()
self.client.login(username=u.username, password='testpass')
self.question = QuestionFactory()
self.answer = AnswerFactory(question=self.question)
def test_cannot_vote_for_answers_on_spam_question(self):
self.question.is_spam = True
self.question.save()
res = self.client.post(
reverse('questions.answer_vote',
args=[self.question.id, self.answer.id]))
eq_(res.status_code, 404)
def test_cannot_vote_for_answers_marked_spam(self):
self.answer.is_spam = True
self.answer.save()
res = self.client.post(
reverse('questions.answer_vote',
args=[self.question.id, self.answer.id]))
eq_(res.status_code, 404)
class TestVoteQuestions(TestCaseBase):
def setUp(self):
u = UserFactory()
self.client.login(username=u.username, password='testpass')
self.question = QuestionFactory()
def test_cannot_vote_on_spam_question(self):
self.question.is_spam = True
self.question.save()
res = self.client.post(
reverse('questions.vote', args=[self.question.id]))
eq_(res.status_code, 404)
class TestQuestionDetails(TestCaseBase):
def setUp(self):
self.question = QuestionFactory()
def test_mods_can_see_spam_details(self):
self.question.is_spam = True
self.question.save()
res = get(self.client, 'questions.details', args=[self.question.id])
eq_(404, res.status_code)
u = UserFactory()
add_permission(u, FlaggedObject, 'can_moderate')
self.client.login(username=u.username, password='testpass')
res = get(self.client, 'questions.details', args=[self.question.id])
eq_(200, res.status_code)
class TestRateLimiting(TestCaseBase):
client_class = LocalizingClient
def _check_question_vote(self, q, ignored):
"""Try and vote on `q`. If `ignored` is false, assert the
request worked. If `ignored` is True, assert the request didn't
do anything."""
url = reverse('questions.vote', args=[q.id], locale='en-US')
votes = QuestionVote.objects.filter(question=q).count()
res = self.client.post(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(res.status_code, 200)
data = json.loads(res.content)
eq_(data.get('ignored', False), ignored)
if ignored:
eq_(QuestionVote.objects.filter(question=q).count(), votes)
else:
eq_(QuestionVote.objects.filter(question=q).count(), votes + 1)
def _check_answer_vote(self, q, a, ignored):
"""Try and vote on `a`. If `ignored` is false, assert the
request worked. If `ignored` is True, assert the request didn't
do anything."""
url = reverse('questions.answer_vote', args=[q.id, a.id],
locale='en-US')
votes = AnswerVote.objects.filter(answer=a).count()
res = self.client.post(url, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
eq_(res.status_code, 200)
data = json.loads(res.content)
eq_(data.get('ignored', False), ignored)
if ignored:
eq_(AnswerVote.objects.filter(answer=a).count(), votes)
else:
eq_(AnswerVote.objects.filter(answer=a).count(), votes + 1)
def test_question_vote_limit(self):
"""Test that an anonymous user's votes are ignored after 10
question votes."""
questions = [QuestionFactory() for _ in range(11)]
# The rate limit is 10 per day. So make 10 requests. (0 through 9)
for i in range(10):
self._check_question_vote(questions[i], False)
# Now make another, it should fail.
self._check_question_vote(questions[10], True)
def test_answer_vote_limit(self):
"""Test that an anonymous user's votes are ignored after 10
answer votes."""
q = QuestionFactory()
answers = AnswerFactory.create_batch(11, question=q)
# The rate limit is 10 per day. So make 10 requests. (0 through 9)
for i in range(10):
self._check_answer_vote(q, answers[i], False)
# Now make another, it should fail.
self._check_answer_vote(q, answers[10], True)
def test_question_vote_logged_in(self):
"""This exhausts the rate limit, then logs in, and exhausts it
again."""
questions = [QuestionFactory() for _ in range(11)]
u = UserFactory(password='testpass')
# The rate limit is 10 per day. So make 10 requests. (0 through 9)
for i in range(10):
self._check_question_vote(questions[i], False)
# The rate limit has been hit, so this fails.
self._check_question_vote(questions[10], True)
# Login.
self.client.login(username=u.username, password='testpass')
for i in range(10):
self._check_question_vote(questions[i], False)
# Now the user has hit the rate limit too, so this should fail.
self._check_question_vote(questions[10], True)
        # Logging out won't help
self.client.logout()
self._check_question_vote(questions[10], True)
def test_answer_vote_logged_in(self):
"""This exhausts the rate limit, then logs in, and exhausts it
again."""
q = QuestionFactory()
answers = [AnswerFactory(question=q) for _ in range(12)]
u = UserFactory(password='testpass')
# The rate limit is 10 per day. So make 10 requests. (0 through 9)
for i in range(10):
self._check_answer_vote(q, answers[i], False)
# The ratelimit has been hit, so the next request will fail.
self._check_answer_vote(q, answers[11], True)
# Login.
self.client.login(username=u.username, password='testpass')
for i in range(10):
self._check_answer_vote(q, answers[i], False)
# Now the user has hit the rate limit too, so this should fail.
self._check_answer_vote(q, answers[10], True)
        # Logging out won't help
self.client.logout()
self._check_answer_vote(q, answers[11], True)
def test_answers_limit(self):
"""Only four answers per minute can be posted."""
# Login
u = UserFactory(password='testpass')
self.client.login(username=u.username, password='testpass')
q = QuestionFactory()
content = 'lorem ipsum dolor sit amet'
url = reverse('questions.reply', args=[q.id])
for i in range(7):
self.client.post(url, {'content': content})
eq_(4, Answer.objects.count())
class TestScreenShare(TestCaseBase):
def setUp(self):
self.user = UserFactory()
add_permission(self.user, Profile, 'screen_share')
self.question = QuestionFactory()
def test_screen_share_answer(self):
"""Test that the answer gets created when the screen sharing invite is sent."""
eq_(self.question.answers.count(), 0)
self.client.login(username=self.user.username, password='testpass')
url = reverse('questions.screen_share', args=[self.question.id])
res = self.client.post(url, follow=True)
eq_(res.status_code, 200)
eq_(self.question.answers.count(), 1)
def test_screen_share_metadata(self):
"""Test that the screen sharing meta data is added to the question."""
eq_(self.question.metadata.get('screen_sharing'), None)
self.client.login(username=self.user.username, password='testpass')
url = reverse('questions.screen_share', args=[self.question.id])
res = self.client.post(url, follow=True)
eq_(res.status_code, 200)
q = Question.objects.get(pk=self.question.pk)
eq_(q.metadata.get('screen_sharing'), 'true')
class TestStats(ElasticTestCase):
client_class = LocalizingClient
def test_stats(self):
"""Tests questions/dashboard/metrics view"""
p = ProductFactory()
t = TopicFactory(title='Websites', slug='websites', product=p)
QuestionFactory(
title=u'cupcakes',
content=u'Cupcakes rock!',
created=datetime.now() - timedelta(days=1),
topic=t)
self.refresh()
response = self.client.get(reverse('questions.metrics'))
eq_(200, response.status_code)
# If there's histogram data, this is probably good enough to
# denote its existence.
assert ' data-graph="[' in response.content
class TestEditDetails(TestCaseBase):
def setUp(self):
u = UserFactory()
add_permission(u, Question, 'change_question')
assert u.has_perm('questions.change_question')
self.user = u
p = ProductFactory()
t = TopicFactory(product=p)
q = QuestionFactory(product=p, topic=t)
self.product = p
self.topic = t
self.question = q
def _request(self, user=None, data=None):
"""Make a request to edit details"""
if user is None:
user = self.user
self.client.login(username=user.username, password='testpass')
url = reverse('questions.edit_details',
kwargs={'question_id': self.question.id})
return self.client.post(url, data=data)
def test_permissions(self):
"""Test that the new permission works"""
data = {
'product': self.product.id,
'topic': self.topic.id,
'locale': self.question.locale
}
u = UserFactory()
response = self._request(u, data=data)
eq_(403, response.status_code)
response = self._request(data=data)
eq_(302, response.status_code)
def test_missing_data(self):
"""Test for missing data"""
data = {
'product': self.product.id,
'locale': self.question.locale
}
response = self._request(data=data)
eq_(400, response.status_code)
data = {
'topic': self.topic.id,
'locale': self.question.locale
}
response = self._request(data=data)
eq_(400, response.status_code)
data = {
'product': self.product.id,
'topic': self.topic.id
}
response = self._request(data=data)
eq_(400, response.status_code)
def test_bad_data(self):
"""Test for bad data"""
data = {
'product': ProductFactory().id,
'topic': TopicFactory().id,
'locale': self.question.locale
}
response = self._request(data=data)
eq_(400, response.status_code)
data = {
'product': self.product.id,
'topic': self.topic.id,
'locale': 'zu'
}
response = self._request(data=data)
eq_(400, response.status_code)
def test_change_topic(self):
"""Test changing the topic"""
t_new = TopicFactory(product=self.product)
data = {
'product': self.product.id,
'topic': t_new.id,
'locale': self.question.locale
}
assert t_new.id != self.topic.id
response = self._request(data=data)
eq_(302, response.status_code)
q = Question.objects.get(id=self.question.id)
eq_(t_new.id, q.topic.id)
def test_change_product(self):
"""Test changing the product"""
t_new = TopicFactory()
p_new = t_new.product
assert self.topic.id != t_new.id
assert self.product.id != p_new.id
data = {
'product': p_new.id,
'topic': t_new.id,
'locale': self.question.locale
}
response = self._request(data=data)
eq_(302, response.status_code)
q = Question.objects.get(id=self.question.id)
eq_(p_new.id, q.product.id)
eq_(t_new.id, q.topic.id)
def test_change_locale(self):
locale = 'hu'
assert locale in QuestionLocale.objects.locales_list()
assert locale != self.question.locale
data = {
'product': self.product.id,
'topic': self.topic.id,
'locale': locale
}
response = self._request(data=data)
eq_(302, response.status_code)
q = Question.objects.get(id=self.question.id)
eq_(q.locale, locale)
|
|
"""This module contains async actions used to interact
with the spider metadata cache via the service's API.
"""
import base64
import hashlib
import httplib
import json
import logging
import tornado.httpclient
import async_actions
import spider_metadata_cache
import spider_metadata_cache.jsonschemas
_logger = logging.getLogger(__name__)
class AsyncAction(async_actions.AsyncAction):
"""Abstract base class for all async actions."""
def __init__(self, docker_image_name, async_state=None):
async_actions.AsyncAction.__init__(self, async_state)
self.docker_image_name = docker_image_name
@property
def spider_metadata_id(self):
"""Translate docker image name into a spider_metadata_id."""
return hashlib.sha1(self.docker_image_name).hexdigest()
def create_log_msg_for_spider_metadata_cache_http_client_response(self, response):
return self.create_log_msg_for_http_client_response(response, 'Spider Metadata Cache')
class AsyncReadSpiderMetadata(AsyncAction):
"""Async wrapper around spider metadata cache API to read
spider metadata from the cache.
"""
# FD = Failure Details
FD_OK = 0x0000
FD_ERROR = 0x0080
FD_ERROR_READING = FD_ERROR | 0x0001
FD_ERROR_UNEXPECTED_RESPONSE = FD_ERROR | 0x0002
FD_ERROR_BASE64_DECODING = FD_ERROR | 0x0003
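    # FD values form a bitmask: any code with FD_ERROR (0x0080) set is a failure,
    # so _call_callback derives is_ok with a single 'failure_detail & FD_ERROR' test
    # rather than enumerating individual codes.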
def __init__(self, docker_image_name, async_state=None):
AsyncAction.__init__(self, docker_image_name, async_state)
self.failure_detail = None
self._callback = None
def read(self, callback):
assert self._callback is None
self._callback = callback
msg_fmt = "Reading spider metadata '%s'"
_logger.info(msg_fmt, self.docker_image_name)
url = '%s/%s/spider_metadata/%s' % (
self.config.base_spider_metadata_cache_service_url,
spider_metadata_cache.__api_version__,
self.spider_metadata_id,
)
request = tornado.httpclient.HTTPRequest(url)
http_client = tornado.httpclient.AsyncHTTPClient()
http_client.fetch(
request,
callback=self._on_http_client_fetch_done)
def _on_http_client_fetch_done(self, response):
_logger.info(self.create_log_msg_for_spider_metadata_cache_http_client_response(response))
if response.code not in (httplib.OK, httplib.NOT_FOUND):
msg_fmt = "Reading spider metadata for '%s' failed"
_logger.error(msg_fmt, self.docker_image_name)
self._call_callback(type(self).FD_ERROR_READING)
return
if response.code == httplib.NOT_FOUND:
msg_fmt = "Reading spider metadata for '%s' did not find any spider metadata"
_logger.info(msg_fmt, self.docker_image_name)
self._call_callback(type(self).FD_OK)
return
json_response_body = self.get_json_response_body(
response,
spider_metadata_cache.jsonschemas.get_spider_metadata_response)
if json_response_body is None:
msg_fmt = "Reading spider metadata for '%s' got unexpected response from spider metadata cache"
_logger.error(msg_fmt, self.docker_image_name)
self._call_callback(type(self).FD_ERROR_UNEXPECTED_RESPONSE)
return
base64_encoded_spider_metadata = json_response_body['base64_encoded_spider_metadata']
try:
spider_metadata = json.loads(base64.b64decode(base64_encoded_spider_metadata))
except Exception:
msg_fmt = "Reading spider metadata for '%s' encountered error base 64 & json decoding spider metadata"
_logger.error(msg_fmt, self.docker_image_name)
self._call_callback(type(self).FD_ERROR_BASE64_DECODING)
return
msg_fmt = "Reading spider metadata for '%s' completed successfully"
_logger.info(msg_fmt, self.docker_image_name)
self._call_callback(type(self).FD_OK, spider_metadata)
def _call_callback(self, failure_detail, spider_metadata=None):
assert self._callback is not None
assert self.failure_detail is None
self.failure_detail = failure_detail
is_ok = not bool(self.failure_detail & type(self).FD_ERROR)
self._callback(is_ok, spider_metadata if is_ok else None, self)
self._callback = None
class AsyncWriteSpiderMetadata(AsyncAction):
"""Async wrapper around spider metadata cache
API to write spider metadata to the cache.
"""
# FD = Failure Details
FD_OK = 0x0000
FD_ERROR = 0x0080
FD_ERROR_WRITING = FD_ERROR | 0x0001
def __init__(self,
docker_image_name,
spider_metadata,
ttl_in_seconds,
async_state=None):
AsyncAction.__init__(self, docker_image_name, async_state)
self.spider_metadata = spider_metadata
self.ttl_in_seconds = ttl_in_seconds
self.failure_detail = None
self._callback = None
def write(self, callback):
assert self._callback is None
self._callback = callback
msg_fmt = "Writing spider metadata '%s'"
_logger.info(msg_fmt, self.docker_image_name)
url = '%s/%s/spider_metadata/%s' % (
self.config.base_spider_metadata_cache_service_url,
spider_metadata_cache.__api_version__,
self.spider_metadata_id,
)
headers = {
'Content-Type': 'application/json; charset=utf-8',
}
body = {
'base64_encoded_spider_metadata': base64.b64encode(json.dumps(self.spider_metadata)),
'ttl_in_seconds': self.ttl_in_seconds,
}
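        # The cache treats the metadata as opaque: the JSON document is serialized
        # and base64-encoded here, and AsyncReadSpiderMetadata reverses both steps on read.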
request = tornado.httpclient.HTTPRequest(
url,
method='PUT',
headers=headers,
body=json.dumps(body))
http_client = tornado.httpclient.AsyncHTTPClient()
http_client.fetch(
request,
callback=self._on_http_client_fetch_done)
def _on_http_client_fetch_done(self, response):
_logger.info(self.create_log_msg_for_spider_metadata_cache_http_client_response(response))
if response.code != httplib.OK:
msg_fmt = "Writing spider metadata for '%s' failed"
_logger.error(msg_fmt, self.docker_image_name)
self._call_callback(type(self).FD_ERROR_WRITING)
return
msg_fmt = "Writing spider metadata for '%s' succeeded"
_logger.info(msg_fmt, self.docker_image_name)
self._call_callback(type(self).FD_OK)
def _call_callback(self, failure_detail):
assert self._callback is not None
assert self.failure_detail is None
self.failure_detail = failure_detail
is_ok = not bool(self.failure_detail & type(self).FD_ERROR)
self._callback(is_ok, self)
self._callback = None
|
|
# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from six.moves import urllib
import webob
from nova.api.openstack.compute import floating_ip_dns \
as fipdns_v21
from nova import context
from nova import db
from nova import exception
from nova import network
from nova import test
from nova.tests.unit.api.openstack import fakes
name = "arbitraryname"
name2 = "anotherarbitraryname"
test_ipv4_address = '10.0.0.66'
test_ipv4_address2 = '10.0.0.67'
test_ipv6_address = 'fe80:0:0:0:0:0:a00:42'
domain = "example.org"
domain2 = "example.net"
floating_ip_id = '1'
def _quote_domain(domain):
"""Domain names tend to have .'s in them. Urllib doesn't quote dots,
but Routes tends to choke on them, so we need an extra level of
by-hand quoting here. This function needs to duplicate the one in
python-novaclient/novaclient/v1_1/floating_ip_dns.py
"""
return urllib.parse.quote(domain.replace('.', '%2E'))
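# For example, _quote_domain('example.org') first rewrites the dot as '%2E' and then
# URL-quotes the whole string, so the router never sees a literal '.' in the path
# segment that carries the domain name.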
def network_api_get_floating_ip(self, context, id):
return {'id': floating_ip_id, 'address': test_ipv4_address,
'fixed_ip': None}
def network_get_dns_domains(self, context):
return [{'domain': 'example.org', 'scope': 'public'},
{'domain': 'example.com', 'scope': 'public',
'project': 'project1'},
{'domain': 'private.example.com', 'scope': 'private',
'availability_zone': 'avzone'}]
def network_get_dns_entries_by_address(self, context, address, domain):
return [name, name2]
def network_get_dns_entries_by_name(self, context, address, domain):
return [test_ipv4_address]
def network_add_dns_entry(self, context, address, name, dns_type, domain):
return {'dns_entry': {'ip': test_ipv4_address,
'name': name,
'type': dns_type,
'domain': domain}}
def network_modify_dns_entry(self, context, address, name, domain):
return {'dns_entry': {'name': name,
'ip': address,
'domain': domain}}
def network_create_private_dns_domain(self, context, domain, avail_zone):
pass
def network_create_public_dns_domain(self, context, domain, project):
pass
class FloatingIpDNSTestV21(test.TestCase):
floating_ip_dns = fipdns_v21
def _create_floating_ip(self):
"""Create a floating ip object."""
host = "fake_host"
db.floating_ip_create(self.context,
{'address': test_ipv4_address,
'host': host})
db.floating_ip_create(self.context,
{'address': test_ipv6_address,
'host': host})
def _delete_floating_ip(self):
db.floating_ip_destroy(self.context, test_ipv4_address)
db.floating_ip_destroy(self.context, test_ipv6_address)
def _check_status(self, expected_status, res, controller_method):
self.assertEqual(expected_status, controller_method.wsgi_code)
def _bad_request(self):
return webob.exc.HTTPBadRequest
def setUp(self):
super(FloatingIpDNSTestV21, self).setUp()
# None of these APIs are implemented for Neutron.
self.flags(use_neutron=False)
self.stub_out("nova.network.api.API.get_dns_domains",
network_get_dns_domains)
self.stub_out("nova.network.api.API.get_dns_entries_by_address",
network_get_dns_entries_by_address)
self.stub_out("nova.network.api.API.get_dns_entries_by_name",
network_get_dns_entries_by_name)
self.stub_out("nova.network.api.API.get_floating_ip",
network_api_get_floating_ip)
self.stub_out("nova.network.api.API.add_dns_entry",
network_add_dns_entry)
self.stub_out("nova.network.api.API.modify_dns_entry",
network_modify_dns_entry)
self.stub_out("nova.network.api.API.create_public_dns_domain",
network_create_public_dns_domain)
self.stub_out("nova.network.api.API.create_private_dns_domain",
network_create_private_dns_domain)
self.context = context.get_admin_context()
self._create_floating_ip()
temp = self.floating_ip_dns.FloatingIPDNSDomainController()
self.domain_controller = temp
self.entry_controller = self.floating_ip_dns.\
FloatingIPDNSEntryController()
self.admin_req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.req = fakes.HTTPRequest.blank('')
def tearDown(self):
self._delete_floating_ip()
super(FloatingIpDNSTestV21, self).tearDown()
def test_dns_domains_list(self):
res_dict = self.domain_controller.index(self.req)
entries = res_dict['domain_entries']
self.assertTrue(entries)
self.assertEqual(entries[0]['domain'], "example.org")
self.assertFalse(entries[0]['project'])
self.assertFalse(entries[0]['availability_zone'])
self.assertEqual(entries[1]['domain'], "example.com")
self.assertEqual(entries[1]['project'], "project1")
self.assertFalse(entries[1]['availability_zone'])
self.assertEqual(entries[2]['domain'], "private.example.com")
self.assertFalse(entries[2]['project'])
self.assertEqual(entries[2]['availability_zone'], "avzone")
def _test_get_dns_entries_by_address(self, address):
entries = self.entry_controller.show(self.req, _quote_domain(domain),
address)
entries = entries.obj
self.assertEqual(len(entries['dns_entries']), 2)
self.assertEqual(entries['dns_entries'][0]['name'],
name)
self.assertEqual(entries['dns_entries'][1]['name'],
name2)
self.assertEqual(entries['dns_entries'][0]['domain'],
domain)
def test_get_dns_entries_by_ipv4_address(self):
self._test_get_dns_entries_by_address(test_ipv4_address)
def test_get_dns_entries_by_ipv6_address(self):
self._test_get_dns_entries_by_address(test_ipv6_address)
def test_get_dns_entries_by_name(self):
entry = self.entry_controller.show(self.req, _quote_domain(domain),
name)
self.assertEqual(entry['dns_entry']['ip'],
test_ipv4_address)
self.assertEqual(entry['dns_entry']['domain'],
domain)
@mock.patch.object(network.api.API, "get_dns_entries_by_name",
side_effect=webob.exc.HTTPNotFound())
def test_dns_entries_not_found(self, mock_get_entries):
self.assertRaises(webob.exc.HTTPNotFound,
self.entry_controller.show,
self.req, _quote_domain(domain), 'nonexistent')
self.assertTrue(mock_get_entries.called)
def test_create_entry(self):
body = {'dns_entry':
{'ip': test_ipv4_address,
'dns_type': 'A'}}
entry = self.entry_controller.update(self.req, _quote_domain(domain),
name, body=body)
self.assertEqual(entry['dns_entry']['ip'], test_ipv4_address)
def test_create_domain(self):
self._test_create_domain(self.req)
def _test_create_domain(self, req):
body = {'domain_entry':
{'scope': 'private',
'project': 'testproject'}}
self.assertRaises(self._bad_request(),
self.domain_controller.update, req,
_quote_domain(domain), body=body)
body = {'domain_entry':
{'scope': 'public',
'availability_zone': 'zone1'}}
self.assertRaises(self._bad_request(),
self.domain_controller.update, req,
_quote_domain(domain), body=body)
body = {'domain_entry':
{'scope': 'public',
'project': 'testproject'}}
entry = self.domain_controller.update(req,
_quote_domain(domain), body=body)
self.assertEqual(entry['domain_entry']['domain'], domain)
self.assertEqual(entry['domain_entry']['scope'], 'public')
self.assertEqual(entry['domain_entry']['project'], 'testproject')
body = {'domain_entry':
{'scope': 'private',
'availability_zone': 'zone1'}}
entry = self.domain_controller.update(req,
_quote_domain(domain), body=body)
self.assertEqual(entry['domain_entry']['domain'], domain)
self.assertEqual(entry['domain_entry']['scope'], 'private')
self.assertEqual(entry['domain_entry']['availability_zone'], 'zone1')
@mock.patch.object(network.api.API, "delete_dns_entry")
def test_delete_entry(self, mock_del_entry):
delete = self.entry_controller.delete
res = delete(self.req, _quote_domain(domain), name)
self._check_status(202, res, delete)
mock_del_entry.assert_called_once_with(mock.ANY, name, domain)
@mock.patch.object(network.api.API, "delete_dns_entry",
side_effect=exception.NotFound)
def test_delete_entry_notfound(self, mock_del_entry):
self.assertRaises(webob.exc.HTTPNotFound,
self.entry_controller.delete, self.req, _quote_domain(domain),
name)
self.assertTrue(mock_del_entry.called)
def test_delete_domain(self):
self._test_delete_domain(self.req)
@mock.patch.object(network.api.API, "delete_dns_domain")
def _test_delete_domain(self, req, mock_del_dom):
delete = self.domain_controller.delete
res = delete(req, _quote_domain(domain))
self._check_status(202, res, delete)
mock_del_dom.assert_called_once_with(mock.ANY, domain)
def test_delete_domain_notfound(self):
self._test_delete_domain_notfound(self.req)
@mock.patch.object(network.api.API, "delete_dns_domain",
side_effect=exception.NotFound)
def _test_delete_domain_notfound(self, req, mock_del_dom):
self.assertRaises(
webob.exc.HTTPNotFound, self.domain_controller.delete,
req, _quote_domain(domain))
self.assertTrue(mock_del_dom.called)
def test_modify(self):
body = {'dns_entry':
{'ip': test_ipv4_address2,
'dns_type': 'A'}}
entry = self.entry_controller.update(self.req, domain, name, body=body)
self.assertEqual(entry['dns_entry']['ip'], test_ipv4_address2)
def test_not_implemented_dns_entry_update(self):
body = {'dns_entry':
{'ip': test_ipv4_address,
'dns_type': 'A'}}
with mock.patch.object(network.api.API, 'modify_dns_entry',
side_effect=NotImplementedError()):
self.assertRaises(webob.exc.HTTPNotImplemented,
self.entry_controller.update, self.req,
_quote_domain(domain), name, body=body)
def test_not_implemented_dns_entry_show(self):
with mock.patch.object(network.api.API, 'get_dns_entries_by_name',
side_effect=NotImplementedError()):
self.assertRaises(webob.exc.HTTPNotImplemented,
self.entry_controller.show,
self.req, _quote_domain(domain), name)
def test_not_implemented_delete_entry(self):
with mock.patch.object(network.api.API, 'delete_dns_entry',
side_effect=NotImplementedError()):
self.assertRaises(webob.exc.HTTPNotImplemented,
self.entry_controller.delete, self.req,
_quote_domain(domain), name)
def test_not_implemented_delete_domain(self):
with mock.patch.object(network.api.API, 'delete_dns_domain',
side_effect=NotImplementedError()):
self.assertRaises(webob.exc.HTTPNotImplemented,
self.domain_controller.delete, self.admin_req,
_quote_domain(domain))
def test_not_implemented_create_domain(self):
body = {'domain_entry':
{'scope': 'private',
'availability_zone': 'zone1'}}
with mock.patch.object(network.api.API, 'create_private_dns_domain',
side_effect=NotImplementedError()):
self.assertRaises(webob.exc.HTTPNotImplemented,
self.domain_controller.update, self.admin_req,
_quote_domain(domain), body=body)
def test_not_implemented_dns_domains_list(self):
with mock.patch.object(network.api.API, 'get_dns_domains',
side_effect=NotImplementedError()):
self.assertRaises(webob.exc.HTTPNotImplemented,
self.domain_controller.index, self.req)
class FloatingIPDNSDomainPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(FloatingIPDNSDomainPolicyEnforcementV21, self).setUp()
self.controller = fipdns_v21.FloatingIPDNSDomainController()
self.rule_name = "os_compute_api:os-floating-ip-dns"
self.policy.set_rules({self.rule_name: "project:non_fake"})
self.req = fakes.HTTPRequest.blank('')
def test_get_floating_ip_dns_policy_failed(self):
rule_name = "os_compute_api:os-floating-ip-dns"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_update_floating_ip_dns_policy_failed(self):
rule_name = "os_compute_api:os-floating-ip-dns:domain:update"
self.policy.set_rules({rule_name: "project:non_fake"})
body = {'domain_entry':
{'scope': 'public',
'project': 'testproject'}}
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.update, self.req, _quote_domain(domain), body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
def test_delete_floating_ip_dns_policy_failed(self):
rule_name = "os_compute_api:os-floating-ip-dns:domain:delete"
self.policy.set_rules({rule_name: "project:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, _quote_domain(domain))
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class FloatingIPDNSEntryPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(FloatingIPDNSEntryPolicyEnforcementV21, self).setUp()
self.controller = fipdns_v21.FloatingIPDNSEntryController()
self.rule_name = "os_compute_api:os-floating-ip-dns"
self.policy.set_rules({self.rule_name: "project:non_fake"})
self.req = fakes.HTTPRequest.blank('')
def test_show_floating_ip_dns_entry_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.show, self.req,
_quote_domain(domain), test_ipv4_address)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
def test_update_floating_ip_dns_policy_failed(self):
body = {'dns_entry':
{'ip': test_ipv4_address,
'dns_type': 'A'}}
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.update, self.req, _quote_domain(domain),
name, body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
def test_delete_floating_ip_dns_policy_failed(self):
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.delete, self.req, _quote_domain(domain), name)
self.assertEqual(
"Policy doesn't allow %s to be performed." % self.rule_name,
exc.format_message())
class FloatingIpDNSDomainDeprecationTest(test.NoDBTestCase):
def setUp(self):
super(FloatingIpDNSDomainDeprecationTest, self).setUp()
self.controller = fipdns_v21.FloatingIPDNSDomainController()
self.req = fakes.HTTPRequest.blank('', version='2.36')
def test_all_apis_return_not_found(self):
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.index, self.req)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.update, self.req, fakes.FAKE_UUID, {})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.delete, self.req, fakes.FAKE_UUID)
class FloatingIpDNSEntryDeprecationTest(test.NoDBTestCase):
def setUp(self):
super(FloatingIpDNSEntryDeprecationTest, self).setUp()
self.controller = fipdns_v21.FloatingIPDNSEntryController()
self.req = fakes.HTTPRequest.blank('', version='2.36')
def test_all_apis_return_not_found(self):
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.show, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.update, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID,
{})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.delete, self.req, fakes.FAKE_UUID, fakes.FAKE_UUID)
|
|
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import os
import sys
import pwd
import time
import string
import getpass
import radical.utils as ru
import radical.utils.logger as rul
import saga
import saga.exceptions as se
import saga.utils.misc as sumisc
import saga.utils.pty_process as supp
import pty_exceptions as ptye
# ------------------------------------------------------------------------------
#
# ssh options:
# -e none : no escape character
# -M : master mode for connection sharing
# -S control_path : slave mode for connection sharing
# -t : force pty allocation
# -x : disable x11 forwarding
#
# ServerAliveInterval
# CheckHostIP no
# ConnectTimeout
# ControlMaster yes | no | no ...
# ControlPath $BASE/ssh_control_%n_%p.$$.sock
# %r (remote id)? would need inspection
# ControlPersist 100 : close master after 100 seconds idle
# EscapeChar none : transparent for binary data
# TCPKeepAlive yes : detect connection failure
#
# LoginGraceTime seconds : disconnect if no login after n seconds
#
# ------------------------------------------------------------------------------
# these arrays help to map requested client schemas to master schemas
_SCHEMAS_SH = ['sh', 'fork', 'local', 'file']
_SCHEMAS_SSH = ['ssh', 'scp', 'sftp']
_SCHEMAS_GSI = ['gsissh', 'gsiscp', 'gsisftp']
_SCHEMAS = _SCHEMAS_SH + _SCHEMAS_SSH + _SCHEMAS_GSI
# FIXME: '-o ControlPersist' is only supported for newer ssh versions. We
# should add detection, and enable that if available -- for now, just disable it.
#
# FIXME: we should use '%n' instead of '%h', but that is not supported by older
# ssh versions...
# ssh master/slave flag magic # FIXME: make timeouts configurable
_SSH_FLAGS_MASTER = "-o ControlMaster=%(share_mode)s -o ControlPath=%(ctrl)s -o TCPKeepAlive=no -o ServerAliveInterval=10 -o ServerAliveCountMax=20 %(connect_timeout)s"
_SSH_FLAGS_SLAVE = "-o ControlMaster=%(share_mode)s -o ControlPath=%(ctrl)s -o TCPKeepAlive=no -o ServerAliveInterval=10 -o ServerAliveCountMax=20 %(connect_timeout)s"
_SCP_FLAGS = ""
_SFTP_FLAGS = ""
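# Illustrative sketch only: once _create_master_entry() (below) has filled in
# the placeholders, the master flags render roughly as follows (all values
# here are hypothetical):
#
#   _SSH_FLAGS_MASTER % {'share_mode'     : 'auto',
#                        'ctrl'           : '/tmp/saga_ssh_jdoe_%h_%p.ctrl',
#                        'connect_timeout': '-o ConnectTimeout=10'}
#   # -> "-o ControlMaster=auto -o ControlPath=/tmp/saga_ssh_jdoe_%h_%p.ctrl "
#   #    "-o TCPKeepAlive=no -o ServerAliveInterval=10 "
#   #    "-o ServerAliveCountMax=20 -o ConnectTimeout=10"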
# FIXME: right now, we create a shell connection as master --
# but a master does not actually need a shell, as it is never really
# used to run commands...
_SCRIPTS = {
'ssh' : {
'master' : '%(ssh_env)s "%(ssh_exe)s" %(ssh_args)s %(m_flags)s %(host_str)s',
'shell' : '%(ssh_env)s "%(ssh_exe)s" %(ssh_args)s %(s_flags)s %(host_str)s',
'copy_is_posix': True
},
'scp' : {
'copy_to' : '%(scp_env)s "%(scp_exe)s" %(scp_args)s %(s_flags)s %(cp_flags)s "%(src)s" "%(scp_root)s%(tgt)s"',
'copy_from' : '%(scp_env)s "%(scp_exe)s" %(scp_args)s %(s_flags)s %(cp_flags)s "%(scp_root)s%(src)s" "%(tgt)s"',
'copy_to_in' : '',
'copy_from_in' : '',
'copy_is_posix': False
},
'sftp' : {
'copy_to' : '%(sftp_env)s "%(sftp_exe)s" %(sftp_args)s %(s_flags)s %(host_str)s',
'copy_from' : '%(sftp_env)s "%(sftp_exe)s" %(sftp_args)s %(s_flags)s %(host_str)s',
'copy_to_in' : 'mput "%(src)s" "%(tgt)s"',
'copy_from_in' : 'mget "%(src)s" "%(tgt)s"',
'copy_is_posix': False
},
'sh' : {
'master' : '%(sh_env)s "%(sh_exe)s" %(sh_args)s',
'shell' : '%(sh_env)s "%(sh_exe)s" %(sh_args)s',
'copy_to' : '%(sh_env)s "%(sh_exe)s" %(sh_args)s',
'copy_from' : '%(sh_env)s "%(sh_exe)s" %(sh_args)s',
'copy_to_in' : 'cd ~ && "%(cp_exe)s" -v %(cp_flags)s "%(src)s" "%(tgt)s"',
'copy_from_in' : 'cd ~ && "%(cp_exe)s" -v %(cp_flags)s "%(src)s" "%(tgt)s"',
'copy_is_posix': True
}
}
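# Illustrative sketch only: the factory interpolates these templates with the
# per-connection 'info' dict (see _create_master_entry below). With
# hypothetical values such as
#
#   info = {'ssh_env'  : "/usr/bin/env TERM=vt100 ",
#           'ssh_exe'  : "/usr/bin/ssh",
#           'ssh_args' : "-t ",
#           'm_flags'  : "...",            # see _SSH_FLAGS_MASTER above
#           'host_str' : "jdoe@host.example.com"}
#
# the master command becomes roughly:
#
#   _SCRIPTS['ssh']['master'] % info
#   # -> '/usr/bin/env TERM=vt100  "/usr/bin/ssh" -t  ... jdoe@host.example.com'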
# ------------------------------------------------------------------------------
#
class PTYShellFactory (object) :
"""
This is the place where all master and all client shell connections get
created. The factory also maintains a registry of master connections, so
that slave connections can be spawned quickly for any customer without
repeated authorization overhead. Masters are unique per
host/user/port/context/shell_type hash.
Any ssh master connection in this registry can idle, and may thus shut down
after ``ControlPersist`` seconds (see options).
data model::
self.registry
|
+-- "host[:port]"
| |
| +-- str(context)
| | |
| | +-- "shell_type" (ssh)
| | | |
| | | +-- pty_process (gc_timeout)
| | | +-- shell_initialize()
| | | +-- shell_finalize()
| | | +-- shell_alive()
| | |
| | +-- ...
| |
| +-- ...
|
+-- ...
When slave connections are requested, a suitable master connection is looked
up and reused. 'Suitable' means: an ssh master for scp and sftp slaves; a
gsissh master for gsiscp and gsisftp slaves; and an sh master for file slaves.
"""
__metaclass__ = ru.Singleton
# --------------------------------------------------------------------------
#
def __init__ (self) :
self.logger = ru.get_logger ('radical.saga.pty')
self.registry = {}
self.rlock = ru.RLock ('pty shell factory')
# --------------------------------------------------------------------------
#
def initialize (self, url, session=None, prompt=None, logger=None,
posix=True, interactive=True) :
with self.rlock :
# make sure we have a valid url type
url = saga.Url (url)
if not prompt :
prompt = "^(.*[\$#%>\]])\s*$"
if not logger :
logger = self.logger
# collect all information we have/need about the requested master
# connection
info = self._create_master_entry (url, session, prompt, logger,
posix, interactive)
# we got master info - register the master, and create the instance!
type_s = str(info['shell_type'])
user_s = str(info['user'])
host_s = str(info['host_str'])
# Now, if we don't have that master, yet, we need to instantiate it
if not host_s in self.registry : self.registry[host_s] = {}
if not user_s in self.registry[host_s] : self.registry[host_s][user_s] = {}
if not type_s in self.registry[host_s][user_s] :
# new master: create an instance, and register it
m_cmd = info['scripts'][info['shell_type']]['master'] % info
logger.debug ("open master pty for [%s] [%s] %s: %s'" \
% (type_s, host_s, user_s, m_cmd))
info['pty'] = supp.PTYProcess (m_cmd, logger=logger)
if not info['pty'].alive () :
raise se.NoSuccess._log (logger, \
"Shell not connected to %s" % info['host_str'])
# authorization, prompt setup, etc. Initialize as shell if not
# explicitly marked as non-posix shell
self._initialize_pty (info['pty'], info)
# master was created - register it
self.registry[host_s][user_s][type_s] = info
else :
# we already have a master: make sure it is alive, and restart as
# needed
info = self.registry[host_s][user_s][type_s]
if not info['pty'].alive (recover=True) :
raise se.IncorrectState._log (logger, \
"Lost shell connection to %s" % info['host_str'])
return info
# --------------------------------------------------------------------------
#
def _initialize_pty (self, pty_shell, info, posix=None) :
# posix: we use prompt triggers only for posix shells. sftp, for example,
# does not deal well with triggers (no printf).
with self.rlock :
# import pprint
# pprint.pprint (info)
shell_pass = info['pass']
key_pass = info['key_pass']
prompt = info['prompt']
logger = info['logger']
latency = info['latency']
timeout = info['ssh_timeout']
pty_shell.latency = latency
if posix is None:
posix = info['posix']
# if we did not see a decent prompt within 'delay' time, something
# went wrong. Try to prompt a prompt (duh!). Delay should be at
# minimum 0.1 second (to avoid flooding of local shells), and at
# most 1 second (to keep startup time reasonable). We try to get
# within that range with 10*latency.
delay = min (1.0, max (0.1, 10 * latency))
try :
prompt_patterns = ["[Pp]assword:\s*$", # password prompt
"Enter passphrase for .*:\s*$", # passphrase prompt
"Token_Response.*:\s*$", # passtoken prompt
"Enter PASSCODE:$", # RSA SecureID
"want to continue connecting", # hostkey confirmation
".*HELLO_\\d+_SAGA$", # prompt detection helper
prompt] # greedy native shell prompt
# use a very aggressive, but portable prompt setting scheme.
# Error messages may appear for tcsh and others. Non-posix
# shells are excused from this step.
if posix:
pty_shell.write (" export PS1='$' ; set prompt='$'\n")
# find a prompt
n, match = pty_shell.find (prompt_patterns, delay)
# this loop will run until we finally find the shell prompt, or
# until we think we have tried enough and give up. On success
# we'll try to set a different prompt, and once we have found
# that, too, we exit the loop and are ready to run shell
# commands.
retries = 0
retry_trigger = True
used_trigger = False
found_trigger = ""
time_start = time.time()
while True :
# --------------------------------------------------------------
if n == None :
# we found none of the prompts, yet, and need to try
# again. But to avoid hanging on invalid prompts, we
# print 'HELLO_x_SAGA', and search for that one, too.
# We actually do 'printf HELLO_%d_SAGA x' so that the
# pattern only appears in the result, not in the
# command...
if time.time() - time_start > timeout:
raise se.NoSuccess ("Could not detect shell prompt (timeout)")
# make sure we retry a finite time...
retries += 1
if not retry_trigger :
# just waiting for the *right* trigger or prompt,
# don't need new ones...
continue
if posix:
# use a very aggressive, but portable prompt setting scheme
pty_shell.write (" export PS1='$' > /dev/null 2>&1 || set prompt='$'\n")
pty_shell.write (" printf 'HELLO_%%d_SAGA\\n' %d\n" % retries)
used_trigger = True
# FIXME: consider better timeout
n, match = pty_shell.find (prompt_patterns, delay)
# --------------------------------------------------------------
elif n == 0 :
logger.info ("got password prompt")
if not shell_pass :
raise se.AuthenticationFailed ("prompted for unknown password (%s)" \
% match)
pty_shell.write ("%s\n" % shell_pass, nolog=True)
n, match = pty_shell.find (prompt_patterns, delay)
# --------------------------------------------------------------
elif n == 1 :
logger.info ("got passphrase prompt : %s" % match)
start = string.find (match, "'", 0)
end = string.find (match, "'", start+1)
if start == -1 or end == -1 :
raise se.AuthenticationFailed ("could not extract key name (%s)" % match)
key = match[start+1:end]
if not key in key_pass :
raise se.AuthenticationFailed ("prompted for unknown key password (%s)" \
% key)
pty_shell.write ("%s\n" % key_pass[key], nolog=True)
n, match = pty_shell.find (prompt_patterns, delay)
# --------------------------------------------------------------
elif n == 2 or n == 3:
logger.info ("got token prompt")
import getpass
token = getpass.getpass ("enter token: ")
pty_shell.write ("%s\n" % token.strip(), nolog=True)
n, match = pty_shell.find (prompt_patterns, delay)
# --------------------------------------------------------------
elif n == 4:
logger.info ("got hostkey prompt")
pty_shell.write ("yes\n")
n, match = pty_shell.find (prompt_patterns, delay)
# --------------------------------------------------------------
elif n == 5:
# one of the trigger commands got through -- we can now
# hope to find the prompt (or the next trigger...)
logger.debug ("got shell prompt trigger (%s) (%s)" % (n, match))
found_trigger = match
retry_trigger = False
n, match = pty_shell.find (prompt_patterns, delay)
continue
# --------------------------------------------------------------
elif n == 6 :
logger.debug ("got initial shell prompt (%s) (%s)" % (n, match))
if retries :
if used_trigger :
# we already sent triggers -- so this match is only
# useful if we saw the *correct* shell prompt trigger
# first
trigger = "HELLO_%d_SAGA" % retries
if not trigger in found_trigger :
logger.debug ("waiting for prompt trigger %s: (%s) (%s)" \
% (trigger, n, match))
# but more retries won't help...
retry_trigger = False
attempts = 0
n = None
while not n :
attempts += 1
n, match = pty_shell.find (prompt_patterns, delay)
if not n :
if attempts == 1 :
if posix:
pty_shell.write (" printf 'HELLO_%%d_SAGA\\n' %d\n" % retries)
if attempts > 100 :
raise se.NoSuccess ("Could not detect shell prompt (timeout)")
continue
logger.debug ("Got initial shell prompt (%s) (%s)" % (n, match))
# we are done waiting for a prompt
break
except Exception as e :
logger.exception(e)
raise ptye.translate_exception (e)
# --------------------------------------------------------------------------
#
def get_cp_slave (self, s_cmd, info, posix=None) :
with self.rlock :
if posix is None:
posix = info.get('copy_is_posix')
# print '> -- new cp shell to %s' % s_cmd
cp_slave = supp.PTYProcess (s_cmd, info['logger'])
self._initialize_pty (cp_slave, info, posix)
return cp_slave
# --------------------------------------------------------------------------
#
def run_shell (self, info) :
"""
This initiates a master connection. If there is a suitable master
connection in the registry, it is re-used, and no new master connection
is created. If needed, the existing master connection is revived.
"""
# if True :
with self.rlock :
s_cmd = info['scripts'][info['shell_type']]['shell'] % info
# at this point, we do have a valid, living master
sh_slave = supp.PTYProcess (s_cmd, info['logger'])
# authorization, prompt setup, etc
self._initialize_pty (sh_slave, info)
return sh_slave
# --------------------------------------------------------------------------
#
def _which(self, cmd):
ret = ru.which(cmd)
if not ret:
raise RuntimeError('cmd %s not found' % cmd)
return ret
# --------------------------------------------------------------------------
#
def _create_master_entry (self, url, session, prompt, logger, posix,
interactive) :
# FIXME: cache 'which' results, etc
# FIXME: check 'which' results
with self.rlock :
info = {'posix' : posix}
# get and evaluate session config
if not session :
session = saga.Session (default=True)
session_cfg = session.get_config ('saga.utils.pty')
info['ssh_copy_mode'] = session_cfg['ssh_copy_mode'].get_value ()
info['ssh_share_mode'] = session_cfg['ssh_share_mode'].get_value ()
info['ssh_timeout'] = session_cfg['ssh_timeout'].get_value ()
logger.info ("ssh copy mode set to '%s'" % info['ssh_copy_mode' ])
logger.info ("ssh share mode set to '%s'" % info['ssh_share_mode'])
logger.info ("ssh timeout set to '%s'" % info['ssh_timeout'])
# fill the info dict with details for this master channel, and all
# related future slave channels
info['schema'] = url.schema.lower ()
info['host_str'] = url.host
info['prompt'] = prompt
info['logger'] = logger
info['url'] = url
info['pass'] = ""
info['key_pass'] = {}
info['scripts'] = _SCRIPTS
if not info['schema'] :
info['schema'] = 'local'
# find out what type of shell we have to deal with
if info['schema'] in _SCHEMAS_SSH :
info['shell_type'] = "ssh"
info['copy_mode'] = info['ssh_copy_mode']
info['share_mode'] = info['ssh_share_mode']
info['ssh_exe'] = self._which ("ssh")
info['scp_exe'] = self._which ("scp")
info['sftp_exe'] = self._which ("sftp")
elif info['schema'] in _SCHEMAS_GSI :
info['shell_type'] = "ssh"
info['copy_mode'] = info['ssh_copy_mode']
info['share_mode'] = info['ssh_share_mode']
info['ssh_exe'] = self._which ("gsissh")
info['scp_exe'] = self._which ("gsiscp")
info['sftp_exe'] = self._which ("gsisftp")
elif info['schema'] in _SCHEMAS_SH :
info['shell_type'] = "sh"
info['copy_mode'] = "sh"
info['share_mode'] = "auto"
info['sh_env'] = "/usr/bin/env TERM=vt100 PS1='PROMPT-$?->'"
info['cp_env'] = "/usr/bin/env TERM=vt100 PS1='PROMPT-$?->'"
info['scp_root'] = "/"
if interactive: info['sh_args'] = "-i"
else : info['sh_args'] = ""
if "SHELL" in os.environ :
info['sh_exe'] = self._which (os.environ["SHELL"])
info['cp_exe'] = self._which ("cp")
else :
info['sh_exe'] = self._which ("sh")
info['cp_exe'] = self._which ("cp")
else :
raise se.BadParameter._log (self.logger, \
"cannot handle schema '%s://'" % url.schema)
# If an SSH timeout has been specified, set up the ConnectTimeout
# string
if info['ssh_timeout']:
info['ssh_connect_timeout'] = ('-o ConnectTimeout=%s'
% int(float(info['ssh_timeout'])))
else:
info['ssh_connect_timeout'] = ''
# depending on type, create command line (args, env etc)
#
# We always set term=vt100 to avoid ansi-escape sequences in the prompt
# and elsewhere. Also, we have to make sure that the shell is an
# interactive login shell, so that it interprets the user's startup
# files, and reacts to commands.
try :
info['latency'] = sumisc.get_host_latency (url)
# FIXME: note that get_host_latency is considered broken (see
# saga/utils/misc.py line 73), and will return a constant 250ms.
except Exception as e :
info['latency'] = 1.0 # generic value assuming slow link
info['logger'].warning ("Could not contact host '%s': %s" % (url, e))
if info['shell_type'] == "sh" :
info['sh_env'] = "/usr/bin/env TERM=vt100 " # avoid ansi escapes
if not sumisc.host_is_local (url.host) :
raise se.BadParameter._log (self.logger, \
"expect local host for '%s://', not '%s'" % (url.schema, url.host))
if 'user' in info and info['user'] :
pass
else :
info['user'] = getpass.getuser ()
else :
info['ssh_env'] = "/usr/bin/env TERM=vt100 " # avoid ansi escapes
info['scp_env'] = "/usr/bin/env TERM=vt100 " # avoid ansi escapes
info['sftp_env'] = "/usr/bin/env TERM=vt100 " # avoid ansi escapes
info['ssh_args'] = "-t " # force pty
info['scp_args'] = _SCP_FLAGS
info['sftp_args'] = _SFTP_FLAGS
if session :
for context in session.contexts :
# ssh can also handle UserPass contexts, and ssh type contexts.
# gsissh can handle the same, but also X509 contexts.
if context.type.lower () == "ssh" :
if info['schema'] in _SCHEMAS_SSH + _SCHEMAS_GSI :
if context.attribute_exists ("user_id") and context.user_id :
info['user'] = context.user_id
if context.attribute_exists ("user_key") and context.user_key :
info['ssh_args'] += "-o IdentityFile=%s " % context.user_key
info['scp_args'] += "-o IdentityFile=%s " % context.user_key
info['sftp_args'] += "-o IdentityFile=%s " % context.user_key
if context.attribute_exists ("user_pass") and context.user_pass :
info['key_pass'][context.user_key] = context.user_pass
if context.type.lower () == "userpass" :
if info['schema'] in _SCHEMAS_SSH + _SCHEMAS_GSI :
if context.attribute_exists ("user_id") and context.user_id :
info['user'] = context.user_id
if context.attribute_exists ("user_pass") and context.user_pass :
info['pass'] = context.user_pass
if context.type.lower () == "x509" :
if info['schema'] in _SCHEMAS_GSI :
if context.attribute_exists ("user_proxy") and context.user_proxy :
info['ssh_env'] += "X509_USER_PROXY='%s' " % context.user_proxy
info['scp_env'] += "X509_USER_PROXY='%s' " % context.user_proxy
info['sftp_env'] += "X509_USER_PROXY='%s' " % context.user_proxy
if context.attribute_exists ("user_cert") and context.user_cert :
info['ssh_env'] += "X509_USER_CERT='%s' " % context.user_cert
info['scp_env'] += "X509_USER_CERT='%s' " % context.user_cert
info['sftp_env'] += "X509_USER_CERT='%s' " % context.user_cert
if context.attribute_exists ("user_key") and context.user_key :
info['ssh_env'] += "X509_USER_key='%s' " % context.user_key
info['scp_env'] += "X509_USER_key='%s' " % context.user_key
info['sftp_env'] += "X509_USER_key='%s' " % context.user_key
if context.attribute_exists ("cert_repository") and context.cert_repository :
info['ssh_env'] += "X509_CERT_DIR='%s' " % context.cert_repository
info['scp_env'] += "X509_CERT_DIR='%s' " % context.cert_repository
info['sftp_env'] += "X509_CERT_DIR='%s' " % context.cert_repository
if url.port and url.port != -1 :
info['ssh_args'] += "-o Port=%d " % int(url.port)
info['scp_args'] += "-o Port=%d " % int(url.port)
info['sftp_args'] += "-o Port=%d " % int(url.port)
# all ssh based shells allow for user_id and user_pass from contexts
# -- but the data given in the URL take precedence
if url.username : info['user'] = url.username
if url.password : info['pass'] = url.password
ctrl_user = pwd.getpwuid (os.getuid ()).pw_name
ctrl_base = "/tmp/saga_ssh_%s" % ctrl_user
if 'user' in info and info['user'] :
info['host_str'] = "%s@%s" % (info['user'], info['host_str'])
info['ctrl'] = "%s_%%h_%%p.%s.ctrl" % (ctrl_base, info['user'])
else :
info['user'] = getpass.getuser ()
info['ctrl'] = "%s_%%h_%%p.ctrl" % (ctrl_base)
info['m_flags'] = _SSH_FLAGS_MASTER % ({'share_mode' : info['share_mode'],
'ctrl' : info['ctrl'],
'connect_timeout': info['ssh_connect_timeout']})
info['s_flags'] = _SSH_FLAGS_SLAVE % ({'share_mode' : info['share_mode'],
'ctrl' : info['ctrl'],
'connect_timeout': info['ssh_connect_timeout']})
logger.debug('SSH Connection M_FLAGS: %s' % info['m_flags'])
logger.debug('SSH Connection S_FLAGS: %s' % info['s_flags'])
# we want the userauth and hostname parts of the URL, to get the
# scp-scope fs root.
info['scp_root'] = ""
has_auth = False
if url.username :
info['scp_root'] += url.username
has_auth = True
if url.password :
info['scp_root'] += ":"
info['scp_root'] += url.password
has_auth = True
if has_auth :
info['scp_root'] += "@"
info['scp_root'] += "%s:" % url.host
# FIXME: port needs to be handled as parameter
# if url.port :
# info['scp_root'] += ":%d" % url.port
# keep all collected info in the master dict, and return it for
# registration
return info
# ------------------------------------------------------------------------------
|
|
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Purpose of this module is to hold common script/commandline functionality.
This ranges from optparse, to a basic script wrapper setup (much like
what is used for chromite.bin.*).
"""
from __future__ import print_function
import argparse
import collections
import datetime
import functools
import os
import optparse
import signal
import sys
import urlparse
# TODO(build): sort the cbuildbot.constants/lib.constants issue;
# lib shouldn't have to import from buildbot like this.
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import gs
from chromite.lib import osutils
from chromite.lib import path_util
from chromite.lib import terminal
from chromite.lib import workspace_lib
DEVICE_SCHEME_FILE = 'file'
DEVICE_SCHEME_SSH = 'ssh'
DEVICE_SCHEME_USB = 'usb'
# Command line options to automatically convert from relative or absolute paths
# to locator paths when RunInsideChroot() is used.
_LOCATOR_OVERRIDE_OPTIONS = ('brick', 'blueprint')
class ChrootRequiredError(Exception):
"""Raised when a command must be run in the chroot
This exception is intended to be caught by code which will restart execution
in the chroot. Throwing this exception allows contexts to be exited and
general cleanup to happen before we exec an external binary.
The command to run inside the chroot, and (optionally) special cros_sdk
arguments are attached to the exception. Any adjustments to the arguments
should be done before raising the exception.
"""
def __init__(self, cmd, chroot_args=None):
"""Constructor for ChrootRequiredError.
Args:
cmd: Command line to run inside the chroot as a list of strings.
chroot_args: Arguments to pass directly to cros_sdk.
"""
super(ChrootRequiredError, self).__init__(self)
self.cmd = cmd
self.chroot_args = chroot_args
class ExecRequiredError(Exception):
"""Raised when a command needs to exec, after cleanup.
This exception is intended to be caught by code which will exec another
command. Throwing this exception allows contexts to be exited and general
cleanup to happen before we exec an external binary.
The command to run is attached to the exception. Any adjustments to the
arguments should be done before raising the exception.
"""
def __init__(self, cmd):
"""Constructor for ExecRequiredError.
Args:
cmd: Command line to run inside the chroot as a list of strings.
"""
super(ExecRequiredError, self).__init__(self)
self.cmd = cmd
def AbsolutePath(_option, _opt, value):
"""Expand paths and make them absolute."""
return osutils.ExpandPath(value)
def NormalizeGSPath(value):
"""Normalize GS paths."""
url = gs.CanonicalizeURL(value, strict=True)
return '%s%s' % (gs.BASE_GS_URL, os.path.normpath(url[len(gs.BASE_GS_URL):]))
def NormalizeLocalOrGSPath(value):
"""Normalize a local or GS path."""
ptype = 'gs_path' if gs.PathIsGs(value) else 'path'
return VALID_TYPES[ptype](value)
def ParseBool(value):
"""Parse bool argument into a bool value.
For the existing type=bool functionality, the parser uses the built-in bool(x)
function to determine the value. That function only returns False if x is
False or omitted. Even with this type specified, however, arguments coming
from the command line are initially parsed as strings, and bool(x) returns
True for any non-empty string, so 'False' or 'no' would still evaluate to True.
Args:
value: String representing a boolean value.
Returns:
True or False.
"""
return cros_build_lib.BooleanShellValue(value, False)
def ParseDate(value):
"""Parse date argument into a datetime.date object.
Args:
value: String representing a single date in "YYYY-MM-DD" format.
Returns:
A datetime.date object.
"""
try:
return datetime.datetime.strptime(value, '%Y-%m-%d').date()
except ValueError:
# Give a helpful error message about the format expected. Putting this
# message in the exception is useless because argparse ignores the
# exception message and just says the value is invalid.
logging.error('Date is expected to be in format YYYY-MM-DD.')
raise
def NormalizeUri(value):
"""Normalize a local path or URI."""
o = urlparse.urlparse(value)
if o.scheme == 'file':
# Trim off the file:// prefix.
return VALID_TYPES['path'](value[7:])
elif o.scheme not in ('', 'gs'):
o = list(o)
o[2] = os.path.normpath(o[2])
return urlparse.urlunparse(o)
else:
return NormalizeLocalOrGSPath(value)
# A Device object holds information parsed from the command line input:
# scheme: DEVICE_SCHEME_SSH, DEVICE_SCHEME_USB, or DEVICE_SCHEME_FILE.
# username: String SSH username or None.
# hostname: String SSH hostname or None.
# port: Int SSH port or None.
# path: String USB/file path or None.
# raw: String raw input from the command line.
# For now this is a superset of all information for USB, SSH, or file devices.
# If functionality diverges based on type, it may be useful to split this into
# separate device classes instead.
Device = cros_build_lib.Collection(
'Device', scheme=None, username=None, hostname=None, port=None, path=None,
raw=None)
class DeviceParser(object):
"""Parses devices as an argparse argument type.
In addition to parsing user input, this class will also ensure that only
supported device schemes are accepted by the parser. For example,
`cros deploy` only makes sense with an SSH device, but `cros flash` can use
SSH, USB, or file device schemes.
If the device input is malformed or the scheme is wrong, an error message will
be printed and the program will exit.
Valid device inputs are:
- [ssh://][username@]hostname[:port].
- usb://[path].
- file://path or /absolute_path.
Usage:
parser = argparse.ArgumentParser()
parser.add_argument(
'ssh_device',
type=commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH))
parser.add_argument(
'usb_or_file_device',
type=commandline.DeviceParser([commandline.DEVICE_SCHEME_USB,
commandline.DEVICE_SCHEME_FILE]))
"""
def __init__(self, schemes):
"""Initializes the parser.
See the class comments for usage examples.
Args:
schemes: A scheme or list of schemes to accept.
"""
self.schemes = [schemes] if isinstance(schemes, basestring) else schemes
# Provide __name__ for argparse to print on failure, or else it will use
# repr() which creates a confusing error message.
self.__name__ = type(self).__name__
def __call__(self, value):
"""Parses a device input and enforces constraints.
DeviceParser is an object so that a set of valid schemes can be specified,
but argparse expects a parsing function, so we overload __call__() for
argparse to use.
Args:
value: String representing a device target. See class comments for
valid device input formats.
Returns:
A Device object.
Raises:
ValueError: |value| is not a valid device specifier or doesn't
match the supported list of schemes.
"""
try:
device = self._ParseDevice(value)
self._EnforceConstraints(device, value)
return device
except ValueError as e:
# argparse ignores exception messages, so print the message manually.
logging.error(e)
raise
except Exception as e:
logging.error('Internal error while parsing device input: %s', e)
raise
def _EnforceConstraints(self, device, value):
"""Verifies that user-specified constraints are upheld.
Checks that the parsed device has a scheme that matches what the user
expects. Additional constraints can be added if needed.
Args:
device: Device object.
value: String representing a device target.
Raises:
ValueError: |device| has the wrong scheme.
"""
if device.scheme not in self.schemes:
raise ValueError('Unsupported scheme "%s" for device "%s"' %
(device.scheme, value))
def _ParseDevice(self, value):
"""Parse a device argument.
Args:
value: String representing a device target.
Returns:
A Device object.
Raises:
ValueError: |value| is not a valid device specifier.
"""
parsed = urlparse.urlparse(value)
if not parsed.scheme:
# Default to a file scheme for absolute paths, SSH scheme otherwise.
if value and value[0] == '/':
scheme = DEVICE_SCHEME_FILE
else:
# urlparse won't provide hostname/username/port unless a scheme is
# specified so we need to re-parse.
parsed = urlparse.urlparse('%s://%s' % (DEVICE_SCHEME_SSH, value))
scheme = DEVICE_SCHEME_SSH
else:
scheme = parsed.scheme.lower()
if scheme == DEVICE_SCHEME_SSH:
hostname = parsed.hostname
if not hostname:
raise ValueError('Hostname is required for device "%s"' % value)
return Device(scheme=scheme, username=parsed.username, hostname=hostname,
port=parsed.port, raw=value)
elif scheme == DEVICE_SCHEME_USB:
path = parsed.netloc + parsed.path
# Change path '' to None for consistency.
return Device(scheme=scheme, path=path if path else None, raw=value)
elif scheme == DEVICE_SCHEME_FILE:
path = parsed.netloc + parsed.path
if not path:
raise ValueError('Path is required for "%s"' % value)
return Device(scheme=scheme, path=path, raw=value)
else:
raise ValueError('Unknown device scheme "%s" in "%s"' % (scheme, value))
def NormalizeWorkspacePath(path, default_dir=None, extension=None):
"""Normalize a workspace path.
Converts |path| into a locator and applies |default_dir| and/or
|extension| if specified.
Args:
path: Relative, absolute, or locator path in the CWD workspace.
default_dir: If |path| does not contain '/', prepend this
directory to the result.
extension: If |path| doesn't end in this extension, append this
extension to the result.
Returns:
Workspace locator corresponding to the modified |path|.
Raises:
ValueError: |path| isn't in the workspace.
"""
if default_dir and '/' not in path:
path = os.path.join(default_dir, path)
if extension:
extension = '.' + extension
if os.path.splitext(path)[1] != extension:
path += extension
if workspace_lib.IsLocator(path):
return path
locator = workspace_lib.PathToLocator(path)
if not locator:
# argparse ignores exception messages; log it as well so the user sees it.
error_message = '%s is not in the current workspace.' % path
logging.error(error_message)
raise ValueError(error_message)
return locator
def NormalizeBrickPath(path):
"""Normalize a brick path using some common assumptions.
Makes the following changes to |path|:
1. Put non-paths in //bricks (e.g. foo -> //bricks/foo).
2. Convert to a workspace locator.
Args:
path: brick path.
Returns:
Locator to the brick.
"""
return NormalizeWorkspacePath(path, default_dir='//bricks')
def NormalizeBspPath(path):
"""Normalize a BSP path using some common assumptions.
Makes the following changes to |path|:
1. Put non-paths in //bsps (e.g. foo -> //bsps/foo).
2. Convert to a workspace locator.
Args:
path: BSP path.
Returns:
Locator to the BSP.
"""
return NormalizeWorkspacePath(path, default_dir='//bsps')
def NormalizeBlueprintPath(path):
"""Normalize a blueprint path using some common assumptions.
Makes the following changes to |path|:
1. Put non-paths in //blueprints (e.g. foo -> //blueprints/foo).
2. Add .json if not already present.
3. Convert to a workspace locator.
Args:
path: blueprint path.
Returns:
Locator to the blueprint.
"""
return NormalizeWorkspacePath(path, default_dir='//blueprints',
extension='json')
VALID_TYPES = {
'bool': ParseBool,
'date': ParseDate,
'path': osutils.ExpandPath,
'gs_path': NormalizeGSPath,
'local_or_gs_path': NormalizeLocalOrGSPath,
'path_or_uri': NormalizeUri,
'blueprint_path': NormalizeBlueprintPath,
'brick_path': NormalizeBrickPath,
'bsp_path': NormalizeBspPath,
'workspace_path': NormalizeWorkspacePath,
}
def OptparseWrapCheck(desc, check_f, _option, opt, value):
"""Optparse adapter for type checking functionality."""
try:
return check_f(value)
except ValueError:
raise optparse.OptionValueError(
'Invalid %s given: --%s=%s' % (desc, opt, value))
class Option(optparse.Option):
"""Subclass to implement path evaluation & other useful types."""
_EXTRA_TYPES = ('path', 'gs_path')
TYPES = optparse.Option.TYPES + _EXTRA_TYPES
TYPE_CHECKER = optparse.Option.TYPE_CHECKER.copy()
for t in _EXTRA_TYPES:
TYPE_CHECKER[t] = functools.partial(OptparseWrapCheck, t, VALID_TYPES[t])
class FilteringOption(Option):
"""Subclass that supports Option filtering for FilteringOptionParser"""
def take_action(self, action, dest, opt, value, values, parser):
if action in FilteringOption.ACTIONS:
Option.take_action(self, action, dest, opt, value, values, parser)
if value is None:
value = []
elif not self.nargs or self.nargs <= 1:
value = [value]
parser.AddParsedArg(self, opt, [str(v) for v in value])
# TODO: logging.Formatter is not a subclass of object in python
# 2.6. Make ColoredFormatter explicitly inherit from object so that
# functions such as super() will not fail. This should be removed
# after python is upgraded to 2.7 on master2 (crbug.com/409273).
class ColoredFormatter(logging.Formatter, object):
"""A logging formatter that can color the messages."""
_COLOR_MAPPING = {
'WARNING': terminal.Color.YELLOW,
'ERROR': terminal.Color.RED,
}
def __init__(self, *args, **kwargs):
"""Initializes the formatter.
Args:
args: See logging.Formatter for specifics.
kwargs: See logging.Formatter for specifics.
enable_color: Whether to enable colored logging. Defaults
to None, in which case terminal.Color falls back to a sane default.
"""
self.color = terminal.Color(enabled=kwargs.pop('enable_color', None))
super(ColoredFormatter, self).__init__(*args, **kwargs)
def format(self, record, **kwargs):
"""Formats |record| with color."""
msg = super(ColoredFormatter, self).format(record, **kwargs)
color = self._COLOR_MAPPING.get(record.levelname)
return msg if not color else self.color.Color(color, msg)
class ChromiteStreamHandler(logging.StreamHandler):
"""A stream handler for logging."""
class BaseParser(object):
"""Base parser class that includes the logic to add logging controls."""
DEFAULT_LOG_LEVELS = ('fatal', 'critical', 'error', 'warning', 'notice',
'info', 'debug')
DEFAULT_LOG_LEVEL = 'info'
ALLOW_LOGGING = True
def __init__(self, **kwargs):
"""Initialize this parser instance.
kwargs:
logging: Defaults to ALLOW_LOGGING from the class; if given,
add --log-level.
default_log_level: If logging is enabled, override the default logging
level. Defaults to the class's DEFAULT_LOG_LEVEL value.
log_levels: If logging is enabled, this overrides the enumeration of
allowed logging levels. If not given, defaults to the class's
DEFAULT_LOG_LEVELS value.
manual_debug: If logging is enabled and this is True, suppress addition
of a --debug alias. This option defaults to True unless 'debug' has
been exempted from the allowed logging level targets.
caching: If given, must be either a callable that discerns the cache
location if it wasn't specified (the prototype must be akin to
lambda parser, values: calculated_cache_dir_path; it may return None to
indicate that it handles setting the value on its own later in the
parsing, including setting the env), or True; if True, the
machinery defaults to invoking the class's FindCacheDir method
(which can be overridden). FindCacheDir checks $CROS_CACHEDIR, falling
back to $REPO/.cache, finally falling back to $TMP.
Note that the cache_dir is not created; this only discerns where it
should live.
If False, or caching is not given, then no --cache-dir option will be
added.
"""
self.debug_enabled = False
self.caching_group = None
self.debug_group = None
self.default_log_level = None
self.log_levels = None
self.logging_enabled = kwargs.get('logging', self.ALLOW_LOGGING)
self.default_log_level = kwargs.get('default_log_level',
self.DEFAULT_LOG_LEVEL)
self.log_levels = tuple(x.lower() for x in
kwargs.get('log_levels', self.DEFAULT_LOG_LEVELS))
self.debug_enabled = (not kwargs.get('manual_debug', False)
and 'debug' in self.log_levels)
self.caching = kwargs.get('caching', False)
@staticmethod
def PopUsedArgs(kwarg_dict):
"""Removes keys used by the base parser from the kwarg namespace."""
parser_keys = ['logging', 'default_log_level', 'log_levels', 'manual_debug',
'caching']
for key in parser_keys:
kwarg_dict.pop(key, None)
def SetupOptions(self):
"""Sets up special chromite options for an OptionParser."""
if self.logging_enabled:
self.debug_group = self.add_option_group('Debug options')
self.add_option_to_group(
self.debug_group, '--log-level', choices=self.log_levels,
default=self.default_log_level,
help='Set logging level to report at.')
self.add_option_to_group(
self.debug_group, '--log_format', action='store',
default=constants.LOGGER_FMT,
help='Set logging format to use.')
if self.debug_enabled:
self.add_option_to_group(
self.debug_group, '--debug', action='store_const', const='debug',
dest='log_level', help='Alias for `--log-level=debug`. '
'Useful for debugging bugs/failures.')
self.add_option_to_group(
self.debug_group, '--nocolor', action='store_false', dest='color',
default=None,
help='Do not use colorized output (or `export NOCOLOR=true`)')
if self.caching:
self.caching_group = self.add_option_group('Caching Options')
self.add_option_to_group(
self.caching_group, '--cache-dir', default=None, type='path',
help='Override the calculated chromeos cache directory; '
"typically defaults to '$REPO/.cache' .")
def SetupLogging(self, opts):
"""Sets up logging based on |opts|."""
value = opts.log_level.upper()
logger = logging.getLogger()
logger.setLevel(getattr(logging, value))
formatter = ColoredFormatter(fmt=opts.log_format,
datefmt=constants.LOGGER_DATE_FMT,
enable_color=opts.color)
# Only set colored formatter for ChromiteStreamHandler instances,
# which could have been added by ScriptWrapperMain() below.
chromite_handlers = [x for x in logger.handlers if
isinstance(x, ChromiteStreamHandler)]
for handler in chromite_handlers:
handler.setFormatter(formatter)
return value
def DoPostParseSetup(self, opts, args):
"""Method called to handle post opts/args setup.
This can be anything from logging setup to positional arg count validation.
Args:
opts: optparse.Values or argparse.Namespace instance
args: positional arguments unconsumed from parsing.
Returns:
(opts, args), with whatever modifications were done.
"""
if self.logging_enabled:
value = self.SetupLogging(opts)
if self.debug_enabled:
opts.debug = (value == 'DEBUG')
if self.caching:
path = os.environ.get(constants.SHARED_CACHE_ENVVAR)
if path is not None and opts.cache_dir is None:
opts.cache_dir = os.path.abspath(path)
opts.cache_dir_specified = opts.cache_dir is not None
if not opts.cache_dir_specified:
func = self.FindCacheDir if not callable(self.caching) else self.caching
opts.cache_dir = func(self, opts)
if opts.cache_dir is not None:
self.ConfigureCacheDir(opts.cache_dir)
# Overrides for automatic path-to-locator conversion.
for option in _LOCATOR_OVERRIDE_OPTIONS:
value = getattr(opts, '%s_locator_override' % option, None)
if value:
setattr(opts, option, value)
return opts, args
@staticmethod
def ConfigureCacheDir(cache_dir):
if cache_dir is None:
os.environ.pop(constants.SHARED_CACHE_ENVVAR, None)
logging.debug('Removed cache_dir setting')
else:
os.environ[constants.SHARED_CACHE_ENVVAR] = cache_dir
logging.debug('Configured cache_dir to %r', cache_dir)
@classmethod
def FindCacheDir(cls, _parser, _opts):
logging.debug('Cache dir lookup.')
return path_util.FindCacheDir()
def add_option_group(self, *args, **kwargs):
"""Returns a new option group see optparse.OptionParser.add_option_group."""
raise NotImplementedError('Subclass must override this method')
@staticmethod
def add_option_to_group(group, *args, **kwargs):
"""Adds the given option defined by args and kwargs to group."""
group.add_option(*args, **kwargs)
class ArgumentNamespace(argparse.Namespace):
"""Class to mimic argparse.Namespace with value freezing support."""
__metaclass__ = cros_build_lib.FrozenAttributesClass
_FROZEN_ERR_MSG = 'Option values are frozen, cannot alter %s.'
# Note that because optparse.Values is not a new-style class this class
# must use the mixin FrozenAttributesMixin rather than the metaclass
# FrozenAttributesClass.
class OptionValues(cros_build_lib.FrozenAttributesMixin, optparse.Values):
"""Class to mimic optparse.Values with value freezing support."""
_FROZEN_ERR_MSG = 'Option values are frozen, cannot alter %s.'
def __init__(self, defaults, *args, **kwargs):
cros_build_lib.FrozenAttributesMixin.__init__(self)
optparse.Values.__init__(self, defaults, *args, **kwargs)
# Used by FilteringParser.
self.parsed_args = None
class OptionParser(optparse.OptionParser, BaseParser):
"""Custom parser adding our custom option class in.
Aside from adding a couple of types (path for absolute paths,
gs_path for google storage urls, and log_level for logging level control),
this additionally exposes logging control by default; if undesired,
either derive from this class setting ALLOW_LOGGING to False, or
pass in logging=False to the constructor.
"""
DEFAULT_OPTION_CLASS = Option
def __init__(self, usage=None, **kwargs):
BaseParser.__init__(self, **kwargs)
self.PopUsedArgs(kwargs)
kwargs.setdefault('option_class', self.DEFAULT_OPTION_CLASS)
optparse.OptionParser.__init__(self, usage=usage, **kwargs)
self.SetupOptions()
def parse_args(self, args=None, values=None):
# If no Values object is specified then use our custom OptionValues.
if values is None:
values = OptionValues(defaults=self.defaults)
opts, remaining = optparse.OptionParser.parse_args(
self, args=args, values=values)
return self.DoPostParseSetup(opts, remaining)
PassedOption = collections.namedtuple(
'PassedOption', ['opt_inst', 'opt_str', 'value_str'])
class FilteringParser(OptionParser):
"""Custom option parser for filtering options."""
DEFAULT_OPTION_CLASS = FilteringOption
def parse_args(self, args=None, values=None):
# If no Values object is specified then use our custom OptionValues.
if values is None:
values = OptionValues(defaults=self.defaults)
values.parsed_args = []
return OptionParser.parse_args(self, args=args, values=values)
def AddParsedArg(self, opt_inst, opt_str, value_str):
"""Add a parsed argument with attributes.
Args:
opt_inst: An instance of a raw optparse.Option object that represents the
option.
opt_str: The option string.
value_str: A list of string-ified values identified by optparse.
"""
self.values.parsed_args.append(PassedOption(opt_inst, opt_str, value_str))
@staticmethod
def FilterArgs(parsed_args, filter_fn):
"""Filter the argument by passing it through a function.
Args:
parsed_args: The list of parsed argument namedtuples to filter. Tuples
are of the form (opt_inst, opt_str, value_str).
filter_fn: A function with signature f(PassedOption) that returns True if
the argument is to be passed through, False if not.
Returns:
A tuple containing two lists - one of accepted arguments and one of
removed arguments.
"""
removed = []
accepted = []
for arg in parsed_args:
target = accepted if filter_fn(arg) else removed
target.append(arg.opt_str)
target.extend(arg.value_str)
return accepted, removed
class SharedParser(argparse.ArgumentParser):
"""A type of parser that may be used as a shared parent for subparsers."""
def __init__(self, **kwargs):
kwargs.setdefault('add_help', False)
argparse.ArgumentParser.__init__(self, **kwargs)
class ArgumentParser(BaseParser, argparse.ArgumentParser):
"""Custom argument parser for use by chromite.
This class additionally exposes logging control by default; if undesired,
either derive from this class setting ALLOW_LOGGING to False, or
pass in logging=False to the constructor.
"""
def __init__(self, usage=None, **kwargs):
kwargs.setdefault('formatter_class', argparse.RawDescriptionHelpFormatter)
BaseParser.__init__(self, **kwargs)
self.PopUsedArgs(kwargs)
argparse.ArgumentParser.__init__(self, usage=usage, **kwargs)
self._SetupTypes()
self.SetupOptions()
self._SetupLocatorOverride()
def _SetupTypes(self):
"""Register types with ArgumentParser."""
for t, check_f in VALID_TYPES.iteritems():
self.register('type', t, check_f)
def _SetupLocatorOverride(self):
"""Create hidden arguments for automatic path-to-locator conversion."""
group = self.add_argument_group('Locators', description=argparse.SUPPRESS)
for option in _LOCATOR_OVERRIDE_OPTIONS:
group.add_argument('--%s-locator-override' % option,
help=argparse.SUPPRESS)
def add_option_group(self, *args, **kwargs):
"""Return an argument group rather than an option group."""
return self.add_argument_group(*args, **kwargs)
@staticmethod
def add_option_to_group(group, *args, **kwargs):
"""Adds an argument rather than an option to the given group."""
return group.add_argument(*args, **kwargs)
def parse_args(self, args=None, namespace=None):
"""Translates OptionParser call to equivalent ArgumentParser call."""
# If no Namespace object is specified then use our custom ArgumentNamespace.
if namespace is None:
namespace = ArgumentNamespace()
# Unlike OptionParser, ArgParser works only with a single namespace and no
# args. Re-use BaseParser DoPostParseSetup but only take the namespace.
namespace = argparse.ArgumentParser.parse_args(
self, args=args, namespace=namespace)
return self.DoPostParseSetup(namespace, None)[0]
class _ShutDownException(SystemExit):
"""Exception raised when user hits CTRL+C."""
def __init__(self, sig_num, message):
self.signal = sig_num
# Setup a usage message primarily for any code that may intercept it
# while this exception is crashing back up the stack to us.
SystemExit.__init__(self, message)
self.args = (sig_num, message)
def _DefaultHandler(signum, _frame):
# Don't double process sigterms; just trigger shutdown from the first
# exception.
signal.signal(signum, signal.SIG_IGN)
raise _ShutDownException(
signum, 'Received signal %i; shutting down' % (signum,))
def _RestartInChroot(cmd, chroot_args):
"""Rerun inside the chroot.
Args:
cmd: Command line to run inside the chroot as a list of strings.
chroot_args: Arguments to pass directly to cros_sdk (or None).
"""
return cros_build_lib.RunCommand(cmd, error_code_ok=True,
enter_chroot=True, chroot_args=chroot_args,
cwd=constants.SOURCE_ROOT,
mute_output=False).returncode
def _AddCliCommandOption(argv, option, value=None):
"""Adds an option to command line |argv|.
Use this to add options to a CliCommand argument list rather than
extending the list directly in order to avoid bugs when using
argparse.REMAINDER.
For example, with `brillo chroot` we want this:
brillo chroot --extra-arg -- ls
not this:
brillo chroot -- ls --extra-arg
Args:
argv: Current argument list; will be modified by this function.
option: New option to add.
value: Option value if required; None to omit.
Returns:
|argv|.
Raises:
ValueError: |option| is not an optional argument.
"""
if not option.startswith('-'):
raise ValueError('"%s" must be an option (starting with -)' % option)
# Insert at index 2 to put the option after the subcommand for readability,
# e.g. `brillo chroot --option` rather than `brillo --option chroot`.
argv.insert(2, option)
if value is not None:
argv.insert(3, value)
return argv
def RunInsideChroot(command, auto_detect_brick=False,
auto_detect_workspace=True, auto_locator_override=True):
"""Restart the current command inside the chroot.
This method is only valid for any code that is run via ScriptWrapperMain.
It allows proper cleanup of the local context by raising an exception handled
in ScriptWrapperMain.
If cwd is in a brick, and --board/--host is not explicitly set, set
--brick explicitly as we might not be able to detect the curr_brick_locator
inside the chroot (cwd will have changed).
Args:
command: An instance of CliCommand to be restarted inside the chroot.
auto_detect_brick: If true, sets --brick explicitly.
auto_detect_workspace: If true, sets up workspace automatically.
auto_locator_override: If true, adds arguments to override absolute
or relative paths with locators for certain options.
"""
if cros_build_lib.IsInsideChroot():
return
# Produce the command line to execute inside the chroot.
argv = sys.argv[:]
argv[0] = path_util.ToChrootPath(argv[0])
target_arg = any(getattr(command.options, arg, None)
for arg in ('blueprint', 'board', 'brick', 'host'))
if auto_detect_brick and not target_arg and command.curr_brick_locator:
_AddCliCommandOption(argv, '--brick', command.curr_brick_locator)
# Provide locators so that paths can be found from inside the chroot.
if auto_locator_override:
for option in _LOCATOR_OVERRIDE_OPTIONS:
path = getattr(command.options, option, None)
if path and not workspace_lib.IsLocator(path):
locator = workspace_lib.PathToLocator(path)
if locator:
_AddCliCommandOption(argv, '--%s-locator-override' % option, locator)
# Enter the chroot for the workspace, if we are in a workspace.
chroot_args = None
if auto_detect_workspace:
workspace_path = workspace_lib.WorkspacePath()
if workspace_path:
chroot_args = ['--chroot', workspace_lib.ChrootPath(workspace_path),
'--workspace', workspace_path]
raise ChrootRequiredError(argv, chroot_args)
def ReExec():
"""Restart the current command.
This method is only valid for code that is run via ScriptWrapperMain.
It allows proper cleanup of the local context by raising an exception handled
in ScriptWrapperMain.
"""
# The command to exec.
raise ExecRequiredError(sys.argv[:])
def ScriptWrapperMain(find_target_func, argv=None,
log_level=logging.DEBUG,
log_format=constants.LOGGER_FMT):
"""Function usable for chromite.script.* style wrapping.
Note that this function invokes sys.exit on the way out by default.
Args:
find_target_func: a function which, when given the absolute
path the script was invoked via (for example,
/home/ferringb/cros/trunk/chromite/bin/cros_sdk; note that any
trailing .py from the path name will be removed),
will return the main function to invoke (that functor will take
a single arg, a list of arguments, and shall return either None
or an integer to indicate the exit code).
argv: sys.argv, or an equivalent tuple for testing. If nothing is
given, sys.argv is defaulted to.
log_level: Default logging level to start at.
log_format: Default logging format to use.
"""
if argv is None:
argv = sys.argv[:]
target = os.path.abspath(argv[0])
name = os.path.basename(target)
if target.endswith('.py'):
target = os.path.splitext(target)[0]
target = find_target_func(target)
if target is None:
print('Internal error detected- no main functor found in module %r.' %
(name,), file=sys.stderr)
sys.exit(100)
# Set up basic logging information for all modules that use logging.
# Note a script target may set up default logging in its module namespace
# which will take precedence over this.
logger = logging.getLogger()
logger.setLevel(log_level)
logger_handler = ChromiteStreamHandler()
logger_handler.setFormatter(
logging.Formatter(fmt=log_format, datefmt=constants.LOGGER_DATE_FMT))
logger.addHandler(logger_handler)
signal.signal(signal.SIGTERM, _DefaultHandler)
ret = 1
try:
ret = target(argv[1:])
except _ShutDownException as e:
sys.stdout.flush()
print('%s: Signaled to shutdown: caught %i signal.' % (name, e.signal),
file=sys.stderr)
sys.stderr.flush()
except SystemExit as e:
# Right now, let this crash through- longer term, we'll update the scripts
# in question to not use sys.exit, and make this into a flagged error.
raise
except ChrootRequiredError as e:
ret = _RestartInChroot(e.cmd, e.chroot_args)
except ExecRequiredError as e:
logging.shutdown()
# This does not return.
os.execv(e.cmd[0], e.cmd)
except Exception as e:
sys.stdout.flush()
print('%s: Unhandled exception:' % (name,), file=sys.stderr)
sys.stderr.flush()
raise
finally:
logging.shutdown()
if ret is None:
ret = 0
sys.exit(ret)
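# Illustrative sketch (not part of the original module): the find_target_func
# contract used by ScriptWrapperMain. The functor below is hypothetical; a real
# wrapper imports the module matching the invoked path and returns its main().
def _ExampleFindTarget(_target_path):
  """Return a trivial main functor: argv in, exit code (or None) out."""
  def _Main(argv):
    print('would run with args: %r' % (argv,))
    return 0
  return _Main
# A wrapper script would then call (note ScriptWrapperMain exits the process):
#   ScriptWrapperMain(_ExampleFindTarget)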
|
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Determining whether files are being measured/reported or not."""
import importlib.util
import inspect
import itertools
import os
import platform
import re
import sys
import sysconfig
import traceback
from coverage import env
from coverage.disposition import FileDisposition, disposition_init
from coverage.exceptions import CoverageException
from coverage.files import TreeMatcher, FnmatchMatcher, ModuleMatcher
from coverage.files import prep_patterns, find_python_files, canonical_filename
from coverage.misc import sys_modules_saved
from coverage.python import source_for_file, source_for_morf
# Pypy has some unusual stuff in the "stdlib". Consider those locations
# when deciding where the stdlib is. These modules are not used for anything,
# they are modules importable from the pypy lib directories, so that we can
# find those directories.
_structseq = _pypy_irc_topic = None
if env.PYPY:
try:
import _structseq
except ImportError:
pass
try:
import _pypy_irc_topic
except ImportError:
pass
def canonical_path(morf, directory=False):
"""Return the canonical path of the module or file `morf`.
If the module is a package, then return its directory. If it is a
module, then return its file, unless `directory` is True, in which
case return its enclosing directory.
"""
morf_path = canonical_filename(source_for_morf(morf))
if morf_path.endswith("__init__.py") or directory:
morf_path = os.path.split(morf_path)[0]
return morf_path
def name_for_module(filename, frame):
"""Get the name of the module for a filename and frame.
For configurability's sake, we allow __main__ modules to be matched by
their importable name.
If loaded via runpy (aka -m), we can usually recover the "original"
full dotted module name; otherwise, we resort to interpreting the
file name to get the module's name. If the module name can't be
determined, None is returned.
"""
module_globals = frame.f_globals if frame is not None else {}
if module_globals is None: # pragma: only ironpython
# IronPython doesn't provide globals: https://github.com/IronLanguages/main/issues/1296
module_globals = {}
dunder_name = module_globals.get('__name__', None)
if isinstance(dunder_name, str) and dunder_name != '__main__':
# This is the usual case: an imported module.
return dunder_name
loader = module_globals.get('__loader__', None)
for attrname in ('fullname', 'name'): # attribute renamed in py3.2
if hasattr(loader, attrname):
fullname = getattr(loader, attrname)
else:
continue
if isinstance(fullname, str) and fullname != '__main__':
# Module loaded via: runpy -m
return fullname
# Script as first argument to Python command line.
inspectedname = inspect.getmodulename(filename)
if inspectedname is not None:
return inspectedname
else:
return dunder_name
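# Small illustrative helper (not part of the original file): with no frame
# available, name_for_module falls back to interpreting the file name. The path
# below is hypothetical.
def _example_name_for_module():
    """Show the filename fallback documented above."""
    # inspect.getmodulename() strips the extension from the file name.
    return name_for_module("somewhere/mything.py", None)  # -> "mything"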
def module_is_namespace(mod):
"""Is the module object `mod` a PEP420 namespace module?"""
return hasattr(mod, '__path__') and getattr(mod, '__file__', None) is None
def module_has_file(mod):
"""Does the module object `mod` have an existing __file__ ?"""
mod__file__ = getattr(mod, '__file__', None)
if mod__file__ is None:
return False
return os.path.exists(mod__file__)
def file_and_path_for_module(modulename):
"""Find the file and search path for `modulename`.
Returns:
filename: The filename of the module, or None.
path: A list (possibly empty) of directories to find submodules in.
"""
filename = None
path = []
try:
spec = importlib.util.find_spec(modulename)
except ImportError:
pass
else:
if spec is not None:
if spec.origin != "namespace":
filename = spec.origin
path = list(spec.submodule_search_locations or ())
return filename, path
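# A small illustrative helper (not part of the original file) showing the
# (filename, path) contract of file_and_path_for_module for a package versus a
# plain module. Both module names are stdlib and used only for demonstration.
def _example_file_and_path_for_module():
    """Demonstrate the return values documented above."""
    # For a package, `path` lists the directories to search for submodules.
    pkg_file, pkg_path = file_and_path_for_module("email")
    assert pkg_path, "a package should report submodule search locations"
    # For a plain module, `path` is typically empty and `filename` is its .py file.
    mod_file, mod_path = file_and_path_for_module("os")
    return (pkg_file, pkg_path), (mod_file, mod_path)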
def add_stdlib_paths(paths):
"""Add paths where the stdlib can be found to the set `paths`."""
# Look at where some standard modules are located. That's the
# indication for "installed with the interpreter". In some
# environments (virtualenv, for example), these modules may be
# spread across a few locations. Look at all the candidate modules
# we've imported, and take all the different ones.
modules_we_happen_to_have = [
inspect, itertools, os, platform, re, sysconfig, traceback,
_pypy_irc_topic, _structseq,
]
for m in modules_we_happen_to_have:
if m is not None and hasattr(m, "__file__"):
paths.add(canonical_path(m, directory=True))
if _structseq and not hasattr(_structseq, '__file__'):
# PyPy 2.4 has no __file__ in the builtin modules, but the code
# objects still have the file names. So dig into one to find
# the path to exclude. The "filename" might be synthetic,
# don't be fooled by those.
structseq_file = _structseq.structseq_new.__code__.co_filename
if not structseq_file.startswith("<"):
paths.add(canonical_path(structseq_file))
def add_third_party_paths(paths):
"""Add locations for third-party packages to the set `paths`."""
# Get the paths that sysconfig knows about.
scheme_names = set(sysconfig.get_scheme_names())
for scheme in scheme_names:
# https://foss.heptapod.net/pypy/pypy/-/issues/3433
better_scheme = "pypy_posix" if scheme == "pypy" else scheme
if os.name in better_scheme.split("_"):
config_paths = sysconfig.get_paths(scheme)
for path_name in ["platlib", "purelib", "scripts"]:
paths.add(config_paths[path_name])
def add_coverage_paths(paths):
"""Add paths where coverage.py code can be found to the set `paths`."""
cover_path = canonical_path(__file__, directory=True)
paths.add(cover_path)
if env.TESTING:
# Don't include our own test code.
paths.add(os.path.join(cover_path, "tests"))
# When testing, we use PyContracts, which should be considered
# part of coverage.py, and it uses six. Exclude those directories
# just as we exclude ourselves.
import contracts
import six
for mod in [contracts, six]:
paths.add(canonical_path(mod))
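# Illustrative sketch (not part of the original file): the three helpers above
# are combined into one set of directories whose files should not be traced.
def _example_untraced_paths():
    """Collect stdlib, third-party and coverage.py locations into one set."""
    paths = set()
    add_stdlib_paths(paths)        # directories "installed with the interpreter"
    add_third_party_paths(paths)   # sysconfig purelib/platlib/scripts locations
    add_coverage_paths(paths)      # the directory coverage.py itself lives in
    return paths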
class InOrOut:
"""Machinery for determining what files to measure."""
def __init__(self, warn, debug):
self.warn = warn
self.debug = debug
# The matchers for should_trace.
self.source_match = None
self.source_pkgs_match = None
self.pylib_paths = self.cover_paths = self.third_paths = None
self.pylib_match = self.cover_match = self.third_match = None
self.include_match = self.omit_match = None
self.plugins = []
self.disp_class = FileDisposition
# The source argument can be directories or package names.
self.source = []
self.source_pkgs = []
self.source_pkgs_unmatched = []
self.omit = self.include = None
# Is the source inside a third-party area?
self.source_in_third = False
def configure(self, config):
"""Apply the configuration to get ready for decision-time."""
self.source_pkgs.extend(config.source_pkgs)
for src in config.source or []:
if os.path.isdir(src):
self.source.append(canonical_filename(src))
else:
self.source_pkgs.append(src)
self.source_pkgs_unmatched = self.source_pkgs[:]
self.omit = prep_patterns(config.run_omit)
self.include = prep_patterns(config.run_include)
# The directories for files considered "installed with the interpreter".
self.pylib_paths = set()
if not config.cover_pylib:
add_stdlib_paths(self.pylib_paths)
# To avoid tracing the coverage.py code itself, we skip anything
# located where we are.
self.cover_paths = set()
add_coverage_paths(self.cover_paths)
# Find where third-party packages are installed.
self.third_paths = set()
add_third_party_paths(self.third_paths)
def debug(msg):
if self.debug:
self.debug.write(msg)
# Create the matchers we need for should_trace
if self.source or self.source_pkgs:
against = []
if self.source:
self.source_match = TreeMatcher(self.source, "source")
against.append(f"trees {self.source_match!r}")
if self.source_pkgs:
self.source_pkgs_match = ModuleMatcher(self.source_pkgs, "source_pkgs")
against.append(f"modules {self.source_pkgs_match!r}")
debug("Source matching against " + " and ".join(against))
else:
if self.pylib_paths:
self.pylib_match = TreeMatcher(self.pylib_paths, "pylib")
debug(f"Python stdlib matching: {self.pylib_match!r}")
if self.include:
self.include_match = FnmatchMatcher(self.include, "include")
debug(f"Include matching: {self.include_match!r}")
if self.omit:
self.omit_match = FnmatchMatcher(self.omit, "omit")
debug(f"Omit matching: {self.omit_match!r}")
self.cover_match = TreeMatcher(self.cover_paths, "coverage")
debug(f"Coverage code matching: {self.cover_match!r}")
self.third_match = TreeMatcher(self.third_paths, "third")
debug(f"Third-party lib matching: {self.third_match!r}")
# Check if the source we want to measure has been installed as a
# third-party package.
with sys_modules_saved():
for pkg in self.source_pkgs:
try:
modfile, path = file_and_path_for_module(pkg)
debug(f"Imported source package {pkg!r} as {modfile!r}")
except CoverageException as exc:
debug(f"Couldn't import source package {pkg!r}: {exc}")
continue
if modfile:
if self.third_match.match(modfile):
debug(
f"Source is in third-party because of source_pkg {pkg!r} at {modfile!r}"
)
self.source_in_third = True
else:
for pathdir in path:
if self.third_match.match(pathdir):
debug(
f"Source is in third-party because of {pkg!r} path directory " +
f"at {pathdir!r}"
)
self.source_in_third = True
for src in self.source:
if self.third_match.match(src):
debug(f"Source is in third-party because of source directory {src!r}")
self.source_in_third = True
def should_trace(self, filename, frame=None):
"""Decide whether to trace execution in `filename`, with a reason.
This function is called from the trace function. As each new file name
is encountered, this function determines whether it is traced or not.
Returns a FileDisposition object.
"""
original_filename = filename
disp = disposition_init(self.disp_class, filename)
def nope(disp, reason):
"""Simple helper to make it easy to return NO."""
disp.trace = False
disp.reason = reason
return disp
if original_filename.startswith('<'):
return nope(disp, "not a real original file name")
if frame is not None:
# Compiled Python files have two file names: frame.f_code.co_filename is
# the file name at the time the .pyc was compiled. The second name is
# __file__, which is where the .pyc was actually loaded from. Since
# .pyc files can be moved after compilation (for example, by being
# installed), we look for __file__ in the frame and prefer it to the
# co_filename value.
dunder_file = frame.f_globals and frame.f_globals.get('__file__')
if dunder_file:
filename = source_for_file(dunder_file)
if original_filename and not original_filename.startswith('<'):
orig = os.path.basename(original_filename)
if orig != os.path.basename(filename):
# Files shouldn't be renamed when moved. This happens when
# exec'ing code. If it seems like something is wrong with
# the frame's file name, then just use the original.
filename = original_filename
if not filename:
# Empty string is pretty useless.
return nope(disp, "empty string isn't a file name")
if filename.startswith('memory:'):
return nope(disp, "memory isn't traceable")
if filename.startswith('<'):
# Lots of non-file execution is represented with artificial
# file names like "<string>", "<doctest readme.txt[0]>", or
# "<exec_function>". Don't ever trace these executions, since we
# can't do anything with the data later anyway.
return nope(disp, "not a real file name")
# Jython reports the .class file to the tracer, use the source file.
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
canonical = canonical_filename(filename)
disp.canonical_filename = canonical
# Try the plugins, see if they have an opinion about the file.
plugin = None
for plugin in self.plugins.file_tracers:
if not plugin._coverage_enabled:
continue
try:
file_tracer = plugin.file_tracer(canonical)
if file_tracer is not None:
file_tracer._coverage_plugin = plugin
disp.trace = True
disp.file_tracer = file_tracer
if file_tracer.has_dynamic_source_filename():
disp.has_dynamic_filename = True
else:
disp.source_filename = canonical_filename(
file_tracer.source_filename()
)
break
except Exception:
plugin_name = plugin._coverage_plugin_name
tb = traceback.format_exc()
self.warn(f"Disabling plug-in {plugin_name!r} due to an exception:\n{tb}")
plugin._coverage_enabled = False
continue
else:
# No plugin wanted it: it's Python.
disp.trace = True
disp.source_filename = canonical
if not disp.has_dynamic_filename:
if not disp.source_filename:
raise CoverageException(
f"Plugin {plugin!r} didn't set source_filename for '{disp.original_filename}'"
)
reason = self.check_include_omit_etc(disp.source_filename, frame)
if reason:
nope(disp, reason)
return disp
def check_include_omit_etc(self, filename, frame):
"""Check a file name against the include, omit, etc, rules.
Returns a string or None. String means, don't trace, and is the reason
why. None means no reason found to not trace.
"""
modulename = name_for_module(filename, frame)
# If the user specified source or include, then that's authoritative
# about the outer bound of what to measure and we don't have to apply
# any canned exclusions. If they didn't, then we have to exclude the
# stdlib and coverage.py directories.
if self.source_match or self.source_pkgs_match:
extra = ""
ok = False
if self.source_pkgs_match:
if self.source_pkgs_match.match(modulename):
ok = True
if modulename in self.source_pkgs_unmatched:
self.source_pkgs_unmatched.remove(modulename)
else:
extra = f"module {modulename!r} "
if not ok and self.source_match:
if self.source_match.match(filename):
ok = True
if not ok:
return extra + "falls outside the --source spec"
if not self.source_in_third:
if self.third_match.match(filename):
return "inside --source, but is third-party"
elif self.include_match:
if not self.include_match.match(filename):
return "falls outside the --include trees"
else:
# We exclude the coverage.py code itself, since a little of it
# will be measured otherwise.
if self.cover_match.match(filename):
return "is part of coverage.py"
# If we aren't supposed to trace installed code, then check if this
# is near the Python standard library and skip it if so.
if self.pylib_match and self.pylib_match.match(filename):
return "is in the stdlib"
# Exclude anything in the third-party installation areas.
if self.third_match.match(filename):
return "is a third-party module"
# Check the file against the omit pattern.
if self.omit_match and self.omit_match.match(filename):
return "is inside an --omit pattern"
# No point tracing a file we can't later write to SQLite.
try:
filename.encode("utf-8")
except UnicodeEncodeError:
return "non-encodable filename"
# No reason found to skip this file.
return None
def warn_conflicting_settings(self):
"""Warn if there are settings that conflict."""
if self.include:
if self.source or self.source_pkgs:
self.warn("--include is ignored because --source is set", slug="include-ignored")
def warn_already_imported_files(self):
"""Warn if files have already been imported that we will be measuring."""
if self.include or self.source or self.source_pkgs:
warned = set()
for mod in list(sys.modules.values()):
filename = getattr(mod, "__file__", None)
if filename is None:
continue
if filename in warned:
continue
if len(getattr(mod, "__path__", ())) > 1:
# A namespace package, which confuses this code, so ignore it.
continue
disp = self.should_trace(filename)
if disp.has_dynamic_filename:
# A plugin with dynamic filenames: the Python file
# shouldn't cause a warning, since it won't be the subject
# of tracing anyway.
continue
if disp.trace:
msg = f"Already imported a file that will be measured: {filename}"
self.warn(msg, slug="already-imported")
warned.add(filename)
elif self.debug and self.debug.should('trace'):
self.debug.write(
"Didn't trace already imported file {!r}: {}".format(
disp.original_filename, disp.reason
)
)
def warn_unimported_source(self):
"""Warn about source packages that were of interest, but never traced."""
for pkg in self.source_pkgs_unmatched:
self._warn_about_unmeasured_code(pkg)
def _warn_about_unmeasured_code(self, pkg):
"""Warn about a package or module that we never traced.
`pkg` is a string, the name of the package or module.
"""
mod = sys.modules.get(pkg)
if mod is None:
self.warn(f"Module {pkg} was never imported.", slug="module-not-imported")
return
if module_is_namespace(mod):
# A namespace package. It's OK for this not to have been traced,
# since there is no code directly in it.
return
if not module_has_file(mod):
self.warn(f"Module {pkg} has no Python source.", slug="module-not-python")
return
# The module was in sys.modules, and seems like a module with code, but
# we never measured it. I guess that means it was imported before
# coverage even started.
msg = f"Module {pkg} was previously imported, but not measured"
self.warn(msg, slug="module-not-measured")
def find_possibly_unexecuted_files(self):
"""Find files in the areas of interest that might be untraced.
Yields pairs: file path, and responsible plug-in name.
"""
for pkg in self.source_pkgs:
if (pkg not in sys.modules or
not module_has_file(sys.modules[pkg])):
continue
pkg_file = source_for_file(sys.modules[pkg].__file__)
yield from self._find_executable_files(canonical_path(pkg_file))
for src in self.source:
yield from self._find_executable_files(src)
def _find_plugin_files(self, src_dir):
"""Get executable files from the plugins."""
for plugin in self.plugins.file_tracers:
for x_file in plugin.find_executable_files(src_dir):
yield x_file, plugin._coverage_plugin_name
def _find_executable_files(self, src_dir):
"""Find executable files in `src_dir`.
Search for files in `src_dir` that can be executed because they
are probably importable. Don't include ones that have been omitted
by the configuration.
Yield the file path, and the plugin name that handles the file.
"""
py_files = ((py_file, None) for py_file in find_python_files(src_dir))
plugin_files = self._find_plugin_files(src_dir)
for file_path, plugin_name in itertools.chain(py_files, plugin_files):
file_path = canonical_filename(file_path)
if self.omit_match and self.omit_match.match(file_path):
# Turns out this file was omitted, so don't pull it back
# in as unexecuted.
continue
yield file_path, plugin_name
def sys_info(self):
"""Our information for Coverage.sys_info.
Returns a list of (key, value) pairs.
"""
info = [
("coverage_paths", self.cover_paths),
("stdlib_paths", self.pylib_paths),
("third_party_paths", self.third_paths),
]
matcher_names = [
'source_match', 'source_pkgs_match',
'include_match', 'omit_match',
'cover_match', 'pylib_match', 'third_match',
]
for matcher_name in matcher_names:
matcher = getattr(self, matcher_name)
if matcher:
matcher_info = matcher.info()
else:
matcher_info = '-none-'
info.append((matcher_name, matcher_info))
return info
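# Illustrative sketch (not part of the original file): driving InOrOut directly.
# The FakeConfig/FakePlugins stubs are hypothetical and only carry the attributes
# configure() and should_trace() read; real callers pass a coverage.py
# configuration object and a Plugins collection.
def _example_in_or_out():
    """Configure an InOrOut and ask whether one file should be traced."""
    class FakeConfig:
        source = ["."]              # measure the current directory tree
        source_pkgs = []
        run_omit = ["*/tests/*"]    # skip anything under a tests/ directory
        run_include = []
        cover_pylib = False
    class FakePlugins:
        file_tracers = []           # should_trace() iterates plugin file tracers
    inout = InOrOut(warn=lambda msg, slug=None: print(msg), debug=None)
    inout.plugins = FakePlugins()
    inout.configure(FakeConfig())
    disp = inout.should_trace(os.path.abspath("example.py"))
    return disp.trace, disp.reason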
|
|
# Copyright 2013 Georgia Tech Research Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# http://healthcare-robotics.com/
## @package hrl_haptic_mpc
#
# @author Jeff Hawke
# @version 0.1
# @copyright Apache 2.0
import roslib
roslib.load_manifest("hrl_haptic_mpc")
import rospy
import tf
import hrl_lib.transforms as tr
import hrl_haptic_manipulation_in_clutter_msgs.msg as haptic_msgs
import geometry_msgs.msg
import std_msgs.msg
import numpy as np
import threading, copy
import sys
## Provides an interface to process TaxelArrays published on multiple topics, including trimming the data and transforming the reference frame
class TaxelArrayClient():
## Constructor
# @param skin_topic_list List of strings specifying the topics to subscribe to for TaxelArray messages
# @param torso_frame String indicating which frame ID is the base frame for the arm
# @param tf_listener TF Listener object, if one exists. If this is None, the class will create one.
def __init__(self, skin_topic_list, torso_frame="/torso_lift_link", tf_listener=None):
## Lock for skin data structure.
self.data_lock = threading.RLock()
## Lock for skin data topics.
self.topic_lock = threading.RLock()
## torso_frame Torso frame ID used as the base frame for the associated arm, eg, "/torso_lift_link"
self.torso_frame = torso_frame
## Threshold used to control how the data is trimmed - should be set by whatever instantiates a TaxelArrayClient. Default: 0.0 (i.e., trim nothing)
self.trim_threshold = 0.0
## Dictionary containing the skin subscribers, indexed by topic name
self.skin_subs = {}
## Dictionary containing the raw TaxelArray messages heard by the client, indexed by topic name
self.skin_data = {}
## Dictionary containing the processed TaxelArray messages heard by the client, indexed by topic name
self.trimmed_skin_data = {}
try:
if tf_listener is None:
## TF Listener
self.tf_lstnr = tf.TransformListener()
else:
## TF Listener
self.tf_lstnr = tf_listener
except rospy.ServiceException as e:
rospy.loginfo("ServiceException caught while instantiating a TF listener. Seems to be normal")
pass
## List of skin topics used by the client
self.skin_topic_list = skin_topic_list
# Initialise a subscriber for each topic in the list
for skin_topic in self.skin_topic_list:
self.addSkinTopic(skin_topic)
## Add skin topic subscriber.
rospy.Subscriber("/haptic_mpc/add_taxel_array", std_msgs.msg.String, self.addSkinTopicCallback)
## Remove skin topic subscriber
rospy.Subscriber("/haptic_mpc/remove_taxel_array", std_msgs.msg.String, self.removeSkinTopicCallback)
## Current topics ROS publisher. Publishes the list of topics on change.
self.current_topics_pub = rospy.Publisher("/haptic_mpc/skin_topics", haptic_msgs.StringArray, latch=True)
self.current_topics_pub.publish(self.skin_data.keys())
## Set the trim threshold used by the client. Should be greater or equal to 0.0.
# @param threshold Desired threshold. Should be greater than or equal to 0.0.
def setTrimThreshold(self, threshold):
self.trim_threshold = threshold
## Callback function which adds a skin topic.
# @param msg std_msgs/String message.
def addSkinTopicCallback(self, msg):
rospy.loginfo("Adding skin TaxelArray topic: %s" % str(msg.data))
self.addSkinTopic(msg.data)
rospy.loginfo("Current skin topics: \n%s", str(self.skin_subs.keys()))
self.current_topics_pub.publish(self.skin_subs.keys())
## Callback function to remove a skin topic.
# @param msg StringArray message.
def removeSkinTopicCallback(self, msg):
rospy.loginfo("Removing skin TaxelArray topic: %s" % str(msg.data))
self.removeSkinTopic(msg.data)
rospy.loginfo("Current skin topics: \n%s", str(self.skin_subs.keys()))
self.current_topics_pub.publish(self.skin_subs.keys())
## Add skin topic to internal data structures.
# @param skin_topic String specifying the topic to be added.
def addSkinTopic(self, skin_topic):
if skin_topic in self.skin_subs.keys():
return
with self.topic_lock:
self.skin_topic_list.append(skin_topic)
self.skin_data[skin_topic] = haptic_msgs.TaxelArray()
self.skin_subs[skin_topic] = rospy.Subscriber(skin_topic, haptic_msgs.TaxelArray, self.skinCallback, skin_topic)
## Remove skin topic from internal data structures.
# @param skin_topic String specifying the topic to be removed.
def removeSkinTopic(self, skin_topic):
if skin_topic not in self.skin_subs.keys():
rospy.loginfo("Skin topic not found")
return
with self.topic_lock:
self.skin_topic_list.remove(skin_topic)
self.skin_data.pop(skin_topic)
self.skin_subs[skin_topic].unregister()
self.skin_subs.pop(skin_topic)
## Skin Callback. Store the message in the data dictionary, indexed by topic.
# Keeps the raw data in dictionary 'skin_data' and the transformed, trimmed data in 'trimmed_skin_data'
# @param msg TaxelArray message object
# @param skin_topic The topic name triggering the callback. Used to identify what sensor the TaxelArray came from (as there may be multiple publishers running)
def skinCallback(self, msg, skin_topic):
with self.data_lock:
#self.skin_data[skin_topic] = msg # Data should be of type TaxelArray
# DIRTY DIRTY DIRTY HACK to ignore pr2 wrist taxel.
# if self.joint_angles and self.joint_angles[5] < np.radians(-90.0):
# #print self.joint_angles
# # Potentially also 10 - middle forearm, 13/19 - edges
# if skin_topic =="/pr2_fabric_forearm_sensor/taxels/forces":
# #print "trimming value 16"
# #print msg
# msg.values_x = list(msg.values_x)
# msg.values_y = list(msg.values_y)
# msg.values_z = list(msg.values_z)
# msg.values_x[16] = 0.0
# msg.values_y[16] = 0.0
# msg.values_z[16] = 0.0
#
# #msg.values_x[10] = 0.0
# #msg.values_y[10] = 0.0
# #msg.values_z[10] = 0.0
#
# msg.values_x[13] = 0.0
# msg.values_y[13] = 0.0
# msg.values_z[13] = 0.0
#
# msg.values_x[19] = 0.0
# msg.values_y[19] = 0.0
# msg.values_z[19] = 0.0
trimmed_msg = self.trimTaxelArray(msg, self.trim_threshold)
transformed_msg = self.transformTaxelArray(trimmed_msg, self.torso_frame)
self.trimmed_skin_data[skin_topic] = transformed_msg
transformed_full_msg = self.transformTaxelArray(msg, self.torso_frame)
self.skin_data[skin_topic] = transformed_full_msg
## Transform a single taxel array message from one frame to another
# @param ta_msg TaxelArray message object to be transformed
# @param new_frame The desired frame name
# @return The transformed message with all values in the new coordinate frame.
def transformTaxelArray(self, ta_msg, new_frame):
# Get the transformation from the desired frame to current frame
if ta_msg.header.frame_id == "":
return ta_msg
self.tf_lstnr.waitForTransform(new_frame, ta_msg.header.frame_id, rospy.Time(0), rospy.Duration(4.0))
t1, q1 = self.tf_lstnr.lookupTransform(new_frame, ta_msg.header.frame_id, rospy.Time(0))
t1 = np.matrix(t1).reshape(3,1)
r1 = tr.quaternion_to_matrix(q1)
# Create new message data structure
new_ta_msg = copy.copy(ta_msg)
new_ta_msg.header.frame_id = new_frame
# Perform the transformation
pts = np.column_stack((ta_msg.centers_x, ta_msg.centers_y, ta_msg.centers_z))
nrmls = np.column_stack((ta_msg.normals_x, ta_msg.normals_y, ta_msg.normals_z))
values = np.column_stack((ta_msg.values_x, ta_msg.values_y, ta_msg.values_z))
pts = r1 * np.matrix(pts).T + t1
nrmls = r1 * np.matrix(nrmls).T
values = r1 * np.matrix(values).T
# Reformat the transformed data to be repackaged as a TaxelArray message
pts_array = np.asarray(pts)
nrmls_array = np.asarray(nrmls)
values_array = np.asarray(values)
new_ta_msg.centers_x = pts_array[0, :].tolist()
new_ta_msg.centers_y = pts_array[1, :].tolist()
new_ta_msg.centers_z = pts_array[2, :].tolist()
new_ta_msg.normals_x = nrmls_array[0, :].tolist()
new_ta_msg.normals_y = nrmls_array[1, :].tolist()
new_ta_msg.normals_z = nrmls_array[2, :].tolist()
new_ta_msg.values_x = values_array[0, :].tolist()
new_ta_msg.values_y = values_array[1, :].tolist()
new_ta_msg.values_z = values_array[2, :].tolist()
return new_ta_msg
## Return a trimmed copy of the skin_data dictionary. Each TaxelArray within the structure will be trimmed.
# @param threshold Threshold parameter (float greater than 0.0)
def trimSkinContacts(self, threshold):
with self.data_lock:
skin_data = copy.copy(self.skin_data)
for ta_topic in skin_data.keys():
skin_data[ta_topic] = self.trimTaxelArray(skin_data[ta_topic], threshold)
with self.data_lock:
self.trimmed_skin_data = skin_data
return skin_data
## Trim a passed TaxelArray to only incorporate forces of significance.
# Returns a trimmed TaxelArray message object with forces of magnitude > threshold. The data is otherwise unchanged.
def trimTaxelArray(self, ta_msg, threshold):
if threshold < 0.0:
rospy.logerr("SkinClient Error: Threshold passed to trimContacts must be >= 0.0")
return ta_msg
# Copy the message info data
new_ta_msg = haptic_msgs.TaxelArray()
new_ta_msg.header = copy.copy(ta_msg.header)
new_ta_msg.sensor_type = copy.copy(ta_msg.sensor_type)
# For each taxel entry in the TaxelArray, check if the force (or distance) is greater than the threshold
for i in range(0, len(ta_msg.centers_x)):
# Avoid overflow
if abs(ta_msg.values_x[i])<1e-6:
values_x = 0.0
else:
values_x = ta_msg.values_x[i]
if abs(ta_msg.values_y[i])<1e-6:
values_y = 0.0
else:
values_y = ta_msg.values_y[i]
if abs(ta_msg.values_z[i])<1e-6:
values_z = 0.0
else:
values_z = ta_msg.values_z[i]
magnitude = np.sqrt(values_x*values_x + values_y*values_y + values_z*values_z)
threshold_valid = False
if ta_msg.sensor_type == "force" and (magnitude >= threshold or magnitude <= -threshold):
threshold_valid = True
elif ta_msg.sensor_type == "distance" and (magnitude <= abs(threshold)):
threshold_valid = True
elif ta_msg.sensor_type == '' and (magnitude >= threshold or magnitude <= -threshold): # If nothing is set, treat it as force
threshold_valid = True
if threshold_valid:
# Copy the values to the new data structure
new_ta_msg.values_x.append(ta_msg.values_x[i])
new_ta_msg.values_y.append(ta_msg.values_y[i])
new_ta_msg.values_z.append(ta_msg.values_z[i])
new_ta_msg.centers_x.append(ta_msg.centers_x[i])
new_ta_msg.centers_y.append(ta_msg.centers_y[i])
new_ta_msg.centers_z.append(ta_msg.centers_z[i])
new_ta_msg.normals_x.append(ta_msg.normals_x[i])
new_ta_msg.normals_y.append(ta_msg.normals_y[i])
new_ta_msg.normals_z.append(ta_msg.normals_z[i])
if len(ta_msg.contact_cost) > 0:
new_ta_msg.contact_cost.append(ta_msg.contact_cost[i])
# TODO SURVY: Persist the id of this taxel too.
if i < len(ta_msg.link_names): # Some taxel arrays weren't publishing a link name list. Check if this exists.
new_ta_msg.link_names.append(ta_msg.link_names[i])
return new_ta_msg
## getSkinData accessor function
# Returns a copy of the skin_data dictionary
def getSkinData(self):
with self.data_lock:
return copy.copy(self.skin_data)
## getTrimmedSkinData accessor function
# Returns a copy of the trimmed_skin_data dictionary
def getTrimmedSkinData(self):
with self.data_lock:
return copy.copy(self.trimmed_skin_data)
# Returns a list of Point objects, each of which corresponds to a taxel relative to the arm's base link frame.
# @param ta_msg TaxelArray message type
# @return
# TODO REMOVE THIS - unused?
def getContactLocationsFromTaxelArray(self, ta_msg):
points_list = []
for i in range(0, len(ta_msg.centers_x)):
point_vector = np.matrix([ta_msg.centers_x[i], ta_msg.centers_y[i], ta_msg.centers_z[i]]).T
points_list.append(point_vector)
return points_list
## Returns a list of taxel locations and a list of joint numbers after which the
# joint torque will have no effect on the contact force, and optionally a time stamp.
# Must be implemented by every robot-specific skin client.
def getTaxelLocationAndJointList(self):
raise RuntimeError('Unimplemented function.')
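# Illustrative usage sketch (not part of the original module). The node name and
# skin topic below are hypothetical, and a running ROS master, TF tree and
# TaxelArray publisher are assumed.
def _example_taxel_array_client():
    rospy.init_node("taxel_array_client_example")
    skin_topics = ["/pr2_fabric_forearm_sensor/taxels/forces"]
    client = TaxelArrayClient(skin_topics, torso_frame="/torso_lift_link")
    client.setTrimThreshold(0.5)   # drop taxel readings with magnitude below 0.5
    rospy.sleep(1.0)               # allow some TaxelArray messages to arrive
    return client.getTrimmedSkinData()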
|
|
# -*- coding:utf-8 -*-
import datetime
import decimal
import warnings
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends import utils
from django.utils import six, timezone
from django.utils.dateparse import parse_duration
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
# Integer field safe ranges by `internal_type` as documented
# in docs/ref/models/fields.txt.
integer_field_ranges = {
'SmallIntegerField': (-32768, 32767),
'IntegerField': (-2147483648, 2147483647),
'BigIntegerField': (-9223372036854775808, 9223372036854775807),
'PositiveSmallIntegerField': (0, 32767),
'PositiveIntegerField': (0, 2147483647),
}
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
return len(objs)
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
culling.
"""
return "SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s"
def unification_cast_sql(self, output_field):
"""
Given a field instance, returns the SQL necessary to cast the result of
a union to that type. Note that the resulting string should contain a
'%s' placeholder for the expression being cast.
"""
return '%s'
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
def date_interval_sql(self, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_trunc_sql() method')
def datetime_cast_date_sql(self, field_name, tzname):
"""
Returns the SQL necessary to cast a datetime value to date value.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date_sql() method')
def datetime_extract_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that extracts a value from the given
datetime field field_name, and a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_extract_sql() method')
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
"""
Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute' or
'second', returns the SQL that truncates the given datetime field
field_name to a datetime object with only the given specificity, and
a tuple of parameters.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')
def time_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', returns the SQL
that extracts a value from the given time field field_name.
"""
return self.date_extract_sql(lookup_type, field_name)
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
# Convert params to contain Unicode values.
to_unicode = lambda s: force_text(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple(to_unicode(val) for val in params)
elif params is None:
u_params = ()
else:
u_params = {to_unicode(k): to_unicode(v) for k, v in params.items()}
return six.text_type("QUERY = %r - PARAMS = %r") % (sql, u_params)
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc.). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we want "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def prepare_sql_script(self, sql):
"""
Takes an SQL script that may contain multiple lines and returns a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
cursor.execute() call and PEP 249 doesn't talk about this use case,
the default implementation is conservative.
"""
try:
import sqlparse
except ImportError:
raise ImproperlyConfigured(
"sqlparse is required if you don't split your SQL "
"statements manually."
)
else:
return [sqlparse.format(statement, strip_comments=True)
for statement in sqlparse.split(sql) if statement]
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
def random_function_sql(self):
"""
Returns an SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
The `allow_cascade` argument determines whether truncation may cascade
to tables with foreign keys pointing to the tables being truncated.
PostgreSQL requires a cascade even if these tables are empty.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations must provide an sql_flush() method')
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return []
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
return force_text(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
"""
return value
def adapt_unknown_value(self, value):
"""
Transforms a value to something compatible with the backend driver.
This method only depends on the type of the value. It's designed for
cases where the target type isn't known, such as .raw() SQL queries.
As a consequence it may not work perfectly in all circumstances.
"""
if isinstance(value, datetime.datetime): # must be before date
return self.adapt_datetimefield_value(value)
elif isinstance(value, datetime.date):
return self.adapt_datefield_value(value)
elif isinstance(value, datetime.time):
return self.adapt_timefield_value(value)
elif isinstance(value, decimal.Decimal):
return self.adapt_decimalfield_value(value)
else:
return value
def adapt_datefield_value(self, value):
"""
Transforms a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return six.text_type(value)
def adapt_datetimefield_value(self, value):
"""
Transforms a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return six.text_type(value)
def adapt_timefield_value(self, value):
"""
Transforms a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if timezone.is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return six.text_type(value)
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
"""
Transforms a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return utils.format_number(value, max_digits, decimal_places)
def adapt_ipaddressfield_value(self, value):
"""
Transforms a string representation of an IP address into the expected
type for the backend driver.
"""
return value or None
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.date(value, 1, 1)
second = datetime.date(value, 12, 31)
first = self.adapt_datefield_value(first)
second = self.adapt_datefield_value(second)
return [first, second]
def year_lookup_bounds_for_datetime_field(self, value):
"""
Returns a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
`value` is an int, containing the looked-up year.
"""
first = datetime.datetime(value, 1, 1)
second = datetime.datetime(value, 12, 31, 23, 59, 59, 999999)
if settings.USE_TZ:
tz = timezone.get_current_timezone()
first = timezone.make_aware(first, tz)
second = timezone.make_aware(second, tz)
first = self.adapt_datetimefield_value(first)
second = self.adapt_datetimefield_value(second)
return [first, second]
def get_db_converters(self, expression):
"""
Get a list of functions needed to convert field data.
Some field types on some backends do not provide data in the correct
format; this is the hook for converter functions.
"""
return []
def convert_durationfield_value(self, value, expression, connection, context):
if value is not None:
value = str(decimal.Decimal(value) / decimal.Decimal(1000000))
value = parse_duration(value)
return value
def check_aggregate_support(self, aggregate_func):
warnings.warn(
"check_aggregate_support has been deprecated. Use "
"check_expression_support instead.",
RemovedInDjango20Warning, stacklevel=2)
return self.check_expression_support(aggregate_func)
def check_expression_support(self, expression):
"""
Check that the backend supports the provided expression.
This is used on specific backends to rule out known expressions
that have problematic or nonexistent implementations. If the
expression has a known problem, the backend should raise
NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
def combine_duration_expression(self, connector, sub_expressions):
return self.combine_expression(connector, sub_expressions)
def modify_insert_params(self, placeholder, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
def integer_field_range(self, internal_type):
"""
Given an integer field internal type (e.g. 'PositiveIntegerField'),
returns a tuple of the (min_value, max_value) form representing the
range of the column type bound to the field.
"""
return self.integer_field_ranges[internal_type]
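# Illustrative sketch (not part of Django): a concrete backend subclasses
# BaseDatabaseOperations and overrides the hooks it needs. The class and the SQL
# strings below are hypothetical examples, not any real backend's implementation.
class ExampleDatabaseOperations(BaseDatabaseOperations):
    def quote_name(self, name):
        # Double-quote identifiers unless the name is already quoted.
        if name.startswith('"') and name.endswith('"'):
            return name
        return '"%s"' % name
    def no_limit_value(self):
        # SQLite-style convention: LIMIT -1 means "no limit".
        return -1
    def date_extract_sql(self, lookup_type, field_name):
        # Hypothetical ANSI-style extraction for 'year', 'month' or 'day'.
        return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)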
|
|
"""Support for the Netatmo cameras."""
import logging
from pyatmo import NoDevice
import requests
import voluptuous as vol
from homeassistant.components.camera import (
CAMERA_SERVICE_SCHEMA,
PLATFORM_SCHEMA,
SUPPORT_STREAM,
Camera,
)
from homeassistant.const import CONF_VERIFY_SSL, STATE_OFF, STATE_ON
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from . import CameraData
from .const import DATA_NETATMO_AUTH, DOMAIN
_LOGGER = logging.getLogger(__name__)
CONF_HOME = "home"
CONF_CAMERAS = "cameras"
CONF_QUALITY = "quality"
DEFAULT_QUALITY = "high"
VALID_QUALITIES = ["high", "medium", "low", "poor"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Optional(CONF_HOME): cv.string,
vol.Optional(CONF_CAMERAS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_QUALITY, default=DEFAULT_QUALITY): vol.All(
cv.string, vol.In(VALID_QUALITIES)
),
}
)
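# Illustrative configuration sketch (hypothetical values): with the schema above,
# a camera platform entry in Home Assistant's configuration.yaml would look
# roughly like:
#
#   camera:
#     - platform: netatmo
#       home: My Home
#       quality: medium          # one of: high, medium, low, poor
#       cameras:
#         - Living Room
#       verify_ssl: true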
_BOOL_TO_STATE = {True: STATE_ON, False: STATE_OFF}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up access to Netatmo cameras."""
home = config.get(CONF_HOME)
verify_ssl = config.get(CONF_VERIFY_SSL, True)
quality = config.get(CONF_QUALITY, DEFAULT_QUALITY)
auth = hass.data[DATA_NETATMO_AUTH]
try:
data = CameraData(hass, auth, home)
for camera_name in data.get_camera_names():
camera_type = data.get_camera_type(camera=camera_name, home=home)
if CONF_CAMERAS in config:
if (
config[CONF_CAMERAS] != []
and camera_name not in config[CONF_CAMERAS]
):
continue
add_entities(
[
NetatmoCamera(
data, camera_name, home, camera_type, verify_ssl, quality
)
]
)
data.get_persons()
except NoDevice:
return None
async def async_service_handler(call):
"""Handle service call."""
_LOGGER.debug(
"Service handler invoked with service=%s and data=%s",
call.service,
call.data,
)
service = call.service
entity_id = call.data["entity_id"][0]
async_dispatcher_send(hass, f"{service}_{entity_id}")
hass.services.async_register(
DOMAIN, "set_light_auto", async_service_handler, CAMERA_SERVICE_SCHEMA
)
hass.services.async_register(
DOMAIN, "set_light_on", async_service_handler, CAMERA_SERVICE_SCHEMA
)
hass.services.async_register(
DOMAIN, "set_light_off", async_service_handler, CAMERA_SERVICE_SCHEMA
)
class NetatmoCamera(Camera):
"""Representation of the images published from a Netatmo camera."""
def __init__(self, data, camera_name, home, camera_type, verify_ssl, quality):
"""Set up for access to the Netatmo camera images."""
super().__init__()
self._data = data
self._camera_name = camera_name
self._home = home
if home:
self._name = home + " / " + camera_name
else:
self._name = camera_name
self._cameratype = camera_type
self._verify_ssl = verify_ssl
self._quality = quality
# URLs.
self._vpnurl = None
self._localurl = None
# Identifier
self._id = None
# Monitoring status.
self._status = None
# SD Card status
self._sd_status = None
# Power status
self._alim_status = None
# Is local
self._is_local = None
# VPN URL
self._vpn_url = None
# Light mode status
self._light_mode_status = None
def camera_image(self):
"""Return a still image response from the camera."""
try:
if self._localurl:
response = requests.get(
f"{self._localurl}/live/snapshot_720.jpg", timeout=10
)
elif self._vpnurl:
response = requests.get(
f"{self._vpnurl}/live/snapshot_720.jpg",
timeout=10,
verify=self._verify_ssl,
)
else:
_LOGGER.error("Welcome VPN URL is None")
self._data.update()
(self._vpnurl, self._localurl) = self._data.camera_data.cameraUrls(
camera=self._camera_name
)
return None
except requests.exceptions.RequestException as error:
_LOGGER.error("Welcome URL changed: %s", error)
self._data.update()
(self._vpnurl, self._localurl) = self._data.camera_data.cameraUrls(
camera=self._camera_name
)
return None
return response.content
# Entity property overrides
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return True
@property
def name(self):
"""Return the name of this Netatmo camera device."""
return self._name
@property
def device_state_attributes(self):
"""Return the Netatmo-specific camera state attributes."""
_LOGGER.debug("Getting new attributes from camera netatmo '%s'", self._name)
attr = {}
attr["id"] = self._id
attr["status"] = self._status
attr["sd_status"] = self._sd_status
attr["alim_status"] = self._alim_status
attr["is_local"] = self._is_local
attr["vpn_url"] = self._vpn_url
if self.model == "Presence":
attr["light_mode_status"] = self._light_mode_status
_LOGGER.debug("Attributes of '%s' = %s", self._name, attr)
return attr
@property
def available(self):
"""Return True if entity is available."""
return bool(self._alim_status == "on")
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_STREAM
@property
def is_recording(self):
"""Return true if the device is recording."""
return bool(self._status == "on")
@property
def brand(self):
"""Return the camera brand."""
return "Netatmo"
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return bool(self._status == "on")
@property
def is_on(self):
"""Return true if on."""
return self.is_streaming
async def stream_source(self):
"""Return the stream source."""
url = "{0}/live/files/{1}/index.m3u8"
if self._localurl:
return url.format(self._localurl, self._quality)
return url.format(self._vpnurl, self._quality)
@property
def model(self):
"""Return the camera model."""
if self._cameratype == "NOC":
return "Presence"
if self._cameratype == "NACamera":
return "Welcome"
return None
# Other Entity method overrides
async def async_added_to_hass(self):
"""Subscribe to signals and add camera to list."""
_LOGGER.debug("Registering services for entity_id=%s", self.entity_id)
async_dispatcher_connect(
self.hass, f"set_light_auto_{self.entity_id}", self.set_light_auto
)
async_dispatcher_connect(
self.hass, f"set_light_on_{self.entity_id}", self.set_light_on
)
async_dispatcher_connect(
self.hass, f"set_light_off_{self.entity_id}", self.set_light_off
)
def update(self):
"""Update entity status."""
_LOGGER.debug("Updating camera netatmo '%s'", self._name)
# Refresh camera data.
self._data.update()
# URLs.
self._vpnurl, self._localurl = self._data.camera_data.cameraUrls(
camera=self._camera_name
)
        # Fetch the camera record once and read all attributes from it.
        camera = self._data.camera_data.cameraByName(
            camera=self._camera_name, home=self._home
        )
        # Identifier
        self._id = camera["id"]
        # Monitoring status.
        self._status = camera["status"]
        _LOGGER.debug("Status of '%s' = %s", self._name, self._status)
        # SD Card status
        self._sd_status = camera["sd_status"]
        # Power status
        self._alim_status = camera["alim_status"]
        # Is local
        self._is_local = camera["is_local"]
        # VPN URL
        self._vpn_url = camera["vpn_url"]
        self.is_streaming = self._alim_status == "on"
        if self.model == "Presence":
            # Light mode status
            self._light_mode_status = camera["light_mode_status"]
# Camera method overrides
def enable_motion_detection(self):
"""Enable motion detection in the camera."""
_LOGGER.debug("Enable motion detection of the camera '%s'", self._name)
self._enable_motion_detection(True)
def disable_motion_detection(self):
"""Disable motion detection in camera."""
_LOGGER.debug("Disable motion detection of the camera '%s'", self._name)
self._enable_motion_detection(False)
def _enable_motion_detection(self, enable):
"""Enable or disable motion detection."""
try:
if self._localurl:
requests.get(
f"{self._localurl}/command/changestatus?status={_BOOL_TO_STATE.get(enable)}",
timeout=10,
)
elif self._vpnurl:
requests.get(
f"{self._vpnurl}/command/changestatus?status={_BOOL_TO_STATE.get(enable)}",
timeout=10,
verify=self._verify_ssl,
)
else:
_LOGGER.error("Welcome/Presence VPN URL is None")
self._data.update()
(self._vpnurl, self._localurl) = self._data.camera_data.cameraUrls(
camera=self._camera_name
)
return None
except requests.exceptions.RequestException as error:
_LOGGER.error("Welcome/Presence URL changed: %s", error)
self._data.update()
(self._vpnurl, self._localurl) = self._data.camera_data.cameraUrls(
camera=self._camera_name
)
return None
else:
self.async_schedule_update_ha_state(True)
# Netatmo Presence specific camera method.
def set_light_auto(self):
"""Set flood light in automatic mode."""
_LOGGER.debug(
"Set the flood light in automatic mode for the camera '%s'", self._name
)
self._set_light_mode("auto")
def set_light_on(self):
"""Set flood light on."""
_LOGGER.debug("Set the flood light on for the camera '%s'", self._name)
self._set_light_mode("on")
def set_light_off(self):
"""Set flood light off."""
_LOGGER.debug("Set the flood light off for the camera '%s'", self._name)
self._set_light_mode("off")
def _set_light_mode(self, mode):
"""Set light mode ('auto', 'on', 'off')."""
if self.model == "Presence":
try:
config = '{"mode":"' + mode + '"}'
if self._localurl:
requests.get(
f"{self._localurl}/command/floodlight_set_config?config={config}",
timeout=10,
)
elif self._vpnurl:
requests.get(
f"{self._vpnurl}/command/floodlight_set_config?config={config}",
timeout=10,
verify=self._verify_ssl,
)
else:
_LOGGER.error("Presence VPN URL is None")
self._data.update()
(self._vpnurl, self._localurl) = self._data.camera_data.cameraUrls(
camera=self._camera_name
)
return None
except requests.exceptions.RequestException as error:
_LOGGER.error("Presence URL changed: %s", error)
self._data.update()
(self._vpnurl, self._localurl) = self._data.camera_data.cameraUrls(
camera=self._camera_name
)
return None
else:
self.async_schedule_update_ha_state(True)
else:
_LOGGER.error("Unsupported camera model for light mode")
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_versionedobjects import base as obj_base
from oslo_versionedobjects import fields as obj_fields
from neutron.common import utils
from neutron.db import models_v2
from neutron.db import rbac_db_models
from neutron.objects import base
from neutron.objects import common_types
from neutron.objects import rbac_db
@obj_base.VersionedObjectRegistry.register
class DNSNameServer(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models_v2.DNSNameServer
primary_keys = ['address', 'subnet_id']
foreign_keys = {'Subnet': {'subnet_id': 'id'}}
fields = {
'address': obj_fields.StringField(),
'subnet_id': obj_fields.UUIDField(),
'order': obj_fields.IntegerField()
}
@classmethod
def get_objects(cls, context, _pager=None, **kwargs):
"""Fetch DNSNameServer objects with default sort by 'order' field.
"""
if not _pager:
_pager = base.Pager()
if not _pager.sorts:
# (NOTE) True means ASC, False is DESC
_pager.sorts = [('order', True)]
return super(DNSNameServer, cls).get_objects(context, _pager,
**kwargs)
@obj_base.VersionedObjectRegistry.register
class Route(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models_v2.SubnetRoute
primary_keys = ['destination', 'nexthop', 'subnet_id']
foreign_keys = {'Subnet': {'subnet_id': 'id'}}
fields = {
'subnet_id': obj_fields.UUIDField(),
'destination': common_types.IPNetworkField(),
'nexthop': obj_fields.IPAddressField()
}
@classmethod
def modify_fields_from_db(cls, db_obj):
# TODO(korzen) remove this method when IP and CIDR decorator ready
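        # The DB stores destination/nexthop as plain strings; expose them as
        # netaddr types on the object.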
result = super(Route, cls).modify_fields_from_db(db_obj)
if 'destination' in result:
result['destination'] = utils.AuthenticIPNetwork(
result['destination'])
if 'nexthop' in result:
result['nexthop'] = netaddr.IPAddress(result['nexthop'])
return result
@classmethod
def modify_fields_to_db(cls, fields):
# TODO(korzen) remove this method when IP and CIDR decorator ready
result = super(Route, cls).modify_fields_to_db(fields)
if 'destination' in result:
result['destination'] = cls.filter_to_str(result['destination'])
        if 'nexthop' in result:
result['nexthop'] = cls.filter_to_str(result['nexthop'])
return result
@obj_base.VersionedObjectRegistry.register
class IPAllocationPool(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models_v2.IPAllocationPool
foreign_keys = {'Subnet': {'subnet_id': 'id'}}
fields_need_translation = {
'start': 'first_ip',
'end': 'last_ip'
}
fields = {
'id': obj_fields.UUIDField(),
'subnet_id': obj_fields.UUIDField(),
'start': obj_fields.IPAddressField(),
'end': obj_fields.IPAddressField()
}
@classmethod
def modify_fields_from_db(cls, db_obj):
# TODO(korzen) remove this method when IP and CIDR decorator ready
result = super(IPAllocationPool, cls).modify_fields_from_db(db_obj)
if 'start' in result:
result['start'] = netaddr.IPAddress(result['start'])
if 'end' in result:
result['end'] = netaddr.IPAddress(result['end'])
return result
@classmethod
def modify_fields_to_db(cls, fields):
# TODO(korzen) remove this method when IP and CIDR decorator ready
result = super(IPAllocationPool, cls).modify_fields_to_db(fields)
if 'first_ip' in result:
result['first_ip'] = cls.filter_to_str(result['first_ip'])
if 'last_ip' in result:
result['last_ip'] = cls.filter_to_str(result['last_ip'])
return result
# RBAC metaclass is not applied here because 'shared' attribute of Subnet
# is dependent on Network 'shared' state, and in Subnet object
# it can be read-only. The necessary changes are applied manually:
# - defined 'shared' attribute in 'fields'
# - added 'shared' to synthetic_fields
# - registered extra_filter_name for 'shared' attribute
# - added loading shared attribute based on network 'rbac_entries'
@obj_base.VersionedObjectRegistry.register
class Subnet(base.NeutronDbObject):
# Version 1.0: Initial version
VERSION = '1.0'
db_model = models_v2.Subnet
fields = {
'id': obj_fields.UUIDField(),
'project_id': obj_fields.StringField(nullable=True),
'name': obj_fields.StringField(nullable=True),
'network_id': obj_fields.UUIDField(),
'segment_id': obj_fields.UUIDField(nullable=True),
'subnetpool_id': obj_fields.UUIDField(nullable=True),
'ip_version': common_types.IPVersionEnumField(),
'cidr': common_types.IPNetworkField(),
'gateway_ip': obj_fields.IPAddressField(nullable=True),
'allocation_pools': obj_fields.ListOfObjectsField('IPAllocationPool',
nullable=True),
'enable_dhcp': obj_fields.BooleanField(nullable=True),
'shared': obj_fields.BooleanField(nullable=True),
'dns_nameservers': obj_fields.ListOfObjectsField('DNSNameServer',
nullable=True),
'host_routes': obj_fields.ListOfObjectsField('Route', nullable=True),
'ipv6_ra_mode': common_types.IPV6ModeEnumField(nullable=True),
'ipv6_address_mode': common_types.IPV6ModeEnumField(nullable=True)
}
synthetic_fields = ['allocation_pools', 'dns_nameservers', 'host_routes',
'shared']
foreign_keys = {'Network': {'network_id': 'id'}}
fields_no_update = ['project_id']
fields_need_translation = {
'project_id': 'tenant_id',
'host_routes': 'routes'
}
def __init__(self, context=None, **kwargs):
super(Subnet, self).__init__(context, **kwargs)
self.add_extra_filter_name('shared')
def obj_load_attr(self, attrname):
if attrname == 'shared':
return self._load_shared()
super(Subnet, self).obj_load_attr(attrname)
def _load_shared(self, db_obj=None):
if db_obj:
# NOTE(korzen) db_obj is passed when Subnet object is loaded
# from DB
rbac_entries = db_obj.get('rbac_entries') or {}
shared = (rbac_db.RbacNeutronDbObjectMixin.
is_network_shared(self.obj_context, rbac_entries))
else:
# NOTE(korzen) this case is used when Subnet object was
# instantiated and without DB interaction (get_object(s), update,
# create), it should be rare case to load 'shared' by that method
shared = (rbac_db.RbacNeutronDbObjectMixin.
get_shared_with_tenant(self.obj_context.elevated(),
rbac_db_models.NetworkRBAC,
self.network_id,
self.project_id))
setattr(self, 'shared', shared)
self.obj_reset_changes(['shared'])
def from_db_object(self, db_obj):
super(Subnet, self).from_db_object(db_obj)
self._load_shared(db_obj)
@classmethod
def modify_fields_from_db(cls, db_obj):
# TODO(korzen) remove this method when IP and CIDR decorator ready
result = super(Subnet, cls).modify_fields_from_db(db_obj)
if 'cidr' in result:
result['cidr'] = utils.AuthenticIPNetwork(result['cidr'])
if 'gateway_ip' in result and result['gateway_ip'] is not None:
result['gateway_ip'] = netaddr.IPAddress(result['gateway_ip'])
return result
@classmethod
def modify_fields_to_db(cls, fields):
# TODO(korzen) remove this method when IP and CIDR decorator ready
result = super(Subnet, cls).modify_fields_to_db(fields)
if 'cidr' in result:
result['cidr'] = cls.filter_to_str(result['cidr'])
if 'gateway_ip' in result and result['gateway_ip'] is not None:
result['gateway_ip'] = cls.filter_to_str(result['gateway_ip'])
return result
|
|
#!/usr/bin/python
import sys
import time
import math
import pdb
from Utils import GeomUtils
import Stroke
from SketchFramework import Point
from Tkinter import *
from tkMessageBox import *
# Constants
TEMPLATE_FILE = "board_templates.dat"
TEMPLATE_SAMPLE = 64 #num points in a template
WIDTH = 800
HEIGHT = 600
MID_W = WIDTH/2
MID_H = HEIGHT/2
def scoreStroke(stroke, template):
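    # Resample the stroke, center it on its centroid, and compare the flattened
    # (X, Y) coordinate vectors against the template; a smaller distance
    # presumably indicates a closer match.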
sNorm = GeomUtils.strokeNormalizeSpacing(stroke, TEMPLATE_SAMPLE)
centr = GeomUtils.centroid(sNorm.Points)
point_vect = []
templ_vect = []
for q in template:
templ_vect.append(q.X)
templ_vect.append(q.Y)
for p in sNorm.Points:
point_vect.append(p.X - centr.X)
point_vect.append(p.Y - centr.Y)
angularDist = GeomUtils.vectorDistance(point_vect, templ_vect)
return angularDist
def loadTemplates(filename = TEMPLATE_FILE):
print "Loading templates: %s" % filename
try:
fp = open(filename, "r")
    except IOError:
        # No template file yet; return an empty dict so callers can iterate safely.
        return {}
templates = {}
current_template = None
for line in fp.readlines():
fields = line.split()
if line.startswith("#TEMPLATE"):
assert len(fields) == 2
current_template = fields[1]
templates[current_template] = []
elif line.startswith("#END"):
assert len(fields) == 2
template_name = fields[1]
assert current_template == template_name
current_template = None
else:
assert len(fields) == 2
x = float(fields[0])
y = float(fields[1])
assert current_template is not None
templates[current_template].append(Point.Point(x, y))
return templates
def storeTemplate(normStroke, tag=None, filename = TEMPLATE_FILE, overwrite = False):
print "Saving template %s to: %s" % (tag, filename)
if overwrite:
fp = open (filename, "w")
else:
fp = open (filename, "a")
if type(tag) is str:
print >> fp, "#TEMPLATE %s" % (tag)
for p in normStroke.Points:
print >> fp, "%s %s" % (p.X, p.Y)
print >>fp, "#END %s" % (tag)
fp.close()
class SketchGUI(Frame):
def __init__(self, master = None, **kargs):
"Set up the Tkinter GUI stuff as well as the board logic"
global HEIGHT, WIDTH
Frame.__init__(self, master, **kargs)
self.pack()
#Set up the GUI stuff
self.drawMenuOptions = {}
self.BoardCanvas= Canvas(self, width=WIDTH, height = HEIGHT, bg="white", bd=2)
self.BoardCanvas.pack(side=BOTTOM)
self.BoardCanvas.bind("<ButtonPress-1>", self.CanvasMouseDown)
self.BoardCanvas.bind("<B1-Motion>", self.CanvasMouseDown)
self.BoardCanvas.bind("<ButtonRelease-1>", self.CanvasMouseUp)
self.CurrentPointList = []
self.StrokeList = []
self.templates = {}
self.p_y = self.p_x = None
#self.ResetBoard()
self.MakeMenu()
#LoadStrokes()
#self.Redraw()
def MakeMenu(self):
"Reserve places in the menu for fun actions!"
win = self.master
top_menu = Menu(win)
win.config(menu=top_menu)
self.object_menu = Menu(top_menu)
#top_menu.bind("<ButtonPress-1>",(lambda e: self.RebuildObjectMenu()))
#self.RebuildObjectMenu()
top_menu.add_command(label="Reset Board", command = (lambda :self.Redraw()), underline=1 )
top_menu.add_command(label="Load Templates", command = self.LoadTemplates, underline=1 )
top_menu.add_command(label="Save Template", command = self.SaveTemplate, underline=1 )
top_menu.add_command(label="Recognize Stroke", command = (lambda :self.Redraw()), underline=1 )
top_menu.add_command(label="Input Stroke", command = (lambda : self.Redraw()), underline=1 )
def InvertDraw(self, class_):
"Essentially checkbox behavior for BoardObject.DrawAll variable"
if hasattr(class_, "DrawAll"):
class_.DrawAll = not class_.DrawAll
self.Redraw()
def CanvasMouseDown(self, event):
"Draw a line connecting the points of a stroke as it is being drawn"
x = event.x
y = event.y
#self.BoardCanvas.create_oval(x,y,x,y,activewidth="1", fill="black", outline = "black")
        if self.p_x is not None and self.p_y is not None:
p_x = self.p_x
p_y = self.p_y
self.BoardCanvas.create_line(p_x, p_y, x ,y, fill = "black", width=2)
x = float(event.x)
y = float(HEIGHT - event.y)
t = time.time()
self.CurrentPointList.append(Point.Point(x,y,t))
self.p_x = x
self.p_y = HEIGHT - y
def SaveTemplate(self, numSamples = TEMPLATE_SAMPLE):
if len(self.StrokeList) > 0:
last_stroke = self.StrokeList[-1]
template_name = str(len(self.StrokeList))
sNorm = GeomUtils.strokeNormalizeSpacing(last_stroke, numSamples)
centroid = GeomUtils.centroid(sNorm.Points)
sNorm = sNorm.translate(-1*centroid.X, -1 * centroid.Y)
storeTemplate(sNorm, tag=template_name)
def LoadTemplates(self):
self.templates = loadTemplates()
def CanvasMouseUp(self, event):
"Finish the stroke and add it to the board"
#start a new stroke
new_stroke = Stroke.Stroke(self.CurrentPointList)
self.StrokeList.append(new_stroke)
self.CurrentPointList = []
self.p_x = self.p_y = None
for tag, templ in self.templates.items():
print "Stroke to template %s: %s" % (tag, scoreStroke(new_stroke, templ))
def Redraw(self):
"""Find all the strokes on the board, draw them, then iterate through every object and
have it draw itself"""
global HEIGHT, WIDTH
self.BoardCanvas.delete(ALL)
def drawPoint(self, point):
self.drawCircle(point.X, point.Y, rad = 3)
def drawCircle(self, x, y, rad=1, color="#000000", fill="", width=1.0):
"Draw a circle on the canvas at (x,y) with radius rad. Color should be 24 bit RGB string #RRGGBB. Empty string is transparent"
y = HEIGHT - y
self.BoardCanvas.create_oval(x-rad,y-rad,x+rad,y+rad,width=width, fill=fill, outline = color)
def drawLine(self, x1, y1, x2, y2, LineWidth=2, color="#000000"):
"Draw a line on the canvas from (x1,y1) to (x2,y2). Color should be 24 bit RGB string #RRGGBB"
y1 = HEIGHT - y1
y2 = HEIGHT - y2
self.BoardCanvas.create_line(x1, y1, x2 ,y2, fill = color, width=LineWidth)
def drawText (self, x, y, InText="", size=10, color="#000000"):
"Draw some text (InText) on the canvas at (x,y). Color as defined by 24 bit RGB string #RRGGBB"
y = HEIGHT - y
text_font = ("times", size, "")
self.BoardCanvas.create_text(x,y,text = InText, fill = color, font = text_font, anchor=NW)
def drawStroke(self, stroke, LineWidth = 2, color="#000000"):
prev_p = None
for next_p in stroke.Points:
if prev_p is not None:
self.drawLine(prev_p.X, prev_p.Y, next_p.X, next_p.Y, LineWidth=LineWidth, color=color)
prev_p = next_p
def GUIRun():
root = Tk()
root.title("Template Generator")
app = SketchGUI(master = root)
try:
while 1:
root.update_idletasks()
root.update()
except TclError:
pass
#root.mainloop()
if __name__ == "__main__":
GUIRun()
|
|
import traceback, sys
from unittest import TestResult
import datetime
from tcmessages import TeamcityServiceMessages
PYTHON_VERSION_MAJOR = sys.version_info[0]
def strclass(cls):
if not cls.__name__:
return cls.__module__
return "%s.%s" % (cls.__module__, cls.__name__)
def smart_str(s):
encoding = 'utf-8'
errors = 'strict'
if PYTHON_VERSION_MAJOR < 3:
is_string = isinstance(s, basestring)
else:
is_string = isinstance(s, str)
if not is_string:
try:
return str(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return ' '.join([smart_str(arg) for arg in s])
return unicode(s).encode(encoding, errors)
    elif PYTHON_VERSION_MAJOR < 3 and isinstance(s, unicode):
return s.encode(encoding, errors)
else:
return s
class TeamcityTestResult(TestResult):
def __init__(self, stream=sys.stdout, *args, **kwargs):
TestResult.__init__(self)
for arg, value in kwargs.items():
setattr(self, arg, value)
self.output = stream
self.messages = TeamcityServiceMessages(self.output, prepend_linebreak=True)
self.messages.testMatrixEntered()
self.current_failed = False
self.current_suite = None
self.subtest_suite = None
def find_first(self, val):
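        # Extract the first quoted literal from an assertion message
        # (e.g. the left-hand side of "'foo' != 'bar'"), skipping escaped quotes.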
quot = val[0]
count = 1
quote_ind = val[count:].find(quot)
while quote_ind != -1 and val[count + quote_ind - 1] == "\\":
count = count + quote_ind + 1
quote_ind = val[count:].find(quot)
return val[0:quote_ind + count + 1]
def find_second(self, val):
val_index = val.find("!=")
if val_index != -1:
count = 1
val = val[val_index + 2:].strip()
quot = val[0]
quote_ind = val[count:].find(quot)
while quote_ind != -1 and val[count + quote_ind - 1] == "\\":
count = count + quote_ind + 1
quote_ind = val[count:].find(quot)
return val[0:quote_ind + count + 1]
else:
quot = val[-1]
quote_ind = val[:len(val) - 1].rfind(quot)
while quote_ind != -1 and val[quote_ind - 1] == "\\":
quote_ind = val[:quote_ind - 1].rfind(quot)
return val[quote_ind:]
def formatErr(self, err):
exctype, value, tb = err
return ''.join(traceback.format_exception(exctype, value, tb))
def getTestName(self, test, is_subtest=False):
if is_subtest:
test_name = self.getTestName(test.test_case)
return "{} {}".format(test_name, test._subDescription())
if hasattr(test, '_testMethodName'):
if test._testMethodName == "runTest":
return str(test)
return test._testMethodName
else:
test_name = str(test)
            whitespace_index = test_name.find(" ")
if whitespace_index != -1:
test_name = test_name[:whitespace_index]
return test_name
def getTestId(self, test):
return test.id
def addSuccess(self, test):
TestResult.addSuccess(self, test)
def addError(self, test, err):
location = self.init_suite(test)
self.current_failed = True
TestResult.addError(self, test, err)
err = self._exc_info_to_string(err, test)
self.messages.testStarted(self.getTestName(test), location=location)
self.messages.testError(self.getTestName(test),
message='Error', details=err, duration=self.__getDuration(test))
def find_error_value(self, err):
error_value = traceback.extract_tb(err)
error_value = error_value[-1][-1]
return error_value.split('assert')[-1].strip()
def addFailure(self, test, err):
location = self.init_suite(test)
self.current_failed = True
TestResult.addFailure(self, test, err)
error_value = smart_str(err[1])
if not len(error_value):
# means it's test function and we have to extract value from traceback
error_value = self.find_error_value(err[2])
self_find_first = self.find_first(error_value)
self_find_second = self.find_second(error_value)
quotes = ["'", '"']
if (self_find_first[0] == self_find_first[-1] and self_find_first[0] in quotes and
self_find_second[0] == self_find_second[-1] and self_find_second[0] in quotes):
# let's unescape strings to show sexy multiline diff in PyCharm.
# By default all caret return chars are escaped by testing framework
first = self._unescape(self_find_first)
second = self._unescape(self_find_second)
else:
first = second = ""
err = self._exc_info_to_string(err, test)
self.messages.testStarted(self.getTestName(test), location=location)
duration = self.__getDuration(test)
self.messages.testFailed(self.getTestName(test),
message='Failure', details=err, expected=first, actual=second, duration=duration)
def addSkip(self, test, reason):
self.init_suite(test)
self.current_failed = True
self.messages.testIgnored(self.getTestName(test), message=reason)
def _getSuite(self, test):
try:
suite = strclass(test.suite)
suite_location = test.suite.location
location = test.suite.abs_location
if hasattr(test, "lineno"):
location = location + ":" + str(test.lineno)
else:
location = location + ":" + str(test.test.lineno)
except AttributeError:
import inspect
try:
source_file = inspect.getsourcefile(test.__class__)
if source_file:
source_dir_splitted = source_file.split("/")[:-1]
source_dir = "/".join(source_dir_splitted) + "/"
else:
source_dir = ""
except TypeError:
source_dir = ""
suite = strclass(test.__class__)
suite_location = "python_uttestid://" + source_dir + suite
location = "python_uttestid://" + source_dir + str(test.id())
return (suite, location, suite_location)
def startTest(self, test):
self.current_failed = False
setattr(test, "startTime", datetime.datetime.now())
def init_suite(self, test):
suite, location, suite_location = self._getSuite(test)
if suite != self.current_suite:
if self.current_suite:
self.messages.testSuiteFinished(self.current_suite)
self.current_suite = suite
self.messages.testSuiteStarted(self.current_suite, location=suite_location)
return location
def stopTest(self, test):
duration = self.__getDuration(test)
if not self.subtest_suite:
if not self.current_failed:
location = self.init_suite(test)
self.messages.testStarted(self.getTestName(test), location=location)
self.messages.testFinished(self.getTestName(test), duration=int(duration))
else:
self.messages.testSuiteFinished(self.subtest_suite)
self.subtest_suite = None
def __getDuration(self, test):
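        # Milliseconds elapsed since startTest() stamped startTime on the test.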
start = getattr(test, "startTime", datetime.datetime.now())
d = datetime.datetime.now() - start
duration = d.microseconds / 1000 + d.seconds * 1000 + d.days * 86400000
return duration
def addSubTest(self, test, subtest, err):
suite_name = self.getTestName(test) # + " (subTests)"
if not self.subtest_suite:
self.subtest_suite = suite_name
self.messages.testSuiteStarted(self.subtest_suite)
else:
if suite_name != self.subtest_suite:
self.messages.testSuiteFinished(self.subtest_suite)
self.subtest_suite = suite_name
self.messages.testSuiteStarted(self.subtest_suite)
name = self.getTestName(subtest, True)
if err is not None:
error = self._exc_info_to_string(err, test)
self.messages.testStarted(name)
self.messages.testFailed(name, message='Failure', details=error, duration=None)
else:
self.messages.testStarted(name)
self.messages.testFinished(name)
def endLastSuite(self):
if self.current_suite:
self.messages.testSuiteFinished(self.current_suite)
self.current_suite = None
def _unescape(self, text):
# do not use text.decode('string_escape'), it leads to problems with different string encodings given
return text.replace("\\n", "\n")
class TeamcityTestRunner(object):
def __init__(self, stream=sys.stdout):
self.stream = stream
def _makeResult(self, **kwargs):
return TeamcityTestResult(self.stream, **kwargs)
def run(self, test, **kwargs):
result = self._makeResult(**kwargs)
result.messages.testCount(test.countTestCases())
test(result)
result.endLastSuite()
return result
|
|
#
# Copyright (c) 2011-2015 Advanced Micro Devices, Inc.
# All rights reserved.
#
# For use for simulation and test purposes only
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Lisa Hsu
#
import math
import m5
from m5.objects import *
from m5.defines import buildEnv
from Ruby import create_topology
from Ruby import send_evicts
from Cluster import Cluster
from Crossbar import Crossbar
class CntrlBase:
_seqs = 0
@classmethod
def seqCount(cls):
# Use SeqCount not class since we need global count
CntrlBase._seqs += 1
return CntrlBase._seqs - 1
_cntrls = 0
@classmethod
def cntrlCount(cls):
# Use CntlCount not class since we need global count
CntrlBase._cntrls += 1
return CntrlBase._cntrls - 1
_version = 0
@classmethod
def versionCount(cls):
cls._version += 1 # Use count for this particular type
return cls._version - 1
class TccDirCache(RubyCache):
size = "512kB"
assoc = 16
resourceStalls = False
def create(self, options):
self.size = MemorySize(options.tcc_size)
self.size.value += (options.num_compute_units *
(MemorySize(options.tcp_size).value) *
options.tcc_dir_factor) / long(options.num_tccs)
self.start_index_bit = math.log(options.cacheline_size, 2) + \
math.log(options.num_tccs, 2)
self.replacement_policy = PseudoLRUReplacementPolicy()
class L1DCache(RubyCache):
resourceStalls = False
def create(self, options):
self.size = MemorySize(options.l1d_size)
self.assoc = options.l1d_assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class L1ICache(RubyCache):
resourceStalls = False
def create(self, options):
self.size = MemorySize(options.l1i_size)
self.assoc = options.l1i_assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class L2Cache(RubyCache):
resourceStalls = False
def create(self, options):
self.size = MemorySize(options.l2_size)
self.assoc = options.l2_assoc
self.replacement_policy = PseudoLRUReplacementPolicy()
class CPCntrl(CorePair_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1Icache = L1ICache()
self.L1Icache.create(options)
self.L1D0cache = L1DCache()
self.L1D0cache.create(options)
self.L1D1cache = L1DCache()
self.L1D1cache.create(options)
self.L2cache = L2Cache()
self.L2cache.create(options)
self.sequencer = RubySequencer()
self.sequencer.icache_hit_latency = 2
self.sequencer.dcache_hit_latency = 2
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1Icache
self.sequencer.dcache = self.L1D0cache
self.sequencer.ruby_system = ruby_system
self.sequencer.coreid = 0
self.sequencer.is_cpu_sequencer = True
self.sequencer1 = RubySequencer()
self.sequencer1.version = self.seqCount()
self.sequencer1.icache = self.L1Icache
self.sequencer1.dcache = self.L1D1cache
self.sequencer1.icache_hit_latency = 2
self.sequencer1.dcache_hit_latency = 2
self.sequencer1.ruby_system = ruby_system
self.sequencer1.coreid = 1
self.sequencer1.is_cpu_sequencer = True
self.issue_latency = options.cpu_to_dir_latency
self.send_evictions = send_evicts(options)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCPCache(RubyCache):
assoc = 8
dataArrayBanks = 16
tagArrayBanks = 4
dataAccessLatency = 4
tagAccessLatency = 1
def create(self, options):
self.size = MemorySize(options.tcp_size)
self.replacement_policy = PseudoLRUReplacementPolicy()
class TCPCntrl(TCP_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency)
self.L1cache.resourceStalls = options.no_resource_stalls
self.L1cache.create(options)
self.coalescer = RubyGPUCoalescer()
self.coalescer.version = self.seqCount()
self.coalescer.icache = self.L1cache
self.coalescer.dcache = self.L1cache
self.coalescer.ruby_system = ruby_system
self.coalescer.support_inst_reqs = False
self.coalescer.is_cpu_sequencer = False
self.coalescer.max_outstanding_requests = options.simds_per_cu * \
options.wfs_per_simd * \
options.wf_size
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.is_cpu_sequencer = True
self.use_seq_not_coal = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def createCP(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = TCPCache(tagAccessLatency = options.TCP_latency)
self.L1cache.resourceStalls = options.no_resource_stalls
self.L1cache.create(options)
self.coalescer = RubyGPUCoalescer()
self.coalescer.version = self.seqCount()
self.coalescer.icache = self.L1cache
self.coalescer.dcache = self.L1cache
self.coalescer.ruby_system = ruby_system
self.coalescer.support_inst_reqs = False
self.coalescer.is_cpu_sequencer = False
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.is_cpu_sequencer = True
self.use_seq_not_coal = True
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class SQCCache(RubyCache):
size = "32kB"
assoc = 8
dataArrayBanks = 16
tagArrayBanks = 4
dataAccessLatency = 4
tagAccessLatency = 1
def create(self, options):
self.replacement_policy = PseudoLRUReplacementPolicy()
class SQCCntrl(SQC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = SQCCache()
self.L1cache.create(options)
self.L1cache.resourceStalls = options.no_resource_stalls
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.support_data_reqs = False
self.sequencer.is_cpu_sequencer = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def createCP(self, options, ruby_system, system):
self.version = self.versionCount()
self.L1cache = SQCCache()
self.L1cache.create(options)
self.L1cache.resourceStalls = options.no_resource_stalls
self.sequencer = RubySequencer()
self.sequencer.version = self.seqCount()
self.sequencer.icache = self.L1cache
self.sequencer.dcache = self.L1cache
self.sequencer.ruby_system = ruby_system
self.sequencer.support_data_reqs = False
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
class TCC(RubyCache):
assoc = 16
dataAccessLatency = 8
tagAccessLatency = 2
resourceStalls = True
def create(self, options):
self.size = MemorySize(options.tcc_size)
self.size = self.size / options.num_tccs
self.dataArrayBanks = 256 / options.num_tccs #number of data banks
self.tagArrayBanks = 256 / options.num_tccs #number of tag banks
if ((self.size.value / long(self.assoc)) < 128):
self.size.value = long(128 * self.assoc)
self.start_index_bit = math.log(options.cacheline_size, 2) + \
math.log(options.num_tccs, 2)
self.replacement_policy = PseudoLRUReplacementPolicy()
class TCCCntrl(TCC_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L2cache = TCC()
self.L2cache.create(options)
self.l2_response_latency = options.TCC_latency
self.number_of_TBEs = 2048
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_tccdir, resp_to_tccdir,
tcc_unblock_to_tccdir, req_to_tcc,
probe_to_tcc, resp_to_tcc):
self.w_reqToTCCDir = req_to_tccdir
self.w_respToTCCDir = resp_to_tccdir
self.w_TCCUnblockToTCCDir = tcc_unblock_to_tccdir
self.w_reqToTCC = req_to_tcc
self.w_probeToTCC = probe_to_tcc
self.w_respToTCC = resp_to_tcc
class TCCDirCntrl(TCCdir_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.directory = TccDirCache()
self.directory.create(options)
self.number_of_TBEs = 1024
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_tccdir, resp_to_tccdir,
tcc_unblock_to_tccdir, req_to_tcc,
probe_to_tcc, resp_to_tcc):
self.w_reqToTCCDir = req_to_tccdir
self.w_respToTCCDir = resp_to_tccdir
self.w_TCCUnblockToTCCDir = tcc_unblock_to_tccdir
self.w_reqToTCC = req_to_tcc
self.w_probeToTCC = probe_to_tcc
self.w_respToTCC = resp_to_tcc
class L3Cache(RubyCache):
assoc = 8
dataArrayBanks = 256
tagArrayBanks = 256
def create(self, options, ruby_system, system):
self.size = MemorySize(options.l3_size)
self.size.value /= options.num_dirs
self.dataArrayBanks /= options.num_dirs
self.tagArrayBanks /= options.num_dirs
self.dataArrayBanks /= options.num_dirs
self.tagArrayBanks /= options.num_dirs
self.dataAccessLatency = options.l3_data_latency
self.tagAccessLatency = options.l3_tag_latency
self.resourceStalls = options.no_resource_stalls
self.replacement_policy = PseudoLRUReplacementPolicy()
class L3Cntrl(L3Cache_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.L3cache = L3Cache()
self.L3cache.create(options, ruby_system, system)
self.l3_response_latency = max(self.L3cache.dataAccessLatency,
self.L3cache.tagAccessLatency)
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
class DirMem(RubyDirectoryMemory, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
phys_mem_size = AddrRange(options.mem_size).size()
mem_module_size = phys_mem_size / options.num_dirs
dir_size = MemorySize('0B')
dir_size.value = mem_module_size
self.size = dir_size
class DirCntrl(Directory_Controller, CntrlBase):
def create(self, options, ruby_system, system):
self.version = self.versionCount()
self.response_latency = 30
self.directory = DirMem()
self.directory.create(options, ruby_system, system)
self.L3CacheMemory = L3Cache()
self.L3CacheMemory.create(options, ruby_system, system)
self.l3_hit_latency = max(self.L3CacheMemory.dataAccessLatency,
self.L3CacheMemory.tagAccessLatency)
self.number_of_TBEs = options.num_tbes
self.ruby_system = ruby_system
if options.recycle_latency:
self.recycle_latency = options.recycle_latency
def connectWireBuffers(self, req_to_dir, resp_to_dir, l3_unblock_to_dir,
req_to_l3, probe_to_l3, resp_to_l3):
self.reqToDir = req_to_dir
self.respToDir = resp_to_dir
self.l3UnblockToDir = l3_unblock_to_dir
self.reqToL3 = req_to_l3
self.probeToL3 = probe_to_l3
self.respToL3 = resp_to_l3
def define_options(parser):
parser.add_option("--num-subcaches", type="int", default=4)
parser.add_option("--l3-data-latency", type="int", default=20)
parser.add_option("--l3-tag-latency", type="int", default=15)
parser.add_option("--cpu-to-dir-latency", type="int", default=15)
parser.add_option("--gpu-to-dir-latency", type="int", default=160)
parser.add_option("--no-resource-stalls", action="store_false",
default=True)
parser.add_option("--num-tbes", type="int", default=256)
parser.add_option("--l2-latency", type="int", default=50) # load to use
parser.add_option("--num-tccs", type="int", default=1,
help="number of TCC directories and banks in the GPU")
parser.add_option("--TCP_latency", type="int", default=4,
help="TCP latency")
parser.add_option("--TCC_latency", type="int", default=16,
help="TCC latency")
parser.add_option("--tcc-size", type='string', default='256kB',
help="agregate tcc size")
parser.add_option("--tcp-size", type='string', default='16kB',
help="tcp size")
parser.add_option("--tcc-dir-factor", type='int', default=4,
help="TCCdir size = factor *(TCPs + TCC)")
def create_system(options, full_system, system, dma_devices, ruby_system):
if buildEnv['PROTOCOL'] != 'GPU_RfO':
panic("This script requires the GPU_RfO protocol to be built.")
cpu_sequencers = []
#
# The ruby network creation expects the list of nodes in the system to be
# consistent with the NetDest list. Therefore the l1 controller nodes
# must be listed before the directory nodes and directory nodes before
# dma nodes, etc.
#
cp_cntrl_nodes = []
tcp_cntrl_nodes = []
sqc_cntrl_nodes = []
tcc_cntrl_nodes = []
tccdir_cntrl_nodes = []
dir_cntrl_nodes = []
l3_cntrl_nodes = []
#
# Must create the individual controllers before the network to ensure the
# controller constructors are called before the network constructor
#
TCC_bits = int(math.log(options.num_tccs, 2))
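    # Address bits used to select a TCC bank/directory; num_tccs is assumed to
    # be a power of two.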
# This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
# Clusters
mainCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
for i in xrange(options.num_dirs):
dir_cntrl = DirCntrl(TCC_select_num_bits = TCC_bits)
dir_cntrl.create(options, ruby_system, system)
dir_cntrl.number_of_TBEs = 2560 * options.num_compute_units
#Enough TBEs for all TCP TBEs
# Connect the Directory controller to the ruby network
dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
dir_cntrl.requestFromCores.slave = ruby_system.network.master
dir_cntrl.responseFromCores = MessageBuffer()
dir_cntrl.responseFromCores.slave = ruby_system.network.master
dir_cntrl.unblockFromCores = MessageBuffer()
dir_cntrl.unblockFromCores.slave = ruby_system.network.master
dir_cntrl.probeToCore = MessageBuffer()
dir_cntrl.probeToCore.master = ruby_system.network.slave
dir_cntrl.responseToCore = MessageBuffer()
dir_cntrl.responseToCore.master = ruby_system.network.slave
dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
dir_cntrl.responseFromMemory = MessageBuffer()
exec("system.dir_cntrl%d = dir_cntrl" % i)
dir_cntrl_nodes.append(dir_cntrl)
mainCluster.add(dir_cntrl)
# For an odd number of CPUs, still create the right number of controllers
cpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
for i in xrange((options.num_cpus + 1) / 2):
cp_cntrl = CPCntrl()
cp_cntrl.create(options, ruby_system, system)
exec("system.cp_cntrl%d = cp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
# Connect the CP controllers and the network
cp_cntrl.requestFromCore = MessageBuffer()
cp_cntrl.requestFromCore.master = ruby_system.network.slave
cp_cntrl.responseFromCore = MessageBuffer()
cp_cntrl.responseFromCore.master = ruby_system.network.slave
cp_cntrl.unblockFromCore = MessageBuffer()
cp_cntrl.unblockFromCore.master = ruby_system.network.slave
cp_cntrl.probeToCore = MessageBuffer()
cp_cntrl.probeToCore.slave = ruby_system.network.master
cp_cntrl.responseToCore = MessageBuffer()
cp_cntrl.responseToCore.slave = ruby_system.network.master
cp_cntrl.mandatoryQueue = MessageBuffer()
cp_cntrl.triggerQueue = MessageBuffer(ordered = True)
cpuCluster.add(cp_cntrl)
gpuCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
for i in xrange(options.num_compute_units):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
number_of_TBEs = 2560) # max outstanding requests
tcp_cntrl.create(options, ruby_system, system)
exec("system.tcp_cntrl%d = tcp_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(tcp_cntrl.coalescer)
tcp_cntrl_nodes.append(tcp_cntrl)
# Connect the TCP controller to the ruby network
tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True)
tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
tcp_cntrl.probeToTCP.slave = ruby_system.network.master
tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseToTCP.slave = ruby_system.network.master
tcp_cntrl.mandatoryQueue = MessageBuffer()
gpuCluster.add(tcp_cntrl)
for i in xrange(options.num_sqc):
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.create(options, ruby_system, system)
exec("system.sqc_cntrl%d = sqc_cntrl" % i)
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(sqc_cntrl.sequencer)
# Connect the SQC controller to the ruby network
sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
sqc_cntrl.responseFromSQC = MessageBuffer(ordered = True)
sqc_cntrl.responseFromSQC.master = ruby_system.network.slave
sqc_cntrl.unblockFromCore = MessageBuffer(ordered = True)
sqc_cntrl.unblockFromCore.master = ruby_system.network.slave
sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
sqc_cntrl.probeToSQC.slave = ruby_system.network.master
sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
sqc_cntrl.responseToSQC.slave = ruby_system.network.master
sqc_cntrl.mandatoryQueue = MessageBuffer()
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_cp):
tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
number_of_TBEs = 2560) # max outstanding requests
tcp_cntrl.createCP(options, ruby_system, system)
exec("system.tcp_cntrl%d = tcp_cntrl" % (options.num_compute_units + i))
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(tcp_cntrl.sequencer)
tcp_cntrl_nodes.append(tcp_cntrl)
# Connect the TCP controller to the ruby network
tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.requestFromTCP.master = ruby_system.network.slave
tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseFromTCP.master = ruby_system.network.slave
tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True)
tcp_cntrl.unblockFromCore.master = ruby_system.network.slave
tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
tcp_cntrl.probeToTCP.slave = ruby_system.network.master
tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
tcp_cntrl.responseToTCP.slave = ruby_system.network.master
tcp_cntrl.mandatoryQueue = MessageBuffer()
gpuCluster.add(tcp_cntrl)
sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
sqc_cntrl.createCP(options, ruby_system, system)
exec("system.sqc_cntrl%d = sqc_cntrl" % (options.num_compute_units + i))
#
# Add controllers and sequencers to the appropriate lists
#
cpu_sequencers.append(sqc_cntrl.sequencer)
# Connect the SQC controller to the ruby network
sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
sqc_cntrl.requestFromSQC.master = ruby_system.network.slave
sqc_cntrl.responseFromSQC = MessageBuffer(ordered = True)
sqc_cntrl.responseFromSQC.master = ruby_system.network.slave
sqc_cntrl.unblockFromCore = MessageBuffer(ordered = True)
sqc_cntrl.unblockFromCore.master = ruby_system.network.slave
sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
sqc_cntrl.probeToSQC.slave = ruby_system.network.master
sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
sqc_cntrl.responseToSQC.slave = ruby_system.network.master
sqc_cntrl.mandatoryQueue = MessageBuffer()
# SQC also in GPU cluster
gpuCluster.add(sqc_cntrl)
for i in xrange(options.num_tccs):
tcc_cntrl = TCCCntrl(TCC_select_num_bits = TCC_bits,
number_of_TBEs = options.num_compute_units * 2560)
#Enough TBEs for all TCP TBEs
tcc_cntrl.create(options, ruby_system, system)
tcc_cntrl_nodes.append(tcc_cntrl)
tccdir_cntrl = TCCDirCntrl(TCC_select_num_bits = TCC_bits,
number_of_TBEs = options.num_compute_units * 2560)
#Enough TBEs for all TCP TBEs
tccdir_cntrl.create(options, ruby_system, system)
tccdir_cntrl_nodes.append(tccdir_cntrl)
exec("system.tcc_cntrl%d = tcc_cntrl" % i)
exec("system.tccdir_cntrl%d = tccdir_cntrl" % i)
# connect all of the wire buffers between L3 and dirs up
req_to_tccdir = RubyWireBuffer()
resp_to_tccdir = RubyWireBuffer()
tcc_unblock_to_tccdir = RubyWireBuffer()
req_to_tcc = RubyWireBuffer()
probe_to_tcc = RubyWireBuffer()
resp_to_tcc = RubyWireBuffer()
tcc_cntrl.connectWireBuffers(req_to_tccdir, resp_to_tccdir,
tcc_unblock_to_tccdir, req_to_tcc,
probe_to_tcc, resp_to_tcc)
tccdir_cntrl.connectWireBuffers(req_to_tccdir, resp_to_tccdir,
tcc_unblock_to_tccdir, req_to_tcc,
probe_to_tcc, resp_to_tcc)
# Connect the TCC controller to the ruby network
tcc_cntrl.responseFromTCC = MessageBuffer(ordered = True)
tcc_cntrl.responseFromTCC.master = ruby_system.network.slave
tcc_cntrl.responseToTCC = MessageBuffer(ordered = True)
tcc_cntrl.responseToTCC.slave = ruby_system.network.master
# Connect the TCC Dir controller to the ruby network
tccdir_cntrl.requestFromTCP = MessageBuffer(ordered = True)
tccdir_cntrl.requestFromTCP.slave = ruby_system.network.master
tccdir_cntrl.responseFromTCP = MessageBuffer(ordered = True)
tccdir_cntrl.responseFromTCP.slave = ruby_system.network.master
tccdir_cntrl.unblockFromTCP = MessageBuffer(ordered = True)
tccdir_cntrl.unblockFromTCP.slave = ruby_system.network.master
tccdir_cntrl.probeToCore = MessageBuffer(ordered = True)
tccdir_cntrl.probeToCore.master = ruby_system.network.slave
tccdir_cntrl.responseToCore = MessageBuffer(ordered = True)
tccdir_cntrl.responseToCore.master = ruby_system.network.slave
tccdir_cntrl.probeFromNB = MessageBuffer()
tccdir_cntrl.probeFromNB.slave = ruby_system.network.master
tccdir_cntrl.responseFromNB = MessageBuffer()
tccdir_cntrl.responseFromNB.slave = ruby_system.network.master
tccdir_cntrl.requestToNB = MessageBuffer()
tccdir_cntrl.requestToNB.master = ruby_system.network.slave
tccdir_cntrl.responseToNB = MessageBuffer()
tccdir_cntrl.responseToNB.master = ruby_system.network.slave
tccdir_cntrl.unblockToNB = MessageBuffer()
tccdir_cntrl.unblockToNB.master = ruby_system.network.slave
tccdir_cntrl.triggerQueue = MessageBuffer(ordered = True)
# TCC cntrls added to the GPU cluster
gpuCluster.add(tcc_cntrl)
gpuCluster.add(tccdir_cntrl)
# Assuming no DMA devices
assert(len(dma_devices) == 0)
# Add cpu/gpu clusters to main cluster
mainCluster.add(cpuCluster)
mainCluster.add(gpuCluster)
ruby_system.network.number_of_virtual_networks = 10
return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
|
|
import sys
import pytest
from numpy.testing import assert_allclose
import numpy as np
import scipy.sparse as sparse
from keras.backend import theano_backend as KTH
from keras.backend import tensorflow_backend as KTF
from keras.utils.np_utils import convert_kernel
def check_single_tensor_operation(function_name, input_shape, **kwargs):
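    # Evaluate the same backend op with Theano and TensorFlow and check that
    # the resulting shapes and values agree.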
val = np.random.random(input_shape) - 0.5
xth = KTH.variable(val)
xtf = KTF.variable(val)
zth = KTH.eval(getattr(KTH, function_name)(xth, **kwargs))
ztf = KTF.eval(getattr(KTF, function_name)(xtf, **kwargs))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
def check_two_tensor_operation(function_name, x_input_shape,
y_input_shape, **kwargs):
xval = np.random.random(x_input_shape) - 0.5
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
yval = np.random.random(y_input_shape) - 0.5
yth = KTH.variable(yval)
ytf = KTF.variable(yval)
zth = KTH.eval(getattr(KTH, function_name)(xth, yth, **kwargs))
ztf = KTF.eval(getattr(KTF, function_name)(xtf, ytf, **kwargs))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
def check_composed_tensor_operations(first_function_name, first_function_args,
second_function_name, second_function_args,
input_shape):
    ''' Creates a random tensor t0 with shape input_shape and computes
t1 = first_function_name(t0, **first_function_args)
t2 = second_function_name(t1, **second_function_args)
with both Theano and TensorFlow backends and ensures the answers match.
'''
val = np.random.random(input_shape) - 0.5
xth = KTH.variable(val)
xtf = KTF.variable(val)
yth = getattr(KTH, first_function_name)(xth, **first_function_args)
ytf = getattr(KTF, first_function_name)(xtf, **first_function_args)
zth = KTH.eval(getattr(KTH, second_function_name)(yth, **second_function_args))
ztf = KTF.eval(getattr(KTF, second_function_name)(ytf, **second_function_args))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
class TestBackend(object):
def test_linear_operations(self):
check_two_tensor_operation('dot', (4, 2), (2, 4))
check_two_tensor_operation('dot', (4, 2), (5, 2, 3))
check_two_tensor_operation('batch_dot', (4, 2, 3), (4, 5, 3),
axes=(2, 2))
check_single_tensor_operation('transpose', (4, 2))
check_single_tensor_operation('reverse', (4, 3, 2), axes=1)
check_single_tensor_operation('reverse', (4, 3, 2), axes=(1, 2))
def test_shape_operations(self):
# concatenate
xval = np.random.random((4, 3))
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
yval = np.random.random((4, 2))
yth = KTH.variable(yval)
ytf = KTF.variable(yval)
zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
check_single_tensor_operation('permute_dimensions', (4, 2, 3),
pattern=(2, 0, 1))
check_single_tensor_operation('repeat', (4, 1), n=3)
check_single_tensor_operation('flatten', (4, 1))
check_single_tensor_operation('expand_dims', (4, 3), dim=-1)
check_single_tensor_operation('expand_dims', (4, 3, 2), dim=1)
check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
check_single_tensor_operation('squeeze', (4, 1, 1), axis=1)
check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
'squeeze', {'axis': 2},
(4, 3, 1, 1))
def test_repeat_elements(self):
reps = 3
for ndims in [1, 2, 3]:
shape = np.arange(2, 2 + ndims)
arr = np.arange(np.prod(shape)).reshape(shape)
arr_th = KTH.variable(arr)
arr_tf = KTF.variable(arr)
for rep_axis in range(ndims):
np_rep = np.repeat(arr, reps, axis=rep_axis)
th_rep = KTH.eval(
KTH.repeat_elements(arr_th, reps, axis=rep_axis))
tf_rep = KTF.eval(
KTF.repeat_elements(arr_tf, reps, axis=rep_axis))
assert th_rep.shape == np_rep.shape
assert tf_rep.shape == np_rep.shape
assert_allclose(np_rep, th_rep, atol=1e-05)
assert_allclose(np_rep, tf_rep, atol=1e-05)
def test_tile(self):
shape = (3, 4)
arr = np.arange(np.prod(shape)).reshape(shape)
arr_th = KTH.variable(arr)
arr_tf = KTF.variable(arr)
n = (2, 1)
th_rep = KTH.eval(KTH.tile(arr_th, n))
tf_rep = KTF.eval(KTF.tile(arr_tf, n))
assert_allclose(tf_rep, th_rep, atol=1e-05)
def test_value_manipulation(self):
val = np.random.random((4, 2))
xth = KTH.variable(val)
xtf = KTF.variable(val)
# get_value
valth = KTH.get_value(xth)
valtf = KTF.get_value(xtf)
assert valtf.shape == valth.shape
assert_allclose(valth, valtf, atol=1e-05)
# set_value
val = np.random.random((4, 2))
KTH.set_value(xth, val)
KTF.set_value(xtf, val)
valth = KTH.get_value(xth)
valtf = KTF.get_value(xtf)
assert valtf.shape == valth.shape
assert_allclose(valth, valtf, atol=1e-05)
# count_params
assert KTH.count_params(xth) == KTF.count_params(xtf)
# print_tensor
check_single_tensor_operation('print_tensor', ())
check_single_tensor_operation('print_tensor', (2,))
check_single_tensor_operation('print_tensor', (4, 3))
check_single_tensor_operation('print_tensor', (1, 2, 3))
val = np.random.random((3, 2))
xth = KTH.variable(val)
xtf = KTF.variable(val)
assert KTH.get_variable_shape(xth) == KTF.get_variable_shape(xtf)
def test_elementwise_operations(self):
check_single_tensor_operation('max', (4, 2))
check_single_tensor_operation('max', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('min', (4, 2))
check_single_tensor_operation('min', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('min', (4, 2, 3), axis=[1, -1])
check_single_tensor_operation('mean', (4, 2))
check_single_tensor_operation('mean', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('mean', (4, 2, 3), axis=-1, keepdims=True)
check_single_tensor_operation('mean', (4, 2, 3), axis=[1, -1])
check_single_tensor_operation('std', (4, 2))
check_single_tensor_operation('std', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('std', (4, 2, 3), axis=[1, -1])
check_single_tensor_operation('prod', (4, 2))
check_single_tensor_operation('prod', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('prod', (4, 2, 3), axis=[1, -1])
# does not work yet, wait for bool <-> int casting in TF (coming soon)
# check_single_tensor_operation('any', (4, 2))
# check_single_tensor_operation('any', (4, 2), axis=1, keepdims=True)
#
        # check_single_tensor_operation('all', (4, 2))
        # check_single_tensor_operation('all', (4, 2), axis=1, keepdims=True)
check_single_tensor_operation('argmax', (4, 2))
check_single_tensor_operation('argmax', (4, 2), axis=1)
check_single_tensor_operation('argmin', (4, 2))
check_single_tensor_operation('argmin', (4, 2), axis=1)
check_single_tensor_operation('square', (4, 2))
check_single_tensor_operation('abs', (4, 2))
check_single_tensor_operation('sqrt', (4, 2))
check_single_tensor_operation('exp', (4, 2))
check_single_tensor_operation('log', (4, 2))
check_single_tensor_operation('round', (4, 2))
check_single_tensor_operation('sign', (4, 2))
check_single_tensor_operation('pow', (4, 2), a=3)
check_single_tensor_operation('clip', (4, 2), min_value=0.4,
max_value=0.6)
# two-tensor ops
check_two_tensor_operation('equal', (4, 2), (4, 2))
check_two_tensor_operation('not_equal', (4, 2), (4, 2))
check_two_tensor_operation('greater', (4, 2), (4, 2))
check_two_tensor_operation('greater_equal', (4, 2), (4, 2))
check_two_tensor_operation('lesser', (4, 2), (4, 2))
check_two_tensor_operation('lesser_equal', (4, 2), (4, 2))
check_two_tensor_operation('maximum', (4, 2), (4, 2))
check_two_tensor_operation('minimum', (4, 2), (4, 2))
def test_gradient(self):
val = np.random.random((4, 2))
xth = KTH.variable(val)
xtf = KTF.variable(val)
expth = xth * KTH.exp(xth)
exptf = xtf * KTF.exp(xtf)
lossth = KTH.sum(expth)
losstf = KTF.sum(exptf)
zero_lossth = KTH.stop_gradient(lossth)
zero_losstf = KTF.stop_gradient(losstf)
gradth = KTH.gradients(lossth, [expth])
gradtf = KTF.gradients(losstf, [exptf])
zero_gradth = KTH.gradients(lossth + zero_lossth, [expth])
zero_gradtf = KTF.gradients(losstf + zero_losstf, [exptf])
zth = KTH.eval(gradth[0])
ztf = KTF.eval(gradtf[0])
zero_zth = KTH.eval(zero_gradth[0])
zero_ztf = KTF.eval(zero_gradtf[0])
assert zth.shape == ztf.shape
assert zero_zth.shape == zero_ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
assert_allclose(zero_zth, zero_ztf, atol=1e-05)
assert_allclose(zero_zth, zth, atol=1e-05)
assert_allclose(zero_ztf, ztf, atol=1e-05)
def test_function(self):
val = np.random.random((4, 2))
input_val = np.random.random((4, 2))
xth = KTH.variable(val)
xtf = KTF.variable(val)
yth = KTH.placeholder(ndim=2)
ytf = KTF.placeholder(ndim=2)
exp_th = KTH.square(xth) + yth
exp_tf = KTF.square(xtf) + ytf
update_th = xth * 2
update_tf = xtf * 2
fth = KTH.function([yth], [exp_th], updates=[(xth, update_th)])
ftf = KTF.function([ytf], [exp_tf], updates=[(xtf, update_tf)])
function_outputs_th = fth([input_val])[0]
function_outputs_tf = ftf([input_val])[0]
assert function_outputs_th.shape == function_outputs_tf.shape
assert_allclose(function_outputs_th, function_outputs_tf, atol=1e-05)
new_val_th = KTH.get_value(xth)
new_val_tf = KTF.get_value(xtf)
assert new_val_th.shape == new_val_tf.shape
assert_allclose(new_val_th, new_val_tf, atol=1e-05)
def test_rnn(self):
# implement a simple RNN
input_dim = 8
output_dim = 4
timesteps = 5
input_val = np.random.random((32, timesteps, input_dim))
init_state_val = np.random.random((32, output_dim))
W_i_val = np.random.random((input_dim, output_dim))
W_o_val = np.random.random((output_dim, output_dim))
def rnn_step_fn(input_dim, output_dim, K):
W_i = K.variable(W_i_val)
W_o = K.variable(W_o_val)
def step_function(x, states):
assert len(states) == 1
prev_output = states[0]
output = K.dot(x, W_i) + K.dot(prev_output, W_o)
return output, [output]
return step_function
# test default setup
th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
th_inputs = KTH.variable(input_val)
th_initial_states = [KTH.variable(init_state_val)]
last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=False,
mask=None)
th_last_output = KTH.eval(last_output)
th_outputs = KTH.eval(outputs)
assert len(new_states) == 1
th_state = KTH.eval(new_states[0])
tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
tf_inputs = KTF.variable(input_val)
tf_initial_states = [KTF.variable(init_state_val)]
last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn, tf_inputs,
tf_initial_states,
go_backwards=False,
mask=None)
tf_last_output = KTF.eval(last_output)
tf_outputs = KTF.eval(outputs)
assert len(new_states) == 1
tf_state = KTF.eval(new_states[0])
assert_allclose(tf_last_output, th_last_output, atol=1e-04)
assert_allclose(tf_outputs, th_outputs, atol=1e-04)
assert_allclose(tf_state, th_state, atol=1e-04)
# test unroll
unrolled_last_output, unrolled_outputs, unrolled_new_states = KTH.rnn(
th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=False,
mask=None,
unroll=True,
input_length=timesteps)
unrolled_th_last_output = KTH.eval(unrolled_last_output)
unrolled_th_outputs = KTH.eval(unrolled_outputs)
assert len(unrolled_new_states) == 1
unrolled_th_state = KTH.eval(unrolled_new_states[0])
assert_allclose(th_last_output, unrolled_th_last_output, atol=1e-04)
assert_allclose(th_outputs, unrolled_th_outputs, atol=1e-04)
assert_allclose(th_state, unrolled_th_state, atol=1e-04)
# test go_backwards
th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
th_inputs = KTH.variable(input_val)
th_initial_states = [KTH.variable(init_state_val)]
last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=True,
mask=None)
th_last_output = KTH.eval(last_output)
th_outputs = KTH.eval(outputs)
assert len(new_states) == 1
th_state = KTH.eval(new_states[0])
tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
tf_inputs = KTF.variable(input_val)
tf_initial_states = [KTF.variable(init_state_val)]
last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn, tf_inputs,
tf_initial_states,
go_backwards=True,
mask=None)
tf_last_output = KTF.eval(last_output)
tf_outputs = KTF.eval(outputs)
assert len(new_states) == 1
tf_state = KTF.eval(new_states[0])
assert_allclose(tf_last_output, th_last_output, atol=1e-04)
assert_allclose(tf_outputs, th_outputs, atol=1e-04)
assert_allclose(tf_state, th_state, atol=1e-04)
# test unroll with backwards = True
bwd_last_output, bwd_outputs, bwd_new_states = KTH.rnn(
th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=True,
mask=None)
bwd_th_last_output = KTH.eval(bwd_last_output)
bwd_th_outputs = KTH.eval(bwd_outputs)
assert len(bwd_new_states) == 1
bwd_th_state = KTH.eval(bwd_new_states[0])
bwd_unrolled_last_output, bwd_unrolled_outputs, bwd_unrolled_new_states = KTH.rnn(
th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=True,
mask=None,
unroll=True,
input_length=timesteps)
bwd_unrolled_th_last_output = KTH.eval(bwd_unrolled_last_output)
bwd_unrolled_th_outputs = KTH.eval(bwd_unrolled_outputs)
assert len(bwd_unrolled_new_states) == 1
bwd_unrolled_th_state = KTH.eval(bwd_unrolled_new_states[0])
assert_allclose(bwd_th_last_output, bwd_unrolled_th_last_output, atol=1e-04)
assert_allclose(bwd_th_outputs, bwd_unrolled_th_outputs, atol=1e-04)
assert_allclose(bwd_th_state, bwd_unrolled_th_state, atol=1e-04)
# test unroll with masking
np_mask = np.random.randint(2, size=(32, timesteps))
th_mask = KTH.variable(np_mask)
masked_last_output, masked_outputs, masked_new_states = KTH.rnn(
th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=False,
mask=th_mask)
masked_th_last_output = KTH.eval(masked_last_output)
masked_th_outputs = KTH.eval(masked_outputs)
assert len(masked_new_states) == 1
masked_th_state = KTH.eval(masked_new_states[0])
unrolled_masked_last_output, unrolled_masked_outputs, unrolled_masked_new_states = KTH.rnn(
th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=False,
mask=th_mask,
unroll=True,
input_length=timesteps)
unrolled_masked_th_last_output = KTH.eval(unrolled_masked_last_output)
unrolled_masked_th_outputs = KTH.eval(unrolled_masked_outputs)
assert len(unrolled_masked_new_states) == 1
unrolled_masked_th_state = KTH.eval(unrolled_masked_new_states[0])
assert_allclose(unrolled_masked_th_last_output, masked_th_last_output, atol=1e-04)
assert_allclose(unrolled_masked_th_outputs, masked_th_outputs, atol=1e-04)
assert_allclose(unrolled_masked_th_state, masked_th_state, atol=1e-04)
def test_rnn_no_states(self):
# implement a simple RNN without states
input_dim = 8
output_dim = 4
timesteps = 5
input_val = np.random.random((32, timesteps, input_dim))
W_i_val = np.random.random((input_dim, output_dim))
def rnn_step_fn(input_dim, output_dim, K):
W_i = K.variable(W_i_val)
def step_function(x, states):
assert len(states) == 0
output = K.dot(x, W_i)
return output, []
return step_function
# test default setup
th_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTH)
th_inputs = KTH.variable(input_val)
th_initial_states = []
last_output, outputs, new_states = KTH.rnn(th_rnn_step_fn, th_inputs,
th_initial_states,
go_backwards=False,
mask=None)
th_last_output = KTH.eval(last_output)
th_outputs = KTH.eval(outputs)
assert len(new_states) == 0
tf_rnn_step_fn = rnn_step_fn(input_dim, output_dim, KTF)
tf_inputs = KTF.variable(input_val)
tf_initial_states = []
last_output, outputs, new_states = KTF.rnn(tf_rnn_step_fn, tf_inputs,
tf_initial_states,
go_backwards=False,
mask=None)
tf_last_output = KTF.eval(last_output)
tf_outputs = KTF.eval(outputs)
assert len(new_states) == 0
assert_allclose(tf_last_output, th_last_output, atol=1e-04)
assert_allclose(tf_outputs, th_outputs, atol=1e-04)
def test_switch(self):
val = np.random.random()
xth = KTH.variable(val)
xth = KTH.switch(xth >= 0.5, xth * 0.1, xth * 0.2)
xtf = KTF.variable(val)
xtf = KTF.switch(xtf >= 0.5, xtf * 0.1, xtf * 0.2)
zth = KTH.eval(xth)
ztf = KTF.eval(xtf)
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
def test_nn_operations(self):
check_single_tensor_operation('relu', (4, 2), alpha=0.1, max_value=0.5)
check_single_tensor_operation('softmax', (4, 10))
check_single_tensor_operation('softplus', (4, 10))
check_single_tensor_operation('sigmoid', (4, 2))
check_single_tensor_operation('hard_sigmoid', (4, 2))
check_single_tensor_operation('tanh', (4, 2))
# dropout
val = np.random.random((100, 100))
xth = KTH.variable(val)
xtf = KTF.variable(val)
zth = KTH.eval(KTH.dropout(xth, level=0.2))
ztf = KTF.eval(KTF.dropout(xtf, level=0.2))
assert zth.shape == ztf.shape
# dropout patterns are different, only check mean
assert np.abs(zth.mean() - ztf.mean()) < 0.05
check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=True)
check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=True)
check_two_tensor_operation('binary_crossentropy', (4, 2), (4, 2), from_logits=False)
check_two_tensor_operation('categorical_crossentropy', (4, 2), (4, 2), from_logits=False)
check_single_tensor_operation('l2_normalize', (4, 3), axis=-1)
check_single_tensor_operation('l2_normalize', (4, 3), axis=1)
def test_conv2d(self):
# TH kernel shape: (depth, input_depth, rows, cols)
# TF kernel shape: (rows, cols, input_depth, depth)
for input_shape in [(2, 3, 4, 5), (2, 3, 5, 6)]:
for kernel_shape in [(4, 3, 2, 2), (4, 3, 3, 4)]:
xval = np.random.random(input_shape)
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
kernel_val = np.random.random(kernel_shape) - 0.5
kernel_th = KTH.variable(convert_kernel(kernel_val))
kernel_tf = KTF.variable(kernel_val)
zth = KTH.eval(KTH.conv2d(xth, kernel_th, dim_ordering='th'))
ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, dim_ordering='th'))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
input_shape = (1, 6, 5, 3)
kernel_shape = (3, 3, 3, 2)
xval = np.random.random(input_shape)
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
kernel_val = np.random.random(kernel_shape) - 0.5
kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
kernel_tf = KTF.variable(kernel_val)
zth = KTH.eval(KTH.conv2d(xth, kernel_th, dim_ordering='tf'))
ztf = KTF.eval(KTF.conv2d(xtf, kernel_tf, dim_ordering='tf'))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
def test_conv3d(self):
# TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
# TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)
# TH kernel shape: (depth, input_depth, x, y, z)
# TF kernel shape: (x, y, z, input_depth, depth)
# test in dim_ordering = th
for input_shape in [(2, 3, 4, 5, 4), (2, 3, 5, 4, 6)]:
for kernel_shape in [(4, 3, 2, 2, 2), (4, 3, 3, 2, 4)]:
xval = np.random.random(input_shape)
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
kernel_val = np.random.random(kernel_shape) - 0.5
kernel_th = KTH.variable(convert_kernel(kernel_val))
kernel_tf = KTF.variable(kernel_val)
zth = KTH.eval(KTH.conv3d(xth, kernel_th, dim_ordering='th'))
ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, dim_ordering='th'))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
# test in dim_ordering = tf
input_shape = (1, 2, 2, 2, 1)
kernel_shape = (2, 2, 2, 1, 1)
xval = np.random.random(input_shape)
xth = KTH.variable(xval)
xtf = KTF.variable(xval)
kernel_val = np.random.random(kernel_shape) - 0.5
kernel_th = KTH.variable(convert_kernel(kernel_val, dim_ordering='tf'))
kernel_tf = KTF.variable(kernel_val)
zth = KTH.eval(KTH.conv3d(xth, kernel_th, dim_ordering='tf'))
ztf = KTF.eval(KTF.conv3d(xtf, kernel_tf, dim_ordering='tf'))
assert zth.shape == ztf.shape
assert_allclose(zth, ztf, atol=1e-05)
def test_pool2d(self):
check_single_tensor_operation('pool2d', (5, 10, 12, 3), pool_size=(2, 2),
strides=(1, 1), border_mode='valid')
check_single_tensor_operation('pool2d', (5, 9, 11, 3), pool_size=(2, 2),
strides=(1, 1), border_mode='valid')
check_single_tensor_operation('pool2d', (5, 9, 11, 3), pool_size=(2, 3),
strides=(1, 1), border_mode='valid')
def test_pool3d(self):
check_single_tensor_operation('pool3d', (5, 10, 12, 5, 3), pool_size=(2, 2, 2),
strides=(1, 1, 1), border_mode='valid')
check_single_tensor_operation('pool3d', (5, 9, 11, 5, 3), pool_size=(2, 2, 2),
strides=(1, 1, 1), border_mode='valid')
check_single_tensor_operation('pool3d', (5, 9, 11, 5, 3), pool_size=(2, 3, 2),
strides=(1, 1, 1), border_mode='valid')
def test_random_normal(self):
mean = 0.
std = 1.
rand = KTF.eval(KTF.random_normal((1000, 1000), mean=mean, std=std))
assert(rand.shape == (1000, 1000))
assert(np.abs(np.mean(rand) - mean) < 0.01)
assert(np.abs(np.std(rand) - std) < 0.01)
rand = KTH.eval(KTH.random_normal((1000, 1000), mean=mean, std=std))
assert(rand.shape == (1000, 1000))
assert(np.abs(np.mean(rand) - mean) < 0.01)
assert(np.abs(np.std(rand) - std) < 0.01)
def test_random_uniform(self):
min = -1.
max = 1.
rand = KTF.eval(KTF.random_uniform((1000, 1000), min, max))
assert(rand.shape == (1000, 1000))
assert(np.abs(np.mean(rand)) < 0.01)
assert(np.max(rand) <= max)
assert(np.min(rand) >= min)
rand = KTH.eval(KTH.random_uniform((1000, 1000), min, max))
assert(rand.shape == (1000, 1000))
assert(np.abs(np.mean(rand)) < 0.01)
assert(np.max(rand) <= max)
assert(np.min(rand) >= min)
def test_random_binomial(self):
p = 0.5
rand = KTF.eval(KTF.random_binomial((1000, 1000), p))
assert(rand.shape == (1000, 1000))
assert(np.abs(np.mean(rand) - p) < 0.01)
assert(np.max(rand) == 1)
assert(np.min(rand) == 0)
rand = KTH.eval(KTH.random_binomial((1000, 1000), p))
assert(rand.shape == (1000, 1000))
assert(np.abs(np.mean(rand) - p) < 0.01)
assert(np.max(rand) == 1)
assert(np.min(rand) == 0)
def test_ctc(self):
# simplified version of TensorFlow's test
label_lens = np.expand_dims(np.asarray([5, 4]), 1)
input_lens = np.expand_dims(np.asarray([5, 5]), 1) # number of timesteps
# the Theano and Tensorflow CTC code use different methods to ensure
# numerical stability. The Theano code subtracts out the max
# before the final log, so the results are different but scale
# identically and still train properly
loss_log_probs_tf = [3.34211, 5.42262]
loss_log_probs_th = [1.73308, 3.81351]
# dimensions are batch x time x categories
labels = np.asarray([[0, 1, 2, 1, 0], [0, 1, 1, 0, -1]])
inputs = np.asarray(
[[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]]],
dtype=np.float32)
labels_tf = KTF.variable(labels, dtype="int32")
inputs_tf = KTF.variable(inputs, dtype="float32")
input_lens_tf = KTF.variable(input_lens, dtype="int32")
label_lens_tf = KTF.variable(label_lens, dtype="int32")
res = KTF.eval(KTF.ctc_batch_cost(labels_tf, inputs_tf, input_lens_tf, label_lens_tf))
assert_allclose(res[:, 0], loss_log_probs_tf, atol=1e-05)
labels_th = KTH.variable(labels, dtype="int32")
inputs_th = KTH.variable(inputs, dtype="float32")
input_lens_th = KTH.variable(input_lens, dtype="int32")
label_lens_th = KTH.variable(label_lens, dtype="int32")
res = KTH.eval(KTH.ctc_batch_cost(labels_th, inputs_th, input_lens_th, label_lens_th))
assert_allclose(res[0, :], loss_log_probs_th, atol=1e-05)
def test_ctc_decode_greedy(self):
# Test adapted from tensorflow
"""Test two batch entries - best path decoder."""
max_time_steps = 6
seq_len_0 = 4
input_prob_matrix_0 = np.asarray(
[[1.0, 0.0, 0.0, 0.0], # t=0
[0.0, 0.0, 0.4, 0.6], # t=1
[0.0, 0.0, 0.4, 0.6], # t=2
[0.0, 0.9, 0.1, 0.0], # t=3
[0.0, 0.0, 0.0, 0.0], # t=4 (ignored)
[0.0, 0.0, 0.0, 0.0]], # t=5 (ignored)
dtype=np.float32)
input_log_prob_matrix_0 = np.log(input_prob_matrix_0)
seq_len_1 = 5
# dimensions are time x depth
input_prob_matrix_1 = np.asarray(
[[0.1, 0.9, 0.0, 0.0], # t=0
[0.0, 0.9, 0.1, 0.0], # t=1
[0.0, 0.0, 0.1, 0.9], # t=2
[0.0, 0.9, 0.1, 0.1], # t=3
[0.9, 0.1, 0.0, 0.0], # t=4
[0.0, 0.0, 0.0, 0.0]], # t=5 (ignored)
dtype=np.float32)
# len max_time_steps array of batch_size x depth matrices
inputs = [np.vstack([input_prob_matrix_0[t, :],
input_prob_matrix_1[t, :]])
for t in range(max_time_steps)]
# change tensorflow order to keras backend order
inputs = KTF.variable(np.asarray(inputs).transpose((1, 0, 2)))
# batch_size length vector of sequence_lengths
input_length = KTF.variable(np.array([seq_len_0, seq_len_1], dtype=np.int32))
# batch_size length vector of negative log probabilities
log_prob_truth = np.array([
np.sum(-np.log([1.0, 0.6, 0.6, 0.9])),
np.sum(-np.log([0.9, 0.9, 0.9, 0.9, 0.9]))
], np.float32)[:, np.newaxis]
# keras output, unlike tensorflow, is a dense (not sparse) tensor
decode_truth = np.array([[0, 1, -1], [1, 1, 0]])
decode_pred_tf, log_prob_pred_tf = KTF.ctc_decode(inputs,
input_length,
greedy=True)
assert len(decode_pred_tf) == 1
decode_pred = KTF.eval(decode_pred_tf[0])
log_prob_pred = KTF.eval(log_prob_pred_tf)
assert np.alltrue(decode_truth == decode_pred)
assert np.allclose(log_prob_truth, log_prob_pred)
def test_ctc_decode_beam_search(self):
"""Test one batch, two beams - hibernating beam search."""
depth = 6
seq_len_0 = 5
input_prob_matrix_0 = np.asarray(
[[0.30999, 0.309938, 0.0679938, 0.0673362, 0.0708352, 0.173908],
[0.215136, 0.439699, 0.0370931, 0.0393967, 0.0381581, 0.230517],
[0.199959, 0.489485, 0.0233221, 0.0251417, 0.0233289, 0.238763],
[0.279611, 0.452966, 0.0204795, 0.0209126, 0.0194803, 0.20655],
[0.51286, 0.288951, 0.0243026, 0.0220788, 0.0219297, 0.129878],
# Random entry added in at time=5
[0.155251, 0.164444, 0.173517, 0.176138, 0.169979, 0.160671]],
dtype=np.float32)
# len max_time_steps array of batch_size x depth matrices
inputs = ([input_prob_matrix_0[t, :][np.newaxis, :]
                   for t in range(seq_len_0)] +  # Pad to max_time_steps = 7
2 * [np.zeros((1, depth), dtype=np.float32)])
inputs = KTF.variable(np.asarray(inputs).transpose((1, 0, 2)))
# batch_size length vector of sequence_lengths
input_length = KTF.variable(np.array([seq_len_0], dtype=np.int32))
# batch_size length vector of negative log probabilities
log_prob_truth = np.array([
0.584855, # output beam 0
0.389139 # output beam 1
], np.float32)[np.newaxis, :]
decode_truth = [np.array([1, 0]), np.array([0, 1, 0])]
beam_width = 2
top_paths = 2
decode_pred_tf, log_prob_pred_tf = KTF.ctc_decode(inputs,
input_length,
greedy=False,
beam_width=beam_width,
top_paths=top_paths)
assert len(decode_pred_tf) == top_paths
log_prob_pred = KTF.eval(log_prob_pred_tf)
for i in range(top_paths):
assert np.alltrue(decode_truth[i] == KTF.eval(decode_pred_tf[i]))
assert np.allclose(log_prob_truth, log_prob_pred)
def test_one_hot(self):
input_length = 10
nb_classes = 20
batch_size = 30
indices = np.random.randint(0, nb_classes, size=(batch_size, input_length))
oh = np.eye(nb_classes)[indices]
for K in [KTH, KTF]:
koh = K.eval(K.one_hot(K.variable(indices, dtype='int32'), nb_classes))
assert np.all(koh == oh)
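        # Side note (illustrative, not part of the original check): the one-hot
        # construction above relies on fancy-indexing an identity matrix, e.g.
        # selecting rows 0 and 2 of np.eye(3) yields the matching one-hot rows.
        assert np.array_equal(np.eye(3)[[0, 2]],
                              np.array([[1., 0., 0.], [0., 0., 1.]]))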
def test_sparse_dot(self):
x_d = np.array([0, 7, 2, 3], dtype=np.float32)
x_r = np.array([0, 2, 2, 3], dtype=np.int64)
x_c = np.array([4, 3, 2, 3], dtype=np.int64)
x_sparse = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))
x_dense = x_sparse.toarray()
W = np.random.random((5, 4))
backends = [KTF]
if KTH.th_sparse_module:
# Theano has some dependency issues for sparse
backends.append(KTH)
for K in backends:
t_W = K.variable(W)
k_s = K.eval(K.dot(K.variable(x_sparse), t_W))
k_d = K.eval(K.dot(K.variable(x_dense), t_W))
assert k_s.shape == k_d.shape
assert_allclose(k_s, k_d, atol=1e-05)
def test_sparse_concat(self):
x_d = np.array([0, 7, 2, 3], dtype=np.float32)
x_r = np.array([0, 2, 2, 3], dtype=np.int64)
x_c = np.array([4, 3, 2, 3], dtype=np.int64)
x_sparse_1 = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))
x_d = np.array([0, 7, 2, 3], dtype=np.float32)
x_r = np.array([0, 2, 2, 3], dtype=np.int64)
x_c = np.array([4, 3, 2, 3], dtype=np.int64)
x_sparse_2 = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))
x_dense_1 = x_sparse_1.toarray()
x_dense_2 = x_sparse_2.toarray()
backends = [KTF]
if KTH.th_sparse_module:
# Theano has some dependency issues for sparse
backends.append(KTH)
for K in backends:
k_s = K.concatenate([K.variable(x_sparse_1), K.variable(x_sparse_2)])
assert K.is_sparse(k_s)
k_s_d = K.eval(k_s)
k_d = K.eval(K.concatenate([K.variable(x_dense_1), K.variable(x_dense_2)]))
assert k_s_d.shape == k_d.shape
assert_allclose(k_s_d, k_d, atol=1e-05)
if __name__ == '__main__':
pytest.main([__file__])
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build MSan instrumented libs on Google Container Builder."""
# Usage:
# 1. build_msan_libs.py [--no-track-origins] build_packages
# 2. Wait for builds to complete on
# https://console.cloud.google.com/gcr/builds?project=google.com:clusterfuzz
# 3. Once all builds have succeeded, run:
# build_msan_libs.py [--no-track-origins] merge
import argparse
import datetime
from googleapiclient.discovery import build
UPLOAD_BUCKET = 'clusterfuzz-chromium-msan-libs'
DISTRO_VERSION = '16.04'
BUILD_TIMEOUT = 2 * 60 * 60
# For Chromium on Ubuntu 16.04
PACKAGES = [
'libappindicator3-1',
'libasound2',
'libatk1.0-0',
'libatk-bridge2.0-0',
'libatspi2.0-0',
'libavahi-client3',
'libavahi-common3',
'libcairo2',
'libcairo-gobject2',
'libcap2',
'libcomerr2',
'libcroco3',
'libcups2',
'libdatrie1',
'libdbus-1-3',
'libdbusmenu-glib4',
'libdbusmenu-gtk3-4',
'libepoxy0',
'libexpat1',
'libffi6',
'libfontconfig1',
'libfreetype6',
'libgcrypt20',
'libgdk-pixbuf2.0-0',
'libglib2.0-0',
'libgmp10',
'libgnutls30',
'libgpg-error0',
'libgraphite2-3',
'libgssapi-krb5-2',
'libgtk-3-0',
'libharfbuzz0b',
'libhogweed4',
'libidn11',
'libido3-0.1-0',
'libindicator3-7',
'libk5crypto3',
'libkeyutils1',
'libkrb5-3',
'libkrb5support0',
'liblz4-1',
'liblzma5',
'libnettle6',
'libnspr4',
'libnss3',
'libp11-kit0',
'libpango-1.0-0',
'libpangocairo-1.0-0',
'libpangoft2-1.0-0',
'libpci3',
'libpcre3',
'libpixman-1-0',
'libpng12-0',
'libpulse0',
'librsvg2-2',
'libselinux1',
'libsqlite3-0',
'libsystemd0',
'libtasn1-6',
'libthai0',
'libudev1',
'libwayland-client0',
'libwayland-cursor0',
'libx11-6',
'libx11-xcb1',
'libxau6',
'libxcb1',
'libxcb-render0',
'libxcb-shm0',
'libxcomposite1',
'libxcursor1',
'libxdamage1',
'libxdmcp6',
'libxext6',
'libxfixes3',
'libxi6',
'libxinerama1',
'libxkbcommon0',
'libxml2',
'libxrandr2',
'libxrender1',
'libxss1',
'libxtst6',
'zlib1g',
]
def bucket_path(no_track_origins):
"""Return the bucket path to upload to."""
if no_track_origins:
subdir = 'no-origins'
else:
subdir = 'chained-origins'
return 'gs://%s/%s/%s' % (UPLOAD_BUCKET, DISTRO_VERSION, subdir)
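# Illustrative results (derived from the constants above, not a separate config):
# with no_track_origins=False this returns
#   gs://clusterfuzz-chromium-msan-libs/16.04/chained-origins
# and with no_track_origins=True
#   gs://clusterfuzz-chromium-msan-libs/16.04/no-origins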
def build_steps(package_name, no_track_origins=False):
"""Return build steps for a package."""
zip_name = package_name + '.zip'
build_args = ['msan_build.py', '--no-build-deps', package_name, '/workspace']
if no_track_origins:
build_args.append('--no-track-origins')
return [
{
# Build package.
'args': build_args,
# Use OSS-Fuzz's MSan builder.
'name': 'gcr.io/oss-fuzz-base/base-msan-builder',
},
{
# Zip results.
'args': ['zip', '-r', '-y', zip_name, '.'],
'name': 'gcr.io/oss-fuzz-base/base-msan-builder',
},
{
# Upload.
'args': [
'cp',
zip_name,
'%s/packages/%s' % (bucket_path(no_track_origins), zip_name),
],
'name':
'gcr.io/cloud-builders/gsutil',
},
]
def get_build(steps):
"""Get a build given steps."""
return {
'steps': steps,
'timeout': str(BUILD_TIMEOUT) + 's',
'options': {
'machineType': 'N1_HIGHCPU_8',
},
}
def merge_steps(no_track_origins=False):
"""Get merge steps to merge individual packages into a single zip."""
timestamp = datetime.datetime.utcnow().strftime('%Y%m%d%H%M')
filename = 'latest-%s.zip' % timestamp
return [
{
# Download all individual packages.
'args': [
'-m', 'cp', '-r',
bucket_path(no_track_origins) + '/packages/', '.'
],
'name':
'gcr.io/cloud-builders/gsutil',
},
{
# Extract.
'args': [
'bash',
'-c',
'mkdir all && cd all && unzip -o "../packages/*.zip"',
],
'name':
'gcr.io/oss-fuzz-base/base-msan-builder',
},
{
# Zip.
'args': [
'bash', '-c',
'find -L -name \'*.so*\' | zip -y %s -@' % filename
],
'dir':
'all',
'name':
'gcr.io/oss-fuzz-base/base-msan-builder',
},
{
# Upload.
'args': [
'cp',
filename,
bucket_path(no_track_origins) + '/' + filename,
],
'dir':
'all',
'name':
'gcr.io/cloud-builders/gsutil',
},
]
def start_build(cloudbuild, build_body):
"""Start a build."""
build_info = cloudbuild.projects().builds().create(
projectId='google.com:clusterfuzz', body=build_body).execute()
return build_info['metadata']['build']['id']
def main():
parser = argparse.ArgumentParser(
'build_msan_libs.py', description='MSan builder.')
parser.add_argument(
'--no-track-origins',
action='store_true',
help='Build with -fsanitize-memory-track-origins=0.')
parser.add_argument(
'command',
choices=['build_packages', 'merge'],
help='The command to run.')
args = parser.parse_args()
cloudbuild = build('cloudbuild', 'v1', cache_discovery=False)
if args.command == 'build_packages':
for package in PACKAGES:
build_body = get_build(build_steps(package, args.no_track_origins))
print(start_build(cloudbuild, build_body))
else: # merge
print(
start_build(cloudbuild, get_build(merge_steps(args.no_track_origins))))
if __name__ == '__main__':
main()
|
|
import socket
import sys
import threading
import time
class Server:
def __init__(self, port, listen=20):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = port
self.listen = listen
def start(self):
self.socket.bind(('',self.port))
self.socket.listen(self.listen)
def stop(self):
self.socket.close()
def accept(self):
return self.socket.accept()
class Gameserver:
def __init__(self, port=2020, maxplayer=5):
self.port = port
self.server = Server(port, maxplayer)
self.live = 0
self.stat = []
self.livecon = {}
def start(self):
try:
self.server.start()
except OSError as e:
print("Port or adress already in use!\nTerminating")
sys.exit()
self.running = True
while self.running:
try:
threading.Thread(target=self.handle,args=self.server.accept(),daemon=True).start()
except Exception as e:
self.stop()
def stop(self):
self.running = False
time.sleep(2.0)
self.server.stop()
def handle(self, conn, addr):
self.live += 1
def smsg(msg):
conn.send(msg.encode("utf-8"))
connect = str(conn.recv(64), "utf-8")
if connect.startswith("/connect"):
toconsole("new client connected [{}]".format(self.live))
else:
toconsole("client using wrong protocol ({}) to connect: \"{}\"".format(addr[0],connect))
self.live -= 1
return False
username = str(conn.recv(2048), "utf-8")
if username.startswith("/username "):
username = username[10:].split()[0]
else:
toconsole("client using wrong protocol ({}) for username".format(addr[0]))
self.live -= 1
return False
toconsole("{} joined the game".format(username))
self.stat.append(username)
self.livecon[username]=(conn, self.live)
self.chat=[]
if self.live == 3:
self.giveRoles()
def move(direc):
oldx ,oldy = self.positions[username]
self.field[oldy][oldx]["player"].remove(username)
oldc = ""
newc = ""
if direc=="/left" and oldx>0:
self.positions[username]=(oldx-1,oldy)
oldc = "W"
newc = "o"
elif direc=="/right" and oldx<2:
self.positions[username]=(oldx+1,oldy)
oldc = "O"
newc = "w"
elif direc=="/up" and oldy>0:
self.positions[username]=(oldx,oldy-1)
oldc = "N"
newc = "s"
elif direc=="/down" and oldy<2:
self.positions[username]=(oldx,oldy+1)
oldc = "S"
newc = "n"
x,y =self.positions[username]
self.field[oldy][oldx]["history"]+=oldc
self.field[y][x]["history"]+=newc
self.field[y][x]["player"].append(username)
toconsole("{} going left from [{};{}] to [{};{}]".format(username, oldx,oldy,x,y))
return "/youpos {} {}".format(x,y)
while self.running:
try:
if self.live < 3:
smsg("/waiting for other players [{}/3]".format(self.live))
time.sleep(.1)
if str(conn.recv(1024), "utf-8") == "/ok wait":
time.sleep(3.0)
else:
toconsole(username + " error while waiting")
raise Exception
else:
resp = "{} {}".format(str(conn.recv(512), "utf-8"),"/end/")
trimr = resp[:-6]
toconsole(username + " " + resp)
stringhist = ""
persadd =""
if resp[0]=="/":
cmd, arg = resp.split(maxsplit=1)
if cmd == "/chat":
self.chat.append("/chat {}: {}".format(username ,arg[:-6]))
if cmd == "/left" or cmd == "/right" or cmd =="/up" or cmd == "/down":
persadd += move(cmd) + "\n"
if cmd == "/gotit":
pass
for line in self.chat[-5:]:
stringhist += line + "\n"
for name in self.escapee:
thisadd = ""
if name == username:
toconsole("sending own info log to {}".format(name))
self.sendto(name, stringhist+persadd)
else:
toconsole("sending info log to {}".format(name))
if self.positions[name] == self.positions[username]:
thisadd = "/meet {}\n".format(username)
self.sendto(name, stringhist+thisadd)
except Exception as e:
toconsole(username + " generic error")
break
conn.close()
del self.livecon[username]
self.stat.remove(username)
self.live -= 1
toconsole("{} left the game".format(username))
def smsl(self, conn, msg):
conn.send(msg.encode("utf-8"))
def giveRoles(self):
self.field = [[{},{},{}],[{},{},{}],[{},{},{}]]
self.escapee = []
self.positions = {}
import random
for row in self.field:
for cell in row:
cell["history"]=""
cell["player"]=[]
for key in self.livecon:
if self.livecon[key][1] == 1:
self.smsl(self.livecon[key][0],"/you MURDERER")
toconsole("{} is MURDERER".format(key))
self.murderer = [key]
else:
scapcon = self.livecon[key][0]
self.smsl(scapcon,"/you ESCAPEE")
x=random.randrange(0,3)
y=random.randrange(0,3)
self.positions[key]=(x,y)
self.field[y][x]["player"].append(key)
self.field[y][x]["history"]+="B"
toconsole("{} is ESCAPEE in [{};{}]".format(key,x,y))
self.escapee.append(key)
time.sleep(.2)
self.smsl(scapcon, "/youpos {} {}".format(x,y))
time.sleep(.5)
self.startround = time.time()
def tellMurderer(self):
time.sleep(55)
self.sendto(self.murderer[0],"/startsoon")
def checkIfHidden(self):
time.sleep(60)
self.sendall("/murdernow")
#threading.Thread(target=tellMurderer, args=(self),daemon=True).start()
#threading.Thread(target=checkIfHidden,args=(self),daemon=True).start()
def sendall(self, msg):
for key in self.livecon:
self.smsl(self.livecon[key][0],msg)
def sendto(self, client, msg):
for key in self.livecon:
if key == client:
self.smsl(self.livecon[key][0],msg)
def toconsole(msg):
print("{}\nS> ".format(msg), end="")
if __name__ == '__main__':
s = Gameserver(2020)
server = threading.Thread(target=s.start, daemon=True)
server.start()
helpstr = """
help
show this list
stat
returns list of all players
live
returns number of live connections
lcon
returns dictionary of player pos
kill
kills the server
sendall <msg>
sends the msg to all players
send <player> <msg>
sends the msg to player
newr
starts new round
"""
while True:
try:
cmd = input("Command: ")
cml = cmd.split()
if cml[0] == "stat":
print(s.stat)
elif cml[0] == "help":
print(helpstr)
elif cml[0] == "live":
print(s.live)
elif cml[0] == "kill":
server.join(.5)
s.stop()
del s
break
elif cml[0] == "lcon":
print(s.livecon)
elif cml[0] == "sendall":
s.sendall(cmd[8:])
elif cml[0] == "send":
s.sendto(cml[1], cml[2])
elif cml[0] == "newr":
server.join(.2)
s.stop()
time.sleep(1.5)
del s
s = Gameserver(2020)
server = threading.Thread(target=s.start, daemon=True)
server.start()
except IndexError:
print("wrong command")
except KeyboardInterrupt:
pass
sys.exit()
|
|
"""Provides functionality to interact with climate devices."""
from abc import abstractmethod
from datetime import timedelta
import functools as ft
import logging
from typing import Any, Dict, List, Optional
import voluptuous as vol
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_TENTHS,
PRECISION_WHOLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
ENTITY_SERVICE_SCHEMA,
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.temperature import display_temp as show_temp
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceDataType
from homeassistant.util.temperature import convert as convert_temperature
from .const import (
ATTR_AUX_HEAT,
ATTR_CURRENT_HUMIDITY,
ATTR_CURRENT_TEMPERATURE,
ATTR_FAN_MODE,
ATTR_FAN_MODES,
ATTR_HUMIDITY,
ATTR_HVAC_ACTION,
ATTR_HVAC_MODE,
ATTR_HVAC_MODES,
ATTR_MAX_HUMIDITY,
ATTR_MAX_TEMP,
ATTR_MIN_HUMIDITY,
ATTR_MIN_TEMP,
ATTR_PRESET_MODE,
ATTR_PRESET_MODES,
ATTR_SWING_MODE,
ATTR_SWING_MODES,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
ATTR_TARGET_TEMP_STEP,
DOMAIN,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
HVAC_MODES,
SERVICE_SET_AUX_HEAT,
SERVICE_SET_FAN_MODE,
SERVICE_SET_HUMIDITY,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_PRESET_MODE,
SERVICE_SET_SWING_MODE,
SERVICE_SET_TEMPERATURE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE_RANGE,
SUPPORT_TARGET_TEMPERATURE,
)
DEFAULT_MIN_TEMP = 7
DEFAULT_MAX_TEMP = 35
DEFAULT_MIN_HUMIDITY = 30
DEFAULT_MAX_HUMIDITY = 99
ENTITY_ID_FORMAT = DOMAIN + ".{}"
SCAN_INTERVAL = timedelta(seconds=60)
CONVERTIBLE_ATTRIBUTE = [ATTR_TEMPERATURE, ATTR_TARGET_TEMP_LOW, ATTR_TARGET_TEMP_HIGH]
_LOGGER = logging.getLogger(__name__)
SET_AUX_HEAT_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_AUX_HEAT): cv.boolean}
)
SET_TEMPERATURE_SCHEMA = vol.Schema(
vol.All(
cv.has_at_least_one_key(
ATTR_TEMPERATURE, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW
),
ENTITY_SERVICE_SCHEMA.extend(
{
vol.Exclusive(ATTR_TEMPERATURE, "temperature"): vol.Coerce(float),
vol.Inclusive(ATTR_TARGET_TEMP_HIGH, "temperature"): vol.Coerce(float),
vol.Inclusive(ATTR_TARGET_TEMP_LOW, "temperature"): vol.Coerce(float),
vol.Optional(ATTR_HVAC_MODE): vol.In(HVAC_MODES),
}
),
)
)
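# Illustrative service payloads accepted by SET_TEMPERATURE_SCHEMA (entity ids
# and values are made up): either a single target, e.g.
#   {"entity_id": "climate.living_room", "temperature": 21.5}
# or a low/high pair (which must be given together and is exclusive with
# "temperature"), optionally combined with an hvac_mode from HVAC_MODES, e.g.
#   {"entity_id": "climate.living_room", "target_temp_low": 19.0,
#    "target_temp_high": 23.0, "hvac_mode": "heat_cool"}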
SET_FAN_MODE_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_FAN_MODE): cv.string}
)
SET_PRESET_MODE_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_PRESET_MODE): cv.string}
)
SET_HVAC_MODE_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_HVAC_MODE): vol.In(HVAC_MODES)}
)
SET_HUMIDITY_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_HUMIDITY): vol.Coerce(float)}
)
SET_SWING_MODE_SCHEMA = ENTITY_SERVICE_SCHEMA.extend(
{vol.Required(ATTR_SWING_MODE): cv.string}
)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up climate devices."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_TURN_ON, ENTITY_SERVICE_SCHEMA, "async_turn_on"
)
component.async_register_entity_service(
SERVICE_TURN_OFF, ENTITY_SERVICE_SCHEMA, "async_turn_off"
)
component.async_register_entity_service(
SERVICE_SET_HVAC_MODE, SET_HVAC_MODE_SCHEMA, "async_set_hvac_mode"
)
component.async_register_entity_service(
SERVICE_SET_PRESET_MODE, SET_PRESET_MODE_SCHEMA, "async_set_preset_mode"
)
component.async_register_entity_service(
SERVICE_SET_AUX_HEAT, SET_AUX_HEAT_SCHEMA, async_service_aux_heat
)
component.async_register_entity_service(
SERVICE_SET_TEMPERATURE, SET_TEMPERATURE_SCHEMA, async_service_temperature_set
)
component.async_register_entity_service(
SERVICE_SET_HUMIDITY, SET_HUMIDITY_SCHEMA, "async_set_humidity"
)
component.async_register_entity_service(
SERVICE_SET_FAN_MODE, SET_FAN_MODE_SCHEMA, "async_set_fan_mode"
)
component.async_register_entity_service(
SERVICE_SET_SWING_MODE, SET_SWING_MODE_SCHEMA, "async_set_swing_mode"
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass: HomeAssistantType, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class ClimateDevice(Entity):
"""Representation of a climate device."""
@property
def state(self) -> str:
"""Return the current state."""
return self.hvac_mode
@property
def precision(self) -> float:
"""Return the precision of the system."""
if self.hass.config.units.temperature_unit == TEMP_CELSIUS:
return PRECISION_TENTHS
return PRECISION_WHOLE
@property
def state_attributes(self) -> Dict[str, Any]:
"""Return the optional state attributes."""
supported_features = self.supported_features
data = {
ATTR_HVAC_MODES: self.hvac_modes,
ATTR_CURRENT_TEMPERATURE: show_temp(
self.hass,
self.current_temperature,
self.temperature_unit,
self.precision,
),
ATTR_MIN_TEMP: show_temp(
self.hass, self.min_temp, self.temperature_unit, self.precision
),
ATTR_MAX_TEMP: show_temp(
self.hass, self.max_temp, self.temperature_unit, self.precision
),
}
if self.target_temperature_step:
data[ATTR_TARGET_TEMP_STEP] = self.target_temperature_step
if supported_features & SUPPORT_TARGET_TEMPERATURE:
data[ATTR_TEMPERATURE] = show_temp(
self.hass,
self.target_temperature,
self.temperature_unit,
self.precision,
)
if supported_features & SUPPORT_TARGET_TEMPERATURE_RANGE:
data[ATTR_TARGET_TEMP_HIGH] = show_temp(
self.hass,
self.target_temperature_high,
self.temperature_unit,
self.precision,
)
data[ATTR_TARGET_TEMP_LOW] = show_temp(
self.hass,
self.target_temperature_low,
self.temperature_unit,
self.precision,
)
if self.current_humidity is not None:
data[ATTR_CURRENT_HUMIDITY] = self.current_humidity
if supported_features & SUPPORT_TARGET_HUMIDITY:
data[ATTR_HUMIDITY] = self.target_humidity
data[ATTR_MIN_HUMIDITY] = self.min_humidity
data[ATTR_MAX_HUMIDITY] = self.max_humidity
if supported_features & SUPPORT_FAN_MODE:
data[ATTR_FAN_MODE] = self.fan_mode
data[ATTR_FAN_MODES] = self.fan_modes
if self.hvac_action:
data[ATTR_HVAC_ACTION] = self.hvac_action
if supported_features & SUPPORT_PRESET_MODE:
data[ATTR_PRESET_MODE] = self.preset_mode
data[ATTR_PRESET_MODES] = self.preset_modes
if supported_features & SUPPORT_SWING_MODE:
data[ATTR_SWING_MODE] = self.swing_mode
data[ATTR_SWING_MODES] = self.swing_modes
if supported_features & SUPPORT_AUX_HEAT:
data[ATTR_AUX_HEAT] = STATE_ON if self.is_aux_heat else STATE_OFF
return data
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement used by the platform."""
raise NotImplementedError()
@property
def current_humidity(self) -> Optional[int]:
"""Return the current humidity."""
return None
@property
def target_humidity(self) -> Optional[int]:
"""Return the humidity we try to reach."""
return None
@property
@abstractmethod
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
@property
@abstractmethod
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
@property
def hvac_action(self) -> Optional[str]:
"""Return the current running hvac operation if supported.
Need to be one of CURRENT_HVAC_*.
"""
return None
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return None
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
return None
@property
def target_temperature_step(self) -> Optional[float]:
"""Return the supported step of target temperature."""
return None
@property
def target_temperature_high(self) -> Optional[float]:
"""Return the highbound target temperature we try to reach.
Requires SUPPORT_TARGET_TEMPERATURE_RANGE.
"""
raise NotImplementedError
@property
def target_temperature_low(self) -> Optional[float]:
"""Return the lowbound target temperature we try to reach.
Requires SUPPORT_TARGET_TEMPERATURE_RANGE.
"""
raise NotImplementedError
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode, e.g., home, away, temp.
Requires SUPPORT_PRESET_MODE.
"""
raise NotImplementedError
@property
def preset_modes(self) -> Optional[List[str]]:
"""Return a list of available preset modes.
Requires SUPPORT_PRESET_MODE.
"""
raise NotImplementedError
@property
def is_aux_heat(self) -> Optional[bool]:
"""Return true if aux heater.
Requires SUPPORT_AUX_HEAT.
"""
raise NotImplementedError
@property
def fan_mode(self) -> Optional[str]:
"""Return the fan setting.
Requires SUPPORT_FAN_MODE.
"""
raise NotImplementedError
@property
def fan_modes(self) -> Optional[List[str]]:
"""Return the list of available fan modes.
Requires SUPPORT_FAN_MODE.
"""
raise NotImplementedError
@property
def swing_mode(self) -> Optional[str]:
"""Return the swing setting.
Requires SUPPORT_SWING_MODE.
"""
raise NotImplementedError
@property
def swing_modes(self) -> Optional[List[str]]:
"""Return the list of available swing modes.
Requires SUPPORT_SWING_MODE.
"""
raise NotImplementedError
def set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
raise NotImplementedError()
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
await self.hass.async_add_executor_job(
ft.partial(self.set_temperature, **kwargs)
)
def set_humidity(self, humidity: int) -> None:
"""Set new target humidity."""
raise NotImplementedError()
async def async_set_humidity(self, humidity: int) -> None:
"""Set new target humidity."""
await self.hass.async_add_executor_job(self.set_humidity, humidity)
def set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
raise NotImplementedError()
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
await self.hass.async_add_executor_job(self.set_fan_mode, fan_mode)
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
raise NotImplementedError()
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
await self.hass.async_add_executor_job(self.set_hvac_mode, hvac_mode)
def set_swing_mode(self, swing_mode: str) -> None:
"""Set new target swing operation."""
raise NotImplementedError()
async def async_set_swing_mode(self, swing_mode: str) -> None:
"""Set new target swing operation."""
await self.hass.async_add_executor_job(self.set_swing_mode, swing_mode)
def set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
raise NotImplementedError()
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
await self.hass.async_add_executor_job(self.set_preset_mode, preset_mode)
def turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
raise NotImplementedError()
async def async_turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
await self.hass.async_add_executor_job(self.turn_aux_heat_on)
def turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater off."""
raise NotImplementedError()
async def async_turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater off."""
await self.hass.async_add_executor_job(self.turn_aux_heat_off)
async def async_turn_on(self) -> None:
"""Turn the entity on."""
if hasattr(self, "turn_on"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.turn_on)
return
# Fake turn on
for mode in (HVAC_MODE_HEAT_COOL, HVAC_MODE_HEAT, HVAC_MODE_COOL):
if mode not in self.hvac_modes:
continue
await self.async_set_hvac_mode(mode)
break
async def async_turn_off(self) -> None:
"""Turn the entity off."""
if hasattr(self, "turn_off"):
# pylint: disable=no-member
await self.hass.async_add_executor_job(self.turn_off)
return
# Fake turn off
if HVAC_MODE_OFF in self.hvac_modes:
await self.async_set_hvac_mode(HVAC_MODE_OFF)
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
raise NotImplementedError()
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return convert_temperature(
DEFAULT_MIN_TEMP, TEMP_CELSIUS, self.temperature_unit
)
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return convert_temperature(
DEFAULT_MAX_TEMP, TEMP_CELSIUS, self.temperature_unit
)
@property
def min_humidity(self) -> int:
"""Return the minimum humidity."""
return DEFAULT_MIN_HUMIDITY
@property
def max_humidity(self) -> int:
"""Return the maximum humidity."""
return DEFAULT_MAX_HUMIDITY
async def async_service_aux_heat(
entity: ClimateDevice, service: ServiceDataType
) -> None:
"""Handle aux heat service."""
if service.data[ATTR_AUX_HEAT]:
await entity.async_turn_aux_heat_on()
else:
await entity.async_turn_aux_heat_off()
async def async_service_temperature_set(
entity: ClimateDevice, service: ServiceDataType
) -> None:
"""Handle set temperature service."""
hass = entity.hass
kwargs = {}
for value, temp in service.data.items():
if value in CONVERTIBLE_ATTRIBUTE:
kwargs[value] = convert_temperature(
temp, hass.config.units.temperature_unit, entity.temperature_unit
)
else:
kwargs[value] = temp
await entity.async_set_temperature(**kwargs)
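class _ExampleClimateDevice(ClimateDevice):
    """Minimal illustrative subclass, not part of the component above.

    It sketches the smallest surface a platform needs to provide: a
    temperature unit, the current and available HVAC modes, a target
    temperature and a supported-features bitmask.
    """

    def __init__(self) -> None:
        self._hvac_mode = HVAC_MODE_OFF
        self._target_temperature = 21.0

    @property
    def temperature_unit(self) -> str:
        return TEMP_CELSIUS

    @property
    def hvac_mode(self) -> str:
        return self._hvac_mode

    @property
    def hvac_modes(self) -> List[str]:
        return [HVAC_MODE_OFF, HVAC_MODE_HEAT]

    @property
    def target_temperature(self) -> Optional[float]:
        return self._target_temperature

    @property
    def supported_features(self) -> int:
        return SUPPORT_TARGET_TEMPERATURE

    def set_hvac_mode(self, hvac_mode: str) -> None:
        self._hvac_mode = hvac_mode

    def set_temperature(self, **kwargs) -> None:
        if ATTR_TEMPERATURE in kwargs:
            self._target_temperature = kwargs[ATTR_TEMPERATURE]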
|
|
import os
import json
import logging
import mimetypes
import md5
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from django.views.generic import ListView, DetailView
from django.utils.datastructures import SortedDict
from taggit.models import Tag
import requests
from .base import ProjectOnboardMixin
from builds.filters import VersionSlugFilter
from builds.models import Version
from projects.models import Project, ImportedFile
from search.indexes import PageIndex
from search.views import LOG_TEMPLATE
log = logging.getLogger(__name__)
search_log = logging.getLogger(__name__ + '.search')
mimetypes.add_type("application/epub+zip", ".epub")
class ProjectIndex(ListView):
model = Project
def get_queryset(self):
queryset = Project.objects.public(self.request.user)
if self.kwargs.get('tag'):
self.tag = get_object_or_404(Tag, slug=self.kwargs.get('tag'))
queryset = queryset.filter(tags__name__in=[self.tag.slug])
else:
self.tag = None
if self.kwargs.get('username'):
self.user = get_object_or_404(User, username=self.kwargs.get('username'))
queryset = queryset.filter(user=self.user)
else:
self.user = None
return queryset
def get_context_data(self, **kwargs):
context = super(ProjectIndex, self).get_context_data(**kwargs)
context['person'] = self.user
context['tag'] = self.tag
return context
project_index = ProjectIndex.as_view()
class ProjectDetailView(ProjectOnboardMixin, DetailView):
'''Display project onboard steps'''
model = Project
slug_url_kwarg = 'project_slug'
def get_queryset(self):
return Project.objects.protected(self.request.user)
def get_context_data(self, **kwargs):
context = super(ProjectDetailView, self).get_context_data(**kwargs)
project = self.get_object()
context['versions'] = Version.objects.public(
user=self.request.user, project=project)
context['filter'] = VersionSlugFilter(self.request.GET,
queryset=context['versions'])
protocol = 'http'
if self.request.is_secure():
protocol = 'https'
context['badge_url'] = "%s://%s%s?version=%s" % (
protocol,
settings.PRODUCTION_DOMAIN,
reverse('project_badge', args=[project.slug]),
project.get_default_version(),
)
context['site_url'] = "%s://%s%s?badge=%s" % (
protocol,
settings.PRODUCTION_DOMAIN,
reverse('projects_detail', args=[project.slug]),
project.get_default_version(),
)
return context
def _badge_return(redirect, url):
if redirect:
return HttpResponseRedirect(url)
else:
response = requests.get(url)
http_response = HttpResponse(response.content, mimetype="image/svg+xml")
http_response['Cache-Control'] = 'no-cache'
        http_response['Etag'] = md5.new(url).hexdigest()
return http_response
def project_badge(request, project_slug, redirect=False):
"""
Return a sweet badge for the project
"""
version_slug = request.GET.get('version', 'latest')
style = request.GET.get('style', 'flat')
try:
version = Version.objects.public(request.user).get(project__slug=project_slug, slug=version_slug)
except Version.DoesNotExist:
url = 'http://img.shields.io/badge/docs-unknown%20version-yellow.svg?style={style}'.format(style=style)
return _badge_return(redirect, url)
version_builds = version.builds.filter(type='html', state='finished').order_by('-date')
if not version_builds.exists():
url = 'http://img.shields.io/badge/docs-no%20builds-yellow.svg?style={style}'.format(style=style)
return _badge_return(redirect, url)
last_build = version_builds[0]
if last_build.success:
color = 'brightgreen'
else:
color = 'red'
url = 'http://img.shields.io/badge/docs-%s-%s.svg?style=%s' % (version.slug.replace('-', '--'), color, style)
return _badge_return(redirect, url)
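# Illustrative badge URLs produced above (slug and style values are assumptions):
#   unknown version  -> http://img.shields.io/badge/docs-unknown%20version-yellow.svg?style=flat
#   passing 'latest' -> http://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat
#   failing 'latest' -> http://img.shields.io/badge/docs-latest-red.svg?style=flat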
def project_downloads(request, project_slug):
"""
A detail view for a project with various dataz
"""
project = get_object_or_404(Project.objects.protected(request.user), slug=project_slug)
versions = Version.objects.public(user=request.user, project=project)
version_data = SortedDict()
for version in versions:
data = version.get_downloads()
# Don't show ones that have no downloads.
if data:
version_data[version.slug] = data
# in case the MEDIA_URL is a protocol relative URL we just assume
    # we want http as the protocol, so that Dash is able to handle the URL
if settings.MEDIA_URL.startswith('//'):
media_url_prefix = u'http:'
# but in case we're in debug mode and the MEDIA_URL is just a path
# we prefix it with a hardcoded host name and protocol
elif settings.MEDIA_URL.startswith('/') and settings.DEBUG:
media_url_prefix = u'http://%s' % request.get_host()
else:
media_url_prefix = ''
return render_to_response(
'projects/project_downloads.html',
{
'project': project,
'version_data': version_data,
'versions': versions,
'media_url_prefix': media_url_prefix,
},
context_instance=RequestContext(request),
)
def project_download_media(request, project_slug, type, version_slug):
"""
Download a specific piece of media.
Perform an auth check if serving in private mode.
"""
# Do private project auth checks
queryset = Project.objects.protected(request.user).filter(slug=project_slug)
if not queryset.exists():
raise Http404
DEFAULT_PRIVACY_LEVEL = getattr(settings, 'DEFAULT_PRIVACY_LEVEL', 'public')
if DEFAULT_PRIVACY_LEVEL == 'public' or settings.DEBUG:
path = os.path.join(settings.MEDIA_URL, type, project_slug, version_slug,
'%s.%s' % (project_slug, type.replace('htmlzip', 'zip')))
return HttpResponseRedirect(path)
else:
# Get relative media path
path = queryset[0].get_production_media_path(type=type, version_slug=version_slug).replace(
settings.PRODUCTION_ROOT, '/prod_artifacts'
)
mimetype, encoding = mimetypes.guess_type(path)
mimetype = mimetype or 'application/octet-stream'
response = HttpResponse(mimetype=mimetype)
if encoding:
response["Content-Encoding"] = encoding
response['X-Accel-Redirect'] = path
# Include version in filename; this fixes a long-standing bug
filename = "%s-%s.%s" % (project_slug, version_slug, path.split('.')[-1])
response['Content-Disposition'] = 'filename=%s' % filename
return response
def search_autocomplete(request):
"""
return a json list of project names
"""
if 'term' in request.GET:
term = request.GET['term']
else:
raise Http404
queryset = (Project.objects.public(request.user).filter(name__icontains=term)[:20])
ret_list = []
for project in queryset:
ret_list.append({
'label': project.name,
'value': project.slug,
})
json_response = json.dumps(ret_list)
return HttpResponse(json_response, mimetype='text/javascript')
def version_autocomplete(request, project_slug):
"""
return a json list of version names
"""
queryset = Project.objects.public(request.user)
get_object_or_404(queryset, slug=project_slug)
versions = Version.objects.public(request.user)
if 'term' in request.GET:
term = request.GET['term']
else:
raise Http404
version_queryset = versions.filter(slug__icontains=term)[:20]
names = version_queryset.values_list('slug', flat=True)
json_response = json.dumps(list(names))
return HttpResponse(json_response, mimetype='text/javascript')
def version_filter_autocomplete(request, project_slug):
queryset = Project.objects.public(request.user)
project = get_object_or_404(queryset, slug=project_slug)
versions = Version.objects.public(request.user)
filter = VersionSlugFilter(request.GET, queryset=versions)
format = request.GET.get('format', 'json')
if format == 'json':
names = filter.qs.values_list('slug', flat=True)
json_response = json.dumps(list(names))
return HttpResponse(json_response, mimetype='text/javascript')
elif format == 'html':
return render_to_response(
'core/version_list.html',
{
'project': project,
'versions': versions,
'filter': filter,
},
context_instance=RequestContext(request),
)
else:
        return HttpResponse(status=400)
def file_autocomplete(request, project_slug):
"""
    return a json list of file paths
"""
if 'term' in request.GET:
term = request.GET['term']
else:
raise Http404
queryset = ImportedFile.objects.filter(project__slug=project_slug, path__icontains=term)[:20]
ret_list = []
for file in queryset:
ret_list.append({
'label': file.path,
'value': file.path,
})
json_response = json.dumps(ret_list)
return HttpResponse(json_response, mimetype='text/javascript')
def elastic_project_search(request, project_slug):
"""
Use elastic search to search in a project.
"""
queryset = Project.objects.protected(request.user)
project = get_object_or_404(queryset, slug=project_slug)
version_slug = request.GET.get('version', 'latest')
query = request.GET.get('q', None)
if query:
user = ''
if request.user.is_authenticated():
user = request.user
log.info(LOG_TEMPLATE.format(
user=user,
project=project or '',
type='inproject',
version=version_slug or '',
language='',
msg=query or '',
))
if query:
kwargs = {}
body = {
"query": {
"bool": {
"should": [
{"match": {"title": {"query": query, "boost": 10}}},
{"match": {"headers": {"query": query, "boost": 5}}},
{"match": {"content": {"query": query}}},
]
}
},
"highlight": {
"fields": {
"title": {},
"headers": {},
"content": {},
}
},
"fields": ["title", "project", "version", "path"],
"filter": {
"and": [
{"term": {"project": project_slug}},
{"term": {"version": version_slug}},
]
},
"size": 50 # TODO: Support pagination.
}
# Add routing to optimize search by hitting the right shard.
kwargs['routing'] = project_slug
results = PageIndex().search(body, **kwargs)
else:
results = {}
if results:
# pre and post 1.0 compat
for num, hit in enumerate(results['hits']['hits']):
for key, val in hit['fields'].items():
if isinstance(val, list):
results['hits']['hits'][num]['fields'][key] = val[0]
return render_to_response(
'search/elastic_project_search.html',
{
'project': project,
'query': query,
'results': results,
},
context_instance=RequestContext(request),
)
def project_versions(request, project_slug):
"""
    Shows the available versions and lets the user choose which ones they would
like to have built.
"""
project = get_object_or_404(Project.objects.protected(request.user),
slug=project_slug)
versions = Version.objects.public(user=request.user, project=project, only_active=False)
active_versions = versions.filter(active=True)
inactive_versions = versions.filter(active=False)
inactive_filter = VersionSlugFilter(request.GET, queryset=inactive_versions)
active_filter = VersionSlugFilter(request.GET, queryset=active_versions)
return render_to_response(
'projects/project_version_list.html',
{
'inactive_filter': inactive_filter,
'active_filter': active_filter,
'project': project,
},
context_instance=RequestContext(request)
)
|
|
"""Contains a class for clang binary based completion.
Attributes:
log (logging.Logger): logger for this module
"""
import re
import sublime
import time
import logging
from os import path
from ..utils.tools import Tools
from ..utils.file import File
from ..utils.subl.row_col import ZeroIndexedRowCol
from ..utils.subl.row_col import OneIndexedRowCol
from .base_complete import BaseCompleter
from .compiler_variant import ClangCompilerVariant
from .compiler_variant import ClangClCompilerVariant
log = logging.getLogger("ECC")
DUMMY_INFO_MSG = """
EasyClangComplete:
"use_libclang" is false
"show_type_info" is true.
Unfortunately, there is no way to show type info
if you are not using libclang.
Please use libclang or set "show_type_info" to false.
If you *are* using libclang and still see this, open an issue.
"""
class Completer(BaseCompleter):
"""Encapsulate completions based on the output from clang_binary.
Attributes:
clang_binary (str): e.g. "clang++" or "clang++-3.6"
flags_dict (dict): compilation flags lists for each view
std_flag (TYPE): std flag, e.g. "std=c++11"
completions (list): current completions
compl_regex (regex): regex to parse raw completion for name and content
        compl_content_regex (regex): regex to parse the content of the
            completion
        opts_regex (regex): regex to detect optional parameter triggers
group_params (str): string for a group to capture function parameters
group_types (str): string for a group to capture type names
group_opts (str): string for a group to capture optional parameters
        PARAM_CHARS (str): chars allowed to be part of a function or type name
PARAM_TAG (str): function params tag for convenience
TYPE_TAG (str): type name tag for convenience
"""
name = "bin"
clang_binary = None
PARAM_TAG = "param"
TYPE_TAG = "type"
PARAM_CHARS = r"\w\s\*\&\<\>:,\(\)\$\{\}!_\."
TYPE_CHARS = r"\w\s\*\&\<\>:,\(\)\$\{\}\[\]!"
group_params = "(?P<{param_tag}>[{param_chars}]+)".format(
param_chars=PARAM_CHARS,
param_tag=PARAM_TAG)
group_types = "(?P<{type_tag}>[{type_chars}]+)".format(
type_tag=TYPE_TAG,
type_chars=TYPE_CHARS)
compl_str_mask = "{complete_flag}={file}:{row}:{col}"
compl_regex = re.compile(r"COMPLETION:\s(?P<name>.*)\s:\s(?P<content>.*)")
compl_content_regex = re.compile(
r"\<#{group_params}#\>|\[#{group_types}#\]".format(
group_params=group_params, group_types=group_types))
opts_regex = re.compile("{#|#}")
def __init__(self, settings, error_vis):
"""Initialize the Completer.
Args:
settings (SettingsStorage): object that stores all settings
error_vis (ErrorVis): an object of error visualizer
"""
# init common completer interface
super().__init__(settings, error_vis)
# Create compiler options of specific variant of the compiler.
filename = path.splitext(path.basename(self.clang_binary))[0]
if filename.startswith('clang-cl'):
self.compiler_variant = ClangClCompilerVariant()
else:
self.compiler_variant = ClangCompilerVariant()
def complete(self, completion_request):
"""Create a list of autocompletions. Called asynchronously.
It builds up a clang command that is then executed
as a subprocess. The output is parsed for completions.
"""
log.debug("completing with cmd command")
view = completion_request.get_view()
start = time.time()
output_text = self.run_clang_command(
view, "complete", completion_request.get_trigger_position())
raw_complete = output_text.splitlines()
end = time.time()
log.debug("code complete done in %s seconds", end - start)
completions = Completer._parse_completions(raw_complete)
        log.debug("completions: %s", completions)
return (completion_request, completions)
def info(self, tooltip_request, settings):
"""Provide information about object in given location.
Using the current translation unit it queries libclang for available
information about cursor.
Args:
tooltip_request (ActionRequest): A request for action
from the plugin.
settings: All plugin settings.
Returns:
(ActionRequest, str): completion request along with the
info details read from the translation unit.
"""
# This is a dummy implementation that just shows an error to the user.
sublime.error_message(DUMMY_INFO_MSG)
def update(self, view, settings):
"""Update build for current view.
Args:
view (sublime.View): this view
            settings (SettingsStorage): plugin settings; when show_errors is
                disabled this is a dummy call, as we gain nothing from
                building the file with the binary.
"""
if not settings.show_errors:
# in this class there is no need to rebuild the file. It brings no
# benefits. We only want to do it if we need to show errors.
return False
start = time.time()
output_text = self.run_clang_command(view, "update")
end = time.time()
log.debug("rebuilding done in %s seconds", end - start)
self.save_errors(output_text)
self.show_errors(view)
def get_declaration_location(self, view, row_col):
"""Get location of declaration from given location in file."""
sublime.error_message("Not supported for this backend.")
def run_clang_command(self, view, task_type, location=0):
"""Construct and run clang command based on task.
Args:
view (sublime.View): current view
task_type (str): one of: {"complete", "update"}
location (int, optional): cursor location
Returns:
str: Output from command
"""
file_body = view.substr(sublime.Region(0, view.size()))
tempdir = File.get_temp_dir(Tools.get_unique_str(view.file_name()))
temp_file_name = path.join(tempdir, path.basename(view.file_name()))
with open(temp_file_name, "w", encoding='utf-8') as tmp_file:
tmp_file.write(file_body)
flags = self.clang_flags
if task_type == "update":
            # we construct command for update task. No alterations needed, so
# just pass here.
pass
elif task_type == "complete":
# we construct command for complete task
file_row_col = OneIndexedRowCol.from_zero_indexed(
ZeroIndexedRowCol.from_1d_location(view, location))
complete_at_str = Completer.compl_str_mask.format(
complete_flag="-code-completion-at",
file=temp_file_name,
row=file_row_col.row,
col=file_row_col.col)
flags += ["-Xclang"] + [complete_at_str]
else:
log.critical(" unknown type of cmd command wanted.")
return None
# construct cmd from building parts
complete_cmd = [self.clang_binary] + flags + [temp_file_name]
# now run this command
log.debug("clang command: \n%s",
" ".join(["'" + s + "'" for s in complete_cmd]))
return Tools.run_command(complete_cmd)
@staticmethod
def _parse_completions(complete_results):
"""Create snippet-like structures from a list of completions.
Args:
complete_results (list): raw completions list
Returns:
list: updated completions
"""
class Parser:
"""Help class to parse completions with regex.
Attributes:
place_holders (int): number of place holders in use
"""
def __init__(self):
self.place_holders = 0
def tokenize_params(self, match):
"""Create tockens from a match.
Used as part or re.sub function.
Args:
match (re.match): current match
Returns:
str: current match, wrapped in snippet
"""
dict_match = match.groupdict()
if dict_match[Completer.PARAM_TAG]:
self.place_holders += 1
return "${{{count}:{text}}}".format(
count=self.place_holders,
text=dict_match[Completer.PARAM_TAG])
return ''
@staticmethod
def make_pretty(match):
"""Process raw match and remove ugly placeholders.
Needed to have a human readable text for each completion.
Args:
match (re.match): current completion
Returns:
str: match stripped from unneeded placeholders
"""
dict_match = match.groupdict()
if dict_match[Completer.PARAM_TAG]:
return dict_match[Completer.PARAM_TAG]
if dict_match[Completer.TYPE_TAG]:
return dict_match[Completer.TYPE_TAG] + ' '
return ''
completions = []
for completion in complete_results:
pos_search = Completer.compl_regex.search(completion)
if not pos_search:
log.debug(
" completion '%s' did not match pattern '%s'",
completion, Completer.compl_regex.pattern)
continue
comp_dict = pos_search.groupdict()
# log.debug("completions parsed: %s", comp_dict)
trigger = comp_dict['name']
parser = Parser()
# remove optional parameters triggers
comp_dict['content'] = re.sub(
Completer.opts_regex, '', comp_dict['content'])
# tokenize parameters
contents = re.sub(Completer.compl_content_regex,
parser.tokenize_params,
comp_dict['content'])
# make the hint look pretty
hint = re.sub(Completer.compl_content_regex,
Parser.make_pretty,
comp_dict['content'])
completions.append([trigger + "\t" + hint, contents])
return completions
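# A minimal illustration (not part of the original module) of what
# Completer._parse_completions produces for a single raw clang line. The raw
# completion text below is a hypothetical example of clang's
# "-code-completion-at" output format:
#
#   raw = ["COMPLETION: foo : [#int#]foo(<#int x#>, <#float y#>)"]
#   Completer._parse_completions(raw)
#   # -> [["foo\tint foo(int x, float y)", "foo(${1:int x}, ${2:float y})"]]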
|
|
import hashlib
import json
import os
import pickle
import shutil
from datetime import date, datetime
from django.conf import settings
from django.utils import timezone
from django_test_tools.exceptions import DjangoTestToolsException
BLOCKSIZE = 65536
def create_dated(filename):
"""
    Based on the filename, creates a full path filename including the date and time in '%Y%m%d_%H%M' format.
The path to the filename will be set in the TEST_OUTPUT_PATH settings variable.
If the TEST_OUTPUT_PATH folder doesn't exist the function will create it.
:param filename: base filename. my_excel_data.xlsx for example
:return: string, full path to file with date and time in the TEST_OUTPUT_PATH folder
"""
if getattr(settings, 'TEST_OUTPUT_PATH', None) is None:
        msg = 'You need the variable TEST_OUTPUT_PATH in settings. It should point to a folder ' \
              'for temporary data to be written and reviewed.'
raise ValueError(msg)
if not os.path.exists(settings.TEST_OUTPUT_PATH):
os.makedirs(settings.TEST_OUTPUT_PATH)
return add_date(os.path.join(settings.TEST_OUTPUT_PATH, filename))
def hash_file(filename, algorithm='sha1', block_size=BLOCKSIZE):
"""
Creates a unique hash for a file.
:param filename: String with the full path to the file
:param algorithm: String Algorithm to create the hash
:param block_size: int for the size of the block while reading the file
:return: string the hash for the file
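    A minimal usage sketch (the path below is only illustrative):
    .. code-block:: python
        digest = hash_file('/tmp/data.txt', algorithm='md5')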
"""
try:
hasher = getattr(hashlib, algorithm)()
except AttributeError:
raise ValueError('{} is not a valid hashing algorithm'.format(algorithm))
with open(filename, 'rb') as afile:
buf = afile.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(block_size)
return hasher.hexdigest()
def parametrized(dec):
"""
Need to study this code.
Got it from http://stackoverflow.com/questions/5929107/python-decorators-with-parameters
:param dec:
:return:
"""
def layer(*args, **kwargs):
def repl(f):
return dec(f, *args, **kwargs)
return repl
return layer
@parametrized
def temporary_file(func, extension, delete_on_exit=True):
"""
    This method decorator creates a dated filename using the provided extension and deletes the file after the method
has been executed.
The settings.TEST_OUTPUT_PATH must be configured in your settings file.
.. code-block:: python
@temporary_file('json')
def test_temporary_file_decorator(self):
filename = self.test_temporary_file_decorator.filename
... write to the file ...
:param func: function to decorate
:param extension: extension of the filename without the dot
:param delete_on_exit: If True the filename will be deleted.
:return: the function
"""
filename = create_dated('{}.{}'.format(func.__name__, extension))
def function_t_return(*args):
results = func(*args)
if os.path.exists(filename) and delete_on_exit:
os.remove(filename)
return results
function_t_return.filename = filename
return function_t_return
@parametrized
def temporary_files(func, extension, delete_on_exit=True, count=2):
"""
    This method decorator creates dated filenames using the provided extension and deletes the files after the method
has been executed.
The settings.TEST_OUTPUT_PATH must be configured in your settings file.
.. code-block:: python
@temporary_files('json')
def test_temporary_file_decorator(self):
filename = self.test_temporary_file_decorator.filenames[0]
... write to the file ...
:param func: function to decorate
:param extension: extension of the filename without the dot
    :param delete_on_exit: If True the filenames will be deleted.
    :param count: int, number of temporary filenames to create
:return: the function
"""
filenames = list()
for i in range(count):
filename = create_dated('{}-{}.{}'.format(func.__name__, i, extension))
filenames.append(filename)
def function_t_return(*args):
results = func(*args)
for filename in filenames:
if os.path.exists(filename) and delete_on_exit:
os.remove(filename)
return results
function_t_return.filenames = filenames
return function_t_return
def shorten_path(path, level=2, current_level=1):
"""
This method shortens the path by eliminating the folders on top.
.. code-block:: python
filename = '/user/documents/personal/file.txt'
shortened = shorten_path(filename)
self.assertEqual(shortened, 'personal/file.txt')
:param path: string full path for the filename
:param level: int, number of levels to show.
:param current_level: int, recursing level.
:return: string shortened path
"""
if level == 0:
raise ValueError('The minimum level accepted is one')
path, tail = os.path.split(path)
if level == current_level:
return tail
else:
if path != os.path.sep:
return shorten_path(path, level, current_level + 1) + os.path.sep + tail
return tail
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code
taken from: https://stackoverflow.com/questions/11875770/how-to-overcome-datetime-datetime-not-json-serializable
"""
if isinstance(obj, (datetime, date)):
serial = obj.isoformat()
return serial
raise TypeError("Type %s not serializable" % type(obj))
def serialize_data(data, output_file=None, format='json', **kwargs):
"""
    Quick function to serialize data to a file. The data keys will be saved in alphabetical order
    for consistency purposes.
    If no output_file is supplied the function will create a dated file in the settings.TEST_OUTPUT_PATH folder.
    If the output_file is a folder, the dated file will be created in the supplied folder with the serialized data.
    If the output_file is a file, the data will be serialized to that file.
:param data: Dictionary or list to serialize
:param format: Format to serialize to. Currently json is the only one supported
:param output_file: File to output the data to
:param kwargs:
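    A minimal usage sketch (the data and values below are only illustrative):
    .. code-block:: python
        data = {'name': 'Lucy', 'created': datetime(2017, 1, 1)}
        filename = serialize_data(data, format='json')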
"""
assert format in ['json', 'pickle'], 'Unsupported format {}'.format(format)
base_filename = kwargs.get('base_filename', 'serialized_data')
if output_file is None:
filename = create_dated('{}.{}'.format(base_filename, format))
elif os.path.isdir(output_file):
filename = os.path.join(output_file, '{}.{}'.format(base_filename, format))
else:
filename = output_file
if format == 'json':
with open(filename, 'w', encoding=kwargs.get('encoding', 'utf-8'), newline='\n') as fp:
json.dump(data, fp, indent=kwargs.get('indent', 4),
default=json_serial, sort_keys=True)
elif format == 'pickle':
with open(filename, 'wb') as output:
pickle.dump(data, output, pickle.HIGHEST_PROTOCOL)
return filename
def add_date(filename, **kwargs):
"""
Adds to a filename the current date and time in '%Y%m%d_%H%M' format.
For a filename /my/path/myexcel.xlsx the function would return /my/path/myexcel_20170101_1305.xlsx.
    If a file with the resulting name already exists, the function will append seconds to the date to attempt to
    get a unique name. For example, if /my/path/myexcel_20170101_1305.xlsx already exists the function will return
    /my/path/myexcel_20170101_130521.xlsx.
:param filename: string with fullpath to file or just the filename
    :param kwargs: dictionary. date_position: suffix or prefix, extension: string to replace extension
:return: string with full path string including the date and time
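    Usage sketch (the path is illustrative and the timestamp depends on the current time):
    .. code-block:: python
        add_date('/my/path/myexcel.xlsx')
        # -> '/my/path/myexcel_20170101_1305.xlsx'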
"""
current_datetime = timezone.localtime(timezone.now()).strftime('%Y%m%d_%H%M%S')
new_filename_data = dict()
suffix_template = '{path}{separator}{filename_with_out_extension}_{datetime}.{extension}'
prefix_template = '{path}{separator}{datetime}_{filename_with_out_extension}.{extension}'
if '/' in filename and '\\' in filename:
raise ValueError('Filename %s contains both / and \\ separators' % filename)
if '\\' in filename:
path_parts = filename.split('\\')
file = path_parts[-1]
path = '\\'.join(path_parts[:-1])
separator = '\\'
elif '/' in filename:
path_parts = filename.split('/')
file = path_parts[-1]
path = '/'.join(path_parts[:-1])
separator = '/'
else:
file = filename
path = ''
separator = ''
new_filename_data['path'] = path
parts = file.split('.')
if kwargs.get('extension', None) is not None:
new_filename_data['extension'] = kwargs['extension']
else:
if len(parts) > 1:
new_filename_data['extension'] = parts[-1]
else:
new_filename_data['extension'] = ''
new_filename_data['separator'] = separator
if new_filename_data['extension'] == '':
new_filename_data['filename_with_out_extension'] = parts[0]
else:
new_filename_data['filename_with_out_extension'] = '.'.join(parts[:-1])
new_filename_data['datetime'] = current_datetime[:-2] # Seconds are stripped
date_position = kwargs.get('date_position', 'suffix')
if date_position == 'suffix':
new_filename = suffix_template.format(**new_filename_data)
if os.path.exists(new_filename):
new_filename_data['datetime'] = current_datetime
new_filename = suffix_template.format(**new_filename_data)
if new_filename_data['extension'] == '':
new_filename = new_filename[:-1]
else:
new_filename = prefix_template.format(**new_filename_data)
if os.path.exists(new_filename):
new_filename_data['datetime'] = current_datetime
new_filename = prefix_template.format(**new_filename_data)
if new_filename_data['extension'] == '':
new_filename = new_filename[:-1]
return new_filename
class TemporaryFolder:
def __init__(self, base_name, delete_on_exit=True):
self.new_path = create_dated(base_name)
self.delete_on_exit = delete_on_exit
def __enter__(self):
os.mkdir(self.new_path)
self.saved_path = os.getcwd()
os.chdir(self.new_path)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
os.chdir(self.saved_path)
if self.delete_on_exit:
shutil.rmtree(self.new_path)
def write(self, filename, content):
with open(filename, 'w', encoding='utf-8') as file:
if isinstance(content, str):
file.writelines(content)
elif isinstance(content, list):
for line in content:
file.write(line)
file.write('\n')
else:
file.writelines(str(content))
return os.path.join(self.new_path, filename)
def compare_file_content(*args, **kwargs):
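    """
    Compare two files line by line and collect the differences.
    :param args: the two filenames to compare; args[0] is treated as the expected file, args[1] as the actual one
    :param kwargs: excluded_lines: list of line indexes to ignore, encoding: file encoding (default 'utf-8'),
        raise_exception: if True raise DjangoTestToolsException on the first difference (default True),
        eol: end of line character (defaults to a newline)
    :return: list of error messages, one per differing line
    """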
errors = list()
file1 = args[0]
file2 = args[1]
excluded_lines = kwargs.get('excluded_lines', [])
encoding = kwargs.get('encoding', 'utf-8')
raise_exception = kwargs.get('raise_exception', True)
eol = kwargs.get('eol', '\n')
def get_lines(filename):
with open(filename, 'r', encoding=encoding, newline=eol) as file:
lines = file.readlines()
return lines
lines1 = get_lines(file1)
lines2 = get_lines(file2)
for i in range(len(lines1)):
if i not in excluded_lines:
if lines1[i] != lines2[i]:
msg = 'On line {} expected "{}" got "{}"'.format(i,
lines1[i].replace(eol, ''),
lines2[i].replace(eol, ''))
errors.append(msg)
if raise_exception:
raise DjangoTestToolsException(msg)
return errors
|
|
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from dragonflow._i18n import _LI, _LE
from dragonflow.controller.common import constants as const
from dragonflow.controller.df_base_app import DFlowApp
from neutron.agent.common import config
from oslo_log import log
from ryu.ofproto import ether
config.setup_logging()
LOG = log.getLogger(__name__)
SG_CT_STATE_MASK = const.CT_STATE_NEW | const.CT_STATE_EST | \
const.CT_STATE_REL | const.CT_STATE_INV | const.CT_STATE_TRK
COOKIE_FULLMASK = 0xffffffffffffffff
SG_PRIORITY_OFFSET = 2
class SGApp(DFlowApp):
def __init__(self, *args, **kwargs):
super(SGApp, self).__init__(*args, **kwargs)
self.secgroup_mappings = {}
self.secgroup_rule_mappings = {}
        # When the value of a conj_id match is zero, it can match every
        # packet with no conj_id, which is not what we expect. So we simply
        # skip the value of zero and allocate conj_ids beginning with one.
self.next_secgroup_id = 1
self.next_secgroup_rule_id = 0
self.secgroup_refcount = {}
self.remote_secgroup_ref = {}
self.secgroup_associate_local_ports = {}
self.secgroup_aggregate_addresses = {}
@staticmethod
def _split_range(range_start, range_end, full_mask):
bit_flag = 1
last_temp_start = range_start
last_temp_end = last_temp_start
result_list = []
while True:
if ((last_temp_start & bit_flag) == 0) and \
((last_temp_end | bit_flag) <= range_end):
last_temp_end |= bit_flag
bit_flag <<= 1
else:
mask = full_mask - (bit_flag - 1)
result_list.append({"prefix": last_temp_start, "mask": mask})
if last_temp_end >= range_end:
break
bit_flag = 1
last_temp_start = last_temp_end + 1
last_temp_end = last_temp_start
return result_list
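    # Worked example (illustrative, not part of the original code): splitting a
    # TCP port range into prefix/mask pairs, as done for port matches in
    # _get_rule_flows_match_except_net_addresses below:
    #
    #   SGApp._split_range(16, 31, 0xffff)
    #   # -> [{'prefix': 16, 'mask': 0xfff0}]
    #   SGApp._split_range(22, 25, 0xffff)
    #   # -> [{'prefix': 22, 'mask': 0xfffe}, {'prefix': 24, 'mask': 0xfffe}]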
@staticmethod
def _try_merge_cidr(current_prefix, current_mask, last_item, full_mask):
prefix_mask = full_mask & (current_mask << 1)
bit_check_flag = prefix_mask ^ current_mask
if (last_item["mask"] == current_mask) and \
((last_item["prefix"] & prefix_mask) ==
(current_prefix & prefix_mask)) and \
((last_item["prefix"] & bit_check_flag) !=
(current_prefix & bit_check_flag)):
return prefix_mask
return None
@staticmethod
def _remove_one_address(cidr_array, address):
full_mask = 0xffffffff
added_cidr = []
removed_cidr = []
new_cidr_array = cidr_array
for index in range(len(cidr_array)):
cidr_item = cidr_array[index]
temp_min = cidr_item["prefix"]
temp_max = temp_min + (full_mask - cidr_item["mask"])
if temp_min <= address <= temp_max:
removed_cidr.append(cidr_item)
if temp_min < address:
added_cidr.extend(
SGApp._split_range(temp_min, address - 1, full_mask)
)
if temp_max > address:
added_cidr.extend(
SGApp._split_range(address + 1, temp_max, full_mask)
)
new_cidr_array = cidr_array[:index]
new_cidr_array.extend(added_cidr)
new_cidr_array.extend(cidr_array[(index + 1):])
break
return new_cidr_array, added_cidr, removed_cidr
@staticmethod
def _add_one_address(cidr_array, address):
full_mask = 0xffffffff
position = None
for index in range(len(cidr_array)):
cidr_item = cidr_array[index]
temp_min = cidr_item["prefix"]
temp_max = temp_min + (full_mask - cidr_item["mask"])
if temp_max >= address:
if temp_min <= address:
return cidr_array, [], []
position = index
break
if position is None:
left_array = list(cidr_array)
right_array = []
else:
left_array = cidr_array[:position]
right_array = cidr_array[position:]
added_cidr = []
removed_cidr = []
new_cidr_array = []
current_prefix = address
current_mask = full_mask
continue_flag = True
while continue_flag:
continue_flag = False
if len(left_array) != 0:
left_item = left_array.pop(-1)
new_mask = SGApp._try_merge_cidr(current_prefix, current_mask,
left_item, full_mask)
if new_mask:
current_prefix &= new_mask
current_mask = new_mask
removed_cidr.append(left_item)
continue_flag = True
continue
else:
left_array.append(left_item)
if len(right_array) != 0:
right_item = right_array.pop(0)
new_mask = SGApp._try_merge_cidr(current_prefix, current_mask,
right_item, full_mask)
if new_mask:
current_prefix &= new_mask
current_mask = new_mask
removed_cidr.append(right_item)
continue_flag = True
continue
else:
right_array.insert(0, right_item)
added_cidr.append({"prefix": current_prefix, "mask": current_mask})
new_cidr_array.extend(left_array)
new_cidr_array.extend(added_cidr)
new_cidr_array.extend(right_array)
return new_cidr_array, added_cidr, removed_cidr
@staticmethod
def _get_cidr_match(item):
cidr = ""
for loop in range(4):
if loop != 0:
cidr += "."
cidr += str(0xff & (item["prefix"] >> (24 - (loop * 8))))
mask = item["mask"]
mask_length = 32
for loop in range(32):
if (mask & 1) == 0:
mask_length -= 1
mask >>= 1
else:
break
cidr += "/" + str(mask_length)
return cidr
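    # Illustrative example (not part of the original code): a prefix/mask pair
    # in integer form is rendered as a dotted CIDR string, e.g.
    #   SGApp._get_cidr_match({'prefix': 0x0a000000, 'mask': 0xffffff00})
    #   # -> '10.0.0.0/24'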
@staticmethod
def _get_network_and_mask(cidr):
result = netaddr.IPNetwork(cidr)
return result.network, result.netmask
@staticmethod
def _get_port_range_match(port_item):
if port_item["mask"] != 0xffff:
return port_item["prefix"], port_item["mask"]
return port_item["prefix"]
@staticmethod
def _get_port_match_name(protocol):
if protocol == 1:
port_match_name = 'icmpv4_type'
elif protocol == 6:
port_match_name = 'tcp_dst'
elif protocol == 17:
port_match_name = 'udp_dst'
else:
port_match_name = None
return port_match_name
@staticmethod
def _get_integer_value_from_address(address):
split_list = address.split('.')
value = 0
for item in split_list:
value = (value << 8) + int(item)
return value
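    # e.g. (illustrative) SGApp._get_integer_value_from_address('10.0.0.1')
    # -> 167772161 (0x0a000001)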
@staticmethod
def _get_rule_flows_match_except_net_addresses(secgroup_rule):
protocol = secgroup_rule.protocol
port_range_max = secgroup_rule.port_range_max
port_range_min = secgroup_rule.port_range_min
ethertype = secgroup_rule.ethertype
match_list = []
dl_type_match = {}
protocol_match = {}
port_match_list = [{}]
if ethertype == 'IPv4':
dl_type_match["eth_type"] = ether.ETH_TYPE_IP
if protocol is not None:
if protocol == 'icmp':
protocol = 1
elif protocol == 'tcp':
protocol = 6
elif protocol == 'udp':
protocol = 17
else:
protocol = int(protocol)
protocol_match["ip_proto"] = protocol
port_match_name = SGApp._get_port_match_name(protocol)
if (port_range_min is not None) and \
(port_match_name is not None):
port_match_list = []
if protocol == 1:
icmpv4_match = {port_match_name: int(port_range_min)}
if port_range_max is not None:
icmpv4_match["icmpv4_code"] = int(port_range_max)
port_match_list.append(icmpv4_match)
elif (int(port_range_min) == 1 and
int(port_range_max) == 65535):
port_match_list.append(protocol_match)
else:
split_port_range = SGApp._split_range(
int(port_range_min),
int(port_range_max),
0xffff
)
for port_item in split_port_range:
port_match_list.append(
{port_match_name:
SGApp._get_port_range_match(port_item)}
)
elif ethertype == 'IPv6':
            # IPv6 is not supported yet
dl_type_match["eth_type"] = ether.ETH_TYPE_IPV6
else:
LOG.error(_LE("wrong ethernet type"))
for port_match in port_match_list:
parameters_merge = dl_type_match.copy()
parameters_merge.update(protocol_match)
parameters_merge.update(port_match)
match_list.append(parameters_merge)
return match_list
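    # Illustrative sketch (not part of the original code): for an IPv4 rule
    # with protocol 'tcp' and port_range_min == port_range_max == 80, the
    # method above would return a single match dict roughly like
    #   [{'eth_type': ether.ETH_TYPE_IP, 'ip_proto': 6, 'tcp_dst': 80}]
    # (network addresses are added separately by the callers).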
@staticmethod
def _get_rule_cookie(rule_id):
rule_cookie = rule_id << const.SECURITY_GROUP_RULE_COOKIE_SHIFT_LEN
return rule_cookie & const.SECURITY_GROUP_RULE_COOKIE_MASK
def _install_security_group_permit_flow_by_direction(self,
security_group_id,
direction):
if direction == 'ingress':
table_id = const.INGRESS_SECURITY_GROUP_TABLE
recirc_table = const.INGRESS_DISPATCH_TABLE
else:
table_id = const.EGRESS_SECURITY_GROUP_TABLE
recirc_table = const.SERVICES_CLASSIFICATION_TABLE
parser = self.get_datapath().ofproto_parser
ofproto = self.get_datapath().ofproto
conj_id, priority = \
self._get_secgroup_conj_id_and_priority(security_group_id)
if conj_id is None:
LOG.error(_LE("the conj_id of the security group %s is none"),
security_group_id)
return
match = parser.OFPMatch(eth_type=ether.ETH_TYPE_IP, conj_id=conj_id)
actions = [parser.NXActionCT(actions=[],
alg=0,
flags=const.CT_FLAG_COMMIT,
recirc_table=recirc_table,
zone_ofs_nbits=15,
zone_src=const.CT_ZONE_REG)]
action_inst = self.get_datapath().ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)
inst = [action_inst]
self.mod_flow(
self.get_datapath(),
inst=inst,
table_id=table_id,
priority=priority,
match=match)
def _install_security_group_flows(self, security_group_id):
self._install_security_group_permit_flow_by_direction(
security_group_id, 'ingress')
self._install_security_group_permit_flow_by_direction(
security_group_id, 'egress')
secgroup = self.db_store.get_security_group(security_group_id)
if secgroup is not None:
for rule in secgroup.rules:
self.add_security_group_rule(secgroup, rule)
def _uninstall_security_group_permit_flow_by_direction(self,
security_group_id,
direction):
if direction == 'ingress':
table_id = const.INGRESS_SECURITY_GROUP_TABLE
else:
table_id = const.EGRESS_SECURITY_GROUP_TABLE
parser = self.get_datapath().ofproto_parser
ofproto = self.get_datapath().ofproto
conj_id, priority = \
self._get_secgroup_conj_id_and_priority(security_group_id)
if conj_id is None:
LOG.error(_LE("the conj_id of the security group %s is none"),
security_group_id)
return
match = parser.OFPMatch(eth_type=ether.ETH_TYPE_IP, conj_id=conj_id)
self.mod_flow(
datapath=self.get_datapath(),
table_id=table_id,
match=match,
command=ofproto.OFPFC_DELETE,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY)
def _uninstall_security_group_flow(self, security_group_id):
self._uninstall_security_group_permit_flow_by_direction(
security_group_id, 'ingress')
self._uninstall_security_group_permit_flow_by_direction(
security_group_id, 'egress')
secgroup = self.db_store.get_security_group(security_group_id)
if secgroup is not None:
for rule in secgroup.rules:
self.remove_security_group_rule(secgroup, rule)
def _install_associating_flow_by_direction(self, security_group_id,
lport, direction):
parser = self.get_datapath().ofproto_parser
ofproto = self.get_datapath().ofproto
if direction == 'ingress':
table_id = const.INGRESS_SECURITY_GROUP_TABLE
tunnel_key = lport.get_tunnel_key()
lport_classify_match = {"reg7": tunnel_key}
else:
table_id = const.EGRESS_SECURITY_GROUP_TABLE
ofport = lport.get_external_value('ofport')
lport_classify_match = {"in_port": ofport}
conj_id, priority = \
self._get_secgroup_conj_id_and_priority(security_group_id)
if conj_id is None:
LOG.error(_LE("the conj_id of the security group (%s) is none"),
security_group_id)
return
match = parser.OFPMatch(ct_state=(const.CT_STATE_TRK |
const.CT_STATE_NEW,
SG_CT_STATE_MASK),
**lport_classify_match)
actions = [parser.NXActionConjunction(clause=0,
n_clauses=2,
id_=conj_id)]
action_inst = self.get_datapath(). \
ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)
inst = [action_inst]
self.mod_flow(
self.get_datapath(),
inst=inst,
table_id=table_id,
priority=priority,
match=match)
def _uninstall_associating_flow_by_direction(self, security_group_id,
lport, direction):
parser = self.get_datapath().ofproto_parser
ofproto = self.get_datapath().ofproto
if direction == 'ingress':
table_id = const.INGRESS_SECURITY_GROUP_TABLE
tunnel_key = lport.get_tunnel_key()
lport_classify_match = {"reg7": tunnel_key}
else:
table_id = const.EGRESS_SECURITY_GROUP_TABLE
ofport = lport.get_external_value('ofport')
lport_classify_match = {"in_port": ofport}
conj_id, priority = \
self._get_secgroup_conj_id_and_priority(security_group_id)
if conj_id is None:
LOG.error(_LE("the conj_id of the security group %s is none"),
security_group_id)
return
match = parser.OFPMatch(ct_state=(const.CT_STATE_TRK |
const.CT_STATE_NEW,
SG_CT_STATE_MASK),
**lport_classify_match)
self.mod_flow(
datapath=self.get_datapath(),
table_id=table_id,
priority=priority,
match=match,
command=ofproto.OFPFC_DELETE_STRICT,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY)
def _install_associating_flows(self, security_group_id, lport):
self._install_associating_flow_by_direction(security_group_id,
lport,
'ingress')
self._install_associating_flow_by_direction(security_group_id,
lport,
'egress')
def _uninstall_associating_flows(self, security_group_id, lport):
self._uninstall_associating_flow_by_direction(security_group_id,
lport,
'ingress')
self._uninstall_associating_flow_by_direction(security_group_id,
lport,
'egress')
def _install_connection_track_flow_by_direction(self, lport, direction):
parser = self.get_datapath().ofproto_parser
ofproto = self.get_datapath().ofproto
if direction == 'ingress':
pre_table_id = const.INGRESS_CONNTRACK_TABLE
table_id = const.INGRESS_SECURITY_GROUP_TABLE
tunnel_key = lport.get_tunnel_key()
lport_classify_match = {"reg7": tunnel_key}
else:
pre_table_id = const.EGRESS_CONNTRACK_TABLE
table_id = const.EGRESS_SECURITY_GROUP_TABLE
ofport = lport.get_external_value('ofport')
lport_classify_match = {"in_port": ofport}
match = parser.OFPMatch(eth_type=ether.ETH_TYPE_IP,
**lport_classify_match)
actions = [parser.NXActionCT(actions=[],
alg=0,
flags=0,
recirc_table=table_id,
zone_ofs_nbits=15,
zone_src=const.METADATA_REG)]
action_inst = self.get_datapath().ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)
inst = [action_inst]
self.mod_flow(
self.get_datapath(),
inst=inst,
table_id=pre_table_id,
priority=const.PRIORITY_MEDIUM,
match=match)
def _uninstall_connection_track_flow_by_direction(self, lport, direction):
parser = self.get_datapath().ofproto_parser
ofproto = self.get_datapath().ofproto
if direction == 'ingress':
pre_table_id = const.INGRESS_CONNTRACK_TABLE
tunnel_key = lport.get_tunnel_key()
lport_classify_match = {"reg7": tunnel_key}
else:
pre_table_id = const.EGRESS_CONNTRACK_TABLE
ofport = lport.get_external_value('ofport')
lport_classify_match = {"in_port": ofport}
match = parser.OFPMatch(eth_type=ether.ETH_TYPE_IP,
**lport_classify_match)
self.mod_flow(
datapath=self.get_datapath(),
table_id=pre_table_id,
match=match,
command=ofproto.OFPFC_DELETE,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY)
def _install_connection_track_flows(self, lport):
self._install_connection_track_flow_by_direction(lport, 'ingress')
self._install_connection_track_flow_by_direction(lport, 'egress')
def _uninstall_connection_track_flows(self, lport):
self._uninstall_connection_track_flow_by_direction(lport, 'ingress')
self._uninstall_connection_track_flow_by_direction(lport, 'egress')
def _update_security_group_rule_flows_by_addresses(self,
secgroup,
secgroup_rule,
added_cidr,
removed_cidr):
conj_id, priority = self._get_secgroup_conj_id_and_priority(secgroup)
if conj_id is None:
LOG.error(_LE("the conj_id of the security group (%s) is none"),
secgroup)
return
parser = self.get_datapath().ofproto_parser
ofproto = self.get_datapath().ofproto
rule_id = self._get_security_rule_mapping(secgroup_rule.id)
match_list = \
SGApp._get_rule_flows_match_except_net_addresses(secgroup_rule)
if secgroup_rule.ethertype == 'IPv4':
if secgroup_rule.direction == 'ingress':
table_id = const.INGRESS_SECURITY_GROUP_TABLE
ipv4_match_item = "ipv4_src"
else:
table_id = const.EGRESS_SECURITY_GROUP_TABLE
ipv4_match_item = "ipv4_dst"
elif secgroup_rule.ethertype == 'IPv6':
            # IPv6 is not supported yet
LOG.info(_LI("IPv6 rules are not supported yet"))
return
else:
LOG.error(_LE("wrong ethernet type"))
return
actions = [parser.NXActionConjunction(clause=1,
n_clauses=2,
id_=conj_id)]
action_inst = self.get_datapath(). \
ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)
inst = [action_inst]
for added_cidr_item in added_cidr:
for match_item in match_list:
parameters_merge = match_item.copy()
parameters_merge[ipv4_match_item] = \
SGApp._get_network_and_mask(
SGApp._get_cidr_match(added_cidr_item))
match = parser.OFPMatch(**parameters_merge)
self.mod_flow(
self.get_datapath(),
cookie=SGApp._get_rule_cookie(rule_id),
cookie_mask=COOKIE_FULLMASK,
inst=inst,
table_id=table_id,
priority=priority,
match=match)
for removed_cidr_item in removed_cidr:
for match_item in match_list:
parameters_merge = match_item.copy()
parameters_merge[ipv4_match_item] = \
SGApp._get_network_and_mask(
SGApp._get_cidr_match(removed_cidr_item))
match = parser.OFPMatch(**parameters_merge)
self.mod_flow(
datapath=self.get_datapath(),
table_id=table_id,
priority=priority,
match=match,
command=ofproto.OFPFC_DELETE_STRICT,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY)
def _install_security_group_rule_flows(self, secgroup, secgroup_rule):
conj_id, priority = self._get_secgroup_conj_id_and_priority(secgroup)
if conj_id is None:
LOG.error(_LE("the conj_id of the security group %s is none"),
secgroup)
return
parser = self.get_datapath().ofproto_parser
ofproto = self.get_datapath().ofproto
rule_id = self._get_security_rule_mapping(secgroup_rule.id)
remote_group_id = secgroup_rule.remote_group_id
remote_ip_prefix = secgroup_rule.remote_ip_prefix
ethertype = secgroup_rule.ethertype
if secgroup_rule.direction == 'ingress':
table_id = const.INGRESS_SECURITY_GROUP_TABLE
ipv4_match_item = "ipv4_src"
else:
table_id = const.EGRESS_SECURITY_GROUP_TABLE
ipv4_match_item = "ipv4_dst"
match_list = \
SGApp._get_rule_flows_match_except_net_addresses(secgroup_rule)
actions = [parser.NXActionConjunction(clause=1,
n_clauses=2,
id_=conj_id)]
action_inst = self.get_datapath(). \
ofproto_parser.OFPInstructionActions(
ofproto.OFPIT_APPLY_ACTIONS, actions)
inst = [action_inst]
if ethertype == 'IPv4':
addresses_list = [{}]
if remote_group_id is not None:
aggregate_addresses_range = \
self.secgroup_aggregate_addresses.get(remote_group_id)
addresses_list = []
if aggregate_addresses_range is not None:
for aggregate_address in aggregate_addresses_range:
addresses_list.append({
ipv4_match_item: SGApp._get_network_and_mask(
SGApp._get_cidr_match(aggregate_address)
)
})
elif remote_ip_prefix is not None:
addresses_list = [{
ipv4_match_item: SGApp._get_network_and_mask(
remote_ip_prefix
)
}]
for address_item in addresses_list:
for match_item in match_list:
parameters_merge = match_item.copy()
parameters_merge.update(address_item)
match = parser.OFPMatch(**parameters_merge)
self.mod_flow(
self.get_datapath(),
cookie=SGApp._get_rule_cookie(rule_id),
cookie_mask=COOKIE_FULLMASK,
inst=inst,
table_id=table_id,
priority=priority,
match=match)
elif ethertype == 'IPv6':
            # IPv6 is not supported yet
LOG.info(_LI("IPv6 rules are not supported yet"))
else:
LOG.error(_LE("wrong ethernet type"))
def _uninstall_security_group_rule_flows(self, secgroup_rule):
# uninstall rule flows by its cookie
ofproto = self.get_datapath().ofproto
direction = secgroup_rule.direction
if direction == 'ingress':
table_id = const.INGRESS_SECURITY_GROUP_TABLE
else:
table_id = const.EGRESS_SECURITY_GROUP_TABLE
rule_id = self._get_security_rule_mapping(secgroup_rule.id)
if rule_id is None:
LOG.error(_LE("the rule_id of the security group rule %s is none"),
rule_id)
return
self.mod_flow(
datapath=self.get_datapath(),
cookie=SGApp._get_rule_cookie(rule_id),
cookie_mask=const.SECURITY_GROUP_RULE_COOKIE_MASK,
table_id=table_id,
command=ofproto.OFPFC_DELETE,
out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY)
def _install_env_init_flow_by_direction(self, direction):
if direction == 'ingress':
table_id = const.INGRESS_SECURITY_GROUP_TABLE
goto_table_id = const.INGRESS_DISPATCH_TABLE
else:
table_id = const.EGRESS_SECURITY_GROUP_TABLE
goto_table_id = const.SERVICES_CLASSIFICATION_TABLE
parser = self.get_datapath().ofproto_parser
# defaults of sg-table to drop packet
drop_inst = None
self.mod_flow(
self.get_datapath(),
inst=drop_inst,
table_id=table_id,
priority=const.PRIORITY_DEFAULT)
# est state, pass
match = parser.OFPMatch(ct_state=(const.CT_STATE_TRK |
const.CT_STATE_EST,
SG_CT_STATE_MASK))
goto_inst = [parser.OFPInstructionGotoTable(goto_table_id)]
self.mod_flow(
self.get_datapath(),
inst=goto_inst,
table_id=table_id,
priority=const.PRIORITY_CT_STATE,
match=match)
# rel state, pass
match = parser.OFPMatch(ct_state=(const.CT_STATE_TRK |
const.CT_STATE_REL,
SG_CT_STATE_MASK))
self.mod_flow(
self.get_datapath(),
inst=goto_inst,
table_id=table_id,
priority=const.PRIORITY_CT_STATE,
match=match)
# inv state, drop
invalid_ct_state_flag = const.CT_STATE_TRK | const.CT_STATE_INV
match = parser.OFPMatch(ct_state=(invalid_ct_state_flag,
invalid_ct_state_flag))
self.mod_flow(
self.get_datapath(),
inst=drop_inst,
table_id=table_id,
priority=const.PRIORITY_CT_STATE,
match=match)
def switch_features_handler(self, ev):
if self.get_datapath() is None:
return
self._install_env_init_flow_by_direction('ingress')
self._install_env_init_flow_by_direction('egress')
def _get_security_rule_mapping(self, lrule_id):
rule_id = self.secgroup_rule_mappings.get(lrule_id)
if rule_id is not None:
return rule_id
else:
self.next_secgroup_rule_id += 1
            # TODO(ding bo) verify self.next_secgroup_rule_id didn't wrap
self.secgroup_rule_mappings[lrule_id] = self.next_secgroup_rule_id
return self.next_secgroup_rule_id
def _allocate_security_group_id(self, lgroup_id):
# allocate a number
security_id = self.next_secgroup_id
LOG.info(_LI("allocate a number %(security_id)s to the security group "
"%(lgroup_id)s")
% {'security_id': security_id,
'lgroup_id': lgroup_id})
self.next_secgroup_id += 1
# save in DB
# TODO(yuanwei)
# save in local mapping
self.secgroup_mappings[lgroup_id] = security_id
def _release_security_group_id(self, lgroup_id):
# release in local mapping
security_id = self.secgroup_mappings.get(lgroup_id)
LOG.info(_LI("release the allocated number %(security_id)s of the"
"security group %(lgroup_id)s")
% {'security_id': security_id,
'lgroup_id': lgroup_id})
if security_id is not None:
del self.secgroup_mappings[lgroup_id]
# release in DB
# TODO(yuan wei)
# release this number
# TODO(yuan wei)
def _get_secgroup_conj_id_and_priority(self, lgroup_id):
security_id = self.secgroup_mappings.get(lgroup_id)
if security_id is not None:
return security_id, (SG_PRIORITY_OFFSET + security_id)
return None, None
def remove_local_port(self, lport):
if self.get_datapath() is None:
LOG.error(_LE("datapath is none"))
return
secgroups = lport.get_security_groups()
if secgroups is None:
return
# uninstall ct table
self._uninstall_connection_track_flows(lport)
ip = lport.get_ip()
for secgroup_id in secgroups:
# uninstall associating flow
self._uninstall_associating_flows(secgroup_id, lport)
# update the record of aggregate addresses of ports associated
# with this security group.
aggregate_addresses_range = \
self.secgroup_aggregate_addresses.get(secgroup_id)
if aggregate_addresses_range is not None:
new_cidr_array, added_cidr, removed_cidr = \
SGApp._remove_one_address(
aggregate_addresses_range,
SGApp._get_integer_value_from_address(ip)
)
if len(new_cidr_array) == 0:
del self.secgroup_aggregate_addresses[secgroup_id]
else:
self.secgroup_aggregate_addresses[secgroup_id] = \
new_cidr_array
# update the flows representing those rules each of which
# specifies this security group as its
# parameter of remote group.
secrules = self.remote_secgroup_ref.get(secgroup_id)
if secrules is not None:
for rule_info in secrules.values():
self._update_security_group_rule_flows_by_addresses(
rule_info.security_group_id,
rule_info,
added_cidr,
removed_cidr
)
# update the record of ports associated with this security group.
associate_ports = \
self.secgroup_associate_local_ports.get(secgroup_id)
if associate_ports is not None:
if lport.get_id() in associate_ports:
associate_ports.remove(lport.get_id())
if len(associate_ports) == 0:
self._uninstall_security_group_flow(secgroup_id)
self._release_security_group_id(secgroup_id)
del self.secgroup_associate_local_ports[secgroup_id]
def remove_remote_port(self, lport):
if self.get_datapath() is None:
LOG.error(_LE("datapath is none"))
return
secgroups = lport.get_security_groups()
if secgroups is None:
return
ip = lport.get_ip()
for secgroup_id in secgroups:
# update the record of aggregate addresses of ports associated
# with this security group.
aggregate_addresses_range = \
self.secgroup_aggregate_addresses.get(secgroup_id)
if aggregate_addresses_range is not None:
new_cidr_array, added_cidr, removed_cidr = \
SGApp._remove_one_address(
aggregate_addresses_range,
SGApp._get_integer_value_from_address(ip)
)
if len(new_cidr_array) == 0:
del self.secgroup_aggregate_addresses[secgroup_id]
else:
self.secgroup_aggregate_addresses[secgroup_id] =\
new_cidr_array
# update the flows representing those rules each of which
# specifies this security group as its
# parameter of remote group.
secrules = self.remote_secgroup_ref.get(secgroup_id)
if secrules is not None:
for rule_info in secrules.values():
self._update_security_group_rule_flows_by_addresses(
rule_info.security_group_id,
rule_info,
added_cidr,
removed_cidr
)
def add_local_port(self, lport):
if self.get_datapath() is None:
LOG.error(_LE("datapath is none"))
return
secgroups = lport.get_security_groups()
if secgroups is None:
return
ip = lport.get_ip()
for secgroup_id in secgroups:
# update the record of aggregate addresses of ports associated
# with this security group.
aggregate_addresses_range = \
self.secgroup_aggregate_addresses.get(secgroup_id)
if aggregate_addresses_range is None:
aggregate_addresses_range = []
new_cidr_array, added_cidr, removed_cidr = SGApp._add_one_address(
aggregate_addresses_range,
SGApp._get_integer_value_from_address(ip)
)
self.secgroup_aggregate_addresses[secgroup_id] = new_cidr_array
# update the flows representing those rules each of which specifies
# this security group as its parameter
# of remote group.
secrules = self.remote_secgroup_ref.get(secgroup_id)
if secrules is not None:
for rule_info in secrules.values():
self._update_security_group_rule_flows_by_addresses(
rule_info.security_group_id,
rule_info,
added_cidr,
removed_cidr)
# update the record of ports associated with this security group.
associate_ports = \
self.secgroup_associate_local_ports.get(secgroup_id)
if associate_ports is None:
self.secgroup_associate_local_ports[secgroup_id] = \
[lport.get_id()]
self._allocate_security_group_id(secgroup_id)
self._install_security_group_flows(secgroup_id)
elif lport.get_id() not in associate_ports:
associate_ports.append(lport.get_id())
# install associating flow
self._install_associating_flows(secgroup_id, lport)
# install ct table
self._install_connection_track_flows(lport)
def add_remote_port(self, lport):
if self.get_datapath() is None:
LOG.error(_LE("datapath is none"))
return
secgroups = lport.get_security_groups()
if secgroups is None:
return
ip = lport.get_ip()
for secgroup_id in secgroups:
# update the record of aggregate addresses of ports associated
# with this security group.
aggregate_addresses_range = \
self.secgroup_aggregate_addresses.get(secgroup_id)
if aggregate_addresses_range is None:
aggregate_addresses_range = []
new_cidr_array, added_cidr, removed_cidr =\
SGApp._add_one_address(
aggregate_addresses_range,
SGApp._get_integer_value_from_address(ip)
)
self.secgroup_aggregate_addresses[secgroup_id] = new_cidr_array
# update the flows representing those rules each of which specifies
# this security group as its parameter of remote group.
secrules = self.remote_secgroup_ref.get(secgroup_id)
if secrules is not None:
for rule_info in secrules.values():
self._update_security_group_rule_flows_by_addresses(
rule_info.security_group_id,
rule_info,
added_cidr,
removed_cidr
)
def add_security_group_rule(self, secgroup, secgroup_rule):
LOG.info(_LI("add a rule %(rule)s to security group %(secgroup)s")
% {'rule': secgroup_rule, 'secgroup': secgroup.name})
if self.get_datapath() is None:
LOG.error(_LE("datapath is none"))
return
conj_id, priority = \
self._get_secgroup_conj_id_and_priority(secgroup.name)
if conj_id is None:
# this security group wasn't associated with a local port
LOG.info(_LI("this security group %s wasn't associated with"
" a local port"), secgroup.name)
return
        # update the record of rules, grouped by the security group each of
        # them specifies as its remote group.
remote_group_id = secgroup_rule.remote_group_id
if remote_group_id is not None:
associate_rules = self.remote_secgroup_ref.get(remote_group_id)
if associate_rules is None:
self.remote_secgroup_ref[remote_group_id] = \
{secgroup_rule.id: secgroup_rule}
else:
associate_rules[secgroup_rule.id] = secgroup_rule
self._install_security_group_rule_flows(secgroup.name, secgroup_rule)
def remove_security_group_rule(self, secgroup, secgroup_rule):
LOG.info(_LI("remove a rule %(rule)s to security group %(secgroup)s")
% {'rule': secgroup_rule, 'secgroup': secgroup.name})
if self.get_datapath() is None:
LOG.error(_LE("datapath is none"))
return
conj_id, priority = \
self._get_secgroup_conj_id_and_priority(secgroup.name)
if conj_id is None:
# this security group wasn't associated with a local port
LOG.info(_LI("this security group %s wasn't associated with"
" a local port"), secgroup.name)
return
        # update the record of rules, grouped by the security group each of
        # them specifies as its remote group.
remote_group_id = secgroup_rule.remote_group_id
if remote_group_id is not None:
associate_rules = self.remote_secgroup_ref.get(remote_group_id)
if associate_rules is not None:
del associate_rules[secgroup_rule.id]
if len(associate_rules) == 0:
del self.remote_secgroup_ref[remote_group_id]
self._uninstall_security_group_rule_flows(secgroup_rule)
|
|
# Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for introspection rules."""
from unittest import mock
from oslo_utils import uuidutils
from ironic_inspector import db
from ironic_inspector.plugins import base as plugins_base
from ironic_inspector import rules
from ironic_inspector.test import base as test_base
from ironic_inspector import utils
class BaseTest(test_base.NodeTest):
def setUp(self):
super(BaseTest, self).setUp()
self.uuid = uuidutils.generate_uuid()
self.conditions_json = [
{'op': 'eq', 'field': 'memory_mb', 'value': 1024},
{'op': 'eq', 'field': 'local_gb', 'value': 60},
]
self.actions_json = [
{'action': 'fail', 'message': 'boom!'}
]
self.data = {
'memory_mb': 1024,
'local_gb': 42,
}
self.scope = "inner circle"
@staticmethod
def condition_defaults(condition):
condition = condition.copy()
condition.setdefault('multiple', 'any')
condition.setdefault('invert', False)
return condition
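# Illustrative note (not part of the original tests): condition_defaults fills
# in the optional keys with the API defaults, so
#   {'op': 'eq', 'field': 'memory_mb', 'value': 1024}
# becomes
#   {'op': 'eq', 'field': 'memory_mb', 'value': 1024, 'multiple': 'any', 'invert': False}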
class TestCreateRule(BaseTest):
def test_only_actions(self):
rule = rules.create([], self.actions_json)
rule_json = rule.as_dict()
self.assertTrue(rule_json.pop('uuid'))
self.assertEqual({'description': None,
'conditions': [],
'actions': self.actions_json,
'scope': None},
rule_json)
def test_create_action_none_value(self):
self.actions_json = [{'action': 'set-attribute',
'path': '/properties/cpus', 'value': None}]
rule = rules.create([], self.actions_json)
rule_json = rule.as_dict()
self.assertTrue(rule_json.pop('uuid'))
self.assertEqual({'description': None,
'conditions': [],
'actions': self.actions_json,
'scope': None},
rule_json)
def test_duplicate_uuid(self):
rules.create([], self.actions_json, uuid=self.uuid)
self.assertRaisesRegex(utils.Error, 'already exists',
rules.create, [], self.actions_json,
uuid=self.uuid)
def test_with_conditions(self):
self.conditions_json.extend([
# multiple present&default, invert absent
{'op': 'eq', 'field': 'local_gb', 'value': 60, 'multiple': 'any'},
# multiple absent, invert present&default
{'op': 'eq', 'field': 'local_gb', 'value': 60, 'invert': False},
# multiple&invert present&non-default
{'op': 'eq', 'field': 'memory_mb', 'value': 1024,
'multiple': 'all', 'invert': True},
])
rule = rules.create(self.conditions_json, self.actions_json)
rule_json = rule.as_dict()
self.assertTrue(rule_json.pop('uuid'))
self.assertEqual({'description': None,
'conditions': [BaseTest.condition_defaults(cond)
for cond in self.conditions_json],
'actions': self.actions_json,
'scope': None},
rule_json)
def test_invalid_condition(self):
del self.conditions_json[0]['op']
self.assertRaisesRegex(utils.Error,
'Validation failed for conditions',
rules.create,
self.conditions_json, self.actions_json)
self.conditions_json[0]['op'] = 'foobar'
self.assertRaisesRegex(utils.Error,
'Validation failed for conditions',
rules.create,
self.conditions_json, self.actions_json)
def test_invalid_condition_field(self):
self.conditions_json[0]['field'] = '!*!'
self.assertRaisesRegex(utils.Error,
'Unable to parse field JSON path',
rules.create,
self.conditions_json, self.actions_json)
def test_invalid_condition_parameters(self):
self.conditions_json[0]['foo'] = 'bar'
self.assertRaisesRegex(utils.Error,
'Invalid parameters for operator',
rules.create,
self.conditions_json, self.actions_json)
def test_no_actions(self):
self.assertRaisesRegex(utils.Error,
'Validation failed for actions',
rules.create,
self.conditions_json, [])
def test_invalid_action(self):
del self.actions_json[0]['action']
self.assertRaisesRegex(utils.Error,
'Validation failed for actions',
rules.create,
self.conditions_json, self.actions_json)
self.actions_json[0]['action'] = 'foobar'
self.assertRaisesRegex(utils.Error,
'Validation failed for actions',
rules.create,
self.conditions_json, self.actions_json)
def test_invalid_action_parameters(self):
self.actions_json[0]['foo'] = 'bar'
self.assertRaisesRegex(utils.Error,
'Invalid parameters for action',
rules.create,
self.conditions_json, self.actions_json)
def test_scope(self):
rule = rules.create([], self.actions_json, scope=self.scope)
rule_json = rule.as_dict()
self.assertTrue(rule_json.pop('uuid'))
self.assertEqual({'description': None,
'conditions': [],
'actions': self.actions_json,
'scope': self.scope},
rule_json)
class TestGetRule(BaseTest):
def setUp(self):
super(TestGetRule, self).setUp()
rules.create(self.conditions_json, self.actions_json, uuid=self.uuid)
def test_get(self):
rule_json = rules.get(self.uuid).as_dict()
self.assertTrue(rule_json.pop('uuid'))
self.assertEqual({'description': None,
'conditions': [BaseTest.condition_defaults(cond)
for cond in self.conditions_json],
'actions': self.actions_json,
'scope': None},
rule_json)
def test_not_found(self):
self.assertRaises(utils.Error, rules.get, 'foobar')
def test_get_all(self):
uuid2 = uuidutils.generate_uuid()
rules.create(self.conditions_json, self.actions_json, uuid=uuid2)
self.assertEqual({self.uuid, uuid2},
{r.as_dict()['uuid'] for r in rules.get_all()})
class TestDeleteRule(BaseTest):
def setUp(self):
super(TestDeleteRule, self).setUp()
self.uuid2 = uuidutils.generate_uuid()
rules.create(self.conditions_json, self.actions_json, uuid=self.uuid)
rules.create(self.conditions_json, self.actions_json, uuid=self.uuid2)
def test_delete(self):
rules.delete(self.uuid)
self.assertEqual([(self.uuid2,)], db.model_query(db.Rule.uuid).all())
self.assertFalse(db.model_query(db.RuleCondition)
.filter_by(rule=self.uuid).all())
self.assertFalse(db.model_query(db.RuleAction)
.filter_by(rule=self.uuid).all())
def test_delete_non_existing(self):
self.assertRaises(utils.Error, rules.delete, 'foo')
def test_delete_all(self):
rules.delete_all()
self.assertFalse(db.model_query(db.Rule).all())
self.assertFalse(db.model_query(db.RuleCondition).all())
self.assertFalse(db.model_query(db.RuleAction).all())
@mock.patch.object(plugins_base, 'rule_conditions_manager', autospec=True)
class TestCheckConditions(BaseTest):
def setUp(self):
super(TestCheckConditions, self).setUp()
self.rule = rules.create(conditions_json=self.conditions_json,
actions_json=self.actions_json)
self.cond_mock = mock.Mock(spec=plugins_base.RuleConditionPlugin)
self.cond_mock.ALLOW_NONE = False
self.ext_mock = mock.Mock(spec=['obj'], obj=self.cond_mock)
def test_ok(self, mock_ext_mgr):
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.cond_mock.check.return_value = True
res = self.rule.check_conditions(self.node_info, self.data)
self.cond_mock.check.assert_any_call(self.node_info, 1024,
{'value': 1024})
self.cond_mock.check.assert_any_call(self.node_info, 42,
{'value': 60})
self.assertEqual(len(self.conditions_json),
self.cond_mock.check.call_count)
self.assertTrue(res)
def test_invert(self, mock_ext_mgr):
self.conditions_json = [
{'op': 'eq', 'field': 'memory_mb', 'value': 42,
'invert': True},
]
self.rule = rules.create(conditions_json=self.conditions_json,
actions_json=self.actions_json)
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.cond_mock.check.return_value = False
res = self.rule.check_conditions(self.node_info, self.data)
self.cond_mock.check.assert_called_once_with(self.node_info, 1024,
{'value': 42})
self.assertTrue(res)
def test_no_field(self, mock_ext_mgr):
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.cond_mock.check.return_value = True
del self.data['local_gb']
res = self.rule.check_conditions(self.node_info, self.data)
self.cond_mock.check.assert_called_once_with(self.node_info, 1024,
{'value': 1024})
self.assertFalse(res)
def test_no_field_none_allowed(self, mock_ext_mgr):
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.cond_mock.ALLOW_NONE = True
self.cond_mock.check.return_value = True
del self.data['local_gb']
res = self.rule.check_conditions(self.node_info, self.data)
self.cond_mock.check.assert_any_call(self.node_info, 1024,
{'value': 1024})
self.cond_mock.check.assert_any_call(self.node_info, None,
{'value': 60})
self.assertEqual(len(self.conditions_json),
self.cond_mock.check.call_count)
self.assertTrue(res)
def test_fail(self, mock_ext_mgr):
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.cond_mock.check.return_value = False
res = self.rule.check_conditions(self.node_info, self.data)
self.cond_mock.check.assert_called_once_with(self.node_info, 1024,
{'value': 1024})
self.assertFalse(res)
class TestCheckConditionsMultiple(BaseTest):
def setUp(self):
super(TestCheckConditionsMultiple, self).setUp()
self.conditions_json = [
{'op': 'eq', 'field': 'interfaces[*].ip', 'value': '1.2.3.4'}
]
def _build_data(self, ips):
return {
'interfaces': [
{'ip': ip} for ip in ips
]
}
def test_default(self):
rule = rules.create(conditions_json=self.conditions_json,
actions_json=self.actions_json)
data_set = [
(['1.1.1.1', '1.2.3.4', '1.3.2.2'], True),
(['1.2.3.4'], True),
(['1.1.1.1', '1.3.2.2'], False),
(['1.2.3.4', '1.3.2.2'], True),
]
for ips, result in data_set:
data = self._build_data(ips)
self.assertIs(result, rule.check_conditions(self.node_info, data),
data)
def test_any(self):
self.conditions_json[0]['multiple'] = 'any'
rule = rules.create(conditions_json=self.conditions_json,
actions_json=self.actions_json)
data_set = [
(['1.1.1.1', '1.2.3.4', '1.3.2.2'], True),
(['1.2.3.4'], True),
(['1.1.1.1', '1.3.2.2'], False),
(['1.2.3.4', '1.3.2.2'], True),
]
for ips, result in data_set:
data = self._build_data(ips)
self.assertIs(result, rule.check_conditions(self.node_info, data),
data)
def test_all(self):
self.conditions_json[0]['multiple'] = 'all'
rule = rules.create(conditions_json=self.conditions_json,
actions_json=self.actions_json)
data_set = [
(['1.1.1.1', '1.2.3.4', '1.3.2.2'], False),
(['1.2.3.4'], True),
(['1.1.1.1', '1.3.2.2'], False),
(['1.2.3.4', '1.3.2.2'], False),
]
for ips, result in data_set:
data = self._build_data(ips)
self.assertIs(result, rule.check_conditions(self.node_info, data),
data)
def test_first(self):
self.conditions_json[0]['multiple'] = 'first'
rule = rules.create(conditions_json=self.conditions_json,
actions_json=self.actions_json)
data_set = [
(['1.1.1.1', '1.2.3.4', '1.3.2.2'], False),
(['1.2.3.4'], True),
(['1.1.1.1', '1.3.2.2'], False),
(['1.2.3.4', '1.3.2.2'], True),
]
for ips, result in data_set:
data = self._build_data(ips)
self.assertIs(result, rule.check_conditions(self.node_info, data),
data)
class TestCheckConditionsSchemePath(BaseTest):
def test_conditions_data_path(self):
self.data_set = [
([{'op': 'eq', 'field': 'data://memory_mb', 'value': 1024}],
True),
([{'op': 'gt', 'field': 'data://local_gb', 'value': 42}],
False)
]
for condition, res in self.data_set:
rule = rules.create(conditions_json=condition,
actions_json=self.actions_json)
self.assertIs(res,
rule.check_conditions(self.node_info, self.data),
self.data)
def test_conditions_node_path(self):
self.node_set = [
([{'op': 'eq', 'field': 'node://driver_info.ipmi_address',
'value': self.bmc_address}],
True),
([{'op': 'eq', 'field': 'node://driver', 'value': 'fake'}],
False)
]
for condition, res in self.node_set:
rule = rules.create(conditions_json=condition,
actions_json=self.actions_json)
self.assertIs(res,
rule.check_conditions(self.node_info, self.data))
@mock.patch.object(plugins_base, 'rule_actions_manager', autospec=True)
class TestApplyActions(BaseTest):
def setUp(self):
super(TestApplyActions, self).setUp()
self.actions_json.append({'action': 'example'})
self.rule = rules.create(conditions_json=self.conditions_json,
actions_json=self.actions_json)
self.act_mock = mock.Mock(spec=plugins_base.RuleActionPlugin)
self.act_mock.FORMATTED_PARAMS = ['value']
self.ext_mock = mock.Mock(spec=['obj'], obj=self.act_mock)
def test_apply(self, mock_ext_mgr):
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.rule.apply_actions(self.node_info, data=self.data)
self.act_mock.apply.assert_any_call(self.node_info,
{'message': 'boom!'})
self.act_mock.apply.assert_any_call(self.node_info, {})
self.assertEqual(len(self.actions_json),
self.act_mock.apply.call_count)
def test_apply_data_format_value(self, mock_ext_mgr):
self.rule = rules.create(actions_json=[
{'action': 'set-attribute',
'path': '/driver_info/ipmi_address',
'value': '{data[memory_mb]}'}],
conditions_json=self.conditions_json
)
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.rule.apply_actions(self.node_info, data=self.data)
self.assertEqual(1, self.act_mock.apply.call_count)
def test_apply_data_format_value_dict(self, mock_ext_mgr):
self.data.update({'val_outer': {'val_inner': 17},
'key_outer': {'key_inner': 'baz'}})
self.rule = rules.create(actions_json=[
{'action': 'set-attribute',
'path': '/driver_info/foo',
'value': {'{data[key_outer][key_inner]}':
'{data[val_outer][val_inner]}'}}],
conditions_json=self.conditions_json
)
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.rule.apply_actions(self.node_info, data=self.data)
self.act_mock.apply.assert_called_once_with(self.node_info, {
# String-formatted values will be coerced to be strings.
'value': {'baz': '17'},
'path': '/driver_info/foo'
})
def test_apply_data_format_value_list(self, mock_ext_mgr):
self.data.update({'outer': {'inner': 'baz'}})
self.rule = rules.create(actions_json=[
{'action': 'set-attribute',
'path': '/driver_info/foo',
'value': ['basic', ['{data[outer][inner]}']]}],
conditions_json=self.conditions_json
)
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.rule.apply_actions(self.node_info, data=self.data)
self.act_mock.apply.assert_called_once_with(self.node_info, {
'value': ['basic', ['baz']],
'path': '/driver_info/foo'
})
def test_apply_data_format_value_primitives(self, mock_ext_mgr):
self.data.update({'outer': {'inner': False}})
self.rule = rules.create(actions_json=[
{'action': 'set-attribute',
'path': '/driver_info/foo',
'value': {42: {True: [3.14, 'foo', '{data[outer][inner]}']}}}],
conditions_json=self.conditions_json
)
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.rule.apply_actions(self.node_info, data=self.data)
self.act_mock.apply.assert_called_once_with(self.node_info, {
# String-formatted values will be coerced to be strings.
'value': {42: {True: [3.14, 'foo', 'False']}},
'path': '/driver_info/foo'
})
def test_apply_data_format_value_fail(self, mock_ext_mgr):
self.rule = rules.create(
actions_json=[
{'action': 'set-attribute',
'path': '/driver_info/ipmi_address',
'value': '{data[inventory][bmc_address]}'}],
conditions_json=self.conditions_json
)
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.assertRaises(utils.Error, self.rule.apply_actions,
self.node_info, data=self.data)
def test_apply_data_format_value_nested_fail(self, mock_ext_mgr):
self.data.update({'outer': {'inner': 'baz'}})
self.rule = rules.create(actions_json=[
{'action': 'set-attribute',
'path': '/driver_info/foo',
'value': ['basic', ['{data[outer][nonexistent]}']]}],
conditions_json=self.conditions_json
)
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.assertRaises(utils.Error, self.rule.apply_actions,
self.node_info, data=self.data)
def test_apply_data_non_format_value(self, mock_ext_mgr):
self.rule = rules.create(actions_json=[
{'action': 'set-attribute',
'path': '/driver_info/ipmi_address',
'value': 1}],
conditions_json=self.conditions_json
)
mock_ext_mgr.return_value.__getitem__.return_value = self.ext_mock
self.rule.apply_actions(self.node_info, data=self.data)
self.assertEqual(1, self.act_mock.apply.call_count)
@mock.patch.object(rules, 'get_all', autospec=True)
class TestApply(BaseTest):
def setUp(self):
super(TestApply, self).setUp()
self.rules = [mock.Mock(spec=rules.IntrospectionRule),
mock.Mock(spec=rules.IntrospectionRule)]
def test_no_rules(self, mock_get_all):
mock_get_all.return_value = []
rules.apply(self.node_info, self.data)
def test_apply(self, mock_get_all):
mock_get_all.return_value = self.rules
for idx, rule in enumerate(self.rules):
rule.check_conditions.return_value = not bool(idx)
rules.apply(self.node_info, self.data)
for idx, rule in enumerate(self.rules):
rule.check_conditions.assert_called_once_with(self.node_info,
self.data)
if rule.check_conditions.return_value:
rule.apply_actions.assert_called_once_with(
self.node_info, data=self.data)
else:
self.assertFalse(rule.apply_actions.called)
@mock.patch.object(rules, 'get_all', autospec=True)
class TestRuleScope(BaseTest):
"""Test that rules are only applied on the nodes that fall in their scope.
Check that:
- global rule is applied to all nodes
- different rules with scopes are applied to different nodes
- rule without matching scope is not applied
"""
def setUp(self):
super(TestRuleScope, self).setUp()
"""
rule_global
rule_scope_1
rule_scope_2
rule_out_scope
"""
self.rules = [rules.IntrospectionRule("", "", "", "", None),
rules.IntrospectionRule("", "", "", "", "scope_1"),
rules.IntrospectionRule("", "", "", "", "scope_2"),
rules.IntrospectionRule("", "", "", "", "scope_3")]
for r in self.rules:
r.check_conditions = mock.Mock()
r.check_conditions.return_value = True
r.apply_actions = mock.Mock()
r.apply_actions.return_value = True
def test_node_no_scope(self, mock_get_all):
mock_get_all.return_value = self.rules
self.node_info.node().properties['inspection_scope'] = None
rules.apply(self.node_info, self.data)
self.rules[0].apply_actions.assert_called_once() # global
self.rules[1].apply_actions.assert_not_called() # scope_1
self.rules[2].apply_actions.assert_not_called() # scope_2
self.rules[3].apply_actions.assert_not_called() # scope_3
def test_node_scope_1(self, mock_get_all):
mock_get_all.return_value = self.rules
self.node_info.node().properties['inspection_scope'] = "scope_1"
rules.apply(self.node_info, self.data)
self.rules[0].apply_actions.assert_called_once() # global
self.rules[1].apply_actions.assert_called_once() # scope_1
self.rules[2].apply_actions.assert_not_called() # scope_2
self.rules[3].apply_actions.assert_not_called() # scope_3
def test_node_scope_2(self, mock_get_all):
mock_get_all.return_value = self.rules
self.node_info.node().properties['inspection_scope'] = "scope_2"
rules.apply(self.node_info, self.data)
self.rules[0].apply_actions.assert_called_once() # global
self.rules[1].apply_actions.assert_not_called() # scope_1
self.rules[2].apply_actions.assert_called_once() # scope_2
self.rules[3].apply_actions.assert_not_called() # scope_3
|
|
import os
import sys
import pygame
import copy
from pygame.locals import *
import drawable
import util
import mapitem
import hare, cat, fox
from common.constants import *
from client.constants import *
from common import boundint
class MapChar(mapitem.MapItem):
def __init__(self, mapCloneMethod, battleCloneMethod, speciesAbbrev, inImages,
speedBase, speedTerrainModifiers, speedTerritoryModifiers,
speedHealthMod, team, name, battleChar, portrait, homeTerrain):
self.mapCloneMethod = mapCloneMethod
self.battleCloneMethod = battleCloneMethod
self.speciesAbbrev = speciesAbbrev
self.team = team
self.name = name
self.initPortrait(portrait)
self.battleChar = battleChar
self.homeTerrain = homeTerrain
self.altarCount = 0
self.speedBase = speedBase
self.speedTerrainModifiers = speedTerrainModifiers
self.speedTerritoryModifiers = speedTerritoryModifiers
self.speedHealthModifier = speedHealthMod
self.vel = [0.0, 0.0]
self.target = None
self.targetBuffer = []
self.currTerrain = 0
self.oldTerrain = 0
self.region = None
self.currTerritory = "allied"
self.oldTerritory = "allied"
self.addToPos = [0, 0]
self.blinkOn = True
self.blinkTick = 0
self.removed = False
self.rezzing = False
self.rezTime = 0
self.respawnTime = boundint.BoundInt(0, RESPAWN_MAX, 0)
self.healthRegainTick = 0
self.triggerColor = BATTLE_TRIGGER_AREA_COLOR_WITH_ALPHA
self.triggerSize = BATTLE_TRIGGER_RANGE
super(MapChar, self).__init__((0, 0), inImages)
def update(self):
if self.speedHasChanged():
self.startMovement()
self.oldTerrain = self.currTerrain
self.oldTerritory = self.currTerritory
if (not self.isDead()):
for i in range(2):
self.precisePos[i] += self.vel[i]
self.regainHealth()
self.checkForStop()
if self.rezzing:
self.rezTime += 1
if self.rezTime == REZ_TIME:
self.rezTime = 0
self.rez()
else:
self.rezTime = 0
def speedHasChanged(self):
return ( (self.oldTerrain != self.currTerrain) or
(self.oldTerritory != self.currTerritory) )
def checkForStop(self):
        if self.target is not None:
currDist = util.distance(self.precisePos, self.target)
nextLoc = []
for i in range(2):
nextLoc.append(self.precisePos[i] + self.vel[i])
nextDist = util.distance(nextLoc, self.target)
if nextDist > currDist:
if len(self.targetBuffer) > 0:
self.target = self.targetBuffer[0]
self.targetBuffer = self.targetBuffer[1:]
self.startMovement()
else:
self.stop()
def stop(self):
self.vel = [0.0, 0.0]
self.target = None
self.targetBuffer = []
def modifyImages(self):
colorSwap(self.images[0][0], TOKEN_BORDER_NEUTRAL,
TOKEN_BORDER_OFF[self.team], 5)
colorSwap(self.images[1][0], TOKEN_BORDER_NEUTRAL,
TOKEN_BORDER_HIGHLIGHTED[self.team], 5)
colorSwap(self.images[2][0], TOKEN_BORDER_NEUTRAL,
TOKEN_BORDER_SELECTED[self.team], 5)
def setTarget(self, target, waypoint):
if self.isDead() and (not self.rezzing):
waypoint = False
        if waypoint and (self.target is not None):
self.setTargetWaypoint(target)
return
else:
self.setTargetSingle(target)
self.startMovement()
def startMovement(self):
if self.target is None:
return
direction = util.get_direction(self.precisePos, self.target)
speed = self.getCurrSpeed()
for i in range(2):
self.vel[i] = direction[i] * speed
def setTargetWaypoint(self, target):
if self.target is None:
self.target = target
else:
            if (target is not None) and (len(self.targetBuffer) < MAX_WAYPOINTS):
self.targetBuffer.append(target)
def setTargetSingle(self, target):
self.target = target
self.targetBuffer = []
def getCurrSpeed(self):
val = (self.speedBase *
self.speedTerrainModifiers[self.currTerrain] *
self.speedTerritoryModifiers[self.currTerritory] *
self.getSpeedHealthModifier())
return val
def getSpeedHealthModifier(self):
hp = self.battleChar.hp
currDamageRatio = float( (float(hp.maximum) - float(hp.value))
/ float(hp.maximum))
loss = (1 - self.speedHealthModifier) * currDamageRatio
return 1.00 - loss
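    # Worked example for getSpeedHealthModifier (the 0.6 figure is illustrative;
    # the real value is the *_HEALTH_SPEED_MODIFIER constant passed in at
    # construction): with speedHealthModifier = 0.6 and hp at 25/100,
    # currDamageRatio = 0.75 and loss = (1 - 0.6) * 0.75 = 0.3, so the method
    # returns 0.7. At full health it always returns 1.0; at zero health it
    # bottoms out at speedHealthModifier itself.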
def initPortrait(self, p):
q = pygame.Surface(UNIT_HUD_PORTRAIT_SIZE)
        if p is not None:
q.blit(p, (0, 0))
q.convert()
self.portrait = q
def getHP(self):
return self.battleChar.hp.value
def getMaxHP(self):
return self.battleChar.hp.maximum
def getSuperEnergy(self):
return self.battleChar.superEnergy.value
def addSuperEnergy(self, val):
self.battleChar.superEnergy.add(val)
def isDead(self):
return (self.battleChar.hp.value <= 0)
def resetRegion(self):
if self.region is None:
return
if (util.distance(self.precisePos, self.region.pos) >
MAP_REGION_SIZE):
self.region = None
def regainHealth(self):
if ( (not self.battleChar.hp.isMax()) and (not self.isDead()) ):
self.healthRegainTick += 1
if self.healthRegainTick == HEALTH_REGAIN_SPEED:
self.healthRegainTick = 0
if (self.currTerrain == FORTRESS):
hpGain = FORTRESS_HEALTH_REGAIN_AMOUNT
else:
hpGain = PASSIVE_HEALTH_REGAIN_AMOUNT
self.battleChar.hp.add(hpGain)
else:
self.healthRegainTick = 0
def revive(self):
self.respawnTime.setToMin()
self.setPos(self.target)
self.rezzing = True
def rez(self):
self.battleChar.hp.setToMax()
self.removed = False
self.rezzing = False
def getDamagePercentText(self):
return str(self.damagePercent()) + "%"
def damagePercent(self):
mult = 100
if self.currTerrain == self.homeTerrain:
mult += HOME_TERRAIN_DAMAGE_BONUS
elif self.currTerrain == FORTRESS:
mult += FORTRESS_DAMAGE_BONUS
mult += (ALTAR_DAMAGE_BONUS * self.altarCount)
return mult
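    # Illustrative arithmetic for damagePercent (constant values are
    # hypothetical; the real ones live in common.constants): with
    # HOME_TERRAIN_DAMAGE_BONUS = 20, ALTAR_DAMAGE_BONUS = 10, two altars held
    # and the character standing on its home terrain, the result is
    # 100 + 20 + 10 * 2 = 140, which getDamagePercentText() renders as "140%".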
def getSmallImage(self):
return self.images[3][0]
def getTextString(self, expand=False):
text = self.name + "#" + self.speciesAbbrev + "#" + str(self.battleChar.currSuperMove) + "#"
if expand:
size = len(text)
if size > CHARACTER_TRANSFER_NET_MESSAGE_SIZE:
                raise ValueError("character info string exceeds CHARACTER_TRANSFER_NET_MESSAGE_SIZE")
add = CHARACTER_TRANSFER_NET_MESSAGE_SIZE - size
for i in range(add):
text = text + "-"
return text
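    # Wire-format sketch (names are illustrative): a hare named "Hopper" with
    # super move 0 serializes to "Hopper#hr#0#"; with expand=True the string is
    # right-padded with '-' up to CHARACTER_TRANSFER_NET_MESSAGE_SIZE.
    # turnStringIntoInfo() below splits on '#', so the padding ends up in a
    # trailing field that is simply ignored.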
def makeCharacterFromInfo(info, team):
if info.speciesName == "hr":
data = [Hare, hare.Hare]
elif info.speciesName == "fx":
data = [Fox, fox.Fox]
elif info.speciesName == "ct":
        data = [Cat, cat.Cat]
    else:
        raise ValueError("unknown species abbreviation: %s" % info.speciesName)
    return data[0](team, data[1](info.name, info.currSuperMove), info.name)
def turnStringIntoInfo(text):
vals = text.split('#')
name = vals[0]
speciesName = vals[1]
currSuperMove = int(vals[2])
return CharInfo(name, speciesName, currSuperMove)
def convertNetData(dataList, team):
charList = []
for d in dataList:
info = turnStringIntoInfo(d)
charList.append(makeCharacterFromInfo(info, team))
return charList
class CharInfo(object):
def __init__(self, name, speciesName, currSuperMove, valid=True):
self.name = name
self.speciesName = speciesName
self.currSuperMove = currSuperMove
self.valid = valid
def Hare(team, battleChar, name="Hare", portrait=None):
c = MapChar(Hare, hare.Hare, "hr", HARE_TOKENS, HARE_MAP_SPEED_BASE,
HARE_MAP_SPEED_TERRAIN_MODIFIERS,
HARE_MAP_SPEED_TERRITORY_MODIFIERS,
HARE_HEALTH_SPEED_MODIFIER,
team, name, battleChar, portrait, HOME_TERRAINS["hare"])
return c
def Fox(team, battleChar, name="Fox", portrait=None):
c = MapChar(Fox, fox.Fox, "fx", FOX_TOKENS, FOX_MAP_SPEED_BASE,
FOX_MAP_SPEED_TERRAIN_MODIFIERS,
FOX_MAP_SPEED_TERRITORY_MODIFIERS,
FOX_HEALTH_SPEED_MODIFIER,
team, name, battleChar, portrait, HOME_TERRAINS["fox"])
return c
def Cat(team, battleChar, name="Cat", portrait=None):
c = MapChar(Cat, cat.Cat, "ct", CAT_TOKENS, CAT_MAP_SPEED_BASE,
CAT_MAP_SPEED_TERRAIN_MODIFIERS,
CAT_MAP_SPEED_TERRITORY_MODIFIERS,
CAT_HEALTH_SPEED_MODIFIER,
team, name, battleChar, portrait, HOME_TERRAINS["cat"])
return c
|
|
# -*- coding: utf-8 -*-
import datetime
import json
import logging
from cronq import interval_parser
from cronq.backends.mysql import Storage
from cronq.utils import json_serial
from cronq.utils import query_category_id
from cronq.utils import query_category_name
from cronq.utils import query_id
from cronq.utils import query_page
from cronq.utils import query_per_page
from cronq.utils import query_sort
from cronq.utils import split_command
from cronq.utils import task_status
from cronq.utils import took
from flask import Blueprint
from flask import Response
from flask import abort
from flask import flash
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import url_for
blueprint_http = Blueprint('blueprint_http', __name__)
blueprint_http.add_app_template_filter(split_command, 'split_command')
blueprint_http.add_app_template_filter(task_status, 'task_status')
blueprint_http.add_app_template_filter(took, 'took')
logger = logging.getLogger(__name__)
@blueprint_http.before_request
def create_storage():
if request.path.startswith('/static/'):
return
g.storage = Storage(isolation_level=None)
@blueprint_http.after_request
def remove_storage(response):
    # Flask passes the outgoing response to after_request handlers; naming it
    # 'response' avoids shadowing the imported flask.request object.
    if hasattr(g, 'storage'):
        try:
            g.storage.close()
        except Exception:
            logger.exception("exception in remove storage")
    return response
@blueprint_http.route('/')
def index():
jobs = list(g.storage.jobs())
categories = list(g.storage.categories())
categories = {category['id']: category for category in categories}
return render_template('index.html', jobs=jobs, categories=categories)
@blueprint_http.route('/_status')
def status():
return Response(
json.dumps({'status': 'OK'}),
mimetype='application/json',
)
@blueprint_http.route('/job/<int:id>', methods=['GET', 'POST'])
def job(id):
if request.method == 'POST' and request.form.get('run_now') is not None:
g.storage.run_job_now(id)
flash('Job submitted at {0}'.format(datetime.datetime.utcnow()))
return redirect(url_for('.job', id=id))
job_doc = g.storage.get_job(id)
chunks = g.storage.last_event_chunks_for_job(id, 20)
title = job_doc.get('name', '')
return render_template('job.html', job=job_doc, chunks=chunks, title=title)
@blueprint_http.route('/run/<string:id>')
def run_id(id):
events = list(g.storage.events_for_run_id(id))
job_id = events[0]['job_id']
job = g.storage.get_job(job_id)
return render_template('run_id.html', events=events, job=job)
@blueprint_http.route('/failures')
def failures():
failure_events = list(g.storage.failures())
names = {job['id']: job['name'] for job in g.storage.jobs()}
for event in failure_events:
event['job_name'] = names[event['job_id']]
return render_template('failures.html', events=failure_events)
@blueprint_http.route('/api/category/<string:name>', methods=['PUT', 'POST'])
def category(name):
data = request.json
logger.info("Retrieving jobs")
existing_jobs = g.storage.jobs_for_category(name=name)
logger.info("Retrieving category")
category_id = g.storage.category_id_for_name(name)
job_lookup = {}
logger.info("Validating jobs")
if not validate_unique_job_names(data.get('jobs', [])):
abort(400)
logger.info("Indexing existing jobs")
for job in existing_jobs:
job_lookup[job['name']] = job
logger.info("Processing posted jobs")
for job in data.get('jobs', []):
name = job['name']
logger.info("Calcuating next run for {0}".format(name))
next_run, duration = interval_parser.next_run_and_duration_from_8601(
job['schedule'])
existing_job = job_lookup.get(name, {})
new_id = existing_job.get('id')
new_interval = duration.total_seconds()
command = job['command']
logger.info("Adding job {0}".format(name))
g.storage.add_job(
name,
new_interval,
command,
next_run,
new_id,
category_id,
routing_key=job.get('routing_key')
)
if existing_job:
del job_lookup[name]
logger.info("Removing old jobs: {0}".format(job_lookup.keys()))
    remove_jobs(g.storage, job_lookup.values())
return '{"status": "success"}'
@blueprint_http.route('/api/categories', methods=['GET'])
def api_categories():
_id = query_id(request.args)
categories = g.storage.categories(_id=_id)
return Response(
json.dumps({
'data': {
'categories': list(categories)
},
}, default=json_serial),
mimetype='application/json',
)
@blueprint_http.route('/api/categories/<string:name>', methods=['GET'])
def api_category_show(name):
category = g.storage.categories_first(name)
return Response(
json.dumps({
'data': {
'category': {
'id': category.id,
'name': category.name,
},
},
}, default=json_serial),
mimetype='application/json',
)
@blueprint_http.route('/api/jobs', methods=['GET'])
def api_jobs():
per_page = query_per_page(request.args)
page = query_page(request.args)
sort = query_sort(request.args, allowed_fields=['id', 'name', 'category_id'])
category_id = query_category_id(request.args)
_id = query_id(request.args)
category_name = query_category_name(request.args)
if category_name:
category = g.storage.categories_first(category_name)
if category is None:
return Response(
json.dumps({
'data': {
'jobs': [],
},
}, default=json_serial),
mimetype='application/json'
)
category_id = category.id
jobs = g.storage.jobs(
_id=_id,
category_id=category_id,
page=page,
per_page=per_page,
sort=sort,
include_runs=True)
return Response(
json.dumps({
'data': {
'jobs': list(jobs),
},
}, default=json_serial),
mimetype='application/json'
)
@blueprint_http.route('/api/jobs/<int:id>', methods=['GET'])
def api_job_show(id):
jobs = list(g.storage.jobs(_id=id, per_page=1, include_runs=True))
if len(jobs) != 1:
return Response(
json.dumps({
'error': {
'message': 'Job not found for id {0}'.format(id),
},
}, default=json_serial),
mimetype='application/json'
)
return Response(
json.dumps({
'data': {
                'job': jobs[0],
},
}, default=json_serial),
mimetype='application/json'
)
@blueprint_http.route('/api/jobs/<int:id>/run', methods=['POST'])
def api_job_run(id):
jobs = list(g.storage.jobs(_id=id, per_page=1))
if len(jobs) != 1:
return Response(
json.dumps({
'error': {
'message': 'Job not found for id {0}'.format(id),
},
}, default=json_serial),
mimetype='application/json'
)
g.storage.run_job_now(id)
return Response(
json.dumps({
'success': {
'message': 'Job submitted at {0}'.format(datetime.datetime.utcnow()),
},
}, default=json_serial),
mimetype='application/json'
)
def remove_jobs(storage, jobs):
    for job in jobs:
        storage.remove_job(job['id'])
def validate_unique_job_names(jobs):
job_names = [job['name'] for job in jobs]
return len(job_names) == len(set(job_names))
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import sys
from collections import defaultdict
from contextlib import contextmanager
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot, get_scm
from pants.base.worker_pool import SubprocPool
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.target import Target
from pants.goal.products import Products
from pants.goal.workspace import ScmWorkspace
from pants.process.lock import OwnerPrintingInterProcessFileLock
from pants.reporting.report import Report
from pants.source.source_root import SourceRootConfig
class Context(object):
"""Contains the context for a single run of pants.
Task implementations can access configuration data from pants.ini and any flags they have exposed
here as well as information about the targets involved in the run.
Advanced uses of the context include adding new targets to it for upstream or downstream goals to
operate on and mapping of products a goal creates to the targets the products are associated with.
:API: public
"""
class Log(object):
"""A logger facade that logs into the pants reporting framework."""
def __init__(self, run_tracker):
self._run_tracker = run_tracker
def debug(self, *msg_elements):
self._run_tracker.log(Report.DEBUG, *msg_elements)
def info(self, *msg_elements):
self._run_tracker.log(Report.INFO, *msg_elements)
def warn(self, *msg_elements):
self._run_tracker.log(Report.WARN, *msg_elements)
def error(self, *msg_elements):
self._run_tracker.log(Report.ERROR, *msg_elements)
def fatal(self, *msg_elements):
self._run_tracker.log(Report.FATAL, *msg_elements)
# TODO: Figure out a more structured way to construct and use context than this big flat
# repository of attributes?
def __init__(self, options, run_tracker, target_roots,
requested_goals=None, target_base=None, build_graph=None,
build_file_parser=None, address_mapper=None, console_outstream=None, scm=None,
workspace=None, invalidation_report=None):
self._options = options
self.build_graph = build_graph
self.build_file_parser = build_file_parser
self.address_mapper = address_mapper
self.run_tracker = run_tracker
self._log = self.Log(run_tracker)
self._target_base = target_base or Target
self._products = Products()
self._buildroot = get_buildroot()
self._source_roots = SourceRootConfig.global_instance().get_source_roots()
self._lock = OwnerPrintingInterProcessFileLock(os.path.join(self._buildroot, '.pants.workdir.file_lock'))
self._java_sysprops = None # Computed lazily.
self.requested_goals = requested_goals or []
self._console_outstream = console_outstream or sys.stdout
self._scm = scm or get_scm()
self._workspace = workspace or (ScmWorkspace(self._scm) if self._scm else None)
self._replace_targets(target_roots)
self._invalidation_report = invalidation_report
@property
def options(self):
"""Returns the new-style options.
:API: public
"""
return self._options
@property
def log(self):
"""Returns the preferred logger for goals to use.
:API: public
"""
return self._log
@property
def products(self):
"""Returns the Products manager for the current run.
:API: public
"""
return self._products
@property
def source_roots(self):
"""Returns the :class:`pants.source.source_root.SourceRoots` instance for the current run.
:API: public
"""
return self._source_roots
@property
def target_roots(self):
"""Returns the targets specified on the command line.
This set is strictly a subset of all targets in play for the run as returned by self.targets().
Note that for a command line invocation that uses wildcard selectors : or ::, the targets
globbed by the wildcards are considered to be target roots.
:API: public
"""
return self._target_roots
@property
def console_outstream(self):
"""Returns the output stream to write console messages to.
:API: public
"""
return self._console_outstream
@property
def scm(self):
"""Returns the current workspace's scm, if any.
:API: public
"""
return self._scm
@property
def workspace(self):
"""Returns the current workspace, if any."""
return self._workspace
@property
def invalidation_report(self):
return self._invalidation_report
def __str__(self):
ident = Target.identify(self.targets())
return 'Context(id:{}, targets:{})'.format(ident, self.targets())
def submit_background_work_chain(self, work_chain, parent_workunit_name=None):
"""
:API: public
"""
background_root_workunit = self.run_tracker.get_background_root_workunit()
if parent_workunit_name:
# We have to keep this workunit alive until all its child work is done, so
# we manipulate the context manually instead of using it as a contextmanager.
# This is slightly funky, but the with-context usage is so pervasive and
# useful elsewhere that it's worth the funkiness in this one place.
workunit_parent_ctx = self.run_tracker.new_workunit_under_parent(
name=parent_workunit_name, labels=[WorkUnitLabel.MULTITOOL], parent=background_root_workunit)
workunit_parent = workunit_parent_ctx.__enter__()
done_hook = lambda: workunit_parent_ctx.__exit__(None, None, None)
else:
workunit_parent = background_root_workunit # Run directly under the root.
done_hook = None
self.run_tracker.background_worker_pool().submit_async_work_chain(
work_chain, workunit_parent=workunit_parent, done_hook=done_hook)
def background_worker_pool(self):
"""Returns the pool to which tasks can submit background work.
:API: public
"""
return self.run_tracker.background_worker_pool()
def subproc_map(self, f, items):
"""Map function `f` over `items` in subprocesses and return the result.
:API: public
:param f: A multiproc-friendly (importable) work function.
    :param items: An iterable of pickleable arguments to f.
"""
try:
# Pool.map (and async_map().get() w/o timeout) can miss SIGINT.
# See: http://stackoverflow.com/a/1408476, http://bugs.python.org/issue8844
# Instead, we map_async(...), wait *with a timeout* until ready, then .get()
# NB: in 2.x, wait() with timeout wakes up often to check, burning CPU. Oh well.
res = SubprocPool.foreground().map_async(f, items)
while not res.ready():
res.wait(60) # Repeatedly wait for up to a minute.
if not res.ready():
self.log.debug('subproc_map result still not ready...')
return res.get()
except KeyboardInterrupt:
SubprocPool.shutdown(True)
raise
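  # Illustrative usage of subproc_map (paths are arbitrary): the work function
  # must be importable at module scope so the worker processes can unpickle it,
  # e.g. a task could run
  #   sizes = self.context.subproc_map(os.path.getsize, ['/etc/hosts', '/etc/passwd'])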
@contextmanager
def new_workunit(self, name, labels=None, cmd='', log_config=None):
"""Create a new workunit under the calling thread's current workunit.
:API: public
"""
with self.run_tracker.new_workunit(name=name, labels=labels, cmd=cmd, log_config=log_config) as workunit:
yield workunit
def acquire_lock(self):
""" Acquire the global lock for the root directory associated with this context. When
a goal requires serialization, it will call this to acquire the lock.
:API: public
"""
if self.options.for_global_scope().lock:
if not self._lock.acquired:
self._lock.acquire()
def release_lock(self):
"""Release the global lock if it's held.
Returns True if the lock was held before this call.
:API: public
"""
if not self._lock.acquired:
return False
else:
self._lock.release()
return True
def is_unlocked(self):
"""Whether the global lock object is actively holding the lock.
:API: public
"""
return not self._lock.acquired
def _replace_targets(self, target_roots):
# Replaces all targets in the context with the given roots and their transitive dependencies.
#
# If another task has already retrieved the current targets, mutable state may have been
# initialized somewhere, making it now unsafe to replace targets. Thus callers of this method
# must know what they're doing!
#
# TODO(John Sirois): This currently has only 1 use (outside ContextTest) in pantsbuild/pants and
# only 1 remaining known use case in the Foursquare codebase that will be able to go away with
# the post RoundEngine engine - kill the method at that time.
self._target_roots = list(target_roots)
def add_new_target(self, address, target_type, target_base=None, dependencies=None,
derived_from=None, **kwargs):
"""Creates a new target, adds it to the context and returns it.
This method ensures the target resolves files against the given target_base, creating the
directory if needed and registering a source root.
:API: public
"""
rel_target_base = target_base or address.spec_path
abs_target_base = os.path.join(get_buildroot(), rel_target_base)
if not os.path.exists(abs_target_base):
os.makedirs(abs_target_base)
# TODO: Adding source roots on the fly like this is yucky, but hopefully this
# method will go away entirely under the new engine. It's primarily used for injecting
# synthetic codegen targets, and that isn't how codegen will work in the future.
if not self.source_roots.find_by_path(rel_target_base):
# TODO: Set the lang and root category (source/test/thirdparty) based on the target type?
self.source_roots.add_source_root(rel_target_base)
if dependencies:
dependencies = [dep.address for dep in dependencies]
self.build_graph.inject_synthetic_target(address=address,
target_type=target_type,
dependencies=dependencies,
derived_from=derived_from,
**kwargs)
new_target = self.build_graph.get_target(address)
return new_target
def targets(self, predicate=None, **kwargs):
"""Selects targets in-play in this run from the target roots and their transitive dependencies.
Also includes any new synthetic targets created from the target roots or their transitive
dependencies during the course of the run.
See Target.closure_for_targets for remaining parameters.
:API: public
:param predicate: If specified, the predicate will be used to narrow the scope of targets
returned.
    :param bool postorder: `True` to gather transitive dependencies with a postorder traversal;
      `False` (the default) for a preorder traversal.
:returns: A list of matching targets.
"""
target_set = self._collect_targets(self.target_roots, **kwargs)
synthetics = OrderedSet()
for synthetic_address in self.build_graph.synthetic_addresses:
if self.build_graph.get_concrete_derived_from(synthetic_address) in target_set:
synthetics.add(self.build_graph.get_target(synthetic_address))
target_set.update(self._collect_targets(synthetics, **kwargs))
return filter(predicate, target_set)
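  # Illustrative usage of targets() (the predicate is arbitrary and assumes the
  # Target.has_sources helper): select only targets owning Java sources, e.g.
  #   java_targets = context.targets(predicate=lambda t: t.has_sources('.java'))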
def _collect_targets(self, root_targets, **kwargs):
return Target.closure_for_targets(
target_roots=root_targets,
**kwargs
)
def dependents(self, on_predicate=None, from_predicate=None):
"""Returns a map from targets that satisfy the from_predicate to targets they depend on that
satisfy the on_predicate.
:API: public
"""
core = set(self.targets(on_predicate))
dependees = defaultdict(set)
for target in self.targets(from_predicate):
for dependency in target.dependencies:
if dependency in core:
dependees[target].add(dependency)
return dependees
def resolve(self, spec):
"""Returns an iterator over the target(s) the given address points to.
:API: public
"""
return self.build_graph.resolve(spec)
def scan(self, root=None):
"""Scans and parses all BUILD files found under ``root``.
Only BUILD files found under ``root`` are parsed as roots in the graph, but any dependencies of
targets parsed in the root tree's BUILD files will be followed and this may lead to BUILD files
outside of ``root`` being parsed and included in the returned build graph.
:API: public
:param string root: The path to scan; by default, the build root.
:returns: A new build graph encapsulating the targets found.
"""
build_graph = self.build_graph.clone_new()
for address in self.address_mapper.scan_addresses(root):
build_graph.inject_address_closure(address)
return build_graph
|
|
import quex.blackboard as blackboard
import quex.input.regular_expression.core as regular_expression
import quex.input.files.code_fragment as code_fragment
import quex.input.files.indentation_setup as indentation_setup
import quex.input.files.consistency_check as consistency_check
import quex.input.regular_expression.snap_character_string as snap_character_string
from quex.input.regular_expression.construct import Pattern
from quex.blackboard import setup as Setup, \
E_SpecialPatterns
from quex.engine.generator.action_info import CodeFragment, UserCodeFragment, GeneratedCode, PatternActionInfo
from quex.engine.generator.languages.address import get_label
import quex.engine.generator.skipper.character_set as skip_character_set
import quex.engine.generator.skipper.range as skip_range
import quex.engine.generator.skipper.nested_range as skip_nested_range
import quex.engine.generator.state.indentation_counter as indentation_counter
from quex.engine.misc.file_in import EndOfStreamException, \
check, \
check_or_die, \
copy, \
error_msg, \
get_current_line_info_number, \
read_identifier, \
read_option_start, \
read_option_value, \
read_until_letter, \
read_until_whitespace, \
skip_whitespace, \
verify_word_in_list
from quex.engine.state_machine.core import StateMachine
import quex.engine.state_machine.check.identity as identity_checker
import quex.engine.state_machine.sequentialize as sequentialize
import quex.engine.state_machine.repeat as repeat
import quex.engine.state_machine.algorithm.beautifier as beautifier
import quex.engine.state_machine.algorithm.nfa_to_dfa as nfa_to_dfa
import quex.engine.state_machine.algorithm.hopcroft_minimization as hopcroft
from copy import deepcopy
# ModeDescription/Mode Objects:
#
# During parsing 'ModeDescription' objects are generated. Once parsing is over,
# the descriptions are translated into 'real' mode objects where code can be generated
# from. All matters of inheritance and pattern resolution are handled in the
# transition from description to real mode.
#-----------------------------------------------------------------------------------------
# mode_description_db: storing the mode information into a dictionary:
# key = mode name
# item = ModeDescription object
#-----------------------------------------------------------------------------------------
mode_description_db = {}
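# Illustrative flow (simplified sketch of how the pieces below fit together):
#   parse(fh)    # called once per 'mode' section; registers a ModeDescription
#                # in mode_description_db
#   finalize()   # after parsing, turns every ModeDescription into a Mode and
#                # stores it in blackboard.mode_db, then runs consistency_check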
class OptionInfo:
"""This type is used only in context of a dictionary, the key
to the dictionary is the option's name."""
def __init__(self, Type, Domain=None, Default=-1):
# self.name = Option see comment above
self.type = Type
self.domain = Domain
self.default_value = Default
class ModeDescription:
def __init__(self, Name, Filename, LineN):
self.filename = Filename
self.line_n = LineN
self.name = Name
self.base_modes = []
# Read pattern information into dictionary object. This allows for the following:
# (i) inheritance of pattern behavior in different modes.
# (ii) 'virtual' patterns in the sense that their behavior can be
# overwritten.
self.__matches = {} # genuine patterns as specified in the mode declaration
self.__repriorization_db = {} # patterns of the base class to be reprioritized
# # map: pattern --> new pattern index
self.__deletion_db = {} # patterns of the base class to be deleted
# The list of actual pattern action pairs is constructed inside the function
        # '__post_process(...)'. Function 'get_pattern_action_pairs(...)' calls it
# in case that this variable is still [].
self.__pattern_action_pair_list = []
# (*) Default Options
self.options = {}
for name, descr in mode_option_info_db.items():
# Not only copy the reference, copy the default value object!
self.options[name] = deepcopy(descr.default_value)
# (*) Default Event Handler: Empty
self.events = {}
for name in event_handler_db.keys():
self.events[name] = CodeFragment()
# Register ModeDescription at the mode database
mode_description_db[Name] = self
def add_match(self, PatternStr, Action, ThePattern, Comment=""):
assert ThePattern.sm.is_DFA_compliant()
assert ThePattern.inverse_pre_context_sm is None \
or ThePattern.inverse_pre_context_sm.is_DFA_compliant()
if self.__matches.has_key(PatternStr):
error_msg("Pattern '%s' appeared twice in mode definition.\n" % PatternStr + \
"Only the last definition is considered.",
Action.filename, Action.line_n, DontExitF=True)
if len(ThePattern.sm.get_orphaned_state_index_list()) != 0 \
or ( ThePattern.inverse_pre_context_sm is not None \
and len(ThePattern.inverse_pre_context_sm.get_orphaned_state_index_list()) != 0):
error_msg("Pattern '%s' resulted in state machine with orphan states.\n" % PatternStr + \
"(After Transformation to internal encoding).\n" + \
"Please, submit a bug at quex.sourceforge.net.",
DontExitF=True, WarningF=True)
self.__matches[PatternStr] = PatternActionInfo(ThePattern, Action, PatternStr,
ModeName=self.name, Comment=Comment)
def add_match_priority(self, Pattern, ThePattern, PatternIdx, FileName, LineN):
if self.__matches.has_key(Pattern):
error_msg("Pattern '%s' appeared twice in mode definition.\n" % Pattern + \
"Only this priority mark is considered.", FileName, LineN)
self.__repriorization_db[Pattern] = [ThePattern, FileName, LineN, PatternIdx]
def add_match_deletion(self, Pattern, ThePattern, FileName, LineN):
if self.__matches.has_key(Pattern):
error_msg("Deletion of '%s' which appeared before in same mode.\n" % Pattern + \
"Deletion of pattern.", FileName, LineN)
self.__deletion_db[Pattern] = [ThePattern, FileName, LineN]
def add_option(self, Option, Value):
""" SANITY CHECK:
           -- which options are concatenated to a list
-- which ones are replaced
-- what are the values of the options
"""
assert mode_option_info_db.has_key(Option)
option_info = mode_option_info_db[Option]
if option_info.type == "list":
self.options.setdefault(Option, []).append(Value)
else:
if option_info.domain is not None: assert Value in option_info.domain
self.options[Option] = Value
def get_pattern_action_pair(self, PatternStr):
return self.__matches[PatternStr]
def get_match_list(self):
return self.__matches.values()
def get_repriorization_db(self):
return self.__repriorization_db
def get_deletion_db(self):
return self.__deletion_db
def has_event_handler(self):
for fragment in self.events.values():
if fragment.get_code() != "": return True
return False
def has_pattern(self, PatternStr):
return self.__matches.has_key(PatternStr)
def has_own_matches(self):
return len(self.__matches) != 0
def has_matches(self):
if self.__matches != {}: return True
for name in self.base_modes:
if mode_description_db[name].has_matches(): return True
return False
class Mode:
def __init__(self, Other):
"""Translate a ModeDescription into a real Mode. Here is the place were
all rules of inheritance mechanisms and pattern precedence are applied.
"""
assert isinstance(Other, ModeDescription)
self.name = Other.name
self.filename = Other.filename
self.line_n = Other.line_n
self.options = Other.options
self.__base_mode_sequence = []
self.__determine_base_mode_sequence(Other, [])
# (1) Collect Event Handlers
self.__event_handler_code_fragment_list = {}
self.__collect_event_handler()
# (2) Collect Pattern/Action Pairs
self.__history_repriorization = []
self.__history_deletion = []
self.__pattern_action_pair_list = self.__collect_pattern_action_pairs()
# (3) Collection Options
self.__collect_options()
def insert_code_fragment_at_front(self, EventName, TheCodeFragment):
assert isinstance(TheCodeFragment, CodeFragment)
assert EventName == "on_end_of_stream"
self.__event_handler_code_fragment_list[EventName].insert(0, TheCodeFragment)
def set_code_fragment_list(self, EventName, TheCodeFragment):
assert isinstance(TheCodeFragment, CodeFragment)
assert EventName in ["on_end_of_stream", "on_failure"]
assert len(self.__event_handler_code_fragment_list[EventName]) == 0
self.__event_handler_code_fragment_list[EventName] = [TheCodeFragment]
def has_base_mode(self):
return len(self.__base_mode_sequence) != 1
def has_code_fragment_list(self, EventName):
assert self.__event_handler_code_fragment_list.has_key(EventName)
return len(self.__event_handler_code_fragment_list[EventName]) != 0
def get_base_mode_sequence(self):
return self.__base_mode_sequence
def get_base_mode_name_list(self):
return map(lambda mode: mode.name, self.__base_mode_sequence)
def get_code_fragment_list(self, EventName):
assert self.__event_handler_code_fragment_list.has_key(EventName)
return self.__event_handler_code_fragment_list[EventName]
def get_pattern_action_pair_list(self):
return self.__pattern_action_pair_list
def get_indentation_counter_terminal_index(self):
"""Under some circumstances a terminal code need to jump to the indentation
counter directly. Thus, it must be known in what terminal it is actually
located.
RETURNS: None, if no indentation counter is involved.
> 0, terminal id of the terminal that contains the indentation
counter.
"""
for info in self.__pattern_action_pair_list:
action = info.action()
if action.__class__.__name__ != "GeneratedCode": continue
elif action.function != indentation_counter.do: continue
return info.pattern().sm.get_id()
return None
def get_documentation(self):
L = max(map(lambda mode: len(mode.name), self.__base_mode_sequence))
txt = "\nMODE: %s\n" % self.name
txt += "\n"
if len(self.__base_mode_sequence) != 1:
txt += " BASE MODE SEQUENCE:\n"
base_mode_name_list = map(lambda mode: mode.name, self.__base_mode_sequence[:-1])
base_mode_name_list.reverse()
for name in base_mode_name_list:
txt += " %s\n" % name
txt += "\n"
if len(self.__history_deletion) != 0:
txt += " DELETION ACTIONS:\n"
for entry in self.__history_deletion:
txt += " %s: %s%s (from mode %s)\n" % \
(entry[0], " " * (L - len(self.name)), entry[1], entry[2])
txt += "\n"
if len(self.__history_repriorization) != 0:
txt += " PRIORITY-MARK ACTIONS:\n"
self.__history_repriorization.sort(lambda x, y: cmp(x[4], y[4]))
for entry in self.__history_repriorization:
txt += " %s: %s%s (from mode %s) (%i) --> (%i)\n" % \
(entry[0], " " * (L - len(self.name)), entry[1], entry[2], entry[3], entry[4])
txt += "\n"
if len(self.__pattern_action_pair_list) != 0:
txt += " PATTERN-ACTION PAIRS:\n"
self.__pattern_action_pair_list.sort(lambda x, y:
cmp(x.pattern().sm.get_id(),
y.pattern().sm.get_id()))
for pattern_action_pair in self.__pattern_action_pair_list:
txt += " (%3i) %s: %s%s\n" % \
(pattern_action_pair.pattern().sm.get_id(),
pattern_action_pair.mode_name, " " * (L - len(self.name)),
pattern_action_pair.pattern_string())
txt += "\n"
return txt
def default_indentation_handler_sufficient(Mode):
"""If no user defined indentation handler is defined, then the
default token handler is sufficient.
"""
return not Mode.has_code_fragment_list("on_indentation_error") \
and not Mode.has_code_fragment_list("on_indentation_bad") \
and not Mode.has_code_fragment_list("on_indent") \
and not Mode.has_code_fragment_list("on_dedent") \
and not Mode.has_code_fragment_list("on_nodent")
def __determine_base_mode_sequence(self, ModeDescr, InheritancePath):
"""Determine the sequence of base modes. The type of sequencing determines
also the pattern precedence. The 'deep first' scheme is chosen here. For
        example, a mode hierarchy of
A
/ \
B C
/ \ / \
D E F G
results in a sequence: (A, B, D, E, C, F, G).reverse()
        This means that patterns and event handlers of 'E' have precedence over
        those of 'C' because they are the children of a preceding base mode.
This function detects circular inheritance.
"""
if ModeDescr.name in InheritancePath:
msg = "mode '%s'\n" % InheritancePath[0]
for mode_name in InheritancePath[InheritancePath.index(ModeDescr.name) + 1:]:
msg += " inherits mode '%s'\n" % mode_name
msg += " inherits mode '%s'" % ModeDescr.name
error_msg("circular inheritance detected:\n" + msg, ModeDescr.filename, ModeDescr.line_n)
base_mode_name_list_reversed = deepcopy(ModeDescr.base_modes)
#base_mode_name_list_reversed.reverse()
for name in base_mode_name_list_reversed:
# -- does mode exist?
verify_word_in_list(name, mode_description_db.keys(),
"Mode '%s' inherits mode '%s' which does not exist." % (ModeDescr.name, name),
ModeDescr.filename, ModeDescr.line_n)
if name in map(lambda m: m.name, self.__base_mode_sequence): continue
# -- grab the mode description
mode_descr = mode_description_db[name]
self.__determine_base_mode_sequence(mode_descr, InheritancePath + [ModeDescr.name])
self.__base_mode_sequence.append(ModeDescr)
return self.__base_mode_sequence
def __collect_event_handler(self):
"""Collect event handlers from base mode and the current mode.
Event handlers of the most 'base' mode come first, then the
derived event handlers.
See '__determine_base_mode_sequence(...) for details about the line-up.
"""
for event_name in event_handler_db.keys():
self.__event_handler_code_fragment_list[event_name] = []
for mode_descr in self.__base_mode_sequence:
for event_name in event_handler_db.keys():
fragment = mode_descr.events[event_name]
if fragment is not None and fragment.get_code() != "":
self.__event_handler_code_fragment_list[event_name].append(fragment)
return
def __collect_pattern_action_pairs(self):
"""Collect patterns of all inherited modes. Patterns are like virtual functions
        in C++ or other object-oriented programming languages. Also, the patterns of the
        uppermost mode have the highest priority, i.e. they come first.
"""
def __ensure_pattern_indeces_follow_precedence(MatchList, RepriorizationDB, PrevMaxPatternIndex):
"""When a derived mode is defined before its base mode, then its pattern ids
            (according to the time they were created) are lower than those of the base
mode. This would imply that they have higher precedence, which is against
our matching rules. Here, pattern ids are adapted to be higher than a certain
minimum, and follow the same precedence sequence.
"""
# Patterns of a 'lower precedence mode' **must** have higher pattern ids
            # than patterns of a 'higher precedence mode'. This is to ensure that
# base mode patterns precede derived mode patterns.
min_pattern_index = min(map(lambda match: match.pattern().sm.get_id(),
MatchList))
if min_pattern_index > PrevMaxPatternIndex:
return MatchList, RepriorizationDB
match_list = deepcopy(MatchList)
repriorization_db = deepcopy(RepriorizationDB)
# Determine the offset for each pattern
offset = PrevMaxPatternIndex + 1 - min_pattern_index
assert offset >= 1
# Assign new pattern ids starting from MinPatternID
for match in match_list:
current_pattern_id = match.pattern().sm.get_id()
match.pattern().sm.set_id(current_pattern_id + offset)
# The reprioritizations must also be adapted
## for key, info in repriorization_db.items():
## print "##reprio:", key, info[-1], info[-1] + offset
            # Iterate over the value lists so the trailing pattern index entry
            # can be shifted in place.
            for info in repriorization_db.values():
                info[-1] += offset
return match_list, repriorization_db
def __handle_deletion_and_repriorization(CurrentModeName, pattern_action_pair_list,
repriorization_db, deletion_db):
def __validate_marks(DB, DoneDB, CommentStr):
ok_f = True
for pattern, info in DB.items():
if DoneDB.has_key(pattern): continue
ok_f = False
error_msg("Pattern '%s' was marked %s but does not\n" % (pattern, CommentStr) + \
"exist in any base mode of mode '%s'." % self.name,
info[1], info[2], DontExitF=True, WarningF=False)
return ok_f
def __is_in_patterns(AllegedIdenticalSM, MyDB):
for pattern_str, info in MyDB.items():
pattern = info[0]
if identity_checker.do(AllegedIdenticalSM, pattern): return pattern_str
return ""
# DELETION / PRIORITY-MARK
deletion_done_db = {}
repriorization_done_db = {}
i = 0
size = len(pattern_action_pair_list)
while i < size:
match = pattern_action_pair_list[i]
pattern = match.pattern()
found_pattern = __is_in_patterns(pattern, deletion_db)
if found_pattern != "":
# Delete pattern from the list of pattern action pairs
del pattern_action_pair_list[i]
size -= 1
# Mark 'deletion applied'
deletion_done_db[found_pattern] = True
self.__history_deletion.append([CurrentModeName, match.pattern, match.mode_name])
continue
found_pattern = __is_in_patterns(pattern, repriorization_db)
if found_pattern != "":
# Adapt the pattern index, this automatically adapts the match precedence
old_pattern_id = pattern.sm.get_id()
new_pattern_id = repriorization_db[found_pattern][-1]
new_match = deepcopy(match)
new_match.pattern().sm.set_id(new_pattern_id)
pattern_action_pair_list[i] = new_match
# Mark 'repriorization applied'
repriorization_done_db[found_pattern] = True
self.__history_repriorization.append([CurrentModeName, match.pattern, match.mode_name,
old_pattern_id, new_pattern_id])
i += 1
# Ensure that all mentioned marks really had some effect.
if not __validate_marks(deletion_db, deletion_done_db, "for DELETION") \
or not __validate_marks(repriorization_db, repriorization_done_db, "with PRIORITY-MARK"):
error_msg("Abort.")
return
def __add_new_pattern_action_pair(pattern_action_pair_list, PatternActionPair):
# Shallow copy is enough! Later on, there might be actions that
# generate source code, and then the source code takes the place of
            # the action. For this to work, inherited actions must be disentangled.
pattern_action_pair_list.append(copy(PatternActionPair))
result = []
prev_max_pattern_index = -1
# Iterate from the base to the top (include this mode's pattern)
for mode_descr in self.__base_mode_sequence:
repriorization_db = {}
consider_pattern_action_pairs_f = mode_descr.has_own_matches()
if consider_pattern_action_pairs_f:
match_list, repriorization_db = \
__ensure_pattern_indeces_follow_precedence(mode_descr.get_match_list(),
mode_descr.get_repriorization_db(),
prev_max_pattern_index)
# Delete/Repriorize patterns from more basic modes
__handle_deletion_and_repriorization(mode_descr.name, result,
repriorization_db, mode_descr.get_deletion_db())
if consider_pattern_action_pairs_f:
# Add the new pattern action pairs
for pattern_action_pair in match_list:
__add_new_pattern_action_pair(result, pattern_action_pair)
# Determine the max pattern index at this level of inheritance
prev_max_pattern_index = max([prev_max_pattern_index] + \
map(lambda match: match.pattern().sm.get_id(),
match_list))
return result
def __collect_options(self):
for mode in self.__base_mode_sequence[:-1]:
for name, option_descr in mode_option_info_db.items():
if option_descr.type != "list": continue
# Need to decouple by means of 'deepcopy'
self.options.setdefault(name, []).extend(mode.options[name])
mode_option_info_db = {
# -- a mode can be inheritable or not or only inheritable. if a mode
    #    is only inheritable it is not printed on its own, only as a base
# mode for another mode. default is 'yes'
"inheritable": OptionInfo("single", ["no", "yes", "only"], Default="yes"),
    # -- a mode can restrict the possible modes to exit to. this is for the
    #    sake of clarity. if no exit is explicitly mentioned, all modes are
    #    possible. if a transition to a mode that is not in the list of
    #    explicitly stated exits is attempted, an error occurs.
    #    entries work analogously.
"exit": OptionInfo("list", Default=[]),
"entry": OptionInfo("list", Default=[]),
    # -- a mode can restrict the exits and entries to those explicitly mentioned;
    #    then, a derived mode cannot add new exits or entries
"restrict": OptionInfo("list", ["exit", "entry"], Default=[]),
    # -- a mode can have 'skippers' that effectively skip ranges that are of no interest.
"skip": OptionInfo("list", Default=[]), # "multiple: RE-character-set
"skip_range": OptionInfo("list", Default=[]), # "multiple: RE-character-string RE-character-string
"skip_nested_range": OptionInfo("list", Default=[]), # "multiple: RE-character-string RE-character-string
# -- indentation setup information
"indentation": OptionInfo("single", Default=None),
}
event_handler_db = {
"on_entry": "On entry of a mode.",
"on_exit": "On exit of a mode.",
"on_indent": "On opening indentation.",
"on_nodent": "On same indentation.",
"on_dedent": "On closing indentation'.",
"on_n_dedent": "On closing indentation'.",
"on_indentation_error": "Closing indentation on non-border.",
"on_indentation_bad": "On bad character in indentation.",
"on_indentation": "General Indentation Handler.",
"on_match": "On each match (before pattern action).",
"on_after_match": "On each match (after pattern action).",
"on_failure": "In case that no pattern matches.",
"on_skip_range_open": "On missing skip range delimiter.",
"on_end_of_stream": "On end of file/stream.",
}
def parse(fh):
"""This function parses a mode description and enters it into the
'mode_description_db'. Once all modes are parsed
they can be translated into 'real' modes and are located in
'blackboard.mode_db'.
"""
# NOTE: Catching of EOF happens in caller: parse_section(...)
skip_whitespace(fh)
mode_name = read_identifier(fh)
if mode_name == "":
error_msg("missing identifier at beginning of mode definition.", fh)
# NOTE: constructor does register this mode in the mode_db
new_mode = ModeDescription(mode_name, fh.name, get_current_line_info_number(fh))
# (*) inherited modes / options
skip_whitespace(fh)
dummy = fh.read(1)
if dummy not in [":", "{"]:
error_msg("missing ':' or '{' after mode '%s'" % mode_name, fh)
if dummy == ":":
__parse_option_list(new_mode, fh)
# (*) read in pattern-action pairs and events
while __parse_element(new_mode, fh):
pass
# (*) check for modes w/o pattern definitions
if not new_mode.has_event_handler() and not new_mode.has_own_matches():
if new_mode.options["inheritable"] != "only":
new_mode.options["inheritable"] = "only"
error_msg("Mode without pattern and event handlers needs to be 'inheritable only'.\n" + \
"<inheritable: only> has been added automatically.", fh, DontExitF=True)
def finalize():
"""After all modes have been defined, the mode descriptions can now
be translated into 'real' modes.
"""
global mode_description_db
# (*) Translate each mode description into a 'real' mode
for name, mode_descr in mode_description_db.iteritems():
blackboard.mode_db[name] = Mode(mode_descr)
# (*) perform consistency check
consistency_check.do(blackboard.mode_db)
def __parse_option_list(new_mode, fh):
position = fh.tell()
try:
# ':' => inherited modes/options follow
skip_whitespace(fh)
__parse_base_mode_list(fh, new_mode)
while __parse_option(fh, new_mode):
pass
except EndOfStreamException:
fh.seek(position)
error_msg("End of file reached while parsing options of mode '%s'." % new_mode.name, fh)
def __parse_base_mode_list(fh, new_mode):
new_mode.base_modes = []
trailing_comma_f = False
while 1 + 1 == 2:
if check(fh, "{"): fh.seek(-1, 1); break
elif check(fh, "<"): fh.seek(-1, 1); break
skip_whitespace(fh)
identifier = read_identifier(fh)
if identifier == "": break
new_mode.base_modes.append(identifier)
trailing_comma_f = False
if not check(fh, ","): break
trailing_comma_f = True
if trailing_comma_f:
error_msg("Trailing ',' after base mode '%s'." % new_mode.base_modes[-1], fh,
DontExitF=True, WarningF=True)
elif len(new_mode.base_modes) != 0:
# This check is a 'service' -- for those who follow the old convention
pos = fh.tell()
skip_whitespace(fh)
dummy_identifier = read_identifier(fh)
if dummy_identifier != "":
error_msg("Missing separating ',' between base modes '%s' and '%s'.\n" \
% (new_mode.base_modes[-1], dummy_identifier) + \
"(The comma separator is mandatory since quex 0.53.1)", fh)
fh.seek(pos)
def __parse_string(fh, Name):
pos = fh.tell()
if fh.read(1) != "\"":
pos = fh.tell()
msg = fh.read(5)
fh.seek(pos)
error_msg("%s can\n" % Name +
"only be a string and must start with a quote like \".\n" +
"Found '%s'" % msg, fh)
sequence = snap_character_string.get_character_code_sequence(fh)
end_pos = fh.tell()
fh.seek(pos)
msg = fh.read(end_pos - pos)
return msg, sequence
def __parse_option(fh, new_mode):
def get_pattern_object(SM):
if not SM.is_DFA_compliant(): result = nfa_to_dfa.do(SM)
else: result = SM
result = hopcroft.do(result, CreateNewStateMachineF=False)
return Pattern(result, AllowStateMachineTrafoF=True)
identifier = read_option_start(fh)
if identifier is None: return False
verify_word_in_list(identifier, mode_option_info_db.keys(),
"mode option", fh.name, get_current_line_info_number(fh))
if identifier == "skip":
# A skipper 'eats' characters at the beginning of a pattern that belong
# to a specified set of characters. A useful application is most probably
# the whitespace skipper '[ \t\n]'. The skipper definition allows quex to
# implement a very effective way to skip these regions.
pattern_str, trigger_set = regular_expression.parse_character_set(fh, PatternStringF=True)
skip_whitespace(fh)
if fh.read(1) != ">":
error_msg("missing closing '>' for mode option '%s'." % identifier, fh)
if trigger_set.is_empty():
error_msg("Empty trigger set for skipper." % identifier, fh)
# TriggerSet skipping is implemented the following way: As soon as one element of the
# trigger set appears, the state machine enters the 'trigger set skipper section'.
# Enter the skipper as if the opener pattern was a normal pattern and the 'skipper' is the action.
# NOTE: The correspondent CodeFragment for skipping is created in 'implement_skippers(...)'
pattern_sm = StateMachine()
pattern_sm.add_transition(pattern_sm.init_state_index, trigger_set, AcceptanceF=True)
# Skipper code is to be generated later
action = GeneratedCode(skip_character_set.do,
FileName = fh.name,
LineN = get_current_line_info_number(fh))
action.data["character_set"] = trigger_set
new_mode.add_match(pattern_str, action, get_pattern_object(pattern_sm),
Comment=E_SpecialPatterns.SKIP)
return True
elif identifier in ["skip_range", "skip_nested_range"]:
# A non-nesting skipper can contain a full fledged regular expression as opener,
# since it only affects the trigger. Not so the nested range skipper -- see below.
# -- opener
skip_whitespace(fh)
if identifier == "skip_nested_range":
# Nested range state machines only accept 'strings' not state machines
opener_str, opener_sequence = __parse_string(fh, "Opener pattern for 'skip_nested_range'")
opener_sm = StateMachine.from_sequence(opener_sequence)
else:
opener_str, opener_pattern = regular_expression.parse(fh)
opener_sm = opener_pattern.sm
# For 'range skipping' the opener sequence is not needed, only the opener state
# machine is woven into the pattern matching state machine.
opener_sequence = None
skip_whitespace(fh)
# -- closer
closer_str, closer_sequence = __parse_string(fh, "Closing pattern for 'skip_range' or 'skip_nested_range'")
skip_whitespace(fh)
if fh.read(1) != ">":
error_msg("missing closing '>' for mode option '%s'" % identifier, fh)
# Skipper code is to be generated later
generator_function, comment = {
"skip_range": (skip_range.do, E_SpecialPatterns.SKIP_RANGE),
"skip_nested_range": (skip_nested_range.do, E_SpecialPatterns.SKIP_NESTED_RANGE),
}[identifier]
action = GeneratedCode(generator_function,
FileName = fh.name,
LineN = get_current_line_info_number(fh))
action.data["opener_sequence"] = opener_sequence
action.data["closer_sequence"] = closer_sequence
action.data["mode_name"] = new_mode.name
new_mode.add_match(opener_str, action, get_pattern_object(opener_sm), Comment=comment)
return True
elif identifier == "indentation":
value = indentation_setup.do(fh)
# Enter 'Newline' and 'Suppressed Newline' as matches into the engine.
# Similar to skippers, the indentation count is then triggered by the newline.
# -- Suppressed Newline = Suppressor followed by Newline,
# then newline does not trigger indentation counting.
suppressed_newline_pattern_str = ""
if value.newline_suppressor_state_machine.get() is not None:
suppressed_newline_pattern_str = \
"(" + value.newline_suppressor_state_machine.pattern_string() + ")" \
+ "(" + value.newline_state_machine.pattern_string() + ")"
suppressed_newline_sm = \
sequentialize.do([value.newline_suppressor_state_machine.get(),
value.newline_state_machine.get()])
FileName = value.newline_suppressor_state_machine.file_name
LineN = value.newline_suppressor_state_machine.line_n
# Go back to start.
code = UserCodeFragment("goto %s;" % get_label("$start", U=True), FileName, LineN)
new_mode.add_match(suppressed_newline_pattern_str, code,
get_pattern_object(suppressed_newline_sm),
Comment=E_SpecialPatterns.SUPPRESSED_INDENTATION_NEWLINE)
# When there is an empty line, then there shall be no indentation count on it.
# Here comes the trick:
#
# Let newline
# be defined as: newline ([space]* newline)*
#
# This way empty lines are eaten away before the indentation count is activated.
# -- 'space'
x0 = StateMachine()
x0.add_transition(x0.init_state_index, value.indentation_count_character_set(),
AcceptanceF=True)
# -- '[space]*'
x1 = repeat.do(x0)
# -- '[space]* newline'
x2 = sequentialize.do([x1, value.newline_state_machine.get()])
# -- '([space]* newline)*'
x3 = repeat.do(x2)
# -- 'newline ([space]* newline)*'
x4 = sequentialize.do([value.newline_state_machine.get(), x3])
# -- nfa to dfa; hopcroft optimization
sm = beautifier.do(x4)
FileName = value.newline_state_machine.file_name
LineN = value.newline_state_machine.line_n
action = GeneratedCode(indentation_counter.do, FileName, LineN)
action.data["indentation_setup"] = value
new_mode.add_match(value.newline_state_machine.pattern_string(), action,
get_pattern_object(sm),
Comment=E_SpecialPatterns.INDENTATION_NEWLINE)
# Announce the mode to which the setup belongs
value.set_containing_mode_name(new_mode.name)
else:
value = read_option_value(fh)
# The 'verify_word_in_list()' call must have ensured that the following holds
assert mode_option_info_db.has_key(identifier)
# Is the option of the appropriate value?
option_info = mode_option_info_db[identifier]
if option_info.domain is not None and value not in option_info.domain:
error_msg("Tried to set value '%s' for option '%s'. " % (value, identifier) + \
"Though, possible for this option are only: %s." % repr(option_info.domain)[1:-1], fh)
# Finally, set the option
new_mode.add_option(identifier, value)
return True
def __parse_element(new_mode, fh):
"""Returns: False, if a closing '}' has been found.
True, else.
"""
position = fh.tell()
try:
description = "Pattern or event handler name.\n" + \
"Missing closing '}' for end of mode"
skip_whitespace(fh)
# NOTE: Do not use 'read_word' since we need to continue directly after
# whitespace, if a regular expression is to be parsed.
position = fh.tell()
word = read_until_whitespace(fh)
if word == "}": return False
# -- check for 'on_entry', 'on_exit', ...
if __parse_event(new_mode, fh, word): return True
fh.seek(position)
description = "Start of mode element: regular expression"
pattern_str, pattern = regular_expression.parse(fh)
if new_mode.has_pattern(pattern_str):
previous = new_mode.get_pattern_action_pair(pattern_str)
error_msg("Pattern has been defined twice.", fh, DontExitF=True)
error_msg("First defined here.",
previous.action().filename, previous.action().line_n)
position = fh.tell()
description = "Start of mode element: code fragment for '%s'" % pattern_str
__parse_action(new_mode, fh, pattern_str, pattern)
except EndOfStreamException:
fh.seek(position)
error_msg("End of file reached while parsing %s." % description, fh)
return True
def __parse_action(new_mode, fh, pattern_str, pattern):
position = fh.tell()
try:
skip_whitespace(fh)
position = fh.tell()
code_obj = code_fragment.parse(fh, "regular expression", ErrorOnFailureF=False)
if code_obj is not None:
new_mode.add_match(pattern_str, code_obj, pattern)
return
fh.seek(position)
word = read_until_letter(fh, [";"])
if word == "PRIORITY-MARK":
# This mark 'lowers' the priority of a pattern to the priority of the current
# pattern index (important for inherited patterns that have higher precedence).
# The parser already constructed a state machine for the pattern that is to
# be assigned a new priority. Since this machine is not used, let us just
# use its id.
fh.seek(-1, 1)
check_or_die(fh, ";", ". Since quex version 0.33.5 this is required.")
new_mode.add_match_priority(pattern_str, pattern, pattern.sm.get_id(),
fh.name, get_current_line_info_number(fh))
elif word == "DELETION":
# This mark deletes any pattern that was inherited with the same 'name'
fh.seek(-1, 1)
check_or_die(fh, ";", ". Since quex version 0.33.5 this is required.")
new_mode.add_match_deletion(pattern_str, pattern, fh.name, get_current_line_info_number(fh))
else:
error_msg("Missing token '{', 'PRIORITY-MARK', 'DELETION', or '=>' after '%s'.\n" % pattern_str + \
"found: '%s'. Note, that since quex version 0.33.5 it is required to add a ';'\n" % word + \
"to the commands PRIORITY-MARK and DELETION.", fh)
except EndOfStreamException:
fh.seek(position)
error_msg("End of file reached while parsing action code for pattern.", fh)
def __parse_event(new_mode, fh, word):
pos = fh.tell()
# Allow '<<EOF>>' and '<<FAIL>>' out of respect for classical tools like 'lex'
if word == "<<EOF>>": word = "on_end_of_stream"
elif word == "<<FAIL>>": word = "on_failure"
elif word in blackboard.all_section_title_list:
error_msg("Pattern '%s' is a quex section title. Has the closing '}' of mode %s \n" % (word, new_mode.name) \
+ "been forgotten? Else use quotes, i.e. \"%s\"." % word, fh)
elif len(word) < 3 or word[:3] != "on_": return False
comment = "Unknown event handler '%s'. \n" % word + \
"Note, that any pattern starting with 'on_' is considered an event handler.\n" + \
"use double quotes to bracket patterns that start with 'on_'."
__general_validate(fh, new_mode, word, pos)
verify_word_in_list(word, event_handler_db.keys(), comment, fh)
__validate_required_token_policy_queue(word, fh, pos)
continue_f = True
if word == "on_end_of_stream":
# When a termination token is sent, no other token shall follow.
# => Enforce return from the analyzer! Do not allow CONTINUE!
continue_f = False
new_mode.events[word] = code_fragment.parse(fh, "%s::%s event handler" % (new_mode.name, word),
ContinueF=continue_f)
return True
def __general_validate(fh, Mode, Name, pos):
if Name == "on_indentation":
fh.seek(pos)
error_msg("Definition of 'on_indentation' is no longer supported since version 0.51.1.\n"
"Please, use 'on_indent' for the event of an opening indentation, 'on_dedent'\n"
"for closing indentation, and 'on_nodent' for no change in indentation.", fh)
def error_dedent_and_ndedent(code, A, B):
filename = "(unknown)"
line_n = "0"
if hasattr(code, "filename"): filename = code.filename
if hasattr(code, "line_n"): line_n = code.line_n
error_msg("Indentation event handler '%s' cannot be defined, because\n" % A,
fh, DontExitF=True, WarningF=False)
error_msg("the alternative '%s' has already been defined." % B,
filename, line_n)
if Name == "on_dedent" and Mode.events.has_key("on_n_dedent"):
fh.seek(pos)
code = Mode.events["on_n_dedent"]
if code.get_code() != "":
error_dedent_and_ndedent(code, "on_dedent", "on_n_dedent")
if Name == "on_n_dedent" and Mode.events.has_key("on_dedent"):
fh.seek(pos)
code = Mode.events["on_dedent"]
if code.get_code() != "":
error_dedent_and_ndedent(code, "on_n_dedent", "on_dedent")
def __validate_required_token_policy_queue(Name, fh, pos):
"""Some handlers are better only used with token policy 'queue'."""
if Name not in ["on_entry", "on_exit",
"on_indent", "on_n_dedent", "on_dedent", "on_nodent",
"on_indentation_bad", "on_indentation_error",
"on_indentation"]:
return
if Setup.token_policy == "queue":
return
if Setup.warning_disabled_no_token_queue_f:
return
fh.seek(pos)
error_msg("Using '%s' event handler, while the token queue is disabled.\n" % Name + \
"Use '--token-policy queue', so then tokens can be sent safer\n" + \
"from inside this event handler. Disable this warning by command\n"
"line option '--no-warning-on-no-token-queue'.", fh, DontExitF=True)
|
|
# PyVision License
#
# Copyright (c) 2006-2008 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cv
import os
import os.path
import subprocess
import sys
import random
import tempfile
import unittest
import pyvision as pv
from pyvision.analysis.face import EyesFile
from pyvision.analysis.FaceAnalysis.FaceDetectionTest import FaceDetectionTest
import pickle
#from pyvision.optimize.GeneticAlgorithm import GeneticAlgorithm,ChoiceVariable
import time
#from pyvision.analysis.Table import Table
class CascadeNotFound(Exception):
pass
class HaarTrainingError(Exception):
pass
DEFAULT_CASCADE=os.path.join(pv.__path__[0],"config","haarcascade_frontalface_alt.xml")
OPENCV_CASCADE=os.path.join(pv.__path__[0],"config","haarcascade_frontalface_alt.xml")
CELEB1_CASCADE=os.path.join(pv.__path__[0],"config","facedetector_celebdb1.xml")
CELEB2_CASCADE=os.path.join(pv.__path__[0],"config","facedetector_celebdb2.xml")
FULLBODY_CASCADE=os.path.join(pv.__path__[0],"config","haarcascade_fullbody.xml")
UPPERBODY_CASCADE=os.path.join(pv.__path__[0],"config","haarcascade_upperbody.xml")
LOWERBODY_CASCADE=os.path.join(pv.__path__[0],"config","haarcascade_lowerbody.xml")
DEFAULT_NEGATIVE=os.path.join(pv.__path__[0],"data","nonface")
# These are the average left and right eye locations relative to the face detection rectangle for the
# haarcascade_frontalface_alt cascade file. Estimated using the first 1000 images from FERET.
# To find the expected left eye location for a 64X64 detection rectangle: 64*AVE_LEFT_EYE
AVE_LEFT_EYE = pv.Point(0.300655,0.381525,0.000000)
AVE_RIGHT_EYE = pv.Point(0.708847,0.379736,0.000000)
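# For example, applying the comment above to a 64x64 detection rectangle
# (simple arithmetic, shown only as an illustration):
#     left eye  ~ (64*0.300655, 64*0.381525) ~ (19.2, 24.4)
#     right eye ~ (64*0.708847, 64*0.379736) ~ (45.4, 24.3)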
class CascadeDetector:
''' This class is a wrapper around the OpenCV cascade detector. '''
def __init__(self, cascade_name=DEFAULT_CASCADE,orig_size=None,min_size=(60,60), image_scale=1.3, haar_scale=1.2, min_neighbors=2, haar_flags=0):
''' Init the detector and create the cascade classifier '''
self.cascade_name = cascade_name
self.min_size = min_size
self.image_scale = image_scale
self.haar_scale = haar_scale
self.min_neighbors = min_neighbors
self.haar_flags = haar_flags
if cascade_name != None:
if not os.path.isfile(cascade_name):
raise CascadeNotFound("Could not find file: "+cascade_name)
# Save data for later pickling
if orig_size == None:
orig_size = (1,1)
else:
orig_size = (orig_size[0],orig_size[1])
self.cascade_data = open(cascade_name).read()
self.cascade = cv.Load( cascade_name )
self.storage = cv.CreateMemStorage(0)
self.trained = True
def __call__(self,im):
''' This function is the same as detect. '''
return self.detect(im)
def __getstate__(self):
''' Function required to save and load the state from pickle. '''
state = {}
for key,value in self.__dict__.iteritems():
if key in ['cascade','storage']:
continue
state[key] = value
return state
def __setstate__(self,state):
''' Function required to save and load the state from pickle. '''
# Modeled after SVM pickling
for key,value in state.iteritems():
self.__dict__[key] = value
filename = tempfile.mktemp()
open(filename,'w').write(self.cascade_data)
self.cascade = cv.Load( filename )
self.storage = cv.CreateMemStorage(0)
os.remove(filename)
def _resizeImage(self, image, scale=None, size=None):
''' Resize an image by a scale or a size. Internal use only.'''
if scale != None and type(scale) in (int,float):
size = (int(image.width*scale),int(image.height*scale))
elif size != None and type(size) in [list,tuple]:
size = (int(size[0]),int(size[1]))
else:
pass
depth = image.depth
channels = image.nChannels
resized = cv.CreateImage( (size[0],size[1]), depth, channels )
cv.Resize( image, resized, cv.CV_INTER_LINEAR )
return resized
def detect(self, im):
''' Runs the cascade classifier on an image. '''
image = im.asOpenCV()
min_size = (self.min_size[0],self.min_size[1])
# Create a resized gray scale image
if image.nChannels == 3:
gray = cv.CreateImage( (image.width,image.height), image.depth, 1 )
cv.CvtColor( image, gray, cv.CV_BGR2GRAY );
image = gray
image = self._resizeImage(image,self.image_scale)
# Equalize the image
cv.EqualizeHist( image, image )
# Detect faces
faces = cv.HaarDetectObjects( image, self.cascade, self.storage,
self.haar_scale, self.min_neighbors, self.haar_flags, min_size );
# Transform and return the points
result = []
for r in faces:
rect = pv.Rect(r[0][0]/self.image_scale, r[0][1]/self.image_scale, r[0][2]/self.image_scale, r[0][3]/self.image_scale)
result.append(rect)
return result
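# Minimal usage sketch for the detector above (assumes an image file
# 'face.jpg' exists; not part of the test suite below):
#
#     detector = CascadeDetector()            # uses DEFAULT_CASCADE
#     im = pv.Image("face.jpg")
#     for rect in detector.detect(im):        # equivalent to detector(im)
#         print rect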
def trainHaarClassifier(pos_rects,
neg_images,
tile_size=(20,20),
nneg=2000,
nstages=20,
mem = 1500,
maxtreesplits = 0,
mode='BASIC',
minhitrate=0.9990,
maxfalsealarm=0.50,
max_run_time=72*3600,
verbose=False,
createsamples='/usr/local/bin/opencv-createsamples',
haartraining='/usr/local/bin/opencv-haartraining',
):
'''
Train the detector.
'''
# Create a directory for training.
training_dir = tempfile.mktemp()
os.makedirs(training_dir, 0700)
random_name = "haar_"
for i in range(8):
random_name += random.choice('abcdefghijklmnopqrstuvwxyz')
cascade_name = random_name+"_cascade"
pos_name = random_name+"_pos.txt"
pos_vec_name = random_name+"_pos.vec"
neg_name = random_name+"_neg.txt"
# Add positives to the positives file.
pos_filename = os.path.join(training_dir,pos_name)
pos_file = open(pos_filename,'w')
num_pos = 0
for im_name,rects in pos_rects:
# 'num_pos' must equal the total number of rectangles written to the info file.
num_pos += len(rects)
if len(rects) > 0: pos_file.write("%s %d "%(im_name,len(rects)))
for rect in rects:
pos_file.write("%d %d %d %d "%(rect.x,rect.y,rect.w,rect.h))
pos_file.write("\n")
pos_file.close()
# Add negatives to the negatives file.
neg_filename = os.path.join(training_dir,neg_name)
neg_file = open(neg_filename,'w')
for im_name in neg_images:
neg_file.write("%s\n"%im_name)
neg_file.close()
# Create positives vec.
proc = subprocess.Popen(
(createsamples,
'-info',pos_name,
'-vec',pos_vec_name,
'-num',str(num_pos),
'-w',str(tile_size[0]),
'-h',str(tile_size[1]),
),
cwd=training_dir
,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT
)
proc.wait()
if verbose:
print proc.stdout.read()
# Run haar training
success = False
start_time = time.time()
if verbose:
proc = subprocess.Popen(
(haartraining,
'-data',cascade_name,
'-vec',pos_vec_name,
'-bg',neg_name,
'-nstages',str(nstages),
'-mode','ALL',
'-npos',str(num_pos),
'-nneg',str(nneg),
'-mem',str(mem),
'-w',str(tile_size[0]),
'-h',str(tile_size[1]),
'-maxtreesplits',str(maxtreesplits),
'-minhitrate',str(minhitrate),
'-maxfalsealarm',str(maxfalsealarm),
'-mode',mode,
),
cwd=training_dir
#,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT
)
else:
proc = subprocess.Popen(
(haartraining,
'-data',cascade_name,
'-vec',pos_vec_name,
'-bg',neg_name,
'-nstages',str(nstages),
'-mode','ALL',
'-npos',str(num_pos),
'-nneg',str(nneg),
'-mem',str(mem),
'-w',str(tile_size[0]),
'-h',str(tile_size[1]),
'-maxtreesplits',str(maxtreesplits),
'-minhitrate',str(minhitrate),
'-maxfalsealarm',str(maxfalsealarm),
'-mode',mode,
),
cwd=training_dir
,stdin=subprocess.PIPE,stdout=subprocess.PIPE,stderr=subprocess.STDOUT
)
while True:
if proc.poll() != None:
break
if time.time() - start_time > max_run_time:
print "Haar Training time exceeded. Killing process..."
os.kill(proc.pid,6) #6 = abort, 3=quit, 9=kill
proc.wait()
break
#out = proc.stdout.read()
#if verbose:
# print out
time.sleep(1)
if proc.returncode == 0:
if verbose: print "Cascade successful."
success = True
else:
print "Problem with return code:",proc.returncode
levels = os.listdir(os.path.join(training_dir,cascade_name))
nlevels = len(levels)
# Load the detector if training was successful.
detector = None
if success:
detector = CascadeDetector(os.path.join(training_dir,cascade_name+'.xml'))
else:
levels = os.listdir(os.path.join(training_dir,cascade_name))
nlevels = len(levels)
if nlevels > 0:
print "Loading partail cascade..."
cascade = cvLoadHaarClassifierCascade( os.path.join(training_dir,cascade_name), cvSize(tile_size[0],tile_size[1]))
cvSave(os.path.join(training_dir,cascade_name+'.xml'),cascade)
detector = CascadeDetector(os.path.join(training_dir,cascade_name+'.xml'),orig_size=tile_size)
else:
print "Cascade Failure. Could not create classifier."
# Clean up the temporary files
os.system("rm -rf %s"%training_dir)
time.sleep(5)
return detector
SCRAPS_FACE_DATA = os.path.join(pv.__path__[0],"data","csuScrapShots")
NONFACE_DATA = os.path.join(pv.__path__[0],"data","NonFace")
BAD_CASCADE=os.path.join(pv.__path__[0],"config","not_there.xml")
class _TestCascadeDetector(unittest.TestCase):
''' Unit tests for the Cascade Detector '''
def test_detect_bad_file(self):
'''
If the cascade file does not exist, opencv can crash without warning.
This test makes sure that a missing cascade file raises an exception instead.
'''
self.assertRaises(CascadeNotFound,CascadeDetector,BAD_CASCADE)
def test_face_detection_pickle(self):
fd = CascadeDetector(OPENCV_CASCADE)
fdt = FaceDetectionTest(name='scraps')
buffer = pickle.dumps(fd)
fd = pickle.loads(buffer)
self.eyes = EyesFile(os.path.join(SCRAPS_FACE_DATA,"coords.txt"))
for filename in self.eyes.files():
img = pv.Image(os.path.join(SCRAPS_FACE_DATA, filename + ".pgm"))
rects = fd(img)
truth = self.eyes.getFaces(img.filename)
fdt.addSample(truth,rects,im=img)
self.assertAlmostEqual( fdt.pos_rate , 0.98265895953757221, places = 2 ) # TODO: Version 2 performance is better
def test_detect_scraps_opencv(self):
fd = CascadeDetector(OPENCV_CASCADE)
fdt = FaceDetectionTest(name='scraps')
self.eyes = EyesFile(os.path.join(SCRAPS_FACE_DATA,"coords.txt"))
for filename in self.eyes.files():
img = pv.Image(os.path.join(SCRAPS_FACE_DATA, filename + ".pgm"))
rects = fd(img)
truth = self.eyes.getFaces(img.filename)
fdt.addSample(truth,rects,im=img)
self.assertAlmostEqual( fdt.pos_rate , 0.98265895953757221, places = 2 ) # TODO: Version 2 performance is better
def test_detect_scraps_celeb1(self):
fd = CascadeDetector(CELEB1_CASCADE)
fdt = FaceDetectionTest(name='scraps')
self.eyes = EyesFile(os.path.join(SCRAPS_FACE_DATA,"coords.txt"))
for filename in self.eyes.files():
img = pv.Image(os.path.join(SCRAPS_FACE_DATA, filename + ".pgm"))
rects = fd(img)
truth = self.eyes.getFaces(img.filename)
fdt.addSample(truth,rects,im=img)
self.assertAlmostEqual( fdt.pos_rate , 0.76878612716763006, places = 2 ) # TODO: Version 2 performance is better
def test_detect_scraps_celeb2(self):
fd = CascadeDetector(CELEB2_CASCADE)
fdt = FaceDetectionTest(name='scraps')
self.eyes = EyesFile(os.path.join(SCRAPS_FACE_DATA,"coords.txt"))
for filename in self.eyes.files():
img = pv.Image(os.path.join(SCRAPS_FACE_DATA, filename + ".pgm"))
rects = fd(img)
truth = self.eyes.getFaces(img.filename)
fdt.addSample(truth,rects,im=img)
self.assertAlmostEqual( fdt.pos_rate , 0.925, places = 2 )
def donttest_detector_train(self): # TODO: Cascade training fails for Version OpenCV 2.0
positives = []
self.eyes = EyesFile(os.path.join(SCRAPS_FACE_DATA,"coords.txt"))
n = len(self.eyes.files())
for filename in self.eyes.files()[:n/2]:
img = pv.Image(os.path.join(SCRAPS_FACE_DATA, filename + ".pgm"))
faces = self.eyes.getFaces(img.filename)
positives.append([os.path.join(SCRAPS_FACE_DATA,img.filename),faces])
neg_files = []
for im_name in os.listdir(NONFACE_DATA):
if im_name[-4:] != ".jpg": continue
neg_files.append(os.path.join(NONFACE_DATA,im_name))
fd = trainHaarClassifier(positives,neg_files,nstages=6,maxtreesplits=0,max_run_time=300)
fdt = FaceDetectionTest(name='scraps')
self.eyes = EyesFile(os.path.join(SCRAPS_FACE_DATA,"coords.txt"))
for filename in self.eyes.files():
img = pv.Image(os.path.join(SCRAPS_FACE_DATA, filename + ".pgm"))
rects = fd(img)
truth = self.eyes.getFaces(img.filename)
fdt.addSample(truth,rects,im=img)
self.assertAlmostEqual( fdt.pos_rate , 0.9942196531791907 )
|
|
# -*- coding: utf-8 -*-
"""
eve.utils
~~~~~~~~~
Utility functions and classes.
:copyright: (c) 2015 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
import sys
import eve
import hashlib
import werkzeug.exceptions
from cerberus import Validator
from copy import copy
from flask import request
from flask import current_app as app
from datetime import datetime, timedelta
from bson.json_util import dumps
from eve import RFC1123_DATE_FORMAT
class Config(object):
""" Helper class used through the code to access configuration settings.
If the main flaskapp object is not instantiated yet, returns the default
setting in the eve __init__.py module, otherwise returns the flaskapp
config value (which value might override the static defaults).
"""
def __getattr__(self, name):
try:
# will return 'working outside of application context' if the
# current_app is not available yet
return app.config.get(name)
except:
# fallback to the module-level default value
return getattr(eve, name)
# makes an instance of the Config helper class available to all the modules
# importing eve.utils.
config = Config()
class ParsedRequest(object):
""" This class, by means of its attributes, describes a client request.
.. versionchanged:: 0.5
'args' keyword.
.. versionchanged:: 0.1.0
'embedded' keyword.
.. versionchanged:: 0.0.6
Projection queries ('?projection={"name": 1}')
"""
# `where` value of the query string (?where). Defaults to None.
where = None
# `projection` value of the query string (?projection). Defaults to None.
projection = None
# `sort` value of the query string (?sort). Defaults to None.
sort = None
# `page` value of the query string (?page). Defaults to 1.
page = 1
# `max_results` value of the query string (?max_results). Defaults to
# `PAGINATION_DEFAULT` unless pagination is disabled.
max_results = 0
# `If-Modified-Since` request header value. Defaults to None.
if_modified_since = None
# `If-None-Match` request header value. Defaults to None.
if_none_match = None
# `If-Match` request header value. Defaults to None.
if_match = None
# `embedded` value of the query string (?embedded). Defaults to None.
embedded = None
# `show_deleted` True when the SHOW_DELETED_PARAM is included in query.
# Only relevant when soft delete is enabled. Defaults to False.
show_deleted = False
# `args` value of the original request. Defaults to None.
args = None
def parse_request(resource):
""" Parses a client request, returning instance of :class:`ParsedRequest`
containing relevant request data.
:param resource: the resource currently being accessed by the client.
.. versionchanged:: 0.5
Support for custom query parameters via configuration settings.
Minor DRY updates.
.. versionchanged:: 0.1.0
Support for embedded documents.
.. versionchanged:: 0.0.6
projection queries ('?projection={"name": 1}')
.. versionchanged: 0.0.5
Support for optional filters, sorting and pagination.
"""
args = request.args
headers = request.headers
r = ParsedRequest()
r.args = args
settings = config.DOMAIN[resource]
if settings['allowed_filters']:
r.where = args.get(config.QUERY_WHERE)
if settings['projection']:
r.projection = args.get(config.QUERY_PROJECTION)
if settings['sorting']:
r.sort = args.get(config.QUERY_SORT)
if settings['embedding']:
r.embedded = args.get(config.QUERY_EMBEDDED)
r.show_deleted = config.SHOW_DELETED_PARAM in args
max_results_default = config.PAGINATION_DEFAULT if \
settings['pagination'] else 0
try:
r.max_results = int(float(args[config.QUERY_MAX_RESULTS]))
assert r.max_results > 0
except (ValueError, werkzeug.exceptions.BadRequestKeyError,
AssertionError):
r.max_results = max_results_default
if settings['pagination']:
# TODO should probably return a 400 if 'page' is < 1 or non-numeric
if config.QUERY_PAGE in args:
try:
r.page = abs(int(args.get(config.QUERY_PAGE))) or 1
except ValueError:
pass
# TODO should probably return a 400 if 'max_results' < 1 or
# non-numeric
if r.max_results > config.PAGINATION_LIMIT:
r.max_results = config.PAGINATION_LIMIT
if headers:
r.if_modified_since = weak_date(headers.get('If-Modified-Since'))
# TODO if_none_match and if_match should probably be validated as
# valid etags, returning 400 on fail. Not sure however since
# we're just going to use these for string-type comparison
r.if_none_match = headers.get('If-None-Match')
r.if_match = headers.get('If-Match')
return r
def weak_date(date):
""" Returns a RFC-1123 string corresponding to a datetime value plus
a 1 second timedelta. This is needed because when saved, documents
LAST_UPDATED values have higher resolution than If-Modified-Since's, which
is limited to seconds.
:param date: the date to be adjusted.
"""
return datetime.strptime(date, RFC1123_DATE_FORMAT) + \
timedelta(seconds=1) if date else None
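# For example (a sketch, assuming the usual '%a, %d %b %Y %H:%M:%S GMT'
# RFC-1123 format):
#     weak_date('Fri, 25 Dec 2015 12:00:00 GMT')
# returns datetime(2015, 12, 25, 12, 0, 1), i.e. the parsed value plus one
# second, while weak_date(None) returns None.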
def str_to_date(string):
""" Converts a date string formatted as defined in the configuration
to the corresponding datetime value.
:param string: the RFC-1123 string to convert to datetime value.
"""
return datetime.strptime(string, config.DATE_FORMAT) if string else None
def date_to_str(date):
""" Converts a datetime value to the format defined in the configuration file.
:param date: the datetime value to convert.
"""
return datetime.strftime(date, config.DATE_FORMAT) if date else None
def date_to_rfc1123(date):
""" Converts a datetime value to the corresponding RFC-1123 string.
:param date: the datetime value to convert.
"""
return datetime.strftime(date, RFC1123_DATE_FORMAT) if date else None
def home_link():
""" Returns a link to the API entry point/home page.
.. versionchanged:: 0.5
Link is relative to API root.
.. versionchanged:: 0.0.3
Now returning a JSON link.
"""
return {'title': 'home', 'href': '/'}
def api_prefix(url_prefix=None, api_version=None):
""" Returns the prefix to API endpoints, according to the URL_PREFIX and
API_VERSION configuration settings.
:param url_prefix: the prefix string. If `None`, defaults to the current
:class:`~eve.flaskapp` configuration setting.
The class itself will call this function while
initializing. In that case, it will pass its settings
as arguments (as they are not externally available yet)
:param api_version: the api version string. If `None`, defaults to the
current :class:`~eve.flaskapp` configuration setting.
The class itself will call this function while
initializing. In that case, it will pass its settings
as arguments (as they are not externally available yet)
.. versionadded:: 0.0.3
"""
if url_prefix is None:
url_prefix = config.URL_PREFIX
if api_version is None:
api_version = config.API_VERSION
prefix = '/%s' % url_prefix if url_prefix else ''
version = '/%s' % api_version if api_version else ''
return prefix + version
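# For example (a sketch with explicit arguments, bypassing the config
# fallback):
#     api_prefix('api', 'v1')  -> '/api/v1'
#     api_prefix('api', '')    -> '/api'
#     api_prefix('', '')       -> ''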
def querydef(max_results=config.PAGINATION_DEFAULT, where=None, sort=None,
version=None, page=None):
""" Returns a valid query string.
:param max_results: `max_result` part of the query string. Defaults to
`PAGINATION_DEFAULT`
:param where: `where` part of the query string. Defaults to None.
:param sort: `sort` part of the query string. Defaults to None.
:param version: `version` part of the query string. Defaults to None.
:param page: `page` part of the query string. Defaults to None.
.. versionchanged:: 0.5
Support for customizable query parameters.
Add version to query string (#475).
"""
where_part = '&%s=%s' % (config.QUERY_WHERE, where) if where else ''
sort_part = '&%s=%s' % (config.QUERY_SORT, sort) if sort else ''
page_part = '&%s=%s' % (config.QUERY_PAGE, page) if page and page > 1 \
else ''
version_part = '&%s=%s' % (config.VERSION_PARAM, version) if version \
else ''
max_results_part = '%s=%s' % (config.QUERY_MAX_RESULTS, max_results) \
if max_results != config.PAGINATION_DEFAULT else ''
# remove sort set by Eve if version is set
if version and sort is not None:
sort_part = '&%s=%s' % (config.QUERY_SORT, sort) \
if sort != '[("%s", 1)]' % config.VERSION else ''
return ('?' + ''.join([max_results_part, where_part, sort_part,
version_part, page_part]).lstrip('&')).rstrip('?')
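# For example (a sketch assuming the default query parameter names and a
# PAGINATION_DEFAULT of 25):
#     querydef(max_results=50, where='{"name": "john"}', page=2)
# returns '?max_results=50&where={"name": "john"}&page=2', while a plain
# querydef() collapses to an empty string.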
def document_etag(value, ignore_fields=None):
""" Computes and returns a valid ETag for the input value.
:param value: the value to compute the ETag with.
:param ignore_fields: `ignore_fields` list of fields to skip to
compute the ETag value.
.. versionchanged:: 0.5.4
Use json_encoder_class. See #624.
.. versionchanged:: 0.0.4
Using bson.json_util.dumps over str(value) to make etag computation
consistent between different runs and/or server instances (#16).
"""
if ignore_fields:
def filter_ignore_fields(d, fields):
# recursive helper that removes from 'd' every field in 'fields';
# a field is either a plain key to skip or a dotted path that looks up
# nested keys, such as ["foo", "dict.bar", "dict.joe"]
for field in fields:
key, _, value = field.partition(".")
if value:
filter_ignore_fields(d[key], [value])
elif field in d:
d.pop(field)
else:
# non-required fields may simply be absent
pass
value_ = copy(value)
filter_ignore_fields(value_, ignore_fields)
else:
value_ = value
h = hashlib.sha1()
json_encoder = app.data.json_encoder_class()
h.update(dumps(value_, sort_keys=True,
default=json_encoder.default).encode('utf-8'))
return h.hexdigest()
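# Illustration of the 'ignore_fields' handling above (a sketch, not taken
# from the test suite): with
#     value = {'name': 'john', 'dict': {'bar': 1, 'joe': 2}}
#     ignore_fields = ['name', 'dict.bar']
# the ETag is computed on {'dict': {'joe': 2}}: the plain 'name' key is
# dropped at the top level and the dotted 'dict.bar' path removes 'bar'
# from the nested dict.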
def extract_key_values(key, d):
""" Extracts all values that match a key, even in nested dicts.
:param key: the lookup key.
:param d: the dict to scan.
.. versionadded: 0.0.7
"""
if key in d:
yield d[key]
for k in d:
if isinstance(d[k], dict):
for j in extract_key_values(key, d[k]):
yield j
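# For example (a small sketch):
#     list(extract_key_values('id', {'id': 1, 'sub': {'id': 2, 'x': 3}}))
# yields [1, 2]: the top-level match first, then the one found in the
# nested dict.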
def debug_error_message(msg):
""" Returns the error message `msg` if config.DEBUG is True
otherwise returns `None` which will cause Werkzeug to provide
a generic error message
:param msg: The error message to return if config.DEBUG is True
.. versionadded: 0.0.9
"""
if getattr(config, 'DEBUG', False):
return msg
return None
def validate_filters(where, resource):
""" Report any filter which is not allowed by `allowed_filters`
:param where: the where clause, as a dict.
:param resource: the resource being inspected.
.. versionchanged: 0.5
If the data layer supports a list of allowed operators, take them
into consideration when validating the query string (#388).
Recursively validate the whole query string.
.. versionadded: 0.0.9
"""
operators = getattr(app.data, 'operators', set())
allowed = config.DOMAIN[resource]['allowed_filters'] + list(operators)
def validate_filter(filter):
for key, value in filter.items():
if '*' not in allowed and key not in allowed:
return "filter on '%s' not allowed" % key
if key in ('$or', '$and', '$nor'):
if not isinstance(value, list):
return "operator '%s' expects a list of sub-queries" % key
for v in value:
if not isinstance(v, dict):
return "operator '%s' expects a list of sub-queries" \
% key
r = validate_filter(v)
if r:
return r
else:
if config.VALIDATE_FILTERS:
res_schema = config.DOMAIN[resource]['schema']
if key not in res_schema:
return "filter on '%s' is invalid"
else:
field_schema = res_schema.get(key)
v = Validator({key: field_schema})
if not v.validate({key: value}):
return "filter on '%s' is invalid"
else:
return None
if '*' in allowed and not config.VALIDATE_FILTERS:
return None
return validate_filter(where)
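# For example (a rough sketch; assumes a 'people' resource that allows
# filtering on 'name' only, VALIDATE_FILTERS disabled, and a data layer that
# declares no extra operators):
#     validate_filters({'name': 'john'}, 'people')  -> None
#     validate_filters({'age': 33}, 'people')       -> "filter on 'age' not allowed"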
def auto_fields(resource):
""" Returns a list of automatically handled fields for a resource.
:param resource: the resource currently being accessed by the client.
.. versionchanged: 0.5
ETAG is now a preserved meta data (#369).
.. versionadded:: 0.4
"""
resource_def = config.DOMAIN[resource]
# preserved meta data
fields = [resource_def['id_field'], config.LAST_UPDATED,
config.DATE_CREATED, config.ETAG]
# on-the-fly meta data (not in data store)
fields += [config.ISSUES, config.STATUS, config.LINKS]
if resource_def['versioning'] is True:
fields.append(config.VERSION)
fields.append(config.LATEST_VERSION) # on-the-fly meta data
fields.append(resource_def['id_field'] + config.VERSION_ID_SUFFIX)
if resource_def['soft_delete'] is True:
fields.append(config.DELETED)
return fields
# Base string type that is compatible with both Python 2.x and 3.x.
str_type = str if sys.version_info[0] == 3 else basestring
|
|
import unittest
import os
import sys
sys.path.append(os.path.realpath(os.path.dirname(__file__) + "/.."))
from courier import widgets, templates
from courier.app import serialize_
class ButtonWidgetTests(unittest.TestCase):
def test_isdict(self):
btn = widgets.Button (None, None)
self.assertTrue(type(btn.to_json()) is dict)
def test_format(self):
btn = widgets.Button("test_type", "test_title")
self.assertEquals(btn.to_json(), {
"type":"test_type",
"title":"test_title"
})
def test_optional_format(self):
btn = widgets.Button("test_type")
self.assertEquals(btn.to_json(), {
"type":"test_type"
})
def test_wrong_format(self):
btn = widgets.Button("test_type", "test_title")
self.assertNotEqual(btn.to_json(), {
"type": "test",
"title": "test"
})
class ShareButtonWidgetTests(unittest.TestCase):
def test_isdict(self):
btn = widgets.ShareButton()
self.assertTrue(type(btn.to_json()) is dict)
def test_format(self):
btn = widgets.ShareButton()
self.assertEquals(btn.to_json(), {
"type":"element_share"
})
def test_wrong_format(self):
btn = widgets.ShareButton()
self.assertNotEqual(btn.to_json(), {
"type": "element_share",
"title": "test"
})
class UnlinkAccountWidgetTests(unittest.TestCase):
def test_isdict(self):
btn = widgets.UnlinkAccount()
self.assertTrue(type(btn.to_json()) is dict)
def test_format(self):
btn = widgets.UnlinkAccount()
self.assertEquals(btn.to_json(), {
"type":"account_unlink"
})
def test_wrong_format(self):
btn = widgets.UnlinkAccount()
self.assertNotEqual(btn.to_json(), {
"type": "account_unlink",
"title": "test"
})
class LoginButtonWidgetTests(unittest.TestCase):
def test_isdict(self):
btn = widgets.LoginButton(None)
self.assertTrue(type(btn.to_json()) is dict)
def test_format(self):
btn = widgets.LoginButton("url")
self.assertEquals(btn.to_json(), {
"type":"account_link",
"url": "url"
})
def test_wrong_format(self):
btn = widgets.LoginButton("url")
self.assertNotEqual(btn.to_json(), {
"type": "account_link",
"url": "tst"
})
class CallButtonWidgetTests(unittest.TestCase):
def test_isdict(self):
btn = widgets.CallButton(None, None)
self.assertTrue(type(btn.to_json()) is dict)
def test_format(self):
btn = widgets.CallButton("title", "32480923")
self.assertEquals(btn.to_json(), {
"type": "phone_number",
"title":"title",
"payload": "32480923"
})
def test_wrong_format(self):
btn = widgets.CallButton("title", "32480923")
self.assertNotEqual(btn.to_json(), {
"type": "phone_number",
"title": "account_link",
"payload": "tst"
})
class PostbackButtonWidgetTests(unittest.TestCase):
def test_isdict(self):
btn = widgets.PostbackButton(None, None)
self.assertTrue(type(btn.to_json()) is dict)
def test_format(self):
btn = widgets.PostbackButton("Bookmark Item", "DEVELOPER_DEFINED_PAYLOAD")
self.assertEquals(btn.to_json(), {
"type": "postback",
"title": "Bookmark Item",
"payload": "DEVELOPER_DEFINED_PAYLOAD"
})
def test_wrong_format(self):
btn = widgets.PostbackButton("title", "32480923")
self.assertNotEqual(btn.to_json(), {
"type": "postback",
"title": "Bookmark Item",
"payload": "PAYLOAD"
})
class URLButtonWidgetTests(unittest.TestCase):
def test_isdict(self):
btn = widgets.URLButton(None, None)
self.assertTrue(type(btn.to_json()) is dict)
def test_format(self):
btn = widgets.URLButton("View Item", "https://petersfancyapparel.com/classic_white_tshirt",
webview_height_ratio=widgets.URLButton.WebviewHeightRatio.compact)
self.assertEquals(btn.to_json(), {
"type": "web_url",
"url": "https://petersfancyapparel.com/classic_white_tshirt",
"title": "View Item",
"webview_height_ratio": "compact"
})
def test_wrong_format(self):
btn = widgets.URLButton("View Item", "https://petersfancyapparel.com/classic_white_tshirt",
widgets.URLButton.WebviewHeightRatio.compact)
self.assertNotEqual(btn.to_json(), {
"type": "web_url",
"url": "http",
"title": "View Item",
"webview_height_ratio": "compact"
})
class BuyButtonWidgetTests(unittest.TestCase):
"""
Need to implement this test
"""
pass
class GenericTemplateTests(unittest.TestCase):
def setUp(self):
self._template = templates.GenericTemplate('Test template', 'https://item-url.com', 'https://item-url.com/img.jpg', 'Test Subtitle',
[widgets.PostbackButton('Hello', 'Hello')])
self._template_string = {
'attachment': {
'type': 'template',
'elements': [{
'title': 'Test template',
'item_url': 'https://item-url.com',
'image_url': 'https://item-url.com/img.jpg',
'subtitle': 'Test Subtitle',
'buttons': [
{
'title': 'Hello',
'type': 'postback',
'payload': 'Hello'
}
]
}]
}
}
def test_generic_template(self):
self.assertEqual(serialize_(self._template.to_json()), self._template_string)
class ButtonTemplateTests(unittest.TestCase):
def setUp(self):
self._template = templates.ButtonTemplate('Test Buttons', [widgets.PostbackButton('Hello', 'Hello')])
self._template_string = {
'attachment': {
'type': 'template',
'payload': {
'template_type': 'button',
'text': 'Test Buttons',
'buttons': [
{
'title': 'Hello',
'type': 'postback',
'payload': 'Hello'
}
]
}
}
}
def test_button_template(self):
self.assertEqual(serialize_(self._template.to_json()), self._template_string)
if __name__ == '__main__':
test_cases_to_run = [ButtonWidgetTests, ShareButtonWidgetTests, UnlinkAccountWidgetTests,
LoginButtonWidgetTests, CallButtonWidgetTests, PostbackButtonWidgetTests,
URLButtonWidgetTests,
# Template Tests
GenericTemplateTests, ButtonTemplateTests]
for test_case in test_cases_to_run:
suite = unittest.TestLoader().loadTestsFromTestCase(test_case)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Serialized DAG table in database."""
import hashlib
import logging
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional
import sqlalchemy_jsonfield
from sqlalchemy import BigInteger, Column, Index, String, and_
from sqlalchemy.orm import Session, backref, foreign, relationship
from sqlalchemy.sql import exists
from airflow.models.base import ID_LEN, Base
from airflow.models.dag import DAG, DagModel
from airflow.models.dagcode import DagCode
from airflow.models.dagrun import DagRun
from airflow.serialization.serialized_objects import SerializedDAG
from airflow.settings import MIN_SERIALIZED_DAG_UPDATE_INTERVAL, json
from airflow.utils import timezone
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import UtcDateTime
log = logging.getLogger(__name__)
class SerializedDagModel(Base):
"""A table for serialized DAGs.
serialized_dag table is a snapshot of DAG files synchronized by scheduler.
This feature is controlled by:
* ``[core] min_serialized_dag_update_interval = 30`` (s):
serialized DAGs are updated in DB when a file gets processed by scheduler,
to reduce DB write rate, there is a minimal interval of updating serialized DAGs.
* ``[scheduler] dag_dir_list_interval = 300`` (s):
interval of deleting serialized DAGs in DB when the files are deleted; it is
suggested to use a smaller interval such as 60.
The webserver uses this table to load DAGs: reading from the database is
lightweight compared to importing from files, which solves the webserver
scalability issue.
"""
__tablename__ = 'serialized_dag'
dag_id = Column(String(ID_LEN), primary_key=True)
fileloc = Column(String(2000), nullable=False)
# The max length of fileloc exceeds the limit of indexing.
fileloc_hash = Column(BigInteger, nullable=False)
data = Column(sqlalchemy_jsonfield.JSONField(json=json), nullable=False)
last_updated = Column(UtcDateTime, nullable=False)
dag_hash = Column(String(32), nullable=False)
__table_args__ = (Index('idx_fileloc_hash', fileloc_hash, unique=False),)
dag_runs = relationship(
DagRun,
primaryjoin=dag_id == foreign(DagRun.dag_id),
backref=backref('serialized_dag', uselist=False, innerjoin=True),
)
dag_model = relationship(
DagModel,
primaryjoin=dag_id == DagModel.dag_id, # type: ignore
foreign_keys=dag_id,
uselist=False,
innerjoin=True,
backref=backref('serialized_dag', uselist=False, innerjoin=True),
)
def __init__(self, dag: DAG):
self.dag_id = dag.dag_id
self.fileloc = dag.full_filepath
self.fileloc_hash = DagCode.dag_fileloc_hash(self.fileloc)
self.data = SerializedDAG.to_dict(dag)
self.last_updated = timezone.utcnow()
self.dag_hash = hashlib.md5(json.dumps(self.data, sort_keys=True).encode("utf-8")).hexdigest()
def __repr__(self):
return f"<SerializedDag: {self.dag_id}>"
@classmethod
@provide_session
def write_dag(cls, dag: DAG, min_update_interval: Optional[int] = None, session: Session = None):
"""Serializes a DAG and writes it into database.
If the record already exists, it checks whether the Serialized DAG has
changed. If it has changed, it updates the record; otherwise it is left as is.
:param dag: a DAG to be written into database
:param min_update_interval: minimal interval in seconds to update serialized DAG
:param session: ORM Session
"""
# Checks if (Current Time - Time when the DAG was written to DB) < min_update_interval
# If Yes, does nothing
# If No or the DAG does not exist, updates / writes Serialized DAG to DB
if min_update_interval is not None:
if session.query(
exists().where(
and_(
cls.dag_id == dag.dag_id,
(timezone.utcnow() - timedelta(seconds=min_update_interval)) < cls.last_updated,
)
)
).scalar():
return
log.debug("Checking if DAG (%s) changed", dag.dag_id)
new_serialized_dag = cls(dag)
serialized_dag_hash_from_db = session.query(cls.dag_hash).filter(cls.dag_id == dag.dag_id).scalar()
if serialized_dag_hash_from_db == new_serialized_dag.dag_hash:
log.debug("Serialized DAG (%s) is unchanged. Skipping writing to DB", dag.dag_id)
return
log.debug("Writing Serialized DAG: %s to the DB", dag.dag_id)
session.merge(new_serialized_dag)
log.debug("DAG: %s written to the DB", dag.dag_id)
@classmethod
@provide_session
def read_all_dags(cls, session: Session = None) -> Dict[str, 'SerializedDAG']:
"""Reads all DAGs in serialized_dag table.
:param session: ORM Session
:returns: a dict of DAGs read from database
"""
serialized_dags = session.query(cls)
dags = {}
for row in serialized_dags:
log.debug("Deserializing DAG: %s", row.dag_id)
dag = row.dag
# Sanity check.
if dag.dag_id == row.dag_id:
dags[row.dag_id] = dag
else:
log.warning(
"dag_id Mismatch in DB: Row with dag_id '%s' has Serialised DAG with '%s' dag_id",
row.dag_id,
dag.dag_id,
)
return dags
@property
def dag(self):
"""The DAG deserialized from the ``data`` column"""
if isinstance(self.data, dict):
dag = SerializedDAG.from_dict(self.data) # type: Any
else:
dag = SerializedDAG.from_json(self.data) # noqa
return dag
@classmethod
@provide_session
def remove_dag(cls, dag_id: str, session: Session = None):
"""Deletes a DAG with given dag_id.
:param dag_id: dag_id to be deleted
:param session: ORM Session
"""
# pylint: disable=no-member
session.execute(cls.__table__.delete().where(cls.dag_id == dag_id))
@classmethod
@provide_session
def remove_deleted_dags(cls, alive_dag_filelocs: List[str], session=None):
"""Deletes DAGs not included in alive_dag_filelocs.
:param alive_dag_filelocs: file paths of alive DAGs
:param session: ORM Session
"""
alive_fileloc_hashes = [DagCode.dag_fileloc_hash(fileloc) for fileloc in alive_dag_filelocs]
log.debug(
"Deleting Serialized DAGs (for which DAG files are deleted) from %s table ", cls.__tablename__
)
# pylint: disable=no-member
session.execute(
cls.__table__.delete().where(
and_(cls.fileloc_hash.notin_(alive_fileloc_hashes), cls.fileloc.notin_(alive_dag_filelocs))
)
)
@classmethod
@provide_session
def has_dag(cls, dag_id: str, session: Session = None) -> bool:
"""Checks a DAG exist in serialized_dag table.
:param dag_id: the DAG to check
:param session: ORM Session
"""
return session.query(exists().where(cls.dag_id == dag_id)).scalar()
@classmethod
@provide_session
def get(cls, dag_id: str, session: Session = None) -> Optional['SerializedDagModel']:
"""
Get the SerializedDAG for the given dag ID.
It will cope with being passed the ID of a subdag by looking up the
root dag_id from the DAG table.
:param dag_id: the DAG to fetch
:param session: ORM Session
"""
row = session.query(cls).filter(cls.dag_id == dag_id).one_or_none()
if row:
return row
# If we didn't find a matching DAG id then ask the DAG table to find
# out the root dag
root_dag_id = session.query(DagModel.root_dag_id).filter(DagModel.dag_id == dag_id).scalar()
return session.query(cls).filter(cls.dag_id == root_dag_id).one_or_none()
@staticmethod
@provide_session
def bulk_sync_to_db(dags: List[DAG], session: Session = None):
"""
Saves DAGs as Serialized DAG objects in the database. Each
DAG is saved in a separate database query.
:param dags: the DAG objects to save to the DB
:type dags: List[airflow.models.dag.DAG]
:param session: ORM Session
:type session: Session
:return: None
"""
for dag in dags:
if not dag.is_subdag:
SerializedDagModel.write_dag(
dag, min_update_interval=MIN_SERIALIZED_DAG_UPDATE_INTERVAL, session=session
)
@classmethod
@provide_session
def get_last_updated_datetime(cls, dag_id: str, session: Session = None) -> datetime:
"""
Get the date when the Serialized DAG associated to DAG was last updated
in serialized_dag table
:param dag_id: DAG ID
:type dag_id: str
:param session: ORM Session
:type session: Session
"""
return session.query(cls.last_updated).filter(cls.dag_id == dag_id).scalar()
@classmethod
@provide_session
def get_latest_version_hash(cls, dag_id: str, session: Session = None) -> str:
"""
Get the latest DAG version for a given DAG ID.
:param dag_id: DAG ID
:type dag_id: str
:param session: ORM Session
:type session: Session
:return: DAG Hash
:rtype: str
"""
return session.query(cls.dag_hash).filter(cls.dag_id == dag_id).scalar()
|
|
from __future__ import absolute_import, division, print_function
from operator import getitem
from tornado import gen
from dask.compatibility import apply
from dask.base import tokenize
from distributed.client import default_client
from .core import Stream
from . import core, sources
class DaskStream(Stream):
""" A Parallel stream using Dask
This object is fully compliant with the ``streamz.core.Stream`` object but
uses a Dask client for execution. Operations like ``map`` and
``accumulate`` submit functions to run on the Dask instance using
``dask.distributed.Client.submit`` and pass around Dask futures.
Time-based operations like ``timed_window``, buffer, and so on operate as
normal.
Typically one transfers between normal Stream and DaskStream objects using
the ``Stream.scatter()`` and ``DaskStream.gather()`` methods.
Examples
--------
>>> from dask.distributed import Client
>>> client = Client()
>>> from streamz import Stream
>>> source = Stream()
>>> source.scatter().map(func).accumulate(binop).gather().sink(...)
See Also
--------
dask.distributed.Client
"""
def __init__(self, *args, **kwargs):
if 'loop' not in kwargs:
kwargs['loop'] = default_client().loop
super(DaskStream, self).__init__(*args, **kwargs)
@DaskStream.register_api()
class map(DaskStream):
def __init__(self, upstream, func, *args, **kwargs):
self.func = func
self.kwargs = kwargs
self.args = args
DaskStream.__init__(self, upstream)
def update(self, x, who=None, metadata=None):
client = default_client()
result = client.submit(self.func, x, *self.args, **self.kwargs)
return self._emit(result, metadata=metadata)
@DaskStream.register_api()
class accumulate(DaskStream):
def __init__(self, upstream, func, start=core.no_default,
returns_state=False, **kwargs):
self.func = func
self.state = start
self.returns_state = returns_state
self.kwargs = kwargs
self.with_state = kwargs.pop('with_state', False)
DaskStream.__init__(self, upstream)
def update(self, x, who=None, metadata=None):
if self.state is core.no_default:
self.state = x
if self.with_state:
return self._emit((self.state, x), metadata=metadata)
else:
return self._emit(x, metadata=metadata)
else:
client = default_client()
result = client.submit(self.func, self.state, x, **self.kwargs)
if self.returns_state:
state = client.submit(getitem, result, 0)
result = client.submit(getitem, result, 1)
else:
state = result
self.state = state
if self.with_state:
return self._emit((self.state, result), metadata=metadata)
else:
return self._emit(result, metadata=metadata)
@core.Stream.register_api()
@DaskStream.register_api()
class scatter(DaskStream):
""" Convert local stream to Dask Stream
All elements flowing through the input will be scattered out to the cluster
"""
@gen.coroutine
def update(self, x, who=None, metadata=None):
client = default_client()
self._retain_refs(metadata)
        # We need to make sure that x is treated as a single element by dask.
        # However, client.scatter works differently internally for lists and
        # dicts, so we always use a dict here to be sure we know the format
        # exactly. The key is used as the dask identifier of the data.
tokenized_x = f"{type(x).__name__}-{tokenize(x)}"
future_as_dict = yield client.scatter({tokenized_x: x}, asynchronous=True)
future = future_as_dict[tokenized_x]
f = yield self._emit(future, metadata=metadata)
self._release_refs(metadata)
raise gen.Return(f)
@DaskStream.register_api()
class gather(core.Stream):
""" Wait on and gather results from DaskStream to local Stream
This waits on every result in the stream and then gathers that result back
to the local stream. Warning, this can restrict parallelism. It is common
to combine a ``gather()`` node with a ``buffer()`` to allow unfinished
futures to pile up.
Examples
--------
>>> local_stream = dask_stream.buffer(20).gather()
See Also
--------
buffer
scatter
"""
@gen.coroutine
def update(self, x, who=None, metadata=None):
client = default_client()
self._retain_refs(metadata)
result = yield client.gather(x, asynchronous=True)
result2 = yield self._emit(result, metadata=metadata)
self._release_refs(metadata)
raise gen.Return(result2)
@DaskStream.register_api()
class starmap(DaskStream):
def __init__(self, upstream, func, **kwargs):
self.func = func
stream_name = kwargs.pop('stream_name', None)
self.kwargs = kwargs
DaskStream.__init__(self, upstream, stream_name=stream_name)
def update(self, x, who=None, metadata=None):
client = default_client()
result = client.submit(apply, self.func, x, self.kwargs)
return self._emit(result, metadata=metadata)
@DaskStream.register_api()
class buffer(DaskStream, core.buffer):
pass
@DaskStream.register_api()
class combine_latest(DaskStream, core.combine_latest):
pass
@DaskStream.register_api()
class delay(DaskStream, core.delay):
pass
@DaskStream.register_api()
class latest(DaskStream, core.latest):
pass
@DaskStream.register_api()
class partition(DaskStream, core.partition):
pass
@DaskStream.register_api()
class rate_limit(DaskStream, core.rate_limit):
pass
@DaskStream.register_api()
class sliding_window(DaskStream, core.sliding_window):
pass
@DaskStream.register_api()
class timed_window(DaskStream, core.timed_window):
pass
@DaskStream.register_api()
class union(DaskStream, core.union):
pass
@DaskStream.register_api()
class zip(DaskStream, core.zip):
pass
@DaskStream.register_api(staticmethod)
class filenames(DaskStream, sources.filenames):
pass
@DaskStream.register_api(staticmethod)
class from_textfile(DaskStream, sources.from_textfile):
pass
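
# --- Minimal end-to-end sketch, assuming a reachable dask.distributed cluster
# (Client() below starts a local one). It follows the pattern documented on
# DaskStream: scatter local values, run map/accumulate remotely, gather back.
if __name__ == "__main__":
    from dask.distributed import Client

    client = Client()
    source = Stream()
    (source.scatter()                              # local values -> futures
           .map(lambda x: x * 2)                   # executed on the cluster
           .accumulate(lambda acc, x: acc + x, start=0)
           .gather()                               # futures -> local values
           .sink(print))
    for i in range(5):
        source.emit(i)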
|
|
"""Utilities for input validation"""
# Authors: Olivier Grisel and Gael Varoquaux and others (please update me)
# License: BSD 3
import warnings
import numbers
import numpy as np
from scipy import sparse
from ..externals import six
from .fixes import safe_copy
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Array contains NaN or infinity.")
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
    # First try an O(n) time, O(1) space solution for the common case that
    # everything is finite; fall back to O(n) space np.isfinite to prevent
    # false positives from overflow in the sum method.
_assert_all_finite(X.data if sparse.issparse(X) else X)
def safe_asarray(X, dtype=None, order=None):
"""Convert X to an array or sparse matrix.
Prevents copying X when possible; sparse matrices are passed through."""
if sparse.issparse(X):
assert_all_finite(X.data)
else:
X = np.asarray(X, dtype, order)
assert_all_finite(X)
return X
def as_float_array(X, copy=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
    type. The function can create a copy or modify the argument in place,
    depending on the ``copy`` argument.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sparse.issparse(X)):
return safe_asarray(X, dtype=np.float64)
elif sparse.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def array2d(X, dtype=None, order=None, copy=False):
"""Returns at least 2-d array with data from X"""
if sparse.issparse(X):
raise TypeError('A sparse matrix was passed, but dense data '
'is required. Use X.toarray() to convert to dense.')
X_2d = np.asarray(np.atleast_2d(X), dtype=dtype, order=order)
_assert_all_finite(X_2d)
if X is X_2d and copy:
X_2d = safe_copy(X_2d)
return X_2d
def _atleast2d_or_sparse(X, dtype, order, copy, sparse_class, convmethod):
if sparse.issparse(X):
# Note: order is ignored because CSR matrices hold data in 1-d arrays
if dtype is None or X.dtype == dtype:
X = getattr(X, convmethod)()
else:
X = sparse_class(X, dtype=dtype)
_assert_all_finite(X.data)
else:
X = array2d(X, dtype=dtype, order=order, copy=copy)
_assert_all_finite(X)
return X
def atleast2d_or_csc(X, dtype=None, order=None, copy=False):
"""Like numpy.atleast_2d, but converts sparse matrices to CSC format.
Also, converts np.matrix to np.ndarray.
"""
return _atleast2d_or_sparse(X, dtype, order, copy, sparse.csc_matrix,
"tocsc")
def atleast2d_or_csr(X, dtype=None, order=None, copy=False):
"""Like numpy.atleast_2d, but converts sparse matrices to CSR format
Also, converts np.matrix to np.ndarray.
"""
return _atleast2d_or_sparse(X, dtype, order, copy, sparse.csr_matrix,
"tocsr")
def _num_samples(x):
"""Return number of samples in array-like x."""
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
raise TypeError("Expected sequence or array-like, got %r" % x)
return x.shape[0] if hasattr(x, 'shape') else len(x)
def check_arrays(*arrays, **options):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
By default lists and tuples are converted to numpy arrays.
    It is possible to enforce certain properties, such as dtype, contiguity
and sparse matrix format (if a sparse matrix is passed).
Converting lists to arrays can be disabled by setting ``allow_lists=True``.
Lists can then contain arbitrary objects and are not checked for dtype,
finiteness or anything else but length. Arrays are still checked
and possibly converted.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays, unless allow_lists is specified.
sparse_format : 'csr', 'csc' or 'dense', None by default
If not None, any scipy.sparse matrix is converted to
Compressed Sparse Rows or Compressed Sparse Columns representations.
If 'dense', an error is raised when a sparse array is
passed.
copy : boolean, False by default
If copy is True, ensure that returned arrays are copies of the original
(if not already converted to another format earlier in the process).
check_ccontiguous : boolean, False by default
Check that the arrays are C contiguous
dtype : a numpy dtype instance, None by default
Enforce a specific dtype.
allow_lists : bool
Allow lists of arbitrary objects as input, just check their length.
        Disables the conversion of lists to numpy arrays.
"""
sparse_format = options.pop('sparse_format', None)
if sparse_format not in (None, 'csr', 'csc', 'dense'):
raise ValueError('Unexpected sparse format: %r' % sparse_format)
copy = options.pop('copy', False)
check_ccontiguous = options.pop('check_ccontiguous', False)
dtype = options.pop('dtype', None)
allow_lists = options.pop('allow_lists', False)
if options:
raise TypeError("Unexpected keyword arguments: %r" % options.keys())
if len(arrays) == 0:
return None
n_samples = _num_samples(arrays[0])
checked_arrays = []
for array in arrays:
array_orig = array
if array is None:
# special case: ignore optional y=None kwarg pattern
checked_arrays.append(array)
continue
size = _num_samples(array)
if size != n_samples:
raise ValueError("Found array with dim %d. Expected %d"
% (size, n_samples))
if not allow_lists or hasattr(array, "shape"):
if sparse.issparse(array):
if sparse_format == 'csr':
array = array.tocsr()
elif sparse_format == 'csc':
array = array.tocsc()
elif sparse_format == 'dense':
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if check_ccontiguous:
array.data = np.ascontiguousarray(array.data, dtype=dtype)
else:
array.data = np.asarray(array.data, dtype=dtype)
_assert_all_finite(array.data)
else:
if check_ccontiguous:
array = np.ascontiguousarray(array, dtype=dtype)
else:
array = np.asarray(array, dtype=dtype)
_assert_all_finite(array)
if copy and array is array_orig:
array = array.copy()
checked_arrays.append(array)
return checked_arrays
def warn_if_not_float(X, estimator='This algorithm'):
"""Warning utility function to check that data type is floating point"""
if not isinstance(estimator, six.string_types):
estimator = estimator.__class__.__name__
if X.dtype.kind != 'f':
warnings.warn("%s assumes floating point values as input, "
"got %s" % (estimator, X.dtype))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
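
# --- Small demonstration of the helpers above (runs only as a script). The
# input arrays are invented for illustration; only the numpy import already
# present at the top of this module is needed. ---
if __name__ == "__main__":
    X_list = [[0, 1], [2, 3]]
    y_list = [0, 1]
    # Lists are converted to arrays and their first dimensions must agree.
    X_arr, y_arr = check_arrays(X_list, y_list, dtype=np.float64)
    print(X_arr.dtype, X_arr.shape, y_arr.shape)
    # Integer input is promoted to a floating point dtype.
    print(as_float_array(np.arange(6).reshape(2, 3)).dtype)
    # None, ints and RandomState instances are all accepted as seeds.
    rng = check_random_state(0)
    print(rng.rand(3))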
|
|
from landlab import Component
from landlab.utils.decorators import use_file_name_or_kwds
import numpy as np
import six
_VALID_METHODS = set(['Grid', 'Multi'])
def assert_method_is_valid(method):
if method not in _VALID_METHODS:
raise ValueError('%s: Invalid method name' % method)
class SoilMoisture(Component):
"""
Landlab component that simulates root-zone average soil moisture at each
cell using inputs of potential evapotranspiration, live leaf area index,
and vegetation cover.
.. codeauthor:: Sai Nudurupati and Erkan Istanbulluoglu
Construction::
SoilMoisture(grid, runon=0., f_bare=0.7, soil_ew=0.1,
intercept_cap_grass= 1., zr_grass=0.3, I_B_grass=20.,
I_V_grass=24., K_s_grass=42., pc_grass=0.43, fc_grass=0.56,
sc_grass=0.33, wp_grass=0.13, hgw_grass=0.1, beta_grass=13.8,
LAI_max_grass=2., LAIR_max_grass=2.88,
intercept_cap_shrub=1.5, zr_shrub=0.5, I_B_shrub=20.,
I_V_shrub=40., K_s_shrub=42., pc_shrub=0.43, fc_shrub=0.56,
sc_shrub=0.24, wp_shrub=0.13, hgw_shrub=0.1, beta_shrub=13.8,
LAI_max_shrub=2., LAIR_max_shrub=2.,
intercept_cap_tree=2., zr_tree=1.3, I_B_tree=20.,
I_V_tree=40., K_s_tree=42., pc_tree=0.43, fc_tree=0.56,
sc_tree=0.22, wp_tree=0.15, hgw_tree=0.1, beta_tree=13.8,
LAI_max_tree=4., LAIR_max_tree=4.,
intercept_cap_bare=1., zr_bare=0.15, I_B_bare=20.,
I_V_bare=20., K_s_bare=42., pc_bare=0.43, fc_bare=0.56, sc_bare=0.33,
wp_bare=0.13, hgw_bare=0.1, beta_bare=13.8,
LAI_max_bare=0.01, LAIR_max_bare=0.01)
Parameters
----------
grid: RasterModelGrid
A grid.
runon: float, optional
Runon from higher elevation (mm).
f_bare: float, optional
Fraction to partition PET for bare soil (None).
soil_ew: float, optional
Residual Evaporation after wilting (mm/day).
intercept_cap: float, optional
Plant Functional Type (PFT) specific full canopy interception
capacity.
zr: float, optional
Root depth (m).
I_B: float, optional
Infiltration capacity of bare soil (mm/h).
I_V: float, optional
Infiltration capacity of vegetated soil (mm/h).
K_s: float, optional
Hydraulic conductivity of soil (mm/h).
pc: float, optional
Soil porosity (None).
fc: float, optional
Soil saturation degree at field capacity (None).
sc: float, optional
Soil saturation degree at stomatal closure (None).
wp: float, optional
Soil saturation degree at wilting point (None).
hgw: float, optional
Soil saturation degree at hygroscopic point (None).
    beta: float, optional
        Deep percolation constant = 2*b+4 where b is the water retention
        parameter (None).
LAI_max: float, optional
Maximum leaf area index (m^2/m^2).
LAIR_max: float, optional
Reference leaf area index (m^2/m^2).
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.components.soil_moisture import SoilMoisture
>>> grid = RasterModelGrid((5, 4), spacing=(0.2, 0.2))
>>> SoilMoisture.name
'Soil Moisture'
>>> sorted(SoilMoisture.output_var_names) # doctest: +NORMALIZE_WHITESPACE
['soil_moisture__root_zone_leakage',
'soil_moisture__saturation_fraction',
'surface__evapotranspiration',
'surface__runoff',
'vegetation__water_stress']
>>> sorted(SoilMoisture.units) # doctest: +NORMALIZE_WHITESPACE
[('rainfall__daily_depth', 'mm'),
('soil_moisture__initial_saturation_fraction', 'None'),
('soil_moisture__root_zone_leakage', 'mm'),
('soil_moisture__saturation_fraction', 'None'),
('surface__evapotranspiration', 'mm'),
('surface__potential_evapotranspiration_rate', 'mm'),
('surface__runoff', 'mm'),
('vegetation__cover_fraction', 'None'),
('vegetation__live_leaf_area_index', 'None'),
('vegetation__plant_functional_type', 'None'),
('vegetation__water_stress', 'None')]
>>> grid['cell']['vegetation__plant_functional_type']= (
... np.zeros(grid.number_of_cells, dtype=int))
>>> SM = SoilMoisture(grid)
>>> SM.grid.number_of_cell_rows
3
>>> SM.grid.number_of_cell_columns
2
>>> SM.grid is grid
True
>>> import numpy as np
>>> np.allclose(grid.at_cell['soil_moisture__saturation_fraction'], 0.)
True
>>> grid['cell']['surface__potential_evapotranspiration_rate']= np.array([
... 0.2554777, 0.2554777 , 0.22110221, 0.22110221,
... 0.24813062, 0.24813062])
>>> grid['cell']['soil_moisture__initial_saturation_fraction']= (
... 0.75 * np.ones(grid.number_of_cells))
>>> grid['cell']['vegetation__live_leaf_area_index']= (
... 2. * np.ones(grid.number_of_cells))
>>> grid['cell']['vegetation__cover_fraction']= (
... np.ones(grid.number_of_cells))
>>> current_time = 0.5
>>> grid['cell']['rainfall__daily_depth'] = (
... 25. * np.ones(grid.number_of_cells))
>>> current_time = SM.update(current_time)
>>> np.allclose(grid.at_cell['soil_moisture__saturation_fraction'], 0.)
False
"""
_name = 'Soil Moisture'
_input_var_names = (
'vegetation__cover_fraction',
'vegetation__live_leaf_area_index',
'surface__potential_evapotranspiration_rate',
'surface__potential_evapotranspiration_rate__grass',
'soil_moisture__initial_saturation_fraction',
'vegetation__plant_functional_type',
'rainfall__daily_depth',
)
_output_var_names = (
'vegetation__water_stress',
'soil_moisture__saturation_fraction',
'soil_moisture__root_zone_leakage',
'surface__runoff',
'surface__runon',
'surface__evapotranspiration',
)
_var_units = {
'vegetation__cover_fraction': 'None',
'vegetation__live_leaf_area_index': 'None',
'surface__potential_evapotranspiration_rate': 'mm',
'surface__potential_evapotranspiration_rate__grass': 'mm',
'vegetation__plant_functional_type': 'None',
'vegetation__water_stress': 'None',
'soil_moisture__saturation_fraction': 'None',
'soil_moisture__initial_saturation_fraction': 'None',
'soil_moisture__root_zone_leakage': 'mm',
'surface__runoff': 'mm',
'surface__runon': 'mm',
'surface__evapotranspiration': 'mm',
'rainfall__daily_depth': 'mm',
}
_var_mapping = {
'vegetation__cover_fraction': 'cell',
'vegetation__live_leaf_area_index': 'cell',
'surface__potential_evapotranspiration_rate': 'cell',
'surface__potential_evapotranspiration_rate__grass': 'cell',
'vegetation__plant_functional_type': 'cell',
'vegetation__water_stress': 'cell',
'soil_moisture__saturation_fraction': 'cell',
'soil_moisture__initial_saturation_fraction': 'cell',
'soil_moisture__root_zone_leakage': 'cell',
'surface__runoff': 'cell',
'surface__runon': 'cell',
'surface__evapotranspiration': 'cell',
'rainfall__daily_depth': 'cell',
}
_var_doc = {
'vegetation__cover_fraction':
'fraction of land covered by vegetation',
'vegetation__live_leaf_area_index':
'one-sided green leaf area per unit ground surface area',
'surface__potential_evapotranspiration_rate':
'potential sum of evaporation and plant transpiration',
'surface__potential_evapotranspiration_rate__grass':
'potential sum of evaporation and grass transpiration, \
for partitioning bare soil evapotranspiration rate',
'vegetation__plant_functional_type':
'classification of plants (int), grass=0, shrub=1, tree=2, \
bare=3, shrub_seedling=4, tree_seedling=5',
'vegetation__water_stress':
'parameter that represents nonlinear effects of water deficit \
on plants',
'soil_moisture__saturation_fraction':
'relative volumetric water content (theta) - limits=[0,1]',
'soil_moisture__initial_saturation_fraction':
'initial soil_moisture__saturation_fraction',
'soil_moisture__root_zone_leakage':
'leakage of water into deeper portions of the soil not accessible \
to the plant',
'surface__runoff':
'infiltration excess runoff from ground surface',
'surface__runon':
'infiltration excess runon',
'surface__evapotranspiration':
'actual sum of evaporation and plant transpiration',
'rainfall__daily_depth':
'Rain in (mm) as a field, allowing spatio-temporal soil moisture \
saturation analysis.',
}
@use_file_name_or_kwds
def __init__(self, grid, ordered_cells=None, runon_switch=0,
f_bare=0.7, soil_ew=0.1,
intercept_cap_grass=1., zr_grass=0.3, I_B_grass=20.,
I_V_grass=24., K_s_grass=42., pc_grass=0.43, fc_grass=0.56,
sc_grass=0.33, wp_grass=0.13, hgw_grass=0.1, beta_grass=13.8,
LAI_max_grass=2., LAIR_max_grass=2.88,
intercept_cap_shrub=1.5, zr_shrub=0.5, I_B_shrub=20.,
I_V_shrub=40., K_s_shrub=42., pc_shrub=0.43, fc_shrub=0.56,
sc_shrub=0.24, wp_shrub=0.13, hgw_shrub=0.1, beta_shrub=13.8,
LAI_max_shrub=2., LAIR_max_shrub=2.,
intercept_cap_tree=2., zr_tree=1.3, I_B_tree=20.,
I_V_tree=40., K_s_tree=42., pc_tree=0.43, fc_tree=0.56,
sc_tree=0.22, wp_tree=0.15, hgw_tree=0.1, beta_tree=13.8,
LAI_max_tree=4., LAIR_max_tree=4.,
intercept_cap_bare=1., zr_bare=0.15, I_B_bare=20.,
I_V_bare=20., K_s_bare=42., pc_bare=0.43, fc_bare=0.56,
sc_bare=0.33, wp_bare=0.13, hgw_bare=0.1, beta_bare=13.8,
LAI_max_bare=0.01, LAIR_max_bare=0.01, **kwds):
"""
Parameters
----------
grid: RasterModelGrid
A grid.
runon_switch: int, optional
            To indicate whether runon needs to be considered;
0 - No runon, 1 - runon.
ordered_cells: numpy.array, required if runon_switch = 1
ordered_cells has the grid cells sorted in an order of descending
channel length in a delineated watershed
f_bare: float, optional
Fraction to partition PET for bare soil (None).
soil_ew: float, optional
Residual Evaporation after wilting (mm/day).
intercept_cap: float, optional
Plant Functional Type (PFT) specific full canopy interception
capacity.
zr: float, optional
Root depth (m).
I_B: float, optional
Infiltration capacity of bare soil (mm/h).
I_V: float, optional
Infiltration capacity of vegetated soil (mm/h).
K_s: float, optional
Hydraulic conductivity of soil (mm/h).
pc: float, optional
Soil porosity (None).
fc: float, optional
Soil saturation degree at field capacity (None).
sc: float, optional
Soil saturation degree at stomatal closure (None).
wp: float, optional
Soil saturation degree at wilting point (None).
hgw: float, optional
Soil saturation degree at hygroscopic point (None).
        beta: float, optional
            Deep percolation constant = 2*b+4 where b is the water retention
            parameter (None).
LAI_max: float, optional
Maximum leaf area index (m^2/m^2).
LAIR_max: float, optional
Reference leaf area index (m^2/m^2).
"""
self._method = kwds.pop('method', 'Grid')
assert_method_is_valid(self._method)
super(SoilMoisture, self).__init__(grid)
self.initialize(ordered_cells=ordered_cells, runon_switch=runon_switch,
f_bare=f_bare, soil_ew=soil_ew,
intercept_cap_grass=intercept_cap_grass,
zr_grass=zr_grass, I_B_grass=I_B_grass,
I_V_grass=I_V_grass, K_s_grass=K_s_grass,
pc_grass=pc_grass, fc_grass=fc_grass,
sc_grass=sc_grass, wp_grass=wp_grass,
hgw_grass=hgw_grass, beta_grass=beta_grass,
LAI_max_grass=LAI_max_grass,
LAIR_max_grass=LAIR_max_grass,
intercept_cap_shrub=intercept_cap_shrub,
zr_shrub=zr_shrub, I_B_shrub=I_B_shrub,
I_V_shrub=I_V_shrub, K_s_shrub=K_s_shrub,
pc_shrub=pc_shrub, fc_shrub=fc_shrub,
sc_shrub=sc_shrub, wp_shrub=wp_shrub,
hgw_shrub=hgw_shrub, beta_shrub=beta_shrub,
LAI_max_shrub=LAI_max_shrub,
LAIR_max_shrub=LAIR_max_shrub,
intercept_cap_tree=intercept_cap_tree, zr_tree=zr_tree,
I_B_tree=I_B_tree, I_V_tree=I_V_tree,
K_s_tree=K_s_tree, pc_tree=pc_tree,
fc_tree=fc_tree, sc_tree=sc_tree, wp_tree=wp_tree,
hgw_tree=hgw_tree, beta_tree=beta_tree,
LAI_max_tree=LAI_max_tree, LAIR_max_tree=LAIR_max_tree,
intercept_cap_bare=intercept_cap_bare, zr_bare=zr_bare,
I_B_bare=I_B_bare, I_V_bare=I_V_bare,
K_s_bare=K_s_bare, pc_bare=pc_bare,
fc_bare=fc_bare, sc_bare=sc_bare, wp_bare=wp_bare,
hgw_bare=hgw_bare, beta_bare=beta_bare,
LAI_max_bare=LAI_max_bare,
LAIR_max_bare=LAIR_max_bare, **kwds)
for name in self._input_var_names:
if name not in self.grid.at_cell:
self.grid.add_zeros('cell', name, units=self._var_units[name])
for name in self._output_var_names:
if name not in self.grid.at_cell:
self.grid.add_zeros('cell', name, units=self._var_units[name])
self._nodal_values = self.grid['node']
self._cell_values = self.grid['cell']
def initialize(self, ordered_cells=None, runon_switch=0, f_bare=0.7,
soil_ew=0.1, intercept_cap_grass=1., zr_grass=0.3,
I_B_grass=20., I_V_grass=24., K_s_grass=42.,
pc_grass=0.43, fc_grass=0.56,
sc_grass=0.33, wp_grass=0.13, hgw_grass=0.1,
beta_grass=13.8, LAI_max_grass=2., LAIR_max_grass=2.88,
intercept_cap_shrub=1.5, zr_shrub=0.5, I_B_shrub=20.,
I_V_shrub=40., K_s_shrub=42., pc_shrub=0.43,
fc_shrub=0.56, sc_shrub=0.24,
wp_shrub=0.13, hgw_shrub=0.1, beta_shrub=13.8,
LAI_max_shrub=2., LAIR_max_shrub=2.,
intercept_cap_tree=2., zr_tree=1.3, I_B_tree=20.,
I_V_tree=40., K_s_tree=42., pc_tree=0.43,
fc_tree=0.56, sc_tree=0.22,
wp_tree=0.15, hgw_tree=0.1, beta_tree=13.8,
LAI_max_tree=4., LAIR_max_tree=4.,
intercept_cap_bare=1., zr_bare=0.15, I_B_bare=20.,
I_V_bare=20., K_s_bare=42., pc_bare=0.43,
fc_bare=0.56, sc_bare=0.33,
wp_bare=0.13, hgw_bare=0.1, beta_bare=13.8,
LAI_max_bare=0.01, LAIR_max_bare=0.01, **kwds):
# GRASS = 0; SHRUB = 1; TREE = 2; BARE = 3;
# SHRUBSEEDLING = 4; TREESEEDLING = 5
"""
Parameters
----------
grid: RasterModelGrid
A grid.
runon_switch: int, optional
            To indicate whether runon needs to be considered;
0 - No runon, 1 - runon.
ordered_cells: numpy.array, required if runon_switch = 1
ordered_cells has the grid cells sorted in an order of descending
channel length in a delineated watershed
f_bare: float, optional
Fraction to partition PET for bare soil (None).
soil_ew: float, optional
Residual Evaporation after wilting (mm/day).
intercept_cap: float, optional
Plant Functional Type (PFT) specific full canopy interception
capacity.
zr: float, optional
Root depth (m).
I_B: float, optional
Infiltration capacity of bare soil (mm/h).
I_V: float, optional
Infiltration capacity of vegetated soil (mm/h).
K_s: float, optional
Hydraulic conductivity of soil (mm/h).
pc: float, optional
Soil porosity (None).
fc: float, optional
Soil saturation degree at field capacity (None).
sc: float, optional
Soil saturation degree at stomatal closure (None).
wp: float, optional
Soil saturation degree at wilting point (None).
hgw: float, optional
Soil saturation degree at hygroscopic point (None).
        beta: float, optional
            Deep percolation constant = 2*b+4 where b is the water retention
            parameter (None).
LAI_max: float, optional
Maximum leaf area index (m^2/m^2).
LAIR_max: float, optional
Reference leaf area index (m^2/m^2).
"""
self._vegtype = self.grid['cell']['vegetation__plant_functional_type']
self._runon_switch = runon_switch
self._fbare = f_bare
self.ordered_cells = ordered_cells
self._interception_cap = np.choose(self._vegtype, [
intercept_cap_grass, intercept_cap_shrub, intercept_cap_tree,
intercept_cap_bare, intercept_cap_shrub, intercept_cap_tree])
self._zr = np.choose(self._vegtype, [
zr_grass, zr_shrub, zr_tree, zr_bare, zr_shrub, zr_tree])
self._soil_Ib = np.choose(self._vegtype, [
I_B_grass, I_B_shrub, I_B_tree, I_B_bare, I_B_shrub, I_B_tree])
self._soil_Iv = np.choose(self._vegtype, [
I_V_grass, I_V_shrub, I_V_tree, I_V_bare, I_V_shrub, I_V_tree])
self._soil_Ks = np.choose(self._vegtype, [
K_s_grass, K_s_shrub, K_s_tree, K_s_bare, K_s_shrub, K_s_tree])
self._soil_Ew = soil_ew
self._soil_pc = np.choose(self._vegtype, [
pc_grass, pc_shrub, pc_tree, pc_bare, pc_shrub, pc_tree])
self._soil_fc = np.choose(self._vegtype, [
fc_grass, fc_shrub, fc_tree, fc_bare, fc_shrub, fc_tree])
self._soil_sc = np.choose(self._vegtype, [
sc_grass, sc_shrub, sc_tree, sc_bare, sc_shrub, sc_tree])
self._soil_wp = np.choose(self._vegtype, [
wp_grass, wp_shrub, wp_tree, wp_bare, wp_shrub, wp_tree])
self._soil_hgw = np.choose(self._vegtype, [
hgw_grass, hgw_shrub, hgw_tree, hgw_bare, hgw_shrub, hgw_tree])
self._soil_beta = np.choose(self._vegtype, [
beta_grass, beta_shrub, beta_tree,
beta_bare, beta_shrub, beta_tree])
self._LAI_max = np.choose(self._vegtype, [
LAI_max_grass, LAI_max_shrub, LAI_max_tree,
LAI_max_bare, LAI_max_shrub, LAI_max_tree])
self._LAIR_max = np.choose(self._vegtype, [
LAIR_max_grass, LAIR_max_shrub, LAIR_max_tree,
LAIR_max_bare, LAIR_max_shrub, LAIR_max_tree])
def update(self, current_time, Tb=24., Tr=0., **kwds):
"""
Update fields with current loading conditions.
Parameters
----------
current_time: float
Current time (years).
Tr: float, optional
Storm duration (hours).
Tb: float, optional
Inter-storm duration (hours).
"""
P_ = self._cell_values['rainfall__daily_depth']
self._PET = (
self._cell_values['surface__potential_evapotranspiration_rate'])
self._pet_g = (
self._cell_values['surface__potential_evapotranspiration_rate__grass'])
self._SO = (
self._cell_values['soil_moisture__initial_saturation_fraction'])
self._vegcover = self._cell_values['vegetation__cover_fraction']
self._water_stress = self._cell_values['vegetation__water_stress']
self._S = self._cell_values['soil_moisture__saturation_fraction']
self._D = self._cell_values['soil_moisture__root_zone_leakage']
self._ETA = self._cell_values['surface__evapotranspiration']
self._fr = (self._cell_values['vegetation__live_leaf_area_index'] /
self._LAIR_max)
self._runoff = self._cell_values['surface__runoff']
self._runon = self._cell_values['surface__runon']
self._runoff[:] = 0. # Initializing runoff to zero
self._runon[:] = 0. # Initializing runon to zero
# LAIl = self._cell_values['vegetation__live_leaf_area_index']
# LAIt = LAIl+self._cell_values['DeadLeafAreaIndex']
# if LAIt.all() == 0.:
# self._fr = np.zeros(self.grid.number_of_cells)
# else:
# self._fr = (self._vegcover[0]*LAIl/LAIt)
self._fr[self._fr > 1.] = 1.
self._Sini = np.zeros(self._SO.shape)
self._ETmax = np.zeros(self._SO.shape)
self._ts = np.zeros(self._SO.shape) # record Time to Saturation
self._precip_int = np.zeros(self._SO.shape)
# Adding routine to add runon & runoff
if self._runon_switch:
# Make sure that flow_router has been called before
r_cell = (self.grid.cell_at_node[
self.grid.at_node['flow__receiver_node']])
r_cell = r_cell[r_cell != -1]
# finding cells that aren't part of
# ordered cells (delineated watershed)
diff_cells = (np.setdiff1d(
range(0, self.grid.number_of_cells), self.ordered_cells))
# trvrsl_order has ordered cells computed first and then the rest
# of the cells
trvrsl_order = np.concatenate((self.ordered_cells, diff_cells),
axis=0)
else:
# trvrsl_order is just regular 'all cells' if no runon is computed
trvrsl_order = range(0, self.grid.number_of_cells)
for cell in trvrsl_order:
# Routine to calculate runon
if self._runon_switch:
if cell in self.ordered_cells:
donors = []
donors = list(np.where(r_cell == cell)[0])
if len(donors) != 0:
for k in range(0, len(donors)):
self._runon[cell] += self._runoff[donors[k]]
P = P_[cell]
runon = self._runon[cell]
if runon < 0:
                six.print_('Runon < 0!')
# print cell
s = self._SO[cell]
fbare = self._fbare
ZR = self._zr[cell]
pc = self._soil_pc[cell]
fc = self._soil_fc[cell]
scc = self._soil_sc[cell]
wp = self._soil_wp[cell]
hgw = self._soil_hgw[cell]
beta = self._soil_beta[cell]
Ks = self._soil_Ks[cell]
if self._vegtype[cell] == 0: # 0 - GRASS
sc = scc*self._fr[cell]+(1-self._fr[cell])*fc
else:
sc = scc
# Infiltration capacity
Inf_cap = (self._soil_Ib[cell]*(1-self._vegcover[cell]) +
self._soil_Iv[cell]*self._vegcover[cell])
# Interception capacity
Int_cap = min(self._vegcover[cell]*self._interception_cap[cell],
P*self._vegcover[cell])
# Effective precipitation depth
Peff = max((P + max(runon, 0.) - Int_cap), 0.)
mu = (Ks/1000.0)/(pc*ZR*(np.exp(beta*(1.-fc))-1.))
if self._vegtype[cell] == 3:
Ep = max((fbare*self._pet_g[cell]), 0.0001)
else:
Ep = max((self._PET[cell]*self._fr[cell] +
fbare*self._pet_g[cell]*(1.-self._fr[cell])) -
Int_cap, 0.0001) # mm/d
self._ETmax[cell] = Ep
nu = ((Ep / 24.) / 1000.) / (pc*ZR) # Loss function parameter
# Loss function parameter
nuw = ((self._soil_Ew/24.)/1000.)/(pc*ZR)
# Precipitation Intensity
precip_int = Peff/Tr
self._precip_int[cell] = precip_int
# Time to saturation Ts
if precip_int <= 0.:
Ts = np.inf
else:
Ts = (((1 - self._SO[cell]) * (pc * ZR * 1000.)) /
(precip_int*(1-np.exp((-1)*Inf_cap/precip_int))))
self._ts[cell] = Ts
# Computing runoff
# If using Poisson storms with Tr = 0, (Precip_int * Tr = Precip)
if Tr == 0.:
self._runoff[cell] = max((Peff - Inf_cap), 0.)
sini = min(self._SO[cell] + ((Peff -
self._runoff[cell])/(pc*ZR*1000.)), 1.)
# If using regular storms with (Tr != 0.)
elif Tr < Ts:
self._runoff[cell] = max((precip_int - Inf_cap)*Tr, 0.)
sini = min(self._SO[cell] + ((precip_int * Tr -
self._runoff[cell])/(pc*ZR*1000.)), 1.)
else:
sini = 1
self._runoff[cell] = max(((precip_int-Inf_cap)*Ts +
(precip_int*(Tr-Ts))), 0.)
if sini >= fc:
tfc = (1./(beta*(mu-nu)))*(beta*(fc-sini) + np.log((
nu-mu+mu*np.exp(beta*(sini-fc)))/nu))
tsc = ((fc-sc)/nu)+tfc
twp = ((sc-wp)/(nu-nuw))*np.log(nu/nuw)+tsc
if Tb < tfc:
s = abs(sini-(1./beta)*np.log(((nu-mu+mu *
np.exp(beta*(sini-fc)))*np.exp(beta*(nu-mu)*Tb) -
mu*np.exp(beta*(sini-fc)))/(nu-mu)))
self._D[cell] = ((pc*ZR*1000.)*(sini-s))-(Tb*(Ep/24.))
self._ETA[cell] = (Tb*(Ep/24.))
elif Tb >= tfc and Tb < tsc:
s = fc-(nu*(Tb-tfc))
self._D[cell] = ((pc*ZR*1000.)*(sini-fc))-((tfc)*(Ep/24.))
self._ETA[cell] = (Tb*(Ep/24.))
elif Tb >= tsc and Tb < twp:
s = (wp+(sc-wp)*((nu/(nu-nuw))*np.exp((-1)*((nu-nuw) /
(sc-wp))*(Tb-tsc))-(nuw/(nu-nuw))))
self._D[cell] = ((pc*ZR*1000.)*(sini-fc))-(tfc*Ep/24.)
self._ETA[cell] = (1000.*ZR*pc*(sini-s))-self._D[cell]
else:
s = (hgw+(wp-hgw)*np.exp((-1)*(nuw/(wp-hgw)) *
max(Tb-twp, 0.)))
self._D[cell] = ((pc*ZR*1000.)*(sini-fc))-(tfc*Ep/24.)
self._ETA[cell] = (1000.*ZR*pc*(sini-s))-self._D[cell]
elif sini < fc and sini >= sc:
tfc = 0.
tsc = (sini-sc)/nu
twp = ((sc-wp)/(nu-nuw))*np.log(nu/nuw)+tsc
if Tb < tsc:
s = sini - nu*Tb
self._D[cell] = 0.
self._ETA[cell] = 1000.*ZR*pc*(sini-s)
elif Tb >= tsc and Tb < twp:
s = (wp+(sc-wp)*((nu/(nu-nuw))*np.exp((-1) *
((nu-nuw)/(sc-wp))*(Tb-tsc))-(nuw/(nu-nuw))))
self._D[cell] = 0
self._ETA[cell] = (1000.*ZR*pc*(sini-s))
else:
s = hgw+(wp-hgw)*np.exp((-1)*(nuw/(wp-hgw))*(Tb-twp))
self._D[cell] = 0.
self._ETA[cell] = (1000.*ZR*pc*(sini-s))
elif sini < sc and sini >= wp:
tfc = 0
tsc = 0
twp = (((sc-wp)/(nu-nuw))*np.log(1+(nu-nuw)*(sini-wp) /
(nuw*(sc-wp))))
if Tb < twp:
s = (wp+((sc-wp)/(nu-nuw))*((np.exp((-1)*((nu-nuw) /
(sc-wp))*Tb))*(nuw+((nu-nuw)/(sc-wp))*(sini-wp))-nuw))
self._D[cell] = 0.
self._ETA[cell] = (1000.*ZR*pc*(sini-s))
else:
s = hgw+(wp-hgw)*np.exp((-1)*(nuw/(wp-hgw))*(Tb-twp))
self._D[cell] = 0.
self._ETA[cell] = (1000.*ZR*pc*(sini-s))
else:
tfc = 0.
tsc = 0.
twp = 0.
s = hgw+(sini-hgw)*np.exp((-1)*(nuw/(wp-hgw))*Tb)
self._D[cell] = 0.
self._ETA[cell] = (1000.*ZR*pc*(sini-s))
self._water_stress[cell] = min(((max(((sc - (s+sini)/2.) /
(sc - wp)), 0.))**4.), 1.0)
self._S[cell] = s
self._SO[cell] = s
self._Sini[cell] = sini
current_time += (Tb+Tr)/(24.*365.25)
return current_time
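
# --- Minimal driver sketch (runs only as a script). Grid size, vegetation
# type and the storm depths below are invented for illustration; the field
# names match the component's input/output variables above. ---
if __name__ == "__main__":
    from landlab import RasterModelGrid

    grid = RasterModelGrid((5, 4), spacing=(0.2, 0.2))
    grid['cell']['vegetation__plant_functional_type'] = np.zeros(
        grid.number_of_cells, dtype=int)
    sm = SoilMoisture(grid)
    grid['cell']['surface__potential_evapotranspiration_rate'][:] = 0.25
    grid['cell']['soil_moisture__initial_saturation_fraction'][:] = 0.75
    grid['cell']['vegetation__live_leaf_area_index'][:] = 2.
    grid['cell']['vegetation__cover_fraction'][:] = 1.
    current_time = 0.
    for storm_depth in (25., 0., 10.):      # rainfall depth per storm (mm)
        grid['cell']['rainfall__daily_depth'][:] = storm_depth
        # Tr is the storm duration (h), Tb the inter-storm period (h);
        # update() returns the simulation time advanced by (Tb + Tr) in years.
        current_time = sm.update(current_time, Tr=0., Tb=24.)
    print(grid.at_cell['soil_moisture__saturation_fraction'])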
|
|
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
import hmac
import hashlib
import base64
from dash_utils import *
from dash_hashs import *
from dash_jacobian import *
def get_pubkey_format(pub):
two = 2
three = 3
four = 4
if isinstance(pub, (tuple, list)):
return 'decimal'
elif len(pub) == 65 and pub[0] == four:
return 'bin'
elif len(pub) == 130 and pub[0:2] == '04':
return 'hex'
elif len(pub) == 33 and pub[0] in [two, three]:
return 'bin_compressed'
elif len(pub) == 66 and pub[0:2] in ['02', '03']:
return 'hex_compressed'
elif len(pub) == 64:
return 'bin_electrum'
elif len(pub) == 128:
return 'hex_electrum'
else:
raise Exception("Pubkey not in recognized format")
def encode_pubkey(pub, formt):
if not isinstance(pub, (tuple, list)):
pub = decode_pubkey(pub)
if formt == 'decimal':
return pub
elif formt == 'bin':
return b'\x04' + encode(pub[0], 256, 32) + encode(pub[1], 256, 32)
elif formt == 'bin_compressed':
return from_int_to_byte(2 + (pub[1] % 2)) + encode(pub[0], 256, 32)
elif formt == 'hex':
return '04' + encode(pub[0], 16, 64) + encode(pub[1], 16, 64)
elif formt == 'hex_compressed':
return '0' + str(2 + (pub[1] % 2)) + encode(pub[0], 16, 64)
elif formt == 'bin_electrum':
return encode(pub[0], 256, 32) + encode(pub[1], 256, 32)
elif formt == 'hex_electrum':
return encode(pub[0], 16, 64) + encode(pub[1], 16, 64)
else:
raise Exception("Invalid format!")
def decode_pubkey(pub, formt=None):
if not formt:
formt = get_pubkey_format(pub)
if formt == 'decimal':
return pub
elif formt == 'bin':
return (decode(pub[1:33], 256), decode(pub[33:65], 256))
elif formt == 'bin_compressed':
x = decode(pub[1:33], 256)
beta = pow(int(x * x * x + A * x + B), int((P + 1) // 4), int(P))
y = (P - beta) if ((beta + from_byte_to_int(pub[0])) % 2) else beta
return (x, y)
elif formt == 'hex':
return (decode(pub[2:66], 16), decode(pub[66:130], 16))
elif formt == 'hex_compressed':
return decode_pubkey(safe_from_hex(pub), 'bin_compressed')
elif formt == 'bin_electrum':
return (decode(pub[:32], 256), decode(pub[32:64], 256))
elif formt == 'hex_electrum':
return (decode(pub[:64], 16), decode(pub[64:128], 16))
else:
raise Exception("Invalid format!")
def encode_privkey(priv, formt, vbyte=0):
if not isinstance(priv, int_types):
return encode_privkey(decode_privkey(priv), formt, vbyte)
if formt == 'decimal':
return priv
elif formt == 'bin':
return encode(priv, 256, 32)
elif formt == 'bin_compressed':
return encode(priv, 256, 32) + b'\x01'
elif formt == 'hex':
return encode(priv, 16, 64)
elif formt == 'hex_compressed':
return encode(priv, 16, 64) + '01'
elif formt == 'wif':
return bin_to_b58check(encode(priv, 256, 32), 128 + int(vbyte))
elif formt == 'wif_compressed':
        return bin_to_b58check(encode(priv, 256, 32) + b'\x01',
                               128 + int(vbyte))
else:
raise Exception("Invalid format!")
def decode_privkey(priv, formt=None):
if not formt:
formt = get_privkey_format(priv)
if formt == 'decimal':
return priv
elif formt == 'bin':
return decode(priv, 256)
elif formt == 'bin_compressed':
return decode(priv[:32], 256)
elif formt == 'hex':
return decode(priv, 16)
elif formt == 'hex_compressed':
return decode(priv[:64], 16)
elif formt == 'wif':
return decode(b58check_to_bin(priv), 256)
elif formt == 'wif_compressed':
return decode(b58check_to_bin(priv)[:32], 256)
else:
raise Exception("WIF does not represent privkey")
def get_privkey_format(priv):
if isinstance(priv, int_types):
return 'decimal'
elif len(priv) == 32:
return 'bin'
elif len(priv) == 33:
return 'bin_compressed'
elif len(priv) == 64:
return 'hex'
elif len(priv) == 66:
return 'hex_compressed'
else:
bin_p = b58check_to_bin(priv)
if len(bin_p) == 32:
return 'wif'
elif len(bin_p) == 33:
return 'wif_compressed'
else:
raise Exception("WIF does not represent privkey")
def b58check_to_bin(inp):
import re
leadingzbytes = len(re.match('^1*', inp).group(0))
data = b'\x00' * leadingzbytes + changebase(inp, 58, 256)
assert double_sha256(data[:-4])[:4] == data[-4:]
return data[1:-4]
def decode_sig(sig):
bytez = base64.b64decode(sig)
    return (from_byte_to_int(bytez[0]),
            decode(bytez[1:33], 256),
            decode(bytez[33:], 256))
def is_address(addr):
import re
ADDR_RE = re.compile("^[123mn][a-km-zA-HJ-NP-Z0-9]{26,33}$")
return bool(ADDR_RE.match(addr))
def privkey_to_pubkey(privkey):
f = get_privkey_format(privkey)
privkey = decode_privkey(privkey, f)
if privkey >= N:
raise Exception("Invalid privkey")
if f in ['bin', 'bin_compressed', 'hex', 'hex_compressed', 'decimal']:
return encode_pubkey(fast_multiply(G, privkey), f)
else:
        return encode_pubkey(fast_multiply(G, privkey),
                             f.replace('wif', 'hex'))
privtopub = privkey_to_pubkey
def ecdsa_verify_addr(msg, sig, addr):
assert is_address(addr)
Q = ecdsa_recover(msg, sig)
magic = get_version_byte(addr)
    return (addr == pubtoaddr(Q, int(magic)) or
            addr == pubtoaddr(compress(Q), int(magic)))
def ecdsa_verify(msg, sig, pub):
if is_address(pub):
return ecdsa_verify_addr(msg, sig, pub)
return ecdsa_raw_verify(electrum_sig_hash(msg), decode_sig(sig), pub)
def ecdsa_raw_verify(msghash, vrs, pub):
v, r, s = vrs
if not (27 <= v <= 34):
return False
w = inv(s, N)
z = hash_to_int(msghash)
u1, u2 = z * w % N, r * w % N
    x, y = fast_add(fast_multiply(G, u1),
                    fast_multiply(decode_pubkey(pub), u2))
return bool(r == x and (r % N) and (s % N))
def encode_sig(v, r, s):
vb, rb, sb = from_int_to_byte(v), encode(r, 256), encode(s, 256)
result = base64.b64encode(
vb + b'\x00' * (32 - len(rb)) + rb + b'\x00' * (32 - len(sb)) + sb)
return str(result, 'utf-8')
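# The helper below derives the ECDSA nonce k deterministically from the
# private key and the message hash via chained HMAC-SHA256 rounds, in the
# spirit of RFC 6979: signing the same message with the same key always
# produces the same signature, and no external randomness is required.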
def deterministic_generate_k(msghash, priv):
v = b'\x01' * 32
k = b'\x00' * 32
priv = encode_privkey(priv, 'bin')
msghash = encode(hash_to_int(msghash), 256, 32)
k = hmac.new(k, v + b'\x00' + priv + msghash, hashlib.sha256).digest()
v = hmac.new(k, v, hashlib.sha256).digest()
k = hmac.new(k, v + b'\x01' + priv + msghash, hashlib.sha256).digest()
v = hmac.new(k, v, hashlib.sha256).digest()
return decode(hmac.new(k, v, hashlib.sha256).digest(), 256)
def ecdsa_raw_sign(msghash, priv):
z = hash_to_int(msghash)
k = deterministic_generate_k(msghash, priv)
r, y = fast_multiply(G, k)
s = inv(k, N) * (z + r * decode_privkey(priv)) % N
    v, r, s = (27 + ((y % 2) ^ (0 if s * 2 < N else 1)),
               r,
               s if s * 2 < N else N - s)
if 'compressed' in get_privkey_format(priv):
v += 4
return v, r, s
def electrum_sig_hash(message):
padded = b"\x19DarkCoin Signed Message:\n" + \
num_to_varint(len(message)) + from_string_to_bytes(message)
return double_sha256(padded)
def ecdsa_sign(msg, priv):
v, r, s = ecdsa_raw_sign(electrum_sig_hash(msg), priv)
sig = encode_sig(v, r, s)
assert ecdsa_verify(msg, sig, privtopub(
priv)), "Bad Sig!\t %s\nv = %d\n,r = %d\ns = %d" % (sig, v, r, s)
return sig
#
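
# --- Round-trip sketch (runs only as a script). The private key below is a
# made-up constant; the curve parameters (G, N) and the encode/decode and
# hashing helpers come from the wildcard imports at the top of this file. ---
if __name__ == "__main__":
    priv_hex = 'aa' * 32                       # hypothetical 32-byte key (hex)
    pub = privkey_to_pubkey(priv_hex)          # uncompressed hex public key
    wif = encode_privkey(priv_hex, 'wif_compressed')
    sig = ecdsa_sign('hello dash', priv_hex)   # base64-encoded compact sig
    print(pub[:16] + '...', wif[:8] + '...',
          ecdsa_verify('hello dash', sig, pub))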
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import next, object
import binascii
import copy
import json
import logging
import re
import struct
import sys
from django.urls import reverse
from desktop.auth.backend import is_admin
from desktop.conf import USE_DEFAULT_CONFIGURATION, has_connectors
from desktop.lib.conf import BoundConfig
from desktop.lib.exceptions import StructuredException
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import force_unicode
from desktop.lib.paths import SAFE_CHARACTERS_URI_COMPONENTS
from desktop.lib.rest.http_client import RestException
from desktop.lib.thrift_util import unpack_guid, unpack_guid_base64
from desktop.models import DefaultConfiguration, Document2
from notebook.connectors.base import Api, QueryError, QueryExpired, OperationTimeout, OperationNotSupported, _get_snippet_name, Notebook, \
get_interpreter, patch_snippet_for_connector
if sys.version_info[0] > 2:
from urllib.parse import quote as urllib_quote, unquote as urllib_unquote
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
from urllib import quote as urllib_quote, unquote as urllib_unquote
LOG = logging.getLogger(__name__)
try:
from beeswax import conf as beeswax_conf, data_export
from beeswax.api import _autocomplete, _get_sample_data
from beeswax.conf import CONFIG_WHITELIST as hive_settings, DOWNLOAD_ROW_LIMIT, DOWNLOAD_BYTES_LIMIT, MAX_NUMBER_OF_SESSIONS, \
has_session_pool, has_multiple_sessions, CLOSE_SESSIONS
from beeswax.data_export import upload
from beeswax.design import hql_query
from beeswax.models import QUERY_TYPES, HiveServerQueryHandle, HiveServerQueryHistory, QueryHistory, Session
from beeswax.server import dbms
from beeswax.server.dbms import get_query_server_config, QueryServerException
from beeswax.views import parse_out_jobs, parse_out_queries
except ImportError as e:
LOG.warning('Hive and HiveServer2 interfaces are not enabled: %s' % e)
hive_settings = None
try:
from impala import api # Force checking if Impala is enabled
from impala.conf import CONFIG_WHITELIST as impala_settings
from impala.server import get_api as get_impalad_api, ImpalaDaemonApiException, _get_impala_server_url
except ImportError as e:
LOG.warning("Impala app is not enabled")
impala_settings = None
try:
from jobbrowser.apis.query_api import _get_api
from jobbrowser.conf import ENABLE_QUERY_BROWSER, ENABLE_HIVE_QUERY_BROWSER
from jobbrowser.views import get_job
has_query_browser = ENABLE_QUERY_BROWSER.get()
has_hive_query_browser = ENABLE_HIVE_QUERY_BROWSER.get()
has_jobbrowser = True
except (AttributeError, ImportError, RuntimeError) as e:
LOG.warning("Job Browser app is not enabled")
has_jobbrowser = False
has_query_browser = False
has_hive_query_browser = False
DEFAULT_HIVE_ENGINE = 'mr'
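# The decorator below is a thin error-translation layer: StructuredException
# and QueryServerException raised by the HiveServer2/Impala clients are mapped
# onto the notebook-level QueryError / QueryExpired / OperationTimeout types,
# so callers only have to handle the notebook API's own exceptions.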
def query_error_handler(func):
def decorator(*args, **kwargs):
try:
return func(*args, **kwargs)
except StructuredException as e:
message = force_unicode(str(e))
if 'timed out' in message:
raise OperationTimeout(e)
else:
raise QueryError(message)
except QueryServerException as e:
message = force_unicode(str(e))
if 'Invalid query handle' in message or 'Invalid OperationHandle' in message:
raise QueryExpired(e)
else:
raise QueryError(message)
return decorator
def is_hive_enabled():
return hive_settings is not None and type(hive_settings) == BoundConfig
def is_impala_enabled():
return impala_settings is not None and type(impala_settings) == BoundConfig
class HiveConfiguration(object):
APP_NAME = 'hive'
PROPERTIES = [{
"multiple": True,
"defaultValue": [],
"value": [],
"nice_name": _("Files"),
"key": "files",
"help_text": _("Add one or more files, jars, or archives to the list of resources."),
"type": "hdfs-files"
}, {
"multiple": True,
"defaultValue": [],
"value": [],
"nice_name": _("Functions"),
"key": "functions",
"help_text": _("Add one or more registered UDFs (requires function name and fully-qualified class name)."),
"type": "functions"
}, {
"multiple": True,
"defaultValue": [],
"value": [],
"nice_name": _("Settings"),
"key": "settings",
"help_text": _("Hive and Hadoop configuration properties."),
"type": "settings",
"options": [config.lower() for config in hive_settings.get()] if is_hive_enabled() and hasattr(hive_settings, 'get') else []
}
]
class ImpalaConfiguration(object):
APP_NAME = 'impala'
PROPERTIES = [{
"multiple": True,
"defaultValue": [],
"value": [],
"nice_name": _("Settings"),
"key": "settings",
"help_text": _("Impala configuration properties."),
"type": "settings",
"options": [config.lower() for config in impala_settings.get()] if is_impala_enabled() else []
}
]
class HS2Api(Api):
@staticmethod
def get_properties(lang='hive'):
return ImpalaConfiguration.PROPERTIES if lang == 'impala' else HiveConfiguration.PROPERTIES
@query_error_handler
def create_session(self, lang='hive', properties=None):
application = 'beeswax' if lang == 'hive' or lang == 'llap' else lang
if has_session_pool():
session = Session.objects.get_tez_session(self.user, application, MAX_NUMBER_OF_SESSIONS.get())
elif not has_multiple_sessions():
session = Session.objects.get_session(self.user, application=application)
else:
session = None
reuse_session = session is not None
if not reuse_session:
db = dbms.get(self.user, query_server=get_query_server_config(name=lang, connector=self.interpreter))
session = db.open_session(self.user)
response = {
'type': lang,
'id': session.id
}
if not properties:
config = None
if USE_DEFAULT_CONFIGURATION.get():
config = DefaultConfiguration.objects.get_configuration_for_user(app=lang, user=self.user)
if config is not None:
properties = config.properties_list
else:
properties = self.get_properties(lang)
response['properties'] = properties
response['configuration'] = json.loads(session.properties)
response['reuse_session'] = reuse_session
response['session_id'] = ''
try:
decoded_guid = session.get_handle().sessionId.guid
response['session_id'] = unpack_guid(decoded_guid)
except Exception as e:
LOG.warning('Failed to decode session handle: %s' % e)
if lang == 'impala' and session:
http_addr = _get_impala_server_url(session)
response['http_addr'] = http_addr
return response
@query_error_handler
def close_session(self, session):
app_name = session.get('type')
session_id = session.get('id')
source_method = session.get("sourceMethod")
if not session_id:
session = Session.objects.get_session(self.user, application=app_name)
decoded_guid = session.get_handle().sessionId.guid
session_decoded_id = unpack_guid(decoded_guid)
if source_method == "dt_logout":
LOG.debug("Closing Impala session id %s on logout for user %s" % (session_decoded_id, self.user.username))
query_server = get_query_server_config(name=app_name)
response = {'status': -1, 'message': ''}
session_record = None
try:
filters = {'id': session_id, 'application': query_server['server_name']}
if not is_admin(self.user):
filters['owner'] = self.user
session_record = Session.objects.get(**filters)
except Session.DoesNotExist:
response['message'] = _('Session does not exist or you do not have permissions to close the session.')
if session_record:
session_record = dbms.get(self.user, query_server).close_session(session_record)
response['status'] = 0
response['message'] = _('Session successfully closed.')
response['session'] = {'id': session_id, 'application': session_record.application, 'status': session_record.status_code}
return response
def close_session_idle(self, notebook, session):
idle = True
response = {'result': []}
for snippet in [_s for _s in notebook['snippets'] if _s['type'] == session['type']]:
try:
if snippet['status'] != 'running':
response['result'].append(self.close_statement(notebook, snippet))
else:
idle = False
LOG.info('Not closing SQL snippet as still running.')
except QueryExpired:
pass
except Exception as e:
LOG.exception('Error closing statement %s' % str(e))
try:
if idle and CLOSE_SESSIONS.get():
response['result'].append(self.close_session(session))
except QueryExpired:
pass
except Exception as e:
LOG.exception('Error closing statement %s' % str(e))
return response['result']
@query_error_handler
def execute(self, notebook, snippet):
db = self._get_db(snippet, interpreter=self.interpreter)
statement = self._get_current_statement(notebook, snippet)
session = self._get_session(notebook, snippet['type'])
query = self._prepare_hql_query(snippet, statement['statement'], session)
_session = self._get_session_by_id(notebook, snippet['type'])
try:
if statement.get('statement_id') == 0: # TODO: move this to client
if query.database and not statement['statement'].lower().startswith('set'):
result = db.use(query.database, session=_session)
if result.session:
_session = result.session
handle = db.client.query(query, session=_session)
except QueryServerException as ex:
raise QueryError(ex.message, handle=statement)
# All good
server_id, server_guid = handle.get()
if sys.version_info[0] > 2:
server_id = server_id.decode('utf-8')
server_guid = server_guid.decode('utf-8')
response = {
'secret': server_id,
'guid': server_guid,
'operation_type': handle.operation_type,
'has_result_set': handle.has_result_set,
'modified_row_count': handle.modified_row_count,
'log_context': handle.log_context,
'session_guid': handle.session_guid,
'session_id': handle.session_id,
'session_type': snippet['type']
}
response.update(statement)
return response
@query_error_handler
def check_status(self, notebook, snippet):
response = {}
db = self._get_db(snippet, interpreter=self.interpreter)
handle = self._get_handle(snippet)
operation = db.get_operation_status(handle)
status = HiveServerQueryHistory.STATE_MAP[operation.operationState]
if status.value in (QueryHistory.STATE.failed.value, QueryHistory.STATE.expired.value):
if operation.errorMessage and 'transition from CANCELED to ERROR' in operation.errorMessage: # Hive case on canceled query
raise QueryExpired()
elif operation.errorMessage and re.search(
'Cannot validate serde: org.apache.hive.hcatalog.data.JsonSerDe', str(operation.errorMessage)
):
raise QueryError(message=operation.errorMessage + _('. Is hive-hcatalog-core.jar registered?'))
else:
raise QueryError(operation.errorMessage)
response['status'] = 'running' if status.value in (
QueryHistory.STATE.running.value, QueryHistory.STATE.submitted.value
) else 'available'
if operation.hasResultSet is not None:
response['has_result_set'] = operation.hasResultSet # HIVE-12442 - With LLAP hasResultSet can change after get_operation_status
return response
@query_error_handler
def fetch_result(self, notebook, snippet, rows, start_over):
db = self._get_db(snippet, interpreter=self.interpreter)
handle = self._get_handle(snippet)
try:
results = db.fetch(handle, start_over=start_over, rows=rows)
except QueryServerException as ex:
if re.search('(client inactivity)|(Invalid query handle)', str(ex)) and ex.message:
raise QueryExpired(message=ex.message)
else:
raise QueryError(ex)
# No escaping...
return {
'has_more': results.has_more,
'data': results.rows(),
'meta': [{
'name': column.name,
'type': column.type,
'comment': column.comment
} for column in results.data_table.cols()
],
'type': 'table'
}
@query_error_handler
def fetch_result_size(self, notebook, snippet):
resp = {
'rows': None,
'size': None,
'message': ''
}
if snippet.get('status') != 'available':
raise QueryError(_('Result status is not available'))
if has_connectors():
# TODO: Add dialect to snippet and update fetchResultSize() in notebook.ko
interpreter = get_interpreter(connector_type=snippet['type'])
snippet_dialect = interpreter['dialect']
else:
snippet_dialect = snippet['type']
if snippet_dialect not in ('hive', 'impala'):
raise OperationNotSupported(_('Cannot fetch result metadata for snippet type: %s') % snippet_dialect)
if snippet_dialect == 'hive':
resp['rows'], resp['size'], resp['message'] = self._get_hive_result_size(notebook, snippet)
else:
resp['rows'], resp['size'], resp['message'] = self._get_impala_result_size(notebook, snippet)
return resp
@query_error_handler
def cancel(self, notebook, snippet):
db = self._get_db(snippet, interpreter=self.interpreter)
handle = self._get_handle(snippet)
db.cancel_operation(handle)
return {'status': 0}
@query_error_handler
def get_log(self, notebook, snippet, startFrom=None, size=None):
db = self._get_db(snippet, interpreter=self.interpreter)
handle = self._get_handle(snippet)
return db.get_log(handle, start_over=startFrom == 0)
@query_error_handler
def close_statement(self, notebook, snippet):
db = self._get_db(snippet, interpreter=self.interpreter)
try:
handle = self._get_handle(snippet)
db.close_operation(handle)
except Exception as e:
if 'no valid handle' in str(e):
return {'status': -1} # skipped
else:
raise e
return {'status': 0}
def can_start_over(self, notebook, snippet):
try:
db = self._get_db(snippet, interpreter=self.interpreter)
handle = self._get_handle(snippet)
# Test handle to verify if still valid
db.fetch(handle, start_over=True, rows=1)
can_start_over = True
except Exception as e:
raise e
return can_start_over
@query_error_handler
def progress(self, notebook, snippet, logs=''):
patch_snippet_for_connector(snippet)
if snippet['dialect'] == 'hive':
      match = re.search(r'Total jobs = (\d+)', logs, re.MULTILINE)
total = int(match.group(1)) if match else 1
started = logs.count('Starting Job')
ended = logs.count('Ended Job')
progress = int((started + ended) * 100 / (total * 2))
return max(progress, 5) # Return 5% progress as a minimum
elif snippet['dialect'] == 'impala':
      match = re.findall(r'(\d+)% Complete', logs, re.MULTILINE)
# Retrieve the last reported progress percentage if it exists
return int(match[-1]) if match and isinstance(match, list) else 0
else:
return 50
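  # Worked example of the heuristic above (log excerpts are invented):
  #   Hive:   "Total jobs = 2 ... Starting Job ... Ended Job ... Starting Job"
  #           -> total=2, started=2, ended=1 -> int((2 + 1) * 100 / 4) = 75%.
  #   Impala: "45% Complete ... 80% Complete" -> last match wins -> 80%.
  #   Any other dialect reports a flat 50%.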
@query_error_handler
def get_jobs(self, notebook, snippet, logs):
jobs = []
patch_snippet_for_connector(snippet)
if snippet['dialect'] == 'hive':
engine = self._get_hive_execution_engine(notebook, snippet)
jobs_with_state = parse_out_jobs(logs, engine=engine, with_state=True)
queries_with_state = parse_out_queries(logs, engine=engine, with_state=True)
jobs = [{
'name': job.get('job_id', ''),
'url': reverse('jobbrowser:jobbrowser.views.single_job', kwargs={'job': job.get('job_id', '')}) if has_jobbrowser else '',
'started': job.get('started', False),
'finished': job.get('finished', False)
}
for job in jobs_with_state
]
if has_hive_query_browser:
jobs += [{
'name': job.get('job_id', ''),
'url': 'api/job/queries-hive/',
'started': job.get('started', False),
'finished': job.get('finished', False)
}
for job in queries_with_state
]
elif snippet['dialect'] == 'impala' and has_query_browser:
guid = snippet['result']['handle']['guid']
if isinstance(guid, str):
guid = guid.encode('utf-8')
query_id = unpack_guid_base64(guid)
progress = min(
self.progress(notebook, snippet, logs), 99
) if snippet['status'] != 'available' and snippet['status'] != 'success' else 100
jobs = [{
'name': query_id,
'url': '/hue/jobbrowser#!id=%s' % query_id,
'started': True,
'finished': False,
'percentJob': progress
}]
return jobs
@query_error_handler
def autocomplete(self, snippet, database=None, table=None, column=None, nested=None, operation=None):
db = self._get_db(snippet, interpreter=self.interpreter)
query = None
if snippet.get('query'):
query = snippet.get('query')
elif snippet.get('source') == 'query':
document = Document2.objects.get(id=database)
document.can_read_or_exception(self.user)
notebook = Notebook(document=document).get_data()
snippet = notebook['snippets'][0]
query = self._get_current_statement(notebook, snippet)['statement']
database, table = '', ''
resp = _autocomplete(db, database, table, column, nested, query=query, cluster=self.interpreter, operation=operation)
if resp.get('error'):
resp['message'] = resp.pop('error')
if 'Read timed out' in resp['message']:
raise QueryExpired(resp['message'])
return resp
@query_error_handler
def get_sample_data(self, snippet, database=None, table=None, column=None, is_async=False, operation=None):
try:
db = self._get_db(snippet, is_async=is_async, interpreter=self.interpreter)
return _get_sample_data(db, database, table, column, is_async, operation=operation, cluster=self.interpreter)
except QueryServerException as ex:
raise QueryError(ex.message)
@query_error_handler
def explain(self, notebook, snippet):
db = self._get_db(snippet, interpreter=self.interpreter)
response = self._get_current_statement(notebook, snippet)
session = self._get_session(notebook, snippet['type'])
statement = response.pop('statement')
explanation = ''
query = self._prepare_hql_query(snippet, statement, session)
if statement:
try:
db.use(query.database)
explanation = db.explain(query).textual
statement = query.get_query_statement(0)
except QueryServerException as ex:
explanation = str(ex.message)
return {
'status': 0,
'explanation': explanation,
'statement': statement,
}
@query_error_handler
def export_data_as_hdfs_file(self, snippet, target_file, overwrite):
db = self._get_db(snippet, interpreter=self.interpreter)
handle = self._get_handle(snippet)
max_rows = DOWNLOAD_ROW_LIMIT.get()
max_bytes = DOWNLOAD_BYTES_LIMIT.get()
upload(target_file, handle, self.request.user, db, self.request.fs, max_rows=max_rows, max_bytes=max_bytes)
return '/filebrowser/view=%s' % urllib_quote(
urllib_quote(target_file.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS)
) # Quote twice because of an issue in the client-side routing
def export_data_as_table(self, notebook, snippet, destination, is_temporary=False, location=None):
db = self._get_db(snippet, interpreter=self.interpreter)
response = self._get_current_statement(notebook, snippet)
session = self._get_session(notebook, snippet['type'])
query = self._prepare_hql_query(snippet, response.pop('statement'), session)
if 'select' not in query.hql_query.strip().lower():
raise PopupException(_('Only SELECT statements can be saved. Provided statement: %(query)s') % {'query': query.hql_query})
database = snippet.get('database') or 'default'
table = destination
if '.' in table:
database, table = table.split('.', 1)
db.use(query.database)
hql = 'CREATE %sTABLE `%s`.`%s` %sAS %s' % (
'TEMPORARY ' if is_temporary else '', database, table, "LOCATION '%s' " % location if location else '', query.hql_query
)
success_url = reverse('metastore:describe_table', kwargs={'database': database, 'table': table})
return hql, success_url
def export_large_data_to_hdfs(self, notebook, snippet, destination):
response = self._get_current_statement(notebook, snippet)
session = self._get_session(notebook, snippet['type'])
query = self._prepare_hql_query(snippet, response.pop('statement'), session)
if 'select' not in query.hql_query.strip().lower():
raise PopupException(_('Only SELECT statements can be saved. Provided statement: %(query)s') % {'query': query.hql_query})
hql = '''
DROP TABLE IF EXISTS `%(table)s`;
CREATE TABLE `%(table)s` ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\\t'
ESCAPED BY '\\\\'
LINES TERMINATED BY '\\n'
STORED AS TEXTFILE LOCATION '%(location)s'
AS
%(hql)s;
ALTER TABLE `%(table)s` SET TBLPROPERTIES('EXTERNAL'='TRUE');
DROP TABLE IF EXISTS `%(table)s`;
''' % {
'table': _get_snippet_name(notebook, unique=True, table_format=True),
'location': self.request.fs.netnormpath(destination),
'hql': query.hql_query
}
success_url = '/filebrowser/view=%s' % urllib_quote(destination.encode('utf-8'), safe=SAFE_CHARACTERS_URI_COMPONENTS)
return hql, success_url
def upgrade_properties(self, lang='hive', properties=None):
upgraded_properties = copy.deepcopy(self.get_properties(lang))
# Check that the current properties are a list of dictionaries, each with 'key' and 'value' keys
if not isinstance(properties, list) or \
not all(isinstance(prop, dict) for prop in properties) or \
not all('key' in prop for prop in properties) or not all('value' in prop for prop in properties):
LOG.warning('Current properties are not formatted correctly, will replace with defaults.')
return upgraded_properties
valid_props_dict = dict((prop["key"], prop) for prop in upgraded_properties)
curr_props_dict = dict((prop['key'], prop) for prop in properties)
# Upgrade based on valid properties as needed
if set(valid_props_dict.keys()) != set(curr_props_dict.keys()):
settings = next((prop for prop in upgraded_properties if prop['key'] == 'settings'), None)
if settings is not None and isinstance(properties, list):
settings['value'] = properties
else: # No upgrade needed so return existing properties
upgraded_properties = properties
return upgraded_properties
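# Shape the check above expects (values are illustrative only):
#   properties = [{'key': 'settings', 'value': [...]}, {'key': 'files', 'value': [...]}]
# Anything not shaped as a list of {'key': ..., 'value': ...} dicts is replaced by the
# defaults; if the keys differ from the defaults, the current list is folded into the
# default 'settings' entry, otherwise the current properties are returned unchanged.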
def _get_session(self, notebook, type='hive'):
session = next((session for session in notebook['sessions'] if session['type'] == type), None)
return session
def _get_session_by_id(self, notebook, type='hive'):
session = self._get_session(notebook, type)
if session:
session_id = session.get('id')
if session_id:
filters = {'id': session_id, 'application': 'beeswax' if type == 'hive' or type == 'llap' else type}
if not is_admin(self.user):
filters['owner'] = self.user
return Session.objects.get(**filters)
def _get_hive_execution_engine(self, notebook, snippet):
# Get hive.execution.engine from the snippet properties; if not set there, fall back to the session
properties = snippet['properties']
settings = properties.get('settings', [])
if not settings:
session = self._get_session(notebook, 'hive')
if not session:
LOG.warning('Cannot get jobs, failed to find active HS2 session for user: %s' % self.user.username)
elif session.get('configuration') and session['configuration'].get('hive.execution.engine'):
return session['configuration'].get('hive.execution.engine')
else:
properties = session['properties']
settings = next((prop['value'] for prop in properties if prop['key'] == 'settings'), None)
if settings:
engine = next((setting['value'] for setting in settings if setting['key'] == 'hive.execution.engine'), DEFAULT_HIVE_ENGINE)
else:
engine = DEFAULT_HIVE_ENGINE
return engine
def _prepare_hql_query(self, snippet, statement, session):
settings = snippet['properties'].get('settings', None)
file_resources = snippet['properties'].get('files', None)
functions = snippet['properties'].get('functions', None)
properties = session['properties'] if session else []
# Get properties from session if not defined in snippet
if not settings:
settings = next((prop['value'] for prop in properties if prop['key'] == 'settings'), None)
if not file_resources:
file_resources = next((prop['value'] for prop in properties if prop['key'] == 'files'), None)
if not functions:
functions = next((prop['value'] for prop in properties if prop['key'] == 'functions'), None)
database = snippet.get('database') or 'default'
return hql_query(
statement,
query_type=QUERY_TYPES[0],
settings=settings,
file_resources=file_resources,
functions=functions,
database=database
)
def get_browse_query(self, snippet, database, table, partition_spec=None):
db = self._get_db(snippet, interpreter=self.interpreter)
table = db.get_table(database, table)
if table.is_impala_only:
snippet['type'] = 'impala'
db = self._get_db(snippet, interpreter=self.interpreter)
if partition_spec is not None:
decoded_spec = urllib_unquote(partition_spec)
return db.get_partition(database, table.name, decoded_spec, generate_ddl_only=True)
else:
return db.get_select_star_query(database, table, limit=100)
def _get_handle(self, snippet):
try:
handle = snippet['result']['handle'].copy()
handle['secret'], handle['guid'] = HiveServerQueryHandle.get_decoded(handle['secret'], handle['guid'])
except KeyError:
raise Exception('Operation has no valid handle attached')
except binascii.Error:
LOG.warning('Handle is already base64 decoded')
for key in list(handle.keys()):
if key not in ('log_context', 'secret', 'has_result_set', 'operation_type', 'modified_row_count', 'guid'):
handle.pop(key)
return HiveServerQueryHandle(**handle)
def _get_db(self, snippet, is_async=False, interpreter=None):
if interpreter and interpreter.get('dialect'):
dialect = interpreter['dialect']
else:
dialect = snippet['type'] # Backward compatibility without connectors
if not is_async and dialect == 'hive':
name = 'beeswax'
elif dialect == 'hive':
name = 'hive'
elif dialect == 'llap':
name = 'llap'
elif dialect == 'impala':
name = 'impala'
else:
name = 'sparksql'
# Note: name is not used if interpreter is present
return dbms.get(self.user, query_server=get_query_server_config(name=name, connector=interpreter))
def _parse_job_counters(self, job_id):
# Attempt to fetch total records from the job's Hive counter
total_records, total_size = None, None
job = get_job(self.request, job_id=job_id)
if not job or not job.counters:
raise PopupException(_('Failed to get job details or job does not contain counters data.'))
counter_groups = job.counters.get('counterGroup') # Returns list of counter groups with 'counterGroupName' and 'counter'
if counter_groups:
# Extract totalCounterValue from HIVE counter group
hive_counters = next((group for group in counter_groups if group.get('counterGroupName', '').upper() == 'HIVE'), None)
if hive_counters:
total_records = next(
(counter.get('totalCounterValue') for counter in hive_counters['counter'] if counter['name'] == 'RECORDS_OUT_0'),
None
)
else:
LOG.info("No HIVE counter group found for job: %s" % job_id)
# Extract totalCounterValue from FileSystemCounter counter group
fs_counters = next(
(group for group in counter_groups if group.get('counterGroupName') == 'org.apache.hadoop.mapreduce.FileSystemCounter'),
None
)
if fs_counters:
total_size = next(
(counter.get('totalCounterValue') for counter in fs_counters['counter'] if counter['name'] == 'HDFS_BYTES_WRITTEN'),
None
)
else:
LOG.info("No FileSystemCounter counter group found for job: %s" % job_id)
return total_records, total_size
def _get_hive_result_size(self, notebook, snippet):
total_records, total_size, msg = None, None, None
engine = self._get_hive_execution_engine(notebook, snippet).lower()
logs = self.get_log(notebook, snippet, startFrom=0)
if engine == 'mr':
jobs = self.get_jobs(notebook, snippet, logs)
if jobs:
last_job_id = jobs[-1].get('name')
LOG.info("Hive query executed %d jobs, last job is: %s" % (len(jobs), last_job_id))
total_records, total_size = self._parse_job_counters(job_id=last_job_id)
else:
msg = _('Hive query did not execute any jobs.')
elif engine == 'spark':
total_records_re = r"RECORDS_OUT_0: (?P<total_records>\d+)"
total_size_re = r"Spark Job\[[a-z0-9-]+\] Metrics[A-Za-z0-9:\s]+ResultSize: (?P<total_size>\d+)"
total_records_match = re.search(total_records_re, logs, re.MULTILINE)
total_size_match = re.search(total_size_re, logs, re.MULTILINE)
if total_records_match:
total_records = int(total_records_match.group('total_records'))
if total_size_match:
total_size = int(total_size_match.group('total_size'))
return total_records, total_size, msg
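# Hypothetical Spark log fragment matching the regexes above (not captured from a real
# run): "RECORDS_OUT_0: 1500" and "Spark Job[a1b2c3-d4] Metrics TaskDuration: 500
# ResultSize: 20480" would yield total_records == 1500 and total_size == 20480.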
def _get_impala_result_size(self, notebook, snippet):
total_records_match = None
total_records, total_size, msg = None, None, None
query_id = self._get_impala_query_id(snippet)
server_url = _get_api(self.user, snippet)._url
if query_id:
LOG.debug("Attempting to get Impala query profile at server_url %s for query ID: %s" % (server_url, query_id))
fragment = self._get_impala_query_profile(server_url, query_id=query_id)
total_records_re = \
r"Coordinator Fragment F\d\d.+?RowsReturned: \d+(?:.\d+[KMB])? \((?P<total_records>\d+)\).*?(Averaged Fragment F\d\d)"
total_records_match = re.search(total_records_re, fragment, re.MULTILINE | re.DOTALL)
if total_records_match:
total_records = int(total_records_match.group('total_records'))
query_plan = self._get_impala_profile_plan(query_id, fragment)
if query_plan:
LOG.debug('Query plan for Impala query %s: %s' % (query_id, query_plan))
else:
LOG.info('Query plan for Impala query %s not found.' % query_id)
return total_records, total_size, msg
def _get_impala_query_id(self, snippet):
guid = None
if 'result' in snippet and 'handle' in snippet['result'] and 'guid' in snippet['result']['handle']:
try:
guid = unpack_guid_base64(snippet['result']['handle']['guid'])
except Exception as e:
LOG.warning('Failed to decode operation handle guid: %s' % e)
else:
LOG.warning('Snippet does not contain a valid result handle, cannot extract Impala query ID.')
return guid
def _get_impala_query_profile(self, server_url, query_id):
api = get_impalad_api(user=self.user, url=server_url)
try:
query_profile = api.get_query_profile(query_id)
profile = query_profile.get('profile')
except (RestException, ImpalaDaemonApiException) as e:
raise PopupException(_("Failed to get query profile from Impala Daemon server: %s") % e)
if not profile:
raise PopupException(_("Could not find profile in query profile response from Impala Daemon Server."))
return profile
def _get_impala_profile_plan(self, query_id, profile):
query_plan_re = r"Query \(id=%(query_id)s\):.+?Execution Profile %(query_id)s" % {'query_id': query_id}
query_plan_match = re.search(query_plan_re, profile, re.MULTILINE | re.DOTALL)
return query_plan_match.group() if query_plan_match else None
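# Sketch of the profile text the regexes above are written for (shape inferred from the
# patterns, not from a captured profile): total_records comes from a block like
#   "Coordinator Fragment F00 ... RowsReturned: 1.50K (1500) ... Averaged Fragment F00"
# (the value in parentheses), and the query plan is the span between
#   "Query (id=<query_id>):" and "Execution Profile <query_id>".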
def describe_column(self, notebook, snippet, database=None, table=None, column=None):
db = self._get_db(snippet, interpreter=self.interpreter)
return db.get_table_columns_stats(database, table, column)
def describe_table(self, notebook, snippet, database=None, table=None):
db = self._get_db(snippet, interpreter=self.interpreter)
tb = db.get_table(database, table)
return {
'status': 0,
'name': tb.name,
'partition_keys': [{'name': part.name, 'type': part.type} for part in tb.partition_keys],
'primary_keys': [{'name': pk.name} for pk in tb.primary_keys],
'cols': [{'name': col.name, 'type': col.type, 'comment': col.comment} for col in tb.cols],
'path_location': tb.path_location,
'hdfs_link': tb.hdfs_link,
'comment': tb.comment,
'is_view': tb.is_view,
'properties': tb.properties,
'details': tb.details,
'stats': tb.stats
}
def describe_database(self, notebook, snippet, database=None):
db = self._get_db(snippet, interpreter=self.interpreter)
return db.get_database(database)
def get_log_is_full_log(self, notebook, snippet):
return snippet['type'] != 'hive' and snippet['type'] != 'impala'
|
|
# This script recognizes a set of known reflectors and, while one is being dragged, allows only movement consistent with it (adjusting its phase as it moves).
#Written by Michael Simkin 2014
import golly as g
import copy
data = []
data.append(["2o5b2o$2o5bo$5bobo$5b2o$b2o$bobo$2bo2$5b3o$5b3o$5b3o$8b3o$8b3o$8b3o!", "P8 Reflector"])
data.append(["22b2o$22bo$11b2o7bobo$11bobo6b2o$6b2o4b3o$5bo2bo4b3o$7bo4b3o$3bo7bobo$2bob2o5b2o$2bo$b2o!", "P30 Reflector", [(1, -1)]])
data.append(["13bo$11b3o$10bo$10b2o3$18b2ob2o$19bob2o$19bo$11b2o4b3o$11b2o3bo3b2o$16b4o2bo$2b2o15bob2o$bobo12b3o2bo$bo13bo5bo$2o14b5o$18bo!", "Snark1"])
data.append(["13bo$11b3o$10bo$10b2o3$18b2o$19bo$19bob2o$11b2o4b3o2bo$11b2o3bo3b2o$16b4o$2b2o15bo$bobo12b3o$bo13bo$2o14b5o$20bo$18bo$18b2o!", "Snark2"])
def FindPeriod(obj):
evolved = g.evolve(obj, 1)
for i in xrange(1, 1000):
if str(evolved) == str(obj):
return i
evolved = g.evolve(evolved, 1)
return -1
def GetSize(obj):
maxX = -1
maxY = -1
minX = 1000
minY = 1000
for i in xrange(0, len(obj), 2):
if obj[i] > maxX:
maxX = obj[i]
if obj[i + 1] > maxY:
maxY = obj[i + 1]
if obj[i] < minX:
minX = obj[i]
if obj[i + 1] < minY:
minY = obj[i + 1]
return (maxX - minX, maxY - minY)
def GetTransList():
transList = []
for i in xrange(-1, 2, 2):
for j in xrange(-1, 2, 2):
transList.append((i, 0, 0, j))
transList.append((0, i, j, 0))
return transList
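# The list built above holds the 8 orientations of a pattern (4 rotations and their
# mirror images) as (dxx, dxy, dyx, dyy) matrices for g.transform(); for example
# (1, 0, 0, 1) is the identity and (0, -1, 1, 0) is a 90-degree rotation.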
def GetObjectClickArray(obj, objName, t, period):
result = []
for i in xrange(0, len(obj), 2):
x = obj[i]
y = obj[i + 1]
l = copy.copy(obj)
for j in xrange(0, len(obj), 2):
l[j] -= x
l[j + 1] -= y
result.append([l, objName, t, period])
return result
def GetObjectArray(iniobj):
result = []
transList = GetTransList()
period = FindPeriod(iniobj[0])
for i in xrange(0, period):
obj = g.evolve(iniobj[0], i)
for t in transList:
dxx, dxy, dyx, dyy = t
curobj = g.transform(obj, 0, 0, dxx, dxy, dyx, dyy)
result.extend(GetObjectClickArray(curobj, iniobj[1], t, period))
return result
def IsObjectExists(objectArray, x, y):
for obj, objName, t, p in objectArray:
found = True
for i in xrange(0, len(obj), 2):
dx = obj[i]
dy = obj[i + 1]
if g.getcell(x + dx, y + dy) == 0:
found = False
break
if found:
return [obj, objName, x, y, t, p]
return None
def GetObjectByClick(event):
x = int(event.split()[1])
y = int(event.split()[2])
found = False
for i in [0, -1, 1, -2, 2]:
for j in [0, -1, 1]:
if found:
break
o = IsObjectExists(objectArray, x + i, y + j)
if o is not None:
g.show("found!")
for k in xrange(0, len(o[0]), 2):
dx = o[0][k]
dy = o[0][k + 1]
g.setcell(x + i + dx, y + j + dy, 0)
found = True
g.update()
if found:
return o
else:
return None
def ClearRect(x, y, w, h):
for i in xrange(0, w):
for j in xrange(0, h):
g.setcell(x + i, y + j, 0)
def GetDirection(t):
dxx, dxy, dyx, dyy = t
if dxy == 0:
return dxx * dyy
else:
return dxy * dyx
def GetEvolveDirection(t):
dxx, dxy, dyx, dyy = t
if dxy == 0:
return -dxx
else:
return -dxy
def FinishMove(d, w, h, x0, y0, p, t):
under = d[0]
obj = d[1]
x = d[2]
y = d[3]
if under != -1:
g.putcells(under)
g.putcells(g.evolve(obj, p + GetEvolveDirection(t) * ((4 * (x - x0)) % p)), x, y)
g.update()
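# Note on the phase arithmetic above (a reading of the script, not documented upstream):
# a glider advances one cell diagonally every 4 generations, so dragging the reflector
# (x - x0) cells along its allowed diagonal is compensated by evolving it
# 4 * (x - x0) generations modulo its period p; GetEvolveDirection(t) supplies the sign
# for mirrored orientations and the added p keeps the evolve count non-negative.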
def UpdateMove(d, w, h, x0, y0, p, t):
under = d[0]
obj = d[1]
x = d[2]
y = d[3]
if under != -1:
ClearRect(x - w, y - h, 2 * w + 1, 2 * h + 1)
g.putcells(under)
val = g.getxy()
if val == "":
return
x1 = int(val.split()[0])
y1 = y0 + GetDirection(t) * (x1 - x0)
d[0] = g.getcells([x1 - w, y1 - h, 2 * w + 1, 2 * h + 1])
#ClearRect(x1 - w, y1 - h, 2 * w + 1, 2 * h + 1)
g.putcells(g.evolve(obj, p + GetEvolveDirection(t) * ((4 * (x1 - x0)) % p)), x1, y1)
g.update()
d[2] = x1
d[3] = y1
def InitUpdateMove(obj, x, y):
return [-1, obj, x, y]
objectArray = []
for d in data:
objectArray.extend(GetObjectArray([g.parse(d[0]), d[1]]))
moving = False
g.show("Select known object with left click, exit with right click")
handling = False
searching = False
while True:
event = g.getevent()
if handling or searching:
continue
handling = True
if "click" in event:
if "right" in event:
g.show("finish smart movement")
g.exit()
if not moving:
searching = True
g.show("searching...")
g.update()
found = GetObjectByClick(event)
if found != None:
p = found[5]
t = found[4]
curMove = InitUpdateMove(found[0], found[2], found[3])
g.show("Found: " + str(found[1]) + ", click left to place and continue, right to place and exit")
w, h = GetSize(found[0])
moving = True
searching = False
else:
if "left" in event and not searching:
moving = False
FinishMove(curMove, w, h, found[2], found[3], p, t)
g.show("Object moved, select known object with left click, exit with right click")
g.update()
if moving and event == "":
UpdateMove(curMove, w, h, found[2], found[3], p, t)
handling = False
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.contrib.postgres.fields
class Migration(migrations.Migration):
dependencies = [
('hs_app_timeseries', '0004_auto_20160526_2026'),
]
operations = [
migrations.CreateModel(
name='CVAggregationStatistic',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('term', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('is_dirty', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CVElevationDatum',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('term', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('is_dirty', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CVMedium',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('term', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('is_dirty', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CVMethodType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('term', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('is_dirty', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CVSiteType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('term', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('is_dirty', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CVSpeciation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('term', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('is_dirty', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CVStatus',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('term', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('is_dirty', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CVUnitsType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('term', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('is_dirty', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CVVariableName',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('term', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('is_dirty', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='CVVariableType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('term', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('is_dirty', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='method',
name='is_dirty',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='method',
name='series_ids',
field=django.contrib.postgres.fields.ArrayField(default=[], base_field=models.CharField(max_length=36, null=True, blank=True), size=None),
),
migrations.AddField(
model_name='processinglevel',
name='is_dirty',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='processinglevel',
name='series_ids',
field=django.contrib.postgres.fields.ArrayField(default=[], base_field=models.CharField(max_length=36, null=True, blank=True), size=None),
),
migrations.AddField(
model_name='site',
name='is_dirty',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='site',
name='series_ids',
field=django.contrib.postgres.fields.ArrayField(default=[], base_field=models.CharField(max_length=36, null=True, blank=True), size=None),
),
migrations.AddField(
model_name='timeseriesmetadata',
name='is_dirty',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='timeseriesresult',
name='is_dirty',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='timeseriesresult',
name='series_ids',
field=django.contrib.postgres.fields.ArrayField(default=[], base_field=models.CharField(max_length=36, null=True, blank=True), size=None),
),
migrations.AddField(
model_name='variable',
name='is_dirty',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='variable',
name='series_ids',
field=django.contrib.postgres.fields.ArrayField(default=[], base_field=models.CharField(max_length=36, null=True, blank=True), size=None),
),
migrations.AlterUniqueTogether(
name='method',
unique_together=set([]),
),
migrations.AlterUniqueTogether(
name='processinglevel',
unique_together=set([]),
),
migrations.AlterUniqueTogether(
name='site',
unique_together=set([]),
),
migrations.AlterUniqueTogether(
name='timeseriesresult',
unique_together=set([]),
),
migrations.AlterUniqueTogether(
name='variable',
unique_together=set([]),
),
migrations.AddField(
model_name='cvvariabletype',
name='metadata',
field=models.ForeignKey(related_name='cv_variable_types', to='hs_app_timeseries.TimeSeriesMetaData'),
),
migrations.AddField(
model_name='cvvariablename',
name='metadata',
field=models.ForeignKey(related_name='cv_variable_names', to='hs_app_timeseries.TimeSeriesMetaData'),
),
migrations.AddField(
model_name='cvunitstype',
name='metadata',
field=models.ForeignKey(related_name='cv_units_types', to='hs_app_timeseries.TimeSeriesMetaData'),
),
migrations.AddField(
model_name='cvstatus',
name='metadata',
field=models.ForeignKey(related_name='cv_statuses', to='hs_app_timeseries.TimeSeriesMetaData'),
),
migrations.AddField(
model_name='cvspeciation',
name='metadata',
field=models.ForeignKey(related_name='cv_speciations', to='hs_app_timeseries.TimeSeriesMetaData'),
),
migrations.AddField(
model_name='cvsitetype',
name='metadata',
field=models.ForeignKey(related_name='cv_site_types', to='hs_app_timeseries.TimeSeriesMetaData'),
),
migrations.AddField(
model_name='cvmethodtype',
name='metadata',
field=models.ForeignKey(related_name='cv_method_types', to='hs_app_timeseries.TimeSeriesMetaData'),
),
migrations.AddField(
model_name='cvmedium',
name='metadata',
field=models.ForeignKey(related_name='cv_mediums', to='hs_app_timeseries.TimeSeriesMetaData'),
),
migrations.AddField(
model_name='cvelevationdatum',
name='metadata',
field=models.ForeignKey(related_name='cv_elevation_datums', to='hs_app_timeseries.TimeSeriesMetaData'),
),
migrations.AddField(
model_name='cvaggregationstatistic',
name='metadata',
field=models.ForeignKey(related_name='cv_aggregation_statistics', to='hs_app_timeseries.TimeSeriesMetaData'),
),
]
|
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import udp_ep
module = 'udp_arb_mux'
testbench = 'test_%s_4' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../lib/axis/rtl/arbiter.v")
srcs.append("../lib/axis/rtl/priority_encoder.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Parameters
S_COUNT = 4
DATA_WIDTH = 8
KEEP_ENABLE = (DATA_WIDTH>8)
KEEP_WIDTH = (DATA_WIDTH/8)
ID_ENABLE = 1
ID_WIDTH = 8
DEST_ENABLE = 1
DEST_WIDTH = 8
USER_ENABLE = 1
USER_WIDTH = 1
ARB_TYPE_ROUND_ROBIN = 0
ARB_LSB_HIGH_PRIORITY = 1
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_udp_hdr_valid_list = [Signal(bool(0)) for i in range(S_COUNT)]
s_eth_dest_mac_list = [Signal(intbv(0)[48:]) for i in range(S_COUNT)]
s_eth_src_mac_list = [Signal(intbv(0)[48:]) for i in range(S_COUNT)]
s_eth_type_list = [Signal(intbv(0)[16:]) for i in range(S_COUNT)]
s_ip_version_list = [Signal(intbv(0)[4:]) for i in range(S_COUNT)]
s_ip_ihl_list = [Signal(intbv(0)[4:]) for i in range(S_COUNT)]
s_ip_dscp_list = [Signal(intbv(0)[6:]) for i in range(S_COUNT)]
s_ip_ecn_list = [Signal(intbv(0)[2:]) for i in range(S_COUNT)]
s_ip_length_list = [Signal(intbv(0)[16:]) for i in range(S_COUNT)]
s_ip_identification_list = [Signal(intbv(0)[16:]) for i in range(S_COUNT)]
s_ip_flags_list = [Signal(intbv(0)[3:]) for i in range(S_COUNT)]
s_ip_fragment_offset_list = [Signal(intbv(0)[13:]) for i in range(S_COUNT)]
s_ip_ttl_list = [Signal(intbv(0)[8:]) for i in range(S_COUNT)]
s_ip_protocol_list = [Signal(intbv(0)[8:]) for i in range(S_COUNT)]
s_ip_header_checksum_list = [Signal(intbv(0)[16:]) for i in range(S_COUNT)]
s_ip_source_ip_list = [Signal(intbv(0)[32:]) for i in range(S_COUNT)]
s_ip_dest_ip_list = [Signal(intbv(0)[32:]) for i in range(S_COUNT)]
s_udp_source_port_list = [Signal(intbv(0)[16:]) for i in range(S_COUNT)]
s_udp_dest_port_list = [Signal(intbv(0)[16:]) for i in range(S_COUNT)]
s_udp_length_list = [Signal(intbv(0)[16:]) for i in range(S_COUNT)]
s_udp_checksum_list = [Signal(intbv(0)[16:]) for i in range(S_COUNT)]
s_udp_payload_axis_tdata_list = [Signal(intbv(0)[DATA_WIDTH:]) for i in range(S_COUNT)]
s_udp_payload_axis_tkeep_list = [Signal(intbv(1)[KEEP_WIDTH:]) for i in range(S_COUNT)]
s_udp_payload_axis_tvalid_list = [Signal(bool(0)) for i in range(S_COUNT)]
s_udp_payload_axis_tlast_list = [Signal(bool(0)) for i in range(S_COUNT)]
s_udp_payload_axis_tid_list = [Signal(intbv(0)[ID_WIDTH:]) for i in range(S_COUNT)]
s_udp_payload_axis_tdest_list = [Signal(intbv(0)[DEST_WIDTH:]) for i in range(S_COUNT)]
s_udp_payload_axis_tuser_list = [Signal(intbv(0)[USER_WIDTH:]) for i in range(S_COUNT)]
s_udp_hdr_valid = ConcatSignal(*reversed(s_udp_hdr_valid_list))
s_eth_dest_mac = ConcatSignal(*reversed(s_eth_dest_mac_list))
s_eth_src_mac = ConcatSignal(*reversed(s_eth_src_mac_list))
s_eth_type = ConcatSignal(*reversed(s_eth_type_list))
s_ip_version = ConcatSignal(*reversed(s_ip_version_list))
s_ip_ihl = ConcatSignal(*reversed(s_ip_ihl_list))
s_ip_dscp = ConcatSignal(*reversed(s_ip_dscp_list))
s_ip_ecn = ConcatSignal(*reversed(s_ip_ecn_list))
s_ip_length = ConcatSignal(*reversed(s_ip_length_list))
s_ip_identification = ConcatSignal(*reversed(s_ip_identification_list))
s_ip_flags = ConcatSignal(*reversed(s_ip_flags_list))
s_ip_fragment_offset = ConcatSignal(*reversed(s_ip_fragment_offset_list))
s_ip_ttl = ConcatSignal(*reversed(s_ip_ttl_list))
s_ip_protocol = ConcatSignal(*reversed(s_ip_protocol_list))
s_ip_header_checksum = ConcatSignal(*reversed(s_ip_header_checksum_list))
s_ip_source_ip = ConcatSignal(*reversed(s_ip_source_ip_list))
s_ip_dest_ip = ConcatSignal(*reversed(s_ip_dest_ip_list))
s_udp_source_port = ConcatSignal(*reversed(s_udp_source_port_list))
s_udp_dest_port = ConcatSignal(*reversed(s_udp_dest_port_list))
s_udp_length = ConcatSignal(*reversed(s_udp_length_list))
s_udp_checksum = ConcatSignal(*reversed(s_udp_checksum_list))
s_udp_payload_axis_tdata = ConcatSignal(*reversed(s_udp_payload_axis_tdata_list))
s_udp_payload_axis_tkeep = ConcatSignal(*reversed(s_udp_payload_axis_tkeep_list))
s_udp_payload_axis_tvalid = ConcatSignal(*reversed(s_udp_payload_axis_tvalid_list))
s_udp_payload_axis_tlast = ConcatSignal(*reversed(s_udp_payload_axis_tlast_list))
s_udp_payload_axis_tid = ConcatSignal(*reversed(s_udp_payload_axis_tid_list))
s_udp_payload_axis_tdest = ConcatSignal(*reversed(s_udp_payload_axis_tdest_list))
s_udp_payload_axis_tuser = ConcatSignal(*reversed(s_udp_payload_axis_tuser_list))
m_udp_hdr_ready = Signal(bool(0))
m_udp_payload_axis_tready = Signal(bool(0))
# Outputs
s_udp_hdr_ready = Signal(intbv(0)[S_COUNT:])
s_udp_payload_axis_tready = Signal(intbv(0)[S_COUNT:])
s_udp_hdr_ready_list = [s_udp_hdr_ready(i) for i in range(S_COUNT)]
s_udp_payload_axis_tready_list = [s_udp_payload_axis_tready(i) for i in range(S_COUNT)]
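# Wiring note (not part of the original testbench): ConcatSignal(*reversed(sig_list))
# packs the per-port MyHDL signals into one wide bus with port 0 in the least
# significant bits, matching the S_COUNT-wide DUT ports, while s_udp_hdr_ready(i) and
# s_udp_payload_axis_tready(i) create read-only shadow signals that slice bit i of the
# wide ready outputs back out for each per-port source model.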
m_udp_hdr_valid = Signal(bool(0))
m_eth_dest_mac = Signal(intbv(0)[48:])
m_eth_src_mac = Signal(intbv(0)[48:])
m_eth_type = Signal(intbv(0)[16:])
m_ip_version = Signal(intbv(0)[4:])
m_ip_ihl = Signal(intbv(0)[4:])
m_ip_dscp = Signal(intbv(0)[6:])
m_ip_ecn = Signal(intbv(0)[2:])
m_ip_length = Signal(intbv(0)[16:])
m_ip_identification = Signal(intbv(0)[16:])
m_ip_flags = Signal(intbv(0)[3:])
m_ip_fragment_offset = Signal(intbv(0)[13:])
m_ip_ttl = Signal(intbv(0)[8:])
m_ip_protocol = Signal(intbv(0)[8:])
m_ip_header_checksum = Signal(intbv(0)[16:])
m_ip_source_ip = Signal(intbv(0)[32:])
m_ip_dest_ip = Signal(intbv(0)[32:])
m_udp_source_port = Signal(intbv(0)[16:])
m_udp_dest_port = Signal(intbv(0)[16:])
m_udp_length = Signal(intbv(0)[16:])
m_udp_checksum = Signal(intbv(0)[16:])
m_udp_payload_axis_tdata = Signal(intbv(0)[DATA_WIDTH:])
m_udp_payload_axis_tkeep = Signal(intbv(1)[KEEP_WIDTH:])
m_udp_payload_axis_tvalid = Signal(bool(0))
m_udp_payload_axis_tlast = Signal(bool(0))
m_udp_payload_axis_tid = Signal(intbv(0)[ID_WIDTH:])
m_udp_payload_axis_tdest = Signal(intbv(0)[DEST_WIDTH:])
m_udp_payload_axis_tuser = Signal(intbv(0)[USER_WIDTH:])
# sources and sinks
source_pause_list = []
source_list = []
source_logic_list = []
sink_pause = Signal(bool(0))
for k in range(S_COUNT):
s = udp_ep.UDPFrameSource()
p = Signal(bool(0))
source_list.append(s)
source_pause_list.append(p)
source_logic_list.append(s.create_logic(
clk,
rst,
udp_hdr_ready=s_udp_hdr_ready_list[k],
udp_hdr_valid=s_udp_hdr_valid_list[k],
eth_dest_mac=s_eth_dest_mac_list[k],
eth_src_mac=s_eth_src_mac_list[k],
eth_type=s_eth_type_list[k],
ip_version=s_ip_version_list[k],
ip_ihl=s_ip_ihl_list[k],
ip_dscp=s_ip_dscp_list[k],
ip_ecn=s_ip_ecn_list[k],
ip_length=s_ip_length_list[k],
ip_identification=s_ip_identification_list[k],
ip_flags=s_ip_flags_list[k],
ip_fragment_offset=s_ip_fragment_offset_list[k],
ip_ttl=s_ip_ttl_list[k],
ip_protocol=s_ip_protocol_list[k],
ip_header_checksum=s_ip_header_checksum_list[k],
ip_source_ip=s_ip_source_ip_list[k],
ip_dest_ip=s_ip_dest_ip_list[k],
udp_source_port=s_udp_source_port_list[k],
udp_dest_port=s_udp_dest_port_list[k],
udp_length=s_udp_length_list[k],
udp_checksum=s_udp_checksum_list[k],
udp_payload_tdata=s_udp_payload_axis_tdata_list[k],
udp_payload_tkeep=s_udp_payload_axis_tkeep_list[k],
udp_payload_tvalid=s_udp_payload_axis_tvalid_list[k],
udp_payload_tready=s_udp_payload_axis_tready_list[k],
udp_payload_tlast=s_udp_payload_axis_tlast_list[k],
udp_payload_tuser=s_udp_payload_axis_tuser_list[k],
pause=p,
name='source_%d' % k
))
sink = udp_ep.UDPFrameSink()
sink_logic = sink.create_logic(
clk,
rst,
udp_hdr_ready=m_udp_hdr_ready,
udp_hdr_valid=m_udp_hdr_valid,
eth_dest_mac=m_eth_dest_mac,
eth_src_mac=m_eth_src_mac,
eth_type=m_eth_type,
ip_version=m_ip_version,
ip_ihl=m_ip_ihl,
ip_dscp=m_ip_dscp,
ip_ecn=m_ip_ecn,
ip_length=m_ip_length,
ip_identification=m_ip_identification,
ip_flags=m_ip_flags,
ip_fragment_offset=m_ip_fragment_offset,
ip_ttl=m_ip_ttl,
ip_protocol=m_ip_protocol,
ip_header_checksum=m_ip_header_checksum,
ip_source_ip=m_ip_source_ip,
ip_dest_ip=m_ip_dest_ip,
udp_source_port=m_udp_source_port,
udp_dest_port=m_udp_dest_port,
udp_length=m_udp_length,
udp_checksum=m_udp_checksum,
udp_payload_tdata=m_udp_payload_axis_tdata,
udp_payload_tkeep=m_udp_payload_axis_tkeep,
udp_payload_tvalid=m_udp_payload_axis_tvalid,
udp_payload_tready=m_udp_payload_axis_tready,
udp_payload_tlast=m_udp_payload_axis_tlast,
udp_payload_tuser=m_udp_payload_axis_tuser,
pause=sink_pause,
name='sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_udp_hdr_valid=s_udp_hdr_valid,
s_udp_hdr_ready=s_udp_hdr_ready,
s_eth_dest_mac=s_eth_dest_mac,
s_eth_src_mac=s_eth_src_mac,
s_eth_type=s_eth_type,
s_ip_version=s_ip_version,
s_ip_ihl=s_ip_ihl,
s_ip_dscp=s_ip_dscp,
s_ip_ecn=s_ip_ecn,
s_ip_length=s_ip_length,
s_ip_identification=s_ip_identification,
s_ip_flags=s_ip_flags,
s_ip_fragment_offset=s_ip_fragment_offset,
s_ip_ttl=s_ip_ttl,
s_ip_protocol=s_ip_protocol,
s_ip_header_checksum=s_ip_header_checksum,
s_ip_source_ip=s_ip_source_ip,
s_ip_dest_ip=s_ip_dest_ip,
s_udp_source_port=s_udp_source_port,
s_udp_dest_port=s_udp_dest_port,
s_udp_length=s_udp_length,
s_udp_checksum=s_udp_checksum,
s_udp_payload_axis_tdata=s_udp_payload_axis_tdata,
s_udp_payload_axis_tkeep=s_udp_payload_axis_tkeep,
s_udp_payload_axis_tvalid=s_udp_payload_axis_tvalid,
s_udp_payload_axis_tready=s_udp_payload_axis_tready,
s_udp_payload_axis_tlast=s_udp_payload_axis_tlast,
s_udp_payload_axis_tid=s_udp_payload_axis_tid,
s_udp_payload_axis_tdest=s_udp_payload_axis_tdest,
s_udp_payload_axis_tuser=s_udp_payload_axis_tuser,
m_udp_hdr_valid=m_udp_hdr_valid,
m_udp_hdr_ready=m_udp_hdr_ready,
m_eth_dest_mac=m_eth_dest_mac,
m_eth_src_mac=m_eth_src_mac,
m_eth_type=m_eth_type,
m_ip_version=m_ip_version,
m_ip_ihl=m_ip_ihl,
m_ip_dscp=m_ip_dscp,
m_ip_ecn=m_ip_ecn,
m_ip_length=m_ip_length,
m_ip_identification=m_ip_identification,
m_ip_flags=m_ip_flags,
m_ip_fragment_offset=m_ip_fragment_offset,
m_ip_ttl=m_ip_ttl,
m_ip_protocol=m_ip_protocol,
m_ip_header_checksum=m_ip_header_checksum,
m_ip_source_ip=m_ip_source_ip,
m_ip_dest_ip=m_ip_dest_ip,
m_udp_source_port=m_udp_source_port,
m_udp_dest_port=m_udp_dest_port,
m_udp_length=m_udp_length,
m_udp_checksum=m_udp_checksum,
m_udp_payload_axis_tdata=m_udp_payload_axis_tdata,
m_udp_payload_axis_tkeep=m_udp_payload_axis_tkeep,
m_udp_payload_axis_tvalid=m_udp_payload_axis_tvalid,
m_udp_payload_axis_tready=m_udp_payload_axis_tready,
m_udp_payload_axis_tlast=m_udp_payload_axis_tlast,
m_udp_payload_axis_tid=m_udp_payload_axis_tid,
m_udp_payload_axis_tdest=m_udp_payload_axis_tdest,
m_udp_payload_axis_tuser=m_udp_payload_axis_tuser
)
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
yield clk.posedge
yield clk.posedge
print("test 1: port 0")
current_test.next = 1
test_frame = udp_ep.UDPFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.ip_version = 4
test_frame.ip_ihl = 5
test_frame.ip_dscp = 0
test_frame.ip_ecn = 0
test_frame.ip_length = None
test_frame.ip_identification = 0
test_frame.ip_flags = 2
test_frame.ip_fragment_offset = 0
test_frame.ip_ttl = 64
test_frame.ip_protocol = 0x11
test_frame.ip_header_checksum = None
test_frame.ip_source_ip = 0xc0a80165
test_frame.ip_dest_ip = 0xc0a80164
test_frame.udp_source_port = 1
test_frame.udp_dest_port = 2
test_frame.udp_length = None
test_frame.udp_checksum = None
test_frame.payload = bytearray(range(32))
test_frame.build()
source_list[0].send(test_frame)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 2: port 1")
current_test.next = 2
test_frame = udp_ep.UDPFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x8000
test_frame.ip_version = 4
test_frame.ip_ihl = 5
test_frame.ip_dscp = 0
test_frame.ip_ecn = 0
test_frame.ip_length = None
test_frame.ip_identification = 0
test_frame.ip_flags = 2
test_frame.ip_fragment_offset = 0
test_frame.ip_ttl = 64
test_frame.ip_protocol = 0x11
test_frame.ip_header_checksum = None
test_frame.ip_source_ip = 0xc0a80165
test_frame.ip_dest_ip = 0xc0a80164
test_frame.udp_source_port = 1
test_frame.udp_dest_port = 2
test_frame.udp_length = None
test_frame.udp_checksum = None
test_frame.payload = bytearray(range(32))
test_frame.build()
source_list[1].send(test_frame)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame
yield delay(100)
yield clk.posedge
print("test 3: back-to-back packets, same port")
current_test.next = 3
test_frame1 = udp_ep.UDPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A5152535455
test_frame1.eth_type = 0x8000
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_dscp = 0
test_frame1.ip_ecn = 0
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80165
test_frame1.ip_dest_ip = 0xc0a80164
test_frame1.udp_source_port = 1
test_frame1.udp_dest_port = 2
test_frame1.udp_length = None
test_frame1.udp_checksum = None
test_frame1.payload = bytearray(range(32))
test_frame1.build()
test_frame2 = udp_ep.UDPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A5152535455
test_frame2.eth_type = 0x8000
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_dscp = 0
test_frame2.ip_ecn = 0
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80165
test_frame2.ip_dest_ip = 0xc0a80164
test_frame2.udp_source_port = 1
test_frame2.udp_dest_port = 2
test_frame2.udp_length = None
test_frame2.udp_checksum = None
test_frame2.payload = bytearray(range(32))
test_frame2.build()
source_list[0].send(test_frame1)
source_list[0].send(test_frame2)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame1
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 4: back-to-back packets, different ports")
current_test.next = 4
test_frame1 = udp_ep.UDPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A0152535455
test_frame1.eth_type = 0x8000
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_dscp = 0
test_frame1.ip_ecn = 0
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80165
test_frame1.ip_dest_ip = 0xc0a80164
test_frame1.udp_source_port = 1
test_frame1.udp_dest_port = 2
test_frame1.udp_length = None
test_frame1.udp_checksum = None
test_frame1.payload = bytearray(range(32))
test_frame1.build()
test_frame2 = udp_ep.UDPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A0252535455
test_frame2.eth_type = 0x8000
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_dscp = 0
test_frame2.ip_ecn = 0
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80165
test_frame2.ip_dest_ip = 0xc0a80164
test_frame2.udp_source_port = 1
test_frame2.udp_dest_port = 2
test_frame2.udp_length = None
test_frame2.udp_checksum = None
test_frame2.payload = bytearray(range(32))
test_frame2.build()
source_list[1].send(test_frame1)
source_list[2].send(test_frame2)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame1
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 5: alterate pause source")
current_test.next = 5
test_frame1 = udp_ep.UDPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A0152535455
test_frame1.eth_type = 0x8000
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_dscp = 0
test_frame1.ip_ecn = 0
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80165
test_frame1.ip_dest_ip = 0xc0a80164
test_frame1.udp_source_port = 1
test_frame1.udp_dest_port = 2
test_frame1.udp_length = None
test_frame1.udp_checksum = None
test_frame1.payload = bytearray(range(32))
test_frame1.build()
test_frame2 = udp_ep.UDPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A0252535455
test_frame2.eth_type = 0x8000
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_dscp = 0
test_frame2.ip_ecn = 0
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80165
test_frame2.ip_dest_ip = 0xc0a80164
test_frame2.udp_source_port = 1
test_frame2.udp_dest_port = 2
test_frame2.udp_length = None
test_frame2.udp_checksum = None
test_frame2.payload = bytearray(range(32))
test_frame2.build()
source_list[1].send(test_frame1)
source_list[2].send(test_frame2)
yield clk.posedge
yield clk.posedge
while s_udp_payload_axis_tvalid:
yield clk.posedge
yield clk.posedge
for k in range(S_COUNT):
source_pause_list[k].next = False
yield clk.posedge
for k in range(S_COUNT):
source_pause_list[k].next = True
yield clk.posedge
for k in range(S_COUNT):
source_pause_list[k].next = False
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame1
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 6: alterate pause sink")
current_test.next = 6
test_frame1 = udp_ep.UDPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A0152535455
test_frame1.eth_type = 0x8000
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_dscp = 0
test_frame1.ip_ecn = 0
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80165
test_frame1.ip_dest_ip = 0xc0a80164
test_frame1.udp_source_port = 1
test_frame1.udp_dest_port = 2
test_frame1.udp_length = None
test_frame1.udp_checksum = None
test_frame1.payload = bytearray(range(32))
test_frame1.build()
test_frame2 = udp_ep.UDPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A0252535455
test_frame2.eth_type = 0x8000
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_dscp = 0
test_frame2.ip_ecn = 0
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80165
test_frame2.ip_dest_ip = 0xc0a80164
test_frame2.udp_source_port = 1
test_frame2.udp_dest_port = 2
test_frame2.udp_length = None
test_frame2.udp_checksum = None
test_frame2.payload = bytearray(range(32))
test_frame2.build()
source_list[1].send(test_frame1)
source_list[2].send(test_frame2)
yield clk.posedge
yield clk.posedge
while s_udp_payload_axis_tvalid:
sink_pause.next = True
yield clk.posedge
yield clk.posedge
yield clk.posedge
sink_pause.next = False
yield clk.posedge
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame1
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame2
yield delay(100)
yield clk.posedge
print("test 7: back-to-back packets, different ports, arbitration test")
current_test.next = 7
test_frame1 = udp_ep.UDPFrame()
test_frame1.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame1.eth_src_mac = 0x5A0152535455
test_frame1.eth_type = 0x8000
test_frame1.ip_version = 4
test_frame1.ip_ihl = 5
test_frame1.ip_dscp = 0
test_frame1.ip_ecn = 0
test_frame1.ip_length = None
test_frame1.ip_identification = 0
test_frame1.ip_flags = 2
test_frame1.ip_fragment_offset = 0
test_frame1.ip_ttl = 64
test_frame1.ip_protocol = 0x11
test_frame1.ip_header_checksum = None
test_frame1.ip_source_ip = 0xc0a80165
test_frame1.ip_dest_ip = 0xc0a80164
test_frame1.udp_source_port = 1
test_frame1.udp_dest_port = 2
test_frame1.udp_length = None
test_frame1.udp_checksum = None
test_frame1.payload = bytearray(range(32))
test_frame1.build()
test_frame2 = udp_ep.UDPFrame()
test_frame2.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame2.eth_src_mac = 0x5A0252535455
test_frame2.eth_type = 0x8000
test_frame2.ip_version = 4
test_frame2.ip_ihl = 5
test_frame2.ip_dscp = 0
test_frame2.ip_ecn = 0
test_frame2.ip_length = None
test_frame2.ip_identification = 0
test_frame2.ip_flags = 2
test_frame2.ip_fragment_offset = 0
test_frame2.ip_ttl = 64
test_frame2.ip_protocol = 0x11
test_frame2.ip_header_checksum = None
test_frame2.ip_source_ip = 0xc0a80165
test_frame2.ip_dest_ip = 0xc0a80164
test_frame2.udp_source_port = 1
test_frame2.udp_dest_port = 2
test_frame2.udp_length = None
test_frame2.udp_checksum = None
test_frame2.payload = bytearray(range(32))
test_frame2.build()
source_list[1].send(test_frame1)
source_list[2].send(test_frame2)
source_list[2].send(test_frame2)
source_list[2].send(test_frame2)
source_list[2].send(test_frame2)
source_list[2].send(test_frame2)
yield clk.posedge
yield delay(800)
yield clk.posedge
source_list[1].send(test_frame1)
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame1
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame2
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame2
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame2
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame1
yield sink.wait()
rx_frame = sink.recv()
assert rx_frame == test_frame2
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
|
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Optimizers(update equation) for SGD method.
TODO(yuyang18): Complete comments.
"""
import paddle.trainer_config_helpers.config_parser_utils as config_parser_utils
import paddle.trainer_config_helpers.optimizers as v1_optimizers
from paddle.proto.OptimizerConfig_pb2 import OptimizerConfig
__all__ = [
'Momentum', 'Adam', 'Adamax', 'AdaGrad', 'DecayedAdaGrad', 'AdaDelta',
'RMSProp', 'ModelAverage', 'L2Regularization'
]
class Optimizer(object):
def __init__(self, **kwargs):
import py_paddle.swig_paddle as swig_api
if 'batch_size' in kwargs:
del kwargs['batch_size'] # not important for python library.
def __impl__():
v1_optimizers.settings(batch_size=1, **kwargs)
self.__opt_conf_proto__ = config_parser_utils.parse_optimizer_config(
__impl__)
self.__opt_conf__ = swig_api.OptimizationConfig.createFromProto(
self.__opt_conf_proto__)
def enable_types(self):
"""
get enable_types for each optimizer.
enable_types = [value, gradient, momentum, etc]
For each optimizer(SGD, Adam), GradientMachine should enable different
buffers.
"""
import py_paddle.swig_paddle as swig_api
tmp = swig_api.ParameterOptimizer.create(self.__opt_conf__)
assert isinstance(tmp, swig_api.ParameterOptimizer)
return tmp.getParameterTypes()
def __create_local_updater__(self):
import py_paddle.swig_paddle as swig_api
return swig_api.ParameterUpdater.createLocalUpdater(self.__opt_conf__)
def __create_remote_updater__(self, pass_num, use_sparse_updater):
import py_paddle.swig_paddle as swig_api
return swig_api.ParameterUpdater.createRemoteUpdater(
self.__opt_conf__, pass_num, use_sparse_updater)
def __create_new_remote_updater__(self, pserver_spec, use_etcd):
import py_paddle.swig_paddle as swig_api
return swig_api.ParameterUpdater.createNewRemoteUpdater(
self.__opt_conf__, pserver_spec, use_etcd)
def create_updater(self, is_local, num_passes, use_sparse_updater,
pserver_spec, use_etcd):
"""
create proper parameter_updater by configuration.
:param is_local: create local or remote parameter updater
:param num_passes: remote parameter updater will use this to config
parameter server.
:param use_sparse_updater: when using a remote updater, if some parameter is
sparse, the updater should do some extra work:
.. code-block:: python
if use_sparse_remote_updater:
gradient_machine.prefetch(in_args)
parameter_updater.getParametersRemote()
:param pserver_spec: pserver location, e.g. localhost:3000; if etcd is used,
pserver_spec should be the etcd endpoints, e.g. http://localhost:2379
:return: parameter_updater
"""
if is_local:
parameter_updater = self.__create_local_updater__()
else:
if pserver_spec is None:
parameter_updater = self.__create_remote_updater__(
num_passes, use_sparse_updater)
else:
parameter_updater = self.__create_new_remote_updater__(
pserver_spec, use_etcd)
return parameter_updater
class Momentum(Optimizer):
"""
SGD Optimizer (with momentum).
SGD is an optimization method that tries to find neural network parameters
minimizing the network's "cost/error" by iteration. In Paddle's implementation
the SGD optimizer is synchronized: all gradients are computed and reduced into
one gradient before the optimization step is applied.
The neural network considers the learning problem of minimizing an objective
function that has the form of a sum
.. math::
Q(w) = \\sum_{i}^{n} Q_i(w)
The value of the function Q is typically the cost of the neural network (for
example, the Mean Square Error between prediction and label). The function Q
is parametrised by w, the weights/biases of the neural network, which are what
is learned. The index i denotes the i-th observation in the (training) data.
The SGD method then updates the weights by
.. math::
w = w - \\eta \\nabla Q(w) = w - \\eta \\sum_{i}^{n} \\nabla Q_i(w)
where :math:`\\eta` is the learning rate and :math:`n` is the batch size.
"""
def __init__(self, momentum=None, sparse=False, **kwargs):
learning_method = v1_optimizers.MomentumOptimizer(
momentum=momentum, sparse=sparse)
super(Momentum, self).__init__(
learning_method=learning_method, **kwargs)
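# A minimal v2-style usage sketch (paddle.v2 API names assumed, not verified against
# this exact revision):
#   optimizer = paddle.optimizer.Momentum(momentum=0.9, learning_rate=1e-3)
#   trainer = paddle.trainer.SGD(cost=cost, parameters=parameters,
#                                update_equation=optimizer)
# i.e. these Optimizer subclasses are passed to the trainer as its update equation.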
class Adam(Optimizer):
"""
Adam optimizer.
For details, please refer to `Adam: A Method for Stochastic Optimization
<https://arxiv.org/abs/1412.6980>`_.
.. math::
m(w, t) & = \\beta_1 m(w, t-1) + (1 - \\beta_1) \\nabla Q_i(w) \\\\
v(w, t) & = \\beta_2 v(w, t-1) + (1 - \\beta_2)(\\nabla Q_i(w)) ^2 \\\\
w & = w - \\frac{\\eta m(w, t)}{\\sqrt{v(w,t) + \\epsilon}}
:param beta1: the :math:`\\beta_1` in equation.
:type beta1: float
:param beta2: the :math:`\\beta_2` in equation.
:type beta2: float
:param epsilon: the :math:`\\epsilon` in equation. It is used to prevent
divided by zero.
:type epsilon: float
"""
def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-8, **kwargs):
learning_method = v1_optimizers.AdamOptimizer(
beta1=beta1, beta2=beta2, epsilon=epsilon)
super(Adam, self).__init__(learning_method=learning_method, **kwargs)
class Adamax(Optimizer):
"""
Adamax optimizer.
For details, please refer to `Adam: A Method for Stochastic Optimization
<https://arxiv.org/abs/1412.6980>`_.
.. math::
m_t & = \\beta_1 * m_{t-1} + (1-\\beta_1)* \\nabla Q_i(w) \\\\
u_t & = max(\\beta_2*u_{t-1}, abs(\\nabla Q_i(w))) \\\\
w_t & = w_{t-1} - (\\eta/(1-\\beta_1^t))*m_t/u_t
:param beta1: the :math:`\\beta_1` in the equation.
:type beta1: float
:param beta2: the :math:`\\beta_2` in the equation.
:type beta2: float
"""
def __init__(self, beta1=0.9, beta2=0.999, **kwargs):
learning_method = v1_optimizers.AdamaxOptimizer(
beta1=beta1, beta2=beta2)
super(Adamax, self).__init__(learning_method=learning_method, **kwargs)
class AdaGrad(Optimizer):
"""
Adagrad(for ADAptive GRAdient algorithm) optimizer.
For details, please refer to `Adaptive Subgradient Methods for
Online Learning and Stochastic Optimization
<http://www.magicbroom.info/Papers/DuchiHaSi10.pdf>`_.
.. math::
G &= \\sum_{\\tau=1}^{t} g_{\\tau} g_{\\tau}^T \\\\
w & = w - \\eta diag(G)^{-\\frac{1}{2}} \\circ g
"""
def __init__(self, **kwargs):
learning_method = v1_optimizers.AdaGradOptimizer()
super(AdaGrad, self).__init__(learning_method=learning_method, **kwargs)
class DecayedAdaGrad(Optimizer):
"""
AdaGrad method with a decayed sum of squared gradients. The equations of this
method are as follows.
.. math::
E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2 \\\\
learning\\_rate &= 1/sqrt( E(g_t^2) + \\epsilon )
:param rho: The :math:`\\rho` parameter in that equation
:type rho: float
:param epsilon: The :math:`\\epsilon` parameter in that equation.
:type epsilon: float
"""
def __init__(self, rho=0.95, epsilon=1e-06, **kwargs):
learning_method = v1_optimizers.DecayedAdaGradOptimizer(
rho=rho, epsilon=epsilon)
super(DecayedAdaGrad, self).__init__(
learning_method=learning_method, **kwargs)
class AdaDelta(Optimizer):
"""
AdaDelta method. For details of AdaDelta, please refer to
`ADADELTA: AN ADAPTIVE LEARNING RATE METHOD
<http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf>`_.
.. math::
E(g_t^2) &= \\rho * E(g_{t-1}^2) + (1-\\rho) * g^2 \\\\
learning\\_rate &= sqrt( ( E(dx_{t-1}^2) + \\epsilon ) / ( \\
E(g_t^2) + \\epsilon ) ) \\\\
E(dx_t^2) &= \\rho * E(dx_{t-1}^2) + (1-\\rho) * (-g*learning\\_rate)^2
:param rho: :math:`\\rho` in equation
:type rho: float
:param epsilon: :math:`\\epsilon` in equation
:type epsilon: float
"""
def __init__(self, rho=0.95, epsilon=1e-06, **kwargs):
learning_method = v1_optimizers.AdaDeltaOptimizer(
rho=rho, epsilon=epsilon)
super(AdaDelta, self).__init__(
learning_method=learning_method, **kwargs)
class RMSProp(Optimizer):
"""
RMSProp (Root Mean Square Propagation) optimizer. For details, please
refer to this `slide <http://www.cs.toronto.edu/~tijmen/csc321/slides/
lecture_slides_lec6.pdf>`_.
The equations of this method are as follows:
.. math::
v(w, t) & = \\rho v(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 \\\\
w & = w - \\frac{\\eta} {\\sqrt{v(w,t) + \\epsilon}} \\nabla Q_{i}(w)
:param rho: the :math:`\\rho` in the equation. The forgetting factor.
:type rho: float
:param epsilon: the :math:`\\epsilon` in the equation.
:type epsilon: float
"""
def __init__(self, rho=0.95, epsilon=1e-6, **kwargs):
learning_method = v1_optimizers.RMSPropOptimizer(
rho=rho, epsilon=epsilon)
super(RMSProp, self).__init__(learning_method=learning_method, **kwargs)
ModelAverage = v1_optimizers.ModelAverage
L2Regularization = v1_optimizers.L2Regularization
if __name__ == '__main__':
import py_paddle.swig_paddle as swig_api
swig_api.initPaddle('--use_gpu=false')
for opt in [
Momentum(), Adam(), Adamax(), AdaGrad(), DecayedAdaGrad(),
AdaDelta(), RMSProp(), Adam(
model_average=ModelAverage(average_window=0.5),
regularization=L2Regularization(rate=0.5),
gradient_clipping_threshold=25)
]:
print opt, opt.enable_types()
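# ---------------------------------------------------------------------------
# Illustrative sketch only, not part of the Paddle v2 API: plain-NumPy
# versions of the SGD-with-momentum and Adam updates discussed in the
# docstrings above. The momentum form below is the common velocity
# formulation (the docstring only shows plain SGD), and the Adam
# bias-correction terms are omitted to match the docstring equations;
# hyper-parameter defaults mirror the constructor arguments.
import numpy as np


def momentum_sgd_step(w, grad, velocity, eta=0.01, mu=0.9):
    """One SGD-with-momentum step on a NumPy weight array."""
    velocity = mu * velocity - eta * grad
    return w + velocity, velocity


def adam_step(w, grad, m, v, eta=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8):
    """One Adam step (bias correction omitted) on a NumPy weight array."""
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad ** 2
    return w - eta * m / (np.sqrt(v) + epsilon), m, v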
|
|
# Copyright (C) Ivan Kravets <me@ikravets.com>
# See LICENSE for details.
import re
from imp import load_source
from os import listdir
from os.path import isdir, isfile, join
import click
from platformio import exception, util
from platformio.app import get_state_item, set_state_item
from platformio.pkgmanager import PackageManager
PLATFORM_PACKAGES = {
"framework-arduinoavr": [
("Arduino Wiring-based Framework (AVR Core, 1.6)",
"http://arduino.cc/en/Reference/HomePage")
],
"framework-arduinosam": [
("Arduino Wiring-based Framework (SAM Core, 1.6)",
"http://arduino.cc/en/Reference/HomePage")
],
"framework-arduinoteensy": [
("Arduino Wiring-based Framework",
"http://arduino.cc/en/Reference/HomePage")
],
"framework-arduinomsp430": [
("Arduino Wiring-based Framework (MSP430 Core)",
"http://arduino.cc/en/Reference/HomePage")
],
"framework-arduinoespressif": [
("Arduino Wiring-based Framework (ESP8266 Core)",
"https://github.com/esp8266/Arduino")
],
"framework-energiamsp430": [
("Energia Wiring-based Framework (MSP430 Core)",
"http://energia.nu/reference/")
],
"framework-energiativa": [
("Energia Wiring-based Framework (LM4F Core)",
"http://energia.nu/reference/")
],
"framework-cmsis": [
("Vendor-independent hardware abstraction layer for the Cortex-M "
"processor series",
"http://www.arm.com/products/processors/"
"cortex-m/cortex-microcontroller-software-interface-standard.php")
],
"framework-spl": [
("Standard Peripheral Library for STM32 MCUs",
"http://www.st.com"
"/web/catalog/tools/FM147/CL1794/SC961/SS1743/PF257890")
],
"framework-libopencm3": [
("libOpenCM3 Framework", "http://www.libopencm3.org/")
],
"framework-mbed": [
("mbed Framework", "http://mbed.org")
],
"sdk-esp8266": [
("ESP8266 SDK", "http://bbs.espressif.com")
],
"ldscripts": [
("Linker Scripts",
"https://sourceware.org/binutils/docs/ld/Scripts.html")
],
"toolchain-atmelavr": [
("avr-gcc", "https://gcc.gnu.org/wiki/avr-gcc"),
("GDB", "http://www.gnu.org/software/gdb/"),
("AVaRICE", "http://avarice.sourceforge.net/"),
("SimulAVR", "http://www.nongnu.org/simulavr/")
],
"toolchain-gccarmnoneeabi": [
("gcc-arm-embedded", "https://launchpad.net/gcc-arm-embedded"),
("GDB", "http://www.gnu.org/software/gdb/")
],
"toolchain-xtensa": [
("xtensa-gcc", "https://github.com/jcmvbkbc/gcc-xtensa"),
("GDB", "http://www.gnu.org/software/gdb/")
],
"toolchain-timsp430": [
("msp-gcc", "http://sourceforge.net/projects/mspgcc/"),
("GDB", "http://www.gnu.org/software/gdb/")
],
"tool-avrdude": [
("AVRDUDE", "http://www.nongnu.org/avrdude/")
],
"tool-micronucleus": [
("Micronucleus", "https://github.com/micronucleus/micronucleus")
],
"tool-bossac": [
("BOSSA CLI", "https://sourceforge.net/projects/b-o-s-s-a/")
],
"tool-stlink": [
("ST-Link", "https://github.com/texane/stlink")
],
"tool-teensy": [
("Teensy Loader", "https://www.pjrc.com/teensy/loader.html")
],
"tool-lm4flash": [
("Flash Programmer", "http://www.ti.com/tool/lmflashprogrammer")
],
"tool-mspdebug": [
("MSPDebug", "http://mspdebug.sourceforge.net/")
],
"tool-esptool": [
("esptool-ck", "https://github.com/igrr/esptool-ck")
]
}
def get_packages():
return PLATFORM_PACKAGES
class PlatformFactory(object):
@staticmethod
def get_clsname(type_):
return "%s%sPlatform" % (type_.upper()[0], type_.lower()[1:])
@staticmethod
def load_module(type_, path):
module = None
try:
module = load_source(
"platformio.platforms.%s" % type_, path)
except ImportError:
raise exception.UnknownPlatform(type_)
return module
@classmethod
@util.memoized
def _lookup_platforms(cls):
platforms = {}
for d in (util.get_home_dir(), util.get_source_dir()):
pdir = join(d, "platforms")
if not isdir(pdir):
continue
for p in sorted(listdir(pdir)):
if (p in ("__init__.py", "base.py") or not
p.endswith(".py")):
continue
type_ = p[:-3]
path = join(pdir, p)
try:
isplatform = hasattr(
cls.load_module(type_, path),
cls.get_clsname(type_)
)
if isplatform:
platforms[type_] = path
except exception.UnknownPlatform:
pass
return platforms
@classmethod
def get_platforms(cls, installed=False):
platforms = cls._lookup_platforms()
if not installed:
return platforms
installed_platforms = {}
for type_ in get_state_item("installed_platforms", []):
if type_ in platforms:
installed_platforms[type_] = platforms[type_]
return installed_platforms
@classmethod
def newPlatform(cls, type_):
platforms = cls.get_platforms()
if type_ not in platforms:
raise exception.UnknownPlatform(type_)
_instance = getattr(
cls.load_module(type_, platforms[type_]),
cls.get_clsname(type_)
)()
assert isinstance(_instance, BasePlatform)
return _instance
class BasePlatform(object):
PACKAGES = {}
LINE_ERROR_RE = re.compile(r"(\s+error|error[:\s]+)", re.I)
def __init__(self):
self._found_error = False
self._last_echo_line = None
# 1 = errors
# 2 = 1 + warnings
# 3 = 2 + others
self._verbose_level = 3
def get_type(self):
return self.__class__.__name__[:-8].lower()
def get_name(self):
return self.get_type().title()
def get_build_script(self):
builtin = join(util.get_source_dir(), "builder", "scripts", "%s.py" %
self.get_type())
if isfile(builtin):
return builtin
raise NotImplementedError()
def get_description(self):
if self.__doc__:
doclines = [l.strip() for l in self.__doc__.splitlines() if
l.strip()]
return " ".join(doclines[:-1]).strip()
else:
raise NotImplementedError()
def get_vendor_url(self):
if self.__doc__ and "http" in self.__doc__:
return self.__doc__[self.__doc__.index("http"):].strip()
else:
raise NotImplementedError()
def get_packages(self):
return self.PACKAGES
def get_pkg_alias(self, pkgname):
return self.PACKAGES[pkgname].get("alias", None)
def pkg_aliases_to_names(self, aliases):
names = []
for alias in aliases:
name = alias
# lookup by package aliases
for _name, _opts in self.get_packages().items():
if _opts.get("alias", None) == alias:
name = _name
break
names.append(name)
return names
def get_default_packages(self):
return [k for k, v in self.get_packages().items()
if v.get("default", False)]
def get_installed_packages(self):
pm = PackageManager()
return [n for n in self.get_packages().keys() if pm.is_installed(n)]
def install(self, with_packages, without_packages, skip_default_packages):
with_packages = set(self.pkg_aliases_to_names(with_packages))
without_packages = set(self.pkg_aliases_to_names(without_packages))
upkgs = with_packages | without_packages
ppkgs = set(self.get_packages().keys())
if not upkgs.issubset(ppkgs):
raise exception.UnknownPackage(", ".join(upkgs - ppkgs))
requirements = []
for name, opts in self.get_packages().items():
if name in without_packages:
continue
elif (name in with_packages or (not skip_default_packages and
opts['default'])):
requirements.append(name)
pm = PackageManager()
for name in requirements:
pm.install(name)
# register installed platform
data = get_state_item("installed_platforms", [])
if self.get_type() not in data:
data.append(self.get_type())
set_state_item("installed_platforms", data)
return len(requirements)
def uninstall(self):
platform = self.get_type()
installed_platforms = PlatformFactory.get_platforms(
installed=True).keys()
if platform not in installed_platforms:
raise exception.PlatformNotInstalledYet(platform)
deppkgs = set()
for item in installed_platforms:
if item == platform:
continue
p = PlatformFactory.newPlatform(item)
deppkgs = deppkgs.union(set(p.get_packages().keys()))
pm = PackageManager()
for name in self.get_packages().keys():
if not pm.is_installed(name) or name in deppkgs:
continue
pm.uninstall(name)
# unregister installed platform
installed_platforms.remove(platform)
set_state_item("installed_platforms", installed_platforms)
return True
def update(self):
pm = PackageManager()
for name in self.get_installed_packages():
pm.update(name)
def is_outdated(self):
pm = PackageManager()
obsolated = pm.get_outdated()
return not set(self.get_packages().keys()).isdisjoint(set(obsolated))
def run(self, variables, targets, verbose):
assert isinstance(variables, list)
assert isinstance(targets, list)
self._verbose_level = int(verbose)
installed_platforms = PlatformFactory.get_platforms(
installed=True).keys()
installed_packages = PackageManager.get_installed()
if self.get_type() not in installed_platforms:
raise exception.PlatformNotInstalledYet(self.get_type())
if "clean" in targets:
targets.remove("clean")
targets.append("-c")
if not any([v.startswith("BUILD_SCRIPT=") for v in variables]):
variables.append("BUILD_SCRIPT=%s" % self.get_build_script())
for v in variables:
if not v.startswith("BUILD_SCRIPT="):
continue
_, path = v.split("=", 2)
if not isfile(path):
raise exception.BuildScriptNotFound(path)
# append aliases of the installed packages
for name, options in self.get_packages().items():
if "alias" not in options or name not in installed_packages:
continue
variables.append(
"PIOPACKAGE_%s=%s" % (options['alias'].upper(), name))
self._found_error = False
try:
result = util.exec_command(
[
"scons",
"-Q",
"-f", join(util.get_source_dir(), "builder", "main.py")
] + variables + targets,
stdout=util.AsyncPipe(self.on_run_out),
stderr=util.AsyncPipe(self.on_run_err)
)
except OSError:
raise exception.SConsNotInstalled()
assert "returncode" in result
if self._found_error:
result['returncode'] = 1
if self._last_echo_line == ".":
click.echo("")
return result
def on_run_out(self, line):
self._echo_line(line, level=3)
def on_run_err(self, line):
is_error = self.LINE_ERROR_RE.search(line) is not None
if is_error:
self._found_error = True
self._echo_line(line, level=1 if is_error else 2)
def _echo_line(self, line, level):
assert 1 <= level <= 3
fg = ("red", "yellow", None)[level - 1]
if level == 3 and "is up to date" in line:
fg = "green"
if level > self._verbose_level:
click.secho(".", fg=fg, err=level < 3, nl=False)
self._last_echo_line = "."
return
if self._last_echo_line == ".":
click.echo("")
self._last_echo_line = line
click.secho(line, fg=fg, err=level < 3)
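# ---------------------------------------------------------------------------
# Illustrative sketch only (the board name, docstring and package choice are
# made up): a minimal platform definition of the shape PlatformFactory
# expects. Saved as platforms/myboard.py, _lookup_platforms() would discover
# it because the module defines a class named MyboardPlatform, which is the
# name produced by PlatformFactory.get_clsname("myboard").
class MyboardPlatform(BasePlatform):

    """
    My custom development board

    http://example.com
    """

    PACKAGES = {
        "toolchain-atmelavr": {
            "alias": "toolchain",
            "default": True
        }
    }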
|
|
import subprocess
import unittest2 as unittest
from gevent.queue import Queue, Empty
from mock import Mock, call, patch
from pytz import utc
from job_runner_worker.worker import (
execute_run, kill_run, _get_child_pids, _truncate_log
)
class ModuleTestCase(unittest.TestCase):
"""
Tests for :mod:`job_runner_worker.worker`.
"""
@patch('job_runner_worker.worker.subprocess', subprocess)
@patch('job_runner_worker.worker.RunLog')
@patch('job_runner_worker.worker.datetime')
@patch('job_runner_worker.worker.config')
def test_execute_run(self, config, datetime, RunLog):
"""
Test :func:`.execute_run`.
"""
config.get.return_value = '/tmp'
run = Mock()
run.run_log = None
run.id = 1234
run.job.script_content = (
u'#!/usr/bin/env bash\n\necho "H\xe9llo World!";\n')
event_queue = Mock()
exit_queue = Mock()
run_queue = Queue()
run_queue.put(run)
exit_queue_return = [Empty, None]
def exit_queue_side_effect(*args, **kwargs):
value = exit_queue_return.pop(0)
if callable(value):
raise value()
exit_queue.get.side_effect = exit_queue_side_effect
execute_run(run_queue, event_queue, exit_queue)
dts = datetime.now.return_value.isoformat.return_value
self.assertTrue('pid' in run.patch.call_args_list[1][0][0])
self.assertEqual(dts, run.patch.call_args_list[0][0][0]['start_dts'])
self.assertEqual(
u'H\xe9llo World!\n'.encode('utf-8'),
RunLog.return_value.post.call_args_list[0][0][0]['content']
)
self.assertEqual([
call({
'return_dts': dts,
'return_success': True,
})
], run.patch.call_args_list[2:])
self.assertEqual([
call('{"kind": "run", "event": "started", "run_id": 1234}'),
call('{"kind": "run", "event": "returned", "run_id": 1234}'),
], event_queue.put.call_args_list)
datetime.now.assert_called_with(utc)
@patch('job_runner_worker.worker.subprocess', subprocess)
@patch('job_runner_worker.worker.datetime')
@patch('job_runner_worker.worker.config')
def test_execute_run_with_log(self, config, datetime):
"""
Test :func:`.execute_run` with existing log.
"""
config.get.return_value = '/tmp'
run = Mock()
run.id = 1234
run.job.script_content = (
u'#!/usr/bin/env bash\n\necho "H\xe9llo World!";\n')
event_queue = Mock()
exit_queue = Mock()
run_queue = Queue()
run_queue.put(run)
exit_queue_return = [Empty, None]
def exit_queue_side_effect(*args, **kwargs):
value = exit_queue_return.pop(0)
if callable(value):
raise value()
exit_queue.get.side_effect = exit_queue_side_effect
execute_run(run_queue, event_queue, exit_queue)
dts = datetime.now.return_value.isoformat.return_value
self.assertTrue('pid' in run.patch.call_args_list[1][0][0])
self.assertEqual(dts, run.patch.call_args_list[0][0][0]['start_dts'])
self.assertEqual(
u'H\xe9llo World!\n'.encode('utf-8'),
run.run_log.patch.call_args_list[0][0][0]['content']
)
self.assertEqual([
call({
'return_dts': dts,
'return_success': True,
})
], run.patch.call_args_list[2:])
self.assertEqual([
call('{"kind": "run", "event": "started", "run_id": 1234}'),
call('{"kind": "run", "event": "returned", "run_id": 1234}'),
], event_queue.put.call_args_list)
datetime.now.assert_called_with(utc)
@patch('job_runner_worker.worker.subprocess', subprocess)
@patch('job_runner_worker.worker.RunLog')
@patch('job_runner_worker.worker.datetime')
@patch('job_runner_worker.worker.config')
def test_execute_bad_shebang(self, config, datetime, RunLog):
"""
Test :func:`.execute_run` when the shebang is invalid.
"""
config.get.return_value = '/tmp'
run = Mock()
run.run_log = None
run.id = 1234
run.job.script_content = (
u'#!I love cheese\n\necho "H\xe9llo World!";\n')
event_queue = Mock()
exit_queue = Mock()
run_queue = Queue()
run_queue.put(run)
exit_queue_return = [Empty, None]
def exit_queue_side_effect(*args, **kwargs):
value = exit_queue_return.pop(0)
if callable(value):
raise value()
exit_queue.get.side_effect = exit_queue_side_effect
execute_run(run_queue, event_queue, exit_queue)
dts = datetime.now.return_value.isoformat.return_value
self.assertEqual(dts, run.patch.call_args_list[0][0][0]['start_dts'])
log_out = RunLog.return_value.post.call_args_list[0][0][0]['content']
self.assertTrue(
log_out.startswith('[job runner worker] Could not execute job:')
)
self.assertEqual([
call({
'return_dts': dts,
'return_success': False,
})
], run.patch.call_args_list[1:])
self.assertEqual([
call('{"kind": "run", "event": "started", "run_id": 1234}'),
call('{"kind": "run", "event": "returned", "run_id": 1234}'),
], event_queue.put.call_args_list)
datetime.now.assert_called_with(utc)
@patch('job_runner_worker.worker.subprocess', subprocess)
@patch('job_runner_worker.worker.RunLog')
@patch('job_runner_worker.worker.datetime')
@patch('job_runner_worker.worker.config')
def test_execute_no_shebang(self, config, datetime, RunLog):
"""
Test :func:`.execute_run` when the shebang is missing.
"""
config.get.return_value = '/tmp'
run = Mock()
run.run_log = None
run.id = 1234
run.job.script_content = (
u'I love cheese\n\necho "H\xe9llo World!";\n')
event_queue = Mock()
exit_queue = Mock()
run_queue = Queue()
run_queue.put(run)
exit_queue_return = [Empty, None]
def exit_queue_side_effect(*args, **kwargs):
value = exit_queue_return.pop(0)
if callable(value):
raise value()
exit_queue.get.side_effect = exit_queue_side_effect
execute_run(run_queue, event_queue, exit_queue)
dts = datetime.now.return_value.isoformat.return_value
self.assertEqual(dts, run.patch.call_args_list[0][0][0]['start_dts'])
log_out = RunLog.return_value.post.call_args_list[0][0][0]['content']
self.assertTrue(
log_out.startswith('[job runner worker] Could not execute job:')
)
self.assertEqual([
call({
'return_dts': dts,
'return_success': False,
})
], run.patch.call_args_list[1:])
self.assertEqual([
call('{"kind": "run", "event": "started", "run_id": 1234}'),
call('{"kind": "run", "event": "returned", "run_id": 1234}'),
], event_queue.put.call_args_list)
datetime.now.assert_called_with(utc)
@patch('job_runner_worker.worker._kill_pid_tree')
@patch('job_runner_worker.worker.datetime')
def test_kill_run(self, datetime, kill_pid_tree_mock):
"""
Test :func:`.kill_run`.
"""
event_queue = Mock()
kill_request = Mock()
kill_request.id = 1234
kill_request.run.pid = 5678
dts = datetime.now.return_value.isoformat.return_value
kill_queue = Queue()
kill_queue.put(kill_request)
exit_queue = Mock()
exit_queue_return = [Empty, None]
def exit_queue_side_effect(*args, **kwargs):
value = exit_queue_return.pop(0)
if callable(value):
raise value()
exit_queue.get.side_effect = exit_queue_side_effect
kill_run(kill_queue, event_queue, exit_queue)
kill_pid_tree_mock.assert_called_with(5678)
kill_request.patch.assert_called_with({
'execute_dts': dts,
})
event_queue.put.assert_called_with((
'{"kill_request_id": 1234, "kind": "kill_request", '
'"event": "executed"}'
))
@patch('job_runner_worker.worker.subprocess')
def test__get_child_pids(self, subprocess_mock):
"""
Test :func:`._get_child_pids`.
"""
sub_proc = subprocess_mock.Popen.return_value
sub_proc.wait.return_value = 0
sub_proc.communicate.return_value = [' 123\n 456\n 789\n', '']
self.assertEqual([123, 456, 789], _get_child_pids(321))
@patch('job_runner_worker.worker.config')
def test__truncate_log(self, config):
"""
Test :func:`._truncate_log`.
"""
config.getint.return_value = 100
input_string = '{0}{1}'.format(
''.join(['a'] * 30), ''.join(['b'] * 100))
expected_out = '{0}\n\n[truncated]\n\n{1}'.format(
''.join(['a'] * 20),
''.join(['b'] * 80),
)
self.assertEqual(expected_out, _truncate_log(input_string))
|
|
# card-fight-thingy - Simplistic battle card game... thingy
#
# The MIT License (MIT)
#
# Copyright (c) 2015 The Underscores
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from . import general
from card_fight_thingy.general import getInt
from . import player
from card_fight_thingy.player import Player
from . import card
from . import parser
maxPlayers = 10
gameOn = False
def initGame(pCount):
"""Initializes game with pCount number of players"""
global player_stack
player_stack = []
for p in range(pCount):
player_stack.append(Player(p))
def dispPlayers(stack):
for i, plyr in enumerate(stack):
if plyr is None: continue
if plyr.defense[0] == 0:
# Defense stack is empty
defStr = ""
else:
defStr = "[{} {} {}]".format(*plyr.defense)
print("Player #{}:\t{}HP\t{}".format(i + 1, plyr.health, defStr))
sys.stdout.write("\n")
def tryCardApply(current_player, current_card, victim):
"""
Try to apply the given card for the current player.
Return (True, "") on success, otherwise (False, reason).
"""
if type(current_card) is card.Card_Def:
if not current_card.apply(current_player):
return (False, "Defense stack is full. Try again...\n")
elif type(current_card) is card.Card_Atk:
# TODO: Checking len(player_stack) here only works if there
# were only ever 2 players. Make it so that it will work when
# the game comes down to 2 players
# Check if there are only two players. If so, automatically
# select the second player
#if len(player_stack) == 2:
# victim = 1 if pNum == 2 else 2
#else:
if not victim:
return (False, "Did not say who to attack. Try again...\n")
if not (1 <= victim <= len(player_stack)):
return (
False,
"Player {} does not exist. Try again...\n".format(victim)
)
if player_stack[victim-1] == None:
return (
False,
"Player {} is dead. Try again...\n".format(victim)
)
if victim-1 == current_player.number:
return (False, "You cannot attack yourself. Try again...\n")
sys.stdout.write("\n")
if current_card.apply(player_stack[victim - 1]):
# Player was killed, remove from list
# We do not pop a dead player from the player stack since each
# player's identifying number is associated with its index
player_stack[victim - 1] = None
else:
return (False, "Did not expect object {!r}".format(type(current_card)))
return (True, "")
def takeTurn(pNum):
"""Have player at global player_stack index pNum take their turn"""
global player_stack
if player_stack[pNum] is None: return
print("Player #" + str(pNum+1) + "'s turn...\n")
curPlyr = player_stack[pNum]
while True:
curPlyr.showCards()
sys.stdout.write("\n")
# Try to get valid input
action, cardNum, victim = parser.tokenize(input("Enter action : "))
if action.lower() in ('u', "use"):
if not cardNum:
print("No card number given. Try again...")
continue
#DOIT: Check if card in range.
ok, msg = tryCardApply(curPlyr, curPlyr.cards[cardNum - 1], victim)
if not ok:
print(msg)
continue
elif action.lower() in ('d', "discard"):
if not cardNum:
print("No card number given. Try again...")
continue
#DOIT: Check if card in range.
# Fall through
pass
elif action.lower() in ('q', 'quit', 'exit'):
while True:
action = input("Are you sure you want to quit to menu? (Y/n) : ")
if action == '' or action[0].lower() == 'y':
doQuit = True
break
elif action[0].lower() == 'n':
doQuit = False
break
else:
print("Invalid option. Try again...\n")
continue
sys.stdout.write('\n')
if doQuit:
winners = []
topHP = 0
for p in player_stack:
if not p: continue
if p.health > topHP:
winners = [p.number + 1]
topHP = p.health
elif p.health == topHP:
winners.append(p.number+1)
endGame(winners)
break
else:
continue
else:
print("Do not know action {!r}. Try again...".format(action))
continue
curPlyr.removeCard(cardNum-1)
break
def endGame(whoWon=(None,)):
global gameOn
if gameOn == False:
return False
gameOn = False
if not isinstance(whoWon, (list, tuple)):
whoWon = (whoWon,)
if len(whoWon) == 0 or not whoWon[0]:
print("Game over. No one won.")
elif len(whoWon) == 1:
print("Game over. Player {} wins.\n".format(whoWon[0]))
elif len(whoWon) == 2:
print("Game over. Players {} and {} tied.\n".format(whoWon[0], whoWon[1]))
else:
winStr = ""
for p_i, p in enumerate(whoWon):
if p_i < len(whoWon)-1:
winStr += "{}, ".format(p)
else:
winStr += "and {}".format(p)
print("Game over. Players {} tied.\n".format(winStr))
def playGame():
"""
Sets the game in motion after the game has been initialized with initGame()
"""
global player_stack, gameOn
gameOn = True
while gameOn:
for p in player_stack:
if p == None: continue
dispPlayers(player_stack)
takeTurn(p.number)
if not gameOn: break
# Check if only one player remains
c = 0
winner = None
for p in player_stack:
if p is None: continue
c += 1
if c > 1:
break
else:
winner = p
if c == 1:
endGame(winner.number+1)
continue
def newGame():
"""
Initializes game with user-defined number of players,
then sets the game in motion
"""
while True:
pCount = getInt("How many players? Maximum of {}: ".format(maxPlayers),
2, maxPlayers
)
if not pCount:
continue
else:
sys.stdout.write("\n")
break
initGame(pCount)
playGame()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""LSTM Block Cell ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.contrib.rnn.python.ops import core_rnn_cell
from tensorflow.contrib.rnn.python.ops import fused_rnn_cell
from tensorflow.contrib.util import loader
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import resource_loader
_lstm_ops_so = loader.load_op_library(
resource_loader.get_path_to_datafile("_lstm_ops.so"))
# pylint: disable=invalid-name
def _lstm_block_cell(x,
cs_prev,
h_prev,
w,
b,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""Computes the LSTM cell forward propagation for 1 time step.
This implementation uses 1 weight matrix and 1 bias vector, and there's an
optional peephole connection.
This kernel op implements the following mathematical equations:
```python
xh = [x, h_prev]
[i, f, ci, o] = xh * w + b
f = f + forget_bias
if not use_peephole:
wci = wcf = wco = 0
i = sigmoid(cs_prev * wci + i)
f = sigmoid(cs_prev * wcf + f)
ci = tanh(ci)
cs = ci .* i + cs_prev .* f
cs = clip(cs, cell_clip)
o = sigmoid(cs * wco + o)
co = tanh(cs)
h = co .* o
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`.
The input to the LSTM cell, shape (batch_size, num_inputs).
cs_prev: A `Tensor`. Must have the same type as `x`.
Value of the cell state at previous time step.
h_prev: A `Tensor`. Must have the same type as `x`.
Output of the previous cell at previous time step.
w: A `Tensor`. Must have the same type as `x`. The weight matrix.
b: A `Tensor`. Must have the same type as `x`. The bias vector.
wci: A `Tensor`. Must have the same type as `x`.
The weight matrix for input gate peephole connection.
wcf: A `Tensor`. Must have the same type as `x`.
The weight matrix for forget gate peephole connection.
wco: A `Tensor`. Must have the same type as `x`.
The weight matrix for output gate peephole connection.
forget_bias: An optional `float`. Defaults to `1`. The forget gate bias.
cell_clip: An optional `float`. Defaults to `3`.
Value to clip the 'cs' value to.
use_peephole: An optional `bool`. Defaults to `False`.
Whether to use peephole weights.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A `Tensor`. Has the same type as `x`. The input gate.
cs: A `Tensor`. Has the same type as `x`. The cell state before the tanh.
f: A `Tensor`. Has the same type as `x`. The forget gate.
o: A `Tensor`. Has the same type as `x`. The output gate.
ci: A `Tensor`. Has the same type as `x`. The cell input.
co: A `Tensor`. Has the same type as `x`. The cell after the tanh.
h: A `Tensor`. Has the same type as `x`. The output h vector.
Raises:
ValueError: If cell_size is None.
"""
if wci is None:
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wco = wci
wcf = wci
# pylint: disable=protected-access
return _lstm_ops_so.lstm_block_cell(
x=x,
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wco=wco,
wcf=wcf,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip,
use_peephole=use_peephole,
name=name)
# pylint: enable=protected-access
def _block_lstm(seq_len_max,
x,
w,
b,
cs_prev=None,
h_prev=None,
wci=None,
wcf=None,
wco=None,
forget_bias=None,
cell_clip=None,
use_peephole=None,
name=None):
r"""TODO(williamchan): add doc.
Args:
seq_len_max: A `Tensor` of type `int64`.
x: A list of at least 1 `Tensor` objects of the same type in: `float32`.
w: A `Tensor`. Must have the same type as `x`.
b: A `Tensor`. Must have the same type as `x`.
cs_prev: A `Tensor`. Must have the same type as `x`.
h_prev: A `Tensor`. Must have the same type as `x`.
wci: A `Tensor`. Must have the same type as `x`.
wcf: A `Tensor`. Must have the same type as `x`.
wco: A `Tensor`. Must have the same type as `x`.
forget_bias: An optional `float`. Defaults to `1`.
cell_clip: An optional `float`. Defaults to `3`.
use_peephole: An optional `bool`. Defaults to `False`.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (i, cs, f, o, ci, co, h).
i: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
cs: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
f: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
o: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
ci: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
co: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
h: A list with the same number of `Tensor` objects as `x` of `Tensor`
objects of the same type as x.
Raises:
ValueError: If `b` does not have a valid shape.
"""
batch_size = x[0].get_shape().with_rank(2)[0].value
cell_size4 = b.get_shape().with_rank(1)[0].value
if cell_size4 is None:
raise ValueError("`b` shape must not be None.")
cell_size = cell_size4 / 4
zero_state = None
if cs_prev is None or h_prev is None:
zero_state = array_ops.constant(
0, dtype=dtypes.float32, shape=[batch_size, cell_size])
if cs_prev is None:
cs_prev = zero_state
if h_prev is None:
h_prev = zero_state
if wci is None:
wci = array_ops.constant(0, dtype=dtypes.float32, shape=[cell_size])
wco = wci
wcf = wci
# pylint: disable=protected-access
i, cs, f, o, ci, co, h = _lstm_ops_so.block_lstm(
seq_len_max=seq_len_max,
x=array_ops.stack(x),
cs_prev=cs_prev,
h_prev=h_prev,
w=w,
wci=wci,
wco=wco,
wcf=wcf,
b=b,
forget_bias=forget_bias,
cell_clip=cell_clip,
name=name,
use_peephole=use_peephole)
return array_ops.unstack(i), array_ops.unstack(cs), array_ops.unstack(
f), array_ops.unstack(o), array_ops.unstack(ci), array_ops.unstack(
co), array_ops.unstack(h)
# pylint: enable=protected-access
# pylint: enable=invalid-name
_lstm_block_cell_grad_outputs = ["cs_prev_grad", "dicfo"]
@ops.RegisterGradient("LSTMBlockCell")
def _LSTMBlockCellGrad(op, *grad):
"""Gradient for LSTMBlockCell."""
(x, cs_prev, h_prev, w, wci, wco, wcf, b) = op.inputs
(i, cs, f, o, ci, co, _) = op.outputs
(_, cs_grad, _, _, _, _, h_grad) = grad
batch_size = x.get_shape().with_rank(2)[0].value
if batch_size is None:
batch_size = -1
input_size = x.get_shape().with_rank(2)[1].value
if input_size is None:
raise ValueError("input_size from `x` should not be None.")
cell_size = cs_prev.get_shape().with_rank(2)[1].value
if cell_size is None:
raise ValueError("cell_size from `cs_prev` should not be None.")
(cs_prev_grad, dicfo, wci_grad, wcf_grad,
wco_grad) = _lstm_ops_so.lstm_block_cell_grad(
x,
cs_prev,
h_prev,
w,
wci,
wcf,
wco,
b,
i,
cs,
f,
o,
ci,
co,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
# Backprop from dicfo to xh.
xh_grad = math_ops.matmul(dicfo, w, transpose_b=True)
x_grad = array_ops.slice(xh_grad, (0, 0), (batch_size, input_size))
x_grad.get_shape().merge_with(x.get_shape())
h_prev_grad = array_ops.slice(xh_grad, (0, input_size),
(batch_size, cell_size))
h_prev_grad.get_shape().merge_with(h_prev.get_shape())
# Backprop from dicfo to w.
xh = array_ops.concat([x, h_prev], 1)
w_grad = math_ops.matmul(xh, dicfo, transpose_a=True)
w_grad.get_shape().merge_with(w.get_shape())
# Backprop from dicfo to b.
b_grad = nn_ops.bias_add_grad(dicfo)
b_grad.get_shape().merge_with(b.get_shape())
return (x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wcf_grad,
wco_grad, b_grad)
@ops.RegisterGradient("BlockLSTM")
def _BlockLSTMGrad(op, *grad):
"""Gradient for BlockLSTM."""
seq_len_max, x, cs_prev, h_prev, w, wci, wco, wcf, b = op.inputs
i, cs, f, o, ci, co, h = op.outputs
cs_grad = grad[1]
h_grad = grad[6]
(x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wco_grad, wcf_grad,
b_grad) = _lstm_ops_so.block_lstm_grad(
seq_len_max,
x,
cs_prev,
h_prev,
w,
wci,
wco,
wcf,
b,
i,
cs,
f,
o,
ci,
co,
h,
cs_grad,
h_grad,
use_peephole=op.get_attr("use_peephole"))
return [None, x_grad, cs_prev_grad, h_prev_grad, w_grad, wci_grad, wco_grad,
wcf_grad, b_grad]
class LSTMBlockCell(core_rnn_cell.RNNCell):
"""Basic LSTM recurrent network cell.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add `forget_bias` (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
Unlike `core_rnn_cell.LSTMCell`, this is a monolithic op and should be much
faster. The weight and bias matrices should be compatible as long as the
variable scope matches.
"""
def __init__(self,
num_units,
forget_bias=1.0,
use_peephole=False):
"""Initialize the basic LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
use_peephole: Whether to use peephole connections or not.
"""
self._num_units = num_units
self._forget_bias = forget_bias
self._use_peephole = use_peephole
self._names = {
"W": "weights",
"b": "biases",
"wci": "w_i_diag",
"wco": "w_o_diag",
"wcf": "w_f_diag",
"scope": "lstm_cell"
}
@property
def state_size(self):
return core_rnn_cell.LSTMStateTuple(self._num_units, self._num_units)
@property
def output_size(self):
return self._num_units
def __call__(self, x, states_prev, scope=None):
"""Long short-term memory cell (LSTM)."""
with vs.variable_scope(scope or self._names["scope"]):
x_shape = x.get_shape().with_rank(2)
if not x_shape[1].value:
raise ValueError("Expecting x_shape[1] to be set: %s" % str(x_shape))
if len(states_prev) != 2:
raise ValueError("Expecting states_prev to be a tuple with length 2.")
input_size = x_shape[1].value
w = vs.get_variable(self._names["W"], [input_size + self._num_units,
self._num_units * 4])
b = vs.get_variable(
self._names["b"], [w.get_shape().with_rank(2)[1].value],
initializer=init_ops.constant_initializer(0.0))
if self._use_peephole:
wci = vs.get_variable(self._names["wci"], [self._num_units])
wco = vs.get_variable(self._names["wco"], [self._num_units])
wcf = vs.get_variable(self._names["wcf"], [self._num_units])
else:
wci = wco = wcf = array_ops.zeros([self._num_units])
(cs_prev, h_prev) = states_prev
(_, cs, _, _, _, _, h) = _lstm_block_cell(
x,
cs_prev,
h_prev,
w,
b,
wci=wci,
wco=wco,
wcf=wcf,
forget_bias=self._forget_bias,
use_peephole=self._use_peephole)
new_state = core_rnn_cell.LSTMStateTuple(cs, h)
return h, new_state
class LSTMBlockWrapper(fused_rnn_cell.FusedRNNCell):
"""This is a helper class that provides housekeeping for LSTM cells.
This may be useful for alternative LSTM and similar types of cells.
The subclasses must implement `_call_cell` method and `num_units` property.
"""
@abc.abstractproperty
def num_units(self):
"""Number of units in this cell (output dimension)."""
pass
@abc.abstractmethod
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
This method must be implemented by subclasses and does the actual work
of calling the cell.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An int32
or int64 vector (tensor) size [batch_size], values in [0, time_len) or
None.
Returns:
A pair containing:
- State: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
"""
pass
def __call__(self,
inputs,
initial_state=None,
dtype=None,
sequence_length=None,
scope=None):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
or a list of `time_len` tensors of shape `[batch_size, input_size]`.
initial_state: a tuple `(initial_cell_state, initial_output)` with tensors
of shape `[batch_size, self._num_units]`. If this is not provided, the
cell is expected to create a zero initial state of type `dtype`.
dtype: The data type for the initial state and expected output. Required
if `initial_state` is not provided or RNN state has a heterogeneous
dtype.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len).`
Defaults to `time_len` for each element.
scope: `VariableScope` for the created subgraph; defaults to class name.
Returns:
A pair containing:
- Output: A `3-D` tensor of shape `[time_len, batch_size, output_size]`
or a list of time_len tensors of shape `[batch_size, output_size]`,
to match the type of the `inputs`.
- Final state: a tuple `(cell_state, output)` matching `initial_state`.
Raises:
ValueError: in case of shape mismatches
"""
with vs.variable_scope(scope or "lstm_block_wrapper"):
is_list = isinstance(inputs, list)
if is_list:
inputs = array_ops.stack(inputs)
inputs_shape = inputs.get_shape().with_rank(3)
if not inputs_shape[2]:
raise ValueError("Expecting inputs_shape[2] to be set: %s" %
inputs_shape)
batch_size = inputs_shape[1].value
if batch_size is None:
batch_size = array_ops.shape(inputs)[1]
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
# Provide default values for initial_state and dtype
if initial_state is None:
if dtype is None:
raise ValueError(
"Either initial_state or dtype needs to be specified")
z = array_ops.zeros(
array_ops.stack([batch_size, self.num_units]), dtype=dtype)
initial_state = z, z
else:
if len(initial_state) != 2:
raise ValueError(
"Expecting initial_state to be a tuple with length 2 or None")
if dtype is None:
dtype = initial_state[0].dtype
# create the actual cell
if sequence_length is not None:
sequence_length = ops.convert_to_tensor(sequence_length)
initial_cell_state, initial_output = initial_state # pylint: disable=unpacking-non-sequence
cell_states, outputs = self._call_cell(inputs, initial_cell_state,
initial_output, dtype,
sequence_length)
if sequence_length is not None:
# Mask out the part beyond sequence_length
mask = array_ops.transpose(
array_ops.sequence_mask(
sequence_length, time_len, dtype=dtype), [1, 0])
mask = array_ops.tile(
array_ops.expand_dims(mask, [-1]), [1, 1, self.num_units])
outputs *= mask
# Prepend initial states to cell_states and outputs for indexing to work
# correctly, since we want to access the last valid state at
# sequence_length - 1, which can even be -1, corresponding to the
# initial state.
mod_cell_states = array_ops.concat(
[array_ops.expand_dims(initial_cell_state, [0]), cell_states], 0)
mod_outputs = array_ops.concat(
[array_ops.expand_dims(initial_output, [0]), outputs], 0)
final_cell_state = self._gather_states(mod_cell_states, sequence_length,
batch_size)
final_output = self._gather_states(mod_outputs, sequence_length,
batch_size)
else:
# No sequence_lengths used: final state is the last state
final_cell_state = cell_states[-1]
final_output = outputs[-1]
if is_list:
# Input was a list, so return a list
outputs = array_ops.unstack(outputs)
final_state = core_rnn_cell.LSTMStateTuple(final_cell_state,
final_output)
return outputs, final_state
def _gather_states(self, data, indices, batch_size):
"""Produce `out`, s.t. out(i, j) = data(indices(i), i, j)."""
mod_indices = indices * batch_size + math_ops.range(batch_size)
return array_ops.gather(
array_ops.reshape(data, [-1, self.num_units]), mod_indices)
class LSTMBlockFusedCell(LSTMBlockWrapper):
"""FusedRNNCell implementation of LSTM.
This is an extremely efficient LSTM implementation that uses a single TF op
for the entire LSTM. It should be both faster and more memory-efficient than
LSTMBlockCell defined above.
The implementation is based on: http://arxiv.org/abs/1409.2329.
We add forget_bias (default: 1) to the biases of the forget gate in order to
reduce the scale of forgetting in the beginning of the training.
The variable naming is consistent with `core_rnn_cell.LSTMCell`.
"""
def __init__(self,
num_units,
forget_bias=1.0,
cell_clip=None,
use_peephole=False):
"""Initialize the LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
forget_bias: float, The bias added to forget gates (see above).
cell_clip: clip the cell to this value. Defaults to `3`.
use_peephole: Whether to use peephole connections or not.
"""
self._num_units = num_units
self._forget_bias = forget_bias
self._cell_clip = cell_clip
self._use_peephole = use_peephole
@property
def num_units(self):
"""Number of units in this cell (output dimension)."""
return self._num_units
def _call_cell(self, inputs, initial_cell_state, initial_output, dtype,
sequence_length):
"""Run this LSTM on inputs, starting from the given state.
Args:
inputs: `3-D` tensor with shape `[time_len, batch_size, input_size]`
initial_cell_state: initial value for cell state, shape `[batch_size,
self._num_units]`
initial_output: initial value of cell output, shape `[batch_size,
self._num_units]`
dtype: The data type for the initial state and expected output.
sequence_length: Specifies the length of each sequence in inputs. An
`int32` or `int64` vector (tensor) size `[batch_size]`, values in `[0,
time_len)` or None.
Returns:
A pair containing:
- Cell state (cs): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
- Output (h): A `3-D` tensor of shape `[time_len, batch_size,
output_size]`
"""
inputs_shape = inputs.get_shape().with_rank(3)
time_len = inputs_shape[0].value
if time_len is None:
time_len = array_ops.shape(inputs)[0]
input_size = inputs_shape[2].value
w = vs.get_variable(
"weights",
[input_size + self._num_units, self._num_units * 4], dtype=dtype)
b = vs.get_variable(
"biases", [w.get_shape().with_rank(2)[1]],
initializer=init_ops.constant_initializer(0.0),
dtype=dtype)
if self._use_peephole:
wci = vs.get_variable("w_i_diag", [self._num_units], dtype=dtype)
wco = vs.get_variable("w_o_diag", [self._num_units], dtype=dtype)
wcf = vs.get_variable("w_f_diag", [self._num_units], dtype=dtype)
else:
wci = wco = wcf = array_ops.zeros([self._num_units], dtype=dtype)
if sequence_length is None:
max_seq_len = math_ops.to_int64(time_len)
else:
max_seq_len = math_ops.to_int64(math_ops.reduce_max(sequence_length))
_, cs, _, _, _, _, h = _lstm_ops_so.block_lstm(
seq_len_max=max_seq_len,
x=inputs,
cs_prev=initial_cell_state,
h_prev=initial_output,
w=w,
wci=wci,
wco=wco,
wcf=wcf,
b=b,
forget_bias=self._forget_bias,
cell_clip=self._cell_clip,
use_peephole=self._use_peephole)
return cs, h
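# ---------------------------------------------------------------------------
# Illustrative usage sketch only (the shapes and unit count below are assumed,
# not part of the library): build the fused LSTM described in the
# LSTMBlockFusedCell docstring over a [time_len, batch_size, input_size]
# placeholder and check the static shapes. Guarded so nothing runs on import.
if __name__ == "__main__":
  x = array_ops.placeholder(dtypes.float32, [20, 32, 128])
  cell = LSTMBlockFusedCell(num_units=64)
  outputs, final_state = cell(x, dtype=dtypes.float32)
  print(outputs.get_shape())        # expected: (20, 32, 64)
  print(final_state.c.get_shape())  # expected: (32, 64)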
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Support for creating Kay forms from Datastore data models.
Taken from google.appengine.ext.db.djangoforms
Terminology notes:
- forms: always refers to the Kay newforms subpackage
- field: always refers to a Kay forms.Field instance
- property: always refers to a db.Property instance
Mapping between properties and fields:
+====================+===================+==============+====================+
| Property subclass | Field subclass | datatype | widget; notes |
+====================+===================+==============+====================+
| StringProperty | TextField | unicode | Textarea |
| | | | if multiline |
+--------------------+-------------------+--------------+--------------------+
| TextProperty | TextField | unicode | Textarea |
+--------------------+-------------------+--------------+--------------------+
| BlobProperty | FileField | str | skipped in v0.96 |
+--------------------+-------------------+--------------+--------------------+
| DateTimeProperty | DateTimeField | datetime | skipped |
| | | | if auto_now[_add] |
+--------------------+-------------------+--------------+--------------------+
| DateProperty | DateField | date | ditto |
+--------------------+-------------------+--------------+--------------------+
| TimeProperty | TimeField | time | ditto |
+--------------------+-------------------+--------------+--------------------+
| IntegerProperty | IntegerField | int or long | |
+--------------------+-------------------+--------------+--------------------+
| FloatProperty | FloatField | float | CharField in v0.96 |
+--------------------+-------------------+--------------+--------------------+
| BooleanProperty | BooleanField | bool | |
+--------------------+-------------------+--------------+--------------------+
| UserProperty | TextField | users.User | |
+--------------------+-------------------+--------------+--------------------+
| StringListProperty | TextField | list of str | Textarea |
+--------------------+-------------------+--------------+--------------------+
| LinkProperty | TextField | str | |
+--------------------+-------------------+--------------+--------------------+
| ReferenceProperty | ModelField* | db.Model | |
+--------------------+-------------------+--------------+--------------------+
| _ReverseReferenceP.| None | <iterable> | always skipped |
+====================+===================+==============+====================+
"""
import itertools
import logging
from google.appengine.api import users
from google.appengine.ext import db
from kay import exceptions
from kay.utils import forms
from kay.utils import datastructures
from kay.i18n import lazy_gettext as _
from kay.exceptions import ImproperlyConfigured
from kay.models import NamedModel
def monkey_patch(name, bases, namespace):
"""A 'metaclass' for adding new methods to an existing class.
In this version, existing methods can't be overridden; this is by
design, to avoid accidents.
Usage example:
class PatchClass(TargetClass):
__metaclass__ = monkey_patch
def foo(self, ...): ...
def bar(self, ...): ...
This is equivalent to:
def foo(self, ...): ...
def bar(self, ...): ...
TargetClass.foo = foo
TargetClass.bar = bar
PatchClass = TargetClass
Note that PatchClass becomes an alias for TargetClass; by convention
it is recommended to give PatchClass the same name as TargetClass.
"""
assert len(bases) == 1, 'Exactly one base class is required'
base = bases[0]
for name, value in namespace.iteritems():
if name not in ('__metaclass__', '__module__'):
assert name not in base.__dict__, "Won't override attribute %r" % (name,)
setattr(base, name, value)
return base
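# ---------------------------------------------------------------------------
# Minimal illustrative sketch (not part of the original module; the class and
# method names are made up): how the monkey_patch 'metaclass' above is meant
# to be used. Wrapped in a helper function so nothing runs at import time.
def _monkey_patch_example():
  class Target(object):
    pass

  class PatchTarget(Target):
    __metaclass__ = monkey_patch

    def greet(self):
      return "hello"

  # PatchTarget is now an alias for Target, and Target gained greet().
  assert PatchTarget is Target
  assert Target().greet() == "hello"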
class Property(db.Property):
__metaclass__ = monkey_patch
def get_model_property_name(self):
"""Return the attribute name of this property in this property's Model class"""
matching_prop_names = [prop_name for (prop_name, prop) in
self.model_class.properties().items() if prop.name == self.name]
if len(matching_prop_names) != 1:
raise Exception('Model class "%s" must have exactly one property with'
' the datastore storage name "%s". Found %d properties'
' with that name: %s' % (
self.model_class.__name__,
self.name,
len(matching_prop_names),
matching_prop_names
)
)
return matching_prop_names[0]
def get_form_field(self, form_class=forms.TextField, **kwargs):
"""Return a Django form field appropriate for this property.
Args:
form_class: a forms.Field subclass, default forms.TextField
Additional keyword arguments are passed to the form_class constructor,
with certain defaults:
required: self.required
label: prettified self.verbose_name, if not None
widget: a forms.Select instance if self.choices is non-empty
initial: self.default, if not None
Returns:
A fully configured instance of form_class, or None if no form
field should be generated for this property.
"""
defaults = {'required': self.required}
if self.verbose_name is None:
defaults['label'] = (
self.get_model_property_name().capitalize().replace('_', ' ')
)
else:
defaults['label'] = self.verbose_name
if self.choices:
choices = []
if not self.required or (self.default is None and
'initial' not in kwargs):
choices.append(('', '---------'))
for choice in self.choices:
choices.append((unicode(choice), unicode(choice)))
defaults['choices'] = choices
form_class = forms.ChoiceField
if self.default is not None:
defaults['default'] = self.default
defaults.update(kwargs)
return form_class(**defaults)
def get_value_for_form(self, instance):
"""Extract the property value from the instance for use in a form.
Override this to do a property- or field-specific type conversion.
Args:
instance: a db.Model instance
Returns:
The property's value extracted from the instance, possibly
converted to a type suitable for a form field; possibly None.
By default this returns the instance attribute's value unchanged.
"""
return getattr(instance, self.get_model_property_name())
def make_value_from_form(self, value):
"""Convert a form value to a property value.
Override this to do a property- or field-specific type conversion.
Args:
value: the cleaned value retrieved from the form field
Returns:
A value suitable for assignment to a model instance's property;
possibly None.
By default this converts the value to self.data_type if it
isn't already an instance of that type, except if the value is
empty, in which case we return None.
"""
if value in (None, ''):
return None
if not isinstance(value, self.data_type):
value = self.data_type(value)
return value
class UserProperty(db.Property):
"""This class exists solely to log a warning when it is used."""
def __init__(self, *args, **kwds):
logging.warn("Please don't use modelforms.UserProperty; "
"use db.UserProperty instead.")
super(UserProperty, self).__init__(*args, **kwds)
class EmailProperty(db.EmailProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
defaults = {'form_class': forms.EmailField}
defaults.update(kwargs)
return super(EmailProperty, self).get_form_field(**defaults)
class StringProperty(db.StringProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a string property.
This sets the widget default to forms.Textarea if the property's
multiline attribute is set.
"""
defaults = {}
if self.multiline:
defaults['widget'] = forms.Textarea
defaults.update(kwargs)
return super(StringProperty, self).get_form_field(**defaults)
class TextProperty(db.TextProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a text property.
This sets the widget default to forms.Textarea.
"""
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextProperty, self).get_form_field(**defaults)
class BlobProperty(db.BlobProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a blob property.
"""
if not hasattr(forms, 'FileField'):
return None
defaults = {'form_class': forms.FileField}
defaults.update(kwargs)
return super(BlobProperty, self).get_form_field(**defaults)
def get_value_for_form(self, instance):
"""Extract the property value from the instance for use in a form.
There is no way to convert a Blob into an initial value for a file
upload, so we always return None.
"""
return None
def make_value_from_form(self, value):
"""Convert a form value to a property value.
This extracts the content from the UploadedFile instance returned
by the FileField instance.
"""
if value.__class__.__name__ == 'UploadedFile':
return db.Blob(value.content)
return super(BlobProperty, self).make_value_from_form(value)
class DateTimeProperty(db.DateTimeProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a date-time property.
This defaults to a DateTimeField instance, except if auto_now or
auto_now_add is set, in which case None is returned, as such
'auto' fields should not be rendered as part of the form.
"""
if self.auto_now or self.auto_now_add:
return None
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeProperty, self).get_form_field(**defaults)
class DateProperty(db.DateProperty):
# TODO:
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a date property.
This defaults to a DateField instance, except if auto_now or
auto_now_add is set, in which case None is returned, as such
'auto' fields should not be rendered as part of the form.
"""
if self.auto_now or self.auto_now_add:
return None
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateProperty, self).get_form_field(**defaults)
class TimeProperty(db.TimeProperty):
# TODO:
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a time property.
This defaults to a TimeField instance, except if auto_now or
auto_now_add is set, in which case None is returned, as such
'auto' fields should not be rendered as part of the form.
"""
if self.auto_now or self.auto_now_add:
return None
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeProperty, self).get_form_field(**defaults)
class IntegerProperty(db.IntegerProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for an integer property.
This defaults to an IntegerField instance.
"""
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerProperty, self).get_form_field(**defaults)
class FloatProperty(db.FloatProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for an integer property.
This defaults to a FloatField instance when using Django 0.97 or
later. For 0.96 this defaults to the CharField class.
"""
defaults = {}
if hasattr(forms, 'FloatField'):
defaults['form_class'] = forms.FloatField
defaults.update(kwargs)
return super(FloatProperty, self).get_form_field(**defaults)
class BooleanProperty(db.BooleanProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a boolean property.
This defaults to a BooleanField.
"""
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanProperty, self).get_form_field(**defaults)
def make_value_from_form(self, value):
"""Convert a form value to a property value.
This is needed to ensure that False is not replaced with None.
"""
if value is None:
return None
if isinstance(value, basestring) and value.lower() == 'false':
return False
return bool(value)
class StringListProperty(db.StringListProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a StringList property.
This defaults to a Textarea widget with a blank initial value.
"""
defaults = {'field': forms.TextField(), 'form_class': forms.LineSeparated,
'min_size': 0}
defaults.update(kwargs)
return super(StringListProperty, self).get_form_field(**defaults)
def get_value_for_form(self, instance):
"""Extract the property value from the instance for use in a form.
This joins a list of strings with newlines.
"""
value = super(StringListProperty, self).get_value_for_form(instance)
if not value:
return None
if isinstance(value, list):
value = '\n'.join(value)
return value
def make_value_from_form(self, value):
"""Convert a form value to a property value.
This breaks the string into lines.
"""
if not value:
return []
if isinstance(value, basestring):
value = value.splitlines()
return value
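# Illustrative round trip (not part of the original module) for StringListProperty:
#   get_value_for_form(instance)         : ['red', 'green'] -> 'red\ngreen'
#   make_value_from_form('red\ngreen')   : -> ['red', 'green']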
class LinkProperty(db.LinkProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a URL property.
    This defaults to a TextField instance.
"""
defaults = {'form_class': forms.TextField}
defaults.update(kwargs)
return super(LinkProperty, self).get_form_field(**defaults)
class _WrapIter(object):
"""Helper class whose iter() calls a given function to get an iterator."""
def __init__(self, function):
self._function = function
def __iter__(self):
return self._function()
class ReferenceProperty(db.ReferenceProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a reference property.
This defaults to a ModelChoiceField instance.
"""
defaults = {'form_class': forms.ModelField,
'model': self.reference_class}
defaults.update(kwargs)
return super(ReferenceProperty, self).get_form_field(**defaults)
def get_value_for_form(self, instance):
"""Extract the property value from the instance for use in a form.
    This returns the key object for the referenced object, or None.
"""
value = super(ReferenceProperty, self).get_value_for_form(instance)
if value is not None:
value = value.key()
return value
def make_value_from_form(self, value):
"""Convert a form value to a property value.
This turns a key string or object into a model instance.
"""
if value:
if not isinstance(value, db.Model):
value = db.get(value)
return value
class _ReverseReferenceProperty(db._ReverseReferenceProperty):
__metaclass__ = monkey_patch
def get_form_field(self, **kwargs):
"""Return a Django form field appropriate for a reverse reference.
This always returns None, since reverse references are always
automatic.
"""
return None
def property_clean(prop, value):
"""Apply Property level validation to value.
Calls .make_value_from_form() and .validate() on the property and catches
exceptions generated by either. The exceptions are converted to
forms.ValidationError exceptions.
Args:
prop: The property to validate against.
value: The value to validate.
Raises:
forms.ValidationError if the value cannot be validated.
"""
if value is not None:
try:
prop.validate(prop.make_value_from_form(value))
except (db.BadValueError, ValueError), e:
raise forms.ValidationError(unicode(e))
class ModelFormOptions(object):
"""A simple class to hold internal options for a ModelForm class.
Instance attributes:
model: a db.Model class, or None
fields: list of field names to be defined, or None
    exclude: list of field names to be skipped, or None
    help_texts: dict mapping field names to help texts, or {}
  These instance attributes are copied from the 'Meta' class that is
  usually present in a ModelForm class and default to None ({} for help_texts).
"""
def __init__(self, options=None):
self.model = getattr(options, 'model', None)
self.fields = getattr(options, 'fields', None)
self.exclude = getattr(options, 'exclude', None)
self.help_texts = getattr(options, 'help_texts', {})
class ModelFormMetaclass(forms.FormMeta):
"""The metaclass for the ModelForm class defined below.
See the docs for ModelForm below for a usage example.
"""
bad_attr_names = ('data', 'errors', 'raw_data')
def __new__(cls, class_name, bases, attrs):
"""Constructor for a new ModelForm class instance.
The signature of this method is determined by Python internals.
"""
fields = sorted(((field_name, attrs.pop(field_name))
for field_name, obj in attrs.items()
if isinstance(obj, forms.Field)),
key=lambda obj: obj[1].creation_counter)
for base in bases[::-1]:
if hasattr(base, '_base_fields'):
fields = base._base_fields.items() + fields
declared_fields = datastructures.OrderedDict()
for field_name, obj in fields:
declared_fields[field_name] = obj
opts = ModelFormOptions(attrs.get('Meta', None))
attrs['_meta'] = opts
base_models = []
for base in bases:
base_opts = getattr(base, '_meta', None)
base_model = getattr(base_opts, 'model', None)
if base_model is not None:
base_models.append(base_model)
if len(base_models) > 1:
raise exceptions.ImproperlyConfigured(
"%s's base classes define more than one model." % class_name)
if opts.model is not None:
if base_models and base_models[0] is not opts.model:
raise exceptions.ImproperlyConfigured(
'%s defines a different model than its parent.' % class_name)
model_fields = datastructures.OrderedDict()
for name, prop in sorted(opts.model.properties().iteritems(),
key=lambda prop: prop[1].creation_counter):
if opts.fields and name not in opts.fields:
continue
if opts.exclude and name in opts.exclude:
continue
form_field = prop.get_form_field(
help_text=opts.help_texts.get(name, None))
if form_field is not None:
model_fields[name] = form_field
for bad_attr_name in ModelFormMetaclass.bad_attr_names:
if model_fields.has_key(bad_attr_name):
        raise exceptions.ImproperlyConfigured("When you use ModelForm, you cannot"
                                              " use these names as field names: %s"
                                              % str(ModelFormMetaclass.bad_attr_names))
# Preserve order in model definition
original_ordered_names = model_fields.keys()
model_fields.update(declared_fields)
extra_index = len(original_ordered_names)
for name, field in model_fields.iteritems():
if name in original_ordered_names:
field._position_hint = original_ordered_names.index(name)
else:
field._position_hint = extra_index
extra_index += 1
attrs['_base_fields'] = model_fields
props = opts.model.properties()
for name, field in model_fields.iteritems():
prop = props.get(name)
if prop:
def check_for_property_field(form, value, prop=prop):
property_clean(prop, value)
return True
field.validators.append(check_for_property_field)
else:
attrs['_base_fields'] = declared_fields
      # No model was specified, so only the explicitly declared fields are used
      # (the form is not built from model properties).
      # TODO: consider handling this case in forms.FormMeta instead.
return super(ModelFormMetaclass, cls).__new__(cls,
class_name, bases, attrs)
class BaseModelForm(forms.Form):
"""Base class for ModelForm.
This overrides the forms.BaseForm constructor and adds a save() method.
This class does not have a special metaclass; the magic metaclass is
added by the subclass ModelForm.
"""
def __init__(self, instance=None, initial=None, **kwargs):
"""Constructor.
Args (all optional and defaulting to None):
      data: dict of data values, typically from a POST request
initial: dict of initial values
instance: Model instance to be used for additional initial values
Except for initial and instance, these arguments are passed on to
the forms.BaseForm constructor unchanged, but only if not None.
    Leave any of them as None (i.e. blank) to fall back to the defaults.
"""
opts = self._meta
self.instance = instance
object_data = {}
if instance is not None:
for name, prop in instance.properties().iteritems():
if opts.fields and name not in opts.fields:
continue
if opts.exclude and name in opts.exclude:
continue
object_data[name] = prop.get_value_for_form(instance)
if initial is not None:
object_data.update(initial)
kwargs['initial'] = object_data
kwargs = dict((name, value)
for name, value in kwargs.iteritems()
if value is not None)
super(BaseModelForm, self).__init__(**kwargs)
def save(self, commit=True, **kwargs):
"""Save this form's cleaned data into a model instance.
Args:
commit: optional bool, default True; if true, the model instance
is also saved to the datastore.
Returns:
A model instance. If a model instance was already associated
with this form instance (either passed to the constructor with
instance=... or by a previous save() call), that same instance
is updated and returned; if no instance was associated yet, one
is created by this call.
Raises:
ValueError if the data couldn't be validated.
"""
    if not self.is_valid():
raise ValueError('Cannot save a non valid form')
opts = self._meta
instance = self.instance
if instance is None:
fail_message = 'created'
else:
fail_message = 'updated'
if self.errors:
raise ValueError("The %s could not be %s because the data didn't "
'validate.' % (opts.model.kind(), fail_message))
cleaned_data = self.data
converted_data = {}
propiter = itertools.chain(
opts.model.properties().iteritems(),
iter([('key_name', StringProperty(name='key_name'))])
)
for name, prop in propiter:
if cleaned_data.has_key(name):
value = cleaned_data.get(name)
if not value and prop.default is not None:
value = prop.default
# For new entities, use the datastore property names as the keys
# instead of the model property names
if instance is not None:
data_name = name
else:
data_name = getattr(opts.model, name).name
converted_data[data_name] = prop.make_value_from_form(value)
try:
converted_data.update(kwargs)
if instance is None:
if issubclass(opts.model, NamedModel):
logging.debug("commit argument ignored.")
instance = opts.model.create_new_entity(**converted_data)
else:
instance = opts.model(**converted_data)
self.instance = instance
else:
for name, value in converted_data.iteritems():
if name == 'key_name':
continue
setattr(instance, name, value)
except db.BadValueError, err:
raise ValueError('The %s could not be %s (%s)' %
(opts.model.kind(), fail_message, err))
if commit:
instance.put()
return instance
class ModelForm(BaseModelForm):
"""A Django form tied to a Datastore model.
Note that this particular class just sets the metaclass; all other
functionality is defined in the base class, BaseModelForm, above.
Usage example:
from google.appengine.ext import db
from google.appengine.ext.db import djangoforms
# First, define a model class
class MyModel(db.Model):
foo = db.StringProperty()
bar = db.IntegerProperty(required=True, default=42)
# Now define a form class
class MyForm(djangoforms.ModelForm):
class Meta:
model = MyModel
You can now instantiate MyForm without arguments to create an
unbound form, or with data from a POST request to create a bound
form. You can also pass a model instance with the instance=...
keyword argument to create an unbound (!) form whose initial values
are taken from the instance. For bound forms, use the save() method
to return a model instance.
Like Django's own corresponding ModelForm class, the nested Meta
class can have two other attributes:
fields: if present and non-empty, a list of field names to be
included in the form; properties not listed here are
excluded from the form
exclude: if present and non-empty, a list of field names to be
excluded from the form
If exclude and fields are both non-empty, names occurring in both
  are excluded (i.e. exclude wins). By default all properties in the
model have a corresponding form field defined.
It is also possible to define form fields explicitly. This gives
more control over the widget used, constraints, initial value, and
so on. Such form fields are not affected by the nested Meta class's
fields and exclude attributes.
If you define a form field named 'key_name' it will be treated
specially and will be used as the value for the key_name parameter
to the Model constructor. This allows you to create instances with
named keys. The 'key_name' field will be ignored when updating an
instance (although it will still be shown on the form).
"""
__metaclass__ = ModelFormMetaclass
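# Hedged sketch (not part of the original module), building on the docstring above:
# exposing a 'key_name' form field so that new entities are created with named keys.
# The field class and the is_valid() call are assumptions about this forms module.
#
#   class MyKeyedForm(djangoforms.ModelForm):
#       key_name = forms.TextField()
#       class Meta:
#           model = MyModel
#
#   form = MyKeyedForm(data={'key_name': 'answer', 'foo': 'x', 'bar': '42'})
#   if form.is_valid():
#       entity = form.save()   # new MyModel entity stored under key_name='answer'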
|
|
"""
Copyright (c) 2014-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
class GMIIFrame(object):
def __init__(self, data=b'', error=None):
self.data = b''
self.error = None
if type(data) is GMIIFrame:
self.data = data.data
self.error = data.error
else:
self.data = bytearray(data)
def build(self):
if self.data is None:
return
f = list(self.data)
d = []
er = []
i = 0
assert_er = False
if (type(self.error) is int or type(self.error) is bool) and self.error:
assert_er = True
self.error = None
while len(f) > 0:
d.append(f.pop(0))
if self.error is None:
er.append(0)
else:
er.append(self.error[i])
i += 1
if assert_er:
er[-1] = 1
self.error = 1
return d, er
def parse(self, d, er):
if d is None or er is None:
return
self.data = bytearray(d)
self.error = er
def __eq__(self, other):
if type(other) is GMIIFrame:
return self.data == other.data
return False
def __repr__(self):
return 'GMIIFrame(data=%s, error=%s)' % (repr(self.data), repr(self.error))
def __iter__(self):
return self.data.__iter__()
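# Illustrative sketch (not part of the original file): a GMIIFrame build()/parse() round trip.
#
#   f = GMIIFrame(b'\x55\x55\xd5\x01\x02')
#   d, er = f.build()      # d  = [0x55, 0x55, 0xd5, 0x01, 0x02]
#                          # er = [0, 0, 0, 0, 0]   (no error input, so all zero)
#   g = GMIIFrame()
#   g.parse(d, er)         # g == f, since __eq__ compares only the data bytes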
class GMIISource(object):
def __init__(self):
self.has_logic = False
self.queue = []
def send(self, frame):
self.queue.append(GMIIFrame(frame))
def count(self):
return len(self.queue)
def empty(self):
return not self.queue
def create_logic(self,
clk,
rst,
txd,
tx_en,
tx_er,
clk_enable=True,
mii_select=False,
name=None
):
assert not self.has_logic
self.has_logic = True
assert len(txd) == 8
@instance
def logic():
frame = None
d = []
er = []
ifg_cnt = 0
while True:
yield clk.posedge, rst.posedge
if rst:
frame = None
txd.next = 0
tx_en.next = 0
tx_er.next = 0
d = []
er = []
ifg_cnt = 0
else:
if not clk_enable:
pass
elif ifg_cnt > 0:
ifg_cnt -= 1
txd.next = 0
tx_er.next = 0
tx_en.next = 0
elif len(d) > 0:
txd.next = d.pop(0)
tx_er.next = er.pop(0)
tx_en.next = 1
if len(d) == 0:
if mii_select:
ifg_cnt = 12*2
else:
ifg_cnt = 12
elif self.queue:
frame = GMIIFrame(self.queue.pop(0))
d, er = frame.build()
if name is not None:
print("[%s] Sending frame %s" % (name, repr(frame)))
if mii_select:
d2 = []
for b in d:
d2.append(b & 0x0F)
d2.append(b >> 4)
d = d2
er2 = []
for b in er:
er2.append(b)
er2.append(b)
er = er2
txd.next = d.pop(0)
tx_er.next = er.pop(0)
tx_en.next = 1
else:
txd.next = 0
tx_er.next = 0
tx_en.next = 0
return instances()
class GMIISink(object):
def __init__(self):
self.has_logic = False
self.queue = []
self.sync = Signal(intbv(0))
def recv(self):
if self.queue:
return self.queue.pop(0)
return None
def count(self):
return len(self.queue)
def empty(self):
return not self.queue
def wait(self, timeout=0):
yield delay(0)
if self.queue:
return
if timeout:
yield self.sync, delay(timeout)
else:
yield self.sync
def create_logic(self,
clk,
rst,
rxd,
rx_dv,
rx_er,
clk_enable=True,
mii_select=False,
name=None
):
assert not self.has_logic
self.has_logic = True
assert len(rxd) == 8
@instance
def logic():
frame = None
d = []
er = []
while True:
yield clk.posedge, rst.posedge
if rst:
frame = None
d = []
er = []
else:
if not clk_enable:
pass
elif rx_dv:
if frame is None:
frame = GMIIFrame()
d = []
er = []
d.append(int(rxd))
er.append(int(rx_er))
elif frame is not None:
if len(d) > 0:
if mii_select:
odd = True
sync = False
b = 0
be = 0
d2 = []
er2 = []
for n, e in zip(d, er):
odd = not odd
b = (n & 0x0F) << 4 | b >> 4
be |= e
if not sync and b == 0xD5:
odd = True
sync = True
if odd:
d2.append(b)
er2.append(be)
be = False
d = d2
er = er2
frame.parse(d, er)
self.queue.append(frame)
self.sync.next = not self.sync
if name is not None:
print("[%s] Got frame %s" % (name, repr(frame)))
frame = None
d = []
er = []
return instances()
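# Hedged testbench sketch (not part of the original file): looping a GMIISource back
# into a GMIISink over shared MyHDL signals. The signal names here are illustrative only.
#
#   clk, rst = Signal(bool(0)), Signal(bool(0))
#   txd = Signal(intbv(0)[8:])
#   tx_en, tx_er = Signal(bool(0)), Signal(bool(0))
#
#   source, sink = GMIISource(), GMIISink()
#   source_logic = source.create_logic(clk, rst, txd=txd, tx_en=tx_en, tx_er=tx_er, name='src')
#   sink_logic = sink.create_logic(clk, rst, rxd=txd, rx_dv=tx_en, rx_er=tx_er, name='sink')
#
#   source.send(b'\x55\x55\xd5\x01\x02')   # after enough clock edges, sink.recv() returns it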
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import base64
import logging
from urlparse import urlparse
from google.appengine.api import urlfetch
from google.appengine.ext import db, deferred, ndb
from google.appengine.ext.ndb.key import Key
from typing import Tuple, List
from mcfw.consts import MISSING
from mcfw.exceptions import HttpNotFoundException
from mcfw.rpc import returns, arguments
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.communities.homescreen import maybe_publish_home_screens
from rogerthat.bizz.service import _validate_service_identity
from rogerthat.consts import FAST_QUEUE
from rogerthat.dal import put_and_invalidate_cache
from rogerthat.models import ServiceIdentity
from rogerthat.models.news import MediaType
from rogerthat.models.settings import SyncedNameValue, ServiceLocation, SyncedField, ServiceInfo, MediaItem
from rogerthat.rpc import users
from rogerthat.service.api.system import put_avatar
from rogerthat.to.service import ServiceIdentityDetailsTO
from rogerthat.utils import now, try_or_defer
from rogerthat.utils.channel import send_message
from rogerthat.utils.cloud_tasks import schedule_tasks, create_task
from rogerthat.utils.transactions import run_in_transaction
from rogerthat.utils.zip_utils import replace_file_in_zip_blob
from solutions import translate
from solutions.common.bizz import broadcast_updates_pending, SolutionModule
from solutions.common.cron.news.rss import parse_rss_items
from solutions.common.dal import get_solution_settings, get_solution_main_branding, \
get_solution_settings_or_identity_settings
from solutions.common.exceptions.settings import InvalidRssLinksException
from solutions.common.models import SolutionSettings, \
SolutionBrandingSettings, SolutionRssScraperSettings, SolutionRssLink, SolutionMainBranding, \
SolutionServiceConsent
from solutions.common.models.forms import UploadedFile
from solutions.common.to import SolutionSettingsTO, SolutionRssSettingsTO
from solutions.common.to.forms import MediaItemTO
from solutions.common.to.settings import ServiceInfoTO, PrivacySettingsTO, PrivacySettingsGroupTO
from solutions.common.utils import is_default_service_identity, send_client_action
SLN_LOGO_WIDTH = 640
SLN_LOGO_HEIGHT = 240
SLN_LOGO_MAX_SIZE = 102400 # 100 kB
SLN_AVATAR_WIDTH = 150
SLN_AVATAR_HEIGHT = 150
SLN_AVATAR_MAX_SIZE = 51200 # 50 kB
def validate_sln_settings(sln_settings):
# type: (SolutionSettings) -> None
for identity in sln_settings.identities:
to = ServiceIdentityDetailsTO(identifier=identity, name=sln_settings.name)
_validate_service_identity(to)
@returns(tuple)
@arguments(service_user=users.User, service_identity=unicode, data=SolutionSettingsTO)
def save_settings(service_user, service_identity, data):
# type: (users.User, unicode, SolutionSettingsTO) -> Tuple[SolutionSettings, SolutionIdentitySettings]
sln_settings = get_solution_settings(service_user)
sln_i_settings = get_solution_settings_or_identity_settings(sln_settings, service_identity)
if data.events_visible is not None:
sln_settings.events_visible = data.events_visible
if data.inbox_email_reminders is not None:
sln_i_settings.inbox_email_reminders_enabled = data.inbox_email_reminders
if data.iban is not MISSING:
sln_settings.iban = data.iban
if data.bic is not MISSING:
sln_settings.bic = data.bic
sln_settings.updates_pending = True
validate_sln_settings(sln_settings)
if not is_default_service_identity(service_identity):
sln_i_settings.put()
sln_settings.put()
broadcast_updates_pending(sln_settings)
return sln_settings, sln_i_settings
@returns(SolutionBrandingSettings)
@arguments(service_user=users.User, image_url=unicode)
def set_avatar(service_user, image_url):
result = urlfetch.fetch(image_url) # type: urlfetch._URLFetchResult
jpg_bytes = result.content
def trans():
keys = SolutionBrandingSettings.create_key(service_user), SolutionSettings.create_key(service_user)
branding_settings, sln_settings = db.get(keys) # type: SolutionBrandingSettings, SolutionSettings
if branding_settings.avatar_url == image_url:
return sln_settings, branding_settings
sln_settings.updates_pending = True
branding_settings.avatar_url = image_url
branding_settings.modification_time = now()
put_and_invalidate_cache(sln_settings, branding_settings)
put_avatar(base64.b64encode(jpg_bytes))
return sln_settings, branding_settings
sln_settings, branding_settings = run_in_transaction(trans, xg=True)
send_message(sln_settings.service_user, 'solutions.common.settings.avatar.updated', avatar_url=image_url)
broadcast_updates_pending(sln_settings)
return branding_settings
@returns(SolutionBrandingSettings)
@arguments(service_user=users.User, image_url=unicode, service_identity=unicode)
def set_logo(service_user, image_url, service_identity):
branding_settings = db.get(SolutionBrandingSettings.create_key(service_user)) # type: SolutionBrandingSettings
if image_url == branding_settings.logo_url:
return branding_settings
branding_settings.logo_url = image_url
branding_settings.modification_time = now()
branding_settings.put()
tasks = [
create_task(save_logo_to_media, service_user, service_identity, branding_settings.logo_url),
create_task(_regenerate_branding_with_logo, service_user),
]
schedule_tasks(tasks, FAST_QUEUE)
return branding_settings
def save_logo_to_media(service_user, service_identity, logo_url):
service_info, changed = _save_logo_to_media(service_user, service_identity, logo_url)
if changed:
send_client_action(service_user, {'type': '[settings] Update service info complete',
'payload': ServiceInfoTO.from_model(service_info).to_dict()})
@ndb.transactional()
def _save_logo_to_media(service_user, service_identity, logo_url):
# type: (users.User, str, str) -> Tuple[ServiceInfo, bool]
service_info = get_service_info(service_user, service_identity)
for media in service_info.media:
if media.content == logo_url:
# Already present in cover media list, don't do anything
return service_info, False
media_item = MediaItem(type=MediaType.IMAGE, content=logo_url)
service_info.media.insert(0, media_item)
service_info.put()
return service_info, True
def _regenerate_branding_with_logo(service_user):
users.set_user(service_user)
logging.info("%s: Replacing logo.png in the sln main branding zip with the uploaded logo" % service_user.email())
sln_main_branding, branding_settings = db.get((SolutionMainBranding.create_key(service_user),
SolutionBrandingSettings.create_key(service_user)))
picture = branding_settings.download_logo()
zip_content = replace_file_in_zip_blob(sln_main_branding.blob, 'logo.jpg', str(picture))
def trans():
sln_main_branding = get_solution_main_branding(service_user)
sln_main_branding.blob = db.Blob(zip_content)
sln_main_branding.branding_creation_time = 0
common_settings = get_solution_settings(service_user)
common_settings.updates_pending = True
put_and_invalidate_cache(sln_main_branding, common_settings)
return common_settings
common_settings = run_in_transaction(trans, xg=True)
broadcast_updates_pending(common_settings)
send_message(service_user, 'solutions.common.settings.logo.updated', logo_url=branding_settings.logo_url)
def _validate_rss_urls(urls):
# type: (set[str]) -> tuple[list[str], list[str]]
rpcs = []
invalid_urls = []
valid_urls = []
for rss_url in urls:
rpc = urlfetch.create_rpc(deadline=30)
try:
urlfetch.make_fetch_call(rpc, rss_url)
rpcs.append(rpc)
except Exception as e:
logging.debug('Error while creating fetch call for %s: %s', rss_url, e.message)
rpcs.append(None)
for rss_url, rpc in zip(urls, rpcs):
if not rpc:
invalid_urls.append(rss_url)
continue
try:
response = rpc.get_result() # type: urlfetch._URLFetchResult
except Exception as e:
logging.debug('Error while fetching %s: %s', rss_url, e.message)
invalid_urls.append(rss_url)
continue
if response.status_code != 200:
invalid_urls.append(rss_url)
else:
try:
items, _ = parse_rss_items(response.content, rss_url)
if not items:
raise Exception('Missing items %s' % rss_url)
except Exception as e:
logging.debug('Error while validating url: %s' % e.message, exc_info=True)
                invalid_urls.append(rss_url)
            else:
                valid_urls.append(rss_url)
return valid_urls, invalid_urls
@ndb.non_transactional()
def _get_lang(service_user):
return get_solution_settings(service_user).main_language
@ndb.transactional()
def save_rss_urls(service_user, service_identity, data):
# type: (users.User, unicode, SolutionRssSettingsTO) -> SolutionRssScraperSettings
rss_settings_key = SolutionRssScraperSettings.create_key(service_user, service_identity)
rss_settings = rss_settings_key.get() # type: SolutionRssScraperSettings
current_dict = {}
if not rss_settings:
rss_settings = SolutionRssScraperSettings(key=rss_settings_key)
else:
for rss_links in rss_settings.rss_links:
if not current_dict.get(rss_links.url, False):
current_dict[rss_links.url] = rss_links.dry_runned
_, invalid_urls = _validate_rss_urls({scraper.url for scraper in data.scrapers if scraper.url not in current_dict})
if invalid_urls:
raise InvalidRssLinksException(invalid_urls, _get_lang(service_user))
scraper_urls = []
rss_links = []
for scraper in reversed(data.scrapers):
if scraper.url in scraper_urls:
continue
scraper_urls.append(scraper.url)
rss_links.append(SolutionRssLink(url=scraper.url,
notify=scraper.notify,
dry_runned=current_dict.get(scraper.url, False),
group_type=scraper.group_type if scraper.group_type else None,
community_ids=scraper.community_ids))
rss_settings.rss_links = [rss_link for rss_link in reversed(rss_links)]
rss_settings.put()
return rss_settings
def get_service_info(service_user, service_identity):
# type: (users.User, str) -> ServiceInfo
if not service_identity:
service_identity = ServiceIdentity.DEFAULT
return ServiceInfo.create_key(service_user, service_identity).get()
def get_media_item_models_from_to(media_items, media_models):
# type: (List[MediaItemTO], List[UploadedFile] ) -> List[MediaItem]
# media_models should never contain None unless the UploadedFile model was very recently deleted
file_models = {f.key.urlsafe(): f for f in media_models}
media_list = []
for media_item in media_items:
if media_item.file_reference:
file_model = file_models.get(media_item.file_reference)
if file_model:
media_list.append(MediaItem.from_file_model(file_model))
else:
logging.info('File model with key %s not found, skipping...', Key(urlsafe=media_item.file_reference))
else:
media_list.append(MediaItem(type=media_item.type,
content=media_item.content,
thumbnail_url=media_item.thumbnail_url))
return media_list
def update_service_info(service_user, service_identity, data):
# type: (users.User, str, ServiceInfoTO) -> ServiceInfo
service_info = get_service_info(service_user, service_identity)
service_info_dict = service_info.to_dict()
service_info.addresses = [ServiceLocation.from_to(a) for a in data.addresses]
media_to_get = [Key(urlsafe=media.file_reference) for media in data.media if media.file_reference]
service_info.media = get_media_item_models_from_to(data.media, ndb.get_multi(media_to_get))
service_info.currency = data.currency
service_info.description = data.description
service_info.email_addresses = [SyncedNameValue.from_to(v) for v in data.email_addresses]
service_info.keywords = data.keywords
service_info.name = data.name
service_info.phone_numbers = [SyncedNameValue.from_to(v) for v in data.phone_numbers]
service_info.main_place_type = data.main_place_type
service_info.place_types = data.place_types
service_info.synced_fields = [SyncedField.from_to(v) for v in data.synced_fields]
service_info.timezone = data.timezone
service_info.visible = data.visible
service_info.websites = [SyncedNameValue.from_to(v) for v in data.websites]
if service_info_dict != service_info.to_dict():
sln_settings = get_solution_settings(service_user)
service_info.visible = service_info.visible and sln_settings.hidden_by_city is None
service_info.put()
sln_settings.updates_pending = True
# Temporarily copying these properties until we have cleaned up all usages of them
# TODO: remove properties from SolutionSettings
sln_settings.timezone = service_info.timezone
sln_settings.currency = service_info.currency
sln_settings.name = service_info.name
sln_settings.put()
try_or_defer(broadcast_updates_pending, sln_settings)
deferred.defer(maybe_publish_home_screens, service_user)
return service_info
def validate_url(url, check_existence=True):
url = url.strip() \
.lower() \
.replace('W.W.W.,', 'www.') \
.replace('www,', 'www.')
if not url:
return None
if url.startswith('www.'):
# We can't assume https here
url = 'http://%s' % url
if not url.startswith('http'):
url = 'http://%s' % url
if '.' not in url:
return None
if check_existence:
return resolve_url(url)
return url
def resolve_url(url):
try:
result = urlfetch.fetch(url, urlfetch.HEAD, follow_redirects=False,
deadline=5) # type: urlfetch._URLFetchResult
if result.status_code == 200:
return url
elif result.status_code in (301, 302):
return result.headers['location']
except Exception as e:
logging.debug('Error while checking url %s: %s', url, e.message, exc_info=True)
return None
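# Illustrative examples (not part of the original module) of validate_url with
# check_existence=False (no network call is made):
#   'www.example.com'  -> 'http://www.example.com'
#   'example.com'      -> 'http://example.com'
#   'not-a-url'        -> None   (no dot present)
#   '   '              -> None   (empty after stripping)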
def parse_facebook_url(url):
    # type: (str) -> Optional[str]
    try:
page = url.strip() \
.replace('m.facebook', 'facebook') \
.replace('fb.com', 'facebook.com') \
.replace('nl-nl.', '') \
.replace('http:', 'https:')
if page.startswith('@'):
page = 'https://www.facebook.com/%s' % page.strip('@')
elif not page.lower().startswith('https'):
page = 'https://%s' % page
parsed = validate_url(page, check_existence=False)
if not parsed:
return None
result = urlparse(page) # type: ParseResult
netloc = result.netloc.lower()
if not netloc.startswith('business') and not netloc.startswith('www.'):
netloc = 'www.%s' % netloc
if netloc in ('business.facebook.com', 'www.facebook.com'):
page_url = 'https://{netloc}{path}'.format(netloc=netloc, path=result.path)
if 'id=' in result.query or 'q=' in result.query:
return page_url + '?%s' % result.query
return page_url
except:
logging.debug('parse_facebook_url invalid_url: %s', url, exc_info=True)
return None
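# Illustrative examples (not part of the original module) of parse_facebook_url:
#   '@mypage'                       -> 'https://www.facebook.com/mypage'
#   'fb.com/mypage'                 -> 'https://www.facebook.com/mypage'
#   'http://m.facebook.com/mypage'  -> 'https://www.facebook.com/mypage'
#   'https://example.com/mypage'    -> None   (netloc is not facebook.com)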
def get_cirklo_privacy_groups(lang):
from markdown import Markdown
from solutions.common.markdown_newtab import NewTabExtension
groups = [
PrivacySettingsGroupTO(
page=1,
description='<h4>%s</h4>' % translate(lang, 'consent_share_with_city'),
items=[PrivacySettingsTO(
type=SolutionServiceConsent.TYPE_CITY_CONTACT,
enabled=False,
label=translate(lang, 'consent_city_contact')
)]
)
]
md = Markdown(output='html', extensions=['nl2br', NewTabExtension()])
lines = [
'#### %s' % translate(lang, 'cirklo_info_title'),
translate(lang, 'cirklo_info_text'),
'',
translate(lang, 'cirklo_participation_text'),
]
groups.append(PrivacySettingsGroupTO(
page=2,
description=md.convert('\n\n'.join(lines)),
items=[PrivacySettingsTO(
type=SolutionServiceConsent.TYPE_CIRKLO_SHARE,
enabled=False,
label=translate(lang, 'consent_cirklo_share')
)]))
return groups
def get_consents_for_community(community_id, lang, user_consent_types):
from markdown import Markdown
from solutions.common.markdown_newtab import NewTabExtension
community = get_community(community_id)
if not community:
raise HttpNotFoundException('Community %s not found' % community_id)
service_user = users.User(community.main_service)
city_service_settings = get_solution_settings(service_user)
groups = [
PrivacySettingsGroupTO(
page=1,
description='<h4>%s</h4>' % translate(lang, 'consent_share_with_city'),
items=[PrivacySettingsTO(
type=SolutionServiceConsent.TYPE_CITY_CONTACT,
enabled=SolutionServiceConsent.TYPE_CITY_CONTACT in user_consent_types,
label=translate(lang, 'consent_city_contact')
)]
),
PrivacySettingsGroupTO(
page=1,
description='<h4>%s</h4>' % translate(lang, 'consent_platform_communication'),
items=[
PrivacySettingsTO(
type=SolutionServiceConsent.TYPE_NEWSLETTER,
enabled=SolutionServiceConsent.TYPE_NEWSLETTER in user_consent_types,
label=translate(lang, 'email_consent_newsletter')
), PrivacySettingsTO(
type=SolutionServiceConsent.TYPE_EMAIL_MARKETING,
enabled=SolutionServiceConsent.TYPE_EMAIL_MARKETING in user_consent_types,
label=translate(lang, 'email_consent_marketing')
)
]
)
]
md = Markdown(output='html', extensions=['nl2br', NewTabExtension()])
if SolutionModule.CIRKLO_VOUCHERS in city_service_settings.modules:
lines = [
'#### %s' % translate(lang, 'cirklo_info_title'),
translate(lang, 'cirklo_info_text'),
'',
translate(lang, 'cirklo_participation_text'),
]
groups.append(PrivacySettingsGroupTO(
page=2,
description=md.convert('\n\n'.join(lines)),
items=[PrivacySettingsTO(
type=SolutionServiceConsent.TYPE_CIRKLO_SHARE,
enabled=SolutionServiceConsent.TYPE_CIRKLO_SHARE in user_consent_types,
label=translate(lang, 'consent_cirklo_share')
)]))
return groups
|
|
import pickle
from sh import fgrep
from sh import nova
from sh import tail
from datetime import datetime
import json
import logging
import sys
import os
import pprint
pp = pprint.PrettyPrinter(indent=4)
# import shelve
from cloudmesh.config.cm_config import cm_config
# from openstack.cm_compute import openstack as os_client
# Error Cannot Import Openstack
from cloudmesh.iaas.openstack.cm_compute import openstack
from cloudmesh.iaas.eucalyptus.eucalyptus_new import eucalyptus
from cloudmesh.iaas.azure.cm_azure import cm_azure as azure
from cloudmesh_base.locations import config_file
try:
# from sh import fgmetric
from fgmetric.FGMetricsAPI import FGMetricsAPI
# OR
# from sh import fgmetric
except:
    # TODO THIS IS NOT HOW WE DO IT, SETUP CONTAINS ALL REQUIREMENTS
# FIX SETUP
print "---------------------"
print "fgmetric not imported"
print "---------------------"
pass
try:
from bson import json_util
except:
    # TODO THIS IS NOT HOW WE DO IT, SETUP CONTAINS ALL REQUIREMENTS
# FIX SETUP
print "--------------------------------"
print "Please run 'pip install pymongo'"
print "--------------------------------"
#
# WHY ARE WE NOT MORE CLEANLY SEPARATING METRIC FROM THIS?
class metrics:
#
# global variables that define the information managed by this class
#
    # dict that holds vms, flavors, images for all IaaS
clouds = {}
# array with keys from the user
keys = []
#
# variables that we can most likely eliminate
#
# user needs to come from credential ...
user = "gvonlasz"
#
# initialization methods
#
def __init__(self):
self.clear()
# Read Yaml File to find all the cloud configurations present
self.config()
try:
self.metric_api = FGMetricsAPI()
except:
            self.metric_api = None
def clear(self):
self.clouds = {}
self.keys = []
self.user = "gvonlasz"
#
# some metric methods
#
def get_metrics_cli(self, args):
""" Get usage data from FG Metric CLI"""
""" This is replica with get_metrics but using CLI instead of API """
"""
Args:
args (dict): parameters for CLI with option
Return:
(dict): output of fgmetric in a dict type
Raise:
n/a
"""
try:
res = fgmetric(
args) # args should be list-lized before send it out as a parameter
return json.loads(res, object_hook=json_util.object_hook)
except:
pass
def get_metrics(self, args):
"""Get usage data from FG Metrics"""
if not self.metric_api:
return
try:
args["user"] = args["user"] or self.user
self._set_metric_api_vars(args)
# print args
stats = self.metric_api._set_dict_vars()
metrics = self.metric_api.get_stats()
stats["stats"] = metrics
self.metrics = stats
# print self.metrics
except:
print sys.exc_info()
pass
return self.metrics
def get_realtime_metrics(self, args):
""" get real-time usage data from FG Metrics"""
# IN DEVELOPMENT
if not self.metric_api:
return
try:
print 1
except:
pass
def _set_metric_api_vars(self, args):
self.metric_api.set_date(args["s_date"], args["e_date"])
self.metric_api.set_metric(
"count runtime cores mem disks") # args["metric"])
self.metric_api.set_user(args["user"])
self.metric_api.set_cloud(args["cloud"])
self.metric_api.set_hostname(args["host"])
self.metric_api.set_period(args["period"])
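    # Hedged sketch (not part of the original module): the args dict consumed by
    # get_metrics()/_set_metric_api_vars() is expected to provide these keys
    # (the date format shown is an assumption):
    #
    #   args = {"user": "gvonlasz",
    #           "s_date": "2013-01-01", "e_date": "2013-01-31",
    #           "cloud": "india", "host": "hostA", "period": "monthly"}
    #   usage = metrics().get_metrics(args)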
#
# the configuration method that must be called to get the cloud info
#
def config(self):
"""
reads the cloudmesh yaml file that defines which clouds build
the cloudmesh
"""
configuration = cm_config()
pp.pprint(configuration)
for cloud_name in configuration.keys():
print "--------------------"
try:
credential = configuration.get(key=cloud_name)
cloud_type = credential['cm_type']
print credential
print ">>>>>>>", cloud_name, cloud_type
if cloud_type in ['openstack', 'eucalyptus']:
print "AAAAA"
self.clouds[cloud_name] = {
'cm_type': cloud_type, 'credential': credential}
print "BBBB"
try:
self.update(cloud_name, cloud_type)
self.clouds[cloud_name] = {
'cm_type': cloud_type, 'credential': credential}
except:
print "ERROR: can not connect to", cloud_name
print "CCCC"
except Exception, e:
print "ERROR: Not a cloud:", cloud_name, e
return
#
    # important get methods
#
def get(self):
"""returns the dict that contains all the information"""
return self.clouds
#
# important print methods
#
# includes sanitizing to remove the credentials
#
def __str__(self):
tmp = self._sanitize()
print tmp
def _sanitize(self):
# copy the self.cloud
# delete the attributes called credential for all clouds
all_keys = self.clouds.keys()
for cloud in all_keys:
self.clouds[cloud]['credential'] = {}
return self.clouds
def dump(self):
tmp = self._sanitize()
print json.dumps(tmp, indent=4)
#
    # the refresh method that gets up-to-date information for cloudmesh.
    # If cloud_name is provided, only that cloud is refreshed;
    # otherwise all the clouds are refreshed.
#
def refresh(self, cloud_name=None):
print "Refershing cloud %s" % cloud_name
servers = {}
cloud = None
if(cloud_name is None):
all_clouds = self.clouds.keys()
for cloud_name in all_clouds:
try:
type = self.clouds[cloud_name]['cm_type']
if type == 'openstack':
cloud = openstack(cloud_name)
elif type == 'eucalyptus':
                        # Where do I find the project name? Is there a default one?
cloud = eucalyptus(cloud_name, 'fg-82')
elif type == 'azure':
cloud = azure.cm_azure()
cloud.refresh()
self.clouds[cloud_name]['flavors'] = cloud.flavors
self.clouds[cloud_name]['images'] = cloud.images
self.clouds[cloud_name]['servers'] = cloud.servers
except Exception, e:
print e
else:
try:
type = self.clouds[cloud_name]['cm_type']
if type == 'openstack':
cloud = openstack(cloud_name)
elif type == 'eucalyptus':
                    # Where do I find the project name? Is there a default one?
                    # This is obviously wrong, as the tenant comes from the profile.
# TODO EUCALYPTUS DOES NOT YET WORK
cloud = eucalyptus(cloud_name, 'fg-82')
elif type == 'azure':
cloud = azure.cm_azure()
cloud.refresh()
self.clouds[cloud_name]['flavors'] = cloud.flavors
self.clouds[cloud_name]['images'] = cloud.images
self.clouds[cloud_name]['servers'] = cloud.servers
except Exception, e:
print e
return cloud
def update(self, name, type):
servers = self.refresh(name)
self.clouds[name].update({'name': name,
'cm_type': type,
"servers": servers})
return
def add(self, name, type):
try:
self.clouds[name]
print "Error: Cloud %s already exists" % name
except:
self.update(name, type)
"""
def get_keys(self):
return self.keys
def refresh_keys(self):
self.keys = []
result = fgrep(tail(nova("keypair-list"), "-n", "+4"),"-v","+")
for line in result:
(front, name, signature, back) = line.split("|")
self.keys.append(name.strip())
return self.keys
def refresh(self):
keys = self.refresh_keys()
for cloud in keys:
self.refresh(cloud)
# p = Pool(4)
# update = self.refresh
# output = p.map(update, keys)
"""
#
# saves and reads the dict to and from a file
#
def save(self):
log.error("not implemented")
"""
tmp = self._sanitize()
file = open(self.datastore, 'wb')
# pickle.dump(self.keys, file)
pickle.dump(tmp, file)
file.close()
"""
def load(self):
log.error("not implemented")
"""
file = open(self.datastore, 'rb')
# self.keys = pickle.load(file)
self.clouds = pickle.load(file)
''' above returns:
[u'gvonlasz']
So, call pickle again to get more:
{'india': {'name': 'india',
'servers': {u'2731c421-d985-44ce-91bf-2a89ce4ba033': {'cloud': 'india',
'id': u'2731c421-d985-44ce-91bf-2a89ce4ba033',
'ip': u'vlan102=10.1.2.85, 149.165.158.7',
'name': u'gvonlasz-001',
'refresh': '2013-02-11 20:30:04.472583',
'status': u'ACTIVE'},
...
'''
self.clouds = pickle.load(file)
file.close()
"""
#
# TODO: convenient +, += functions to add dicts with cm_type
#
def __add__(self, other):
"""
type based add function c = cloudmesh(...); b = c + other
other can be a dict that contains information about the object
and it will be nicely inserted into the overall cloudmesh dict
the type will be identified via a cm_type attribute in the
dict Nn attribute cm_cloud identifies in which cloud the
element is stored.
"""
if other.cm_type == "image":
print "TODO: not implemented yet"
return
elif other.cm_type == "vm":
print "TODO: not implemented yet"
return
elif other.cm_type == "flavor":
print "TODO: not implemented yet"
return
elif other.cm_type == "cloudmesh":
print "TODO: not implemented yet"
return
else:
print "Error: %s type does not exist", cm_type
print "Error: Ignoring add"
return
def __iadd__(self, other):
"""
type based add function c = cloudmesh(...); c += other other
can be a dict that contains information about the object and
it will be nicely inserted into the overall cloudmesh dict the
type will be identified via a cm_type attribute in the dict.
        An attribute cm_cloud identifies in which cloud the element is
stored.
"""
if other.cm_type == "image":
print "TODO: not implemented yet"
return
elif other.cm_type == "vm":
print "TODO: not implemented yet"
return
elif other.cm_type == "flavor":
print "TODO: not implemented yet"
return
elif other.cm_type == "cloudmesh":
print "TODO: not implemented yet"
return
else:
print "Error: %s type does not exist", cm_type
print "Error: Ignoring add"
return
#
# MAIN METHOD FOR TESTING
#
if __name__ == "__main__":
    c = metrics()
print c.clouds
"""
c.config()
c.dump()
c = cloud_mesh()
c.refresh()
c.add('india', 'openstack')
c.refresh_keys()
c.dump()
c.save()
print 70 * "-"
c.clear()
c.dump()
print 70 * "-"
c.load()
c.dump()
print 70 * "-"
"""
"""
india_os = {
"OS_TENANT_NAME" : '',
"OS_USERNAME" : '',
"OS_PASSWORD" : '',
"OS_AUTH_URL" : '',
}
(attribute, passwd) = fgrep("OS_PASSWORD",config_file("openstack/novarc")).replace("\n","").split("=")
india_os['OS_PASSWORD'] = passwd
username = india_os['OS_USERNAME']
password = india_os['OS_PASSWORD']
authurl = india_os['OS_AUTH_URL']
tenant = india_os['OS_TENANT_NAME']
print password
'''
username = os.environ['OS_USERNAME']
password = os.environ['OS_PASSWORD']
authurl = os.environ['OS_AUTH_URL']
'''
india = cloud_openstack("india", authurl, tenant, username, password)
india._vm_show("gvonlasz-001")
india.dump()
india._vm_show("gvonlasz-001")
india.dump()
"""
|
|
from thespian.test import *
from time import sleep
import pytest
from thespian.actors import *
from datetime import timedelta
class PreRegActor(ActorTypeDispatcher):
def receiveMsg_str(self, regaddr, sender):
self.preRegisterRemoteSystem(regaddr, {})
self.send(sender, 'Registered')
@pytest.fixture(params=['simpleSystemBase',
'multiprocQueueBase',
'multiprocUDPBase',
'multiprocTCPBase',
'multiprocTCPBase-AdminRouting',
'multiprocTCPBase-AdminRoutingTXOnly',
])
def testsystems(request):
sysbase = request.param.partition('-')[0]
adminRouting = request.param.endswith('-AdminRouting')
txOnly = request.param.endswith('-AdminRoutingTXOnly')
victoria_port = get_free_admin_port()
leicester_port = get_free_admin_port()
picadilly_port = get_free_admin_port()
tottenham_port = get_free_admin_port()
convaddrs = [ 'localhost:%d' % victoria_port,
'localhost:%d' % leicester_port,
'localhost:%d' % picadilly_port,
# tottenham cannot be a leader
]
basecaps = { 'Convention Address.IPv4': convaddrs,
'Admin Routing': adminRouting,
}
victoria_caps = basecaps.copy()
victoria_caps.update({ 'Cyan': 19,
'Yellow': 11,
'Green': 11,
'Admin Port': victoria_port,
})
leicester_caps = basecaps.copy()
leicester_caps.update({ 'Blue': 4,
'Black': 8,
'Admin Port': leicester_port,
})
picadilly_caps = basecaps.copy()
picadilly_caps.update({ 'Blue': 6,
'Brown': 12,
'Admin Port': picadilly_port,
})
tottenham_caps = basecaps.copy()
tottenham_caps.update({ 'Brown': 7, 'Red': 10,
'Admin Port': tottenham_port,
})
victoria = ActorSystem(systemBase=sysbase,
transientUnique=True,
logDefs=simpleActorTestLogging(),
capabilities=victoria_caps)
victoria.base_name = request.param
victoria.port_num = victoria_port
leicester = ActorSystem(systemBase=sysbase,
transientUnique=True,
logDefs=simpleActorTestLogging(),
capabilities=leicester_caps)
leicester.base_name = request.param
leicester.port_num = leicester_port
picadilly = ActorSystem(systemBase=sysbase,
transientUnique=True,
logDefs=simpleActorTestLogging(),
capabilities=picadilly_caps)
picadilly.base_name = request.param
picadilly.port_num = picadilly_port
tottenham = ActorSystem(systemBase=sysbase,
transientUnique=True,
logDefs=simpleActorTestLogging(),
capabilities=tottenham_caps)
tottenham.base_name = request.param
tottenham.port_num = tottenham_port
request.addfinalizer(lambda victoria=victoria, leicester=leicester,
picadilly=picadilly, tottenham=tottenham:
tottenham.shutdown() or
leicester.shutdown() or
picadilly.shutdown() or
victoria.shutdown())
if txOnly:
assert 'Registered' == victoria.ask(victoria.createActor(PreRegActor),
'localhost:%d'%victoria.port_num,
timedelta(seconds=3))
assert 'Registered' == leicester.ask(leicester.createActor(PreRegActor),
'localhost:%d'%leicester.port_num,
timedelta(seconds=3))
assert 'Registered' == picadilly.ask(picadilly.createActor(PreRegActor),
'localhost:%d'%picadilly.port_num,
timedelta(seconds=3))
assert 'Registered' == tottenham.ask(tottenham.createActor(PreRegActor),
'localhost:%d'%tottenham.port_num,
timedelta(seconds=3))
sleep(1.25) # allow all systems to join the Convention
return convaddrs, victoria, leicester, picadilly, tottenham
class Sean(Actor):
@staticmethod
def actorSystemCapabilityCheck(capabilities, actorRequirements):
return (capabilities.get('Blue', 0) +
capabilities.get('Green', 0)) > 3;
def receiveMessage(self, message, sender):
if isinstance(message, str):
self.send(sender, '%s is not enough' % message)
class Roger(Actor):
@staticmethod
def actorSystemCapabilityCheck(capabilities, actorRequirements):
return capabilities.get('Cyan', 0) > 0
def receiveMessage(self, message, sender):
if isinstance(message, str):
self.send(sender, "Don't count on it, %s" % message)
class M(Actor):
@staticmethod
def actorSystemCapabilityCheck(capabilities, actorRequirements):
return capabilities.get('Red', 0) > 0
def receiveMessage(self, message, sender):
if isinstance(message, str):
if message == 'Sean':
self.send(sender, self.createActor(Sean))
if message == 'Roger':
self.send(sender, self.createActor(Roger))
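# Hedged note (not part of the original test): actorSystemCapabilityCheck above is what
# lets the convention place each actor on a member system. With the fixture capabilities:
#   Sean  needs Blue + Green > 3 -> victoria (Green 11), leicester (Blue 4) or picadilly (Blue 6)
#   Roger needs Cyan > 0         -> only victoria
#   M     needs Red > 0          -> only tottenham
# which is why asking m for a Roger returns None once victoria has shut down (test03/test04).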
class TestFuncHAConvention():
def test01_systems_can_start(self, testsystems):
convaddrs, victoria, leicester, picadilly, tottenham = testsystems
actor_system_unsupported(victoria,
'simpleSystemBase', 'multiprocQueueBase')
pass
def test02_actors_can_start(self, testsystems):
convaddrs, victoria, leicester, picadilly, tottenham = testsystems
actor_system_unsupported(victoria,
'simpleSystemBase', 'multiprocQueueBase')
sean = victoria.createActor(Sean)
roger = victoria.createActor(Roger)
m = picadilly.createActor(M)
sleep(1) # wait for things to settle
r = victoria.ask(sean, "diamonds", 0.25)
assert r == "diamonds is not enough"
r = victoria.ask(roger, "zorin", 0.25)
assert r == "Don't count on it, zorin"
bond1 = leicester.ask(m, "Sean", 0.25)
assert bond1
r = leicester.ask(bond1, "forever", 0.25)
assert r == "forever is not enough"
bond2 = leicester.ask(m, "Roger", 0.25)
assert bond2
r = leicester.ask(bond2, "jaws", 0.25)
assert r == "Don't count on it, jaws"
def test03_actor_create_failure_on_leader_exit(self, testsystems):
convaddrs, victoria, leicester, picadilly, tottenham = testsystems
actor_system_unsupported(victoria,
'simpleSystemBase', 'multiprocQueueBase')
sean = victoria.createActor(Sean)
roger = victoria.createActor(Roger)
m = picadilly.createActor(M)
sleep(1) # wait for things to settle
bond1 = leicester.ask(m, "Sean", 0.25)
assert bond1
r = leicester.ask(bond1, "forever", 0.25)
assert r == "forever is not enough"
bond2 = leicester.ask(m, "Roger", 0.25)
assert bond2
r = leicester.ask(bond2, "jaws", 0.25)
assert r == "Don't count on it, jaws"
victoria.shutdown()
sleep(2)
bond3 = leicester.ask(m, "Sean", 0.25)
assert bond3
r = leicester.ask(bond3, "forever", 0.25)
assert r == "forever is not enough"
bond4 = leicester.ask(m, "Roger", 0.25)
assert (bond4 is None)
def test04_actor_create_on_leader_re_enter(self, testsystems):
convaddrs, victoria, leicester, picadilly, tottenham = testsystems
actor_system_unsupported(victoria,
'simpleSystemBase', 'multiprocQueueBase')
sean = victoria.createActor(Sean)
roger = victoria.createActor(Roger)
m = picadilly.createActor(M)
sleep(1) # wait for things to settle
bond1 = leicester.ask(m, "Sean", 0.25)
assert bond1
r = leicester.ask(bond1, "forever", 0.25)
assert r == "forever is not enough"
bond2 = leicester.ask(m, "Roger", 0.25)
assert bond2
r = leicester.ask(bond2, "jaws", 0.25)
assert r == "Don't count on it, jaws"
victoria.shutdown()
sleep(2)
bond3 = leicester.ask(m, "Sean", 0.25)
assert bond3
r = leicester.ask(bond3, "forever", 0.25)
assert r == "forever is not enough"
bond4 = leicester.ask(m, "Roger", 0.25)
assert (bond4 is None)
# --- same as test03 up to this point ---
victoria2 = ActorSystem(systemBase=victoria.base_name.partition('-')[0],
transientUnique=True,
logDefs=simpleActorTestLogging(),
capabilities={ 'Cyan': 12,
'Admin Port': victoria.port_num,
'Convention Address.IPv4': convaddrs
})
victoria2.base_name = victoria.base_name
victoria2.port_num = victoria.port_num
        sleep(2)  # wait for victoria2 to rejoin the convention and become leader again
try:
bond5 = leicester.ask(m, "Sean", 0.25)
assert bond5
r = leicester.ask(bond5, "money", 0.25)
assert r == "money is not enough"
bond6 = leicester.ask(m, "Roger", 0.25)
assert bond6
r = leicester.ask(bond6, "sharks", 0.25)
assert r == "Don't count on it, sharks"
finally:
victoria2.shutdown()
|
|
# Copyright 2012 Nicira, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from lxml import etree
import mock
import os_vif
from os_vif import exception as osv_exception
from os_vif import objects as osv_objects
from os_vif.objects import fields as osv_fields
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
import six
from nova import exception
from nova.network import linux_net
from nova.network import model as network_model
from nova import objects
from nova.pci import utils as pci_utils
from nova import test
from nova.tests.unit import matchers
from nova.tests.unit.virt import fakelibosinfo
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import host
from nova.virt.libvirt import vif
CONF = cfg.CONF
class LibvirtVifTestCase(test.NoDBTestCase):
gateway_bridge_4 = network_model.IP(address='101.168.1.1', type='gateway')
dns_bridge_4 = network_model.IP(address='8.8.8.8', type=None)
ips_bridge_4 = [network_model.IP(address='101.168.1.9', type=None)]
subnet_bridge_4 = network_model.Subnet(
cidr='101.168.1.0/24',
dns=[dns_bridge_4],
gateway=gateway_bridge_4,
routes=None,
dhcp_server='191.168.1.1')
gateway_bridge_6 = network_model.IP(address='101:1db9::1', type='gateway')
subnet_bridge_6 = network_model.Subnet(cidr='101:1db9::/64',
dns=None,
gateway=gateway_bridge_6,
ips=None,
routes=None)
network_bridge = network_model.Network(id=uuids.network,
bridge='br0',
label=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
bridge_interface='eth0',
vlan=99, mtu=9000)
vif_bridge = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_BRIDGE,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
network_bridge_neutron = network_model.Network(id=uuids.network,
bridge=None,
label=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
bridge_interface='eth0',
vlan=99)
vif_bridge_neutron = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge_neutron,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=uuids.ovs)
network_ovs = network_model.Network(id=uuids.network,
bridge='br0',
label=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
bridge_interface=None,
vlan=99, mtu=1000)
network_ivs = network_model.Network(id=uuids.network,
bridge='br0',
label=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
bridge_interface=None,
vlan=99)
vif_agilio_ovs = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_AGILIO_OVS,
details={'port_filter': False},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=uuids.ovs)
vif_agilio_ovs_direct = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_AGILIO_OVS,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=uuids.ovs,
devname='tap-xxx-yyy-zzz',
profile={'pci_slot': '0000:0a:00.1'})
vif_agilio_ovs_forwarder = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_AGILIO_OVS,
vnic_type=network_model.VNIC_TYPE_VIRTIO_FORWARDER,
profile={'pci_slot': '0000:0a:00.1'},
details={
network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET: '/tmp/usv-xxx-yyy-zzz',
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True},
ovs_interfaceid=uuids.ovs, mtu=1500)
vif_ovs = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'port_filter': False},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=uuids.ovs)
vif_ovs_direct = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
vnic_type=network_model.VNIC_TYPE_DIRECT,
profile={'pci_slot': '0000:0a:00.1'},
type=network_model.VIF_TYPE_OVS,
details={'port_filter': False},
ovs_interfaceid=uuids.ovs)
vif_ovs_filter_cap = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=network_model.VIF_TYPE_OVS,
details={'port_filter': True},
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=uuids.ovs)
vif_ovs_legacy = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ovs,
type=None,
devname=None,
ovs_interfaceid=None)
vif_ivs = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_ivs,
type=network_model.VIF_TYPE_IVS,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=uuids.ovs)
vif_none = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
network_8021 = network_model.Network(id=uuids.network,
bridge=None,
label=None,
subnets=[subnet_bridge_4,
subnet_bridge_6],
interface='eth0',
vlan=99)
vif_8021qbh = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBH,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_PROFILEID:
'MyPortProfile'},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: 100},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb_trusted = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={
network_model.VIF_DETAILS_VLAN: 100},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1',
'trusted': 'True'})
vif_hostdev_physical = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HOSTDEV,
vnic_type=network_model.VNIC_TYPE_DIRECT_PHYSICAL,
ovs_interfaceid=None,
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_hw_veb_macvtap = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_HW_VEB,
vnic_type=network_model.VNIC_TYPE_MACVTAP,
ovs_interfaceid=None,
details={network_model.VIF_DETAILS_VLAN: 100},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_8021qbg = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_802_QBG,
ovs_interfaceid=None,
qbg_params=network_model.VIF8021QbgParams(
managerid="xxx-yyy-zzz",
typeid="aaa-bbb-ccc",
typeidversion="1",
instanceid="ddd-eee-fff"))
network_midonet = network_model.Network(id=uuids.network,
label=None,
bridge=None,
subnets=[subnet_bridge_4],
interface='eth0')
network_vrouter = network_model.Network(id=uuids.network,
label=None,
bridge=None,
subnets=[subnet_bridge_4, subnet_bridge_6],
interface='eth0')
vif_vrouter = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_vrouter,
type=network_model.VIF_TYPE_VROUTER,
devname='tap-xxx-yyy-zzz')
vif_ib_hostdev = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_IB_HOSTDEV,
vnic_type=network_model.VNIC_TYPE_DIRECT,
ovs_interfaceid=None,
details={network_model.VIF_DETAILS_VLAN: 100},
profile={'pci_vendor_info': '1137:0043',
'pci_slot': '0000:0a:00.1',
'physical_network': 'phynet1'})
vif_midonet = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_midonet,
type=network_model.VIF_TYPE_MIDONET,
devname='tap-xxx-yyy-zzz')
vif_tap = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
type=network_model.VIF_TYPE_TAP,
devname='tap-xxx-yyy-zzz')
vif_iovisor = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_IOVISOR,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid=None)
vif_vhostuser = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details={
network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET: '/tmp/vif-xxx-yyy-zzz'
})
vif_vhostuser_ovs = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details={
network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client',
network_model.VIF_DETAILS_VHOSTUSER_SOCKET: '/tmp/usv-xxx-yyy-zzz',
network_model.VIF_DETAILS_VHOSTUSER_OVS_PLUG: True},
ovs_interfaceid=uuids.ovs, mtu=1500)
vif_vhostuser_no_path = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_bridge,
type=network_model.VIF_TYPE_VHOSTUSER,
details={network_model.VIF_DETAILS_VHOSTUSER_MODE: 'client'})
vif_macvtap_vlan = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_MACVTAP,
details={
network_model.VIF_DETAILS_VLAN: 1,
network_model.VIF_DETAILS_PHYS_INTERFACE: 'eth0',
network_model.VIF_DETAILS_MACVTAP_SOURCE: 'eth0.1',
network_model.VIF_DETAILS_MACVTAP_MODE: 'vepa'})
vif_macvtap_flat = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_MACVTAP,
details={
network_model.VIF_DETAILS_PHYS_INTERFACE: 'eth0',
network_model.VIF_DETAILS_MACVTAP_SOURCE: 'eth0',
network_model.VIF_DETAILS_MACVTAP_MODE: 'bridge'})
vif_macvtap_exception = network_model.VIF(id=uuids.vif,
address='ca:fe:de:ad:be:ef',
network=network_8021,
type=network_model.VIF_TYPE_MACVTAP)
instance = objects.Instance(id=1,
uuid='f0000000-0000-0000-0000-000000000001',
project_id=723)
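# The quota extra_specs below are turned into <bandwidth> inbound/outbound
# average/peak/burst attributes on the generated interface XML; the
# _test_model_qemu() helper asserts that each value round-trips.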
bandwidth = {
'quota:vif_inbound_peak': '200',
'quota:vif_outbound_peak': '20',
'quota:vif_inbound_average': '100',
'quota:vif_outbound_average': '10',
'quota:vif_inbound_burst': '300',
'quota:vif_outbound_burst': '30'
}
def setup_os_vif_objects(self):
self.os_vif_network = osv_objects.network.Network(
id="b82c1929-051e-481d-8110-4669916c7915",
label="Demo Net",
subnets=osv_objects.subnet.SubnetList(
objects=[]),
mtu=9000)
self.os_vif_bridge = osv_objects.vif.VIFBridge(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="linux_bridge",
vif_name="nicdc065497-3c",
bridge_name="br100",
has_traffic_filtering=False,
network=self.os_vif_network)
self.os_vif_ovs_prof = osv_objects.vif.VIFPortProfileOpenVSwitch(
interface_id="07bd6cea-fb37-4594-b769-90fc51854ee9",
profile_id="fishfood")
self.os_vif_repr_prof = osv_objects.vif.VIFPortProfileOVSRepresentor(
interface_id="07bd6cea-fb37-4594-b769-90fc51854ee9",
profile_id="fishfood",
representor_name='nicdc065497-3c',
representor_address='0000:0a:00.1')
self.os_vif_agilio_ovs = osv_objects.vif.VIFOpenVSwitch(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="agilio_ovs",
vif_name="nicdc065497-3c",
bridge_name="br0",
port_profile=self.os_vif_ovs_prof,
network=self.os_vif_network)
self.os_vif_agilio_forwarder = osv_objects.vif.VIFVHostUser(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="agilio_ovs",
vif_name="nicdc065497-3c",
path='/var/run/openvswitch/vhudc065497-3c',
mode='client',
port_profile=self.os_vif_repr_prof,
network=self.os_vif_network)
self.os_vif_agilio_direct = osv_objects.vif.VIFHostDevice(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="agilio_ovs",
vif_name="nicdc065497-3c",
dev_type=osv_fields.VIFHostDeviceDevType.ETHERNET,
dev_address='0000:0a:00.1',
port_profile=self.os_vif_repr_prof,
network=self.os_vif_network)
self.os_vif_ovs = osv_objects.vif.VIFOpenVSwitch(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
unplugin="linux_bridge",
vif_name="nicdc065497-3c",
bridge_name="br0",
port_profile=self.os_vif_ovs_prof,
network=self.os_vif_network)
self.os_vif_ovs_hybrid = osv_objects.vif.VIFBridge(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
unplugin="linux_bridge",
vif_name="nicdc065497-3c",
bridge_name="br0",
port_profile=self.os_vif_ovs_prof,
has_traffic_filtering=False,
network=self.os_vif_network)
self.os_vif_vhostuser = osv_objects.vif.VIFVHostUser(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="openvswitch",
vif_name="vhudc065497-3c",
path='/var/run/openvswitch/vhudc065497-3c',
mode='client',
port_profile=self.os_vif_ovs_prof,
network=self.os_vif_network)
self.os_vif_hostdevice_ethernet = osv_objects.vif.VIFHostDevice(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="linux_bridge",
vif_name="nicdc065497-3c",
dev_type=osv_fields.VIFHostDeviceDevType.ETHERNET,
dev_address='0000:0a:00.1',
network=self.os_vif_network)
self.os_vif_hostdevice_generic = osv_objects.vif.VIFHostDevice(
id="dc065497-3c8d-4f44-8fb4-e1d33c16a536",
address="22:52:25:62:e2:aa",
plugin="linux_bridge",
vif_name="nicdc065497-3c",
dev_type=osv_fields.VIFHostDeviceDevType.GENERIC,
dev_address='0000:0a:00.1',
network=self.os_vif_network)
self.os_vif_inst_info = osv_objects.instance_info.InstanceInfo(
uuid="d5b1090c-9e00-4fa4-9504-4b1494857970",
name="instance-000004da",
project_id="2f37d7f6-e51a-4a1f-8b6e-b0917ffc8390")
def setUp(self):
super(LibvirtVifTestCase, self).setUp()
self.useFixture(fakelibvirt.FakeLibvirtFixture(stub_os_vif=False))
self.flags(firewall_driver=None)
# os_vif.initialize is typically called during nova-compute startup
os_vif.initialize()
self.setup_os_vif_objects()
self.executes = []
def fake_execute(*cmd, **kwargs):
self.executes.append(cmd)
return None, None
self.stub_out('nova.utils.execute', fake_execute)
def _get_node(self, xml):
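# Parse the guest XML and return the single <interface> element under
# <devices>; every test expects exactly one interface in the domain.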
doc = etree.fromstring(xml)
ret = doc.findall('./devices/interface')
self.assertEqual(len(ret), 1)
return ret[0]
def _assertMacEquals(self, node, vif):
mac = node.find("mac").get("address")
self.assertEqual(mac, vif['address'])
def _assertTypeEquals(self, node, type, attr, source, br_want,
prefix=None):
self.assertEqual(node.get("type"), type)
br_name = node.find(attr).get(source)
if prefix is None:
self.assertEqual(br_name, br_want)
else:
self.assertTrue(br_name.startswith(prefix))
def _assertTypeAndMacEquals(self, node, type, attr, source, vif,
br_want=None, size=0, prefix=None):
ret = node.findall("filterref")
self.assertEqual(len(ret), size)
self._assertTypeEquals(node, type, attr, source, br_want,
prefix)
self._assertMacEquals(node, vif)
def _assertModel(self, xml, model_want=None, driver_want=None):
node = self._get_node(xml)
if model_want is None:
ret = node.findall("model")
self.assertEqual(len(ret), 0)
else:
model = node.find("model").get("type")
self.assertEqual(model, model_want)
if driver_want is None:
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
else:
driver = node.find("driver").get("name")
self.assertEqual(driver, driver_want)
def _assertTypeAndPciEquals(self, node, type, vif):
self.assertEqual(node.get("type"), type)
self._assertPciEqual(node, vif, type="pci")
def _assertPciEqual(self, node, vif, type=None):
address = node.find("source").find("address")
if type:
addr_type = address.get("type")
self.assertEqual(type, addr_type)
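# Rebuild the PCI address as "domain:bus:slot.function", dropping the
# leading "0x" from each component, so it can be compared against the
# 'pci_slot' string in the VIF profile (e.g. '0000:0a:00.1').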
pci_slot = "%(domain)s:%(bus)s:%(slot)s.%(func)s" % {
'domain': address.get("domain")[2:],
'bus': address.get("bus")[2:],
'slot': address.get("slot")[2:],
'func': address.get("function")[2:]}
pci_slot_want = vif['profile']['pci_slot']
self.assertEqual(pci_slot, pci_slot_want)
def _assertXmlEqual(self, expectedXmlstr, actualXmlstr):
self.assertThat(actualXmlstr, matchers.XMLMatches(expectedXmlstr))
def _get_conf(self):
conf = vconfig.LibvirtConfigGuest()
conf.virt_type = "qemu"
conf.name = "fake-name"
conf.uuid = "fake-uuid"
conf.memory = 100 * 1024
conf.vcpus = 4
return conf
def _get_instance_xml(self, driver, vif, image_meta=None, flavor=None,
has_min_libvirt_version=True):
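# Build a minimal guest config, ask the driver for the interface config
# of the given VIF (optionally pretending the host libvirt does or does
# not satisfy the minimum version check), and return the domain XML.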
if flavor is None:
flavor = objects.Flavor(name='m1.small',
memory_mb=128,
vcpus=1,
root_gb=0,
ephemeral_gb=0,
swap=0,
extra_specs=dict(self.bandwidth),
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0)
conf = self._get_conf()
hostimpl = host.Host("qemu:///system")
with mock.patch.object(hostimpl, 'has_min_version',
return_value=has_min_libvirt_version):
nic = driver.get_config(self.instance, vif, image_meta,
flavor, CONF.libvirt.virt_type,
hostimpl)
conf.add_device(nic)
return conf.to_xml()
def _test_virtio_multiqueue(self, vcpus, want_queues):
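# The number of virtio queues requested is capped by the host kernel
# version, so want_queues is not always equal to vcpus; the
# test_virtio_multiqueue_in_kernel_* variants below mock os.uname to
# exercise the different caps.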
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
flavor = objects.Flavor(name='m1.small',
memory_mb=128,
vcpus=vcpus,
root_gb=0,
ephemeral_gb=0,
swap=0,
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0)
d = vif.LibvirtGenericVIFDriver()
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio',
'hw_vif_multiqueue_enabled': 'true'}})
xml = self._get_instance_xml(d, self.vif_bridge,
image_meta, flavor)
node = self._get_node(xml)
driver = node.find("driver").get("name")
self.assertEqual(driver, 'vhost')
queues = node.find("driver").get("queues")
self.assertEqual(queues, want_queues)
def test_virtio_multiqueue(self):
self._test_virtio_multiqueue(4, '4')
@mock.patch('os.uname', return_value=('Linux', '', '2.6.32-21-generic'))
def test_virtio_multiqueue_in_kernel_2(self, mock_uname):
self._test_virtio_multiqueue(10, '1')
@mock.patch('os.uname', return_value=('Linux', '', '3.19.0-47-generic'))
def test_virtio_multiqueue_in_kernel_3(self, mock_uname):
self._test_virtio_multiqueue(10, '8')
@mock.patch('os.uname', return_value=('Linux', '', '4.2.0-35-generic'))
def test_virtio_multiqueue_in_kernel_4(self, mock_uname):
self._test_virtio_multiqueue(10, '10')
def test_vhostuser_os_vif_multiqueue(self):
d = vif.LibvirtGenericVIFDriver()
hostimpl = host.Host("qemu:///system")
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': 'virtio',
'hw_vif_multiqueue_enabled': 'true'}})
flavor = objects.Flavor(name='m1.small',
memory_mb=128,
vcpus=4,
root_gb=0,
ephemeral_gb=0,
swap=0,
deleted_at=None,
deleted=0,
created_at=None, flavorid=1,
is_public=True, vcpu_weight=None,
id=2, disabled=False, rxtx_factor=1.0)
conf = d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta,
flavor, 'kvm', 'normal', hostimpl)
self.assertEqual(4, conf.vhost_queues)
self.assertEqual('vhost', conf.driver_name)
d._set_config_VIFVHostUser(self.instance, self.os_vif_vhostuser,
conf, hostimpl)
self.assertEqual(4, conf.vhost_queues)
self.assertIsNone(conf.driver_name)
def _test_virtio_config_queue_sizes(
self, vnic_type=network_model.VNIC_TYPE_NORMAL):
self.flags(rx_queue_size=512, group='libvirt')
self.flags(tx_queue_size=1024, group='libvirt')
hostimpl = host.Host("qemu:///system")
v = vif.LibvirtGenericVIFDriver()
conf = v.get_base_config(
None, 'ca:fe:de:ad:be:ef', {}, objects.Flavor(), 'kvm', vnic_type,
hostimpl)
return hostimpl, v, conf
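# The rx/tx queue size options only apply to virtio frontends: the tests
# below expect both to be ignored (None) for direct and direct-physical
# vNIC types, only the rx size to be applied on the vhost backend, and
# both sizes to be applied on the vhost-user backend.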
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_virtio_vhost_queue_sizes(self, has_min_version):
_, _, conf = self._test_virtio_config_queue_sizes()
self.assertEqual(512, conf.vhost_rx_queue_size)
self.assertIsNone(conf.vhost_tx_queue_size)
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_virtio_vhost_queue_sizes_vnic_type_direct(self, has_min_version):
_, _, conf = self._test_virtio_config_queue_sizes(
vnic_type=network_model.VNIC_TYPE_DIRECT)
self.assertIsNone(conf.vhost_rx_queue_size)
self.assertIsNone(conf.vhost_tx_queue_size)
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_virtio_vhost_queue_sizes_vnic_type_direct_physical(
self, has_min_version):
_, _, conf = self._test_virtio_config_queue_sizes(
vnic_type=network_model.VNIC_TYPE_DIRECT_PHYSICAL)
self.assertIsNone(conf.vhost_rx_queue_size)
self.assertIsNone(conf.vhost_tx_queue_size)
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_virtio_vhost_queue_sizes_vnic_type_macvtap(self, has_min_version):
_, _, conf = self._test_virtio_config_queue_sizes(
vnic_type=network_model.VNIC_TYPE_MACVTAP)
self.assertEqual(512, conf.vhost_rx_queue_size)
self.assertIsNone(conf.vhost_tx_queue_size)
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_virtio_vhost_queue_sizes_vnic_type_virtio_forwarder(
self, has_min_version):
_, _, conf = self._test_virtio_config_queue_sizes(
vnic_type=network_model.VNIC_TYPE_VIRTIO_FORWARDER)
self.assertEqual(512, conf.vhost_rx_queue_size)
self.assertIsNone(conf.vhost_tx_queue_size)
@mock.patch.object(host.Host, "has_min_version", return_value=False)
def test_virtio_vhost_queue_sizes_nover(self, has_min_version):
_, _, conf = self._test_virtio_config_queue_sizes()
self.assertIsNone(conf.vhost_rx_queue_size)
self.assertIsNone(conf.vhost_tx_queue_size)
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_virtio_vhostuser_osvif_queue_sizes(self, has_min_version):
hostimpl, v, conf = self._test_virtio_config_queue_sizes()
v._set_config_VIFVHostUser(self.instance, self.os_vif_vhostuser,
conf, hostimpl)
self.assertEqual(512, conf.vhost_rx_queue_size)
self.assertEqual(1024, conf.vhost_tx_queue_size)
@mock.patch.object(host.Host, "has_min_version", return_value=False)
def test_virtio_vhostuser_osvif_queue_sizes_ver_err(self, has_min_version):
hostimpl, v, conf = self._test_virtio_config_queue_sizes()
v._set_config_VIFVHostUser(self.instance, self.os_vif_vhostuser,
conf, hostimpl)
self.assertIsNone(conf.vhost_rx_queue_size)
self.assertIsNone(conf.vhost_tx_queue_size)
def test_multiple_nics(self):
conf = self._get_conf()
# Test configuration of multiple NICs and that target_dev is
# set for each
nics = [{'net_type': 'bridge',
'mac_addr': '00:00:00:00:00:0b',
'source_dev': 'b_source_dev',
'target_dev': 'b_target_dev'},
{'net_type': 'ethernet',
'mac_addr': '00:00:00:00:00:0e',
'source_dev': 'e_source_dev',
'target_dev': 'e_target_dev'},
{'net_type': 'direct',
'mac_addr': '00:00:00:00:00:0d',
'source_dev': 'd_source_dev',
'target_dev': 'd_target_dev'}]
for nic in nics:
nic_conf = vconfig.LibvirtConfigGuestInterface()
nic_conf.net_type = nic['net_type']
nic_conf.target_dev = nic['target_dev']
nic_conf.mac_addr = nic['mac_addr']
nic_conf.source_dev = nic['source_dev']
conf.add_device(nic_conf)
xml = conf.to_xml()
doc = etree.fromstring(xml)
for nic in nics:
path = "./devices/interface/[@type='%s']" % nic['net_type']
node = doc.find(path)
self.assertEqual(nic['net_type'], node.get("type"))
self.assertEqual(nic['mac_addr'],
node.find("mac").get("address"))
self.assertEqual(nic['target_dev'],
node.find("target").get("dev"))
def test_model_novirtio(self):
self.flags(use_virtio_for_bridges=False,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml)
def test_model_kvm(self):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_model_parallels(self):
self.flags(use_virtio_for_bridges=True,
virt_type='parallels',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_model_kvm_qemu_parallels_custom(self):
for virt in ('kvm', 'qemu', 'parallels'):
self.flags(use_virtio_for_bridges=True,
virt_type=virt,
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
if virt == 'parallels':
supported = (network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000)
elif virt == 'qemu':
supported = (network_model.VIF_MODEL_LAN9118,
network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN)
else:
supported = (network_model.VIF_MODEL_NE2K_PCI,
network_model.VIF_MODEL_PCNET,
network_model.VIF_MODEL_RTL8139,
network_model.VIF_MODEL_E1000,
network_model.VIF_MODEL_SPAPR_VLAN)
for model in supported:
image_meta = objects.ImageMeta.from_dict(
{'properties': {'hw_vif_model': model}})
xml = self._get_instance_xml(d, self.vif_bridge,
image_meta)
self._assertModel(xml, model)
@mock.patch.object(vif.designer, 'set_vif_guest_frontend_config')
def test_model_with_osinfo(self, mock_set):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.osinfo.libosinfo',
fakelibosinfo))
d = vif.LibvirtGenericVIFDriver()
hostimpl = host.Host("qemu:///system")
image_meta = {'properties': {'os_name': 'fedora22'}}
image_meta = objects.ImageMeta.from_dict(image_meta)
d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta,
None, 'kvm', 'normal', hostimpl)
mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef',
'virtio', None, None, None)
@mock.patch.object(vif.designer, 'set_vif_guest_frontend_config')
def test_model_sriov_direct_multi_queue_not_set(self, mock_set):
self.flags(use_virtio_for_bridges=True,
virt_type='kvm',
group='libvirt')
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.osinfo.libosinfo',
fakelibosinfo))
d = vif.LibvirtGenericVIFDriver()
hostimpl = host.Host("qemu:///system")
image_meta = {'properties': {'os_name': 'fedora22'}}
image_meta = objects.ImageMeta.from_dict(image_meta)
conf = d.get_base_config(None, 'ca:fe:de:ad:be:ef', image_meta,
None, 'kvm', network_model.VNIC_TYPE_DIRECT,
hostimpl)
mock_set.assert_called_once_with(mock.ANY, 'ca:fe:de:ad:be:ef',
None, None, None, None)
self.assertIsNone(conf.vhost_queues)
self.assertIsNone(conf.driver_name)
def _test_model_qemu(self, *vif_objs, **kw):
libvirt_version = kw.get('libvirt_version')
self.flags(use_virtio_for_bridges=True,
virt_type='qemu',
group='libvirt')
for vif_obj in vif_objs:
d = vif.LibvirtGenericVIFDriver()
if libvirt_version is not None:
d.libvirt_version = libvirt_version
xml = self._get_instance_xml(d, vif_obj)
doc = etree.fromstring(xml)
bandwidth = doc.find('./devices/interface/bandwidth')
self.assertIsNotNone(bandwidth)
inbound = bandwidth.find('inbound')
self.assertEqual(inbound.get("average"),
self.bandwidth['quota:vif_inbound_average'])
self.assertEqual(inbound.get("peak"),
self.bandwidth['quota:vif_inbound_peak'])
self.assertEqual(inbound.get("burst"),
self.bandwidth['quota:vif_inbound_burst'])
outbound = bandwidth.find('outbound')
self.assertEqual(outbound.get("average"),
self.bandwidth['quota:vif_outbound_average'])
self.assertEqual(outbound.get("peak"),
self.bandwidth['quota:vif_outbound_peak'])
self.assertEqual(outbound.get("burst"),
self.bandwidth['quota:vif_outbound_burst'])
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO, "qemu")
def test_model_qemu_no_firewall(self):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
self._test_model_qemu(
self.vif_bridge,
self.vif_8021qbg,
self.vif_iovisor,
self.vif_ovs,
)
def test_model_qemu_iptables(self):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
self._test_model_qemu(
self.vif_bridge,
self.vif_ovs,
self.vif_8021qbg,
self.vif_iovisor
)
def test_model_xen(self):
self.flags(use_virtio_for_bridges=True,
virt_type='xen',
group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_bridge)
self._assertModel(xml)
def test_generic_driver_none(self):
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.NovaException,
self._get_instance_xml,
d,
self.vif_none)
def _check_bridge_driver(self, d, vif, br_want):
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
self.vif_bridge, br_want, 1)
def test_generic_driver_bridge(self):
d = vif.LibvirtGenericVIFDriver()
self._check_bridge_driver(d,
self.vif_bridge,
self.vif_bridge['network']['bridge'])
@mock.patch.object(utils, 'execute')
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address')
@mock.patch.object(pci_utils, 'get_vf_num_by_pci_address', return_value=1)
def _test_hw_veb_op(self, op, vlan, mock_get_vf_num, mock_get_ifname,
mock_execute):
mock_get_ifname.side_effect = ['eth1', 'eth13']
exit_code = [0, 2, 254]
port_state = 'up' if vlan > 0 else 'down'
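# plug() is expected to program the VF with the VIF's MAC and VLAN via
# the parent PF ('eth1') and bring the VF netdev ('eth13') up, while
# unplug() is invoked with vlan=0 and brings it back down; both paths
# use 'ip link set'.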
calls = {
'get_ifname':
[mock.call(self.vif_hw_veb_macvtap['profile']['pci_slot'],
pf_interface=True),
mock.call(self.vif_hw_veb_macvtap['profile']['pci_slot'])],
'get_vf_num':
[mock.call(self.vif_hw_veb_macvtap['profile']['pci_slot'])],
'execute': [mock.call('ip', 'link', 'set', 'eth1',
'vf', 1, 'mac',
self.vif_hw_veb_macvtap['address'],
'vlan', vlan,
run_as_root=True,
check_exit_code=exit_code),
mock.call('ip', 'link', 'set',
'eth13', 'address',
self.vif_hw_veb_macvtap['address'],
port_state,
run_as_root=True,
check_exit_code=exit_code)]
}
op(self.instance, self.vif_hw_veb_macvtap)
mock_get_ifname.assert_has_calls(calls['get_ifname'])
mock_get_vf_num.assert_has_calls(calls['get_vf_num'])
mock_execute.assert_has_calls(calls['execute'])
def test_plug_hw_veb(self):
d = vif.LibvirtGenericVIFDriver()
self._test_hw_veb_op(
d.plug,
self.vif_hw_veb_macvtap['details'][network_model.VIF_DETAILS_VLAN])
def test_unplug_hw_veb(self):
d = vif.LibvirtGenericVIFDriver()
self._test_hw_veb_op(d.unplug, 0)
@mock.patch('nova.network.linux_net.set_vf_trusted')
def test_plug_hw_veb_trusted(self, mset_vf_trusted):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_hw_veb_trusted)
mset_vf_trusted.assert_called_once_with('0000:0a:00.1', True)
@mock.patch('nova.network.linux_net.set_vf_trusted')
def test_unplug_hw_veb_trusted(self, mset_vf_trusted):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_hw_veb_trusted)
mset_vf_trusted.assert_called_once_with('0000:0a:00.1', False)
@mock.patch('nova.privsep.libvirt.unplug_plumgrid_vif',
side_effect=processutils.ProcessExecutionError)
def test_unplug_iovisor(self, mock_unplug):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_iovisor)
@mock.patch('nova.network.linux_utils.device_exists')
@mock.patch('nova.privsep.libvirt.plug_plumgrid_vif')
def test_plug_iovisor(self, mock_plug, device_exists):
device_exists.return_value = True
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_iovisor)
mock_plug.assert_has_calls(
[mock.call('tap-xxx-yyy-zzz',
self.vif_iovisor['id'],
self.vif_iovisor['address'],
self.vif_iovisor['network']['id'],
self.instance.project_id)])
@mock.patch('nova.privsep.libvirt.unplug_contrail_vif')
def test_unplug_vrouter_with_details(self, mock_unplug_contrail):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_vrouter)
mock_unplug_contrail.assert_called_once_with(self.vif_vrouter['id'])
@mock.patch('nova.privsep.libvirt.plug_contrail_vif')
def test_plug_vrouter_with_details(self, mock_plug_contrail):
d = vif.LibvirtGenericVIFDriver()
instance = mock.Mock()
instance.name = 'instance-name'
instance.uuid = '46a4308b-e75a-4f90-a34a-650c86ca18b2'
instance.project_id = 'b168ea26fa0c49c1a84e1566d9565fa5'
instance.display_name = 'instance1'
instance.image_meta = objects.ImageMeta.from_dict({'properties': {}})
with mock.patch.object(utils, 'execute') as execute:
d.plug(instance, self.vif_vrouter)
execute.assert_has_calls([
mock.call('ip', 'tuntap', 'add', 'tap-xxx-yyy-zzz', 'mode',
'tap', run_as_root=True, check_exit_code=[0, 2, 254]),
mock.call('ip', 'link', 'set', 'tap-xxx-yyy-zzz', 'up',
run_as_root=True, check_exit_code=[0, 2, 254])])
mock_plug_contrail.assert_called_once_with(
instance.project_id, instance.uuid, instance.display_name,
self.vif_vrouter['id'], self.vif_vrouter['network']['id'],
'NovaVMPort', self.vif_vrouter['devname'],
self.vif_vrouter['address'], '0.0.0.0', None)
@mock.patch('nova.network.linux_utils.create_tap_dev')
@mock.patch('nova.privsep.libvirt.plug_contrail_vif')
def test_plug_vrouter_with_details_multiqueue(
self, mock_plug_contrail, mock_create_tap_dev):
d = vif.LibvirtGenericVIFDriver()
instance = mock.Mock()
instance.name = 'instance-name'
instance.uuid = '46a4308b-e75a-4f90-a34a-650c86ca18b2'
instance.project_id = 'b168ea26fa0c49c1a84e1566d9565fa5'
instance.display_name = 'instance1'
instance.image_meta = objects.ImageMeta.from_dict({
'properties': {'hw_vif_multiqueue_enabled': True}})
instance.flavor.vcpus = 2
d.plug(instance, self.vif_vrouter)
mock_create_tap_dev.assert_called_once_with('tap-xxx-yyy-zzz',
multiqueue=True)
mock_plug_contrail.assert_called_once_with(
instance.project_id, instance.uuid, instance.display_name,
self.vif_vrouter['id'], self.vif_vrouter['network']['id'],
'NovaVMPort', self.vif_vrouter['devname'],
self.vif_vrouter['address'], '0.0.0.0', None)
def _check_ovs_virtualport_driver(self, d, vif, want_iface_id):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
vif, "br0")
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "openvswitch")
iface_id_found = False
for p_elem in vp.findall("parameters"):
iface_id = p_elem.get("interfaceid", None)
if iface_id:
self.assertEqual(iface_id, want_iface_id)
iface_id_found = True
self.assertTrue(iface_id_found)
def test_generic_ovs_virtualport_driver(self):
d = vif.LibvirtGenericVIFDriver()
want_iface_id = self.vif_ovs['ovs_interfaceid']
self._check_ovs_virtualport_driver(d,
self.vif_ovs,
want_iface_id)
def test_direct_plug_with_port_filter_cap_no_nova_firewall(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_ovs_filter_cap)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "target", "dev",
self.vif_ovs_filter_cap, br_want)
def _check_neutron_hybrid_driver(self, d, vif, br_want):
self.flags(firewall_driver="nova.virt.firewall.IptablesFirewallDriver")
xml = self._get_instance_xml(d, vif)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "bridge", "source", "bridge",
vif, br_want, 1)
def test_ivs_hybrid_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ivs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_neutron_hybrid_driver(d,
self.vif_ivs,
br_want)
def test_generic_hybrid_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = "qbr" + self.vif_ovs['id']
br_want = br_want[:network_model.NIC_NAME_LEN]
self._check_neutron_hybrid_driver(d,
self.vif_ovs,
br_want)
def test_ib_hostdev_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_ib_hostdev)
doc = etree.fromstring(xml)
node = doc.findall('./devices/hostdev')[0]
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_ib_hostdev)
def test_midonet_ethernet_vif_driver(self):
d = vif.LibvirtGenericVIFDriver()
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
br_want = self.vif_midonet['devname']
xml = self._get_instance_xml(d, self.vif_midonet)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_midonet, br_want)
def test_tap_ethernet_vif_driver(self):
d = vif.LibvirtGenericVIFDriver()
br_want = self.vif_tap['devname']
xml = self._get_instance_xml(d, self.vif_tap)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_tap, br_want)
@mock.patch('nova.network.linux_utils.device_exists')
def test_plug_tap(self, device_exists):
device_exists.return_value = True
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_tap)
def test_unplug_tap(self):
d = vif.LibvirtGenericVIFDriver()
d.unplug(self.instance, self.vif_tap)
def test_generic_8021qbh_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_8021qbh)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_8021qbh)
self._assertMacEquals(node, self.vif_8021qbh)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbh")
profile_id_found = False
for p_elem in vp.findall("parameters"):
details = self.vif_8021qbh["details"]
profile_id = p_elem.get("profileid", None)
if profile_id:
self.assertEqual(profile_id,
details[network_model.VIF_DETAILS_PROFILEID])
profile_id_found = True
self.assertTrue(profile_id_found)
def test_hw_veb_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node, "hostdev", self.vif_hw_veb)
self._assertMacEquals(node, self.vif_hw_veb)
conf = vconfig.LibvirtConfigGuestInterface()
conf.parse_dom(node)
self.assertEqual(conf.vlan, self.vif_hw_veb["details"]["vlan"])
def test_hostdev_physical_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hostdev_physical)
doc = etree.fromstring(xml)
node = doc.findall('./devices/hostdev')[0]
self.assertEqual(1, len(node))
self._assertPciEqual(node, self.vif_hostdev_physical)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
return_value='eth1')
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_hw_veb_driver_macvtap(self, ver_mock, mock_get_ifname):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_hw_veb_macvtap)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth1")
self._assertTypeEquals(node, "direct", "source",
"mode", "passthrough")
self._assertMacEquals(node, self.vif_hw_veb_macvtap)
vlan = node.find("vlan").find("tag").get("id")
vlan_want = self.vif_hw_veb["details"]["vlan"]
self.assertEqual(int(vlan), vlan_want)
@mock.patch.object(pci_utils, 'get_ifname_by_pci_address',
return_value='eth1')
@mock.patch.object(host.Host, "has_min_version", return_value=False)
def test_hw_veb_driver_macvtap_pre_vlan_support(self, ver_mock,
mock_get_ifname):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(
d, self.vif_hw_veb_macvtap,
has_min_libvirt_version=ver_mock.return_value)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth1")
self._assertTypeEquals(node, "direct", "source",
"mode", "passthrough")
self._assertMacEquals(node, self.vif_hw_veb_macvtap)
vlan = node.find("vlan")
self.assertIsNone(vlan)
def test_driver_macvtap_vlan(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_macvtap_vlan)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth0.1")
self._assertTypeEquals(node, "direct", "source",
"mode", "vepa")
self._assertMacEquals(node, self.vif_macvtap_vlan)
def test_driver_macvtap_flat(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_macvtap_flat)
node = self._get_node(xml)
self.assertEqual(node.get("type"), "direct")
self._assertTypeEquals(node, "direct", "source",
"dev", "eth0")
self._assertTypeEquals(node, "direct", "source",
"mode", "bridge")
self._assertMacEquals(node, self.vif_macvtap_flat)
def test_driver_macvtap_exception(self):
d = vif.LibvirtGenericVIFDriver()
e = self.assertRaises(exception.VifDetailsMissingMacvtapParameters,
self._get_instance_xml,
d,
self.vif_macvtap_exception)
self.assertIn('macvtap_source', six.text_type(e))
self.assertIn('macvtap_mode', six.text_type(e))
self.assertIn('physical_interface', six.text_type(e))
@mock.patch.object(linux_net.LinuxBridgeInterfaceDriver, 'ensure_vlan')
def test_macvtap_plug_vlan(self, ensure_vlan_mock):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_macvtap_vlan)
ensure_vlan_mock.assert_called_once_with(1, 'eth0', interface='eth0.1')
@mock.patch.object(linux_net.LinuxBridgeInterfaceDriver, 'ensure_vlan')
def test_macvtap_plug_flat(self, ensure_vlan_mock):
d = vif.LibvirtGenericVIFDriver()
d.plug(self.instance, self.vif_macvtap_flat)
self.assertFalse(ensure_vlan_mock.called)
def test_generic_iovisor_driver(self):
d = vif.LibvirtGenericVIFDriver()
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
br_want = self.vif_iovisor['devname']
xml = self._get_instance_xml(d, self.vif_iovisor)
node = self._get_node(xml)
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_iovisor, br_want)
def test_generic_8021qbg_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_8021qbg)
node = self._get_node(xml)
self._assertTypeEquals(node, "direct", "source", "dev", "eth0")
self._assertMacEquals(node, self.vif_8021qbg)
vp = node.find("virtualport")
self.assertEqual(vp.get("type"), "802.1Qbg")
manager_id_found = False
type_id_found = False
typeversion_id_found = False
instance_id_found = False
for p_elem in vp.findall("parameters"):
wantparams = self.vif_8021qbg['qbg_params']
manager_id = p_elem.get("managerid", None)
type_id = p_elem.get("typeid", None)
typeversion_id = p_elem.get("typeidversion", None)
instance_id = p_elem.get("instanceid", None)
if manager_id:
self.assertEqual(manager_id,
wantparams['managerid'])
manager_id_found = True
if type_id:
self.assertEqual(type_id,
wantparams['typeid'])
type_id_found = True
if typeversion_id:
self.assertEqual(typeversion_id,
wantparams['typeidversion'])
typeversion_id_found = True
if instance_id:
self.assertEqual(instance_id,
wantparams['instanceid'])
instance_id_found = True
self.assertTrue(manager_id_found)
self.assertTrue(type_id_found)
self.assertTrue(typeversion_id_found)
self.assertTrue(instance_id_found)
def test_vhostuser_driver(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_vhostuser)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "client")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/vif-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_vhostuser)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_vhostuser_driver_queue_sizes(self):
self.flags(rx_queue_size=512, group='libvirt')
self.flags(tx_queue_size=1024, group='libvirt')
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_vhostuser)
self._assertXmlEqual("""
<domain type="qemu">
<uuid>fake-uuid</uuid>
<name>fake-name</name>
<memory>102400</memory>
<vcpu>4</vcpu>
<os>
<type>None</type>
</os>
<devices>
<interface type="vhostuser">
<mac address="ca:fe:de:ad:be:ef"/>
<model type="virtio"/>
<driver rx_queue_size="512" tx_queue_size="1024"/>
<source mode="client" path="/tmp/vif-xxx-yyy-zzz" type="unix"/>
</interface>
</devices>
</domain>""", xml)
def test_vhostuser_driver_no_path(self):
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.VifDetailsMissingVhostuserSockPath,
self._get_instance_xml,
d,
self.vif_vhostuser_no_path)
def test_vhostuser_driver_ovs(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.vif_vhostuser_ovs)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "client")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/usv-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_vhostuser_ovs)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_ovs_direct(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_ovs_direct)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node,
"hostdev",
self.vif_ovs_direct)
self._assertMacEquals(node, self.vif_ovs_direct)
def test_agilio_ovs_direct(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_agilio_ovs_direct)
node = self._get_node(xml)
self._assertTypeAndPciEquals(node,
"hostdev",
self.vif_agilio_ovs_direct)
self._assertMacEquals(node, self.vif_agilio_ovs_direct)
def test_agilio_ovs_forwarder(self):
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d,
self.vif_agilio_ovs_forwarder)
node = self._get_node(xml)
self.assertEqual(node.get("type"),
network_model.VIF_TYPE_VHOSTUSER)
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "mode", "client")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "path", "/tmp/usv-xxx-yyy-zzz")
self._assertTypeEquals(node, network_model.VIF_TYPE_VHOSTUSER,
"source", "type", "unix")
self._assertMacEquals(node, self.vif_agilio_ovs_forwarder)
self._assertModel(xml, network_model.VIF_MODEL_VIRTIO)
def test_ivs_ethernet_driver(self):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
d = vif.LibvirtGenericVIFDriver()
xml = self._get_instance_xml(d, self.vif_ivs)
node = self._get_node(xml)
dev_want = self.vif_ivs['devname']
self._assertTypeAndMacEquals(node, "ethernet", "target", "dev",
self.vif_ivs, dev_want)
script = node.find("script")
self.assertIsNone(script)
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
@mock.patch.object(os_vif, "plug")
def _test_osvif_plug(self, fail, mock_plug,
mock_convert_vif, mock_convert_inst):
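# The os-vif plug/unplug paths convert the nova VIF and instance into
# os-vif objects and delegate to os_vif.plug()/unplug(); an os-vif
# exception is expected to surface as a NovaException.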
mock_convert_vif.return_value = self.os_vif_bridge
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
if fail:
mock_plug.side_effect = osv_exception.ExceptionBase("Wibble")
self.assertRaises(exception.NovaException,
d.plug,
self.instance, self.vif_bridge)
else:
d.plug(self.instance, self.vif_bridge)
mock_plug.assert_called_once_with(self.os_vif_bridge,
self.os_vif_inst_info)
def test_osvif_plug_normal(self):
self._test_osvif_plug(False)
def test_osvif_plug_fail(self):
self._test_osvif_plug(True)
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
@mock.patch.object(os_vif, "unplug")
def _test_osvif_unplug(self, fail, mock_unplug,
mock_convert_vif, mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_bridge
mock_convert_inst.return_value = self.os_vif_inst_info
d = vif.LibvirtGenericVIFDriver()
if fail:
mock_unplug.side_effect = osv_exception.ExceptionBase("Wibble")
self.assertRaises(exception.NovaException,
d.unplug,
self.instance, self.vif_bridge)
else:
d.unplug(self.instance, self.vif_bridge)
mock_unplug.assert_called_once_with(self.os_vif_bridge,
self.os_vif_inst_info)
def test_osvif_unplug_normal(self):
self._test_osvif_unplug(False)
def test_osvif_unplug_fail(self):
self._test_osvif_unplug(True)
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_bridge(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_bridge
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(d, "_has_min_version_for_mtu",
return_value=True):
cfg = d.get_config(self.instance, self.vif_bridge,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br100"/>
<mtu size="9000"/>
<target dev="nicdc065497-3c"/>
<filterref
filter="nova-instance-instance-00000001-22522562e2aa"/>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_bridge_no_mtu(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_bridge
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(d, "_has_min_version_for_mtu",
return_value=False):
cfg = d.get_config(self.instance, self.vif_bridge,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br100"/>
<target dev="nicdc065497-3c"/>
<filterref
filter="nova-instance-instance-00000001-22522562e2aa"/>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_bridge_nofw(self, mock_convert_vif,
mock_convert_inst):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
mock_convert_vif.return_value = self.os_vif_bridge
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(d, "_has_min_version_for_mtu",
return_value=True):
cfg = d.get_config(self.instance, self.vif_bridge,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br100"/>
<mtu size="9000"/>
<target dev="nicdc065497-3c"/>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_bridge_nofw_no_mtu(self, mock_convert_vif,
mock_convert_inst):
self.flags(firewall_driver="nova.virt.firewall.NoopFirewallDriver")
mock_convert_vif.return_value = self.os_vif_bridge
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(d, "_has_min_version_for_mtu",
return_value=False):
cfg = d.get_config(self.instance, self.vif_bridge,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br100"/>
<target dev="nicdc065497-3c"/>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_agilio_ovs_fallthrough(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_agilio_ovs
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(d, "_has_min_version_for_mtu",
return_value=True):
cfg = d.get_config(self.instance, self.vif_agilio_ovs,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br0"/>
<mtu size="9000"/>
<target dev="nicdc065497-3c"/>
<virtualport type="openvswitch">
<parameters
interfaceid="07bd6cea-fb37-4594-b769-90fc51854ee9"/>
</virtualport>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_agilio_ovs_fallthrough_no_mtu(self,
mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_agilio_ovs
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(d, "_has_min_version_for_mtu",
return_value=False):
cfg = d.get_config(self.instance, self.vif_agilio_ovs,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br0"/>
<target dev="nicdc065497-3c"/>
<virtualport type="openvswitch">
<parameters
interfaceid="07bd6cea-fb37-4594-b769-90fc51854ee9"/>
</virtualport>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_agilio_ovs_forwarder(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_agilio_forwarder
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
cfg = d.get_config(self.instance, self.vif_agilio_ovs_forwarder,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="vhostuser">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source mode="client"
path="/var/run/openvswitch/vhudc065497-3c" type="unix"/>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_agilio_ovs_direct(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_agilio_direct
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
cfg = d.get_config(self.instance, self.vif_agilio_ovs_direct,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="hostdev" managed="yes">
<mac address="22:52:25:62:e2:aa"/>
<source>
<address type="pci" domain="0x0000"
bus="0x0a" slot="0x00" function="0x1"/>
</source>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_ovs(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_ovs
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(d, "_has_min_version_for_mtu",
return_value=True):
cfg = d.get_config(self.instance, self.vif_ovs,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br0"/>
<mtu size="9000"/>
<target dev="nicdc065497-3c"/>
<virtualport type="openvswitch">
<parameters
interfaceid="07bd6cea-fb37-4594-b769-90fc51854ee9"/>
</virtualport>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_ovs_no_mtu(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_ovs
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(d, "_has_min_version_for_mtu",
return_value=False):
cfg = d.get_config(self.instance, self.vif_ovs,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br0"/>
<target dev="nicdc065497-3c"/>
<virtualport type="openvswitch">
<parameters
interfaceid="07bd6cea-fb37-4594-b769-90fc51854ee9"/>
</virtualport>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_ovs_hybrid(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_ovs_hybrid
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(d, "_has_min_version_for_mtu",
return_value=True):
cfg = d.get_config(self.instance, self.vif_ovs,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br0"/>
<mtu size="9000"/>
<target dev="nicdc065497-3c"/>
<filterref
filter="nova-instance-instance-00000001-22522562e2aa"/>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_ovs_hybrid_no_mtu(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_ovs_hybrid
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
with mock.patch.object(d, "_has_min_version_for_mtu",
return_value=False):
cfg = d.get_config(self.instance, self.vif_ovs,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="bridge">
<mac address="22:52:25:62:e2:aa"/>
<model type="virtio"/>
<source bridge="br0"/>
<target dev="nicdc065497-3c"/>
<filterref
filter="nova-instance-instance-00000001-22522562e2aa"/>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_hostdevice_ethernet(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_hostdevice_ethernet
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
cfg = d.get_config(self.instance, self.vif_bridge,
image_meta, flavor,
CONF.libvirt.virt_type,
hostimpl)
self._assertXmlEqual("""
<interface type="hostdev" managed="yes">
<mac address="22:52:25:62:e2:aa"/>
<source>
<address type="pci" domain="0x0000"
bus="0x0a" slot="0x00" function="0x1"/>
</source>
</interface>""", cfg.to_xml())
@mock.patch("nova.network.os_vif_util.nova_to_osvif_instance")
@mock.patch("nova.network.os_vif_util.nova_to_osvif_vif")
def test_config_os_vif_hostdevice_generic(self, mock_convert_vif,
mock_convert_inst):
mock_convert_vif.return_value = self.os_vif_hostdevice_generic
mock_convert_inst.return_value = self.os_vif_inst_info
hostimpl = host.Host("qemu:///system")
flavor = objects.Flavor(name='m1.small')
image_meta = objects.ImageMeta.from_dict({})
d = vif.LibvirtGenericVIFDriver()
self.assertRaises(exception.InternalError,
d.get_config, self.instance, self.vif_bridge,
image_meta, flavor, CONF.libvirt.virt_type,
hostimpl)
"""Integration testing."""
import unittest
import time
from test import regnet
class TestChannel(unittest.TestCase):
"""Run basic tests on payment channels."""
@classmethod
def setUpClass(cls):
cls.cache = regnet.make_cache()
@classmethod
def tearDownClass(cls):
cls.cache.cleanup()
def propagate(self):
"""Ensure all nodes up to date."""
self.net.generate()
def setUp(self):
# Set up 3 nodes: Alice, Bob, and Carol
self.net = regnet.create(datadir=None, cache=self.cache)
self.alice, self.bob, self.carol = self.net[0], self.net[1], self.net[2]
# self.alice.bit is an interface to bitcoind,
# self.alice.lit talks to the lightning node
# self.alice.lurl is Alice's identifier
def tearDown(self):
self.net.stop(hard=True, cleanup=True)
def test_setup(self):
"""Test that the setup worked."""
# Alice and Bob each start with 1.00 BTC
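# (Amounts in these tests are in satoshi: 100000000 == 1.00 BTC.)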
self.assertEqual(self.alice.bit.getbalance(), 100000000)
self.assertEqual(self.bob.bit.getbalance(), 100000000)
def test_basic(self):
"""Test basic operation of a payment channel."""
# Open a channel between Alice and Bob
self.alice.lit.create(self.bob.lurl, 50000000, 25000000)
self.propagate()
# There are some fees associated with opening a channel
afee = 50000000 - self.alice.bit.getbalance()
bfee = 75000000 - self.bob.bit.getbalance()
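# Alice committed 0.50 BTC of her 1.00 BTC wallet and Bob committed
# 0.25 BTC, so anything missing beyond that is the fee each paid.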
self.assertGreaterEqual(afee, 0)
self.assertGreaterEqual(bfee, 0)
# (Balance) Alice: 0.50 BTC, Bob: 0.25 BTC
self.assertEqual(self.alice.lit.getbalance(self.bob.lurl), 50000000)
self.assertEqual(self.bob.lit.getbalance(self.alice.lurl), 25000000)
# Bob sends Alice 0.05 BTC
self.bob.lit.send(self.alice.lurl, 5000000)
# (Balance) Alice: 0.55 BTC, Bob: 0.20 BTC
self.assertEqual(self.alice.lit.getbalance(self.bob.lurl), 55000000)
self.assertEqual(self.bob.lit.getbalance(self.alice.lurl), 20000000)
# Now Alice sends Bob 0.10 BTC
self.alice.lit.send(self.bob.lurl, 10000000)
# (Balance) Alice: 0.45 BTC, Bob: 0.30 BTC
self.assertEqual(self.alice.lit.getbalance(self.bob.lurl), 45000000)
self.assertEqual(self.bob.lit.getbalance(self.alice.lurl), 30000000)
# Bob closes the channel
self.bob.lit.close(self.alice.lurl)
self.propagate()
# The Lightning balance is returned to the bitcoin wallet
# If any coins were held for fees that were never paid, they are
# refunded, so the balance may be higher than expected.
self.assertGreaterEqual(self.alice.bit.getbalance(), 95000000 - afee)
self.assertGreaterEqual(self.bob.bit.getbalance(), 105000000 - bfee)
def test_stress(self):
"""Test edge cases in payment channels."""
# Open *two* payment channels Bob - Alice - Carol
self.alice.lit.create(self.bob.lurl, 25000000, 50000000)
self.propagate()
self.carol.lit.create(self.alice.lurl, 50000000, 25000000)
self.propagate()
# Account for fees
afee = 50000000 - self.alice.bit.getbalance()
bfee = 50000000 - self.bob.bit.getbalance()
self.assertGreaterEqual(afee, 0)
self.assertGreaterEqual(bfee, 0)
# Balance (A-C) Alice: 0.25 BTC, Carol: 0.50 BTC
# Balance (B-A) Bob: 0.50 BTC, Alice: 0.25 BTC
self.assertEqual(self.alice.lit.getbalance(self.bob.lurl), 25000000)
self.assertEqual(self.alice.lit.getbalance(self.carol.lurl), 25000000)
self.assertEqual(self.bob.lit.getbalance(self.alice.lurl), 50000000)
self.assertEqual(self.carol.lit.getbalance(self.alice.lurl), 50000000)
# Carol sends 0.25 BTC to Alice
self.carol.lit.send(self.alice.lurl, 25000000)
# Balance (A-C) Alice: 0.50 BTC, Carol: 0.25 BTC
self.assertEqual(self.alice.lit.getbalance(self.carol.lurl), 50000000)
self.assertEqual(self.carol.lit.getbalance(self.alice.lurl), 25000000)
# Alice sends 0.15 BTC to Carol
self.alice.lit.send(self.carol.lurl, 15000000)
# Balance (A-C) Alice: 0.35 BTC, Carol: 0.40 BTC
self.assertEqual(self.alice.lit.getbalance(self.carol.lurl), 35000000)
self.assertEqual(self.carol.lit.getbalance(self.alice.lurl), 40000000)
# Bob sends Alice 0.50 BTC (his whole balance)
self.bob.lit.send(self.alice.lurl, 50000000)
# Balance (B-A) Bob: 0.00 BTC, Alice: 0.75 BTC
self.assertEqual(self.alice.lit.getbalance(self.bob.lurl), 75000000)
self.assertEqual(self.bob.lit.getbalance(self.alice.lurl), 0)
# Alice sends Bob 0.75 BTC (her whole balance)
self.alice.lit.send(self.bob.lurl, 75000000)
# Balance (B-A) Bob: 0.75 BTC, Alice: 0.00 BTC
self.assertEqual(self.alice.lit.getbalance(self.bob.lurl), 0)
self.assertEqual(self.bob.lit.getbalance(self.alice.lurl), 75000000)
# Alice closes the channel with Bob, on an empty account (Alice opened)
self.alice.lit.close(self.bob.lurl)
self.propagate()
self.assertGreaterEqual(self.alice.bit.getbalance(), 50000000 - afee)
self.assertGreaterEqual(self.bob.bit.getbalance(), 125000000 - bfee)
# Alice closes the channel with Carol (Carol opened)
self.alice.lit.close(self.carol.lurl)
self.propagate()
self.assertGreaterEqual(self.alice.bit.getbalance(), 85000000 - afee)
def test_unilateral_close(self):
"""Test unilateral close."""
# Set up channel between Alice and Bob
self.alice.lit.create(self.bob.lurl, 50000000, 25000000)
self.propagate()
afee = 50000000 - self.alice.bit.getbalance()
bfee = 75000000 - self.bob.bit.getbalance()
self.assertGreaterEqual(afee, 0)
self.assertGreaterEqual(bfee, 0)
# Do some transactions
self.assertEqual(self.alice.lit.getbalance(self.bob.lurl), 50000000)
self.assertEqual(self.bob.lit.getbalance(self.alice.lurl), 25000000)
self.bob.lit.send(self.alice.lurl, 5000000)
self.assertEqual(self.alice.lit.getbalance(self.bob.lurl), 55000000)
self.assertEqual(self.bob.lit.getbalance(self.alice.lurl), 20000000)
# Pause Bob
with self.bob.paused():
# Publish Alice's commitment transactions
commitment = self.alice.lit.getcommitmenttransactions(self.bob.lurl)
for transaction in commitment:
self.alice.bit.sendrawtransaction(transaction)
self.propagate()
time.sleep(1)
self.propagate()
self.propagate()
# Alice and Bob get their money out
self.assertGreaterEqual(self.bob.bit.getbalance(), 95000000 - bfee)
self.assertGreaterEqual(self.alice.bit.getbalance(), 105000000 - afee)
@unittest.expectedFailure
def test_revoked(self):
"""Test a revoked commitment transaction being published."""
# Set up channel between Alice and Bob
self.alice.lit.create(self.bob.lurl, 50000000, 25000000)
self.propagate()
afee = 50000000 - self.alice.bit.getbalance()
bfee = 75000000 - self.bob.bit.getbalance()
self.assertGreaterEqual(afee, 0)
self.assertGreaterEqual(bfee, 0)
# Make a transaction
self.assertEqual(self.alice.lit.getbalance(self.bob.lurl), 50000000)
self.assertEqual(self.bob.lit.getbalance(self.alice.lurl), 25000000)
self.bob.lit.send(self.alice.lurl, 5000000)
self.assertEqual(self.alice.lit.getbalance(self.bob.lurl), 55000000)
self.assertEqual(self.bob.lit.getbalance(self.alice.lurl), 20000000)
# Save Alice's old commitment transactions
commitment = self.alice.lit.getcommitmenttransactions(self.bob.lurl)
        # Do another transaction: Alice sends Bob money
self.alice.lit.send(self.bob.lurl, 10000000)
self.assertEqual(self.alice.lit.getbalance(self.bob.lurl), 45000000)
self.assertEqual(self.bob.lit.getbalance(self.alice.lurl), 30000000)
# Alice publishes her old, revoked commitment transactions
for transaction in commitment:
self.alice.bit.sendrawtransaction(transaction)
self.propagate()
time.sleep(1)
self.propagate()
# Bob ends up with all the money
self.assertGreaterEqual(self.bob.bit.getbalance(), 150000000 - bfee)
class TestLightning(unittest.TestCase):
"""Run basic tests on payment channels."""
@classmethod
def setUpClass(cls):
cls.cache = regnet.make_cache()
@classmethod
def tearDownClass(cls):
cls.cache.cleanup()
def propagate(self):
"""Ensure all nodes up to date."""
self.net.generate()
def setUp(self):
# As in TestChannel, set up 3 nodes
self.net = regnet.create(datadir=None, cache=self.cache)
self.alice, self.bob, self.carol = self.net[0], self.net[1], self.net[2]
        # Set up channels so the network is Alice - Carol - Bob
self.alice.lit.create(self.carol.lurl, 50000000, 50000000)
self.propagate()
self.bob.lit.create(self.carol.lurl, 50000000, 50000000)
self.propagate()
def tearDown(self):
self.net.stop(hard=True, cleanup=True)
def test_setup(self):
"""Test that the setup worked."""
# (Balance) Alice-Carol: Alice: 0.50 BTC, Carol 0.50 BTC
# Carol-Bob : Carol: 0.50 BTC, Bob 0.50 BTC
# (Total) Alice: 0.50 BTC, Carol: 1.00 BTC, Bob: 0.50 BTC
self.assertEqual(self.alice.lit.getbalance(self.carol.lurl), 50000000)
self.assertEqual(self.bob.lit.getbalance(self.carol.lurl), 50000000)
self.assertEqual(self.carol.lit.getbalance(self.alice.lurl), 50000000)
self.assertEqual(self.carol.lit.getbalance(self.bob.lurl), 50000000)
@unittest.expectedFailure # Routing needs to be re-implemented with the state machine
def test_payment(self):
"""Test multi-hop payment."""
# Note Alice and Bob do not have a payment channel open directly.
# They are connected through Carol
self.alice.lit.send(self.bob.lurl, 5000000)
# There is a fee associated with multi-hop payments
fee = 45000000 - self.alice.lit.getbalance(self.carol.lurl)
self.assertGreaterEqual(fee, 0)
# (Balance) Alice-Carol: Alice: 0.45 - fee BTC, Carol 0.55 + fee BTC
# Carol-Bob : Carol: 0.45 BTC, Bob 0.55 BTC
# (Total) Alice: 0.45 - fee BTC, Carol: 1.00 + fee BTC, Bob: 0.55 BTC
self.assertEqual(self.carol.lit.getbalance(self.alice.lurl), 55000000 + fee)
self.assertEqual(self.bob.lit.getbalance(self.carol.lurl), 55000000)
self.assertEqual(self.carol.lit.getbalance(self.bob.lurl), 45000000)
# Send money the other direction
self.bob.lit.send(self.alice.lurl, 10000000)
        # Another fee will be deducted
fee2 = 45000000 - self.bob.lit.getbalance(self.carol.lurl)
self.assertGreaterEqual(fee2, 0)
# (Balance) Alice-Carol: Alice: 0.55 - fee BTC, Carol 0.45 + fee BTC
# Carol-Bob : Carol: 0.55 + fee2 BTC, Bob 0.45 - fee2 BTC
# (Total) Alice: 0.55 - fee BTC, Carol: 1.00 + fee + fee2 BTC, Bob: 0.45 - fee2 BTC
self.assertEqual(self.carol.lit.getbalance(self.alice.lurl), 45000000 + fee)
self.assertEqual(self.alice.lit.getbalance(self.carol.lurl), 55000000 - fee)
self.assertEqual(self.carol.lit.getbalance(self.bob.lurl), 55000000 + fee2)
@unittest.expectedFailure
def test_route_close(self):
"""Test routing around closed channels."""
# Create a new channel between Alice and Bob
# so all 3 are connected to each other.
self.alice.lit.create(self.bob.lurl, 25000000, 25000000)
# Close the connection between Alice and Carol
self.alice.lit.close(self.carol.lurl)
self.propagate()
# Alice sends 0.10 BTC to Carol
self.alice.lit.send(self.carol.lurl, 10000000)
        # Carol should have received money from Bob
self.assertEqual(self.carol.lit.getbalance(self.bob.lurl), 60000000)
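# Added note (not part of the original tests): the balances asserted above are
# denominated in satoshis, so 100000000 == 1.00 BTC. The helper below is a
# hypothetical convenience for reading the raw numbers; it is not used by the
# test cases themselves.
def btc(amount):
    """Convert a BTC amount to the satoshi units used in the assertions."""
    return int(round(amount * 100000000))
# For example, btc(0.50) == 50000000 and btc(0.05) == 5000000.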
if __name__ == '__main__':
unittest.main()
|
|
"""
Assign a geographical density value to a set of points.
"""
from scipy.spatial import KDTree
from scipy.spatial.distance import cdist
from scipy.stats import norm
from scipy.optimize import minimize
import numpy as np
def general_density_assignation(locs, parameters, values=None, locs2=None):
"Density assignation function."
# Creation of the kdtree for retrieving neighs
    if locs2 is None:
        leafsize = max(1, int(locs.shape[0] / 10.))
        kdtree = KDTree(locs, leafsize=leafsize)
    else:
        leafsize = max(1, int(locs2.shape[0] / 10.))
        kdtree = KDTree(locs2, leafsize=leafsize)
parameters = preparation_parameters(parameters)
M = compute_measure(locs=locs, kdtree=kdtree, values=values, **parameters)
## Recurrent measure (TODO)[better before with the population?]
return M
# method, params (weighted count, ...)
# method, params (linear, trapezoid, ...)
###############################################################################
############################### Compute measure ###############################
###############################################################################
def compute_measure(locs, kdtree, max_r, values, method, params):
    "Compute the density measure for each point from its neighbours."
    ## Computation of the measure based on the distances as weights.
    M = np.zeros(locs.shape[0])
    for i in range(locs.shape[0]):
        neighs, dist = get_self_neighs_i(locs, kdtree, max_r, i)
        M[i] = compute_measure_i(neighs, dist, values[neighs], method, params)
    return M
def get_self_neighs_i(locs, kdtree, max_r, i):
    "Retrieve the neighbours of point i (excluding itself) and their distances."
    loc = locs[i, :]
    neighs = kdtree.query_ball_point(loc, max_r)
    neighs.remove(i)
    dist = cdist(locs[[i], :], locs[neighs, :])[0]
    return neighs, dist
def compute_measure_i(neighs, dist, values, method, params):
    "Switcher function between the different options to compute density."
    if method == 'weighted_count':
        measure = compute_measure_wcount(neighs, dist, params)
    elif method == 'weighted_avg':
        measure = compute_measure_wavg(neighs, dist, values, params)
    return measure
def compute_measure_wcount(neighs, dist, params):
"""Measure to compute density only based on the weighted count of selected
elements around the point considered.
"""
weights = from_distance_to_weights(dist, **params)
measure = np.sum(weights)
return measure
def compute_measure_wavg(neighs, dist, values, params):
"""Measure to compute density based on the weighted average of selected
elements around the point considered.
"""
weights = from_distance_to_weights(dist, **params)
measure = np.sum(weights * values)
return measure
###############################################################################
############################# Distance to weights #############################
###############################################################################
def from_distance_to_weights(dist, method, params):
    "Transform the given distances into weights using the selected kernel."
    if method == 'linear':
        weights = dist2weights_linear(dist, **params)
    elif method == 'trapezoid':
weights = dist2weights_trapez(dist, **params)
elif method == 'inverse_prop':
weights = dist2weights_invers(dist, **params)
elif method == 'exponential':
weights = dist2weights_exp(dist, **params)
elif method == 'gaussian':
weights = dist2weights_gauss(dist, **params)
elif method == 'surgaussian':
weights = dist2weights_surgauss(dist, **params)
elif method == 'sigmoid':
weights = dist2weights_sigmoid(dist, **params)
return weights
def dist2weights_linear(dist, max_r, max_w=1, min_w=0):
    "Linear distance weighting (max_w at distance 0, min_w at max_r)."
    weights = (max_r - dist)*((max_w-min_w)/float(max_r)) + min_w
    return weights
def dist2weights_trapez(dist, max_r, r2, max_w=1, min_w=0):
"Trapezoidal distance weighting."
if type(dist) == np.ndarray:
weights = dist2weights_linear(dist-r2, max_r-r2, max_w, min_w)
weights[dist <= r2] = max_w
else:
if dist <= r2:
weights = max_w
else:
weights = dist2weights_linear(dist-r2, max_r-r2, max_w, min_w)
return weights
def dist2weights_invers(dist, max_r, max_w=1, min_w=1e-8, rescale=True):
"Inverse distance weighting."
if min_w == 0:
tau = 1.
else:
tau = (max_w/min_w-1)/max_r
    if rescale:
        floor_f = 1./(1.+tau*max_r)
        weights = max_w/(1.-floor_f) * (1./(1.+tau*dist)-floor_f)
    else:
        weights = max_w/(1.+tau*dist)
return weights
def dist2weights_exp(dist, max_r, max_w=1, min_w=1e-8, rescale=True):
"Exponential distanve weighting."
if min_w == 0:
C = 1.
else:
C = -np.log(min_w/max_w)
if rescale:
weights = max_w/(1.-np.exp(-C)) * np.exp(-C*dist/max_r)
else:
weights = max_w * np.exp(-C*dist/max_r)
return weights
def dist2weights_gauss(dist, max_r, max_w=1, min_w=1e-3, S=None, rescale=True):
"Gaussian distance weighting."
if S is None:
        S = set_scale_gauss(max_r, max_w, min_w)
if rescale:
A = max_w/(norm.pdf(0)-norm.pdf(max_r, scale=S))
weights = A*norm.pdf(dist, scale=S)
else:
A = max_w/norm.pdf(0)
weights = A*norm.pdf(dist, scale=S)
return weights
def dist2weights_surgauss(dist, max_r, max_w=1, min_w=1e-3, S=None,
rescale=True):
"Survival gaussian distance weighting."
if S is None:
S = set_scale_surgauss(max_r, max_w, min_w)
if rescale:
A = max_w/(norm.sf(0, scale=S)-norm.sf(max_r, scale=S))
weights = A*(norm.sf(dist, scale=S)-norm.sf(max_r, scale=S))
else:
A = max_w/norm.sf(0)
weights = A*norm.sf(dist, scale=S)
return weights
def dist2weights_sigmoid(dist, max_r, max_w=1, min_w=1e-3, r_char=0, B=None,
rescale=True):
"Sigmoid-like distance weighting"
C = r_char*max_r
if B is None:
B = set_scale_sigmoid(max_r, max_w, min_w, r_char)
sigmoid = lambda x: 1./(1.+B*np.exp(x+C))
if rescale:
floor_f = sigmoid(max_r)
weights = max_w/(sigmoid(0)-floor_f)*(sigmoid(dist)-floor_f)
else:
weights = 1./(1.+B*np.exp(dist+C))
return weights
###############################################################################
############################# Set scale functions #############################
###############################################################################
def set_scales_kernel(method, max_r, max_w, min_w, r_char=None):
"Switcher function for set scale functions."
if method == 'surgaussian':
scale = set_scale_surgauss(max_r, max_w, min_w)
elif method == 'gaussian':
scale = set_scale_gauss(max_r, max_w, min_w)
elif method == 'sigmoid':
scale = set_scale_sigmoid(max_r, max_w, min_w, r_char)
return scale
def set_scale_surgauss(max_r, max_w, min_w):
"Set the scale factor of the surgauss kernel."
A = max_w/norm.sf(0)
scale = minimize(lambda x: (A*norm.sf(max_r, scale=x)-min_w)**2,
                     x0=np.array([max_r]), method='L-BFGS-B',
                     tol=1e-8, bounds=[(1e-8, None)])
scale = scale['x'][0]
return scale
def set_scale_gauss(max_r, max_w, min_w):
"Set the scale factor of the gauss kernel."
A = max_w/norm.pdf(0)
scale = minimize(lambda x: (A*norm.pdf(max_r, scale=x)-min_w)**2,
                     x0=np.array([max_r]), method='L-BFGS-B',
                     tol=1e-8, bounds=[(1e-8, None)])
scale = scale['x'][0]
return scale
def set_scale_sigmoid(max_r, max_w, min_w, r_char):
    "Set the scale factor for sigmoidal functions."
    C = r_char*max_r
    sigmoid_c = lambda B: 1./(1.+B*np.exp(max_r+C)) - min_w
    B = minimize(lambda x: sigmoid_c(x)**2,
                 x0=np.array([1]), method='L-BFGS-B',
                 tol=1e-8, bounds=[(1e-8, None)])
    return B['x'][0]
###############################################################################
############################# Preparation inputs #############################
###############################################################################
def preparation_parameters(parameters):
    "Ensure the selected parameters are coherent, filling derived scale factors."
method = parameters['params']['method']
params = parameters['params']['params']
if method == 'gaussian':
bool_scale = 'S' in params
if not bool_scale:
scale = set_scale_gauss(params['max_r'], params['max_w'],
params['min_w'])
parameters['params']['params']['S'] = scale
elif method == 'surgaussian':
bool_scale = 'S' in params
if not bool_scale:
scale = set_scale_surgauss(params['max_r'], params['max_w'],
params['min_w'])
parameters['params']['params']['S'] = scale
elif method == 'sigmoid':
bool_scale = 'B' in params
if not bool_scale:
scale = set_scale_sigmoid(params['max_r'], params['max_w'],
params['min_w'], params['r_char'])
parameters['params']['params']['B'] = scale
return parameters
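###############################################################################
# Minimal usage sketch, not part of the original module: it evaluates two of
# the distance-to-weight kernels defined above on a small array of distances,
# with an illustrative cutoff radius max_r of 10. The parameter values are
# assumptions for demonstration only; the block runs only when this module is
# executed directly.
if __name__ == '__main__':
    example_dists = np.linspace(0., 10., 5)
    print('linear     : %s' % dist2weights_linear(example_dists, max_r=10.))
    print('exponential: %s' % dist2weights_exp(example_dists, max_r=10.))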
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
from stackalytics.openstack.common import log as logging
from stackalytics.processor import launchpad_utils
from stackalytics.processor import normalizer
from stackalytics.processor import utils
LOG = logging.getLogger(__name__)
class RecordProcessor(object):
def __init__(self, runtime_storage_inst):
self.runtime_storage_inst = runtime_storage_inst
self.domains_index = runtime_storage_inst.get_by_key('companies')
self.users_index = runtime_storage_inst.get_by_key('users')
self.releases = runtime_storage_inst.get_by_key('releases')
self.releases_dates = [r['end_date'] for r in self.releases]
self.modules = None
self.updated_users = set()
def _get_release(self, timestamp):
release_index = bisect.bisect(self.releases_dates, timestamp)
return self.releases[release_index]['release_name']
def _get_modules(self):
if self.modules is None:
self.modules = set()
for repo in utils.load_repos(self.runtime_storage_inst):
module = repo['module'].lower()
add = True
for m in self.modules:
if module.find(m) >= 0:
add = False
break
if m.find(module) >= 0:
self.modules.remove(m)
break
if add:
self.modules.add(module)
return self.modules
def _find_company(self, companies, date):
for r in companies:
if date < r['end_date']:
return r['company_name']
return companies[-1]['company_name']
def _get_company_by_email(self, email):
if not email:
return None
name, at, domain = email.partition('@')
if domain:
parts = domain.split('.')
for i in range(len(parts), 1, -1):
m = '.'.join(parts[len(parts) - i:])
if m in self.domains_index:
return self.domains_index[m]
return None
def _create_user(self, launchpad_id, email, user_name):
company = (self._get_company_by_email(email) or
self._get_independent())
user = {
'user_id': normalizer.get_user_id(launchpad_id, email),
'launchpad_id': launchpad_id,
'user_name': user_name or '',
'companies': [{
'company_name': company,
'end_date': 0,
}],
}
if email:
user['emails'] = [email]
else:
user['emails'] = []
normalizer.normalize_user(user)
LOG.debug('Create new user: %s', user)
return user
def _get_lp_info(self, email):
lp_profile = None
if not utils.check_email_validity(email):
LOG.debug('User email is not valid %s', email)
else:
lp_profile = launchpad_utils.lp_profile_by_email(email)
if not lp_profile:
LOG.debug('User with email %s not found', email)
return None, None
LOG.debug('Email is mapped to launchpad user: %s', lp_profile['name'])
return lp_profile['name'], lp_profile['display_name']
def _get_lp_user_name(self, launchpad_id):
if not launchpad_id:
return None
lp_profile = launchpad_utils.lp_profile_by_launchpad_id(launchpad_id)
if not lp_profile:
LOG.debug('User with id %s not found', launchpad_id)
return launchpad_id
return lp_profile['display_name']
def _get_independent(self):
return self.domains_index['']
def _update_user_profile(self, user, email):
LOG.debug('Add email %s to user %s', email, user['user_id'])
user['emails'].append(email)
company_name = self._get_company_by_email(email)
if ((company_name) and (len(user['companies']) == 1) and
(user['companies'][0]['company_name'] != company_name)):
LOG.debug('Updating affiliation of user %s to %s',
user['user_id'], company_name)
user['companies'][0]['company_name'] = company_name
self.updated_users.add(user['user_id'])
def update_user(self, record):
email = record.get('author_email')
if email in self.users_index:
user = self.users_index[email]
else:
if record.get('launchpad_id'):
launchpad_id = record.get('launchpad_id')
user_name = record.get('author_name')
else:
launchpad_id, user_name = self._get_lp_info(email)
if (launchpad_id) and (launchpad_id in self.users_index):
# merge emails
user = self.users_index[launchpad_id]
if email:
self._update_user_profile(user, email)
else:
# create new
if not user_name:
user_name = record.get('author_name')
if not user_name:
user_name = self._get_lp_user_name(launchpad_id)
user = self._create_user(launchpad_id, email, user_name)
utils.store_user(self.runtime_storage_inst, user)
if email:
self.users_index[email] = user
if user['launchpad_id']:
self.users_index[user['launchpad_id']] = user
return user
def _update_record_and_user(self, record):
user = self.update_user(record)
record['user_id'] = user['user_id']
record['launchpad_id'] = user['launchpad_id']
if user.get('user_name'):
record['author_name'] = user['user_name']
company = self._find_company(user['companies'], record['date'])
if company != '*robots':
company = (self._get_company_by_email(record.get('author_email'))
or company)
record['company_name'] = company
def _process_commit(self, record):
record['primary_key'] = record['commit_id']
record['loc'] = record['lines_added'] + record['lines_deleted']
record['author_email'] = record['author_email'].lower()
self._update_record_and_user(record)
if record['company_name'] != '*robots':
yield record
def _spawn_review(self, record):
# copy everything except pathsets and flatten user data
review = dict([(k, v) for k, v in record.iteritems()
if k not in ['patchSets', 'owner', 'createdOn']])
owner = record['owner']
if 'email' not in owner or 'username' not in owner:
return # ignore
review['primary_key'] = review['id']
review['launchpad_id'] = owner['username']
review['author_name'] = owner['name']
review['author_email'] = owner['email'].lower()
review['date'] = record['createdOn']
self._update_record_and_user(review)
yield review
def _spawn_marks(self, record):
review_id = record['id']
module = record['module']
for patch in record.get('patchSets', []):
if 'approvals' not in patch:
continue # not reviewed by anyone
for approval in patch['approvals']:
# copy everything and flatten user data
mark = dict([(k, v) for k, v in approval.iteritems()
if k not in ['by', 'grantedOn']])
reviewer = approval['by']
if 'email' not in reviewer or 'username' not in reviewer:
continue # ignore
mark['record_type'] = 'mark'
mark['date'] = approval['grantedOn']
mark['primary_key'] = (record['id'] +
str(mark['date']) +
mark['type'])
mark['launchpad_id'] = reviewer['username']
mark['author_name'] = reviewer['name']
mark['author_email'] = reviewer['email'].lower()
mark['module'] = module
mark['review_id'] = review_id
self._update_record_and_user(mark)
yield mark
def _process_review(self, record):
"""
        Process a review. A review spawns records of two types:
        * review - records that a user created a review request
        * mark - records that a user set an approval mark on a given review
"""
for gen in [self._spawn_review, self._spawn_marks]:
for r in gen(record):
yield r
def _guess_module(self, record):
subject = record['subject'].lower()
pos = len(subject)
best_guess_module = None
for module in self._get_modules():
find = subject.find(module)
if (find >= 0) and (find < pos):
pos = find
best_guess_module = module
if best_guess_module:
if (((pos > 0) and (subject[pos - 1] == '[')) or
(not record.get('module'))):
record['module'] = best_guess_module
if not record.get('module'):
record['module'] = 'unknown'
def _process_email(self, record):
record['primary_key'] = record['message_id']
record['author_email'] = record['author_email'].lower()
self._update_record_and_user(record)
self._guess_module(record)
if not record.get('blueprint_id'):
del record['body']
yield record
def _process_blueprint(self, record):
bpd_author = record.get('drafter') or record.get('owner')
bpd = dict([(k, v) for k, v in record.iteritems()
if k.find('_link') < 0])
bpd['record_type'] = 'bpd'
bpd['primary_key'] = 'bpd:' + record['id']
bpd['launchpad_id'] = bpd_author
bpd['date'] = record['date_created']
self._update_record_and_user(bpd)
yield bpd
if record.get('assignee') and record['date_completed']:
bpc = dict([(k, v) for k, v in record.iteritems()
if k.find('_link') < 0])
bpc['record_type'] = 'bpc'
bpc['primary_key'] = 'bpc:' + record['id']
bpc['launchpad_id'] = record['assignee']
bpc['date'] = record['date_completed']
self._update_record_and_user(bpc)
yield bpc
def _apply_type_based_processing(self, record):
if record['record_type'] == 'commit':
for r in self._process_commit(record):
yield r
elif record['record_type'] == 'review':
for r in self._process_review(record):
yield r
elif record['record_type'] == 'email':
for r in self._process_email(record):
yield r
elif record['record_type'] == 'bp':
for r in self._process_blueprint(record):
yield r
def process(self, record_iterator):
for record in record_iterator:
for r in self._apply_type_based_processing(record):
if r['company_name'] == '*robots':
continue
r['week'] = utils.timestamp_to_week(r['date'])
if ('release' not in r) or (not r['release']):
r['release'] = self._get_release(r['date'])
yield r
self.runtime_storage_inst.set_by_key('users', self.users_index)
def update(self, record_iterator, release_index):
for record in record_iterator:
need_update = False
company_name = record['company_name']
user_id = record['user_id']
author_name = record['author_name']
self._update_record_and_user(record)
if ((record['company_name'] != company_name) or
(record['user_id'] != user_id) or
(record['author_name'] != author_name)):
need_update = True
if record['primary_key'] in release_index:
release = release_index[record['primary_key']]
else:
release = self._get_release(record['date'])
if record['release'] != release:
need_update = True
record['release'] = release
if need_update:
yield record
self.runtime_storage_inst.set_by_key('users', self.users_index)
def _get_records_for_users_to_update(self):
users_reviews = {}
valid_blueprints = {}
mentioned_blueprints = {}
for record in self.runtime_storage_inst.get_all_records():
for bp in record.get('blueprint_id', []):
if bp in mentioned_blueprints:
mentioned_blueprints[bp]['count'] += 1
if record['date'] > mentioned_blueprints[bp]['date']:
mentioned_blueprints[bp]['date'] = record['date']
else:
mentioned_blueprints[bp] = {
'count': 1,
'date': record['date']
}
if record['record_type'] in ['bpd', 'bpc']:
valid_blueprints[record['id']] = {
'primary_key': record['primary_key'],
'count': 0,
'date': record['date']
}
if record['record_type'] == 'review':
launchpad_id = record['launchpad_id']
review = {'date': record['date'], 'id': record['id']}
if launchpad_id in users_reviews:
users_reviews[launchpad_id].append(review)
else:
users_reviews[launchpad_id] = [review]
for bp_name, bp in valid_blueprints.iteritems():
if bp_name in mentioned_blueprints:
bp['count'] = mentioned_blueprints[bp_name]['count']
bp['date'] = mentioned_blueprints[bp_name]['date']
else:
bp['count'] = 0
bp['date'] = 0
reviews_index = {}
for launchpad_id, reviews in users_reviews.iteritems():
reviews.sort(key=lambda x: x['date'])
review_number = 0
for review in reviews:
review_number += 1
review['review_number'] = review_number
reviews_index[review['id']] = review
for record in self.runtime_storage_inst.get_all_records():
need_update = False
user_id = record['user_id']
if user_id in self.updated_users:
user = self.users_index[user_id]
user_company_name = user['companies'][0]['company_name']
if record['company_name'] != user_company_name:
LOG.debug('Update record %s: company changed to: %s',
record['primary_key'], user_company_name)
record['company_name'] = user_company_name
need_update = True
valid_bp = set([])
for bp in record.get('blueprint_id', []):
if bp in valid_blueprints:
valid_bp.add(bp)
else:
LOG.debug('Update record %s: removed invalid bp: %s',
record['primary_key'], bp)
need_update = True
record['blueprint_id'] = list(valid_bp)
if record['record_type'] in ['bpd', 'bpc']:
bp = valid_blueprints[record['id']]
if ((record.get('mention_count') != bp['count']) or
(record.get('mention_date') != bp['date'])):
record['mention_count'] = bp['count']
record['mention_date'] = bp['date']
LOG.debug('Update record %s: mention stats: (%s:%s)',
record['primary_key'], bp['count'], bp['date'])
need_update = True
if record['record_type'] == 'review':
review = reviews_index[record['id']]
if record.get('review_number') != review['review_number']:
record['review_number'] = review['review_number']
need_update = True
if need_update:
yield record
def finalize(self):
self.runtime_storage_inst.set_records(
self._get_records_for_users_to_update())
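# Illustrative sketch (not part of the original module): the company lookup in
# RecordProcessor._get_company_by_email walks domain suffixes from longest to
# shortest and returns the first one present in the domains index. The same
# logic is reproduced below as a standalone function against a hypothetical
# index, purely for demonstration.
def _example_company_by_email(email, domains_index):
    """Return the company mapped to the longest matching domain suffix."""
    name, at, domain = email.partition('@')
    if not domain:
        return None
    parts = domain.split('.')
    for i in range(len(parts), 1, -1):
        suffix = '.'.join(parts[len(parts) - i:])
        if suffix in domains_index:
            return domains_index[suffix]
    return None
# For example, with {'example.com': 'Example Inc', 'ru.example.com': 'Example RU'}
# the address dev@ru.example.com maps to 'Example RU', while
# dev@mail.example.com falls back to 'Example Inc'.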
|
|
from __future__ import absolute_import
from __future__ import print_function
import sys
import tornado.web
import logging
from django import http
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest, get_script_name
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.core import signals
from django.core import exceptions, urlresolvers
from django.http import HttpRequest, HttpResponse
from threading import Lock
from tornado.wsgi import WSGIContainer
from six.moves import urllib
from zerver.decorator import RespondAsynchronously
from zerver.lib.response import json_response
from zerver.middleware import async_request_stop, async_request_restart
from zerver.tornado.descriptors import get_descriptor_by_handler_id
from typing import Any, Callable, Dict, List
current_handler_id = 0
handlers = {} # type: Dict[int, AsyncDjangoHandler]
def get_handler_by_id(handler_id):
# type: (int) -> AsyncDjangoHandler
return handlers[handler_id]
def allocate_handler_id(handler):
# type: (AsyncDjangoHandler) -> int
global current_handler_id
handlers[current_handler_id] = handler
handler.handler_id = current_handler_id
current_handler_id += 1
return handler.handler_id
def clear_handler_by_id(handler_id):
# type: (int) -> None
del handlers[handler_id]
def handler_stats_string():
# type: () -> str
return "%s handlers, latest ID %s" % (len(handlers), current_handler_id)
def finish_handler(handler_id, event_queue_id, contents, apply_markdown):
# type: (int, str, List[Dict[str, Any]], bool) -> None
err_msg = "Got error finishing handler for queue %s" % (event_queue_id,)
try:
# We call async_request_restart here in case we are
# being finished without any events (because another
# get_events request has supplanted this request)
handler = get_handler_by_id(handler_id)
request = handler._request
async_request_restart(request)
if len(contents) != 1:
request._log_data['extra'] = "[%s/1]" % (event_queue_id,)
else:
request._log_data['extra'] = "[%s/1/%s]" % (event_queue_id, contents[0]["type"])
handler.zulip_finish(dict(result='success', msg='',
events=contents,
queue_id=event_queue_id),
request, apply_markdown=apply_markdown)
except IOError as e:
if str(e) != 'Stream is closed':
logging.exception(err_msg)
except AssertionError as e:
if str(e) != 'Request closed':
logging.exception(err_msg)
except Exception:
logging.exception(err_msg)
# Modified version of the base Tornado handler for Django
class AsyncDjangoHandler(tornado.web.RequestHandler, base.BaseHandler):
initLock = Lock()
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
super(AsyncDjangoHandler, self).__init__(*args, **kwargs)
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
self._request_middleware = None # type: ignore # Should be List[Callable[[WSGIRequest], Any]] https://github.com/JukkaL/mypy/issues/1174
self.initLock.acquire()
# Check that middleware is still uninitialised.
if self._request_middleware is None:
self.load_middleware()
self.initLock.release()
self._auto_finish = False
# Handler IDs are allocated here, and the handler ID map must
# be cleared when the handler finishes its response
allocate_handler_id(self)
def __repr__(self):
# type: () -> str
descriptor = get_descriptor_by_handler_id(self.handler_id)
return "AsyncDjangoHandler<%s, %s>" % (self.handler_id, descriptor)
def get(self, *args, **kwargs):
# type: (*Any, **Any) -> None
environ = WSGIContainer.environ(self.request)
environ['PATH_INFO'] = urllib.parse.unquote(environ['PATH_INFO'])
request = WSGIRequest(environ)
request._tornado_handler = self
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__)
try:
response = self.get_response(request)
if not response:
return
finally:
signals.request_finished.send(sender=self.__class__)
self.set_status(response.status_code)
for h in response.items():
self.set_header(h[0], h[1])
if not hasattr(self, "_new_cookies"):
self._new_cookies = [] # type: List[http.cookie.SimpleCookie]
self._new_cookies.append(response.cookies)
self.write(response.content)
self.finish()
def head(self, *args, **kwargs):
# type: (*Any, **Any) -> None
self.get(*args, **kwargs)
def post(self, *args, **kwargs):
# type: (*Any, **Any) -> None
self.get(*args, **kwargs)
def delete(self, *args, **kwargs):
# type: (*Any, **Any) -> None
self.get(*args, **kwargs)
def on_connection_close(self):
# type: () -> None
client_descriptor = get_descriptor_by_handler_id(self.handler_id)
if client_descriptor is not None:
client_descriptor.disconnect_handler(client_closed=True)
# Based on django.core.handlers.base: get_response
def get_response(self, request):
# type: (HttpRequest) -> HttpResponse
"Returns an HttpResponse object for the given HttpRequest"
try:
try:
# Setup default url resolver for this thread.
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if hasattr(request, "urlconf"):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
### ADDED BY ZULIP
request._resolver = resolver
### END ADDED BY ZULIP
callback, callback_args, callback_kwargs = resolver.resolve(
request.path_info)
# Apply view middleware
if response is None:
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args,
callback_kwargs)
if response:
break
### THIS BLOCK MODIFIED BY ZULIP
if response is None:
try:
response = callback(request, *callback_args, **callback_kwargs)
if response is RespondAsynchronously:
async_request_stop(request)
return None
clear_handler_by_id(self.handler_id)
except Exception as e:
clear_handler_by_id(self.handler_id)
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if response is None:
raise
if response is None:
try:
view_name = callback.__name__
except AttributeError:
view_name = callback.__class__.__name__ + '.__call__'
raise ValueError("The view %s.%s returned None." %
(callback.__module__, view_name))
                # If the response supports deferred rendering, apply template
                # response middleware and then render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response = response.render()
except http.Http404 as e:
if settings.DEBUG:
from django.views import debug
response = debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
response = callback(request, **param_dict)
except Exception:
try:
response = self.handle_uncaught_exception(request, resolver,
sys.exc_info())
finally:
signals.got_request_exception.send(sender=self.__class__,
request=request)
except exceptions.PermissionDenied:
logging.warning(
'Forbidden (Permission denied): %s', request.path,
extra={
'status_code': 403,
'request': request
})
try:
callback, param_dict = resolver.resolve403()
response = callback(request, **param_dict)
except Exception:
try:
response = self.handle_uncaught_exception(request,
resolver, sys.exc_info())
finally:
signals.got_request_exception.send(
sender=self.__class__, request=request)
except SystemExit:
# See https://code.djangoproject.com/ticket/4701
raise
except Exception as e:
exc_info = sys.exc_info()
signals.got_request_exception.send(sender=self.__class__, request=request)
return self.handle_uncaught_exception(request, resolver, exc_info)
finally:
# Reset urlconf on the way out for isolation
urlresolvers.set_urlconf(None)
### ZULIP CHANGE: The remainder of this function was moved
### into its own function, just below, so we can call it from
### finish().
response = self.apply_response_middleware(request, response, resolver)
return response
### Copied from get_response (above in this file)
def apply_response_middleware(self, request, response, resolver):
# type: (HttpRequest, HttpResponse, urlresolvers.RegexURLResolver) -> HttpResponse
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
if hasattr(self, 'apply_response_fixes'):
response = self.apply_response_fixes(request, response)
except Exception: # Any exception should be gathered and handled
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
return response
def zulip_finish(self, response, request, apply_markdown):
# type: (HttpResponse, HttpRequest, bool) -> None
# Make sure that Markdown rendering really happened, if requested.
# This is a security issue because it's where we escape HTML.
# c.f. ticket #64
#
# apply_markdown=True is the fail-safe default.
if response['result'] == 'success' and 'messages' in response and apply_markdown:
for msg in response['messages']:
if msg['content_type'] != 'text/html':
self.set_status(500)
self.finish('Internal error: bad message format')
if response['result'] == 'error':
self.set_status(400)
# Call the Django response middleware on our object so that
# e.g. our own logging code can run; but don't actually use
# the headers from that since sending those to Tornado seems
# tricky; instead just send the (already json-rendered)
# content on to Tornado
django_response = json_response(res_type=response['result'],
data=response, status=self.get_status())
django_response = self.apply_response_middleware(request, django_response,
request._resolver)
# Pass through the content-type from Django, as json content should be
# served as application/json
self.set_header("Content-Type", django_response['Content-Type'])
self.finish(django_response.content)
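# Illustrative sketch (not part of the original module): the module-level
# handler registry above hands out integer IDs so that event-queue code can
# later locate a live handler and finish its long-polled request. A
# hypothetical minimal object with a handler_id attribute is enough to
# exercise allocate_handler_id / get_handler_by_id / clear_handler_by_id; the
# block runs only when this file is executed directly.
if __name__ == '__main__':
    class _FakeHandler(object):
        handler_id = None
    fake = _FakeHandler()
    fake_id = allocate_handler_id(fake)
    assert get_handler_by_id(fake_id) is fake
    print(handler_stats_string())  # e.g. "1 handlers, latest ID 1"
    clear_handler_by_id(fake_id)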
|