| max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
lte/gateway/python/magma/pipelined/tests/test_internal_pkt_ipfix_export.py | Aitend/magma | 849 | 12740212 | <reponame>Aitend/magma<gh_stars>100-1000
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import warnings
from concurrent.futures import Future
from lte.protos.mconfig.mconfigs_pb2 import PipelineD
from lte.protos.pipelined_pb2 import FlowRequest
from lte.protos.policydb_pb2 import FlowMatch
from magma.pipelined.app.dpi import DPIController
from magma.pipelined.bridge_util import BridgeTools
from magma.pipelined.policy_converters import convert_ipv4_str_to_ip_proto
from magma.pipelined.tests.app.start_pipelined import (
PipelinedController,
TestSetup,
)
from magma.pipelined.tests.pipelined_test_util import (
SnapshotVerifier,
create_service_manager,
start_ryu_app_thread,
stop_ryu_app_thread,
)
from nose.tools import nottest
class InternalPktIpfixExportTest(unittest.TestCase):
BRIDGE = 'testing_br'
IFACE = 'testing_br'
MAC_DEST = "5e:cc:cc:b1:49:4b"
BRIDGE_IP = '192.168.128.1'
DPI_PORT = 'mon1'
DPI_IP = '1.1.1.1'
@classmethod
def setUpClass(cls):
"""
Start the thread which launches the ryu apps.
Create a testing bridge, add a port, and set up the port interfaces. Then
launch the ryu apps for testing pipelined. Get references to the launched
apps using futures, and mock the redis policy_dictionary of the
dpi_controller.
"""
super(InternalPktIpfixExportTest, cls).setUpClass()
warnings.simplefilter('ignore')
cls._static_rule_dict = {}
cls.service_manager = create_service_manager(
[PipelineD.DPI], ['ue_mac', 'ipfix'],
)
cls._tbl_num = cls.service_manager.get_table_num(
DPIController.APP_NAME,
)
ue_mac_controller_reference = Future()
dpi_controller_reference = Future()
ipfix_controller_reference = Future()
testing_controller_reference = Future()
test_setup = TestSetup(
apps=[
PipelinedController.UEMac,
PipelinedController.DPI,
PipelinedController.IPFIX,
PipelinedController.Testing,
PipelinedController.StartupFlows,
],
references={
PipelinedController.UEMac:
ue_mac_controller_reference,
PipelinedController.DPI:
dpi_controller_reference,
PipelinedController.Arp:
Future(),
PipelinedController.IPFIX:
ipfix_controller_reference,
PipelinedController.Testing:
testing_controller_reference,
PipelinedController.StartupFlows:
Future(),
},
config={
'bridge_name': cls.BRIDGE,
'bridge_ip_address': '192.168.128.1',
'internal_ip_subnet': '192.168.0.0/16',
'nat_iface': 'eth2',
'enodeb_iface': 'eth1',
'enable_queue_pgm': False,
'clean_restart': True,
'setup_type': 'CWF',
'dpi': {
'enabled': True,
'mon_port': 'mon1',
'mon_port_number': 32769,
'idle_timeout': 42,
},
'ipfix': {
'enabled': True,
'probability': 65,
'collector_set_id': 1,
'collector_ip': '1.1.1.1',
'collector_port': 65010,
'cache_timeout': 60,
'obs_domain_id': 1,
'obs_point_id': 1,
},
'conntrackd': {
'enabled': True,
},
'ovs_gtp_port_number': 32768,
},
mconfig=PipelineD(),
loop=None,
service_manager=cls.service_manager,
integ_test=False,
)
BridgeTools.create_bridge(cls.BRIDGE, cls.IFACE)
BridgeTools.create_internal_iface(
cls.BRIDGE, cls.DPI_PORT,
cls.DPI_IP,
)
cls.thread = start_ryu_app_thread(test_setup)
cls.ue_mac_controller = ue_mac_controller_reference.result()
cls.dpi_controller = dpi_controller_reference.result()
cls.ipfix_controller = ipfix_controller_reference.result()
cls.testing_controller = testing_controller_reference.result()
cls.dpi_controller._policy_dict = cls._static_rule_dict
@classmethod
def tearDownClass(cls):
stop_ryu_app_thread(cls.thread)
BridgeTools.destroy_bridge(cls.BRIDGE)
def test_subscriber_policy(self):
"""
Classify DPI flow, verify internal packet is generated
Assert:
snapshots match
"""
imsi = 'IMSI010000000088888'
ue_mac = '5e:cc:cc:b1:49:4b'
self.ue_mac_controller.add_ue_mac_flow(imsi, ue_mac)
flow_match = FlowMatch(
ip_proto=FlowMatch.IPPROTO_TCP,
ip_dst=convert_ipv4_str_to_ip_proto('192.168.3.11'),
ip_src=convert_ipv4_str_to_ip_proto('1.2.3.0'),
tcp_dst=80, tcp_src=51115, direction=FlowMatch.UPLINK,
)
self.dpi_controller.add_classify_flow(
flow_match, FlowRequest.FLOW_FINAL_CLASSIFICATION,
'base.ip.http.facebook', 'tbd',
)
self.ipfix_controller.add_ue_sample_flow(
imsi, "magma_is_awesome_msisdn",
"00:11:22:33:44:55", "apn_name123456789", 145,
)
snapshot_verifier = SnapshotVerifier(
self, self.BRIDGE,
self.service_manager,
include_stats=False,
)
with snapshot_verifier:
pass
self.ipfix_controller.delete_ue_sample_flow(imsi)
snapshot_verifier = SnapshotVerifier(
self, self.BRIDGE,
self.service_manager,
'after_deletion',
include_stats=False,
)
with snapshot_verifier:
pass
if __name__ == "__main__":
unittest.main()
|
tests/pipeline/test_adjusted_array.py | leonarduschen/zipline | 14,525 | 12740219 | <gh_stars>1000+
"""
Tests for chunked adjustments.
"""
from collections import namedtuple
from itertools import chain, product
from string import ascii_lowercase, ascii_uppercase
from textwrap import dedent
from unittest import TestCase
from nose_parameterized import parameterized
from numpy import (
arange,
array,
asarray,
dtype,
full,
)
from six.moves import zip_longest
from toolz import curry
from zipline.errors import WindowLengthNotPositive, WindowLengthTooLong
from zipline.lib.adjustment import (
Boolean1DArrayOverwrite,
BooleanOverwrite,
Datetime641DArrayOverwrite,
Datetime64Overwrite,
Float641DArrayOverwrite,
Float64Multiply,
Float64Overwrite,
Int64Overwrite,
Object1DArrayOverwrite,
ObjectOverwrite,
)
from zipline.lib.adjusted_array import AdjustedArray
from zipline.lib.labelarray import LabelArray
from zipline.testing import check_arrays
from zipline.testing.predicates import assert_equal
from zipline.utils.compat import unicode
from zipline.utils.numpy_utils import (
coerce_to_dtype,
datetime64ns_dtype,
default_missing_value_for_dtype,
bool_dtype,
float64_dtype,
int64_dtype,
object_dtype,
)
def moving_window(array, nrows):
"""
Simple moving window generator over a 2D numpy array.
"""
count = num_windows_of_length_M_on_buffers_of_length_N(nrows, len(array))
for i in range(count):
yield array[i:i + nrows]
def num_windows_of_length_M_on_buffers_of_length_N(M, N):
"""
For a window of length M rolling over a buffer of length N,
there are (N - M) + 1 legal windows.
Example:
If my array has N=4 rows, and I want windows of length M=2, there are
3 legal windows: data[0:2], data[1:3], and data[2:4].
"""
return N - M + 1
def valid_window_lengths(underlying_buffer_length):
"""
An iterator of all legal window lengths on a buffer of a given length.
Returns values from 1 to underlying_buffer_length.
"""
return iter(range(1, underlying_buffer_length + 1))
@curry
def as_dtype(dtype, data):
"""
Curried wrapper around array.astype for when you have the dtype before you
have the data.
"""
return asarray(data).astype(dtype)
@curry
def as_labelarray(initial_dtype, missing_value, array):
"""
Curried wrapper around LabelArray, that round-trips the input data through
`initial_dtype` first.
"""
return LabelArray(
array.astype(initial_dtype),
missing_value=initial_dtype.type(missing_value),
)
bytes_dtype = dtype('S3')
unicode_dtype = dtype('U3')
AdjustmentCase = namedtuple(
'AdjustmentCase',
[
'name',
'baseline',
'window_length',
'adjustments',
'missing_value',
'perspective_offset',
'expected_result',
]
)
def _gen_unadjusted_cases(name,
make_input,
make_expected_output,
missing_value):
nrows = 6
ncols = 3
raw_data = arange(nrows * ncols).reshape(nrows, ncols)
input_array = make_input(raw_data)
expected_output_array = make_expected_output(raw_data)
for windowlen in valid_window_lengths(nrows):
num_legal_windows = num_windows_of_length_M_on_buffers_of_length_N(
windowlen, nrows
)
yield AdjustmentCase(
name="%s_length_%d" % (name, windowlen),
baseline=input_array,
window_length=windowlen,
adjustments={},
missing_value=missing_value,
perspective_offset=0,
expected_result=[
expected_output_array[offset:offset + windowlen]
for offset in range(num_legal_windows)
],
)
def _gen_multiplicative_adjustment_cases(dtype):
"""
Generate expected moving windows on a buffer with adjustments.
We proceed by constructing, at each row, the view of the array we expect
in all windows anchored on that row.
In general, if we have an adjustment to be applied once we process the row
at index N, we should see that adjustment applied to the underlying buffer for
any window containing the row at index N.
We then build all legal windows over these buffers.
"""
adjustment_type = {
float64_dtype: Float64Multiply,
}[dtype]
nrows, ncols = 6, 3
adjustments = {}
buffer_as_of = [None] * 6
baseline = buffer_as_of[0] = full((nrows, ncols), 1, dtype=dtype)
# Note that row indices are inclusive!
adjustments[1] = [
adjustment_type(0, 0, 0, 0, coerce_to_dtype(dtype, 2)),
]
buffer_as_of[1] = array([[2, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]], dtype=dtype)
# No adjustment at index 2.
buffer_as_of[2] = buffer_as_of[1]
adjustments[3] = [
adjustment_type(1, 2, 1, 1, coerce_to_dtype(dtype, 3)),
adjustment_type(0, 1, 0, 0, coerce_to_dtype(dtype, 4)),
]
buffer_as_of[3] = array([[8, 1, 1],
[4, 3, 1],
[1, 3, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]], dtype=dtype)
adjustments[4] = [
adjustment_type(0, 3, 2, 2, coerce_to_dtype(dtype, 5))
]
buffer_as_of[4] = array([[8, 1, 5],
[4, 3, 5],
[1, 3, 5],
[1, 1, 5],
[1, 1, 1],
[1, 1, 1]], dtype=dtype)
adjustments[5] = [
adjustment_type(0, 4, 1, 1, coerce_to_dtype(dtype, 6)),
adjustment_type(2, 2, 2, 2, coerce_to_dtype(dtype, 7)),
]
buffer_as_of[5] = array([[8, 6, 5],
[4, 18, 5],
[1, 18, 35],
[1, 6, 5],
[1, 6, 1],
[1, 1, 1]], dtype=dtype)
return _gen_expectations(
baseline,
default_missing_value_for_dtype(dtype),
adjustments,
buffer_as_of,
nrows,
perspective_offsets=(0, 1),
)
def _gen_overwrite_adjustment_cases(dtype):
"""
Generate test cases for overwrite adjustments.
The algorithm used here is the same as the one used above for
multiplicative adjustments. The only difference is the semantics of how
the adjustments are expected to modify the arrays.
This is parameterized on `make_input` and `make_expected_output` functions,
which take 2-D lists of values and transform them into desired input/output
arrays. We do this so that we can easily test both vanilla numpy ndarrays
and our own LabelArray class for strings.
"""
adjustment_type = {
float64_dtype: Float64Overwrite,
datetime64ns_dtype: Datetime64Overwrite,
int64_dtype: Int64Overwrite,
bytes_dtype: ObjectOverwrite,
unicode_dtype: ObjectOverwrite,
object_dtype: ObjectOverwrite,
bool_dtype: BooleanOverwrite,
}[dtype]
make_expected_dtype = as_dtype(dtype)
missing_value = default_missing_value_for_dtype(datetime64ns_dtype)
if dtype == object_dtype:
# When we're testing object dtypes, we expect to have strings, but
# coerce_to_dtype(object, 3) just gives 3 as a Python integer.
def make_overwrite_value(dtype, value):
return str(value)
else:
make_overwrite_value = coerce_to_dtype
adjustments = {}
buffer_as_of = [None] * 6
baseline = make_expected_dtype([[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2]])
buffer_as_of[0] = make_expected_dtype([[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2]])
# Note that row indices are inclusive!
adjustments[1] = [
adjustment_type(0, 0, 0, 0, make_overwrite_value(dtype, 1)),
]
buffer_as_of[1] = make_expected_dtype([[1, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2]])
# No adjustment at index 2.
buffer_as_of[2] = buffer_as_of[1]
adjustments[3] = [
adjustment_type(1, 2, 1, 1, make_overwrite_value(dtype, 3)),
adjustment_type(0, 1, 0, 0, make_overwrite_value(dtype, 4)),
]
buffer_as_of[3] = make_expected_dtype([[4, 2, 2],
[4, 3, 2],
[2, 3, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2]])
adjustments[4] = [
adjustment_type(0, 3, 2, 2, make_overwrite_value(dtype, 5))
]
buffer_as_of[4] = make_expected_dtype([[4, 2, 5],
[4, 3, 5],
[2, 3, 5],
[2, 2, 5],
[2, 2, 2],
[2, 2, 2]])
adjustments[5] = [
adjustment_type(0, 4, 1, 1, make_overwrite_value(dtype, 6)),
adjustment_type(2, 2, 2, 2, make_overwrite_value(dtype, 7)),
]
buffer_as_of[5] = make_expected_dtype([[4, 6, 5],
[4, 6, 5],
[2, 6, 7],
[2, 6, 5],
[2, 6, 2],
[2, 2, 2]])
return _gen_expectations(
baseline,
missing_value,
adjustments,
buffer_as_of,
nrows=6,
perspective_offsets=(0, 1),
)
def _gen_overwrite_1d_array_adjustment_case(dtype):
"""
Generate test cases for overwrite adjustments.
The algorithm used here is the same as the one used above for
multiplicative adjustments. The only difference is the semantics of how
the adjustments are expected to modify the arrays.
This is parameterized on `make_input` and `make_expected_output` functions,
which take 1-D lists of values and transform them into desired input/output
arrays. We do this so that we can easily test both vanilla numpy ndarrays
and our own LabelArray class for strings.
"""
adjustment_type = {
bool_dtype: Boolean1DArrayOverwrite,
float64_dtype: Float641DArrayOverwrite,
datetime64ns_dtype: Datetime641DArrayOverwrite,
}[dtype]
make_expected_dtype = as_dtype(dtype)
missing_value = default_missing_value_for_dtype(datetime64ns_dtype)
adjustments = {}
buffer_as_of = [None] * 6
baseline = make_expected_dtype([[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2]])
buffer_as_of[0] = make_expected_dtype([[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2]])
vals1 = [1]
# Note that row indices are inclusive!
adjustments[1] = [
adjustment_type(
0, 0, 0, 0,
array([coerce_to_dtype(dtype, val) for val in vals1])
)
]
buffer_as_of[1] = make_expected_dtype([[1, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2]])
# No adjustment at index 2.
buffer_as_of[2] = buffer_as_of[1]
vals3 = [4, 4, 1]
adjustments[3] = [
adjustment_type(
0, 2, 0, 0,
array([coerce_to_dtype(dtype, val) for val in vals3])
)
]
buffer_as_of[3] = make_expected_dtype([[4, 2, 2],
[4, 2, 2],
[1, 2, 2],
[2, 2, 2],
[2, 2, 2],
[2, 2, 2]])
vals4 = [5] * 4
adjustments[4] = [
adjustment_type(
0, 3, 2, 2,
array([coerce_to_dtype(dtype, val) for val in vals4]))
]
buffer_as_of[4] = make_expected_dtype([[4, 2, 5],
[4, 2, 5],
[1, 2, 5],
[2, 2, 5],
[2, 2, 2],
[2, 2, 2]])
vals5 = range(1, 6)
adjustments[5] = [
adjustment_type(
0, 4, 1, 1,
array([coerce_to_dtype(dtype, val) for val in vals5])),
]
buffer_as_of[5] = make_expected_dtype([[4, 1, 5],
[4, 2, 5],
[1, 3, 5],
[2, 4, 5],
[2, 5, 2],
[2, 2, 2]])
return _gen_expectations(
baseline,
missing_value,
adjustments,
buffer_as_of,
nrows=6,
perspective_offsets=(0, 1),
)
def _gen_expectations(baseline,
missing_value,
adjustments,
buffer_as_of,
nrows,
perspective_offsets):
for windowlen, perspective_offset in product(valid_window_lengths(nrows),
perspective_offsets):
# How long is an iterator of length-N windows on this buffer?
# For example, for a window of length 3 on a buffer of length 6, there
# are four valid windows.
num_legal_windows = num_windows_of_length_M_on_buffers_of_length_N(
windowlen, nrows
)
# Build the sequence of regions in the underlying buffer we expect to
# see. For example, with a window length of 3 on a buffer of length 6,
# we expect to see:
# (buffer[0:3], buffer[1:4], buffer[2:5], buffer[3:6])
#
slices = [slice(i, i + windowlen) for i in range(num_legal_windows)]
# The sequence of perspectives we expect to take on the underlying
# data. For example, with a window length of 3 and a perspective offset
# of 1, we expect to see:
# (buffer_as_of[3], buffer_as_of[4], buffer_as_of[5], buffer_as_of[5])
#
initial_perspective = windowlen + perspective_offset - 1
perspectives = range(
initial_perspective,
initial_perspective + num_legal_windows
)
def as_of(p):
# perspective_offset can push us past the end of the underlying
# buffer/adjustments. When it does, we should always see the latest
# version of the buffer.
if p >= len(buffer_as_of):
return buffer_as_of[-1]
return buffer_as_of[p]
expected_iterator_results = [
as_of(perspective)[slice_]
for slice_, perspective in zip(slices, perspectives)
]
test_name = "dtype_{}_length_{}_perpective_offset_{}".format(
baseline.dtype,
windowlen,
perspective_offset,
)
yield AdjustmentCase(
name=test_name,
baseline=baseline,
window_length=windowlen,
adjustments=adjustments,
missing_value=missing_value,
perspective_offset=perspective_offset,
expected_result=expected_iterator_results
)
class AdjustedArrayTestCase(TestCase):
def test_traverse_invalidating(self):
data = arange(5 * 3, dtype='f8').reshape(5, 3)
original_data = data.copy()
adjustments = {2: [Float64Multiply(0, 4, 0, 2, 2.0)]}
adjusted_array = AdjustedArray(data, adjustments, float('nan'))
for _ in adjusted_array.traverse(1, copy=False):
pass
assert_equal(data, original_data * 2)
with self.assertRaises(ValueError) as e:
adjusted_array.traverse(1)
assert_equal(
str(e.exception),
'cannot traverse invalidated AdjustedArray',
)
def test_copy(self):
data = arange(5 * 3, dtype='f8').reshape(5, 3)
original_data = data.copy()
adjustments = {2: [Float64Multiply(0, 4, 0, 2, 2.0)]}
adjusted_array = AdjustedArray(data, adjustments, float('nan'))
traverse_copy = adjusted_array.copy()
clean_copy = adjusted_array.copy()
a_it = adjusted_array.traverse(2, copy=False)
b_it = traverse_copy.traverse(2, copy=False)
for a, b in zip(a_it, b_it):
assert_equal(a, b)
with self.assertRaises(ValueError) as e:
adjusted_array.copy()
assert_equal(
str(e.exception),
'cannot copy invalidated AdjustedArray',
)
# the clean copy should have the original data even though the
# original adjusted array has its data mutated in place
assert_equal(clean_copy.data, original_data)
assert_equal(adjusted_array.data, original_data * 2)
@parameterized.expand(
chain(
_gen_unadjusted_cases(
'float',
make_input=as_dtype(float64_dtype),
make_expected_output=as_dtype(float64_dtype),
missing_value=default_missing_value_for_dtype(float64_dtype),
),
_gen_unadjusted_cases(
'datetime',
make_input=as_dtype(datetime64ns_dtype),
make_expected_output=as_dtype(datetime64ns_dtype),
missing_value=default_missing_value_for_dtype(
datetime64ns_dtype
),
),
# Test passing an array of strings to AdjustedArray.
_gen_unadjusted_cases(
'bytes_ndarray',
make_input=as_dtype(bytes_dtype),
make_expected_output=as_labelarray(bytes_dtype, b''),
missing_value=b'',
),
_gen_unadjusted_cases(
'unicode_ndarray',
make_input=as_dtype(unicode_dtype),
make_expected_output=as_labelarray(unicode_dtype, u''),
missing_value=u'',
),
_gen_unadjusted_cases(
'object_ndarray',
make_input=lambda a: a.astype(unicode).astype(object),
make_expected_output=as_labelarray(unicode_dtype, u''),
missing_value='',
),
# Test passing a LabelArray directly to AdjustedArray.
_gen_unadjusted_cases(
'bytes_labelarray',
make_input=as_labelarray(bytes_dtype, b''),
make_expected_output=as_labelarray(bytes_dtype, b''),
missing_value=b'',
),
_gen_unadjusted_cases(
'unicode_labelarray',
make_input=as_labelarray(unicode_dtype, None),
make_expected_output=as_labelarray(unicode_dtype, None),
missing_value=u'',
),
_gen_unadjusted_cases(
'object_labelarray',
make_input=(
lambda a: LabelArray(a.astype(unicode).astype(object), u'')
),
make_expected_output=as_labelarray(unicode_dtype, ''),
missing_value='',
),
)
)
def test_no_adjustments(self,
name,
data,
lookback,
adjustments,
missing_value,
perspective_offset,
expected_output):
array = AdjustedArray(data, adjustments, missing_value)
for _ in range(2):  # Iterate 2x to ensure adjusted_arrays are re-usable.
in_out = zip(array.traverse(lookback), expected_output)
for yielded, expected_yield in in_out:
check_arrays(yielded, expected_yield)
@parameterized.expand(_gen_multiplicative_adjustment_cases(float64_dtype))
def test_multiplicative_adjustments(self,
name,
data,
lookback,
adjustments,
missing_value,
perspective_offset,
expected):
array = AdjustedArray(data, adjustments, missing_value)
for _ in range(2):  # Iterate 2x to ensure adjusted_arrays are re-usable.
window_iter = array.traverse(
lookback,
perspective_offset=perspective_offset,
)
for yielded, expected_yield in zip_longest(window_iter, expected):
check_arrays(yielded, expected_yield)
@parameterized.expand(
chain(
_gen_overwrite_adjustment_cases(bool_dtype),
_gen_overwrite_adjustment_cases(int64_dtype),
_gen_overwrite_adjustment_cases(float64_dtype),
_gen_overwrite_adjustment_cases(datetime64ns_dtype),
_gen_overwrite_1d_array_adjustment_case(float64_dtype),
_gen_overwrite_1d_array_adjustment_case(datetime64ns_dtype),
_gen_overwrite_1d_array_adjustment_case(bool_dtype),
# There are six cases here:
# Using np.bytes/np.unicode/object arrays as inputs.
# Passing np.bytes/np.unicode/object arrays to LabelArray,
# and using those as input.
#
# The outputs should always be LabelArrays.
_gen_unadjusted_cases(
'bytes_ndarray',
make_input=as_dtype(bytes_dtype),
make_expected_output=as_labelarray(bytes_dtype, b''),
missing_value=b'',
),
_gen_unadjusted_cases(
'unicode_ndarray',
make_input=as_dtype(unicode_dtype),
make_expected_output=as_labelarray(unicode_dtype, u''),
missing_value=u'',
),
_gen_unadjusted_cases(
'object_ndarray',
make_input=lambda a: a.astype(unicode).astype(object),
make_expected_output=as_labelarray(unicode_dtype, u''),
missing_value=u'',
),
_gen_unadjusted_cases(
'bytes_labelarray',
make_input=as_labelarray(bytes_dtype, b''),
make_expected_output=as_labelarray(bytes_dtype, b''),
missing_value=b'',
),
_gen_unadjusted_cases(
'unicode_labelarray',
make_input=as_labelarray(unicode_dtype, u''),
make_expected_output=as_labelarray(unicode_dtype, u''),
missing_value=u'',
),
_gen_unadjusted_cases(
'object_labelarray',
make_input=(
lambda a: LabelArray(
a.astype(unicode).astype(object),
None,
)
),
make_expected_output=as_labelarray(unicode_dtype, u''),
missing_value=None,
),
)
)
def test_overwrite_adjustment_cases(self,
name,
baseline,
lookback,
adjustments,
missing_value,
perspective_offset,
expected):
array = AdjustedArray(baseline, adjustments, missing_value)
for _ in range(2):  # Iterate 2x to ensure adjusted_arrays are re-usable.
window_iter = array.traverse(
lookback,
perspective_offset=perspective_offset,
)
for yielded, expected_yield in zip_longest(window_iter, expected):
check_arrays(yielded, expected_yield)
def test_object1darrayoverwrite(self):
pairs = [u + l for u, l in product(ascii_uppercase, ascii_lowercase)]
categories = pairs + ['~' + c for c in pairs]
baseline = LabelArray(
array([[''.join((r, c)) for c in 'abc'] for r in ascii_uppercase]),
None,
categories,
)
full_expected = baseline.copy()
def flip(cs):
if cs is None:
return None
if cs[0] != '~':
return '~' + cs
return cs
def make_overwrite(fr, lr, fc, lc):
fr, lr, fc, lc = map(ord, (fr, lr, fc, lc))
fr -= ord('A')
lr -= ord('A')
fc -= ord('a')
lc -= ord('a')
return Object1DArrayOverwrite(
fr, lr,
fc, lc,
baseline[fr:lr + 1, fc].map(flip),
)
overwrites = {
3: [make_overwrite('A', 'B', 'a', 'a')],
4: [make_overwrite('A', 'C', 'b', 'c')],
5: [make_overwrite('D', 'D', 'a', 'b')],
}
it = AdjustedArray(baseline, overwrites, None).traverse(3)
window = next(it)
expected = full_expected[:3]
check_arrays(window, expected)
window = next(it)
full_expected[0:2, 0] = LabelArray(['~Aa', '~Ba'], None)
expected = full_expected[1:4]
check_arrays(window, expected)
window = next(it)
full_expected[0:3, 1:3] = LabelArray([['~Ab', '~Ac'],
['~Bb', '~Bc'],
['~Cb', '~Cb']], None)
expected = full_expected[2:5]
check_arrays(window, expected)
window = next(it)
full_expected[3, :2] = '~Da'
expected = full_expected[3:6]
check_arrays(window, expected)
def test_invalid_lookback(self):
data = arange(30, dtype=float).reshape(6, 5)
adj_array = AdjustedArray(data, {}, float('nan'))
with self.assertRaises(WindowLengthTooLong):
adj_array.traverse(7)
with self.assertRaises(WindowLengthNotPositive):
adj_array.traverse(0)
with self.assertRaises(WindowLengthNotPositive):
adj_array.traverse(-1)
def test_array_views_arent_writable(self):
data = arange(30, dtype=float).reshape(6, 5)
adj_array = AdjustedArray(data, {}, float('nan'))
for frame in adj_array.traverse(3):
with self.assertRaises(ValueError):
frame[0, 0] = 5.0
def test_inspect(self):
data = arange(15, dtype=float).reshape(5, 3)
adj_array = AdjustedArray(
data,
{4: [Float64Multiply(2, 3, 0, 0, 4.0)]},
float('nan'),
)
expected = dedent(
"""\
Adjusted Array (float64):
Data:
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.],
[ 12., 13., 14.]])
Adjustments:
{4: [Float64Multiply(first_row=2, last_row=3, first_col=0, \
last_col=0, value=4.000000)]}
"""
)
got = adj_array.inspect()
self.assertEqual(expected, got)
def test_update_labels(self):
data = array([
['aaa', 'bbb', 'ccc'],
['ddd', 'eee', 'fff'],
['ggg', 'hhh', 'iii'],
['jjj', 'kkk', 'lll'],
['mmm', 'nnn', 'ooo'],
])
label_array = LabelArray(data, missing_value='')
adj_array = AdjustedArray(
data=label_array,
adjustments={4: [ObjectOverwrite(2, 3, 0, 0, 'ppp')]},
missing_value='',
)
expected_data = array([
['aaa-foo', 'bbb-foo', 'ccc-foo'],
['ddd-foo', 'eee-foo', 'fff-foo'],
['ggg-foo', 'hhh-foo', 'iii-foo'],
['jjj-foo', 'kkk-foo', 'lll-foo'],
['mmm-foo', 'nnn-foo', 'ooo-foo'],
])
expected_label_array = LabelArray(expected_data, missing_value='')
expected_adj_array = AdjustedArray(
data=expected_label_array,
adjustments={4: [ObjectOverwrite(2, 3, 0, 0, 'ppp-foo')]},
missing_value='',
)
adj_array.update_labels(lambda x: x + '-foo')
# Check that the mapped AdjustedArray has the expected baseline
# values and adjustment values.
check_arrays(adj_array.data, expected_adj_array.data)
self.assertEqual(adj_array.adjustments, expected_adj_array.adjustments)
A = Float64Multiply(0, 4, 1, 1, 0.5)
B = Float64Overwrite(3, 3, 4, 4, 4.2)
C = Float64Multiply(0, 2, 0, 0, 0.14)
D = Float64Overwrite(0, 3, 0, 0, 4.0)
E = Float64Overwrite(0, 0, 1, 1, 3.7)
F = Float64Multiply(0, 4, 3, 3, 10.0)
G = Float64Overwrite(5, 5, 4, 4, 1.7)
H = Float64Multiply(0, 4, 2, 2, 0.99)
S = Float64Multiply(0, 1, 4, 4, 5.06)
@parameterized.expand([(
# Initial adjustments
{
1: [A, B],
2: [C],
4: [D],
},
# Adjustments to add
{
1: [E],
2: [F, G],
3: [H, S],
},
# Expected adjustments with 'append'
{
1: [A, B, E],
2: [C, F, G],
3: [H, S],
4: [D],
},
# Expected adjustments with 'prepend'
{
1: [E, A, B],
2: [F, G, C],
3: [H, S],
4: [D],
},
)])
def test_update_adjustments(self,
initial_adjustments,
adjustments_to_add,
expected_adjustments_with_append,
expected_adjustments_with_prepend):
methods = ['append', 'prepend']
expected_outputs = [
expected_adjustments_with_append, expected_adjustments_with_prepend
]
for method, expected_output in zip(methods, expected_outputs):
data = arange(30, dtype=float).reshape(6, 5)
adjusted_array = AdjustedArray(
data, initial_adjustments, float('nan')
)
adjusted_array.update_adjustments(adjustments_to_add, method)
self.assertEqual(adjusted_array.adjustments, expected_output)
|
Programming Languages/Python/Theory/100_Python_Challenges/Section_7_Datetime_Module/85. number of days between two given dates.py | jaswinder9051998/Resources | 101 | 12740223 | <reponame>jaswinder9051998/Resources<gh_stars>100-1000
"""
Write a function that calculates the number of days between two given dates.
Input Data:
Date1 = 2011-1-1
Date2 = 2021-1-1
"""
import datetime
def date_diff(Date1, Date2):
delta = Date2 - Date1
# .days gives the whole-day difference of the timedelta produced by date subtraction.
return delta.days
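# A minimal usage sketch with the stated input data (driver code assumed, not
# part of the original solution file):
# print(date_diff(datetime.date(2011, 1, 1), datetime.date(2021, 1, 1)))  # -> 3653
|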
Utilities/ReferenceImplementations/gru.py | vguerra/swift-apis | 848 | 12740226 | # Computes expected results for `testGRU()` in `Tests/TensorFlowTests/LayerTests.swift`.
# Requires 'tensorflow>=2.0.0a0' (e.g. "pip install tensorflow==2.2.0").
import sys
import numpy
import tensorflow as tf
# Set random seed for repeatable results
tf.random.set_seed(0)
def indented(s):
return '\n'.join([' ' + l for l in s.split('\n')])
def swift_tensor(name, tensor):
if hasattr(tensor, 'numpy'):
tensor = tensor.numpy()
def format_float(x):
formatted = numpy.format_float_positional(x, unique=True)
if formatted[-1] == '.':
return formatted + '0'
return formatted
formatter = {
'float_kind': format_float
}
return 'let {} = Tensor<Float>(\n{}\n)'.format(
name,
indented(numpy.array2string(tensor, separator=',', formatter=formatter)))
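# For illustration (an assumed example, not output produced by the original
# script): swift_tensor('x', numpy.array([1.0, 2.0])) returns roughly
# "let x = Tensor<Float>(\n [1.0,2.0]\n)", i.e. a Swift tensor literal.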
units = 4
input_dim = 3
input_length = 4
go_backwards = "go_backwards" in sys.argv
# Initialize the keras model with the GRU.
gru = tf.keras.layers.GRU(
input_dim=input_dim,
units=units,
activation="tanh", recurrent_activation="sigmoid",
return_sequences=True, return_state=True,
go_backwards=go_backwards)
x_input = tf.keras.Input(shape=[input_length, input_dim])
initial_state = tf.keras.Input(shape=[units])
initial_state_input = [initial_state]
output = gru(x_input, initial_state=initial_state_input)
model = tf.keras.Model(inputs=[x_input, initial_state_input], outputs=[output])
[kernel, recurrent_kernel, bias] = gru.get_weights()
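# Keras stores GRU gate weights concatenated along the last axis in the order
# [update (z), reset (r), new/candidate (h)], each occupying a `units`-wide
# slice; with the default reset_after=True, `bias` has two rows: the input
# bias (row 0) and the recurrent bias (row 1). The slicing below relies on
# this layout.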
update_kernel = kernel[:, :units]
update_recurrent_kernel = recurrent_kernel[:, :units]
reset_kernel = kernel[:, units: units * 2]
reset_recurrent_kernel = recurrent_kernel[:, units: units * 2]
new_kernel = kernel[:, units * 2:]
new_recurrent_kernel = recurrent_kernel[:, units * 2:]
update_bias = bias[0][:units]
update_recurrent_bias = bias[1][:units]
reset_bias = bias[0][units: units * 2]
reset_recurrent_bias = bias[1][units: units * 2]
new_bias = bias[0][units * 2:]
new_recurrent_bias = bias[1][units * 2:]
# Print the GRU weights.
print(swift_tensor('updateKernel', update_kernel))
print(swift_tensor('resetKernel', reset_kernel))
print(swift_tensor('outputKernel', new_kernel))
print(swift_tensor('updateRecurrentKernel', update_recurrent_kernel))
print(swift_tensor('resetRecurrentKernel', reset_recurrent_kernel))
print(swift_tensor('outputRecurrentKernel', new_recurrent_kernel))
print(swift_tensor('updateBias', update_bias))
print(swift_tensor('resetBias', reset_bias))
print(swift_tensor('outputBias', new_bias))
print(swift_tensor('updateRecurrentBias', update_recurrent_bias))
print(swift_tensor('resetRecurrentBias', reset_recurrent_bias))
print(swift_tensor('outputRecurrentBias', new_recurrent_bias))
# Initialize input data and print it.
x = tf.keras.initializers.GlorotUniform()(shape=[1, input_length, input_dim])
initial_state = [
tf.keras.initializers.GlorotUniform()(shape=[1, units]),
]
print(swift_tensor('x', x))
print(swift_tensor('initialState', initial_state[0]))
# Run forwards and backwards pass and print the results.
with tf.GradientTape() as tape:
tape.watch(x)
tape.watch(initial_state)
[[states, final_state]] = model([x, initial_state])
sum_output = tf.reduce_sum(states[0][-1])
[grad_model, grad_x, grad_initial_state] = tape.gradient(sum_output, [model.variables, x, initial_state])
[grad_kernel, grad_recurrent_kernel, grad_bias] = grad_model
[grad_initial_state] = grad_initial_state
grad_update_kernel = grad_kernel[:, :units]
grad_update_recurrent_kernel = grad_recurrent_kernel[:, :units]
grad_reset_kernel = grad_kernel[:, units: units * 2]
grad_reset_recurrent_kernel = grad_recurrent_kernel[:, units: units * 2]
grad_new_kernel = grad_kernel[:, units * 2:]
grad_new_recurrent_kernel = grad_recurrent_kernel[:, units * 2:]
grad_update_bias = grad_bias[0][:units]
grad_update_recurrent_bias = grad_bias[1][:units]
grad_reset_bias = grad_bias[0][units: units * 2]
grad_reset_recurrent_bias = grad_bias[1][units: units * 2]
grad_new_bias = grad_bias[0][units * 2:]
grad_new_recurrent_bias = grad_bias[1][units * 2:]
print(swift_tensor('expectedSum', sum_output))
print(swift_tensor('expectedStates', states))
print(swift_tensor('expectedFinalState', final_state))
print(swift_tensor('expectedGradX', grad_x))
print(swift_tensor('expectedGradInitialState', grad_initial_state))
print(swift_tensor('expectedGradUpdateKernel', grad_update_kernel))
print(swift_tensor('expectedGradResetKernel', grad_reset_kernel))
print(swift_tensor('expectedGradOutputKernel', grad_new_kernel))
print(swift_tensor('expectedGradUpdateRecurrentKernel', grad_update_recurrent_kernel))
print(swift_tensor('expectedGradResetRecurrentKernel', grad_reset_recurrent_kernel))
print(swift_tensor('expectedGradOutputRecurrentKernel', grad_new_recurrent_kernel))
print(swift_tensor('expectedGradUpdateBias', grad_update_bias))
print(swift_tensor('expectedGradResetBias', grad_reset_bias))
print(swift_tensor('expectedGradOutputBias', grad_new_bias))
print(swift_tensor('expectedGradUpdateRecurrentBias', grad_update_recurrent_bias))
print(swift_tensor('expectedGradResetRecurrentBias', grad_reset_recurrent_bias))
print(swift_tensor('expectedGradOutputRecurrentBias', grad_new_recurrent_bias))
|
alf/examples/misc_playground_empowerment.py | www2171668/alf | 175 | 12740229 | <filename>alf/examples/misc_playground_empowerment.py<gh_stars>100-1000
# Copyright (c) 2019 Horizon Robotics. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gin
import tensorflow as tf
@gin.configurable
def split_observation_fn(o):
dimo = o.get_shape().as_list()[-1]
assert dimo == 23, ("The dimension does not match.")
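# Observation layout, inferred from the split sizes below: 3 task-specific
# dims + 6 agent-pose dims + 6 agent-velocity dims + 6 internal-state dims
# + 2 action dims = 23.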
task_specific_ob, agent_pose, agent_vel, internal_states, action = tf.split(
o, [3, 6, 6, 6, 2], axis=-1)
return (action, task_specific_ob)
|
library/oci_data_guard_association_facts.py | slmjy/oci-ansible-modules | 106 | 12740245 | <reponame>slmjy/oci-ansible-modules<gh_stars>100-1000
#!/usr/bin/python
# Copyright (c) 2018, Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_data_guard_association_facts
short_description: Fetches details of an OCI Data Guard Association
description:
- Fetches details of an OCI Data Guard Association
version_added: "2.5"
options:
database_id:
description: Identifier of the database whose Data Guard Association
details need to be fetched
required: false
data_guard_association_id:
description: Identifier of the Data Guard Association whose details need to be fetched.
required: false
aliases: ['id']
author:
- "<NAME>(@debayan_gupta)"
extends_documentation_fragment: oracle
"""
EXAMPLES = """
# Note: These examples do not set authentication details.
# List all Data Guard Association related to a database
- name: List all Data Guard Association of a Database
oci_data_guard_association_facts:
database_id: 'ocid1.database..abuw'
# List a specific Data Guard Association related to a database
- name: List all Data Guard Association of a Database
oci_data_guard_association_facts:
database_id: 'ocid1.database..abuw'
data_guard_association_id: 'ocid1.dgassociation.abuw'
"""
RETURN = """
data_guard_association:
description: Attributes of the Data Guard Association.
returned: success
type: complex
contains:
apply_lag:
description: The lag time between updates to the primary database and application
of the redo data on the standby database, as computed by the reporting
database.
returned: always
type: string
sample: 9 seconds
apply_rate:
description: The rate at which redo logs are synced between the associated databases.
returned: always
type: string
sample: 17.00 KByte/s
database_id:
description: Identifier of the reporting Database.
returned: always
type: string
sample: ocid1.database.oc1.iad.xxxxxEXAMPLExxxxx
id:
description: Identifier of the Data Guard Association.
returned: always
type: string
sample: ocid1.dgassociation.oc1.iad.xxxxxEXAMPLExxxxx
time_created:
description: Date and time when the Data Guard Association was created, in
the format defined by RFC3339
returned: always
type: datetime
sample: 2016-08-25T21:10:29.600Z
lifecycle_details:
description: Additional information about the current lifecycle_state, if available.
returned: always
type: string
sample: Details of lifecycle state
lifecycle_state:
description: The current state of the Data Guard Association.
returned: always
type: string
sample: AVAILABLE
peer_data_guard_association_id:
description: Identifier of the peer database's Data Guard association.
returned: always
type: string
sample: ocid1.dgassociation.oc1.iad.xxxxxEXAMPLExxxxx
peer_database_id:
description: Identifier of the associated peer database.
returned: always
type: string
sample: ocid1.database.oc1.iad.xxxxxEXAMPLExxxxx
peer_db_system_id:
description: Identifier of the DB System containing the associated peer database.
returned: always
type: string
sample: ocid1.dgassociation.oc1.iad.xxxxxEXAMPLExxxxx
peer_role:
description: The role of the peer database in this Data Guard association.
returned: always
type: string
sample: STANDBY
protection_mode:
description: The protection mode of this Data Guard association.
returned: always
type: string
sample: MAXIMUM_PERFORMANCE
role:
description: The role of the reporting database in this Data Guard Association.
returned: always
type: string
sample: PRIMARY
transport_type:
description: The redo transport type used by this Data Guard Association.
returned: always
type: string
sample: ASYNC
sample: [{
"apply_lag":"7 seconds",
"apply_rate":"15 KByte/s",
"database_id":"ocid1.database.oc1.iad.xxxxxEXAMPLExxxxx",
"id":"ocid1.dgassociation.oc1.iad.xxxxxEXAMPLExxxxx",
"lifecycle_details":null,
"lifecycle_state":"PROVISIONING",
"peer_data_guard_association_id":"ocid1.dgassociation.oc1.iad.xxxxxEXAMPLExxxxx",
"peer_database_id":"ocid1.database.oc1.iad.xxxxxEXAMPLExxxxx",
"peer_db_home_id":"ocid1.dbhome.oc1.iad.xxxxxEXAMPLExxxxx",
"peer_db_system_id":"ocid1.dbsystem.oc1.iad.xxxxxEXAMPLExxxxx",
"peer_role":"STANDBY",
"protection_mode":"MAXIMUM_PERFORMANCE",
"role":"PRIMARY",
"time_created":"2018-03-03T06:55:49.463000+00:00",
"transport_type":"ASYNC"
},
{
"apply_lag":"7 seconds",
"apply_rate":"15 KByte/s",
"database_id":"ocid1.database.oc1.iad.xxxxxEXAMPLExxxxx",
"id":"ocid1.dgassociation.oc1.iad.xxxxxEXAMPLExxxxx",
"lifecycle_details":null,
"lifecycle_state":"PROVISIONING",
"peer_data_guard_association_id":"ocid1.dgassociation.oc1.iad.xxxxxEXAMPLExxxxx",
"peer_database_id":"ocid1.database.oc1.iad.xxxxxEXAMPLExxxxx",
"peer_db_home_id":"ocid1.dbhome.oc1.iad.xxxxxEXAMPLExxxxx",
"peer_db_system_id":"ocid1.dbsystem.oc1.iad.xxxxxEXAMPLExxxxx",
"peer_role":"STANDBY",
"protection_mode":"MAXIMUM_PERFORMANCE",
"role":"PRIMARY",
"time_created":"2018-03-03T06:55:49.463000+00:00",
"transport_type":"ASYNC"
}]
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oracle import oci_utils
try:
from oci.database.database_client import DatabaseClient
from oci.exceptions import ServiceError
from oci.util import to_dict
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
logger = None
def list_data_guard_associations(db_client, module):
result = dict(data_guard_associations="")
database_id = module.params.get("database_id")
data_guard_association_id = module.params.get("data_guard_association_id")
try:
if data_guard_association_id:
get_logger().debug(
"Listing Data Guard Association %s", data_guard_association_id
)
response = oci_utils.call_with_backoff(
db_client.get_data_guard_association,
database_id=database_id,
data_guard_association_id=data_guard_association_id,
)
existing_data_guard_associations = [response.data]
else:
get_logger().debug(
"Listing all Data Guard Association for Database %s", database_id
)
existing_data_guard_associations = oci_utils.list_all_resources(
db_client.list_data_guard_associations, database_id=database_id
)
except ServiceError as ex:
get_logger().error(
"Unable to list Data Guard Associations due to %s", ex.message
)
module.fail_json(msg=ex.message)
result["data_guard_associations"] = to_dict(existing_data_guard_associations)
return result
def set_logger(input_logger):
global logger
logger = input_logger
def get_logger():
return logger
def main():
logger = oci_utils.get_logger("oci_data_guard_association_facts")
set_logger(logger)
module_args = oci_utils.get_common_arg_spec()
module_args.update(
dict(
database_id=dict(type="str", required=True),
data_guard_association_id=dict(type="str", required=False, aliases=["id"]),
)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module")
db_client = oci_utils.create_service_client(module, DatabaseClient)
result = list_data_guard_associations(db_client, module)
module.exit_json(**result)
if __name__ == "__main__":
main()
|
lib/python2.7/site-packages/sklearn/utils/random.py | wfehrnstrom/harmonize | 6,989 | 12740254 | <reponame>wfehrnstrom/harmonize
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
if pop_size == 0:
raise ValueError("a must be non-empty")
if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
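# A hedged usage sketch (not part of the original module): draw a 5x2 sparse
# matrix whose first column samples uniformly from {0, 1, 2} and whose second
# column samples uniformly from {0, 3}.
# >>> m = random_choice_csc(5, [np.array([0, 1, 2]), np.array([0, 3])],
# ...                       random_state=0)
# >>> m.toarray().shape
# (5, 2)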
|
scripts/model_conversion/convert_stylegan.py | Gptao/BasicSR | 1,421 | 12740268 | <reponame>Gptao/BasicSR<filename>scripts/model_conversion/convert_stylegan.py
import torch
from basicsr.models.archs.stylegan2_arch import (StyleGAN2Discriminator,
StyleGAN2Generator)
def convert_net_g(ori_net, crt_net):
"""Convert network generator."""
for crt_k, crt_v in crt_net.items():
if 'style_mlp' in crt_k:
ori_k = crt_k.replace('style_mlp', 'style')
elif 'constant_input.weight' in crt_k:
ori_k = crt_k.replace('constant_input.weight', 'input.input')
# style conv1
elif 'style_conv1.modulated_conv' in crt_k:
ori_k = crt_k.replace('style_conv1.modulated_conv', 'conv1.conv')
elif 'style_conv1' in crt_k:
if crt_v.shape == torch.Size([1]):
ori_k = crt_k.replace('style_conv1', 'conv1.noise')
else:
ori_k = crt_k.replace('style_conv1', 'conv1')
# style conv
elif 'style_convs' in crt_k:
ori_k = crt_k.replace('style_convs',
'convs').replace('modulated_conv', 'conv')
if crt_v.shape == torch.Size([1]):
ori_k = ori_k.replace('.weight', '.noise.weight')
# to_rgb1
elif 'to_rgb1.modulated_conv' in crt_k:
ori_k = crt_k.replace('to_rgb1.modulated_conv', 'to_rgb1.conv')
# to_rgbs
elif 'to_rgbs' in crt_k:
ori_k = crt_k.replace('modulated_conv', 'conv')
elif 'noises' in crt_k:
ori_k = crt_k.replace('.noise', '.noise_')
else:
ori_k = crt_k
# replace
if crt_net[crt_k].size() != ori_net[ori_k].size():
raise ValueError('Wrong tensor size: \n'
f'crt_net: {crt_net[crt_k].size()}\n'
f'ori_net: {ori_net[ori_k].size()}')
else:
crt_net[crt_k] = ori_net[ori_k]
return crt_net
def convert_net_d(ori_net, crt_net):
"""Convert network discriminator."""
for crt_k, crt_v in crt_net.items():
if 'conv_body' in crt_k:
ori_k = crt_k.replace('conv_body', 'convs')
else:
ori_k = crt_k
# replace
if crt_net[crt_k].size() != ori_net[ori_k].size():
raise ValueError('Wrong tensor size: \n'
f'crt_net: {crt_net[crt_k].size()}\n'
f'ori_net: {ori_net[ori_k].size()}')
else:
crt_net[crt_k] = ori_net[ori_k]
return crt_net
if __name__ == '__main__':
"""Convert official stylegan2 weights from stylegan2-pytorch."""
# configuration
ori_net = torch.load('experiments/pretrained_models/stylegan2-ffhq.pth')
save_path_g = 'experiments/pretrained_models/stylegan2_ffhq_config_f_1024_official.pth' # noqa: E501
save_path_d = 'experiments/pretrained_models/stylegan2_ffhq_config_f_1024_discriminator_official.pth' # noqa: E501
out_size = 1024
channel_multiplier = 1
# convert generator
crt_net = StyleGAN2Generator(
out_size,
num_style_feat=512,
num_mlp=8,
channel_multiplier=channel_multiplier)
crt_net = crt_net.state_dict()
crt_net_params_ema = convert_net_g(ori_net['g_ema'], crt_net)
torch.save(
dict(params_ema=crt_net_params_ema, latent_avg=ori_net['latent_avg']),
save_path_g)
# convert discriminator
crt_net = StyleGAN2Discriminator(
out_size, channel_multiplier=channel_multiplier)
crt_net = crt_net.state_dict()
crt_net_params = convert_net_d(ori_net['d'], crt_net)
torch.save(dict(params=crt_net_params), save_path_d)
|
modelling/src/neuraldb/final_scoring_with_dbsize_sweep.py | j6mes/NeuralDB | 213 | 12740290 | <reponame>j6mes/NeuralDB
#
# Copyright (c) 2021 Facebook, Inc. and its affiliates.
#
# This file is part of NeuralDB.
# See https://github.com/facebookresearch/NeuralDB for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import json
from collections import OrderedDict, defaultdict
import numpy as np
import pandas as pd
from neuraldb.evaluation.scoring_functions import f1
from functools import reduce
def load_experiment(path, db_sizes):
running_score = defaultdict(lambda: defaultdict(int))
running_count = defaultdict(lambda: defaultdict(int))
print(path)
with open(path) as f:
for line in f:
instance = json.loads(line)
actual = instance["actual"]
prediction = instance["prediction"]
if "dbsize" not in instance["metadata"]:
db_idx, q_idx = (
instance["metadata"]["database_idx"],
instance["metadata"]["question_idx"],
)
dbsize = db_sizes[(db_idx, q_idx)]
else:
dbsize = instance["metadata"]["dbsize"]
if dbsize == 0:
dbsize = "0"
elif dbsize == 1:
dbsize = "1"
elif dbsize < 5:
dbsize = "2-4"
elif dbsize < 10:
dbsize = "5-9"
elif dbsize < 20:
dbsize = "10-19"
else:
dbsize = "20+"
local_score = f1(set(actual), set(prediction))
# relation = instance["metadata"]["relation"]
# running_score["relation"][relation] += local_score
# running_count["relation"][relation] += 1
qtype = instance["metadata"]["type"]
if qtype in {"argmin", "argmax", "min", "max"}:
qtype = "minmax"
running_score["type"][qtype] += local_score
running_count["type"][qtype] += 1
running_score["size"][dbsize] += local_score
running_count["size"][dbsize] += 1
running_score["all"][""] += local_score
running_count["all"][""] += 1
scores = {}
for k, v in running_score.items():
for attr, val in v.items():
score = (
running_score[k][attr] / running_count[k][attr]
if running_count[k][attr]
else 0
)
print(f"Running score: {k}\t{attr}\t\t{score}")
scores["_".join([k, attr])] = (
running_score[k][attr] / running_count[k][attr]
if running_count[k][attr]
else 0
)
return scores
if __name__ == "__main__":
dbs = ["v2.4_25", "v2.4_50", "v2.4_100", "v2.4_250", "v2.4_500", "v2.4_1000"]
all_dbs = {}
for file in dbs:
master_file = f"resources/{file}/test.jsonl"
db_sizes = dict()
with open(master_file) as f:
for db_idx, line in enumerate(f):
database = json.loads(line)
for q_idx, query in enumerate(database["queries"]):
db_sizes[(db_idx, q_idx)] = (
len(set(reduce(lambda a, b: a + b, query["facts"])))
if len(query["facts"])
else 0
)
all_dbs[file] = db_sizes
ndb_predictions = glob.glob(
"consolidated/work/*/**/predictions.jsonl", recursive=True
)
all_experiments = []
for prediction in ndb_predictions:
experiment = OrderedDict()
for element in prediction.split("/"):
if "," in element:
for kvp in element.split(","):
k, v = kvp.split("=", maxsplit=1)
experiment[k] = v
elif "-" in element:
for kvp in element.split(","):
k, v = kvp.split("-", maxsplit=1)
experiment[k] = v
experiment["dataset"] = prediction.split("/")[2]
experiment["path"] = prediction
if experiment["generator"] == "spj_rand":
experiment["retriever"] = "ssg"
elif "retriever" not in experiment:
experiment["retriever"] = ""
all_experiments.append(experiment)
print("Reading by experiment: \n\n\n")
for expt in all_experiments:
expt.update(load_experiment(expt["path"], all_dbs[expt["dataset"]]))
del expt["path"]
original_frame = pd.DataFrame(all_experiments)
# original_frame[original_frame.select_dtypes(include=['number']).columns] *= 100
pd.set_option("display.width", 1000)
pd.set_option("display.max_columns", None)
aggr = {"all_": [np.mean, np.std]}
pt = pd.pivot_table(
original_frame,
index=["dataset", "model", "generator", "retriever", "lr", "steps"],
aggfunc=aggr,
fill_value=0,
)
frame = pd.DataFrame(pt.to_records())
frame.columns = [
hdr.replace("('all_', '", "all.")
.replace("('size_", "size_")
.replace(", ", ".")
.replace(")", "")
.replace("'", "")
for hdr in frame.columns
]
print(pt)
final_configs = [
["t5", "1e-4", "spj"],
["t5", "1e-4", "spj_rand"],
# ["longformer", "1e-4", "perfectir"],
# ["t5-fid", "1e-4", "perfectir"],
] # ,["t5-fid-max1","1e-4","perfectir"],]
import matplotlib.pyplot as plt
plt.style.use("ggplot")
fig, ax = plt.subplots(figsize=(5, 3))
all_series = []
all_stds = []
for model, lr, gene in final_configs:
print(model, lr, gene)
series = []
stds = []
for db in dbs:
k = "all"
series.extend(
frame[
(frame.model == model)
& (frame.lr == lr)
& (frame.generator == gene)
& (frame.dataset == db)
][k + ".mean"]
)
stds.extend(
frame[
(frame.model == model)
& (frame.lr == lr)
& (frame.generator == gene)
& (frame.dataset == db)
][k + ".std"]
)
all_series.append(series)
all_stds.append(stds)
final_configs = [
# ["t5", "1e-4", "externalir", "tfidf"],
# ["t5", "1e-4", "externalir", "dpr"],
["t5", "1e-4", "externalir2", "tfidf"],
["t5", "1e-4", "externalir2", "dpr"],
]
for model, lr, gene, retr in final_configs:
print(model, lr, gene)
series = []
stds = []
for db in dbs:
k = "all"
print(
frame[
(frame.model == model)
& (frame.lr == lr)
& (frame.generator == gene)
& (frame.retriever == retr)
& (frame.dataset == db)
]
)
series.extend(
frame[
(frame.model == model)
& (frame.lr == lr)
& (frame.generator == gene)
& (frame.retriever == retr)
& (frame.dataset == db)
][k + ".mean"]
)
stds.extend(
frame[
(frame.model == model)
& (frame.lr == lr)
& (frame.generator == gene)
& (frame.retriever == retr)
& (frame.dataset == db)
][k + ".std"]
)
if len(series) > 6:
all_series.append(series[1:])
all_stds.append(stds[1:])
else:
all_series.append(series)
all_stds.append(stds)
for series, stds in zip(all_series, all_stds):
print(series)
ax.plot(series)
ax.fill_between(
range(len(series)),
[min(1, s + i) for (s, i) in zip(series, stds)],
[s - i for (s, i) in zip(series, stds)],
alpha=0.4,
)
plt.xticks(range(len(dbs)), labels=[k.replace("v2.4_", "") for k in dbs])
plt.xlabel("Number of facts in DB")
plt.ylabel("Answer Accuracy")
plt.legend(
[
"SPJ PerfectIR",
"SSG+SPJ",
"T5 + TF-IDF",
"T5 + DPR",
], # "T5 FiD", "TF-IDF", "DPR"])
loc="lower left",
fontsize="x-small",
)
# plt.tight_layout()
# plt.show()
plt.savefig("ssg_dbsize.pdf", bbox_inches="tight")
|
CondFormats/PCLConfig/test/AlignPCLThresholdsWriter_cfg.py | ckamtsikis/cmssw | 852 | 12740300 | import FWCore.ParameterSet.Config as cms
import copy
process = cms.Process("ProcessOne")
##
## MessageLogger
##
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.enable = False
process.MessageLogger.AlignPCLThresholdsWriter=dict()
process.MessageLogger.AlignPCLThresholds=dict()
process.MessageLogger.cout = cms.untracked.PSet(
enable = cms.untracked.bool(True),
enableStatistics = cms.untracked.bool(True),
threshold = cms.untracked.string("INFO"),
default = cms.untracked.PSet(limit = cms.untracked.int32(0)),
FwkReport = cms.untracked.PSet(limit = cms.untracked.int32(-1),
reportEvery = cms.untracked.int32(1000)
),
AlignPCLThresholdsWriter = cms.untracked.PSet( limit = cms.untracked.int32(-1)),
AlignPCLThresholds = cms.untracked.PSet( limit = cms.untracked.int32(-1))
)
##
## Empty source
##
process.source = cms.Source("EmptyIOVSource",
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
lastValue = cms.uint64(1),
interval = cms.uint64(1)
)
##
## Database output service
##
process.load("CondCore.CondDB.CondDB_cfi")
##
## Output database (in this case local sqlite file)
##
process.CondDB.connect = 'sqlite_file:mythresholds.db'
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDB,
timetype = cms.untracked.string('runnumber'),
toPut = cms.VPSet(cms.PSet(record = cms.string('AlignPCLThresholdsRcd'),
tag = cms.string('PCLThresholds_express_v0')
)
)
)
##
## Import the thresholds configuration
##
import CondFormats.PCLConfig.Thresholds_cff as Thresholds
##
## Example of how to add extra degrees of freedom to the default
##
AddSurfaceThresholds = copy.deepcopy(Thresholds.default)
BPixSurface= cms.VPSet(
cms.PSet(alignableId = cms.string("TPBModule"),
DOF = cms.string("Surface1"),
cut = cms.double(0.1),
sigCut = cms.double(0.1),
maxMoveCut = cms.double(0.1),
maxErrorCut = cms.double(10.0)
)
)
DefaultPlusSurface = AddSurfaceThresholds+BPixSurface
#print DefaultPlusSurface.dumpPython()
process.WriteInDB = cms.EDAnalyzer("AlignPCLThresholdsWriter",
record= cms.string('AlignPCLThresholdsRcd'),
### minimum number of records found in pede output
minNRecords = cms.uint32(25000),
#thresholds = cms.VPSet() # empty object
#thresholds = DefaultPlusSurface # add extra deegree of freedom
thresholds = Thresholds.default # as a cms.VPset
)
process.p = cms.Path(process.WriteInDB)
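##
## Hedged usage note: a writer configuration like this is normally executed
## with the standard CMSSW command, e.g. `cmsRun AlignPCLThresholdsWriter_cfg.py`
## (invocation shown for illustration only).
##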
|
pycwr/GraphicalInterface/RadarInterface.py | 1271756664/study | 144 | 12740325 | # -*- coding: utf-8 -*-
"""
Module implementing MainWindow.
"""
import os
from .RadarUI import Ui_MainWindow
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtWidgets import QMainWindow
from ..io import read_auto
from ..io.util import radar_format
from ..draw.SingleRadarPlot import RadarGraph
from ..draw.SingleRadarPlotMap import RadarGraphMap
from ..configure.location_config import last_open_dir
from glob import glob
import json
import sys
from .station_info import Ui_Dialog
from ..draw.VerticalSectionPlot import VerticalSection
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
field_name = ["dBZ", "V", "W", "ZDR", "KDP", "CC"]
class LineBuilder:
def __init__(self, fig, ax, radar_data, product, map_bool):
self.ax = ax
self.xs = []
self.ys = []
self.fig = fig
self.map = map_bool
self.cid = self.fig.canvas.mpl_connect('button_press_event', self)
self.cursor = self.fig.canvas.mpl_connect('motion_notify_event', self.mouse_move)
self.radar_dat = radar_data
self.product = product
def __call__(self, event):
if len(self.xs) < 2:
self.xs.append(event.xdata)
self.ys.append(event.ydata)
if len(self.xs) == 1:
self.start = self.ax.scatter(event.xdata, event.ydata, color="r", marker="+", s=60,
zorder=len(self.xs)+10)
else:
self.end = self.ax.scatter(event.xdata, event.ydata, color="r", marker="+", s=60,
zorder=len(self.xs) + 10)
self.rline = self.ax.plot(self.xs, self.ys, color="r", linewidth=1, zorder=13)
cv = FigureCanvas(Figure(figsize=(8, 6)))
ax = cv.figure.add_axes([0.1, 0.3, 0.8, 0.6])
cax = cv.figure.add_axes([0.1, 0.1, 0.8, 0.06])
if not self.map:
VerticalSection.GUI_section(cv.figure, ax, cax, self.radar_dat, [self.xs[0]*1000, self.ys[0]*1000],\
[self.xs[1]*1000, self.ys[1]*1000], field_name[self.product])
else:
VerticalSection.GUI_section_map(cv.figure, ax, cax, self.radar_dat,
[self.xs[0], self.ys[0]], \
[self.xs[1], self.ys[1]], field_name[self.product])
cv.show()
self.fig.canvas.draw()
else:
self.rline[0].remove()
self.start.remove()
self.end.remove()
self.xs = []
self.ys = []
self.xs.append(event.xdata)
self.ys.append(event.ydata)
self.start = self.ax.scatter(event.xdata, event.ydata, color="r", marker="+", s=60,
zorder=len(self.xs) + 10)
self.fig.canvas.draw()
def mouse_move(self, event):
try:
self.move_line[0].remove()
except Exception:
pass
if len(self.xs) == 1:
self.move_line = self.ax.plot([self.xs[0], event.xdata], [self.ys[0], event.ydata], color="r",
linewidth=1, linestyle="--", zorder=100)
self.fig.canvas.draw()
class Dialog(QDialog, Ui_Dialog):
"""
Class documentation goes here.
"""
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget
@type QWidget
"""
super(Dialog, self).__init__(parent)
self.setupUi(self)
@pyqtSlot()
def on_pushButton_clicked(self):
"""
Slot documentation goes here.
"""
self.lon = float(self.lineEdit.text())
self.lat = float(self.lineEdit_2.text())
self.height = float(self.lineEdit_3.text())
self.close()
@pyqtSlot()
def on_pushButton_2_clicked(self):
"""
Slot documentation goes here.
"""
self.close()
@pyqtSlot()
def on_toolButton_clicked(self):
"""
Slot documentation goes here.
"""
lon, LonTrue = QInputDialog.getDouble(self, r"经度", "雷达站点经度(单位:度)", 131.3, -180, 180)
if LonTrue:
self.lineEdit.setText(str(lon))
@pyqtSlot()
def on_toolButton_2_clicked(self):
"""
Slot documentation goes here.
"""
# TODO: not implemented yet
lat, LatTrue = QInputDialog.getDouble(self, r"纬度", "雷达站点纬度(单位:度)", 23, -90, 90)
if LatTrue:
            self.lineEdit_2.setText(str(lat))
@pyqtSlot()
def on_toolButton_3_clicked(self):
"""
Slot documentation goes here.
"""
# TODO: not implemented yet
height, HeightTrue = QInputDialog.getDouble(self, r"高度", "雷达站点高度(单位:米)", 57, -2000, 5000)
if HeightTrue:
            self.lineEdit_3.setText(str(height))
class MainWindow(QMainWindow, Ui_MainWindow):
"""
Class documentation goes here.
"""
def __init__(self, parent=None):
"""
Constructor
@param parent reference to the parent widget
@type QWidget
"""
super(MainWindow, self).__init__(parent)
self.setupUi(self)
self.lastOpenDir = self.open_last_opendir()
self.radar_dat = None
self.dualPOL = False
self.openbasename = None
self.files = None
self.radar_type = None
        # Default radar site location (kept consistent with the station Dialog defaults)
        self.org_lat = 23
        self.org_lon = 131.3
        self.org_height = 57
def open_last_opendir(self):
"""打开上次关闭文件的位置"""
with open(last_open_dir, "r") as f:
dir_dict = json.load(f)
return dir_dict["lastOpenDir"]
def write_last_opendir(self, filedir):
"""将打开的位置写入json文件中"""
with open(last_open_dir, "w") as f:
json.dump({"lastOpenDir":filedir}, f)
@pyqtSlot()
def on_actionvertical_changed(self):
"""垂直剖面的绘制"""
if self.actionvertical.isChecked():
try:
self.linebuilder = LineBuilder(self.fig, self.ax, self.radar_dat, self.find_var_in_groupBox(),\
self.actionwithmap.isChecked())
self.clickevent = True
except AttributeError:
pass
else:
self.fig.canvas.mpl_disconnect(self.linebuilder.cid)
self.fig.canvas.mpl_disconnect(self.linebuilder.cursor)
self.linebuilder.rline[0].remove()
self.linebuilder.start.remove()
self.linebuilder.end.remove()
self.fig.canvas.draw()
@pyqtSlot()
def on_actionwithmap_changed(self):
"""
Slot documentation goes here.
"""
pass
@pyqtSlot()
def on_actioncontinuous_changed(self):
"""
Slot documentation goes here.
"""
pass
def Read_radar(self, filename):
if radar_format(filename) is not None:
NRadar = read_auto(filename)
self.org_lat = NRadar.scan_info.latitude.values
self.org_lon = NRadar.scan_info.longitude.values
self.org_height = NRadar.scan_info.altitude.values
if "KDP" in NRadar.fields[0].keys():
self.open_dual()
else:
self.close_non_dual()
return NRadar
else:
QMessageBox.warning(self, "数据错误警告", "非SA/SB/CA/CB/98D/CC/CCJ/SC/CD数据",
QMessageBox.Yes)
return 0
def close_non_dual(self):
"""关闭非双偏振雷达变量"""
self.radioButton_13.hide()
self.radioButton_14.hide()
self.radioButton_15.hide()
def open_dual(self):
"""关闭非双偏振雷达变量"""
self.radioButton_13.show()
self.radioButton_14.show()
self.radioButton_15.show()
def setSelected(self, filename):
"""将选中数据高亮"""
basename = os.path.basename(filename)
self.openbasename = basename
items = self.listWidget.findItems(basename, Qt.MatchExactly)
if len(items) > 0:
for item in items:
self.listWidget.setCurrentItem(item)
def import_basedat(self, direc):
"""查找文件夹中的所有雷达文件名,并以list返回"""
self.lastOpenDir = direc
self.write_last_opendir(direc)
extensions = ["*.*A", "*.*V", "*.bz2", "*.bin",
"*.AR2", "*.gz", ".GZ"]
files = []
for iextend in extensions:
file = glob(os.path.join(direc, iextend))
files.extend(file)
return [os.path.basename(ifile) for ifile in files]
def add_listwidget(self, files):
"""将files添加到listWidget"""
self.listWidget.clear()
for item in files:
self.listWidget.addItem(item)
@pyqtSlot(QListWidgetItem)
def on_listWidget_itemDoubleClicked(self, item):
"""
Slot documentation goes here.
@param item DESCRIPTION
@type QListWidgetItem
"""
filename = self.lastOpenDir + os.sep + item.text()
self.radar_dat = self.Read_radar(filename)
if self.radar_dat != 0:
self.setSelected(filename)
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_actionopen_2_triggered(self):
"""
Slot documentation goes here.
"""
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = '.'
filename = QFileDialog.getOpenFileName(self, "打开一个雷达基数据", defaultOpenDirPath,
"天气雷达基数据(*bin *bz2 *A *V *BIN *BZ2 *AR2 *GZ *gz)")
ReadFile = filename[0]
if ReadFile.strip() == "":
return
PathDir = os.path.dirname(ReadFile)
self.files = self.import_basedat(PathDir)
self.add_listwidget(self.files)
self.radar_dat = self.Read_radar(ReadFile)
if self.radar_dat != 0:
self.setSelected(ReadFile)
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_actionopendir_2_triggered(self):
"""
Slot documentation goes here.
"""
if self.lastOpenDir and os.path.exists(self.lastOpenDir):
defaultOpenDirPath = self.lastOpenDir
else:
defaultOpenDirPath = '.'
self.targetDirPath = QFileDialog.getExistingDirectory(self, "打开新一代天气雷达数据文件夹",
defaultOpenDirPath,
QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)
if self.targetDirPath.strip() == '':
return
self.files = self.import_basedat(self.targetDirPath)
self.add_listwidget(self.files)
@pyqtSlot()
def on_actionquit_2_triggered(self):
"""
Slot documentation goes here.
"""
sys.exit(0)
@pyqtSlot()
def on_actionstation_triggered(self):
"""
Slot documentation goes here.
"""
self.my_info = Dialog()
self.my_info.lineEdit.setText(str(self.org_lon))
self.my_info.lineEdit_2.setText(str(self.org_lat))
self.my_info.lineEdit_3.setText(str(self.org_height))
self.my_info.lat = self.org_lat
self.my_info.lon = self.org_lon
self.my_info.height = self.org_height
self.my_info.exec_()
self.org_lat = self.my_info.lat
self.org_lon = self.my_info.lon
self.org_height = self.my_info.height
def find_checked_radiobutton(self, radiobuttons):
''' find the checked radiobutton '''
for items in radiobuttons:
if items.isChecked():
checked_radiobutton = items.text()
return checked_radiobutton
def find_level_in_groupBox(self):
"""查找仰角"""
level = self.find_checked_radiobutton(self.groupBox.findChildren(QtWidgets.QRadioButton))
levels = ["第1层", "第2层", "第3层",
"第4层", "第5层", "第6层",
"第7层", "第8层", "第9层"]
for i in range(9):
if level == levels[i]:
return i
return 0
def find_var_in_groupBox(self):
"""查找变量"""
var = self.find_checked_radiobutton(self.groupBox_2.findChildren(QtWidgets.QRadioButton))
vars = ["反射率因子", "径向速度", "谱宽", "差分反射率", "差分相位比", "相关系数"]
for i in range(6):
if var == vars[i]:
return i
return 0
def plot_graph_PPI(self, radar, level, product, map, continuously):
self.MplWidget.canvas.update()
self.MplWidget.canvas.flush_events()
try:
self.fig.clf()
self.ax.clear()
self.cax.clear()
except AttributeError:
pass
if not map:
self.fig, self.ax, self.cax = self.MplWidget.canvas.get_fig_ax()
self.ax.set_facecolor((0.95, 0.95, 0.95))
self.pm = RadarGraph.GUI_plot(radar, self.fig, self.ax, self.cax, level, \
field_name[product], continuously=continuously)
else:
self.fig, self.ax, self.cax = self.MplWidget.canvas.get_fig_ax_map()
self.ax.set_facecolor((0.95, 0.95, 0.95))
self.pm = RadarGraphMap.GUI_plot(radar, self.fig, self.ax, self.cax, level, \
field_name[product], continuously=continuously)
self.ax.tick_params(axis="y", which="both", direction='in')
self.ax.tick_params(axis="x", which="both", direction='in')
self.MplWidget.canvas.draw()
        if self.actionvertical.isChecked():  # try to re-bind the section-drawing handlers
try:
self.fig.canvas.mpl_disconnect(self.linebuilder.cid)
self.fig.canvas.mpl_disconnect(self.linebuilder.cursor)
self.linebuilder = LineBuilder(self.fig, self.ax, self.radar_dat, self.find_var_in_groupBox(), \
self.actionwithmap.isChecked())
self.clickevent = True
except AttributeError:
pass
@pyqtSlot()
def on_pushButton_clicked(self):
"""
Slot documentation goes here.
"""
if self.files is not None:
items = self.listWidget.findItems(self.openbasename, Qt.MatchExactly)
row = self.listWidget.row(items[0])
nrows = len(self.files)
res_row = row - 1
if res_row < 0:
res_row = nrows - 1
self.radar_dat = self.Read_radar(self.lastOpenDir + os.sep + self.files[res_row])
if self.radar_dat != 0:
self.setSelected(self.lastOpenDir + os.sep + self.files[res_row])
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_pushButton_2_clicked(self):
"""
Slot documentation goes here.
        Animation playback.
"""
if self.files is not None:
items = self.listWidget.findItems(self.openbasename, Qt.MatchExactly)
row = self.listWidget.row(items[0])
nrows = len(self.files)
for irow in range(row, nrows):
self.radar_dat = self.Read_radar(os.path.join(self.lastOpenDir, self.files[irow]))
if self.radar_dat != 0:
self.setSelected(self.lastOpenDir + os.sep + self.files[irow])
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_pushButton_3_clicked(self):
"""
Slot documentation goes here.
"""
if self.files is not None:
items = self.listWidget.findItems(self.openbasename, Qt.MatchExactly)
row = self.listWidget.row(items[0])
nrows = len(self.files)
res_row = row + 1
if res_row == nrows:
res_row = 0
self.radar_dat = self.Read_radar(self.lastOpenDir + os.sep + self.files[res_row])
if self.radar_dat != 0:
self.setSelected(self.lastOpenDir + os.sep + self.files[res_row])
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_15_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_12_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_14_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_10_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_13_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_11_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_2_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_4_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_5_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_3_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_1_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_7_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_8_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_6_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
@pyqtSlot()
def on_radioButton_9_clicked(self):
"""
Slot documentation goes here.
"""
if self.radar_dat is not None:
self.plot_graph_PPI(self.radar_dat, self.find_level_in_groupBox(), self.find_var_in_groupBox(),
self.actionwithmap.isChecked(), self.actioncontinuous.isChecked())
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
ui = MainWindow()
ui.show()
sys.exit(app.exec_())
|
lixian_plugins/commands/get_torrent.py | deadblue/xunlei-lixian | 722 | 12740347 | from lixian_plugins.api import command
from lixian_cli_parser import command_line_parser, command_line_option
from lixian_cli_parser import with_parser
from lixian_cli import parse_login
from lixian_commands.util import create_client
@command(name='get-torrent', usage='get .torrent by task id or info hash')
@command_line_parser()
@with_parser(parse_login)
@command_line_option('rename', default=True)
def get_torrent(args):
'''
usage: lx get-torrent [info-hash|task-id]...
'''
client = create_client(args)
for id in args:
id = id.lower()
import re
if re.match(r'[a-fA-F0-9]{40}$', id):
torrent = client.get_torrent_file_by_info_hash(id)
elif re.match(r'\d+$', id):
import lixian_query
task = lixian_query.get_task_by_id(client, id)
id = task['bt_hash']
id = id.lower()
torrent = client.get_torrent_file_by_info_hash(id)
else:
raise NotImplementedError()
if args.rename:
import lixian_hash_bt
from lixian_encoding import default_encoding
info = lixian_hash_bt.bdecode(torrent)['info']
name = info['name'].decode(info.get('encoding', 'utf-8')).encode(default_encoding)
import re
name = re.sub(r'[\\/:*?"<>|]', '-', name)
else:
name = id
path = name + '.torrent'
print path
with open(path, 'wb') as output:
output.write(torrent)
|
gdal/examples/pydrivers/ogr_DUMMY.py | jpapadakis/gdal | 3,100 | 12740379 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This code is in the public domain, so as to serve as a template for
# real-world plugins.
# or, at the choice of the licensee,
# Copyright 2019 <NAME>
# SPDX-License-Identifier: MIT
# Metadata parsed by GDAL C++ code at driver pre-loading, starting with '# gdal: '
# Required and with that exact syntax since it is parsed by non-Python
# aware code. So just literal values, no expressions, etc.
# gdal: DRIVER_NAME = "DUMMY"
# API version(s) supported. Must include 1 currently
# gdal: DRIVER_SUPPORTED_API_VERSION = [1]
# gdal: DRIVER_DCAP_VECTOR = "YES"
# gdal: DRIVER_DMD_LONGNAME = "my super plugin"
# Optional driver metadata items.
# # gdal: DRIVER_DMD_EXTENSIONS = "ext1 est2"
# # gdal: DRIVER_DMD_HELPTOPIC = "http://example.com/my_help.html"
try:
# The gdal_python_driver module is defined by the GDAL library at runtime
from gdal_python_driver import BaseDriver, BaseDataset, BaseLayer
except ImportError:
# To be able to run in standalone mode
class BaseDriver(object):
pass
class BaseDataset(object):
pass
class BaseLayer(object):
pass
class Layer(BaseLayer):
def __init__(self):
# Reserved attribute names. Either those or the corresponding method
# must be defined
self.name = 'my_layer' # Required, or name() method
self.fid_name = 'my_fid' # Optional
self.fields = [{'name': 'boolField', 'type': 'Boolean'},
{'name': 'int16Field', 'type': 'Integer16'},
{'name': 'int32Field', 'type': 'Integer'},
{'name': 'int64Field', 'type': 'Integer64'},
{'name': 'realField', 'type': 'Real'},
{'name': 'floatField', 'type': 'Float'},
{'name': 'strField', 'type': 'String'},
{'name': 'strNullField', 'type': 'String'},
{'name': 'strUnsetField', 'type': 'String'},
{'name': 'binaryField', 'type': 'Binary'},
{'name': 'timeField', 'type': 'Time'},
{'name': 'dateField', 'type': 'Date'},
{'name': 'datetimeField', 'type': 'DateTime'}] # Required, or fields() method
self.geometry_fields = [{'name': 'geomField',
'type': 'Point', # optional
'srs': 'EPSG:4326' # optional
}] # Required, or geometry_fields() method
self.metadata = {'foo': 'bar'} # optional
# uncomment if __iter__() honour self.attribute_filter
#self.iterator_honour_attribute_filter = True
# uncomment if __iter__() honour self.spatial_filter
#self.iterator_honour_spatial_filter = True
# uncomment if feature_count() honour self.attribute_filter
#self.feature_count_honour_attribute_filter = True
# uncomment if feature_count() honour self.spatial_filter
#self.feature_count_honour_spatial_filter = True
# End of reserved attribute names
self.count = 5
# Required, unless self.name attribute is defined
# def name(self):
# return 'my_layer'
# Optional. If not defined, fid name is 'fid'
# def fid_name(self):
# return 'my_fid'
# Required, unless self.geometry_fields attribute is defined
# def geometry_fields(self):
# return [...]
# Required, unless self.required attribute is defined
# def fields(self):
# return [...]
    # Optional. Only to be used if the self.metadata field is not defined
# def metadata(self, domain):
# if domain is None:
# return {'foo': 'bar'}
# return None
# Optional. Called when self.attribute_filter is changed by GDAL
# def attribute_filter_changed(self):
# # You may change self.iterator_honour_attribute_filter
# # or feature_count_honour_attribute_filter
# pass
# Optional. Called when self.spatial_filter is changed by GDAL
# def spatial_filter_changed(self):
# # You may change self.iterator_honour_spatial_filter
# # or feature_count_honour_spatial_filter
# pass
# Optional
def test_capability(self, cap):
if cap == BaseLayer.FastGetExtent:
return True
if cap == BaseLayer.StringsAsUTF8:
return True
# if cap == BaseLayer.FastSpatialFilter:
# return False
# if cap == BaseLayer.RandomRead:
# return False
if cap == BaseLayer.FastFeatureCount:
return self.attribute_filter is None and self.spatial_filter is None
return False
# Optional
def extent(self, force_computation):
return [2.1, 49, 3, 50] # minx, miny, maxx, maxy
# Optional.
def feature_count(self, force_computation):
# As we did not declare feature_count_honour_attribute_filter and
# feature_count_honour_spatial_filter, the below case cannot happen
# But this is to illustrate that you can callback the default implementation
# if needed
# if self.attribute_filter is not None or \
# self.spatial_filter is not None:
# return super(Layer, self).feature_count(force_computation)
return self.count
# Required. You do not need to handle the case of simultaneous iterators on
# the same Layer object.
def __iter__(self):
for i in range(self.count):
properties = {
'boolField': True,
'int16Field': 32767,
'int32Field': i + 2,
'int64Field': 1234567890123,
'realField': 1.23,
'floatField': 1.2,
'strField': 'foo',
'strNullField': None,
'binaryField': b'\x01\x00\x02',
'timeField': '12:34:56.789',
'dateField': '2017-04-26',
'datetimeField': '2017-04-26T12:34:56.789Z'}
yield {"type": "OGRFeature",
"id": i + 1,
"fields": properties,
"geometry_fields": {"geomField": "POINT(2 49)"},
"style": "SYMBOL(a:0)" if i % 2 == 0 else None,
}
# Optional
# def feature_by_id(self, fid):
# return {}
class Dataset(BaseDataset):
# Optional, but implementations will generally need it
def __init__(self, filename):
# If the layers member is set, layer_count() and layer() will not be used
self.layers = [Layer()]
self.metadata = {'foo': 'bar'}
# Optional, called on native object destruction
def close(self):
pass
    # Optional. Only to be used if the self.metadata field is not defined
# def metadata(self, domain):
# if domain is None:
# return {'foo': 'bar'}
# return None
# Required, unless a layers attribute is set in __init__
# def layer_count(self):
# return len(self.layers)
# Required, unless a layers attribute is set in __init__
# def layer(self, idx):
# return self.layers[idx]
# Required: class deriving from BaseDriver
class Driver(BaseDriver):
# Optional. Called the first time the driver is loaded
def __init__(self):
pass
# Required
def identify(self, filename, first_bytes, open_flags, open_options={}):
return filename == 'DUMMY:'
# Required
def open(self, filename, first_bytes, open_flags, open_options={}):
if not self.identify(filename, first_bytes, open_flags):
return None
return Dataset(filename)
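# Hedged standalone check (assumes the GDAL/OGR Python bindings are installed
# and this file is visible to GDAL via GDAL_PYTHON_DRIVER_PATH; "DUMMY:" is the
# connection string accepted by identify() above):
#   from osgeo import ogr
#   ds = ogr.Open("DUMMY:")
#   lyr = ds.GetLayer(0)
#   print(lyr.GetFeatureCount())  # -> 5, the value returned by feature_count()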
|
modules/post-exploitation/empire.py | decidedlygray/ptf | 4,391 | 12740383 | #!/usr/bin/env python
#####################################
# Installation module for empire
#####################################
# AUTHOR OF MODULE NAME
AUTHOR="<NAME>"
# DESCRIPTION OF THE MODULE
DESCRIPTION="This module will install/update Empire - post exploitation python/powershell for windows and nix/osx"
# INSTALL TYPE GIT, SVN, FILE DOWNLOAD
# OPTIONS = GIT, SVN, FILE
INSTALL_TYPE="GIT"
# LOCATION OF THE FILE OR GIT/SVN REPOSITORY
REPOSITORY_LOCATION="https://github.com/BC-SECURITY/Empire"
# WHERE DO YOU WANT TO INSTALL IT
INSTALL_LOCATION="empire3"
# DEPENDS FOR DEBIAN INSTALLS
DEBIAN="git"
FEDORA="git"
# COMMANDS TO RUN AFTER
AFTER_COMMANDS='cd {INSTALL_LOCATION},echo -e "\n" | ./setup/install.sh'
# DON'T RUN AFTER COMMANDS ON UPDATE
BYPASS_UPDATE="NO"
# LAUNCHER
LAUNCHER="empire"
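# Hedged usage sketch (typical PTF console flow; exact prompts may differ):
#   ./ptf
#   ptf> use modules/post-exploitation/empire
#   ptf> run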
|
solution/graph_traversal/7569/main.py | jungyoonoh/baekjoon-1 | 2,236 | 12740406 | # Authored by : gusdn3477
# Co-authored by : tony9402
# Link : http://boj.kr/8a53cdacfc6340c894fb47257232f244
import sys
from collections import deque
def input():
return sys.stdin.readline().rstrip()
def checkMap():
for z in range(H):
for i in range(N):
for j in range(M):
if arr[z][i][j] == 0:
return False
return True
def BFS():
while queue:
q = queue.popleft()
z, x, y = q[0]
for i in range(6):
dx = x + nx[i]
dy = y + ny[i]
dz = z + nz[i]
if dx < 0 or dx >= N or dy < 0 or dy >= M or dz < 0 or dz >= H:
continue
if arr[dz][dx][dy] == 0:
arr[dz][dx][dy] = 1
queue.append(((dz,dx,dy), q[1]+1))
if checkMap():
return q[1]
return -1
M, N, H = map(int, input().split())
arr = []
nx = [-1,0,1,0,0,0]
ny = [0,-1,0,1,0,0]
nz = [0,0,0,0,-1,1]
queue = deque()
arr = [ [ list(map(int, input().split())) for _ in range(N) ] for _ in range(H) ]
for z in range(H):
for i in range(N):
for j in range(M):
if arr[z][i][j] == 1:
arr[z][i][j] = 1
queue.append(((z,i,j),0))
ans = BFS()
print(ans)
|
typed_python/compiler/type_wrappers/hash_wrapper.py | APrioriInvestments/typed_python | 105 | 12740419 | # Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typed_python.compiler.type_wrappers.wrapper import Wrapper
import typed_python.compiler.native_ast as native_ast
from typed_python import Int32
def tp_hash_to_py_hash(hVal):
"""Convert a typed-python hash to a regular python hash.
Python insists that its hash values are never -1, because it uses -1 as an
indicator that the exception flag is set. TypedPython doesn't have this behavior
because it uses c++ exception propagation internally. As a result, it's the
'hash' wrapper that's responsible for mapping -1 to -2.
"""
if hVal == -1:
return Int32(-2)
return hVal
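# Illustrative sketch (not part of the original module; the literal values are
# assumptions used purely for illustration):
#   tp_hash_to_py_hash(Int32(7))   # -> 7, returned unchanged
#   tp_hash_to_py_hash(Int32(-1))  # -> Int32(-2), because -1 is reserved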
class HashWrapper(Wrapper):
is_pod = True
is_empty = False
is_pass_by_ref = False
def __init__(self):
super().__init__(hash)
def getNativeLayoutType(self):
return native_ast.Type.Void()
def convert_call(self, context, expr, args, kwargs):
if len(args) == 1 and not kwargs:
hashVal = args[0].convert_hash()
if hashVal is None:
return None
return context.call_py_function(tp_hash_to_py_hash, (hashVal,), {})
return super().convert_call(context, expr, args, kwargs)
|
tests/test_phoneme_conversion.py | jayten42/pororo | 1,137 | 12740426 | """Test Grapheme to Phoneme module"""
import unittest
from pororo import Pororo
class PororoPhonemeConversionTester(unittest.TestCase):
def test_modules(self):
g2pk = Pororo(task="g2p", lang="ko")
g2pk_res = g2pk("어제는 날씨가 맑았는데, 오늘은 흐리다.")
self.assertIsInstance(g2pk_res, str)
g2pen = Pororo(task="g2p", lang="en")
g2pen_res = g2pen("I have $250 in my pocket.")
self.assertIsInstance(g2pen_res, list)
g2pzh = Pororo(task="g2p", lang="zh")
g2pzh_res = g2pzh("然而,他红了20年以后,他竟退出了大家的视线。")
self.assertIsInstance(g2pzh_res, str)
if __name__ == "__main__":
unittest.main()
|
guacozy_server/backend/models/guacdserver.py | yinm8315/guacozy-django-react | 121 | 12740429 | from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
class GuacdServer(models.Model):
class Meta:
verbose_name = "Guacd Server"
verbose_name_plural = "Guacd Servers"
name = models.CharField(max_length=64, blank=False, unique=True,
default="guacd server")
hostname = models.CharField(max_length=64, blank=False,
default="localhost")
port = models.PositiveIntegerField(blank=False, default=4822,
validators=[MinValueValidator(1),
MaxValueValidator(65535)])
def __str__(self):
return self.name
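# Minimal usage sketch (assumes a configured Django project; the values below
# are illustrative assumptions, not guacozy defaults):
#   GuacdServer.objects.create(name="guacd-lab", hostname="10.0.0.5", port=4822)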
|
python/tvm/autotvm/graph_tuner/utils/traverse_graph.py | XiaoSong9905/tvm | 4,640 | 12740468 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-locals,too-many-statements,too-many-branches,protected-access
"""API for graph traversing."""
import threading
import re
import tvm
from tvm import relay, autotvm
from tvm.relay import transform
from tvm.relay.expr import Call, TupleGetItem, Var, Constant, Tuple
from tvm.relay.function import Function
from tvm.relay.ty import TupleType, TensorType
from tvm.autotvm.task import TaskExtractEnv
from .utils import has_multiple_inputs, is_boundary_node, is_skipped_node
from .._base import OPT_OUT_OP
def expr2graph(expr, target_ops, node_dict, node_list, tvm_target):
"""Convert relay expr to graph data structure
and fetch workloads of target operators.
Parameters
----------
expr : tvm.relay.Expr.Function
Input relay function expression.
target_ops: List of tvm.ir.Op
List of target relay ops
node_dict : dictionary from tvm.relay.Expr to int
Dictionary to record node index
node_list : list of dictionary
List of nodes which contains all expr in the input relay function.
Each node will be stored as a dictionary in the format of
{"op": str, "node": tvm.relay.expr, "inputs": [int], "types": [tvm.relay.Type],
"name": str, "workloads": [tuple], "topi_op": [function]}
tvm_target : tvm.target
The TVM target object.
"""
# TODO(@kevinthesun, @icemelon9): Currently graph tuning pass relies on the fact
# that # autotvm tasks == # ops. But this won't be true after having relay op
# strategy. We need to find a solution to fix this.
env = TaskExtractEnv.get(allow_duplicate=True)
env.reset(target_ops)
# pylint: disable=not-context-manager
with env:
_expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target)
task_pos = 0
for node_entry in node_list:
if node_entry["op"] in target_ops:
task_name, args = env.task_collection[task_pos]
task = autotvm.task.create(task_name, args, target=tvm_target)
node_entry["workloads"] = [task.workload]
node_entry["topi_op"] = [task_name]
task_pos += 1
def _infer_type(node):
"""A method to infer the type of a relay expression."""
mod = tvm.IRModule.from_expr(node)
mod = transform.InferType()(mod)
entry = mod["main"]
return entry if isinstance(node, relay.Function) else entry.body
def _replace_device_with_tracing(target):
"""This is to replace -device=XXX with -device=tracing in the tvm_target string.
It is a stand-along function for testability.
We need to have device=tracing in order to fetch the workloads, it is not used
for anything beyond that so it is safe to override the device here only."""
target = str(target)
if "-device" in target:
return re.sub("-device=[^\\-$]+", "-device=tracing ", target).strip(" ")
return target + " -device=tracing"
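# Hedged examples of the substitution above (target strings are illustrative):
#   _replace_device_with_tracing("llvm -device=arm_cpu")  # -> "llvm -device=tracing"
#   _replace_device_with_tracing("llvm")                  # -> "llvm -device=tracing"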
def _expr2graph_impl(expr, target_ops, node_dict, node_list, tvm_target):
"""Implementation to convert relay expr to graph data structure"""
def _traverse_expr(node):
if node in node_dict:
return
node_index = len(node_list)
node_entry = {"node": node, "inputs": [], "types": [], "op": None, "name": None}
if isinstance(node, Call):
op = node.op
node_entry["op"] = node.op
for arg in node.args:
in_node_idx = node_dict[arg]
if isinstance(arg, (Tuple, TupleGetItem)):
node_entry["inputs"] += node_list[in_node_idx]["inputs"]
else:
node_entry["inputs"].append([in_node_idx, 0, 0])
infer_out = _infer_type(node)
out_type = infer_out._checked_type_
if isinstance(out_type, TensorType):
node_entry["types"].append(out_type)
elif isinstance(out_type, TupleType):
for tupe_type in out_type.fields:
node_entry["types"].append(tupe_type)
else:
raise RuntimeError(
"Unsupported output type %s in operator %s" % (type(out_type), op.name)
)
# Utilize tracing target to fetch workload with topo-order.
# Since we only need workload, dummy target can be used to
# create task.
if op in target_ops:
params = []
for i, input_idx in enumerate(node_entry["inputs"]):
input_node_entry = node_list[input_idx[0]]
input_type = input_node_entry["types"][input_idx[1]]
if not isinstance(input_node_entry["node"], (Var, Constant, Call)):
raise RuntimeError(
"Graph tuner can only tune target "
"operators with input node of type "
"relay.expr.Var/Constant/Call. Now "
"find a target op %s with input type %s"
% (op, str(type(input_node_entry["node"])))
)
free_var = relay.Var("var_%d" % i, input_type)
params.append(free_var)
call = relay.Call(node.op, params, node.attrs)
mod = tvm.IRModule.from_expr(relay.Function(params, call))
relay.backend.te_compiler.get().clear()
tracing_target = _replace_device_with_tracing(tvm_target)
build_thread = threading.Thread(target=relay.build, args=(mod, tracing_target))
build_thread.start()
build_thread.join()
elif isinstance(node, Var):
node_entry["name"] = node.name_hint
node_entry["types"] = [node.type_annotation]
elif isinstance(node, Function):
# Ignore root node since it equals to input function expression
if node != expr:
_expr2graph_impl(node, target_ops, node_dict, node_list, tvm_target)
return
elif isinstance(node, TupleGetItem):
in_node_idx = node_dict[node.tuple_value]
node_entry["inputs"].append([in_node_idx, node.index, 0])
elif isinstance(node, Tuple):
for tuple_item in node:
in_node_idx = node_dict[tuple_item]
if isinstance(tuple_item, TupleGetItem):
node_entry["inputs"] += node_list[in_node_idx]["inputs"]
elif isinstance(tuple_item, Tuple):
raise RuntimeError("Graph tuner doesn't support nested tuple.")
else:
node_entry["inputs"].append([in_node_idx, 0, 0])
elif isinstance(node, Constant):
node_entry["name"] = "Constant_" + str(node_index)
node_entry["types"] = [node.checked_type]
elif isinstance(node, tvm.ir.Op):
return
else:
raise RuntimeError(
"Not supported relay node type in graph tuning: %s" % str(type(node))
)
node_dict[node] = node_index
node_list.append(node_entry)
relay.analysis.post_order_visit(expr, _traverse_expr)
def get_direct_ancestor(node_list, visited_dict, target_ops, node_idx, input_names):
"""Given a node_list in relay function and a node index, return the
closest ancestor which has op_name as operator name or is multi_input operator.
If node has multiple inputs, multiple ancestor nodes will be returned.
Parameters
----------
node_list : list of dict of str to object
List of all nodes in a graph.
visited_dict : dict of int to int
Nodes and corresponding ancestors which have been visited.
target_ops: List of str
List of target relay base op name
node_idx : int
Input node index.
input_names : list of str
Names of graph input nodes.
Returns
-------
out : list of int
List of ancestor node index.
"""
if node_idx in visited_dict:
return visited_dict[node_idx]
node = node_list[node_idx]
if is_boundary_node(node, input_names):
return [node_idx]
node_direct_ancestor = []
for item_idx in node["inputs"]:
item = node_list[item_idx[0]]
is_multiple_inputs = has_multiple_inputs(node_list, item_idx[0], input_names, OPT_OUT_OP)
if item["op"] in target_ops or is_multiple_inputs:
node_direct_ancestor.append(item_idx[0])
else:
tmp = get_direct_ancestor(node_list, visited_dict, target_ops, item_idx[0], input_names)
for tmp_item in tmp:
if tmp_item not in node_direct_ancestor:
node_direct_ancestor.append(tmp_item)
visited_dict[node_idx] = node_direct_ancestor
return node_direct_ancestor
def get_in_nodes(node_list, target_ops, input_names):
"""Create a dictionary mapping from op_name nodes or multi-input
nodes to closest input ancestors.
Parameters
----------
node_list : list of dict of str to object
List of all nodes in a graph.
target_ops: List of str
List of target relay op
input_names : list of str
Names of graph input nodes.
Returns
-------
out : dict of int to list of int
Dictionary maps node index to closest input ancestors.
"""
visited_dict = {}
in_node_dict = {}
for i, node in enumerate(node_list):
if is_boundary_node(node, input_names) or is_skipped_node(node):
continue
get_direct_ancestor(node_list, visited_dict, target_ops, i, input_names)
for key, val in visited_dict.items():
node = node_list[key]
is_multiple_inputs = has_multiple_inputs(node_list, key, input_names, OPT_OUT_OP)
if node["op"] in target_ops or is_multiple_inputs:
in_node_dict[key] = val
# Reduce boundary nodes
out_node_dict = get_out_nodes(in_node_dict)
has_reduced_node = True
while has_reduced_node:
boundary_nodes = []
for key, val in in_node_dict.items():
node = node_list[key]
is_boundary = True
# Target ops can't be boundary nodes
if node["op"] not in target_ops:
for input_idx in val:
in_node = node_list[input_idx]
if not is_boundary_node(in_node, input_names) and input_idx in in_node_dict:
is_boundary = False
else:
val.remove(input_idx)
if is_boundary:
boundary_nodes.append(key)
if boundary_nodes:
for idx in boundary_nodes:
if idx in in_node_dict:
del in_node_dict[idx]
else:
has_reduced_node = False
# Remove empty nodes to ignore pre-computed sub-graph
has_empty_node = True
while has_empty_node:
empty_nodes = []
for key, val in in_node_dict.items():
if not val:
empty_nodes.append(key)
if empty_nodes:
has_empty_node = True
for node in empty_nodes:
del in_node_dict[node]
if node in out_node_dict:
for out_node in out_node_dict[node]:
in_node_dict[out_node].remove(node)
else:
has_empty_node = False
return in_node_dict
def get_out_nodes(in_node_dict):
"""Create output dictionary from input dictionary.
Parameters
----------
in_node_dict : dict of int to list of int
Dictionary maps node index to closest input ancestors.
It can be created with get_in_nodes.
Returns
-------
out : dict of int to list of int
Dictionary maps node index to closest output nodes.
"""
out_node_dict = {}
for key in in_node_dict:
out_node_dict[key] = []
for key, val in in_node_dict.items():
for item in val:
if item in out_node_dict:
out_node_dict[item].append(key)
else:
out_node_dict[item] = [key]
return out_node_dict
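# Hedged illustration of the inversion performed by get_out_nodes (node indices
# are hypothetical, not taken from a real graph):
#   get_out_nodes({3: [1, 2], 4: [3]})  # -> {3: [4], 4: [], 1: [3], 2: [3]}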
|
plugins/webosint/who/whoami.py | Appnet1337/OSINT-SAN | 313 | 12740491 | import whois
from pythonping import ping
import re
def whoami(target,post):
#target=input("Enter the IP Address/Domain:")
getweb=str(ping(target))
ip = re.compile('(([2][5][0-5]\.)|([2][0-4][0-9]\.)|([0-1]?[0-9]?[0-9]\.)){3}'
+'(([2][5][0-5])|([2][0-4][0-9])|([0-1]?[0-9]?[0-9]))')
match = ip.search(getweb)
if match:
#target=match.group()
w = whois.whois(target)
print("Domain Name:"+ str(w['domain_name']))
print("Register:"+str(w['registrar']))
try:
print("Whois Server:"+str(w['whois_server']))
except Exception as e:
print(e)
print("Server:"+str(w['name_servers']))
print("Emails:"+str(w['emails']))
try:
print("Organisation:"+str(w['org']))
except Exception as e:
print("Organisation:"+str(w['organization']))
print(e)
try:
print("Address:"+str(w['address']))
print("City:"+str(w['city']))
print("State:"+str(w['state']))
print("Zipcode:"+str(w['zipcode']))
except Exception as e:
print(e)
print("Country:"+str(w['country']))
|
assignment1/tests/test_sigmoid_to_solutions.py | gyubokLee/CS224 | 125 | 12740493 | '''
HOW TO RUN THIS CODE (if tests are within the assignment 1 root):
python -m py.test tests/test_sigmoid_to_solutions.py -vv -s -q
python -m py.test tests/test_sigmoid_to_solutions.py -vv -s -q --cov
py.test.exe --cov=cs224d/ tests/test_sigmoid_to_solutions.py --cov-report html
(if the tests are within the subfolder tests)
PYTHONPATH=${PWD} py.test.exe tests/ -v --cov-report html
python -m pytest tests -v --cov-report html
Open index.html contained within htmlcov
'''
import pytest
import numpy as np
from q2_sigmoid import sigmoid, sigmoid_grad
from q2_sigmoid_sol import sigmoid_sol, sigmoid_grad_sol
import random
from collections import defaultdict, OrderedDict, Counter
COUNT=5
def rel_error(x,y):
""" returns relative error """
return np.max(np.abs(x - y) / (np.maximum(1e-7, np.abs(x) + np.abs(y))))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid(sigmoid_f):
""" Original sigmoid test defined in q2_sigmoid.py; """
x = np.array([[1, 2], [-1, -2]])
f = sigmoid_f(x)
assert rel_error(f, np.array([[0.73105858, 0.88079708],
[0.26894142, 0.11920292]])) <= 1e-7
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoidgrad(sigmoid_f):
""" Original sigmoid gradient test defined in q2_sigmoid.py; """
x = np.array([[1, 2], [-1, -2]])
f = sigmoid(x)
g = sigmoid_grad(f)
assert rel_error(g, np.array([[0.19661193, 0.10499359],
[0.19661193, 0.10499359]])) <= 1e-7
@pytest.mark.parametrize("dim", list(range(1,8)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_shape(dim, sigmoid_f):
testing_shape = []
for y in range(0,dim):
testing_shape.append(np.random.randint(3,8))
shape = tuple(testing_shape)
#z = np.random.randn(*testing_shape)
x = np.random.standard_normal(shape)
y = np.copy(x)
assert x.shape == sigmoid(y).shape
assert x.shape == sigmoid_grad(sigmoid(y)).shape
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_minus_z(sigmoid_f, count=100):
z = np.random.normal(loc=0., scale=100., size=count)
y = -z
assert rel_error(1 - sigmoid(y), sigmoid(z)) <= 1e-7
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_monotone(sigmoid_f, count=100):
z = np.random.normal(loc=0., scale=100., size=count)
shift = np.random.uniform(low=0., high=10., size=count)
assert np.all(sigmoid(z + shift) - sigmoid(z)) >= 0
assert np.all(sigmoid(z - shift) - sigmoid(z)) <= 0
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_range(sigmoid_f, count=100):
z = np.random.normal(loc=0., scale=100., size=count)
assert np.max(sigmoid(z)) <= 1.
assert np.max(sigmoid(z)) >= 0.
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize('execution_number', list(range(COUNT)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_permutation_axis0(dim_1, execution_number, sigmoid_f):
""" sigmoid needs to be applied element-wise;"""
a1 = np.random.normal(size=(dim_1,1))
s1 = sigmoid(a1)
permutation = np.random.permutation(dim_1)
inverse_permutation = np.argsort(permutation)
s1_perm = sigmoid(a1[permutation])
assert rel_error(s1_perm[inverse_permutation], s1) <= 1e-8
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_permutation_axis1(dim_1, sigmoid_f):
a1 = np.random.normal(size=(1,dim_1))
s1 = sigmoid(a1)
permutation = np.random.permutation(dim_1)
inverse_permutation = np.argsort(permutation)
s1_perm = sigmoid(a1.ravel()[permutation])
assert rel_error(s1_perm.ravel()[inverse_permutation], s1) <= 1e-8
#note: permutation(sigmoid(x)) = sigmoid(permutation(x))
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("dim_2", list(range(1,20)))
@pytest.mark.parametrize("sigmoid_f", [sigmoid, sigmoid_sol])
def test_sigmoid_gradient(dim_1, dim_2, sigmoid_f):
a1 = np.random.normal(loc=0., scale=20., size=(dim_1,dim_2))
shift = np.random.uniform(low=1e-9, high=1e-5, size=(dim_1,dim_2))
ap = a1 + shift
am = a1 - shift
dsigmoid = (sigmoid(ap) - sigmoid(am)) / (2*shift)
assert np.abs(np.max(dsigmoid - sigmoid_grad(sigmoid(a1)))) <= 1e-7
assert np.abs(np.min(dsigmoid - sigmoid_grad(sigmoid(a1)))) <= 1e-7
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("dim_2", list(range(1,20)))
def test_sigmoid_vs_solution(dim_1, dim_2):
a1 = np.random.normal(loc=0., scale=20., size=(dim_1,dim_2))
assert rel_error(sigmoid(a1), sigmoid_sol(a1)) <= 1e-10
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("dim_2", list(range(1,20)))
def test_sigmoid_grad_vs_solution(dim_1, dim_2):
a1 = np.random.normal(loc=0., scale=20., size=(dim_1,dim_2))
a1_copy = a1.copy()
s_a1 = sigmoid(a1)
s_sol_a1 = sigmoid_sol(a1_copy)
assert rel_error(sigmoid_grad(s_a1), sigmoid_grad_sol(s_sol_a1)) <= 1e-10
@pytest.mark.parametrize("dim_1", list(range(1,20)))
@pytest.mark.parametrize("dim_2", list(range(1,20)))
def test_sigmoid_grad_direct_vs_solution(dim_1, dim_2):
a1 = np.random.normal(loc=0., scale=20., size=(dim_1,dim_2))
a1_copy = a1.copy()
assert rel_error(sigmoid_grad(a1), sigmoid_grad_sol(a1_copy)) <= 1e-10
|
coremltools/test/neural_network/test_keras2_numeric.py | tonybove-apple/coremltools | 2,740 | 12740496 | import itertools
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from coremltools._deps import _HAS_KERAS2_TF
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models.utils import _macos_version, _is_macos
if _HAS_KERAS2_TF:
import keras.backend
from keras.models import Sequential, Model
from keras.layers import (
Dense,
Activation,
Conv2D,
Conv1D,
Flatten,
BatchNormalization,
Conv2DTranspose,
SeparableConv2D,
)
from keras.layers import (
MaxPooling2D,
AveragePooling2D,
GlobalAveragePooling2D,
GlobalMaxPooling2D,
)
from keras.layers import (
MaxPooling1D,
AveragePooling1D,
GlobalAveragePooling1D,
GlobalMaxPooling1D,
)
from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout
from keras.layers import Add, Concatenate
from keras.layers import add, multiply, concatenate, dot, maximum, average
from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D
from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D
from keras.layers import SimpleRNN, LSTM, GRU
from keras.layers.core import SpatialDropout2D
from keras.layers.wrappers import Bidirectional, TimeDistributed
from distutils.version import StrictVersion as _StrictVersion
if keras.__version__ >= _StrictVersion("2.2.1"):
from keras.layers import DepthwiseConv2D, ReLU
elif keras.__version__ >= _StrictVersion("2.2.0"):
from keras.layers import DepthwiseConv2D
from keras_applications.mobilenet import relu6
else:
from keras.applications.mobilenet import DepthwiseConv2D, relu6
def _keras_transpose(x, is_sequence=False):
if len(x.shape) == 5:
# Keras input shape = [Batch, Seq, Height, Width, Channels]
x = np.transpose(x, [1, 0, 4, 2, 3])
if len(x.shape) == 4:
# Keras input shape = [Batch, Height, Width, Channels]
x = np.transpose(x, [0, 3, 1, 2])
return np.expand_dims(x, axis=0)
elif len(x.shape) == 3:
# Keras input shape = [Batch, (Sequence) Length, Channels]
return np.transpose(x, [1, 0, 2])
elif len(x.shape) == 2:
if is_sequence: # (N,S) --> (S,N,1,)
return x.reshape(x.shape[::-1] + (1,))
else: # (N,C) --> (N,C,1,1)
return x.reshape((1,) + x.shape) # Dense
elif len(x.shape) == 1:
if is_sequence: # (S) --> (S,N,1,1,1)
return x.reshape((x.shape[0], 1, 1))
else:
return x
else:
return x
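# Hedged shape examples for _keras_transpose (sizes are illustrative only):
#   (N, H, W, C) image batch    -> (1, N, C, H, W)
#   (N, S, C) sequence batch    -> (S, N, C)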
def _get_coreml_model(
model,
input_names=["data"],
output_names=["output"],
input_name_shape_dict={},
model_precision=_MLMODEL_FULL_PRECISION,
use_float_arraytype=False,
):
"""
Get the coreml model from the Keras model.
"""
# Convert the model
from coremltools.converters import keras as keras_converter
model = keras_converter.convert(
model,
input_names,
output_names,
input_name_shape_dict=input_name_shape_dict,
model_precision=model_precision,
use_float_arraytype=use_float_arraytype,
)
return model
def _generate_data(input_shape, mode="random"):
"""
Generate some random data according to a shape.
"""
if mode == "zeros":
X = np.zeros(input_shape)
elif mode == "ones":
X = np.ones(input_shape)
elif mode == "linear":
X = np.array(range(np.product(input_shape))).reshape(input_shape)
elif mode == "random":
X = np.random.rand(*input_shape)
elif mode == "random_zero_mean":
X = np.random.rand(*input_shape) - 0.5
return X
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasNumericCorrectnessTest(unittest.TestCase):
"""
Unit test class for testing the Keras converter.
"""
def runTest(self):
pass
def _get_coreml_model_params_and_test_input(
self, model, mode, one_dim_seq_flags, input_name_shape_dict={}
):
# Generate data
nb_inputs = len(model.inputs)
if nb_inputs > 1:
input_names = []
input_data = []
coreml_input = {}
for i in range(nb_inputs):
feature_name = "data_%s" % i
input_names.append(feature_name)
if feature_name in input_name_shape_dict:
input_shape = [
1 if a is None else a
for a in input_name_shape_dict[feature_name]
]
else:
input_shape = [1 if a is None else a for a in model.input_shape[i]]
X = _generate_data(input_shape, mode)
input_data.append(X)
if one_dim_seq_flags is None:
coreml_input[feature_name] = _keras_transpose(X).astype("f").copy()
else:
coreml_input[feature_name] = (
_keras_transpose(X, one_dim_seq_flags[i]).astype("f").copy()
)
else:
input_names = ["data"]
if "data" in input_name_shape_dict:
input_shape = [
1 if a is None else a for a in input_name_shape_dict["data"]
]
else:
input_shape = [1 if a is None else a for a in model.input_shape]
input_data = _generate_data(input_shape, mode)
if one_dim_seq_flags is None:
coreml_input = {"data": _keras_transpose(input_data).astype("f").copy()}
else:
coreml_input = {
"data": _keras_transpose(input_data, one_dim_seq_flags[0])
.astype("f")
.copy()
}
output_names = ["output" + str(i) for i in range(len(model.outputs))]
return input_names, output_names, input_data, coreml_input
def _test_model(
self,
model,
input_name_shape_dict={},
num_samples=1,
mode="random",
delta=1e-2,
model_dir=None,
transpose_keras_result=True,
one_dim_seq_flags=None,
model_precision=_MLMODEL_FULL_PRECISION,
):
# transpose_keras_result: if True, transpose the Keras predictions before
# comparing them with the Core ML predictions.
# one_dim_seq_flags: a list with one entry per model input; if None, every
# 1D input (if any) is treated as a non-sequence. If one_dim_seq_flags[i]
# is True, the i-th input of shape (X,) is treated as a sequence of
# length X. See the usage sketch after this method.
# Get the CoreML model
use_tmp_folder = False
if model_dir is None:
use_tmp_folder = True
model_dir = tempfile.mkdtemp()
(
input_names,
output_names,
input_data,
coreml_input,
) = self._get_coreml_model_params_and_test_input(
model, mode, one_dim_seq_flags, input_name_shape_dict
)
coreml_model = _get_coreml_model(
model,
input_names,
output_names,
input_name_shape_dict,
model_precision=model_precision,
)
try:
if not (_is_macos() and _macos_version() >= (10, 13)):
return
# Assuming the Core ML output names ("output0", "output1", ...) follow
# the order of the Keras output list, collect the predictions in that
# same order.
coreml_preds = coreml_model.predict(coreml_input)
c_preds = [coreml_preds[name] for name in output_names]
# Get Keras predictions
keras_preds = model.predict(input_data)
k_preds = keras_preds if type(keras_preds) is list else [keras_preds]
# Compare each output blob
for idx, k_pred in enumerate(k_preds):
if transpose_keras_result:
kp = _keras_transpose(k_pred).flatten()
else:
kp = k_pred.flatten()
cp = c_preds[idx].flatten()
# Compare element-wise with a relative tolerance: each value is
# normalized by max(1.0, keras value, coreml value) before the
# almost-equal check, so large activations are compared relatively.
self.assertEqual(len(kp), len(cp))
for i in range(len(kp)):
max_den = max(1.0, kp[i], cp[i])
self.assertAlmostEqual(
kp[i] / max_den, cp[i] / max_den, delta=delta
)
finally:
# Cleanup files - models on disk no longer useful
if use_tmp_folder and os.path.exists(model_dir):
shutil.rmtree(model_dir)
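# Usage sketch (patterns taken from the tests below): subclasses call the helper
# directly, e.g.
#
#     self._test_model(model, one_dim_seq_flags=[True])
#
# to treat a single 1D input of shape (X,) as a length-X sequence, or
#
#     self._test_model(model, delta=1e-2, model_precision=_MLMODEL_HALF_PRECISION)
#
# to loosen the per-element tolerance when comparing half-precision weights.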
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasBasicNumericCorrectnessTest(KerasNumericCorrectnessTest):
def test_tiny_inner_product(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(2, input_shape=(2,)))
# Test all zeros
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="zeros", model_precision=model_precision)
# Test all ones
model.set_weights([np.ones(w.shape) for w in model.get_weights()])
self._test_model(model, mode="ones", model_precision=model_precision)
# Test random
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, model_precision=model_precision)
def test_tiny_inner_product_half_precision(self):
self.test_tiny_inner_product(model_precision=_MLMODEL_HALF_PRECISION)
def test_inner_product_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(1000, input_shape=(100,)))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_inner_product_half_precision_random(self):
self.test_inner_product_random(model_precision=_MLMODEL_HALF_PRECISION)
def test_dense_softmax(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation="softmax"))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_dense_elu(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation="elu"))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_dense_selu(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation="selu"))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_housenet_random(self):
np.random.seed(1988)
num_hidden = 2
num_features = 3
# Define a model
model = Sequential()
model.add(Dense(num_hidden, input_dim=num_features))
model.add(Activation("relu"))
model.add(Dense(1, input_dim=num_features))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_ones(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.ones(w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_ones_half_precision(self):
self.test_tiny_conv_ones(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
@unittest.skipUnless(
_is_macos() and _macos_version() >= (10, 14), "Only supported on MacOS 10.14+"
)
def test_tiny_conv_random_input_shape_dict(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
H, W, C = 10, 20, 5
input_shape = (None, H, W, C)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=(None, None, C),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(
model,
input_name_shape_dict={"data": input_shape},
model_precision=model_precision,
)
def test_tiny_conv_random_half_precision(self):
self.test_tiny_conv_random(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv_dilated(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
dilation_rate=(2, 2),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_dilated_half_precision(self):
return self.test_tiny_conv_dilated(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv_dilated_rect_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_shape = (32, 20, 3)
num_kernels = 2
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
dilation_rate=(2, 2),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_dilated_rect_random_half_precision(self):
return self.test_tiny_conv_dilated_rect_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_conv_pseudo_1d_x(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 2
input_length = 5
filter_length = 1 # 3
nb_filters = 1
# Define a model
model = Sequential()
model.add(
Conv2D(
nb_filters,
kernel_size=(1, filter_length),
input_shape=(1, input_length, input_dim),
padding="valid",
)
)
# Set some random weights
model.set_weights([np.ones(w.shape) for w in model.get_weights()])
self._test_model(model, mode="linear", model_precision=model_precision)
def test_tiny_conv_pseudo_1d_x_half_precision(self):
return self.test_tiny_conv_pseudo_1d_x(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv1d_same_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv1d_same_random_input_shape_dict(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(None, input_dim),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(
model, input_name_shape_dict={"data": (None, input_length, input_dim)}
)
def test_large_input_length_conv1d_same_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 2
input_length = 80
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_large_input_length_conv1d_same_random_half_precision(self):
return self.test_large_input_length_conv1d_same_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_conv1d_valid_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="valid",
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv1d_dilated_random(self):
np.random.seed(1988)
input_shape = (20, 1)
num_kernels = 2
filter_length = 3
# Define a model
model = Sequential()
model.add(
Conv1D(
num_kernels,
kernel_size=filter_length,
padding="valid",
input_shape=input_shape,
dilation_rate=3,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_rect_kernel_x(self):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels = 3
kernel_height = 1
kernel_width = 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="same",
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_rect_kernel_y(self):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels = 3
kernel_height = 5
kernel_width = 1
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="valid",
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_rect_kernel_xy(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels = 3
kernel_height = 5
kernel_width = 3
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="valid",
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_rect_kernel_xy_half_precision(self):
self.test_tiny_conv_rect_kernel_xy(model_precision=_MLMODEL_HALF_PRECISION)
def test_flatten(self):
model = Sequential()
model.add(Flatten(input_shape=(2, 2, 2)))
self._test_model(model, mode="linear")
def test_conv_dense(self, model_precision=_MLMODEL_FULL_PRECISION):
input_shape = (48, 48, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation="relu", input_shape=input_shape))
model.add(Flatten())
model.add(Dense(10, activation="softmax"))
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_conv_dense_half_precision(self):
return self.test_conv_dense(model_precision=_MLMODEL_HALF_PRECISION)
def test_conv_batchnorm_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 3)
num_kernels = 3
kernel_height = 5
kernel_width = 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(BatchNormalization(epsilon=1e-5))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_conv_batchnorm_random_half_precision(self):
return self.test_conv_batchnorm_random(model_precision=_MLMODEL_HALF_PRECISION)
def test_conv_batchnorm_no_gamma_no_beta(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 3)
num_kernels = 3
kernel_height = 5
kernel_width = 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(BatchNormalization(center=False, scale=False, epsilon=1e-5))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_conv_batchnorm_no_gamma_no_beta_half_precision(self):
return self.test_conv_batchnorm_no_gamma_no_beta(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_deconv_random(self):
# In Keras 2, deconvolution auto computes the output shape.
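# With "valid" padding and unit strides, the transposed convolution below grows
# each spatial side by kernel_size - 1, i.e. 13x13 -> 15x15 (standard
# transposed-convolution arithmetic, noted here for reference).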
np.random.seed(1988)
input_dim = 13
input_shape = (input_dim, input_dim, 5)
num_kernels = 16
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
Conv2DTranspose(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="valid",
use_bias=False,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_deconv_random_same_padding(self):
np.random.seed(1988)
input_dim = 14
input_shape = (input_dim, input_dim, 3)
num_kernels = 16
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
Conv2DTranspose(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="same",
strides=(2, 2),
use_bias=True,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_depthwise_conv_same_pad(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 1
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="same",
strides=(1, 1),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_depthwise_conv_valid_pad(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 1
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="valid",
strides=(1, 1),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_depthwise_conv_same_pad_depth_multiplier(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 4
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="same",
strides=(1, 1),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_depthwise_conv_valid_pad_depth_multiplier(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 2
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
DepthwiseConv2D(
depth_multiplier=depth_multiplier,
kernel_size=(kernel_height, kernel_width),
input_shape=input_shape,
padding="valid",
strides=(1, 1),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_separable_conv_valid(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 1
kernel_height = 3
kernel_width = 3
num_kernels = 4
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="valid",
strides=(1, 1),
depth_multiplier=depth_multiplier,
input_shape=input_shape,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_separable_conv_same_fancy(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 1
kernel_height = 3
kernel_width = 3
num_kernels = 4
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="same",
strides=(2, 2),
activation="relu",
depth_multiplier=depth_multiplier,
input_shape=input_shape,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_separable_conv_valid_depth_multiplier(self):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 5
kernel_height = 3
kernel_width = 3
num_kernels = 40
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="valid",
strides=(1, 1),
depth_multiplier=depth_multiplier,
input_shape=input_shape,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_separable_conv_same_fancy_depth_multiplier(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 16
input_shape = (input_dim, input_dim, 3)
depth_multiplier = 2
kernel_height = 3
kernel_width = 3
num_kernels = 40
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
padding="same",
strides=(2, 2),
activation="relu",
depth_multiplier=depth_multiplier,
input_shape=input_shape,
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_separable_conv_same_fancy_depth_multiplier_half_precision(self):
return self.test_tiny_separable_conv_same_fancy_depth_multiplier(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_separable_conv_dilated(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels, kernel_height, kernel_width = 3, 5, 5
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
input_shape=input_shape,
dilation_rate=(2, 2),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_separable_conv_dilated_half_precision(self):
return self.test_tiny_separable_conv_dilated(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_separable_conv_dilated_rect_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_shape = (32, 20, 3)
num_kernels = 2
kernel_height = 3
kernel_width = 3
# Define a model
model = Sequential()
model.add(
SeparableConv2D(
input_shape=input_shape,
dilation_rate=(2, 2),
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_separable_conv_dilated_rect_random_half_precision(self):
return self.test_tiny_separable_conv_dilated_rect_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_max_pooling_no_overlap(self):
# no_overlap: strides=None defaults to pool_size, so windows tile without overlapping
model = Sequential()
model.add(
MaxPooling2D(
input_shape=(16, 16, 3), pool_size=(2, 2), strides=None, padding="valid"
)
)
self._test_model(model)
def test_max_pooling_overlap_multiple(self):
# input shape is multiple of pool_size, strides != pool_size
model = Sequential()
model.add(
MaxPooling2D(
input_shape=(18, 18, 3),
pool_size=(3, 3),
strides=(2, 2),
padding="valid",
)
)
self._test_model(model)
def test_max_pooling_overlap_odd(self):
model = Sequential()
model.add(
MaxPooling2D(
input_shape=(16, 16, 3),
pool_size=(3, 3),
strides=(2, 2),
padding="valid",
)
)
self._test_model(model)
def test_max_pooling_overlap_same(self):
model = Sequential()
model.add(
MaxPooling2D(
input_shape=(16, 16, 3),
pool_size=(3, 3),
strides=(2, 2),
padding="same",
)
)
self._test_model(model)
def test_global_max_pooling(self):
model = Sequential()
model.add(GlobalMaxPooling2D(input_shape=(16, 16, 3)))
self._test_model(model)
def test_average_pooling_no_overlap(self):
# no_overlap: pool_size = strides
model = Sequential()
model.add(
AveragePooling2D(
input_shape=(16, 16, 3), pool_size=(2, 2), strides=None, padding="valid"
)
)
self._test_model(model, delta=1e-2)
def test_average_pooling_inception_config_1(self):
# overlapping windows: pool_size (3, 3) with strides (1, 1), as used in Inception
model = Sequential()
model.add(
AveragePooling2D(
input_shape=(16, 16, 3),
pool_size=(3, 3),
strides=(1, 1),
padding="same",
)
)
self._test_model(model, delta=1e-2)
def test_global_average_pooling(self):
model = Sequential()
model.add(GlobalAveragePooling2D(input_shape=(16, 16, 3)))
self._test_model(model)
def test_max_pooling_1d(self):
model = Sequential()
model.add(MaxPooling1D(input_shape=(16, 3), pool_size=4))
self._test_model(model)
def test_global_max_pooling_1d(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(GlobalMaxPooling1D())
self._test_model(model)
def test_average_pooling_1d(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(AveragePooling1D(pool_size=2))
self._test_model(model)
def test_global_average_pooling_1d(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(GlobalAveragePooling1D())
self._test_model(model)
def test_tiny_conv_upsample_random(self):
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
num_kernels = 3
kernel_height = 5
kernel_width = 5
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(UpSampling2D(size=2))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_upsample_1d_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(UpSampling1D(size=2))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_conv_crop_1d_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(Cropping1D(cropping=2))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_crop_1d_random_half_precision(self):
return self.test_tiny_conv_crop_1d_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_conv_pad_1d_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 2
input_length = 10
filter_length = 3
nb_filters = 4
model = Sequential()
model.add(
Conv1D(
nb_filters,
kernel_size=filter_length,
padding="same",
input_shape=(input_length, input_dim),
)
)
model.add(ZeroPadding1D(padding=2))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_pad_1d_random_half_precision(self):
return self.test_tiny_conv_pad_1d_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_conv_causal_1d(self):
np.random.seed(1988)
model = Sequential()
model.add(Conv1D(1, 3, input_shape=(10, 1), use_bias=False, padding="causal"))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_embedding(self, model_precision=_MLMODEL_FULL_PRECISION):
model = Sequential()
num_inputs = 10
num_outputs = 3
model.add(Embedding(num_inputs, num_outputs))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, model_precision=model_precision)
def test_embedding_half_precision(self):
return self.test_embedding(model_precision=_MLMODEL_HALF_PRECISION)
def test_embedding_seq(self, model_precision=_MLMODEL_FULL_PRECISION):
model = Sequential()
num_inputs = 10
num_outputs = 3
model.add(Embedding(num_inputs, num_outputs, input_length=7))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(
model, one_dim_seq_flags=[True], model_precision=model_precision
)
def test_embedding_seq_half_precision(self):
return self.test_embedding_seq(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_no_sequence_simple_rnn_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
# Define a model
model = Sequential()
model.add(SimpleRNN(num_channels, input_shape=(input_length, input_dim)))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model)
def test_tiny_sequence_simple_rnn_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 4
num_channels = 3
# Define a model
model = Sequential()
model.add(SimpleRNN(num_channels, input_shape=(input_length, input_dim)))
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_tiny_seq2seq_rnn_random(self):
np.random.seed(1988)
input_dim = 2
input_length = 4
num_channels = 3
# Define a model
model = Sequential()
model.add(
SimpleRNN(
num_channels,
input_shape=(input_length, input_dim),
return_sequences=True,
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_rnn_seq(self):
np.random.seed(1988)
input_dim = 11
input_length = 5
# Define a model
model = Sequential()
model.add(
SimpleRNN(20, input_shape=(input_length, input_dim), return_sequences=False)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_rnn_seq_backwards(self):
np.random.seed(1988)
input_dim = 11
input_length = 5
# Define a model
model = Sequential()
model.add(
SimpleRNN(
20,
input_shape=(input_length, input_dim),
return_sequences=False,
go_backwards=True,
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_medium_no_sequence_simple_rnn_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 10
# Define a model
model = Sequential()
model.add(SimpleRNN(num_channels, input_shape=(input_length, input_dim)))
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_tiny_no_sequence_lstm_zeros(self):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=1,
recurrent_activation="sigmoid",
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model, mode="zeros")
def test_tiny_no_sequence_lstm_ones(self):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=1,
recurrent_activation="sigmoid",
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model, mode="ones")
def test_small_no_sequence_lstm_zeros(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=2,
recurrent_activation="sigmoid",
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model, mode="zeros")
def test_small_no_sequence_lstm_ones(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=2,
recurrent_activation="sigmoid",
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model, mode="ones")
def test_lstm_seq(self):
np.random.seed(1988)
input_dim = 11
input_length = 5
model = Sequential()
model.add(
LSTM(20, input_shape=(input_length, input_dim), return_sequences=False)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model)
def test_lstm_seq_backwards(self):
np.random.seed(1988)
input_dim = 11
input_length = 5
model = Sequential()
model.add(
LSTM(
20,
input_shape=(input_length, input_dim),
return_sequences=False,
go_backwards=True,
)
)
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
self._test_model(model)
def test_medium_no_sequence_lstm_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 10
# Define a model
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_tiny_no_sequence_lstm_zeros_gpu(self):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
# Define a model
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=2,
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model, mode="zeros")
def test_small_no_sequence_lstm_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
# Define a model
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=2,
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_tiny_no_sequence_gru_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
num_samples = 1
# Define a model
model = Sequential()
model.add(
GRU(
num_channels,
input_shape=(input_length, input_dim),
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_no_sequence_gru_random_half_precision(self):
return self.test_tiny_no_sequence_gru_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_small_no_sequence_gru_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
# Define a model
model = Sequential()
model.add(
GRU(
num_channels,
input_shape=(input_length, input_dim),
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_medium_no_sequence_gru_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 10
# Define a model
model = Sequential()
model.add(
GRU(
num_channels,
input_shape=(input_length, input_dim),
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_medium_no_sequence_gru_random_half_precision(self):
return self.test_medium_no_sequence_gru_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_gru_seq(self):
np.random.seed(1988)
input_dim = 11
input_length = 5
# Define a model
model = Sequential()
model.add(
GRU(20, input_shape=(input_length, input_dim), return_sequences=False)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_gru_seq_backwards(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 11
input_length = 5
# Define a model
model = Sequential()
model.add(
GRU(
20,
input_shape=(input_length, input_dim),
return_sequences=False,
go_backwards=True,
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_gru_seq_backwards_half_precision(self):
return self.test_gru_seq_backwards(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_no_sequence_bidir_random(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
num_samples = 1
# Define a model
model = Sequential()
model.add(
Bidirectional(
LSTM(num_channels, implementation=1, recurrent_activation="sigmoid"),
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_no_sequence_bidir_random_half_precision(self):
return self.test_tiny_no_sequence_bidir_random(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_tiny_no_sequence_bidir_random_gpu(
self, model_precision=_MLMODEL_FULL_PRECISION
):
np.random.seed(1988)
input_dim = 1
input_length = 1
num_channels = 1
num_samples = 1
# Define a model
model = Sequential()
model.add(
Bidirectional(
LSTM(num_channels, implementation=2, recurrent_activation="sigmoid"),
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model, model_precision=model_precision)
def test_tiny_no_sequence_bidir_random_gpu_half_precision(self):
return self.test_tiny_no_sequence_bidir_random_gpu(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_small_no_sequence_bidir_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 1
# Define a model
model = Sequential()
model.add(
Bidirectional(
LSTM(num_channels, implementation=2, recurrent_activation="sigmoid"),
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_medium_no_sequence_bidir_random(self):
np.random.seed(1988)
input_dim = 10
input_length = 1
num_channels = 10
# Define a model
model = Sequential()
model.add(
Bidirectional(
LSTM(num_channels, implementation=2, recurrent_activation="sigmoid"),
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_medium_bidir_random_return_seq_false(self):
np.random.seed(1988)
input_dim = 7
input_length = 5
num_channels = 10
# Define a model
model = Sequential()
model.add(
Bidirectional(
LSTM(
num_channels,
return_sequences=False,
implementation=2,
recurrent_activation="sigmoid",
),
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_medium_bidir_random_return_seq_true(self):
np.random.seed(1988)
input_dim = 7
input_length = 5
num_channels = 10
# Define a model
model = Sequential()
model.add(
Bidirectional(
LSTM(
num_channels,
return_sequences=True,
implementation=2,
recurrent_activation="sigmoid",
),
input_shape=(input_length, input_dim),
)
)
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
def test_bilstm_merge_modes(self):
# issue 157
def get_model(input_dim, fc_size, rnn_size, output_dim, merge_mode):
input_data = Input(name="the_input", shape=(None, input_dim))
x = TimeDistributed(Dense(fc_size, name="fc1", activation="relu",))(
input_data
)
x = Bidirectional(
LSTM(
rnn_size,
return_sequences=True,
activation="relu",
kernel_initializer="he_normal",
),
merge_mode=merge_mode,
)(x)
y_pred = TimeDistributed(
Dense(output_dim, name="y_pred", activation="softmax")
)(x)
model = Model([input_data], [y_pred])
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
return model
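# merge_mode controls how the forward and backward LSTM outputs are combined:
# "concat" doubles the feature dimension to 2 * rnn_size, while "sum", "mul"
# and "ave" combine them element-wise and keep it at rnn_size (Keras
# Bidirectional semantics, noted here for context).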
input_dim = 26
fc_size = 512
rnn_size = 512
output_dim = 29
for merge_mode in ["concat", "sum", "mul", "ave"]:
model = get_model(input_dim, fc_size, rnn_size, output_dim, merge_mode)
self._test_model(model)
def test_tiny_conv_elu_random(self):
np.random.seed(1988)
# Define a model
from keras.layers.advanced_activations import ELU
model = Sequential()
model.add(Conv2D(input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5)))
model.add(ELU(alpha=0.8))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_conv_prelu_random(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
# Define a model
from keras.layers.advanced_activations import PReLU
model = Sequential()
model.add(
Conv2D(
input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding="same"
)
)
model.add(PReLU(shared_axes=[1, 2]))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_tiny_conv_prelu_random_half_precision(self):
return self.test_tiny_conv_prelu_random(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_conv_leaky_relu_random(self):
np.random.seed(1988)
# Define a model
from keras.layers.advanced_activations import LeakyReLU
model = Sequential()
model.add(
Conv2D(
input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding="same"
)
)
model.add(LeakyReLU(alpha=0.3))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_conv_thresholded_relu_random(self):
np.random.seed(1988)
# Define a model
from keras.layers.advanced_activations import ThresholdedReLU
model = Sequential()
model.add(
Conv2D(
input_shape=(10, 10, 3), filters=3, kernel_size=(5, 5), padding="same"
)
)
model.add(ThresholdedReLU(theta=0.8))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_concat_random(self):
np.random.seed(1988)
input_dim = 10
num_channels = 6
# Define a model
input_tensor = Input(shape=(input_dim,))
x1 = Dense(num_channels)(input_tensor)
x2 = Dense(num_channels)(x1)
x3 = Dense(num_channels)(x1)
x4 = concatenate([x2, x3])
x5 = Dense(num_channels)(x4)
model = Model(inputs=[input_tensor], outputs=[x5])
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_concat_seq_random(self):
np.random.seed(1988)
max_features = 10
embedding_dims = 4
seq_len = 5
num_channels = 6
# Define a model
input_tensor = Input(shape=(seq_len,))
x1 = Embedding(max_features, embedding_dims)(input_tensor)
x2 = Embedding(max_features, embedding_dims)(input_tensor)
x3 = concatenate([x1, x2], axis=1)
model = Model(inputs=[input_tensor], outputs=[x3])
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model, one_dim_seq_flags=[True])
def test_lstm_concat_dense_random(self):
np.random.seed(1988)
vocab_size = 1250
seq_length = 5
units = 32
# Define a model
input_tokens = Input(shape=(seq_length,))
pos = Input(shape=(seq_length, 1))
embedding = Embedding(vocab_size, 50, input_length=seq_length)(input_tokens)
concat = Concatenate(axis=2)([embedding, pos])
x = LSTM(units, return_sequences=True, stateful=False)(concat)
x = LSTM(units, return_sequences=False)(x)
x = Dense(100, activation="relu")(x)
x = Dense(vocab_size, activation="softmax")(x)
model = Model(inputs=[input_tokens, pos], outputs=x)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model, one_dim_seq_flags=[True, True])
def test_tiny_add_random(self):
np.random.seed(1988)
input_dim = 10
num_channels = 6
# Define a model
input_tensor = Input(shape=(input_dim,))
x1 = Dense(num_channels)(input_tensor)
x2 = Dense(num_channels)(x1)
x3 = Dense(num_channels)(x1)
x4 = add([x2, x3])
x5 = Dense(num_channels)(x4)
model = Model(inputs=[input_tensor], outputs=[x5])
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_mul_random(self):
np.random.seed(1988)
input_dim = 10
num_channels = 6
# Define a model
input_tensor = Input(shape=(input_dim,))
x1 = Dense(num_channels)(input_tensor)
x2 = Dense(num_channels)(x1)
x3 = Dense(num_channels)(x1)
x4 = multiply([x2, x3])
x5 = Dense(num_channels)(x4)
model = Model(inputs=[input_tensor], outputs=[x5])
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_cos_random(self):
np.random.seed(1988)
input_dim = 10
num_channels = 6
# Define a model
input_tensor = Input(shape=(input_dim,))
x1 = Dense(num_channels)(input_tensor)
x2 = Dense(num_channels)(x1)
x3 = Dense(num_channels)(x1)
x4 = dot([x2, x3], axes=-1, normalize=True)
x5 = Dense(num_channels)(x4)
model = Model(inputs=[input_tensor], outputs=[x5])
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_zeropad_simple(self):
input_shape = (48, 48, 3)
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_zeropad_fancy(self):
input_shape = (48, 48, 3)
model = Sequential()
model.add(ZeroPadding2D(((2, 5), (3, 4)), input_shape=input_shape))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_crop_simple(self):
input_shape = (48, 48, 3)
model = Sequential()
model.add(Cropping2D(cropping=((2, 5), (2, 5)), input_shape=input_shape))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_permute(self):
# When input blob is 3D array (D1, D2, D3), Keras assumes the axes' meaning is
# (D1=H,D2=W,D3=C), while CoreML assumes (D1=C,D2=H,D3=W)
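# (Context, not asserted here:) the converter therefore has to remap each Keras
# permutation of the (H, W, C) axes onto Core ML's (C, H, W) ordering; the loop
# below runs all six orderings of a (4, 3, 2) input to cover every case.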
import itertools
for permute_order in list(itertools.permutations([1, 2, 3])):
model = Sequential()
model.add(Permute(permute_order, input_shape=(4, 3, 2)))
self._test_model(model, transpose_keras_result=True)
def test_reshape_3d(self):
model = Sequential()
model.add(Reshape((10, 1, 6), input_shape=(5, 4, 3)))
self._test_model(model, mode="linear")
def test_tiny_conv_dense_random(self):
np.random.seed(1988)
num_samples = 1
input_dim = 8
input_shape = (input_dim, input_dim, 3)
num_kernels = 2
kernel_height = 5
kernel_width = 5
hidden_dim = 4
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(hidden_dim))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_conv_dropout_random(self):
np.random.seed(1988)
num_samples = 1
input_dim = 8
input_shape = (input_dim, input_dim, 3)
num_kernels = 2
kernel_height = 5
kernel_width = 5
hidden_dim = 4
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
model.add(SpatialDropout2D(0.5))
model.add(Flatten())
model.add(Dense(hidden_dim))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_dense_tanh_fused_random(self):
np.random.seed(1988)
num_samples = 1
input_dim = 3
hidden_dim = 4
# Define a model
model = Sequential()
model.add(Dense(hidden_dim, input_shape=(input_dim,), activation="tanh"))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_conv_relu_fused_random(self):
np.random.seed(1988)
num_samples = 1
input_dim = 8
input_shape = (input_dim, input_dim, 3)
num_kernels = 2
kernel_height = 5
kernel_width = 5
hidden_dim = 4
# Define a model
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
activation="relu",
filters=num_kernels,
kernel_size=(kernel_height, kernel_width),
)
)
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Get the coreml model
self._test_model(model)
def test_tiny_time_distributed(self):
# as the first layer in a model
model = Sequential()
model.add(TimeDistributed(Dense(8), input_shape=(10, 16)))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_tiny_sequence_lstm(self, model_precision=_MLMODEL_FULL_PRECISION):
np.random.seed(1988)
input_dim = 1
input_length = 2
num_channels = 1
# Define a model
model = Sequential()
model.add(
LSTM(
num_channels,
input_shape=(input_length, input_dim),
implementation=1,
recurrent_activation="sigmoid",
)
)
# Set some random weights
model.set_weights(
[(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model, delta=1e-4, model_precision=model_precision)
def test_tiny_sequence_lstm_half_precision(self):
return self.test_tiny_sequence_lstm(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_spatial_bn(self):
np.random.seed(1988)
x_in = Input(shape=(7, 7, 2))
x = ZeroPadding2D(padding=(1, 1))(x_in)
x = BatchNormalization(axis=2)(x)
model = Model(x_in, x)
self._test_model(model, delta=1e-2)
def test_embedding_fixed_length(self):
sequence_length = 5
vocab_size = 10
embed_channels = 4
dense_units = sequence_length * embed_channels
model = Sequential()
model.add(Embedding(vocab_size, embed_channels, input_length=sequence_length))
model.add(Flatten())
model.add(Dense(dense_units))
model.add(Dense(20))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, one_dim_seq_flags=[True])
def test_conv1d_flatten(self, delta=1e-2):
model = Sequential()
model.add(AveragePooling1D(2, input_shape=(64, 9)))
model.add(Conv1D(16, 1, padding="same", activation="relu", use_bias=False))
model.add(MaxPooling1D(2))
model.add(Flatten())
model.add(Dense(units=7, activation="softmax", use_bias=False))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, delta=delta)
def test_dense_fused_act_in_td(self):
np.random.seed(1988)
x_in = Input(shape=(10, 2))
x = TimeDistributed(Dense(6, activation="softmax"))(x_in)
model = Model(inputs=[x_in], outputs=[x])
self._test_model(model, delta=1e-4)
def test_conv_batch_1d(self):
np.random.seed(1988)
vocabulary_size = 4
embedding_dimension = 6
input_length = 10
model = Sequential()
model.add(
Embedding(
vocabulary_size,
embedding_dimension,
input_length=input_length,
trainable=True,
)
)
model.add(Conv1D(5, 2))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling1D(2))
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, one_dim_seq_flags=[True])
def test_lstm_td(self):
np.random.seed(1988)
input_dim = 2
input_length = 4
num_channels = 3
# Define a model
model = Sequential()
model.add(
SimpleRNN(
num_channels,
return_sequences=True,
input_shape=(input_length, input_dim),
)
)
model.add(TimeDistributed(Dense(5)))
# Set some random weights
model.set_weights(
[np.random.rand(*w.shape) * 0.2 - 0.1 for w in model.get_weights()]
)
# Test the keras model
self._test_model(model)
# Making sure that giant channel sizes get handled correctly
def test_large_channel_gpu(self):
input_shape = (20, 20, 3)
num_channels = 2049
kernel_size = 3
model = Sequential()
model.add(
Conv2D(
input_shape=input_shape,
filters=num_channels,
kernel_size=(kernel_size, kernel_size),
)
)
model.set_weights(
[(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
)
self._test_model(model, delta=1e-2)
@pytest.mark.xfail(raises=Exception)
def test_large_batch_gpu(self):
batch_size = 2049
num_channels = 4
kernel_size = 3
model = Sequential()
model.add(
TimeDistributed(Dense(num_channels), input_shape=(batch_size, kernel_size))
)
model.set_weights(
[(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
)
self._test_model(model, delta=1e-2)
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasTopologyCorrectnessTest(KerasNumericCorrectnessTest):
def test_dangling_merge_left(self):
x1 = Input(shape=(4,), name="input1")
x2 = Input(shape=(5,), name="input2")
y1 = Dense(6, name="dense")(x2)
z = concatenate([x1, y1])
model = Model(inputs=[x1, x2], outputs=[z])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_dangling_merge_right(self):
x1 = Input(shape=(4,), name="input1")
x2 = Input(shape=(5,), name="input2")
y1 = Dense(6, name="dense")(x2)
z = concatenate([y1, x1])
model = Model(inputs=[x1, x2], outputs=[z])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_shared_vision(self):
digit_input = Input(shape=(27, 27, 1))
x = Conv2D(64, (3, 3))(digit_input)
x = Conv2D(64, (3, 3))(x)
out = Flatten()(x)
vision_model = Model(inputs=[digit_input], outputs=[out])
# then define the tell-digits-apart model
digit_a = Input(shape=(27, 27, 1))
digit_b = Input(shape=(27, 27, 1))
# the vision model will be shared, weights and all
out_a = vision_model(digit_a)
out_b = vision_model(digit_b)
concatenated = concatenate([out_a, out_b])
out = Dense(1, activation="sigmoid")(concatenated)
model = Model(inputs=[digit_a, digit_b], outputs=out)
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model)
def test_tiny_weight_sharing(self):
# - Dense1 -----------
# x - | |- Merge
# - Dense1 - Dense2 --
x = Input(shape=(3,))
dense = Dense(4)
y1 = dense(x)
y2 = dense(x)
y3 = Dense(4)(y2)
z = concatenate([y1, y3])
model = Model(inputs=[x], outputs=[z])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_tiny_multiple_outputs(self):
x = Input(shape=(3,))
y1 = Dense(4)(x)
y2 = Dense(5)(x)
model = Model([x], [y1, y2])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_intermediate_outputs_dense(self):
x = Input(shape=(3,))
y = Dense(4, name="intermediate_dense_y")(x)
z = Dense(5, name="intermediate_dense_z")(y)
model = Model([x], [y, z])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_intermediate_outputs_conv2d(self):
x = Input(shape=(8, 8, 3))
y = Conv2D(4, (3, 3), name="intermdiate_conv2d_1")(x)
z = Conv2D(5, (3, 3), name="intermdiate_conv2d_2")(y)
model = Model([x], [y, z])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_intermediate_outputs_conv2d_fused_act(self):
x = Input(shape=(8, 8, 3))
y = Conv2D(4, (3, 3), name="intermdiate_conv2d_1_fused", activation="relu")(x)
z = Conv2D(5, (3, 3), name="intermdiate_conv2d_2_fused", activation="relu")(y)
model = Model([x], [y, z])
model.set_weights([np.random.rand(*w.shape) - 0.5 for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_intermediate_outputs_conv1d(self):
x = Input(shape=(10, 3))
y = Conv1D(4, 3, name="intermdiate_conv1d_1")(x)
z = Conv1D(5, 3, name="intermdiate_conv1d_2")(y)
model = Model([x], [y, z])
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_intermediate_outputs_conv1d_fused_act(self):
x = Input(shape=(10, 3))
y = Conv1D(4, 3, name="intermdiate_conv1d_1_fused", activation="relu")(x)
z = Conv1D(5, 3, name="intermdiate_conv1d_2_fused", activation="relu")(y)
model = Model([x], [y, z])
model.set_weights([np.random.rand(*w.shape) - 0.5 for w in model.get_weights()])
self._test_model(model, mode="random", delta=1e-2)
def test_intermediate_rcnn_1d(self):
x_in = Input(shape=(10, 2))
# Conv block 1
x = Conv1D(3, 3, padding="same", name="interm_rcnn_conv1")(x_in)
x = BatchNormalization(axis=-1, name="interm_rcnn_bn1")(x)
x = Activation("elu")(x)
x = MaxPooling1D(pool_size=2, name="interm_rcnn_pool1")(x)
out1 = x # out1.shape = (5,3)
x = GRU(6, name="gru1")(x)
out2 = x
model = Model(x_in, [out1, out2])
# model = Model(x_in, [out2])
self._test_model(model, mode="random_zero_mean", delta=1e-2)
def test_tiny_mobilenet_arch(self, model_precision=_MLMODEL_FULL_PRECISION):
def ReLU6(x, name):
if keras.__version__ >= _StrictVersion("2.2.1"):
return ReLU(6.0, name=name)(x)
else:
return Activation(relu6, name=name)(x)
img_input = Input(shape=(32, 32, 3))
x = Conv2D(
4, (3, 3), padding="same", use_bias=False, strides=(2, 2), name="conv1"
)(img_input)
x = BatchNormalization(axis=-1, name="conv1_bn")(x)
x = ReLU6(x, name="conv1_relu")
x = DepthwiseConv2D(
(3, 3),
padding="same",
depth_multiplier=1,
strides=(1, 1),
use_bias=False,
name="conv_dw_1",
)(x)
x = BatchNormalization(axis=-1, name="conv_dw_1_bn")(x)
x = ReLU6(x, name="conv_dw_1_relu")
x = Conv2D(
8, (1, 1), padding="same", use_bias=False, strides=(1, 1), name="conv_pw_1"
)(x)
x = BatchNormalization(axis=-1, name="conv_pw_1_bn")(x)
x = ReLU6(x, name="conv_pw_1_relu")
x = DepthwiseConv2D(
(3, 3),
padding="same",
depth_multiplier=1,
strides=(2, 2),
use_bias=False,
name="conv_dw_2",
)(x)
x = BatchNormalization(axis=-1, name="conv_dw_2_bn")(x)
x = ReLU6(x, name="conv_dw_2_relu")
x = Conv2D(
8, (1, 1), padding="same", use_bias=False, strides=(2, 2), name="conv_pw_2"
)(x)
x = BatchNormalization(axis=-1, name="conv_pw_2_bn")(x)
x = ReLU6(x, name="conv_pw_2_relu")
model = Model(inputs=[img_input], outputs=[x])
self._test_model(model, delta=1e-2, model_precision=model_precision)
def test_tiny_mobilenet_arch_half_precision(self):
self.test_tiny_mobilenet_arch(model_precision=_MLMODEL_HALF_PRECISION)
def test_tiny_xception(self, model_precision=_MLMODEL_FULL_PRECISION):
img_input = Input(shape=(32, 32, 3))
x = Conv2D(2, (3, 3), strides=(2, 2), use_bias=False, name="block1_conv1")(
img_input
)
x = BatchNormalization(name="block1_conv1_bn")(x)
x = Activation("relu", name="block1_conv1_act")(x)
x = Conv2D(4, (3, 3), use_bias=False, name="block1_conv2")(x)
x = BatchNormalization(name="block1_conv2_bn")(x)
x = Activation("relu", name="block1_conv2_act")(x)
residual = Conv2D(8, (1, 1), strides=(2, 2), padding="same", use_bias=False)(x)
residual = BatchNormalization()(residual)
x = SeparableConv2D(
8, (3, 3), padding="same", use_bias=False, name="block2_sepconv1"
)(x)
x = BatchNormalization(name="block2_sepconv1_bn")(x)
x = Activation("relu", name="block2_sepconv2_act")(x)
x = SeparableConv2D(
8, (3, 3), padding="same", use_bias=False, name="block2_sepconv2"
)(x)
x = BatchNormalization(name="block2_sepconv2_bn")(x)
x = MaxPooling2D((3, 3), strides=(2, 2), padding="same", name="block2_pool")(x)
x = add([x, residual])
residual = Conv2D(16, (1, 1), strides=(2, 2), padding="same", use_bias=False)(x)
residual = BatchNormalization()(residual)
model = Model(inputs=[img_input], outputs=[residual])
self._test_model(model, delta=1e-2, model_precision=model_precision)
def test_tiny_xception_half_precision(self):
return self.test_tiny_xception(model_precision=_MLMODEL_HALF_PRECISION)
def test_nested_model_giving_output(self):
base_model = Sequential()
base_model.add(Conv2D(32, (1, 1), input_shape=(4, 4, 3)))
top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(16, activation="relu"))
top_model.add(Dense(1, activation="sigmoid"))
model = Model(inputs=base_model.input, outputs=top_model(base_model.output))
self._test_model(model)
# similar to issue 269
def test_time_distributed_conv(self):
model = Sequential()
model.add(
TimeDistributed(
Conv2D(64, (3, 3), activation="relu"), input_shape=(1, 30, 30, 3)
)
)
model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(1, 1))))
model.add(TimeDistributed(Conv2D(32, (4, 4), activation="relu")))
model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
model.add(TimeDistributed(Conv2D(32, (4, 4), activation="relu")))
model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))
model.add(TimeDistributed(Flatten()))
model.add(Dropout(0.5))
model.add(LSTM(32, return_sequences=False, dropout=0.5))
model.add(Dense(10, activation="sigmoid"))
self._test_model(model)
@pytest.mark.slow
@pytest.mark.keras2
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
class KerasNumericCorrectnessStressTest(KerasNumericCorrectnessTest):
"""
Unit test class for testing all combinations of a particular
layer.
"""
def _run_test(
self,
model,
param,
model_dir=None,
delta=1e-2,
transpose_keras_result=True,
one_dim_seq_flags=None,
model_precision=_MLMODEL_FULL_PRECISION,
):
""" Run a test on a particular model
"""
use_tmp_folder = False
if model_dir is None:
use_tmp_folder = True
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, "keras.mlmodel")
# Generate some random data
nb_inputs = len(model.inputs)
if nb_inputs > 1:
input_names = []
input_data = []
coreml_input = {}
for i in range(nb_inputs):
input_shape = [1 if a is None else a for a in model.input_shape[i]]
X = _generate_data(input_shape)
feature_name = "data_%s" % i
input_names.append(feature_name)
input_data.append(X)
if one_dim_seq_flags is None:
coreml_input[feature_name] = _keras_transpose(X).astype("f")
else:
coreml_input[feature_name] = _keras_transpose(
X, one_dim_seq_flags[i]
).astype("f")
else:
input_shape = [1 if a is None else a for a in model.input_shape]
input_names = ["data"]
input_data = _generate_data(input_shape)
if one_dim_seq_flags is None:
coreml_input = {"data": _keras_transpose(input_data).astype("f")}
else:
coreml_input = {
"data": _keras_transpose(input_data, one_dim_seq_flags[0]).astype(
"f"
)
}
# Make predictions
if transpose_keras_result:
keras_preds = _keras_transpose(model.predict(input_data)).flatten()
else:
keras_preds = model.predict(input_data).flatten()
# Get the model
coreml_model = _get_coreml_model(
model, input_names, ["output"], model_precision=model_precision
)
if _is_macos() and _macos_version() >= (10, 13):
# get prediction
coreml_preds = coreml_model.predict(coreml_input)["output"].flatten()
if use_tmp_folder:
shutil.rmtree(model_dir)
self.assertEqual(
len(coreml_preds),
len(keras_preds),
msg="Failed test case %s. Lengths wrong (%s vs %s)"
% (param, len(coreml_preds), len(keras_preds)),
)
for i in range(len(keras_preds)):
max_den = max(1.0, keras_preds[i], coreml_preds[i])
self.assertAlmostEqual(
keras_preds[i] / max_den,
coreml_preds[i] / max_den,
delta=delta,
msg="Failed test case %s. Predictions wrong (%s vs %s)"
% (param, coreml_preds[i], keras_preds[i]),
)
@pytest.mark.slow
def test_activation_layer_params(self):
options = dict(
activation=[
"tanh",
"relu",
"sigmoid",
"softmax",
"softplus",
"softsign",
"hard_sigmoid",
"elu",
]
)
# Define a function that tests a model
num_channels = 10
input_dim = 10
def build_model(x):
model = Sequential()
model.add(Dense(num_channels, input_dim=input_dim))
model.add(Activation(**dict(zip(options.keys(), x))))
return x, model
# Iterate through all combinations
product = itertools.product(*options.values())
args = [build_model(p) for p in product]
# Test the cases
print("Testing a total of %s cases. This could take a while" % len(args))
for param, model in args:
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._run_test(model, param)
@pytest.mark.slow
def test_dense_layer_params(self):
options = dict(
activation=[
"relu",
"softmax",
"tanh",
"sigmoid",
"softplus",
"softsign",
"elu",
"hard_sigmoid",
],
use_bias=[True, False],
)
# Define a function that tests a model
input_shape = (10,)
num_channels = 10
def build_model(x):
kwargs = dict(zip(options.keys(), x))
model = Sequential()
model.add(Dense(num_channels, input_shape=input_shape, **kwargs))
return x, model
# Iterate through all combinations
product = itertools.product(*options.values())
args = [build_model(p) for p in product]
# Test the cases
print("Testing a total of %s cases. This could take a while" % len(args))
for param, model in args:
self._run_test(model, param)
@pytest.mark.slow
def test_upsample_layer_params(self):
options = dict(size=[(2, 2), (3, 3), (4, 4), (5, 5)])
np.random.seed(1988)
input_dim = 10
input_shape = (input_dim, input_dim, 1)
X = np.random.rand(1, *input_shape)
# Define a function that tests a model
def build_model(x):
kwargs = dict(zip(options.keys(), x))
model = Sequential()
model.add(Conv2D(filters=5, kernel_size=(7, 7), input_shape=input_shape))
model.add(UpSampling2D(**kwargs))
return x, model
# Iterate through all combinations
product = itertools.product(*options.values())
args = [build_model(p) for p in product]
# Test the cases
print("Testing a total of %s cases. This could take a while" % len(args))
for param, model in args:
self._run_test(model, param)
@pytest.mark.slow
def test_conv_layer_params(self, model_precision=_MLMODEL_FULL_PRECISION):
options = dict(
activation=[
"relu",
"tanh",
"sigmoid",
], # keras does not support softmax on 4-D
use_bias=[True, False],
padding=["same", "valid"],
filters=[1, 3, 5],
kernel_size=[[5, 5]], # fails when sizes are different
)
# Define a function that tests a model
input_shape = (10, 10, 1)
def build_model(x):
kwargs = dict(zip(options.keys(), x))
model = Sequential()
model.add(Conv2D(input_shape=input_shape, **kwargs))
return x, model
# Iterate through all combinations
product = itertools.product(*options.values())
args = [build_model(p) for p in product]
# Test the cases
print("Testing a total of %s cases. This could take a while" % len(args))
for param, model in args:
self._run_test(model, param, model_precision=model_precision)
@pytest.mark.keras2
def test_conv_layer_params_half_precision(self):
return self.test_conv_layer_params(model_precision=_MLMODEL_HALF_PRECISION)
@pytest.mark.slow
def test_dense_elementwise_params(self):
options = dict(modes=[add, multiply, concatenate, average, maximum])
def build_model(mode):
x1 = Input(shape=(3,))
x2 = Input(shape=(3,))
y1 = Dense(4)(x1)
y2 = Dense(4)(x2)
z = mode([y1, y2])
model = Model([x1, x2], z)
return mode, model
product = itertools.product(*options.values())
args = [build_model(p[0]) for p in product]
print("Testing a total of %s cases. This could take a while" % len(args))
for param, model in args:
self._run_test(model, param)
def test_vgg_16_tiny(self):
input_shape = (48, 48, 3)
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(32, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(32, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(1000)) # activation='softmax'))
# Set some random weights
model.set_weights(
[(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
)
# Get the coreml model
self._test_model(model)
def test_vgg_16_tiny_no_pooling(self):
input_shape = (48, 48, 3)
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=input_shape))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Flatten())
model.add(Dense(32, activation="relu"))
# model.add(Dropout(0.5))
model.add(Dense(32, activation="relu"))
# model.add(Dropout(0.5))
model.add(Dense(1000)) # activation='softmax'))
# Set some random weights
model.set_weights(
[(np.random.rand(*w.shape) - 0.5) * 0.2 for w in model.get_weights()]
)
# Get the coreml model
self._test_model(model)
def test_vgg_16_tiny_no_pooling_no_padding(
self, model_precision=_MLMODEL_FULL_PRECISION
):
input_shape = (48, 48, 3)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation="relu", input_shape=input_shape))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(Flatten())
model.add(Dense(32, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(32, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(1000, activation="softmax"))
# Get the coreml model
self._test_model(model, model_precision=model_precision)
def test_vgg_16_tiny_no_pooling_no_padding_half_precision(self):
return self.test_vgg_16_tiny_no_pooling_no_padding(
model_precision=_MLMODEL_HALF_PRECISION
)
def test_imdb_fasttext_first_2(self):
max_features = 10
max_len = 6
embedding_dims = 4
pool_length = 2
model = Sequential()
model.add(Embedding(max_features, embedding_dims, input_length=max_len))
        # we add an AveragePooling1D, which will average the embeddings
# of all words in the document
model.add(AveragePooling1D(pool_size=pool_length))
self._test_model(model, one_dim_seq_flags=[True])
def test_tiny_mcrnn_td(self):
model = Sequential()
model.add(Conv2D(3, (1, 1), input_shape=(2, 4, 4), padding="same"))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Reshape((2, 3)))
model.add(TimeDistributed(Dense(5)))
self._test_model(model)
def test_tiny_mcrnn_recurrent(self):
model = Sequential()
model.add(Conv2D(3, (1, 1), input_shape=(2, 4, 4), padding="same"))
model.add(AveragePooling2D(pool_size=(2, 2)))
model.add(Reshape((2, 3)))
model.add(LSTM(5, recurrent_activation="sigmoid"))
self._test_model(model)
def test_tiny_mcrnn_music_tagger(self):
x_in = Input(shape=(4, 6, 1))
x = ZeroPadding2D(padding=(0, 1))(x_in)
x = BatchNormalization(axis=2, name="bn_0_freq")(x)
# Conv block 1
x = Conv2D(2, (3, 3), padding="same", name="conv1")(x)
x = BatchNormalization(axis=3, name="bn1")(x)
x = Activation("elu")(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="pool1")(x)
# Conv block 2
x = Conv2D(4, (3, 3), padding="same", name="conv2")(x)
x = BatchNormalization(axis=3, name="bn2")(x)
x = Activation("elu")(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name="pool2")(x)
# Should get you (1,1,2,4)
x = Reshape((2, 4))(x)
x = GRU(32, return_sequences=True, name="gru1")(x)
x = GRU(32, return_sequences=False, name="gru2")(x)
# Create model.
model = Model(x_in, x)
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
self._test_model(model, mode="random_zero_mean", delta=1e-2)
def test_tiny_apple_manual(self):
model = Sequential()
model.add(LSTM(3, input_shape=(4, 5), recurrent_activation="sigmoid"))
model.add(Dense(5))
model.add(Activation("softmax"))
self._test_model(model)
def test_tiny_image_captioning_image_branch(self):
img_input_1 = Input(shape=(16, 16, 3))
x = Conv2D(2, (3, 3))(img_input_1)
x = Flatten()(x)
img_model = Model(inputs=[img_input_1], outputs=[x])
img_input = Input(shape=(16, 16, 3))
x = img_model(img_input)
x = Dense(8, name="cap_dense")(x)
x = Reshape((1, 8), name="cap_reshape")(x)
image_branch = Model(inputs=[img_input], outputs=[x])
self._test_model(image_branch)
def test_tiny_image_captioning_feature_merge(self):
img_input_1 = Input(shape=(16, 16, 3))
x = Conv2D(2, (3, 3))(img_input_1)
x = Flatten()(x)
img_model = Model([img_input_1], [x])
img_input = Input(shape=(16, 16, 3))
x = img_model(img_input)
x = Dense(8, name="cap_dense")(x)
x = Reshape((1, 8), name="cap_reshape")(x)
sentence_input = Input(shape=(5,)) # max_length = 5
y = Embedding(8, 8, name="cap_embedding")(sentence_input)
z = concatenate([x, y], axis=1, name="cap_merge")
combined_model = Model(inputs=[img_input, sentence_input], outputs=[z])
self._test_model(combined_model, one_dim_seq_flags=[False, True])
def test_tiny_image_captioning(self):
# use a conv layer as a image feature branch
img_input_1 = Input(shape=(16, 16, 3))
x = Conv2D(2, (3, 3))(img_input_1)
x = Flatten()(x)
img_model = Model(inputs=[img_input_1], outputs=[x])
img_input = Input(shape=(16, 16, 3))
x = img_model(img_input)
x = Dense(8, name="cap_dense")(x)
x = Reshape((1, 8), name="cap_reshape")(x)
sentence_input = Input(shape=(5,)) # max_length = 5
y = Embedding(8, 8, name="cap_embedding")(sentence_input)
z = concatenate([x, y], axis=1, name="cap_merge")
z = LSTM(4, return_sequences=True, name="cap_lstm")(z)
z = TimeDistributed(Dense(8), name="cap_timedistributed")(z)
combined_model = Model(inputs=[img_input, sentence_input], outputs=[z])
self._test_model(combined_model, one_dim_seq_flags=[False, True])
def test_tiny_babi_rnn(self):
vocab_size = 10
embed_hidden_size = 8
story_maxlen = 5
query_maxlen = 5
input_tensor_1 = Input(shape=(story_maxlen,))
x1 = Embedding(vocab_size, embed_hidden_size)(input_tensor_1)
x1 = Dropout(0.3)(x1)
input_tensor_2 = Input(shape=(query_maxlen,))
x2 = Embedding(vocab_size, embed_hidden_size)(input_tensor_2)
x2 = Dropout(0.3)(x2)
x2 = LSTM(embed_hidden_size, return_sequences=False)(x2)
x2 = RepeatVector(story_maxlen)(x2)
x3 = add([x1, x2])
x3 = LSTM(embed_hidden_size, return_sequences=False)(x3)
x3 = Dropout(0.3)(x3)
x3 = Dense(vocab_size, activation="softmax")(x3)
model = Model(inputs=[input_tensor_1, input_tensor_2], outputs=[x3])
self._test_model(model, one_dim_seq_flags=[True, True])
def test_clickbait_cnn(self, model_precision=_MLMODEL_FULL_PRECISION):
# from: https://github.com/saurabhmathur96/clickbait-detector
vocabulary_size = 500
embedding_dimension = 30
input_length = 20
model = Sequential()
model.add(
Embedding(
vocabulary_size,
embedding_dimension,
input_length=input_length,
trainable=True,
)
)
model.add(Conv1D(32, 2))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv1D(32, 2))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Conv1D(32, 2))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling1D(17))
model.add(Flatten())
model.add(Dense(1, use_bias=True))
model.add(BatchNormalization())
model.add(Activation("sigmoid"))
self._test_model(
model, one_dim_seq_flags=[True], model_precision=model_precision
)
def test_clickbait_cnn_half_precision(self):
return self.test_clickbait_cnn(model_precision=_MLMODEL_HALF_PRECISION)
def test_model_with_duplicated_edges(self):
# Create a simple model
inputs = Input(shape=(20, 20))
activation = Activation("relu")(inputs)
cropping = Cropping1D(cropping=(1, 1))(activation)
conv1d = Conv1D(20, 3, padding="valid")(activation)
        outputs = Add()([conv1d, cropping])
        model = Model(inputs, outputs)
self._test_model(model)
@unittest.skipIf(not _HAS_KERAS2_TF, "Missing keras. Skipping tests.")
@pytest.mark.keras2
class KerasBasicConversionTest(KerasNumericCorrectnessTest):
def test_float_arraytype_flag(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(1000, input_shape=(100,)))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Convert model
from coremltools.converters import keras as keras_converter
coreml_model = keras_converter.convert(model, use_float_arraytype=True)
spec = coreml_model.get_spec()
from coremltools.proto import Model_pb2 as _Model_pb2
self.assertEqual(
spec.description.input[0].type.multiArrayType.dataType,
_Model_pb2.ArrayFeatureType.FLOAT32,
)
self.assertEqual(
spec.description.output[0].type.multiArrayType.dataType,
_Model_pb2.ArrayFeatureType.FLOAT32,
)
if __name__ == "__main__":
unittest.main()
# suite = unittest.TestSuite()
# suite.addTest(KerasBasicNumericCorrectnessTest("test_lstm_concat_dense_random"))
# unittest.TextTestRunner().run(suite)
|
tests/services/model_tester.py | aarontp/forseti-security | 921 | 12740501 | <filename>tests/services/model_tester.py<gh_stars>100-1000
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Installing test models against a session."""
from builtins import object
from collections import defaultdict
from google.cloud.forseti.services import utils
class ModelCreatorClient(object):
"""Model creator client."""
def __init__(self, session, data_access):
self.session = session
self.data_access = data_access
self.explain = self
def add_resource(self, resource_type_name, parent_type_name, no_parent):
return self.data_access.add_resource_by_name(self.session,
resource_type_name,
parent_type_name,
no_parent)
def add_member(self, child, parents):
return self.data_access.add_member(self.session, child, parents)
def add_role(self, role_name, permissions):
return self.data_access.add_role_by_name(self.session,
role_name,
permissions)
def get_iam_policy(self, full_resource_name):
policy_dict = self.data_access.get_iam_policy(
self.session, utils.full_to_type_name(full_resource_name))
class PolicyAccessor(dict):
def __init__(self, *args, **kwargs):
super(PolicyAccessor, self).__init__(*args, **kwargs)
self.policy = self
self.bindings = self['bindings'] if 'bindings' in self else []
self.etag = self['etag'] if 'etag' in self else None
return PolicyAccessor(policy_dict)
def set_iam_policy(self, full_resource_name, policy):
return self.data_access.set_iam_policy(
self.session, utils.full_to_type_name(full_resource_name), policy,
update_members=True)
def expand_special_members(self):
self.data_access.expand_special_members(self.session)
def commit(self):
self.session.commit()
self.data_access.denorm_group_in_group(self.session)
self.session.commit()
class ModelCreator(object):
"""Model creator."""
def __init__(self, model, client):
self._install_model(model, client)
client.commit()
def _install_model(self, model, client):
self._install_resources(model['resources'], client)
self._install_memberships(model['memberships'], client)
self._install_roles(model['roles'], client)
self._install_bindings(model['bindings'], client)
def _recursive_install_resources(self, node, model, client, parent):
"""Install resources."""
client.add_resource(node, parent, bool(not parent))
for root, tree in model.items():
self._recursive_install_resources(root, tree, client, node)
def _install_resources(self, model_view, client):
"""Install resources."""
for root, tree in model_view.items():
self._recursive_install_resources(root, tree, client, '')
def _recursive_invert_membership(self, node, model, parentship):
if node not in parentship:
parentship[node] = set()
for child in model.keys():
parentship[child].add(node)
for root, tree in model.items():
self._recursive_invert_membership(root, tree, parentship)
return parentship
def _cyclic(self, g):
path = set()
visited = set()
def visit(vertex):
if vertex in visited:
return False
visited.add(vertex)
path.add(vertex)
for neighbour in g.get(vertex, ()):
if neighbour in path or visit(neighbour):
return True
path.remove(vertex)
return False
return any(visit(v) for v in g)
def _install_memberships(self, model_view, client):
parent_relationship = defaultdict(set)
for root, tree in model_view.items():
self._recursive_invert_membership(root, tree, parent_relationship)
if self._cyclic(parent_relationship):
raise Exception('Cyclic membership relation not supported!')
installed_members = set()
while len(parent_relationship) > 0:
for child, parents in parent_relationship.items():
if parents.issubset(installed_members):
break
installed_members.add(child)
client.add_member(child, list(parents))
parent_relationship.pop(child)
def _install_roles(self, model_view, client):
for role, permissions in model_view.items():
client.add_role(role, permissions)
def _install_bindings(self, model_view, client):
for resource_name, bindings in model_view.items():
reply = client.get_iam_policy(resource_name)
if len(reply.policy.bindings) > 0:
raise Exception('policy should have been empty')
client.set_iam_policy(resource_name,
{'bindings': bindings,
'etag': reply.policy.etag})
client.expand_special_members()
|
utils/model.py | thatrobotdev/Image-Caption-Generator | 187 | 12740558 | <gh_stars>100-1000
import numpy as np
# Keras
from keras.applications.inception_v3 import InceptionV3
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Input, Dense, Dropout, LSTM, Embedding, concatenate, RepeatVector, TimeDistributed, Bidirectional
from keras.preprocessing.sequence import pad_sequences
from tqdm import tqdm
# To measure BLEU Score
from nltk.translate.bleu_score import corpus_bleu
"""
*Define the CNN model
"""
def CNNModel(model_type):
if model_type == 'inceptionv3':
model = InceptionV3()
elif model_type == 'vgg16':
model = VGG16()
model.layers.pop()
model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
return model
"""
*Define the RNN model
"""
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
embedding_size = rnnConfig['embedding_size']
if model_type == 'inceptionv3':
# InceptionV3 outputs a 2048 dimensional vector for each image, which we'll feed to RNN Model
image_input = Input(shape=(2048,))
elif model_type == 'vgg16':
# VGG16 outputs a 4096 dimensional vector for each image, which we'll feed to RNN Model
image_input = Input(shape=(4096,))
image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
image_model = Dense(embedding_size, activation='relu')(image_model_1)
caption_input = Input(shape=(max_len,))
    # mask_zero: inputs are zero-padded to the same length, and the zero mask lets downstream layers ignore the padded timesteps.
caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)
# Merging the models and creating a softmax classifier
final_model_1 = concatenate([image_model, caption_model])
final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
final_model = Dense(vocab_size, activation='softmax')(final_model_2)
model = Model(inputs=[image_input, caption_input], outputs=final_model)
model.compile(loss='categorical_crossentropy', optimizer='adam')
return model
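# Illustrative sketch (not part of the original project): one way RNNModel() above could
# be instantiated. The vocab_size, max_len and rnnConfig numbers are made-up placeholders;
# real values come from the caption tokenizer and the training data.
def _example_build_rnn_model():
    rnn_config = {'embedding_size': 256, 'dropout': 0.5, 'LSTM_units': 256, 'dense_units': 256}
    # 'inceptionv3' selects the 2048-dimensional image-feature input branch defined above.
    return RNNModel(vocab_size=5000, max_len=34, rnnConfig=rnn_config, model_type='inceptionv3')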
"""
*Define the RNN model with different architecture
"""
def AlternativeRNNModel(vocab_size, max_len, rnnConfig, model_type):
embedding_size = rnnConfig['embedding_size']
if model_type == 'inceptionv3':
# InceptionV3 outputs a 2048 dimensional vector for each image, which we'll feed to RNN Model
image_input = Input(shape=(2048,))
elif model_type == 'vgg16':
# VGG16 outputs a 4096 dimensional vector for each image, which we'll feed to RNN Model
image_input = Input(shape=(4096,))
image_model_1 = Dense(embedding_size, activation='relu')(image_input)
image_model = RepeatVector(max_len)(image_model_1)
caption_input = Input(shape=(max_len,))
    # mask_zero: inputs are zero-padded to the same length, and the zero mask lets downstream layers ignore the padded timesteps.
caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
# Since we are going to predict the next word using the previous words
# (length of previous words changes with every iteration over the caption), we have to set return_sequences = True.
caption_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=True)(caption_model_1)
# caption_model = TimeDistributed(Dense(embedding_size, activation='relu'))(caption_model_2)
caption_model = TimeDistributed(Dense(embedding_size))(caption_model_2)
# Merging the models and creating a softmax classifier
final_model_1 = concatenate([image_model, caption_model])
# final_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=False)(final_model_1)
final_model_2 = Bidirectional(LSTM(rnnConfig['LSTM_units'], return_sequences=False))(final_model_1)
# final_model_3 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_2)
# final_model = Dense(vocab_size, activation='softmax')(final_model_3)
final_model = Dense(vocab_size, activation='softmax')(final_model_2)
model = Model(inputs=[image_input, caption_input], outputs=final_model)
model.compile(loss='categorical_crossentropy', optimizer='adam')
# model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
return model
"""
*Map an integer to a word
"""
def int_to_word(integer, tokenizer):
for word, index in tokenizer.word_index.items():
if index == integer:
return word
return None
"""
*Generate a caption for an image, given a pre-trained model and a tokenizer to map integer back to word
*Uses simple argmax
"""
def generate_caption(model, tokenizer, image, max_length):
# Seed the generation process
in_text = 'startseq'
# Iterate over the whole length of the sequence
for _ in range(max_length):
# Integer encode input sequence
sequence = tokenizer.texts_to_sequences([in_text])[0]
# Pad input
sequence = pad_sequences([sequence], maxlen=max_length)
# Predict next word
# The model will output a prediction, which will be a probability distribution over all words in the vocabulary.
yhat = model.predict([image,sequence], verbose=0)
        # The output vector represents a probability distribution; the index with the maximum probability is the predicted word
# Take output class with maximum probability and convert to integer
yhat = np.argmax(yhat)
# Map integer back to word
word = int_to_word(yhat, tokenizer)
# Stop if we cannot map the word
if word is None:
break
# Append as input for generating the next word
in_text += ' ' + word
# Stop if we predict the end of the sequence
if word == 'endseq':
break
return in_text
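# Illustrative sketch (not part of the original project): a thin wrapper around
# generate_caption() that strips the startseq/endseq markers for display. photo_features
# is assumed to be the CNN feature vector (shape (1, 2048) for InceptionV3, (1, 4096) for VGG16).
def _example_clean_caption(model, tokenizer, photo_features, max_length):
    raw = generate_caption(model, tokenizer, photo_features, max_length)
    # Drop the seed token and, if present, the end-of-sequence token.
    words = [w for w in raw.split() if w not in ('startseq', 'endseq')]
    return ' '.join(words)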
"""
*Generate a caption for an image, given a pre-trained model and a tokenizer to map integer back to word
*Uses BEAM Search algorithm
"""
def generate_caption_beam_search(model, tokenizer, image, max_length, beam_index=3):
    # in_text --> [[index_sequence, prob]]; prob = 0.0 initially
in_text = [[tokenizer.texts_to_sequences(['startseq'])[0], 0.0]]
while len(in_text[0][0]) < max_length:
tempList = []
for seq in in_text:
padded_seq = pad_sequences([seq[0]], maxlen=max_length)
preds = model.predict([image,padded_seq], verbose=0)
            # Take the top `beam_index` predictions (i.e. those with the highest probabilities)
top_preds = np.argsort(preds[0])[-beam_index:]
            # Extend each candidate sequence with each of the top `beam_index` predicted words
for word in top_preds:
next_seq, prob = seq[0][:], seq[1]
next_seq.append(word)
# Update probability
prob += preds[0][word]
# Append as input for generating the next word
tempList.append([next_seq, prob])
in_text = tempList
# Sorting according to the probabilities
in_text = sorted(in_text, reverse=False, key=lambda l: l[1])
# Take the top words
in_text = in_text[-beam_index:]
in_text = in_text[-1][0]
final_caption_raw = [int_to_word(i,tokenizer) for i in in_text]
final_caption = []
for word in final_caption_raw:
if word=='endseq':
break
else:
final_caption.append(word)
final_caption.append('endseq')
return ' '.join(final_caption)
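# Worked example of the beam bookkeeping above (illustrative, beam_index=2):
# - start: in_text = [[<startseq sequence>, 0.0]]
# - each step pads and scores every kept candidate, extends it with its beam_index most
#   probable next words, sorts tempList by the accumulated score (note that raw
#   probabilities are summed here rather than log-probabilities) and keeps only the
#   last beam_index entries, i.e. the highest-scoring candidates.
# - the loop stops once the candidate sequences reach max_length; the best sequence is
#   then mapped back to words and truncated at 'endseq'.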
"""
*Evaluate the model on BLEU Score using argmax predictions
"""
def evaluate_model(model, images, captions, tokenizer, max_length):
actual, predicted = list(), list()
for image_id, caption_list in tqdm(captions.items()):
yhat = generate_caption(model, tokenizer, images[image_id], max_length)
ground_truth = [caption.split() for caption in caption_list]
actual.append(ground_truth)
predicted.append(yhat.split())
print('BLEU Scores :')
print('A perfect match results in a score of 1.0, whereas a perfect mismatch results in a score of 0.0.')
print('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))
print('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))
print('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))
print('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25)))
"""
*Evaluate the model on BLEU Score using BEAM search predictions
"""
def evaluate_model_beam_search(model, images, captions, tokenizer, max_length, beam_index=3):
actual, predicted = list(), list()
for image_id, caption_list in tqdm(captions.items()):
yhat = generate_caption_beam_search(model, tokenizer, images[image_id], max_length, beam_index=beam_index)
ground_truth = [caption.split() for caption in caption_list]
actual.append(ground_truth)
predicted.append(yhat.split())
print('BLEU Scores :')
print('A perfect match results in a score of 1.0, whereas a perfect mismatch results in a score of 0.0.')
print('BLEU-1: %f' % corpus_bleu(actual, predicted, weights=(1.0, 0, 0, 0)))
print('BLEU-2: %f' % corpus_bleu(actual, predicted, weights=(0.5, 0.5, 0, 0)))
print('BLEU-3: %f' % corpus_bleu(actual, predicted, weights=(0.3, 0.3, 0.3, 0)))
print('BLEU-4: %f' % corpus_bleu(actual, predicted, weights=(0.25, 0.25, 0.25, 0.25))) |
libnum/ecc.py | hellman/libnum | 222 | 12740562 | <filename>libnum/ecc.py
import random
from .sqrtmod import sqrtmod_prime_power, has_sqrtmod_prime_power
from .modular import invmod
__all__ = ('NULL_POINT', 'Curve')
NULL_POINT = (None, None)
class Curve:
def __init__(self, a, b, p, g=None,
order=None,
cofactor=None,
seed=None):
self.a = a
self.b = b
self.module = p
self.g = g
self.order = order
self.cofactor = cofactor
self.seed = seed
self.points_count = None
if self.cofactor == 1 and self.order is not None:
self.points_count = self.order
return None
def is_null(self, p):
"""
Check if a point is curve's null point
"""
return p == NULL_POINT
def is_opposite(self, p1, p2):
"""
Check if one point is opposite to another (p1 == -p2)
"""
x1, y1 = p1
x2, y2 = p2
return (x1 == x2 and y1 == -y2 % self.module)
def check(self, p):
"""
Check if point is on the curve
"""
x, y = p
if self.is_null(p):
return True
left = (y ** 2) % self.module
right = self.right(x)
return left == right
def check_x(self, x):
"""
Check if there is a point on the curve with given @x coordinate
"""
if x > self.module or x < 0:
raise ValueError("Value " + str(x) +
" is not in range [0; <modulus>]")
a = self.right(x)
n = self.module
if not has_sqrtmod_prime_power(a, n):
return False
ys = sqrtmod_prime_power(a, n)
        return [(x, y) for y in ys]
def right(self, x):
"""
Right part of the curve equation: x^3 + a*x + b (mod p)
"""
return (x ** 3 + self.a * x + self.b) % self.module
def find_points_in_range(self, start=0, end=None):
"""
List of points in given range for x coordinate
"""
points = []
if end is None:
end = self.module - 1
for x in range(start, end + 1):
p = self.check_x(x)
if not p:
continue
points.extend(p)
return points
def find_points_rand(self, number=1):
"""
List of @number random points on the curve
"""
points = []
while len(points) < number:
x = random.randint(0, self.module)
p = self.check_x(x)
if not p:
continue
points.append(p)
return points
def add(self, p1, p2):
"""
Sum of two points
"""
if self.is_null(p1):
return p2
if self.is_null(p2):
return p1
if self.is_opposite(p1, p2):
return NULL_POINT
x1, y1 = p1
x2, y2 = p2
l = 0
if x1 != x2:
l = (y2 - y1) * invmod(x2 - x1, self.module)
else:
l = (3 * x1 ** 2 + self.a) * invmod(2 * y1, self.module)
x = (l * l - x1 - x2) % self.module
y = (l * (x1 - x) - y1) % self.module # yes, it's that new x
return (x, y)
def power(self, p, n):
"""
n✕P or (P + P + ... + P) n times
"""
if n == 0 or self.is_null(p):
return NULL_POINT
res = NULL_POINT
while n:
if n & 1:
res = self.add(res, p)
p = self.add(p, p)
n >>= 1
return res
def generate(self, n):
"""
        Convenience wrapper: n*G using the curve's generator point self.g
"""
return self.power(self.g, n)
def get_order(self, p, limit=None):
"""
Tries to calculate order of @p, returns None if @limit is reached
(SLOW method)
"""
order = 1
res = p
while not self.is_null(res):
res = self.add(res, p)
order += 1
if limit is not None and order >= limit:
return None
return order
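# Illustrative sketch (not part of the original library): exercising add() and power()
# on a tiny made-up curve y^2 = x^3 + 2x + 3 (mod 97). Real use needs a standardized
# curve over a large prime; the point (3, 6) lies on this toy curve since 6^2 = 27 + 6 + 3.
def _example_tiny_curve():
    curve = Curve(a=2, b=3, p=97, g=(3, 6))
    assert curve.check(curve.g)
    doubled = curve.add(curve.g, curve.g)      # point-doubling branch (x1 == x2)
    assert curve.check(doubled)
    multiple = curve.power(curve.g, 20)        # double-and-add scalar multiplication
    assert curve.is_null(multiple) or curve.check(multiple)
    return doubled, multiple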
|
experiments/lut_init/fuzz_lut_init.py | Keno/prjtrellis | 256 | 12740573 | <reponame>Keno/prjtrellis<gh_stars>100-1000
#!/usr/bin/env python3
import os
from os import path
import shutil
import diamond
from string import Template
import pytrellis
device = "LFE5U-25F"
lut_inputs = ("A", "B", "C", "D")
def run_get_bits(init_bits):
sop_terms = []
for i in range(16):
if init_bits & (1 << i) != 0:
p_terms = []
for j in range(4):
if i & (1 << j) != 0:
p_terms.append(lut_inputs[j])
else:
p_terms.append("~" + lut_inputs[j])
sop_terms.append("({})".format("*".join(p_terms)))
if len(sop_terms) == 0:
lut_func = "0"
else:
lut_func = "+".join(sop_terms)
with open("lut_init_template.ncl", "r") as inf:
with open("work/lut_init.ncl", "w") as ouf:
ouf.write(Template(inf.read()).substitute(lut_func=lut_func))
diamond.run(device, "work/lut_init.ncl")
bs = pytrellis.Bitstream.read_bit("work/lut_init.bit")
chip = bs.deserialise_chip()
tile = chip.tiles["R2C2:PLC2"]
return tile.cram
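# Example of the sum-of-products expression built above (illustrative): for
# init_bits = 1 << 5, the only set bit is i = 5 = 0b0101, so inputs A and C are taken
# as-is while B and D are inverted, giving lut_func = "(A*~B*C*~D)".
# For init_bits = 0 no product terms are generated and lut_func is simply "0",
# which is what main() uses as the baseline bitstream.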
def main():
pytrellis.load_database("../../../prjtrellis-db")
shutil.rmtree("work", ignore_errors=True)
os.mkdir("work")
baseline = run_get_bits(0)
with open("lut_bits_out.txt", "w") as f:
for i in range(16):
bits = run_get_bits(1 << i)
diff = bits - baseline
assert len(diff) == 1
inv = "!" if diff[0].delta < 0 else ""
print("INIT[{}]\t{}({}, {})".format(i, inv, diff[0].bit, diff[0].frame), file=f)
f.flush()
if __name__ == "__main__":
main() |
salt/utils/stringutils.py | markgras/salt | 9,425 | 12740574 | """
Functions for manipulating or otherwise processing strings
"""
import base64
import difflib
import errno
import fnmatch
import logging
import os
import re
import shlex
import time
import unicodedata
from salt.utils.decorators.jinja import jinja_filter
log = logging.getLogger(__name__)
@jinja_filter("to_bytes")
def to_bytes(s, encoding=None, errors="strict"):
"""
Given bytes, bytearray, str, or unicode (python 2), return bytes (str for
python 2)
"""
if encoding is None:
# Try utf-8 first, and fall back to detected encoding
encoding = ("utf-8", __salt_system_encoding__)
if not isinstance(encoding, (tuple, list)):
encoding = (encoding,)
if not encoding:
raise ValueError("encoding cannot be empty")
exc = None
if isinstance(s, bytes):
return s
if isinstance(s, bytearray):
return bytes(s)
if isinstance(s, str):
for enc in encoding:
try:
return s.encode(enc, errors)
except UnicodeEncodeError as err:
exc = err
continue
# The only way we get this far is if a UnicodeEncodeError was
# raised, otherwise we would have already returned (or raised some
# other exception).
raise exc # pylint: disable=raising-bad-type
raise TypeError("expected str, bytes, or bytearray not {}".format(type(s)))
def to_str(s, encoding=None, errors="strict", normalize=False):
"""
Given str, bytes, bytearray, or unicode (py2), return str
"""
def _normalize(s):
try:
return unicodedata.normalize("NFC", s) if normalize else s
except TypeError:
return s
if encoding is None:
# Try utf-8 first, and fall back to detected encoding
encoding = ("utf-8", __salt_system_encoding__)
if not isinstance(encoding, (tuple, list)):
encoding = (encoding,)
if not encoding:
raise ValueError("encoding cannot be empty")
if isinstance(s, str):
return _normalize(s)
exc = None
if isinstance(s, (bytes, bytearray)):
for enc in encoding:
try:
return _normalize(s.decode(enc, errors))
except UnicodeDecodeError as err:
exc = err
continue
# The only way we get this far is if a UnicodeDecodeError was
# raised, otherwise we would have already returned (or raised some
# other exception).
raise exc # pylint: disable=raising-bad-type
raise TypeError("expected str, bytes, or bytearray not {}".format(type(s)))
def to_unicode(s, encoding=None, errors="strict", normalize=False):
"""
Given str or unicode, return unicode (str for python 3)
"""
def _normalize(s):
return unicodedata.normalize("NFC", s) if normalize else s
if encoding is None:
# Try utf-8 first, and fall back to detected encoding
encoding = ("utf-8", __salt_system_encoding__)
if not isinstance(encoding, (tuple, list)):
encoding = (encoding,)
if not encoding:
raise ValueError("encoding cannot be empty")
if isinstance(s, str):
return _normalize(s)
elif isinstance(s, (bytes, bytearray)):
return _normalize(to_str(s, encoding, errors))
raise TypeError("expected str, bytes, or bytearray not {}".format(type(s)))
@jinja_filter("str_to_num")
@jinja_filter("to_num")
def to_num(text):
"""
Convert a string to a number.
Returns an integer if the string represents an integer, a floating
point number if the string is a real number, or the string unchanged
otherwise.
"""
try:
return int(text)
except ValueError:
try:
return float(text)
except ValueError:
return text
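# Illustrative examples (not in the original module) of to_num() behaviour:
def _example_to_num():
    assert to_num("42") == 42
    assert to_num("2.5") == 2.5
    assert to_num("forty-two") == "forty-two"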
def to_none(text):
"""
Convert a string to None if the string is empty or contains only spaces.
"""
if str(text).strip():
return text
return None
def is_quoted(value):
"""
Return a single or double quote, if a string is wrapped in extra quotes.
Otherwise return an empty string.
"""
ret = ""
if (
isinstance(value, str)
and value[0] == value[-1]
and value.startswith(("'", '"'))
):
ret = value[0]
return ret
def dequote(value):
"""
Remove extra quotes around a string.
"""
if is_quoted(value):
return value[1:-1]
return value
@jinja_filter("is_hex")
def is_hex(value):
"""
Returns True if value is a hexadecimal string, otherwise returns False
"""
try:
int(value, 16)
return True
except (TypeError, ValueError):
return False
def is_binary(data):
"""
Detects if the passed string of data is binary or text
"""
if not data or not isinstance(data, ((str,), bytes)):
return False
if isinstance(data, bytes):
if b"\0" in data:
return True
elif "\0" in data:
return True
text_characters = "".join([chr(x) for x in range(32, 127)] + list("\n\r\t\b"))
# Get the non-text characters (map each character to itself then use the
# 'remove' option to get rid of the text characters.)
if isinstance(data, bytes):
import salt.utils.data
nontext = data.translate(None, salt.utils.data.encode(text_characters))
else:
trans = "".maketrans("", "", text_characters)
nontext = data.translate(trans)
# If more than 30% non-text characters, then
# this is considered binary data
if float(len(nontext)) / len(data) > 0.30:
return True
return False
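# Illustrative examples (not in the original module): a NUL byte is an immediate
# "binary" verdict, otherwise the 30% non-text threshold above decides.
def _example_is_binary():
    assert is_binary(b"\x00\x01\x02 not text")
    assert not is_binary("just ordinary text\n")
    assert not is_binary("")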
@jinja_filter("random_str")
def random(size=32):
key = os.urandom(size)
return to_unicode(base64.b64encode(key).replace(b"\n", b"")[:size])
@jinja_filter("contains_whitespace")
def contains_whitespace(text):
"""
Returns True if there are any whitespace characters in the string
"""
return any(x.isspace() for x in text)
def human_to_bytes(size):
"""
Given a human-readable byte string (e.g. 2G, 30M),
return the number of bytes. Will return 0 if the argument has
unexpected form.
.. versionadded:: 2018.3.0
"""
sbytes = size[:-1]
unit = size[-1]
if sbytes.isdigit():
sbytes = int(sbytes)
if unit == "P":
sbytes *= 1125899906842624
elif unit == "T":
sbytes *= 1099511627776
elif unit == "G":
sbytes *= 1073741824
elif unit == "M":
sbytes *= 1048576
else:
sbytes = 0
else:
sbytes = 0
return sbytes
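# Illustrative examples (not in the original module): only the P/T/G/M suffixes are
# recognised above; anything else (including kilobytes) falls through to 0.
def _example_human_to_bytes():
    assert human_to_bytes("2G") == 2 * 1073741824
    assert human_to_bytes("30M") == 30 * 1048576
    assert human_to_bytes("512K") == 0
    assert human_to_bytes("oops") == 0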
def build_whitespace_split_regex(text):
'''
Create a regular expression at runtime which should match ignoring the
addition or deletion of white space or line breaks, unless between commas
Example:
.. code-block:: python
>>> import re
>>> import salt.utils.stringutils
>>> regex = salt.utils.stringutils.build_whitespace_split_regex(
... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then"""
... )
>>> regex
'(?:[\\s]+)?if(?:[\\s]+)?\\[(?:[\\s]+)?\\-z(?:[\\s]+)?\\"\\$debian'
'\\_chroot\\"(?:[\\s]+)?\\](?:[\\s]+)?\\&\\&(?:[\\s]+)?\\[(?:[\\s]+)?'
'\\-r(?:[\\s]+)?\\/etc\\/debian\\_chroot(?:[\\s]+)?\\]\\;(?:[\\s]+)?'
'then(?:[\\s]+)?'
>>> re.search(
... regex,
... """if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then"""
... )
<_sre.SRE_Match object at 0xb70639c0>
>>>
'''
def __build_parts(text):
lexer = shlex.shlex(text)
lexer.whitespace_split = True
lexer.commenters = ""
if r"'\"" in text:
lexer.quotes = ""
elif "'" in text:
lexer.quotes = '"'
elif '"' in text:
lexer.quotes = "'"
return list(lexer)
regex = r""
for line in text.splitlines():
parts = [re.escape(s) for s in __build_parts(line)]
regex += r"(?:[\s]+)?{}(?:[\s]+)?".format(r"(?:[\s]+)?".join(parts))
return r"(?m)^{}$".format(regex)
def expr_match(line, expr):
"""
Checks whether or not the passed value matches the specified expression.
Tries to match expr first as a glob using fnmatch.fnmatch(), and then tries
to match expr as a regular expression. Originally designed to match minion
IDs for whitelists/blacklists.
Note that this also does exact matches, as fnmatch.fnmatch() will return
``True`` when no glob characters are used and the string is an exact match:
.. code-block:: python
>>> fnmatch.fnmatch('foo', 'foo')
True
"""
try:
if fnmatch.fnmatch(line, expr):
return True
try:
if re.match(r"\A{}\Z".format(expr), line):
return True
except re.error:
pass
except TypeError:
log.exception("Value %r or expression %r is not a string", line, expr)
return False
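# Illustrative examples (not in the original module): the glob check runs first and an
# anchored regex match is tried as a fallback.
def _example_expr_match():
    assert expr_match("web01.example.com", "web*")   # glob match
    assert expr_match("web01", r"web\d+")            # regex fallback
    assert not expr_match("db01", "web*")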
@jinja_filter("check_whitelist_blacklist")
def check_whitelist_blacklist(value, whitelist=None, blacklist=None):
"""
Check a whitelist and/or blacklist to see if the value matches it.
value
The item to check the whitelist and/or blacklist against.
whitelist
The list of items that are white-listed. If ``value`` is found
in the whitelist, then the function returns ``True``. Otherwise,
it returns ``False``.
blacklist
The list of items that are black-listed. If ``value`` is found
in the blacklist, then the function returns ``False``. Otherwise,
it returns ``True``.
If both a whitelist and a blacklist are provided, value membership
in the blacklist will be examined first. If the value is not found
in the blacklist, then the whitelist is checked. If the value isn't
found in the whitelist, the function returns ``False``.
"""
# Normalize the input so that we have a list
if blacklist:
if isinstance(blacklist, str):
blacklist = [blacklist]
if not hasattr(blacklist, "__iter__"):
raise TypeError(
"Expecting iterable blacklist, but got {} ({})".format(
type(blacklist).__name__, blacklist
)
)
else:
blacklist = []
if whitelist:
if isinstance(whitelist, str):
whitelist = [whitelist]
if not hasattr(whitelist, "__iter__"):
raise TypeError(
"Expecting iterable whitelist, but got {} ({})".format(
type(whitelist).__name__, whitelist
)
)
else:
whitelist = []
_blacklist_match = any(expr_match(value, expr) for expr in blacklist)
_whitelist_match = any(expr_match(value, expr) for expr in whitelist)
if blacklist and not whitelist:
# Blacklist but no whitelist
return not _blacklist_match
elif whitelist and not blacklist:
# Whitelist but no blacklist
return _whitelist_match
elif blacklist and whitelist:
# Both whitelist and blacklist
return not _blacklist_match and _whitelist_match
else:
# No blacklist or whitelist passed
return True
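# Illustrative examples (not in the original module): a blacklist hit always rejects the
# value, and when both lists are given the value must also match the whitelist.
def _example_check_whitelist_blacklist():
    assert check_whitelist_blacklist("minion-web1", whitelist=["minion-web*"])
    assert not check_whitelist_blacklist("minion-db1", blacklist=["minion-db*"])
    assert not check_whitelist_blacklist(
        "minion-db1", whitelist=["minion-*"], blacklist=["minion-db*"])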
def check_include_exclude(path_str, include_pat=None, exclude_pat=None):
"""
Check for glob or regexp patterns for include_pat and exclude_pat in the
'path_str' string and return True/False conditions as follows.
- Default: return 'True' if no include_pat or exclude_pat patterns are
supplied
- If only include_pat or exclude_pat is supplied: return 'True' if string
passes the include_pat test or fails exclude_pat test respectively
- If both include_pat and exclude_pat are supplied: return 'True' if
include_pat matches AND exclude_pat does not match
"""
def _pat_check(path_str, check_pat):
if re.match("E@", check_pat):
return True if re.search(check_pat[2:], path_str) else False
else:
return True if fnmatch.fnmatch(path_str, check_pat) else False
ret = True # -- default true
# Before pattern match, check if it is regexp (E@'') or glob(default)
if include_pat:
if isinstance(include_pat, list):
for include_line in include_pat:
retchk_include = _pat_check(path_str, include_line)
if retchk_include:
break
else:
retchk_include = _pat_check(path_str, include_pat)
if exclude_pat:
if isinstance(exclude_pat, list):
for exclude_line in exclude_pat:
retchk_exclude = not _pat_check(path_str, exclude_line)
if not retchk_exclude:
break
else:
retchk_exclude = not _pat_check(path_str, exclude_pat)
# Now apply include/exclude conditions
if include_pat and not exclude_pat:
ret = retchk_include
elif exclude_pat and not include_pat:
ret = retchk_exclude
elif include_pat and exclude_pat:
ret = retchk_include and retchk_exclude
else:
ret = True
return ret
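# Illustrative examples (not in the original module): plain patterns are treated as
# globs, while patterns prefixed with "E@" are treated as regular expressions.
def _example_check_include_exclude():
    assert check_include_exclude("/srv/app/config.yml", include_pat="*.yml")
    assert not check_include_exclude("/srv/app/cache.tmp", exclude_pat="E@\\.tmp$")
    assert check_include_exclude("/srv/app/config.yml")    # no patterns -> True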
def print_cli(msg, retries=10, step=0.01):
"""
Wrapper around print() that suppresses tracebacks on broken pipes (i.e.
when salt output is piped to less and less is stopped prematurely).
"""
while retries:
try:
try:
print(msg)
except UnicodeEncodeError:
print(msg.encode("utf-8"))
except OSError as exc:
err = "{}".format(exc)
if exc.errno != errno.EPIPE:
if (
"temporarily unavailable" in err or exc.errno in (errno.EAGAIN,)
) and retries:
time.sleep(step)
retries -= 1
continue
else:
raise
break
def get_context(template, line, num_lines=5, marker=None):
"""
Returns debugging context around a line in a given string
Returns:: string
"""
template_lines = template.splitlines()
num_template_lines = len(template_lines)
    # In test mode, a single-line template would return an out-of-range line number
    # (e.g. 357). Do this sanity check and if the given line is obviously wrong, just
# return the entire template
if line > num_template_lines:
return template
context_start = max(0, line - num_lines - 1) # subt 1 for 0-based indexing
context_end = min(num_template_lines, line + num_lines)
error_line_in_context = line - context_start - 1 # subtr 1 for 0-based idx
buf = []
if context_start > 0:
buf.append("[...]")
error_line_in_context += 1
buf.extend(template_lines[context_start:context_end])
if context_end < num_template_lines:
buf.append("[...]")
if marker:
buf[error_line_in_context] += marker
return "---\n{}\n---".format("\n".join(buf))
def get_diff(a, b, *args, **kwargs):
"""
Perform diff on two iterables containing lines from two files, and return
    the diff as a string. Lines are normalized to str types to avoid issues
with unicode on PY2.
"""
encoding = ("utf-8", "latin-1", __salt_system_encoding__)
# Late import to avoid circular import
import salt.utils.data
return "".join(
difflib.unified_diff(
salt.utils.data.decode_list(a, encoding=encoding),
salt.utils.data.decode_list(b, encoding=encoding),
*args,
**kwargs
)
)
@jinja_filter("to_snake_case")
def camel_to_snake_case(camel_input):
"""
Converts camelCase (or CamelCase) to snake_case.
From https://codereview.stackexchange.com/questions/185966/functions-to-convert-camelcase-strings-to-snake-case
:param str camel_input: The camelcase or CamelCase string to convert to snake_case
:return str
"""
res = camel_input[0].lower()
for i, letter in enumerate(camel_input[1:], 1):
if letter.isupper():
if camel_input[i - 1].islower() or (
i != len(camel_input) - 1 and camel_input[i + 1].islower()
):
res += "_"
res += letter.lower()
return res
@jinja_filter("to_camelcase")
def snake_to_camel_case(snake_input, uppercamel=False):
"""
Converts snake_case to camelCase (or CamelCase if uppercamel is ``True``).
Inspired by https://codereview.stackexchange.com/questions/85311/transform-snake-case-to-camelcase
:param str snake_input: The input snake_case string to convert to camelCase
:param bool uppercamel: Whether or not to convert to CamelCase instead
:return str
"""
words = snake_input.split("_")
if uppercamel:
words[0] = words[0].capitalize()
return words[0] + "".join(word.capitalize() for word in words[1:])
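# Illustrative examples (not in the original module) of the two conversions above:
def _example_case_conversions():
    assert camel_to_snake_case("camelCaseInput") == "camel_case_input"
    assert camel_to_snake_case("HTTPResponse") == "http_response"
    assert snake_to_camel_case("snake_case_input") == "snakeCaseInput"
    assert snake_to_camel_case("snake_case_input", uppercamel=True) == "SnakeCaseInput"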
|
Bot/Trade.py | mtranhoangson/bot | 199 | 12740575 | <filename>Bot/Trade.py
from collections import OrderedDict
import uuid
from typing import List
from Bot.EntryExitSettings import EntryExitSettings
from Bot.TradeEnums import Side
from Bot.StopLossSettings import StopLossSettings
from Bot.Target import *
class Trade(CustomSerializable):
# def __init__(self, symbol, side, asset, status=None, sl_settings=None, entry=None, exit=None):
def __init__(self, symbol, side, asset, status=None, *args, **kvargs):
self.side = Side(side.lower())
self.symbol = symbol.upper()
self.asset = asset.upper()
self.entry: EntryExitSettings = None
self.exit: EntryExitSettings = None
self._init_entry_exit(True, kvargs.get('entry'), self.side)
self._init_entry_exit(False, kvargs.get('exit'), self.side)
sl_settings = kvargs.get('stoploss', kvargs.get('sl_settings'))
# self.sl_settings: StopLossSettings = StopLossSettings(**sl_settings) if sl_settings else None
self.sl_settings: StopLossSettings = StopLossSettings(**sl_settings) if sl_settings else StopLossSettings({})
if status:
self.status = OrderStatus(status.lower())
else:
self.status = OrderStatus.ACTIVE if not kvargs.get('entry') else OrderStatus.NEW
self.cap = float(kvargs.get('cap')) if kvargs.get('cap') else None
self.id = kvargs.get('id', None)
if not self.id:
self.id = str(uuid.uuid4())
def _init_entry_exit(self, is_entry, data, side: Side):
if data:
if 'side' not in data:
data['side'] = (side.reverse() if is_entry else side).value
# TODO: right now there is only Smart Entry option allowed
if is_entry:
data['smart'] = True
self.entry = EntryExitSettings(is_entry=is_entry, **data)
else:
self.exit = EntryExitSettings(is_entry=is_entry, **data)
def get_cap(self, available_balance):
return min(self.cap if self.cap else available_balance, available_balance)
def is_sell(self):
return self.side.is_sell()
def has_entry(self):
return self.entry is not None
def has_exit(self):
return self.exit is not None
def has_stoploss(self):
return self.sl_settings is not None and self.sl_settings.initial_target
def has_stoploss_in_last_completed_target(self):
completed_targets = self.get_completed_exit_targets()
has_completed_targets = len(completed_targets) > 0
return has_completed_targets and completed_targets[-1].has_custom_stop()
def get_completed_exit_targets(self) -> List[Target]:
if not self.exit:
return []
return self.exit.get_completed_targets()
def get_initial_stop(self) -> Target:
if self.sl_settings:
return self.sl_settings.initial_target
return None
def serializable_dict(self):
d = OrderedDict()
d['id'] = self.id
d['asset'] = self.asset
d['symbol'] = self.symbol
d['side'] = self.side
d['status'] = self.status
if self.cap:
d['cap'] = self.format_float(self.cap)
if self.entry:
d['entry'] = self.entry
if self.exit:
d['exit'] = self.exit
if self.sl_settings:
d['stoploss'] = self.sl_settings
return d
def get_all_active_placed_targets(self) -> List[Target]:
tgt = []
if self.has_exit():
tgt.extend(self.exit.targets)
if self.has_entry():
tgt.extend(self.entry.targets)
if self.has_stoploss():
tgt.append(self.sl_settings.initial_target)
return [t for t in tgt if not t.is_completed() and t.has_id()]
def is_completed(self):
return self.status.is_completed()
def is_active(self):
return self.status.is_active()
def is_new(self):
return self.status.is_new()
def is_removed(self):
return self.status.is_removed()
def set_active(self):
self.status = OrderStatus.ACTIVE
def set_completed(self):
self.status = OrderStatus.COMPLETED
def set_removed(self):
self.status = OrderStatus.REMOVED
def __str__(self):
return '{}({}): {}'.format(self.symbol, self.id, self.side)
def describe(self):
description = self.__str__()
if self.has_entry():
description += '\n'+self.entry.describe()
if self.has_exit():
description +='\n'+self.exit.describe()
if self.has_stoploss():
description += '\n'+self.sl_settings.describe()
return description
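# --- Usage sketch (added for illustration; not part of the original file) ---
# Builds a bare Trade and prints its serialized form. The values below are
# placeholders: Side is assumed to accept 'buy'/'sell', and the schema of the
# optional 'entry'/'exit'/'stoploss' dictionaries is defined elsewhere in the
# project, so they are simply omitted here.
if __name__ == '__main__':
    example_trade = Trade('BTCUSDT', 'sell', 'BTC', cap=0.5)
    print(example_trade)                      # e.g. "BTCUSDT(<uuid>): Side.SELL"
    print(example_trade.serializable_dict())  # OrderedDict ready for JSON dumping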
|
tacker/vnfm/infra_drivers/openstack/update_template.py | h1r0mu/tacker | 116 | 12740577 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tacker.common import log
LOG = logging.getLogger(__name__)
class HOTUpdater(object):
"""Update HOT template."""
def __init__(self, heatclient):
self.heatclient = heatclient
self.template = {}
self.nested_templates = dict()
@log.log
def get_templates_from_stack(self, stack_id):
"""Get template information from the stack.
        Get the template of the stack specified by stack_id;
        if the stack has a scalable resource, also get its
        child template.
"""
def _get_resource(name, resources):
for resource in resources:
if resource.resource_name == name:
return resource
self.template = self.heatclient.stacks.template(stack_id)
LOG.debug('got main template for stack({}). template={}'.format(
stack_id, self.template))
stack_resources = self.heatclient.resource_get_list(stack_id,
nested_depth=2)
for resource in stack_resources:
if resource.resource_type == 'OS::Heat::AutoScalingGroup':
intermediate_template = self.heatclient.stacks.template(
resource.physical_resource_id)
for resource_id in intermediate_template['resources'].keys():
corresponding_resource = _get_resource(resource_id,
stack_resources)
nested_template = self.heatclient.stacks.template(
corresponding_resource.physical_resource_id)
LOG.debug('got nested template for stack({}). template={}'
.format(corresponding_resource.physical_resource_id,
nested_template))
if nested_template:
self.nested_templates[
corresponding_resource.resource_type] = nested_template
@log.log
def update_resource_property(self,
resource_id,
resource_types=[],
**kwargs):
"""Update attributes of resource properties.
Get the resource information from template's resources section,
and update properties using kwargs information.
        If the resource type is not included in resource_types, nothing is done.
"""
def _update(template, resource_id, resource_types, kwargs):
resource = template.get('resources', {}).get(resource_id)
if not resource:
return
if resource.get('type', {}) not in resource_types:
return
resource_properties = resource.get('properties', {})
if not resource_properties:
return
for key, value in kwargs.items():
if value is not None:
resource_properties.update({key: value})
elif resource_properties.get(key):
del resource_properties[key]
_update(self.template, resource_id, resource_types, kwargs)
for value in self.nested_templates.values():
nested_template = value
_update(nested_template, resource_id, resource_types, kwargs)
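# --- Usage sketch (added for illustration; not part of the original module) ---
# Shows how HOTUpdater is intended to be driven: pull the templates of an
# existing stack, then patch a property on one of its resources. 'heatclient',
# 'stack_id', the resource id 'VDU1', the resource type and the 'flavor' value
# are placeholders/assumptions, not values taken from this module.
def _example_update_flavor(heatclient, stack_id):
    updater = HOTUpdater(heatclient)
    updater.get_templates_from_stack(stack_id)
    updater.update_resource_property('VDU1',
                                     resource_types=['OS::Nova::Server'],
                                     flavor='m1.small')
    # The patched main and nested templates can now be passed to a stack update.
    return updater.template, updater.nested_templates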
|
src/skmultiflow/meta/adaptive_random_forests.py | denisesato/scikit-multiflow | 663 | 12740583 | <gh_stars>100-1000
from copy import deepcopy
import math
import itertools
import numpy as np
from skmultiflow.core import BaseSKMObject, ClassifierMixin, MetaEstimatorMixin
from skmultiflow.drift_detection.base_drift_detector import BaseDriftDetector
from skmultiflow.drift_detection import ADWIN
from skmultiflow.trees.arf_hoeffding_tree import ARFHoeffdingTreeClassifier
from skmultiflow.metrics import ClassificationPerformanceEvaluator
from skmultiflow.utils import get_dimensions, normalize_values_in_dict, check_random_state,\
check_weights
import warnings
def AdaptiveRandomForest(n_estimators=10,
max_features='auto',
disable_weighted_vote=False,
lambda_value=6,
performance_metric='acc',
drift_detection_method: BaseDriftDetector = ADWIN(0.001),
warning_detection_method: BaseDriftDetector = ADWIN(0.01),
max_byte_size=33554432,
memory_estimate_period=2000000,
grace_period=50,
split_criterion='info_gain',
split_confidence=0.01,
tie_threshold=0.05,
binary_split=False,
stop_mem_management=False,
remove_poor_atts=False,
no_preprune=False,
leaf_prediction='nba',
nb_threshold=0,
nominal_attributes=None,
random_state=None): # pragma: no cover
    warnings.warn("'AdaptiveRandomForest' has been renamed to 'AdaptiveRandomForestClassifier' "
"in v0.5.0.\nThe old name will be removed in v0.7.0", category=FutureWarning)
return AdaptiveRandomForestClassifier(n_estimators=n_estimators,
max_features=max_features,
disable_weighted_vote=disable_weighted_vote,
lambda_value=lambda_value,
performance_metric=performance_metric,
drift_detection_method=drift_detection_method,
warning_detection_method=warning_detection_method,
max_byte_size=max_byte_size,
memory_estimate_period=memory_estimate_period,
grace_period=grace_period,
split_criterion=split_criterion,
split_confidence=split_confidence,
tie_threshold=tie_threshold,
binary_split=binary_split,
stop_mem_management=stop_mem_management,
remove_poor_atts=remove_poor_atts,
no_preprune=no_preprune,
leaf_prediction=leaf_prediction,
nb_threshold=nb_threshold,
nominal_attributes=nominal_attributes,
random_state=random_state)
class AdaptiveRandomForestClassifier(BaseSKMObject, ClassifierMixin, MetaEstimatorMixin):
"""Adaptive Random Forest classifier.
Parameters
----------
n_estimators: int, optional (default=10)
Number of trees in the ensemble.
max_features : int, float, string or None, optional (default="auto")
Max number of attributes for each node split.
- If int, then consider ``max_features`` features at each split.
- If float, then ``max_features`` is a percentage and
``int(max_features * n_features)`` features are considered at each split.
- If "auto", then ``max_features=sqrt(n_features)``.
- If "sqrt", then ``max_features=sqrt(n_features)`` (same as "auto").
- If "log2", then ``max_features=log2(n_features)``.
- If None, then ``max_features=n_features``.
disable_weighted_vote: bool, optional (default=False)
Weighted vote option.
lambda_value: int, optional (default=6)
The lambda value for bagging (lambda=6 corresponds to Leverage Bagging).
performance_metric: string, optional (default="acc")
Metric used to track trees performance within the ensemble.
- 'acc' - Accuracy
        - 'kappa' - Kappa statistic
drift_detection_method: BaseDriftDetector or None, optional (default=ADWIN(0.001))
Drift Detection method. Set to None to disable Drift detection.
warning_detection_method: BaseDriftDetector or None, default(ADWIN(0.01))
Warning Detection method. Set to None to disable warning detection.
max_byte_size: int, optional (default=33554432)
(`ARFHoeffdingTreeClassifier` parameter)
Maximum memory consumed by the tree.
memory_estimate_period: int, optional (default=2000000)
(`ARFHoeffdingTreeClassifier` parameter)
Number of instances between memory consumption checks.
grace_period: int, optional (default=50)
(`ARFHoeffdingTreeClassifier` parameter)
Number of instances a leaf should observe between split attempts.
split_criterion: string, optional (default='info_gain')
(`ARFHoeffdingTreeClassifier` parameter)
Split criterion to use.
- 'gini' - Gini
- 'info_gain' - Information Gain
split_confidence: float, optional (default=0.01)
(`ARFHoeffdingTreeClassifier` parameter)
Allowed error in split decision, a value closer to 0 takes longer to decide.
tie_threshold: float, optional (default=0.05)
(`ARFHoeffdingTreeClassifier` parameter)
Threshold below which a split will be forced to break ties.
binary_split: bool, optional (default=False)
(`ARFHoeffdingTreeClassifier` parameter)
If True, only allow binary splits.
stop_mem_management: bool, optional (default=False)
(`ARFHoeffdingTreeClassifier` parameter)
If True, stop growing as soon as memory limit is hit.
remove_poor_atts: bool, optional (default=False)
(`ARFHoeffdingTreeClassifier` parameter)
If True, disable poor attributes.
no_preprune: bool, optional (default=False)
(`ARFHoeffdingTreeClassifier` parameter)
If True, disable pre-pruning.
leaf_prediction: string, optional (default='nba')
(`ARFHoeffdingTreeClassifier` parameter)
Prediction mechanism used at leafs.
- 'mc' - Majority Class
- 'nb' - Naive Bayes
- 'nba' - Naive Bayes Adaptive
nb_threshold: int, optional (default=0)
(`ARFHoeffdingTreeClassifier` parameter)
Number of instances a leaf should observe before allowing Naive Bayes.
nominal_attributes: list, optional
(`ARFHoeffdingTreeClassifier` parameter)
        List of Nominal attributes. If empty, then assume that all attributes are numerical.
random_state: int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used by `np.random`.
Notes
-----
The 3 most important aspects of Adaptive Random Forest [1]_ are:
(1) inducing diversity through re-sampling;
(2) inducing diversity through randomly selecting subsets of features for node splits
(see skmultiflow.classification.trees.arf_hoeffding_tree);
(3) drift detectors per base tree, which cause selective resets in response to drifts.
It also allows training background trees, which start training if a warning is detected
and replace the active tree if the warning escalates to a drift.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>.
Adaptive random forests for evolving data stream classification.
In Machine Learning, DOI: 10.1007/s10994-017-5642-8, Springer, 2017.
Examples
--------
>>> # Imports
>>> from skmultiflow.data import SEAGenerator
>>> from skmultiflow.meta import AdaptiveRandomForestClassifier
>>>
>>> # Setting up a data stream
>>> stream = SEAGenerator(random_state=1)
>>>
>>> # Setup Adaptive Random Forest Classifier
>>> arf = AdaptiveRandomForestClassifier()
>>>
>>> # Setup variables to control loop and track performance
>>> n_samples = 0
>>> correct_cnt = 0
>>> max_samples = 200
>>>
>>> # Train the estimator with the samples provided by the data stream
>>> while n_samples < max_samples and stream.has_more_samples():
>>> X, y = stream.next_sample()
>>> y_pred = arf.predict(X)
>>> if y[0] == y_pred[0]:
>>> correct_cnt += 1
>>> arf.partial_fit(X, y)
>>> n_samples += 1
>>>
>>> # Display results
>>> print('Adaptive Random Forest ensemble classifier example')
>>> print('{} samples analyzed.'.format(n_samples))
>>> print('Accuracy: {}'.format(correct_cnt / n_samples))
"""
def __init__(self,
n_estimators=10,
max_features='auto',
disable_weighted_vote=False,
lambda_value=6,
performance_metric='acc',
drift_detection_method: BaseDriftDetector = ADWIN(0.001),
warning_detection_method: BaseDriftDetector = ADWIN(0.01),
max_byte_size=33554432,
memory_estimate_period=2000000,
grace_period=50,
split_criterion='info_gain',
split_confidence=0.01,
tie_threshold=0.05,
binary_split=False,
stop_mem_management=False,
remove_poor_atts=False,
no_preprune=False,
leaf_prediction='nba',
nb_threshold=0,
nominal_attributes=None,
random_state=None):
"""AdaptiveRandomForestClassifier class constructor."""
super().__init__()
self.n_estimators = n_estimators
self.max_features = max_features
self.disable_weighted_vote = disable_weighted_vote
self.lambda_value = lambda_value
if isinstance(drift_detection_method, BaseDriftDetector):
self.drift_detection_method = drift_detection_method
else:
self.drift_detection_method = None
if isinstance(warning_detection_method, BaseDriftDetector):
self.warning_detection_method = warning_detection_method
else:
self.warning_detection_method = None
self.instances_seen = 0
self.classes = None
self._train_weight_seen_by_model = 0.0
self.ensemble = None
self.random_state = random_state
self._random_state = check_random_state(self.random_state) # Actual random_state object
if performance_metric in ['acc', 'kappa']:
self.performance_metric = performance_metric
else:
raise ValueError('Invalid performance metric: {}'.format(performance_metric))
        # ARF Hoeffding Tree configuration
self.max_byte_size = max_byte_size
        self.memory_estimate_period = memory_estimate_period
self.grace_period = grace_period
self.split_criterion = split_criterion
self.split_confidence = split_confidence
self.tie_threshold = tie_threshold
self.binary_split = binary_split
self.stop_mem_management = stop_mem_management
self.remove_poor_atts = remove_poor_atts
self.no_preprune = no_preprune
self.leaf_prediction = leaf_prediction
self.nb_threshold = nb_threshold
self.nominal_attributes = nominal_attributes
def partial_fit(self, X, y, classes=None, sample_weight=None):
""" Partially (incrementally) fit the model.
Parameters
----------
X : numpy.ndarray of shape (n_samples, n_features)
The features to train the model.
y: numpy.ndarray of shape (n_samples)
An array-like with the class labels of all samples in X.
classes: numpy.ndarray, list, optional (default=None)
Array with all possible/known class labels. This is an optional parameter, except
for the first partial_fit call where it is compulsory.
sample_weight: numpy.ndarray of shape (n_samples), optional (default=None)
Samples weight. If not provided, uniform weights are assumed.
Returns
-------
self
"""
if self.classes is None and classes is not None:
self.classes = classes
if sample_weight is None:
weight = 1.0
else:
weight = sample_weight
if y is not None:
row_cnt, _ = get_dimensions(X)
weight = check_weights(weight, expand_length=row_cnt)
for i in range(row_cnt):
if weight[i] != 0.0:
self._train_weight_seen_by_model += weight[i]
self._partial_fit(X[i], y[i], self.classes, weight[i])
return self
def _partial_fit(self, X, y, classes=None, sample_weight=1.0):
self.instances_seen += 1
if self.ensemble is None:
self._init_ensemble(X)
for i in range(self.n_estimators):
y_predicted = self.ensemble[i].predict(np.asarray([X]))
self.ensemble[i].evaluator.add_result(y_predicted, y, sample_weight)
k = self._random_state.poisson(self.lambda_value)
if k > 0:
self.ensemble[i].partial_fit(np.asarray([X]), np.asarray([y]),
classes=classes,
sample_weight=np.asarray([k]),
instances_seen=self.instances_seen)
def predict(self, X):
""" Predict classes for the passed data.
Parameters
----------
X : numpy.ndarray of shape (n_samples, n_features)
The set of data samples to predict the class labels for.
Returns
-------
A numpy.ndarray with all the predictions for the samples in X.
"""
y_proba = self.predict_proba(X)
n_rows = y_proba.shape[0]
y_pred = np.zeros(n_rows, dtype=int)
for i in range(n_rows):
index = np.argmax(y_proba[i])
y_pred[i] = index
return y_pred
def predict_proba(self, X):
""" Estimates the probability of each sample in X belonging to each of the class-labels.
Class probabilities are calculated as the mean predicted class probabilities
per base estimator.
Parameters
----------
X: numpy.ndarray of shape (n_samples, n_features)
Samples for which we want to predict the class probabilities.
Returns
-------
numpy.ndarray of shape (n_samples, n_classes)
Predicted class probabilities for all instances in X.
If class labels were specified in a `partial_fit` call, the order of the columns
matches `self.classes`.
If classes were not specified, they are assumed to be 0-indexed.
            Class probabilities for a sample shall sum to 1 as long as at least one estimator
has non-zero predictions.
If no estimator can predict probabilities, probabilities of 0 are returned.
"""
if self.ensemble is None:
self._init_ensemble(X)
r, _ = get_dimensions(X)
y_proba = []
for i in range(r):
votes = deepcopy(self._get_votes_for_instance(X[i]))
if votes == {}:
# Estimator is empty, all classes equal, default to zero
y_proba.append([0])
else:
if sum(votes.values()) != 0:
votes = normalize_values_in_dict(votes)
if self.classes is not None:
votes_array = np.zeros(int(max(self.classes)) + 1)
else:
votes_array = np.zeros(int(max(votes.keys())) + 1)
for key, value in votes.items():
votes_array[int(key)] = value
y_proba.append(votes_array)
# Set result as np.array
if self.classes is not None:
y_proba = np.asarray(y_proba)
else:
# Fill missing values related to unobserved classes to ensure we get a 2D array
y_proba = np.asarray(list(itertools.zip_longest(*y_proba, fillvalue=0.0))).T
return y_proba
def reset(self):
"""Reset ARF."""
self.ensemble = None
self.instances_seen = 0
self._train_weight_seen_by_model = 0.0
self._random_state = check_random_state(self.random_state)
def _get_votes_for_instance(self, X):
if self.ensemble is None:
self._init_ensemble(X)
combined_votes = {}
for i in range(self.n_estimators):
vote = deepcopy(self.ensemble[i]._get_votes_for_instance(X))
if vote != {} and sum(vote.values()) > 0:
vote = normalize_values_in_dict(vote, inplace=True)
if not self.disable_weighted_vote:
performance = self.ensemble[i].evaluator.accuracy_score()\
if self.performance_metric == 'acc'\
else self.ensemble[i].evaluator.kappa_score()
if performance != 0.0: # CHECK How to handle negative (kappa) values?
for k in vote:
vote[k] = vote[k] * performance
# Add values
for k in vote:
try:
combined_votes[k] += vote[k]
except KeyError:
combined_votes[k] = vote[k]
return combined_votes
def _init_ensemble(self, X):
self._set_max_features(get_dimensions(X)[1])
self.ensemble = [ARFBaseLearner(index_original=i,
classifier=ARFHoeffdingTreeClassifier(
max_byte_size=self.max_byte_size,
memory_estimate_period=self.memory_estimate_period,
grace_period=self.grace_period,
split_criterion=self.split_criterion,
split_confidence=self.split_confidence,
tie_threshold=self.tie_threshold,
binary_split=self.binary_split,
stop_mem_management=self.stop_mem_management,
remove_poor_atts=self.remove_poor_atts,
no_preprune=self.no_preprune,
leaf_prediction=self.leaf_prediction,
nb_threshold=self.nb_threshold,
nominal_attributes=self.nominal_attributes,
max_features=self.max_features,
random_state=self.random_state),
instances_seen=self.instances_seen,
drift_detection_method=self.drift_detection_method,
warning_detection_method=self.warning_detection_method,
is_background_learner=False)
for i in range(self.n_estimators)]
def _set_max_features(self, n):
if self.max_features == 'auto' or self.max_features == 'sqrt':
self.max_features = round(math.sqrt(n))
elif self.max_features == 'log2':
self.max_features = round(math.log2(n))
elif isinstance(self.max_features, int):
# Consider 'max_features' features at each split.
pass
elif isinstance(self.max_features, float):
# Consider 'max_features' as a percentage
self.max_features = int(self.max_features * n)
elif self.max_features is None:
self.max_features = n
else:
# Default to "auto"
self.max_features = round(math.sqrt(n))
# Sanity checks
# max_features is negative, use max_features + n
if self.max_features < 0:
self.max_features += n
# max_features <= 0
# (m can be negative if max_features is negative and abs(max_features) > n),
# use max_features = 1
if self.max_features <= 0:
self.max_features = 1
# max_features > n, then use n
if self.max_features > n:
self.max_features = n
class ARFBaseLearner(BaseSKMObject):
"""ARF Base Learner class.
Parameters
----------
index_original: int
Tree index within the ensemble.
classifier: ARFHoeffdingTreeClassifier
Tree classifier.
instances_seen: int
Number of instances seen by the tree.
drift_detection_method: BaseDriftDetector
Drift Detection method.
warning_detection_method: BaseDriftDetector
Warning Detection method.
is_background_learner: bool
True if the tree is a background learner.
Notes
-----
Inner class that represents a single tree member of the forest.
Contains analysis information, such as the numberOfDriftsDetected.
"""
def __init__(self,
index_original,
classifier: ARFHoeffdingTreeClassifier,
instances_seen,
drift_detection_method: BaseDriftDetector,
warning_detection_method: BaseDriftDetector,
is_background_learner):
self.index_original = index_original
self.classifier = classifier
self.created_on = instances_seen
self.is_background_learner = is_background_learner
self.evaluator_method = ClassificationPerformanceEvaluator
# Drift and warning
self.drift_detection_method = drift_detection_method
self.warning_detection_method = warning_detection_method
self.last_drift_on = 0
self.last_warning_on = 0
self.nb_drifts_detected = 0
self.nb_warnings_detected = 0
self.drift_detection = None
self.warning_detection = None
self.background_learner = None
self._use_drift_detector = False
self._use_background_learner = False
self.evaluator = self.evaluator_method()
# Initialize drift and warning detectors
if drift_detection_method is not None:
self._use_drift_detector = True
self.drift_detection = deepcopy(drift_detection_method)
if warning_detection_method is not None:
self._use_background_learner = True
self.warning_detection = deepcopy(warning_detection_method)
def reset(self, instances_seen):
if self._use_background_learner and self.background_learner is not None:
self.classifier = self.background_learner.classifier
self.warning_detection = self.background_learner.warning_detection
self.drift_detection = self.background_learner.drift_detection
self.evaluator_method = self.background_learner.evaluator_method
self.created_on = self.background_learner.created_on
self.background_learner = None
else:
self.classifier.reset()
self.created_on = instances_seen
self.drift_detection.reset()
self.evaluator = self.evaluator_method()
def partial_fit(self, X, y, classes, sample_weight, instances_seen):
self.classifier.partial_fit(X, y, classes=classes, sample_weight=sample_weight)
if self.background_learner:
self.background_learner.classifier.partial_fit(X, y,
classes=classes,
sample_weight=sample_weight)
if self._use_drift_detector and not self.is_background_learner:
correctly_classifies = self.classifier.predict(X) == y
# Check for warning only if use_background_learner is active
if self._use_background_learner:
self.warning_detection.add_element(int(not correctly_classifies))
# Check if there was a change
if self.warning_detection.detected_change():
self.last_warning_on = instances_seen
self.nb_warnings_detected += 1
# Create a new background tree classifier
background_learner = self.classifier.new_instance()
# Create a new background learner object
self.background_learner = ARFBaseLearner(self.index_original,
background_learner,
instances_seen,
self.drift_detection_method,
self.warning_detection_method,
True)
# Update the warning detection object for the current object
# (this effectively resets changes made to the object while it
# was still a background learner).
self.warning_detection.reset()
# Update the drift detection
self.drift_detection.add_element(int(not correctly_classifies))
# Check if there was a change
if self.drift_detection.detected_change():
self.last_drift_on = instances_seen
self.nb_drifts_detected += 1
self.reset(instances_seen)
def predict(self, X):
return self.classifier.predict(X)
def _get_votes_for_instance(self, X):
return self.classifier._get_votes_for_instance(X)
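# --- Illustration (added; not part of the original module) ---
# Standalone sketch of how the 'max_features' options documented above are
# resolved for a stream with n features; it mirrors the logic of
# AdaptiveRandomForestClassifier._set_max_features, which the classifier runs
# internally when the ensemble is initialized.
def _resolve_max_features_example(max_features, n):
    if max_features in ('auto', 'sqrt'):
        m = round(math.sqrt(n))
    elif max_features == 'log2':
        m = round(math.log2(n))
    elif isinstance(max_features, int):
        m = max_features                # used as-is
    elif isinstance(max_features, float):
        m = int(max_features * n)       # interpreted as a fraction of n
    elif max_features is None:
        m = n                           # all features
    else:
        m = round(math.sqrt(n))         # unknown values fall back to 'auto'
    if m < 0:
        m += n                          # negative values count back from n
    return min(max(m, 1), n)            # clip to the valid range [1, n]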
|
ytdl.py | KenT2/tboplayer | 164 | 12740613 | import json
import pexpect
import re
import string
import sys
import requests
import os
from hashlib import sha256
from threading import Thread
from time import sleep
from vtt_to_srt import vtts_to_srt
# ***************************************
# YTDL CLASS
# ***************************************
class Ytdl:
"""
interface for youtube-dl
"""
_YTLOCATION = ''
_YTLAUNCH_CMD = ''
_YTLAUNCH_ARGS_FORMAT = ' -j -f %s --youtube-skip-dash-manifest "%s"'
_YTLAUNCH_PLST_CMD = ''
_YTLAUNCH_PLST_ARGS_FORMAT = ' -J -f %s --youtube-skip-dash-manifest "%s"'
_YTLAUNCH_SUB_DIR = '/dev/shm/tbopsubs'
_YTLAUNCH_SUBT_ARGS_FORMAT = ' --write-sub --sub-lang %s --skip-download "%s" --output %s/subtitle'
_YTLAUNCH_AUTOSUBT_ARGS_FORMAT = ' --write-auto-sub --sub-lang %s --skip-download "%s" --output %s/subtitle'
_FINISHED_STATUS = "\n"
_WRN_STATUS = ".*WARNING:.*"
_UPDATED_STATUS = ".*Restart youtube-dl to use the new version.*"
_ERR_STATUS = ".*ERROR:.*"
_SUDO_STATUS = ".*\[sudo\].*"
_NO_SUB_STATUS = ".*WARNING: video doesn't have subtitles.*"
_FOUND_SUB_STATUS = ".*\[info\] Writing video subtitles to:.*"
_SERVICES_REGEXPS = ()
_ACCEPTED_LINK_REXP_FORMAT = "(http[s]{0,1}://(?:\w|\.{0,1})+%s\.(?:[a-z]{2,3})(?:\.[a-z]{2,3}){0,1}/)"
_running_processes = {}
finished_processes = {}
MSGS = (0,1,2)
start_signal = False
end_signal = False
updated_signal = False
updating_signal = False
update_failed_signal = False
password_requested_signal = False
has_password_signal = False
downloading_subtitle_signal = False
downloaded_subtitle_signal = False
downloaded_partial_subtitle_signal = False
download_subtitle_failed_signal = False
subtitle_ready_signal = False
_sudo_password = ''
def __init__(self, options, yt_not_found_callback):
os.system("mount > /dev/null")
try:
os.mkdir(self._YTLAUNCH_SUB_DIR)
except:
pass
self.set_options(options)
self.yt_not_found_callback = yt_not_found_callback
self.compile_regexps()
def compile_regexps(self, updated=False):
Thread(target=self._compile_regexps,args=[updated]).start()
def _compile_regexps(self, updated=False):
if not os.path.isfile(self._YTLOCATION): return
self._SERVICES_REGEXPS = ()
extractors_f = os.path.expanduser("~") + "/.tboplayer/ytdl_extractors"
if not os.path.isfile(extractors_f) or updated:
os.system(self._YTLOCATION + " --list-extractors > " + extractors_f)
f = open(extractors_f, "r")
extractors = f.read().split("\n")
f.close()
supported_service_re = re.compile("^[\w\d.]+$")
supported_services = ()
for e in extractors:
if supported_service_re.match(e) != None:
supported_services = supported_services + (e.lower(),)
for s in list(sorted(supported_services, reverse=True)):
if "." in s:
self._SERVICES_REGEXPS = self._SERVICES_REGEXPS + (re.compile(s),)
else:
self._SERVICES_REGEXPS = self._SERVICES_REGEXPS + (re.compile(self._ACCEPTED_LINK_REXP_FORMAT % (s)),)
def _response(self, url):
process = self._running_processes[url][0]
if self._terminate_sent_signal:
r = (-2, '')
else:
data = process.before
if self._WRN_STATUS in data:
# warning message
r = (0, self.MSGS[0])
elif self._ERR_STATUS in data:
# error message
r = (-1, self.MSGS[1])
else:
r = (1, data)
self.finished_processes[url] = self._running_processes[url]
self.finished_processes[url][1] = r
del self._running_processes[url]
def _get_link_media_format(self, url, f):
return "m4a" if (f == "m4a" and "youtube." in url) else "mp4"
def _background_process(self, url):
process = self._running_processes[url][0]
while self.is_running(url):
try:
index = process.expect([self._FINISHED_STATUS,
pexpect.TIMEOUT,
pexpect.EOF])
if index == 1: continue
elif index == 2:
del self._running_processes[url]
break
else:
self._response(url)
break
except Exception:
del self._running_processes[url]
break
sleep(1)
def _spawn_thread(self, url):
self._terminate_sent_signal = False
Thread(target=self._background_process, args=[url]).start()
def retrieve_media_url(self, url, f):
if self.is_running(url): return
ytcmd = self._YTLAUNCH_CMD % (self._get_link_media_format(url, f), url)
process = pexpect.spawn(ytcmd)
self._running_processes[url] = [process, ''] # process, result
self._spawn_thread(url)
def retrieve_youtube_playlist(self, url, f):
if self.is_running(url): return
ytcmd = self._YTLAUNCH_PLST_CMD % (f, url)
process = pexpect.spawn(ytcmd, timeout=180, maxread=50000, searchwindowsize=50000)
self._running_processes[url] = [process, '']
self._spawn_thread(url)
def whether_to_use_youtube_dl(self, url):
to_use = url[:4] == "http" and any(regxp.match(url) for regxp in self._SERVICES_REGEXPS)
if to_use and not os.path.isfile(self._YTLOCATION):
self.yt_not_found_callback()
return False
return to_use
def is_running(self, url = None):
if url and not url in self._running_processes:
return False
elif not url:
return bool(len(self._running_processes))
process = self._running_processes[url][0]
return process is not None and process.isalive()
def set_options(self, options):
self._YTLOCATION=options.ytdl_location
self._YTLAUNCH_CMD=self._YTLOCATION + self._YTLAUNCH_ARGS_FORMAT
self._YTLAUNCH_PLST_CMD=self._YTLOCATION + self._YTLAUNCH_PLST_ARGS_FORMAT
def set_password(self, password):
self._sudo_password = password
self.has_password_signal = True
def quit(self):
self._terminate_sent_signal = True
try:
for url in self._running_processes:
self._running_processes[url][0].terminate(force=True)
except:
return
def download_subtitles(self, lang, url):
self.downloaded_subtitle_signal = False
self.download_subtitle_failed_signal = False
self.subtitle_ready_signal = False
self.downloaded_partial_subtitle_signal = False
if not os.path.isfile(self._YTLOCATION):
self.download_subtitle_failed_signal = True
return
ytcmd = self._YTLOCATION + ((self._YTLAUNCH_SUBT_ARGS_FORMAT) % (lang, url, self._YTLAUNCH_SUB_DIR))
self._subtitled_process = pexpect.spawn(ytcmd)
self.downloading_subtitle_signal = True
Thread(target=self._download_subtitles,args=[lang, url]).start()
def _download_subtitles(self, lang, url, trying = 1):
while self.downloading_subtitle_signal:
try:
index = self._subtitled_process.expect([self._FOUND_SUB_STATUS,
pexpect.EOF,
self._NO_SUB_STATUS,
pexpect.TIMEOUT])
if index == 0:
self.downloaded_partial_subtitle_signal = True
elif index == 1:
if self.downloaded_partial_subtitle_signal:
self.downloaded_subtitle_signal = True
else:
self.download_subtitle_failed_signal = True
self.downloading_subtitle_signal = False
break
elif index in (2,3):
self.downloading_subtitle_signal = False
if trying == 2:
self.download_subtitle_failed_signal = True
break
sleep(0.2)
except Exception, e:
print e
self.download_subtitle_failed_signal = True
self.downloading_subtitle_signal = False
return
if trying == 1 and not self.downloaded_subtitle_signal:
self.downloading_subtitle_signal = True
ytcmd = self._YTLOCATION + ((self._YTLAUNCH_AUTOSUBT_ARGS_FORMAT) % (lang, url, self._YTLAUNCH_SUB_DIR))
self._subtitled_process = pexpect.spawn(ytcmd)
self._download_subtitles(lang, url, trying = 2)
if self.downloaded_subtitle_signal:
vtts_to_srt(self._YTLAUNCH_SUB_DIR, rec = False)
os.remove(self._YTLAUNCH_SUB_DIR + "/subtitle." + lang + ".vtt")
self.subtitle_ready_signal = True
def check_for_update(self):
if not os.path.isfile(self._YTLOCATION):
return
self.updating_signal = True
Thread(target=self._check_for_update,args=[]).start()
def _check_for_update(self):
try:
versionsurl = "http://rg3.github.io/youtube-dl/update/versions.json"
versions = json.loads(requests.get(versionsurl).text)
except Exception, e:
print e
self.updating_signal = False
return
current_version_hash = sha256(open(self._YTLOCATION, 'rb').read()).hexdigest()
latest_version_hash = versions['versions'][versions['latest']]['bin'][1]
if current_version_hash != latest_version_hash:
self._update_process = pexpect.spawn("sudo %s -U" % self._YTLOCATION, timeout=60)
while self.updating_signal:
try:
index = self._update_process.expect([self._UPDATED_STATUS,
self._SUDO_STATUS,
self._ERR_STATUS,
pexpect.EOF,
pexpect.TIMEOUT])
if index == 0:
self.updating_signal = False
self.updated_signal = True
break
elif index in (2,3,4):
self.update_failed_signal = True
self.updating_signal = False
break
elif index == 1:
if self._sudo_password:
self.password_requested_signal = False
self._update_process.sendline(self._sudo_password)
self._sudo_password = ''
elif self._sudo_password == None:
self.password_requested_signal = False
self.updating_signal = False
self._sudo_password = False
self._update_process.terminate(force=True)
break
elif not self.has_password_signal:
self.password_requested_signal = True
except Exception, e:
print e
break
sleep(0.5)
if self.updated_signal:
self.compile_regexps(updated=True)
self.updating_signal = False
def reset_processes(self):
self._running_processes = {}
self.finished_processes = {}
def reset_subtitle_attributes(self):
self.downloading_subtitle_signal = False
self.downloaded_subtitle_signal = False
self.downloaded_partial_subtitle_signal = False
self.download_subtitle_failed_signal = False
self.subtitle_ready_signal = False
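# --- Usage sketch (added for illustration; not part of the original file) ---
# Rough outline of how this helper is driven: 'opts' only needs a
# 'ytdl_location' attribute pointing at the youtube-dl binary, and the URL and
# format are placeholders. The real player polls finished_processes from its
# event loop instead of blocking in a sleep loop as done here, and the 'url'
# key of the JSON that youtube-dl emits is an assumption about its -j output.
def _example_fetch_media_url(opts, page_url):
    ytdl = Ytdl(opts, lambda: sys.stderr.write("youtube-dl not found\n"))
    if not ytdl.whether_to_use_youtube_dl(page_url):
        return None
    ytdl.retrieve_media_url(page_url, "mp4")
    while ytdl.is_running(page_url):
        sleep(0.5)
    status, data = ytdl.finished_processes.get(page_url, [None, (-1, '')])[1]
    return json.loads(data).get('url') if status == 1 else None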
|
applications/ConvectionDiffusionApplication/test_examples/square_edgebased.gid/test_pureconvectionsolver_build_reference.py | lkusch/Kratos | 778 | 12740614 | <gh_stars>100-1000
from __future__ import print_function, absolute_import, division #makes KratosMultiphysics backward compatible with python 2.6 and 2.7
import sys
kratos_benchmarking_path = '../../../../benchmarking'
sys.path.append(kratos_benchmarking_path)
import benchmarking
print("Building reference data for edgebased_PureConvection.py...")
benchmarking.BuildReferenceData("edgebased_PureConvection.py", "test_pureconvectionsolver_benchmarking_ref.txt")
|
DPGAnalysis/Skims/python/MultiMuon_cfg.py | ckamtsikis/cmssw | 852 | 12740643 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
process = cms.Process("SKIM")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/D266D139-D871-DE11-A709-001D09F28F0C.root',
'/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/CA27788D-E871-DE11-8B46-001D09F276CF.root',
'/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/AC5633B2-D471-DE11-9B3A-001D09F252F3.root',
'/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/9CD957E7-D071-DE11-B6AE-001D09F252F3.root',
'/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/94BF68F7-D171-DE11-902B-000423D986A8.root',
'/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/7838FE1E-C771-DE11-9FD5-000423D98950.root',
'/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/56632803-DD71-DE11-BAF5-000423D9870C.root',
'/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/42A67CB9-E971-DE11-AA86-001D09F252F3.root',
'/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/407225D3-D071-DE11-809B-001D09F297EF.root',
'/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/3E5E1CF0-D271-DE11-AC2B-000423D94700.root',
'/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/2C57E916-D071-DE11-AF0E-001D09F24E39.root',
'/store/data/Commissioning09/Cosmics/RECO/v5/000/105/755/228896A5-E571-DE11-A60B-001D09F2AF96.root')
)
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.6 $'),
name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/DPGAnalysis/Skims/python/MultiMuon_cfg.py,v $'),
annotation = cms.untracked.string('CRAFT MultiMuon skim')
)
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1000))
process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))
process.load('Configuration/StandardSequences/MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration/StandardSequences/GeometryIdeal_cff')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.GlobalTag.globaltag = 'GR09_31X_V3P::All'
process.load("Configuration.StandardSequences.ReconstructionCosmics_cff")
process.multiCosmicMuonFilter = cms.EDFilter("TrackCountFilter",
src = cms.InputTag('cosmicMuonsBarrelOnly'),
minNumber = cms.uint32(5)
)
process.multiLHCMuonFilter = cms.EDFilter("TrackCountFilter",
src = cms.InputTag('lhcStandAloneMuonsBarrelOnly'),
minNumber = cms.uint32(5)
)
process.out = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring('keep *','drop *_MEtoEDMConverter_*_*'),
SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('multiCosmicMuonPath',
'multiLHCMuonPath')),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RECO'),
filterName = cms.untracked.string('multiCosmicMuon')),
fileName = cms.untracked.string('/tmp/malgeri/multiMuon.root')
)
process.multiCosmicMuonPath = cms.Path(process.multiCosmicMuonFilter)
process.multiLHCMuonPath = cms.Path(process.multiLHCMuonFilter)
process.this_is_the_end = cms.EndPath(process.out)
|
zip_file_example/extract_file_in_memory/main.py | DazEB2/SimplePyScripts | 117 | 12740655 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import zipfile
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parent.parent.parent))
from human_byte_size import sizeof_fmt
FILE_NAME = Path('Doc_df7c89c378c04e8daf69257ea95d9a2e.zip')
print('Zip size:', sizeof_fmt(len(FILE_NAME.read_bytes())))
with zipfile.ZipFile(FILE_NAME) as f:
data_file = f.read('Doc_df7c89c378c04e8daf69257ea95d9a2e.html')
size = sizeof_fmt(len(data_file))
print(f'File size: {size}')
print(f'data_file[:100]: {data_file[:100]}')
|
qcfractal/alembic/versions/469ece903d76_migrate_provenance_to_not_null.py | MolSSI/dqm_server | 113 | 12740685 | <filename>qcfractal/alembic/versions/469ece903d76_migrate_provenance_to_not_null.py
"""migrate provenance to not null
Revision ID: 469ece903d76
Revises: <PASSWORD>
Create Date: 2021-05-02 09:48:57.061825
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm.session import Session
# revision identifiers, used by Alembic.
revision = "469ece903d76"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
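    # Normalize existing rows so the provenance columns can be made NOT NULL:
    # JSON null 'creator'/'routine'/'version' sub-fields are replaced with
    # empty strings on base_result and molecule, empty molecule connectivity
    # lists are nulled out, and the result property key
    # 'mp2_total_correlation_energy' is renamed to 'mp2_correlation_energy'.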
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"
)
)
conn.execute(sa.text("UPDATE molecule SET connectivity = null where connectivity::text = '[]'"))
conn.execute(
sa.text(
"UPDATE result SET properties = properties::jsonb - 'mp2_total_correlation_energy' || jsonb_build_object('mp2_correlation_energy', properties->'mp2_total_correlation_energy') WHERE properties::jsonb ? 'mp2_total_correlation_energy'"
)
)
def downgrade():
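    # The normalization above discards which fields were originally null, so
    # no automatic downgrade is provided.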
pass
|
pyhealth/test/test_model_sequence.py | rkalahasty/PyHealth | 485 | 12740709 | <gh_stars>100-1000
import unittest
import numpy as np
import torch
import os
import shutil
from pyhealth.models.sequence.dipole import Dipole
from pyhealth.models.sequence.lstm import LSTM
from pyhealth.models.sequence.gru import GRU
from pyhealth.models.sequence.embedgru import EmbedGRU
from pyhealth.models.sequence.retain import Retain
from pyhealth.models.sequence.raim import RAIM
from pyhealth.models.sequence.tlstm import tLSTM
from pyhealth.models.sequence.stagenet import StageNet
from pyhealth.models.sequence.xgboost_seq import XGBoostSequence
from pyhealth.models.sequence.rf import RandomForest
from pyhealth.data.expdata_generator import sequencedata as expdata_generator
from pyhealth.evaluation.evaluator import func
import sys
if sys.version_info >= (3, 6):
import zipfile
else:
import zipfile36 as zipfile
class TestSequentialModel(unittest.TestCase):
expdata_id = 'test.sequence.model'
def test_01(self):
if os.path.exists('./experiments_data') is False:
os.mkdir('./experiments_data')
if os.path.exists('./datasets/mimic') is False:
z = zipfile.ZipFile("./datasets/mimic.zip", "r")
            for filename in z.namelist():
                z.extract(filename, './datasets')
cur_dataset = expdata_generator(self.expdata_id)
cur_dataset.get_exp_data(sel_task='mortality', data_root='./datasets/mimic')
def test_02_lstm_cpu(self):
cur_dataset = expdata_generator(self.expdata_id)
cur_dataset.load_exp_data()
expmodel_id = 'test.lstm.gpu'
clf = LSTM(expmodel_id=expmodel_id,
n_batchsize=20,
use_gpu=False,
n_epoch=10)
clf.fit(cur_dataset.train, cur_dataset.valid)
clf.load_model()
clf.inference(cur_dataset.test)
pred_results = clf.get_results()
assert np.shape(pred_results['hat_y']) == np.shape(pred_results['y'])
assert True not in np.isnan(pred_results['hat_y']).tolist()
assert True not in np.isnan(pred_results['hat_y']*0).tolist()
def test_02_lstm_gpu(self):
cur_dataset = expdata_generator(self.expdata_id)
cur_dataset.load_exp_data()
expmodel_id = 'test.lstm.cpu'
clf = LSTM(expmodel_id=expmodel_id,
n_batchsize=20,
use_gpu=True,
n_epoch=10)
clf.fit(cur_dataset.train, cur_dataset.valid)
clf.load_model()
clf.inference(cur_dataset.test)
pred_results = clf.get_results()
assert np.shape(pred_results['hat_y']) == np.shape(pred_results['y'])
assert True not in np.isnan(pred_results['hat_y']).tolist()
assert True not in np.isnan(pred_results['hat_y']*0).tolist()
def test_02_gru(self):
cur_dataset = expdata_generator(self.expdata_id)
cur_dataset.load_exp_data()
expmodel_id = 'test.gru'
clf = GRU(expmodel_id=expmodel_id,
n_batchsize=20,
use_gpu=True,
n_epoch=10)
clf.fit(cur_dataset.train, cur_dataset.valid)
clf.load_model()
clf.inference(cur_dataset.test)
pred_results = clf.get_results()
assert np.shape(pred_results['hat_y']) == np.shape(pred_results['y'])
assert True not in np.isnan(pred_results['hat_y']).tolist()
assert True not in np.isnan(pred_results['hat_y']*0).tolist()
def test_02_embedgru(self):
cur_dataset = expdata_generator(self.expdata_id)
cur_dataset.load_exp_data()
expmodel_id = 'test.embedgru'
clf = EmbedGRU(expmodel_id=expmodel_id,
n_batchsize=20,
use_gpu=True,
n_epoch=10)
clf.fit(cur_dataset.train, cur_dataset.valid)
clf.load_model()
clf.inference(cur_dataset.test)
pred_results = clf.get_results()
assert np.shape(pred_results['hat_y']) == np.shape(pred_results['y'])
assert True not in np.isnan(pred_results['hat_y']).tolist()
assert True not in np.isnan(pred_results['hat_y']*0).tolist()
def test_02_dipole(self):
cur_dataset = expdata_generator(self.expdata_id)
cur_dataset.load_exp_data()
expmodel_id = 'test.dipole'
clf = Dipole(expmodel_id=expmodel_id,
n_batchsize=20,
use_gpu=True,
n_epoch=10)
clf.fit(cur_dataset.train, cur_dataset.valid)
clf.load_model()
clf.inference(cur_dataset.test)
pred_results = clf.get_results()
assert np.shape(pred_results['hat_y']) == np.shape(pred_results['y'])
assert True not in np.isnan(pred_results['hat_y']).tolist()
assert True not in np.isnan(pred_results['hat_y']*0).tolist()
def test_02_retain(self):
cur_dataset = expdata_generator(self.expdata_id)
cur_dataset.load_exp_data()
expmodel_id = 'test.retain'
clf = Retain(expmodel_id=expmodel_id,
n_batchsize=20,
use_gpu=True,
n_epoch=10)
clf.fit(cur_dataset.train, cur_dataset.valid)
clf.load_model()
clf.inference(cur_dataset.test)
pred_results = clf.get_results()
assert np.shape(pred_results['hat_y']) == np.shape(pred_results['y'])
assert True not in np.isnan(pred_results['hat_y']).tolist()
assert True not in np.isnan(pred_results['hat_y']*0).tolist()
def test_02_raim(self):
cur_dataset = expdata_generator(self.expdata_id)
cur_dataset.load_exp_data()
expmodel_id = 'test.raim'
clf = RAIM(expmodel_id=expmodel_id,
n_batchsize=20,
use_gpu=True,
n_epoch=10)
clf.fit(cur_dataset.train, cur_dataset.valid)
clf.load_model()
clf.inference(cur_dataset.test)
pred_results = clf.get_results()
assert np.shape(pred_results['hat_y']) == np.shape(pred_results['y'])
assert True not in np.isnan(pred_results['hat_y']).tolist()
assert True not in np.isnan(pred_results['hat_y']*0).tolist()
def test_02_tlstm(self):
cur_dataset = expdata_generator(self.expdata_id)
cur_dataset.load_exp_data()
expmodel_id = 'test.tlstm'
clf = tLSTM(expmodel_id=expmodel_id,
n_batchsize=20,
use_gpu=True,
n_epoch=10)
clf.fit(cur_dataset.train, cur_dataset.valid)
clf.load_model()
clf.inference(cur_dataset.test)
pred_results = clf.get_results()
assert np.shape(pred_results['hat_y']) == np.shape(pred_results['y'])
assert True not in np.isnan(pred_results['hat_y']).tolist()
assert True not in np.isnan(pred_results['hat_y']*0).tolist()
def test_02_stagenet(self):
cur_dataset = expdata_generator(self.expdata_id)
cur_dataset.load_exp_data()
expmodel_id = 'test.stagenet'
clf = StageNet(expmodel_id=expmodel_id,
n_batchsize=20,
use_gpu=True,
n_epoch=10)
clf.fit(cur_dataset.train, cur_dataset.valid)
clf.load_model()
clf.inference(cur_dataset.test)
pred_results = clf.get_results()
assert np.shape(pred_results['hat_y']) == np.shape(pred_results['y'])
assert True not in np.isnan(pred_results['hat_y']).tolist()
assert True not in np.isnan(pred_results['hat_y']*0).tolist()
def test_02_xgboost(self):
cur_dataset = expdata_generator(self.expdata_id)
cur_dataset.load_exp_data()
expmodel_id = 'test.xgboost'
clf = XGBoostSequence(expmodel_id=expmodel_id)
clf.fit(cur_dataset.train, cur_dataset.valid)
clf.load_model()
clf.inference(cur_dataset.test)
pred_results = clf.get_results()
assert np.shape(pred_results['hat_y']) == np.shape(pred_results['y'])
assert True not in np.isnan(pred_results['hat_y']).tolist()
assert True not in np.isnan(pred_results['hat_y']*0).tolist()
def test_02_rm(self):
cur_dataset = expdata_generator(self.expdata_id)
cur_dataset.load_exp_data()
expmodel_id = 'test.randomforest'
clf = RandomForest(expmodel_id=expmodel_id)
clf.fit(cur_dataset.train, cur_dataset.valid)
clf.load_model()
clf.inference(cur_dataset.test)
pred_results = clf.get_results()
assert np.shape(pred_results['hat_y']) == np.shape(pred_results['y'])
assert True not in np.isnan(pred_results['hat_y']).tolist()
assert True not in np.isnan(pred_results['hat_y']*0).tolist()
def test_03_delete(self):
shutil.rmtree(os.path.join('./experiments_data', self.expdata_id))
shutil.rmtree(os.path.join('./experiments_records', 'test.lstm.cpu'))
shutil.rmtree(os.path.join('./experiments_records', 'test.lstm.gpu'))
shutil.rmtree(os.path.join('./experiments_records', 'test.gru'))
shutil.rmtree(os.path.join('./experiments_records', 'test.embedgru'))
shutil.rmtree(os.path.join('./experiments_records', 'test.dipole'))
shutil.rmtree(os.path.join('./experiments_records', 'test.retain'))
shutil.rmtree(os.path.join('./experiments_records', 'test.raim'))
shutil.rmtree(os.path.join('./experiments_records', 'test.tlstm'))
shutil.rmtree(os.path.join('./experiments_records', 'test.stagenet'))
shutil.rmtree(os.path.join('./experiments_records', 'test.xgboost'))
shutil.rmtree(os.path.join('./experiments_records', 'test.randomforest'))
|
Printing all instances of a class/use__mixin_and_weakrefs.py | DazEB2/SimplePyScripts | 117 | 12740729 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# SOURCE: https://stackoverflow.com/a/328882/5909792
from collections import defaultdict
import weakref
class KeepRefs:
__refs__ = defaultdict(list)
def __init__(self):
self.__refs__[self.__class__].append(weakref.ref(self))
@classmethod
def get_instances(cls):
for inst_ref in cls.__refs__[cls]:
inst = inst_ref()
if inst is not None:
yield inst
class X(KeepRefs):
def __init__(self, name):
super().__init__()
self.name = name
x = X("x")
y = X("y")
print([r.name for r in X.get_instances()]) # ['x', 'y']
del y
print([r.name for r in X.get_instances()]) # ['x']
|
src/systemtest/op-monitoring/integration/testcases/test_attachments.py | yannzido/new | 188 | 12740741 | #!/usr/bin/env python3
# The MIT License
# Copyright (c) 2016 Estonian Information System Authority (RIA), Population Register Centre (VRK)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Test case for verifying that the operational monitoring related data of
# X-Road requests and responses that contain attachments are stored by the
# operational monitoring daemon.
import os
import sys
sys.path.append('..')
import python_common as common
def _expected_keys_and_values_of_one_attachment_query_rec(
xroad_message_id, security_server_type):
return [
("clientMemberClass", "GOV"),
("clientMemberCode", "00000001"),
("clientSecurityServerAddress", "xtee9.ci.kit"),
("clientSubsystemCode", "System1"),
("clientXRoadInstance", "XTEE-CI-XM"),
("messageId", xroad_message_id),
("messageIssue", "attachmentsPlease"),
("messageProtocolVersion", "4.0"),
("requestAttachmentCount", 1),
("requestMimeSize", 1430),
("requestSoapSize", 1413),
("responseAttachmentCount", 3),
("responseMimeSize", 1648),
("responseSoapSize", 1600),
("securityServerType", security_server_type),
("serviceCode", "xroadGetRandom"),
("serviceMemberClass", "GOV"),
("serviceMemberCode", "00000000"),
("serviceSecurityServerAddress", "xtee8.ci.kit"),
("serviceSubsystemCode", "Center"),
("serviceVersion", "v1"),
("serviceXRoadInstance", "XTEE-CI-XM"),
("succeeded", True),
]
def _expected_keys_and_values_of_five_attachments_query_rec(
xroad_message_id, security_server_type):
return [
("clientMemberClass", "GOV"),
("clientMemberCode", "00000001"),
("clientSecurityServerAddress", "xtee9.ci.kit"),
("clientSubsystemCode", "System1"),
("clientXRoadInstance", "XTEE-CI-XM"),
("messageId", xroad_message_id),
("messageIssue", "453465"),
("messageProtocolVersion", "4.0"),
("messageUserId", "EE37702211230"),
("representedPartyClass", "COM"),
("representedPartyCode", "UNKNOWN_MEMBER"),
("requestAttachmentCount", 5),
("requestMimeSize", 1714),
("requestSoapSize", 1629),
("responseAttachmentCount", 0),
("responseSoapSize", 1519),
("securityServerType", security_server_type),
("serviceCode", "xroadGetRandom"),
("serviceMemberClass", "GOV"),
("serviceMemberCode", "00000000"),
("serviceSecurityServerAddress", "xtee8.ci.kit"),
("serviceSubsystemCode", "Center"),
("serviceVersion", "v1"),
("serviceXRoadInstance", "XTEE-CI-XM"),
("succeeded", True),
]
def run(client_security_server_address, producer_security_server_address,
ssh_user, request_template_dir):
xroad_request_template_filename = os.path.join(
request_template_dir, "simple_xroad_query_template.xml")
xroad_request_attachments_template_filename = os.path.join(
request_template_dir, "xroad_query_for_attachments_template.xml")
query_data_client_template_filename = os.path.join(
request_template_dir, "query_operational_data_client_template.xml")
query_data_producer_template_filename = os.path.join(
request_template_dir, "query_operational_data_producer_template.xml")
client_timestamp_before_requests = common.get_remote_timestamp(
client_security_server_address, ssh_user)
producer_timestamp_before_requests = common.get_remote_timestamp(
producer_security_server_address, ssh_user)
message_id_one_attachment = common.generate_message_id()
print("\nGenerated message ID %s for X-Road request with one " \
"attachment" % (message_id_one_attachment, ))
### Regular and operational data requests and the relevant checks
print("\n---- Sending an X-Road request with one attachment to the " \
"service that will respond with three attachments ----\n")
request_contents = common.format_xroad_request_template(
xroad_request_attachments_template_filename, message_id_one_attachment)
response = common.post_multipart_request(
client_security_server_address, request_contents,
attachment_count=1, get_raw_stream=True)
# Expecting a multipart response with attachments.
mime_parts, raw_response = common.parse_multipart_response(response)
print("Received the following X-Road response: \n")
print(raw_response.decode("utf-8"))
if not mime_parts:
common.parse_and_check_soap_response(raw_response)
message_id_five_attachments = common.generate_message_id()
print("\nGenerated message ID %s for X-Road request with five " \
"attachments" % (message_id_five_attachments, ))
print("\n---- Sending an X-Road request with five attachments to the " \
"client's security server ----\n")
request_contents = common.format_xroad_request_template(
xroad_request_template_filename, message_id_five_attachments)
# Expecting a simple SOAP response.
response = common.post_multipart_request(
client_security_server_address, request_contents, attachment_count=5)
print("Received the following X-Road response: \n")
xml = common.parse_and_clean_xml(response.text)
print(xml.toprettyxml())
common.check_soap_fault(xml)
common.wait_for_operational_data()
client_timestamp_after_request = common.get_remote_timestamp(
client_security_server_address, ssh_user)
producer_timestamp_after_request = common.get_remote_timestamp(
producer_security_server_address, ssh_user)
# Now make operational data requests to both security servers and check the
# response payloads.
print("\n---- Sending an operational data request to the client's security server ----\n")
message_id = common.generate_message_id()
print("Generated message ID %s for query data request" % (message_id, ))
request_contents = common.format_query_operational_data_request_template(
query_data_client_template_filename, message_id,
client_timestamp_before_requests, client_timestamp_after_request)
print("Generated the following query data request for the client's security server: \n")
print(request_contents)
response = common.post_xml_request(
client_security_server_address, request_contents,
get_raw_stream=True)
mime_parts, raw_response = common.parse_multipart_response(response)
if mime_parts:
soap_part, record_count = common.get_multipart_soap_and_record_count(mime_parts[0])
common.print_multipart_soap_and_record_count(soap_part, record_count)
json_payload = common.get_multipart_json_payload(mime_parts[1])
# Check the presence of all the required fields in at least one JSON structure.
# The record describing the query with one attachment
common.assert_present_in_json(
json_payload, _expected_keys_and_values_of_one_attachment_query_rec(
message_id_one_attachment, "Client"))
# The record describing the query with five attachments
common.assert_present_in_json(
json_payload, _expected_keys_and_values_of_five_attachments_query_rec(
message_id_five_attachments, "Client"))
# As operational data is queried by regular client, the field
# 'securityServerInternalIp' is not expected to be included
# in the response payload.
common.assert_missing_in_json(json_payload, "securityServerInternalIp")
# Check if the timestamps in the response are in the expected range.
common.assert_expected_timestamp_values(
json_payload, client_timestamp_before_requests, client_timestamp_after_request)
common.print_multipart_query_data_response(json_payload)
else:
common.parse_and_check_soap_response(raw_response)
print("\n---- Sending an operational data request to the producer's " \
"security server ----\n")
message_id = common.generate_message_id()
print("\nGenerated message ID %s for query data request" % (message_id, ))
request_contents = common.format_query_operational_data_request_template(
query_data_producer_template_filename, message_id,
producer_timestamp_before_requests, producer_timestamp_after_request)
print("Generated the following query data request for the producer's " \
"security server: \n")
print(request_contents)
response = common.post_xml_request(
producer_security_server_address, request_contents,
get_raw_stream=True)
mime_parts, raw_response = common.parse_multipart_response(response)
if mime_parts:
soap_part, record_count = common.get_multipart_soap_and_record_count(mime_parts[0])
common.print_multipart_soap_and_record_count(soap_part, record_count, is_client=False)
json_payload = common.get_multipart_json_payload(mime_parts[1])
# Check the presence of all the required fields in at least one JSON structure.
# The record describing the query with one attachment
common.assert_present_in_json(
json_payload, _expected_keys_and_values_of_one_attachment_query_rec(
message_id_one_attachment, "Producer"))
# The record describing the query with five attachments
common.assert_present_in_json(
json_payload, _expected_keys_and_values_of_five_attachments_query_rec(
message_id_five_attachments, "Producer"))
# As operational data is queried by regular client, the field
# 'securityServerInternalIp' is not expected to be included
# in the response payload.
common.assert_missing_in_json(json_payload, "securityServerInternalIp")
# Check if the timestamps in the response are in the expected range.
common.assert_expected_timestamp_values(
json_payload,
producer_timestamp_before_requests, producer_timestamp_after_request)
common.print_multipart_query_data_response(json_payload)
else:
common.parse_and_check_soap_response(raw_response)
|
recipes/Python/577654_DDE_Client/recipe-577654.py | tdiprima/code | 2,023 | 12740744 | #!/usr/bin/env python
# Send DDE Execute command to running program
from ctypes import POINTER, WINFUNCTYPE, c_char_p, c_void_p, c_int, c_ulong
from ctypes.wintypes import BOOL, DWORD, BYTE, INT, LPCWSTR, UINT, ULONG
# DECLARE_HANDLE(name) typedef void *name;
HCONV = c_void_p # = DECLARE_HANDLE(HCONV)
HDDEDATA = c_void_p # = DECLARE_HANDLE(HDDEDATA)
HSZ = c_void_p # = DECLARE_HANDLE(HSZ)
LPBYTE = c_char_p # POINTER(BYTE)
LPDWORD = POINTER(DWORD)
LPSTR = c_char_p
ULONG_PTR = c_ulong
# See windows/ddeml.h for declaration of struct CONVCONTEXT
PCONVCONTEXT = c_void_p
DMLERR_NO_ERROR = 0
# Predefined Clipboard Formats
CF_TEXT = 1
CF_BITMAP = 2
CF_METAFILEPICT = 3
CF_SYLK = 4
CF_DIF = 5
CF_TIFF = 6
CF_OEMTEXT = 7
CF_DIB = 8
CF_PALETTE = 9
CF_PENDATA = 10
CF_RIFF = 11
CF_WAVE = 12
CF_UNICODETEXT = 13
CF_ENHMETAFILE = 14
CF_HDROP = 15
CF_LOCALE = 16
CF_DIBV5 = 17
CF_MAX = 18
DDE_FACK = 0x8000
DDE_FBUSY = 0x4000
DDE_FDEFERUPD = 0x4000
DDE_FACKREQ = 0x8000
DDE_FRELEASE = 0x2000
DDE_FREQUESTED = 0x1000
DDE_FAPPSTATUS = 0x00FF
DDE_FNOTPROCESSED = 0x0000
DDE_FACKRESERVED = (~(DDE_FACK | DDE_FBUSY | DDE_FAPPSTATUS))
DDE_FADVRESERVED = (~(DDE_FACKREQ | DDE_FDEFERUPD))
DDE_FDATRESERVED = (~(DDE_FACKREQ | DDE_FRELEASE | DDE_FREQUESTED))
DDE_FPOKRESERVED = (~(DDE_FRELEASE))
XTYPF_NOBLOCK = 0x0002
XTYPF_NODATA = 0x0004
XTYPF_ACKREQ = 0x0008
XCLASS_MASK = 0xFC00
XCLASS_BOOL = 0x1000
XCLASS_DATA = 0x2000
XCLASS_FLAGS = 0x4000
XCLASS_NOTIFICATION = 0x8000
XTYP_ERROR = (0x0000 | XCLASS_NOTIFICATION | XTYPF_NOBLOCK)
XTYP_ADVDATA = (0x0010 | XCLASS_FLAGS)
XTYP_ADVREQ = (0x0020 | XCLASS_DATA | XTYPF_NOBLOCK)
XTYP_ADVSTART = (0x0030 | XCLASS_BOOL)
XTYP_ADVSTOP = (0x0040 | XCLASS_NOTIFICATION)
XTYP_EXECUTE = (0x0050 | XCLASS_FLAGS)
XTYP_CONNECT = (0x0060 | XCLASS_BOOL | XTYPF_NOBLOCK)
XTYP_CONNECT_CONFIRM = (0x0070 | XCLASS_NOTIFICATION | XTYPF_NOBLOCK)
XTYP_XACT_COMPLETE = (0x0080 | XCLASS_NOTIFICATION )
XTYP_POKE = (0x0090 | XCLASS_FLAGS)
XTYP_REGISTER = (0x00A0 | XCLASS_NOTIFICATION | XTYPF_NOBLOCK )
XTYP_REQUEST = (0x00B0 | XCLASS_DATA )
XTYP_DISCONNECT = (0x00C0 | XCLASS_NOTIFICATION | XTYPF_NOBLOCK )
XTYP_UNREGISTER = (0x00D0 | XCLASS_NOTIFICATION | XTYPF_NOBLOCK )
XTYP_WILDCONNECT = (0x00E0 | XCLASS_DATA | XTYPF_NOBLOCK)
XTYP_MONITOR = (0x00F0 | XCLASS_NOTIFICATION | XTYPF_NOBLOCK)
XTYP_MASK = 0x00F0
XTYP_SHIFT = 4
TIMEOUT_ASYNC = 0xFFFFFFFF
def get_winfunc(libname, funcname, restype=None, argtypes=(), _libcache={}):
"""Retrieve a function from a library, and set the data types."""
from ctypes import windll
if libname not in _libcache:
_libcache[libname] = windll.LoadLibrary(libname)
func = getattr(_libcache[libname], funcname)
func.argtypes = argtypes
func.restype = restype
return func
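# Prototype of the DDEML callback (PFNCALLBACK): returns HDDEDATA and receives
# (uType, uFmt, hConv, hsz1, hsz2, hDdeData, dwData1, dwData2).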
DDECALLBACK = WINFUNCTYPE(HDDEDATA, UINT, UINT, HCONV, HSZ, HSZ, HDDEDATA,
ULONG_PTR, ULONG_PTR)
class DDE(object):
"""Object containing all the DDE functions"""
AccessData = get_winfunc("user32", "DdeAccessData", LPBYTE, (HDDEDATA, LPDWORD))
ClientTransaction = get_winfunc("user32", "DdeClientTransaction", HDDEDATA, (LPBYTE, DWORD, HCONV, HSZ, UINT, UINT, DWORD, LPDWORD))
Connect = get_winfunc("user32", "DdeConnect", HCONV, (DWORD, HSZ, HSZ, PCONVCONTEXT))
CreateStringHandle = get_winfunc("user32", "DdeCreateStringHandleW", HSZ, (DWORD, LPCWSTR, UINT))
Disconnect = get_winfunc("user32", "DdeDisconnect", BOOL, (HCONV,))
GetLastError = get_winfunc("user32", "DdeGetLastError", UINT, (DWORD,))
Initialize = get_winfunc("user32", "DdeInitializeW", UINT, (LPDWORD, DDECALLBACK, DWORD, DWORD))
FreeDataHandle = get_winfunc("user32", "DdeFreeDataHandle", BOOL, (HDDEDATA,))
FreeStringHandle = get_winfunc("user32", "DdeFreeStringHandle", BOOL, (DWORD, HSZ))
QueryString = get_winfunc("user32", "DdeQueryStringA", DWORD, (DWORD, HSZ, LPSTR, DWORD, c_int))
UnaccessData = get_winfunc("user32", "DdeUnaccessData", BOOL, (HDDEDATA,))
Uninitialize = get_winfunc("user32", "DdeUninitialize", BOOL, (DWORD,))
class DDEError(RuntimeError):
"""Exception raise when a DDE errpr occures."""
def __init__(self, msg, idInst=None):
if idInst is None:
RuntimeError.__init__(self, msg)
else:
RuntimeError.__init__(self, "%s (err=%s)" % (msg, hex(DDE.GetLastError(idInst))))
class DDEClient(object):
"""The DDEClient class.
    Use this class to create and manage a connection to a service/topic. To get
    callbacks, subclass DDEClient and override the callback method."""
def __init__(self, service, topic):
"""Create a connection to a service/topic."""
from ctypes import byref
self._idInst = DWORD(0)
self._hConv = HCONV()
self._callback = DDECALLBACK(self._callback)
res = DDE.Initialize(byref(self._idInst), self._callback, 0x00000010, 0)
if res != DMLERR_NO_ERROR:
raise DDEError("Unable to register with DDEML (err=%s)" % hex(res))
hszService = DDE.CreateStringHandle(self._idInst, service, 1200)
hszTopic = DDE.CreateStringHandle(self._idInst, topic, 1200)
self._hConv = DDE.Connect(self._idInst, hszService, hszTopic, PCONVCONTEXT())
DDE.FreeStringHandle(self._idInst, hszTopic)
DDE.FreeStringHandle(self._idInst, hszService)
if not self._hConv:
raise DDEError("Unable to establish a conversation with server", self._idInst)
def __del__(self):
"""Cleanup any active connections."""
if self._hConv:
DDE.Disconnect(self._hConv)
if self._idInst:
DDE.Uninitialize(self._idInst)
def advise(self, item, stop=False):
"""Request updates when DDE data changes."""
from ctypes import byref
hszItem = DDE.CreateStringHandle(self._idInst, item, 1200)
hDdeData = DDE.ClientTransaction(LPBYTE(), 0, self._hConv, hszItem, CF_TEXT, XTYP_ADVSTOP if stop else XTYP_ADVSTART, TIMEOUT_ASYNC, LPDWORD())
DDE.FreeStringHandle(self._idInst, hszItem)
if not hDdeData:
raise DDEError("Unable to %s advise" % ("stop" if stop else "start"), self._idInst)
DDE.FreeDataHandle(hDdeData)
def execute(self, command, timeout=5000):
"""Execute a DDE command."""
pData = c_char_p(command)
cbData = DWORD(len(command) + 1)
hDdeData = DDE.ClientTransaction(pData, cbData, self._hConv, HSZ(), CF_TEXT, XTYP_EXECUTE, timeout, LPDWORD())
if not hDdeData:
raise DDEError("Unable to send command", self._idInst)
DDE.FreeDataHandle(hDdeData)
def request(self, item, timeout=5000):
"""Request data from DDE service."""
from ctypes import byref
hszItem = DDE.CreateStringHandle(self._idInst, item, 1200)
hDdeData = DDE.ClientTransaction(LPBYTE(), 0, self._hConv, hszItem, CF_TEXT, XTYP_REQUEST, timeout, LPDWORD())
DDE.FreeStringHandle(self._idInst, hszItem)
if not hDdeData:
raise DDEError("Unable to request item", self._idInst)
if timeout != TIMEOUT_ASYNC:
pdwSize = DWORD(0)
pData = DDE.AccessData(hDdeData, byref(pdwSize))
if not pData:
DDE.FreeDataHandle(hDdeData)
raise DDEError("Unable to access data", self._idInst)
# TODO: use pdwSize
DDE.UnaccessData(hDdeData)
else:
pData = None
DDE.FreeDataHandle(hDdeData)
return pData
def callback(self, value, item=None):
"""Calback function for advice."""
print "%s: %s" % (item, value)
def _callback(self, wType, uFmt, hConv, hsz1, hsz2, hDdeData, dwData1, dwData2):
if wType == XTYP_ADVDATA:
from ctypes import byref, create_string_buffer
dwSize = DWORD(0)
pData = DDE.AccessData(hDdeData, byref(dwSize))
if pData:
item = create_string_buffer('\000' * 128)
DDE.QueryString(self._idInst, hsz2, item, 128, 1004)
self.callback(pData, item.value)
DDE.UnaccessData(hDdeData)
return DDE_FACK
return 0
def WinMSGLoop():
"""Run the main windows message loop."""
from ctypes import POINTER, byref, c_ulong
from ctypes.wintypes import BOOL, HWND, MSG, UINT
LPMSG = POINTER(MSG)
LRESULT = c_ulong
GetMessage = get_winfunc("user32", "GetMessageW", BOOL, (LPMSG, HWND, UINT, UINT))
TranslateMessage = get_winfunc("user32", "TranslateMessage", BOOL, (LPMSG,))
# restype = LRESULT
DispatchMessage = get_winfunc("user32", "DispatchMessageW", LRESULT, (LPMSG,))
msg = MSG()
lpmsg = byref(msg)
while GetMessage(lpmsg, HWND(), 0, 0) > 0:
TranslateMessage(lpmsg)
DispatchMessage(lpmsg)
if __name__ == "__main__":
# Create a connection to ESOTS (OTS Swardfish) and to instrument MAR11 ALSI
dde = DDEClient("ESOTS", "MAR11 ALSI")
# Monitor the various attributes from MAR11 ALSI
dde.advise("BIDQ") # Last bid quantity
dde.advise("BIDP") # Last bid price
dde.advise("ASKP") # Last ask price
dde.advise("ASKQ") # Last ask quantity
dde.advise("LASTP") # Last traded price
dde.advise("TIME") # Last traded time
dde.advise("VOL") # Daily volume
# Run the main message loop to receive advices
WinMSGLoop()
|
EISeg/eiseg/widget/scene.py | JamesLim-sy/PaddleSeg | 4,708 | 12740746 | from qtpy import QtWidgets, QtCore
from qtpy.QtCore import Qt
class AnnotationScene(QtWidgets.QGraphicsScene):
clickRequest = QtCore.Signal(int, int, bool)
def __init__(self, parent=None):
super(AnnotationScene, self).__init__(parent)
self.creating = False
self.polygon_items = []
def updatePolygonSize(self):
for poly in self.polygon_items:
for grip in poly.m_items:
grip.updateSize()
for line in poly.m_lines:
line.updateWidth()
def setCreating(self, creating=True):
self.creating = creating
def mousePressEvent(self, ev):
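        # When not drawing a polygon and not hovering over an existing one,
        # forward the click as an (x, y, is_left_button) request.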
pos = ev.scenePos()
if not self.creating and not self.hovering:
if ev.buttons() in [Qt.LeftButton, Qt.RightButton]:
self.clickRequest.emit(
int(pos.x()), int(pos.y()), ev.buttons() == Qt.LeftButton
)
elif self.creating:
self.polygon_item.removeLastPoint()
self.polygon_item.addPointLast(ev.scenePos())
# movable element
self.polygon_item.addPointLast(ev.scenePos())
super(AnnotationScene, self).mousePressEvent(ev)
def mouseMoveEvent(self, ev):
if self.creating:
self.polygon_item.movePoint(
# self.polygon_item.number_of_points() - 1, ev.scenePos()
len(self.polygon_item) - 1,
ev.scenePos(),
)
super(AnnotationScene, self).mouseMoveEvent(ev)
@property
def item_hovering(self):
for poly in self.polygon_items:
if poly.item_hovering:
return True
return False
@property
def polygon_hovering(self):
for poly in self.polygon_items:
if poly.polygon_hovering:
return True
return False
@property
def line_hovering(self):
for poly in self.polygon_items:
if poly.line_hovering:
return True
return False
@property
def hovering(self):
return self.item_hovering or self.polygon_hovering or self.line_hovering
|
gluon/packages/dal/pydal/dialects/couchdb.py | GeorgesBrantley/ResistanceGame | 408 | 12740747 | <filename>gluon/packages/dal/pydal/dialects/couchdb.py
from ..adapters.couchdb import CouchDB
from .base import NoSQLDialect
from . import dialects
@dialects.register_for(CouchDB)
class CouchDBDialect(NoSQLDialect):
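    # Query operators are rendered as JavaScript expressions (&&, ||, ==, !=)
    # for the CouchDB adapter.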
def _and(self, first, second, query_env={}):
return "(%s && %s)" % (
self.expand(first, query_env=query_env),
self.expand(second, query_env=query_env),
)
def _or(self, first, second, query_env={}):
return "(%s || %s)" % (
self.expand(first, query_env=query_env),
self.expand(second, query_env=query_env),
)
def eq(self, first, second=None, query_env={}):
if second is None:
return "(%s == null)" % self.expand(first, query_env=query_env)
return "(%s == %s)" % (
self.expand(first, query_env=query_env),
self.expand(second, first.type, query_env=query_env),
)
def ne(self, first, second=None, query_env={}):
if second is None:
return "(%s != null)" % self.expand(first, query_env=query_env)
return "(%s != %s)" % (
self.expand(first, query_env=query_env),
self.expand(second, first.type, query_env=query_env),
)
def comma(self, first, second, query_env={}):
return "%s + %s" % (
self.expand(first, query_env=query_env),
self.expand(second, query_env=query_env),
)
|
dash_docs/chapters/dash_core_components/Markdown/index.py | joelostblom/dash-docs | 379 | 12740759 | <reponame>joelostblom/dash-docs
# -*- coding: utf-8 -*-
import dash_core_components as dcc
import dash_html_components as html
from dash_docs import styles
from dash_docs import tools
from dash_docs import reusable_components as rc
examples = tools.load_examples(__file__)
layout = html.Div(children=[
html.H1("Markdown Examples and Reference"),
html.H2("Headers"),
rc.ComponentBlock("""import dash_core_components as dcc
dcc.Markdown('''
# This is an <h1> tag
## This is an <h2> tag
###### This is an <h6> tag
''')"""),
html.H2("Emphasis"),
rc.ComponentBlock("""import dash_core_components as dcc
dcc.Markdown('''
*This text will be italic*
_This will also be italic_
**This text will be bold**
__This will also be bold__
_You **can** combine them_
''')"""),
html.Hr(),
html.H2("Lists"),
rc.ComponentBlock("""import dash_core_components as dcc
dcc.Markdown('''
* Item 1
* Item 2
* Item 2a
* Item 2b
''')"""),
html.Hr(),
html.H2("Block Quotes"),
rc.ComponentBlock("""import dash_core_components as dcc
dcc.Markdown('''
>
> Block quotes are used to highlight text.
>
''')"""),
html.Hr(),
html.H2("Links"),
rc.ComponentBlock("""import dash_core_components as dcc
dcc.Markdown('''
[Dash User Guide](/)
''')"""),
html.Hr(),
html.H2("Inline Code"),
html.P("Any block of text surrounded by ` ` will rendered as inline-code. "),
# Don't use ComponentBlock for markdown block quotes... too complicated to
# get all the nested quotes right!
rc.Markdown("""
````py
import dash_core_components as dcc
dcc.Markdown('''
Inline code snippet: `True`
Block code snippet:
```py
import dash
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
```
''')
````
"""),
html.Div(rc.Markdown('''
Inline code snippet: `True`
Block code snippet:
```py
import dash
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
```
'''), className='example-container'),
rc.Markdown('''
Only certain languages are supported by default in
`dcc.Markdown`. For more details about how to customize the
languages and colour schemes, please see ["Syntax Highlighting
With
Markdown"](https://dash.plot.ly/external-resources#md-syntax-highlight).
'''),
html.H2('dcc.Markdown Properties'),
rc.ComponentReference('Markdown')
])
|
CalibTracker/SiStripDCS/python/popcon_last_value_cfg.py | ckamtsikis/cmssw | 852 | 12740760 | # Import configurations
import FWCore.ParameterSet.Config as cms
process = cms.Process("test")
process.load("CalibTracker.SiStripDCS.MessLogger_cfi")
process.SiStripConfigDb = cms.Service("SiStripConfigDb",
ConfDb = cms.untracked.string('username/password@cms_omds_nolb'),
TNS_ADMIN = cms.untracked.string('.'),
UsingDb = cms.untracked.bool(True),
Partitions = cms.untracked.PSet(
TPDD = cms.untracked.PSet(
PartitionName = cms.untracked.string('TP_08-AUG-2008_1'),
ForceVersions = cms.untracked.bool(True),
FecVersion = cms.untracked.vuint32(430,2),
DcuDetIdsVersion = cms.untracked.vuint32(9,0)
),
TMDD = cms.untracked.PSet(
PartitionName = cms.untracked.string('TM_08-AUG-2008_1'),
ForceVersions = cms.untracked.bool(True),
FecVersion = cms.untracked.vuint32(428,1),
DcuDetIdsVersion = cms.untracked.vuint32(9,0)
),
TIDD = cms.untracked.PSet(
PartitionName = cms.untracked.string('TI_08-AUG-2008_1'),
ForceVersions = cms.untracked.bool(True),
FecVersion = cms.untracked.vuint32(427,1),
DcuDetIdsVersion = cms.untracked.vuint32(9,0)
),
TODD = cms.untracked.PSet(
PartitionName = cms.untracked.string('TO_08-AUG-2008_1'),
ForceVersions = cms.untracked.bool(True),
FecVersion = cms.untracked.vuint32(415,3),
DcuDetIdsVersion = cms.untracked.vuint32(9,0)
),
TEPD2 = cms.untracked.PSet(
PartitionName = cms.untracked.string('TE_27-JUN-2008_2'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(211, 2)
),
TMPD = cms.untracked.PSet(
PartitionName = cms.untracked.string('TE_17-JUN-2008_12'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(163, 1)
),
TEPD1 = cms.untracked.PSet(
PartitionName = cms.untracked.string('TE_24-JUN-2008_2'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(204, 1)
),
TEPD4 = cms.untracked.PSet(
PartitionName = cms.untracked.string('TE_30-JUN-2008_1'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(229, 1)
),
TEPD3 = cms.untracked.PSet(
PartitionName = cms.untracked.string('TE_27-JUN-2008_4'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(214, 1)
),
TPPD = cms.untracked.PSet(
PartitionName = cms.untracked.string('TE_17-JUN-2008_11'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(162, 1)
),
TIPD = cms.untracked.PSet(
PartitionName = cms.untracked.string('TI_17-JUN-2008_2'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(157, 1)
),
TIPD2 = cms.untracked.PSet(
PartitionName = cms.untracked.string('TI_18-JUN-2008_1'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(165, 1)
),
TIPD3 = cms.untracked.PSet(
PartitionName = cms.untracked.string('TI_18-JUN-2008_10'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(179, 1)
),
TIPD4 = cms.untracked.PSet(
PartitionName = cms.untracked.string('TI_20-JUN-2008_1'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(192, 1)
),
TIPD5 = cms.untracked.PSet(
PartitionName = cms.untracked.string('TI_27-JUN-2008_1'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(212, 1)
),
TIPD6 = cms.untracked.PSet(
PartitionName = cms.untracked.string('TI_27-JUN-2008_3'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(218, 1)
),
TOPD = cms.untracked.PSet(
PartitionName = cms.untracked.string('TO_18-JUN-2008_1_TEST_1'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(177, 1)
),
TOPD2 = cms.untracked.PSet(
PartitionName = cms.untracked.string('TO_18-JUN-2008_2'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(178, 1)
),
TOPD3 = cms.untracked.PSet(
PartitionName = cms.untracked.string('TO_30-JUN-2008_1'),
ForceVersions = cms.untracked.bool(True),
DcuPsuMapVersion = cms.untracked.vuint32(228, 1)
)
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.CondDBCommon.connect = cms.string('oracle://cms_omds_nolb/username')
process.SiStripModuleHVBuilder = cms.Service("SiStripModuleHVBuilder",
onlineDB = cms.untracked.string('oracle://cms_omds_nolb/username'),
authPath = cms.untracked.string('.'),
# Format for date/time vector: year, month, day, hour, minute, second, nanosecond
Tmin = cms.untracked.vint32(2008, 10, 13, 1, 0, 0, 0),
Tmax = cms.untracked.vint32(2008, 10, 13, 12, 0, 0, 0),
# Do NOT change this unless you know what you are doing!
TSetMin = cms.untracked.vint32(2007, 11, 26, 0, 0, 0, 0),
# queryType can be either STATUSCHANGE or LASTVALUE
queryType = cms.untracked.string('LASTVALUE'),
# if reading lastValue from file put insert file name here
lastValueFile = cms.untracked.string(''),
# flag to show if you are reading from file for lastValue or not
lastValueFromFile = cms.untracked.bool(False),
#
debugModeOn = cms.untracked.bool(False)
)
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(2),
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
),
timetype = cms.untracked.string('timestamp'),
connect = cms.string('sqlite_file:dbfile.db'),
toPut = cms.VPSet(cms.PSet(
record = cms.string('SiStripDetVOffRcd'),
tag = cms.string('SiStripDetVOff_Fake_31X')
)),
logconnect = cms.untracked.string('sqlite_file:logfile.db')
)
process.siStripPopConModuleHV = cms.EDAnalyzer("SiStripPopConModuleHV",
record = cms.string('SiStripDetVOffRcd'),
loggingOn= cms.untracked.bool(True),
SinceAppendMode=cms.bool(True),
Source = cms.PSet(
name = cms.untracked.string('default')
)
)
process.p = cms.Path(process.siStripPopConModuleHV)
|
pandas/tests/tseries/offsets/test_business_day.py | RakhithJK/pandas | 28,899 | 12740773 | <reponame>RakhithJK/pandas<gh_stars>1000+
"""
Tests for offsets.BDay
"""
from datetime import (
date,
datetime,
timedelta,
)
import pytest
from pandas._libs.tslibs.offsets import (
ApplyTypeError,
BDay,
BMonthEnd,
)
from pandas import (
DatetimeIndex,
_testing as tm,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
class TestBusinessDay(Base):
_offset = BDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset1 = self.offset
self.offset2 = BDay(2)
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<BusinessDay>"
assert repr(self.offset2) == "<2 * BusinessDays>"
expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def test_with_offset_index(self):
dti = DatetimeIndex([self.d])
result = dti + (self.offset + timedelta(hours=2))
expected = DatetimeIndex([datetime(2008, 1, 2, 2)])
tm.assert_index_equal(result, expected)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 1, 3)
def testRollback1(self):
assert BDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert BDay(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)
def testRollforward1(self):
assert BDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert BDay(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)
def test_roll_date_object(self):
offset = BDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
def test_is_on_offset(self):
tests = [
(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False),
]
for offset, d, expected in tests:
assert_is_on_offset(offset, d, expected)
apply_cases: _ApplyCases = [
(
BDay(),
{
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8),
},
),
(
2 * BDay(),
{
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9),
},
),
(
-BDay(),
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7),
},
),
(
-2 * BDay(),
{
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7),
},
),
(
BDay(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + BDay(10)
assert result == datetime(2012, 11, 6)
result = dt + BDay(100) - BDay(100)
assert result == dt
off = BDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
off = BDay() * 10
rs = datetime(2014, 1, 5) + off # see #5890
xp = datetime(2014, 1, 17)
assert rs == xp
def test_apply_corner(self):
msg = "Only know how to combine business day with datetime or timedelta"
with pytest.raises(ApplyTypeError, match=msg):
BDay().apply(BMonthEnd())
|
analysis/tradeoff.py | harvardnlp/cascaded-generation | 122 | 12740778 | <reponame>harvardnlp/cascaded-generation
import sys, os
import re, subprocess
dataset = 0
if __name__ == '__main__':
if len(sys.argv) != 3:
print ('Usage: python %s <input> <output>'%sys.argv[0])
sys.exit(1)
with open(sys.argv[1]) as fin:
with open(sys.argv[2], 'w') as fout:
for line in fin:
items = line.strip().split('&')
name = items[0]
m = re.match(r'.*?K\s*=\s*(\d+).*?', name)
assert m, name
K = int(m.group(1))
if K > 128:
continue
#print (K)
m = re.match(r'.*?rounds\s*=\s*(\d+).*?', name)
assert m, name
rounds = int(m.group(1))
#print (rounds)
m = re.match(r'.*?D\s*=\s*(\d+).*?', name)
assert m, name
D = int(m.group(1))
idx = 2 + 2*dataset
item1 = items[idx]
m = re.match(r'\s*([\d\.]+).*?', item1)
if not m:
continue
bleu = float(m.group(1))
item2 = items[idx+1]
m = re.match(r'\s*([\d\.]+).*?', item2)
assert m, item2
latency = m.group(1)
latency = float(latency)
fout.write(f'{K}\t{rounds}\t{D}\t{bleu}\t{latency}\n')
#print (D)
#output_items = []
#output_items.append(name)
#output_items.append(items[1])
#output_items[-1] = output_items[-1] + '\\\\'
#fout.write('&'.join(output_items) + '\n')
|
data/morphology/grab_unimorph_data.py | wannaphong/wikipron | 111 | 12740780 | <reponame>wannaphong/wikipron
#!/usr/bin/env python
"""Downloads UniMorph morphological paradigms data."""
import json
import logging
import os
import time
from typing import Dict, List
import requests
UNIMORPH_DICT_PATH = "unimorph_languages.json"
def download(data_to_grab: Dict[str, List[str]]) -> Dict[str, List[str]]:
to_retry = {}
os.mkdir("tsv")
for language, urls in data_to_grab.items():
with open(f"tsv/{language}.tsv", "wb") as sink:
for url in urls:
with requests.get(url, stream=True) as response:
logging.info("Downloading: %s", language)
if response.status_code == 200:
sink.write(response.content)
else:
logging.info(
"Status code %d while downloading %s",
response.status_code,
language,
)
to_retry[language] = data_to_grab[language]
continue
# 30 seconds appears to not be enough, 60-70 seconds works well
# but takes a long time.
time.sleep(45)
return to_retry
def main() -> None:
with open(UNIMORPH_DICT_PATH, "r", encoding="utf-8") as langs:
languages = json.load(langs)
# Hack for repeatedly attempting to download Wortschatz data
# as a way of getting around 404 response from their server.
langs_to_retry = download(languages)
while langs_to_retry:
langs_to_retry = download(langs_to_retry)
if __name__ == "__main__":
logging.basicConfig(
format="%(filename)s %(levelname)s: %(message)s", level="INFO"
)
main()
|
tests/backends/aiida_sqlalchemy/test_schema.py | azadoks/aiida-core | 180 | 12740809 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=import-error,no-name-in-module
"""Test object relationships in the database."""
import warnings
from sqlalchemy import exc as sa_exc
import aiida
from aiida.backends.sqlalchemy.models.node import DbNode
from aiida.backends.sqlalchemy.models.user import DbUser
from aiida.backends.testbase import AiidaTestCase
from aiida.common.links import LinkType
from aiida.common.utils import get_new_uuid
from aiida.orm import CalculationNode, Data
class TestRelationshipsSQLA(AiidaTestCase):
"""Class of tests concerning the schema and the correct
implementation of relationships within the AiiDA ORM
    The general naming convention is the following:
    1) tests on one-to-many relationships: test_<Parent>_<child> (Parent class is capitalized).
    2) tests on many-to-many relationships: test_<peer>_<peer> (none is
capitalized)."""
def test_outputs_children_relationship(self):
"""This test checks that the outputs_q, children_q relationship and the
corresponding properties work as expected."""
n_1 = Data().store()
n_2 = CalculationNode()
n_3 = Data().store()
# Create a link between these 2 nodes
n_2.add_incoming(n_1, link_type=LinkType.INPUT_CALC, link_label='N1')
n_2.store()
n_3.add_incoming(n_2, link_type=LinkType.CREATE, link_label='N2')
# Check that the result of outputs is a list
self.assertIsInstance(n_1.backend_entity.dbmodel.outputs, list, 'This is expected to be a list')
# Check that the result of outputs_q is a query
from sqlalchemy.orm.dynamic import AppenderQuery
self.assertIsInstance(
n_1.backend_entity.dbmodel.outputs_q, AppenderQuery, 'This is expected to be an AppenderQuery'
)
# Check that the result of outputs is correct
out = {_.pk for _ in n_1.backend_entity.dbmodel.outputs}
self.assertEqual(out, set([n_2.pk]))
def test_inputs_parents_relationship(self):
"""This test checks that the inputs_q, parents_q relationship and the
corresponding properties work as expected."""
n_1 = Data().store()
n_2 = CalculationNode()
n_3 = Data().store()
# Create a link between these 2 nodes
n_2.add_incoming(n_1, link_type=LinkType.INPUT_CALC, link_label='N1')
n_2.store()
n_3.add_incoming(n_2, link_type=LinkType.CREATE, link_label='N2')
# Check that the result of outputs is a list
self.assertIsInstance(n_1.backend_entity.dbmodel.inputs, list, 'This is expected to be a list')
# Check that the result of outputs_q is a query
from sqlalchemy.orm.dynamic import AppenderQuery
self.assertIsInstance(
n_1.backend_entity.dbmodel.inputs_q, AppenderQuery, 'This is expected to be an AppenderQuery'
)
# Check that the result of inputs is correct
out = {_.pk for _ in n_3.backend_entity.dbmodel.inputs}
self.assertEqual(out, set([n_2.pk]))
def test_user_node_1(self):
"""Test that when a user and a node having that user are created,
storing NODE induces storage of the USER
Assert the correct storage of user and node."""
# Create user
dbu1 = DbUser('<EMAIL>', 'spam', 'eggs', 'monty')
        # Create node
node_dict = dict(user=dbu1)
dbn_1 = DbNode(**node_dict)
# Check that the two are neither flushed nor committed
self.assertIsNone(dbu1.id)
self.assertIsNone(dbn_1.id)
session = aiida.backends.sqlalchemy.get_scoped_session()
# Add only the node and commit
session.add(dbn_1)
session.commit()
# Check that a pk has been assigned, which means that things have
# been flushed into the database
self.assertIsNotNone(dbn_1.id)
self.assertIsNotNone(dbu1.id)
def test_user_node_2(self):
"""Test that when a user and a node having that user are created,
storing USER does NOT induce storage of the NODE
Assert the correct storage of user and node."""
# Create user
dbu1 = DbUser('tests2<EMAIL>', 'spam', 'eggs', 'monty')
        # Create node
node_dict = dict(user=dbu1)
dbn_1 = DbNode(**node_dict)
# Check that the two are neither flushed nor committed
self.assertIsNone(dbu1.id)
self.assertIsNone(dbn_1.id)
session = aiida.backends.sqlalchemy.get_scoped_session()
# Catch all the SQLAlchemy warnings generated by the following code
with warnings.catch_warnings(): # pylint: disable=no-member
warnings.simplefilter('ignore', category=sa_exc.SAWarning) # pylint: disable=no-member
# Add only the user and commit
session.add(dbu1)
session.commit()
# Check that a pk has been assigned (or not), which means that things
# have been flushed into the database
self.assertIsNotNone(dbu1.id)
self.assertIsNone(dbn_1.id)
def test_user_node_3(self):
"""Test that when a user and two nodes having that user are created,
storing only ONE NODE induces storage of that node, of the user but
not of the other node
Assert the correct storage of the user and node. Assert the
non-storage of the other node."""
# Create user
dbu1 = DbUser('tests3@schema', 'spam', 'eggs', 'monty')
        # Create node
node_dict = dict(user=dbu1)
dbn_1 = DbNode(**node_dict)
dbn_2 = DbNode(**node_dict)
# Check that the two are neither flushed nor committed
self.assertIsNone(dbu1.id)
self.assertIsNone(dbn_1.id)
self.assertIsNone(dbn_2.id)
session = aiida.backends.sqlalchemy.get_scoped_session()
# Add only first node and commit
session.add(dbn_1)
with warnings.catch_warnings():
# suppress known SAWarning that we have not added dbn_2
warnings.simplefilter('ignore', category=sa_exc.SAWarning)
session.commit()
# Check for which object a pk has been assigned, which means that
# things have been at least flushed into the database
self.assertIsNotNone(dbu1.id)
self.assertIsNotNone(dbn_1.id)
self.assertIsNone(dbn_2.id)
def test_user_node_4(self):
"""Test that when several nodes are created with the same user and each
        of them is assigned to the same name, storage of the last node object
        associated with that name does not trigger storage of all objects.
Assert the correct storage of the user and node. Assert the
non-storage of the other nodes."""
# Create user
dbu1 = DbUser('tests4@schema', 'spam', 'eggs', 'monty')
        # Create node objects, assigning them to the same name
# Check https://docs.python.org/2/tutorial/classes.html subsec. 9.1
for _ in range(5):
# It is important to change the uuid each time (or any other
# variable) so that a different objects (with a different pointer)
# is actually created in this scope.
dbn_1 = DbNode(user=dbu1, uuid=get_new_uuid())
# Check that the two are neither flushed nor committed
self.assertIsNone(dbu1.id)
self.assertIsNone(dbn_1.id)
session = aiida.backends.sqlalchemy.get_scoped_session()
# Add only first node and commit
session.add(dbn_1)
with warnings.catch_warnings():
# suppress known SAWarning that we have not add the other nodes
warnings.simplefilter('ignore', category=sa_exc.SAWarning)
session.commit()
# Check for which object a pk has been assigned, which means that
# things have been at least flushed into the database
self.assertIsNotNone(dbu1.id)
self.assertIsNotNone(dbn_1.id)
|
locations/spiders/bunnings.py | nbeecher/alltheplaces | 297 | 12740816 | # -*- coding: utf-8 -*-
import re
import json
import scrapy
from locations.items import GeojsonPointItem
class BunningsSpider(scrapy.Spider):
name = "bunnings"
allowed_domains = ["bunnings.com.au"]
start_urls = (
'https://www.bunnings.com.au/stores/',
)
def parse(self, response):
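        # The store list is embedded in the page as a JavaScript assignment;
        # extract the JSON literal with a regex before parsing it.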
raw_data = re.search( "com_bunnings_locations_mapLocations = (.+);", response.text,).group(1)
stores = json.loads(raw_data)
for idx, store in enumerate(stores):
store = store['Store']
properties = {
"lat": store["Location"]["Latitude"],
"lon": store["Location"]["Longitude"],
"name": store["StoreName"],
"addr_full": f'{store["Address"]["Address"]} {store["Address"]["AddressLineTwo"]}'.strip(),
"city": store["Address"]["Suburb"],
"state": store["Address"]["State"],
"postcode": store["Address"]["Postcode"],
"country": "AU",
"phone": store["Phone"],
"website": response.urljoin(store["StoreUrl"]),
"ref": idx
}
yield GeojsonPointItem(**properties) |
meteostat/interface/monthly.py | meteoDaniel/meteostat-python | 133 | 12740820 | """
Monthly Class
Meteorological data provided by Meteostat (https://dev.meteostat.net)
under the terms of the Creative Commons Attribution-NonCommercial
4.0 International Public License.
The code is licensed under the MIT license.
"""
from datetime import datetime
from typing import Union
import numpy as np
import pandas as pd
from meteostat.core.cache import get_file_path, file_in_cache
from meteostat.core.loader import processing_handler, load_handler
from meteostat.utilities.validations import validate_series
from meteostat.utilities.aggregations import degree_mean, weighted_average
from meteostat.interface.timeseries import Timeseries
from meteostat.interface.point import Point
class Monthly(Timeseries):
"""
Retrieve monthly weather data for one or multiple weather stations or
a single geographical point
"""
# The cache subdirectory
cache_subdir: str = 'monthly'
# Default frequency
_freq: str = '1MS'
# Columns
_columns: list = [
'year',
'month',
'tavg',
'tmin',
'tmax',
'prcp',
'snow',
'wdir',
'wspd',
'wpgt',
'pres',
'tsun'
]
# Index of first meteorological column
_first_met_col = 2
# Data types
_types: dict = {
'tavg': 'float64',
'tmin': 'float64',
'tmax': 'float64',
'prcp': 'float64',
'snow': 'float64',
'wdir': 'float64',
'wspd': 'float64',
'wpgt': 'float64',
'pres': 'float64',
'tsun': 'float64'
}
# Columns for date parsing
_parse_dates: dict = {
'time': [0, 1]
}
# Default aggregation functions
aggregations: dict = {
'tavg': 'mean',
'tmin': 'mean',
'tmax': 'mean',
'prcp': 'sum',
'snow': 'max',
'wdir': degree_mean,
'wspd': 'mean',
'wpgt': 'max',
'pres': 'mean',
'tsun': 'sum'
}
def _load(
self,
station: str
) -> None:
"""
Load file from Meteostat
"""
# File name
file = 'monthly/' + ('full' if self._model else 'obs') + \
'/' + station + '.csv.gz'
# Get local file path
path = get_file_path(self.cache_dir, self.cache_subdir, file)
# Check if file in cache
if self.max_age > 0 and file_in_cache(path, self.max_age):
# Read cached data
df = pd.read_pickle(path)
else:
# Get data from Meteostat
df = load_handler(
self.endpoint,
file,
self._columns,
self._types,
self._parse_dates)
# Validate Series
df = validate_series(df, station)
# Save as Pickle
if self.max_age > 0:
df.to_pickle(path)
# Filter time period and append to DataFrame
if self._start and self._end:
# Get time index
time = df.index.get_level_values('time')
# Filter & return
return df.loc[(time >= self._start) & (time <= self._end)]
# Return
return df
def _get_data(self) -> None:
"""
Get all required data
"""
if len(self._stations) > 0:
# List of datasets
datasets = []
for station in self._stations:
datasets.append((
str(station),
))
# Data Processing
return processing_handler(datasets, self._load, self.processes, self.threads)
# Empty DataFrame
return pd.DataFrame(columns=[*self._types])
def _resolve_point(
self,
method: str,
stations: pd.DataFrame,
alt: int,
adapt_temp: bool
) -> None:
"""
Project weather station data onto a single point
"""
if self._stations.size == 0 or self._data.size == 0:
return None
def adjust_temp(data: pd.DataFrame):
"""
Adjust temperature-like data based on altitude
"""
data.loc[data['tavg'] != np.NaN, 'tavg'] = data['tavg'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
data.loc[data['tmin'] != np.NaN, 'tmin'] = data['tmin'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
data.loc[data['tmax'] != np.NaN, 'tmax'] = data['tmax'] + \
((2 / 3) * ((data['elevation'] - alt) / 100))
return data
if method == 'nearest':
if adapt_temp:
# Join elevation of involved weather stations
data = self._data.join(
stations['elevation'], on='station')
# Adapt temperature-like data based on altitude
data = adjust_temp(data)
# Drop elevation & round
data = data.drop('elevation', axis=1).round(1)
else:
data = self._data
self._data = self._data.groupby(
pd.Grouper(level='time', freq=self._freq)).agg('first')
else:
# Join score and elevation of involved weather stations
data = self._data.join(
stations[['score', 'elevation']], on='station')
# Adapt temperature-like data based on altitude
if adapt_temp:
data = adjust_temp(data)
# Exclude non-mean data & perform aggregation
excluded = data['wdir']
excluded = excluded.groupby(
pd.Grouper(level='time', freq=self._freq)).agg('first')
# Aggregate mean data
data = data.groupby(
pd.Grouper(level='time', freq=self._freq)).apply(weighted_average)
# Drop RangeIndex
data.index = data.index.droplevel(1)
# Merge excluded fields
data['wdir'] = excluded
# Drop score and elevation
self._data = data.drop(['score', 'elevation'], axis=1).round(1)
# Set placeholder station ID
self._data['station'] = 'XXXXX'
self._data = self._data.set_index(
['station', self._data.index.get_level_values('time')])
self._stations = pd.Index(['XXXXX'])
def __init__(
self,
loc: Union[pd.DataFrame, Point, list, str],
start: datetime = None,
end: datetime = None,
model: bool = True
) -> None:
# Set list of weather stations
if isinstance(loc, pd.DataFrame):
self._stations = loc.index
elif isinstance(loc, Point):
stations = loc.get_stations('monthly', start, end, model)
self._stations = stations.index
else:
if not isinstance(loc, list):
loc = [loc]
self._stations = pd.Index(loc)
# Set start date
if start is not None:
self._start = start.replace(day=1)
# Set end date
self._end = end
# Set model
self._model = model
# Get data for all weather stations
self._data = self._get_data()
# Interpolate data
if isinstance(loc, Point):
self._resolve_point(loc.method, stations, loc.alt, loc.adapt_temp)
# Clear cache
if self.max_age > 0 and self.autoclean:
self.clear_cache()
def expected_rows(self) -> int:
"""
Return the number of rows expected for the defined date range
"""
return ((self._end.year - self._start.year) * 12 +
self._end.month - self._start.month) + 1
|
tests/Unit/Visualization/Python/Test_Render1D.py | nilsvu/spectre | 117 | 12740826 | #!/usr/bin/env python
# Distributed under the MIT License.
# See LICENSE.txt for details.
from spectre.Visualization.Render1D import (find_extrema_over_data_set,
render_single_time)
import unittest
import os
import numpy as np
import matplotlib as mpl
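# Use a non-interactive backend so the plots render headless (no display needed).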
mpl.use('agg')
class TestRender1D(unittest.TestCase):
def test_find_extrema_over_data_set(self):
test_array = np.array([1.1, 6.45, 0.34, 2.3])
expected_vals = (0.34, 6.45)
self.assertEqual(find_extrema_over_data_set(test_array), expected_vals)
def test_render_single_time(self):
var_name = "Variable Test"
time_slice = 1
output_prefix = "TestRenderSingleTime"
time = [0.0, 0.1]
coords = [[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]]
data = [[5.2, 4.5, 9.0, 2.0, 8.0], [1.1, 4.0, 6.0, 5.3, 3.0]]
# test whether a pdf file is saved when run
render_single_time(var_name, time_slice, output_prefix, time, coords,
data)
self.assertTrue(os.path.isfile(output_prefix + '.pdf'))
os.remove(output_prefix + '.pdf')
if __name__ == '__main__':
unittest.main(verbosity=2)
|
setup.py | XFFXFF/ElegantRL | 129 | 12740827 | from setuptools import setup, find_packages
setup(
name="elegantrl",
version="0.3.3",
author="<NAME>, <NAME>, <NAME>, <NAME>",
author_email="<EMAIL>",
url="https://github.com/AI4Finance-LLC/ElegantRL",
license="Apache 2.0",
packages=find_packages(),
install_requires=[
"gym",
"matplotlib",
"numpy",
"pybullet",
"torch",
"opencv-python",
"box2d-py",
],
description="Lightweight, Efficient and Stable DRL Implementation Using PyTorch",
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
keywords="Deep Reinforcment Learning",
python_requires=">=3.6",
)
|
sample.py | jeammimi/chem2 | 537 | 12740836 | from __future__ import print_function
import argparse
import os
import h5py
import numpy as np
import sys
from molecules.model import MoleculeVAE
from molecules.utils import one_hot_array, one_hot_index, from_one_hot_array, \
decode_smiles_from_indexes, load_dataset
from pylab import figure, axes, scatter, title, show
from rdkit import Chem
from rdkit.Chem import Draw
LATENT_DIM = 292
TARGET = 'autoencoder'
def get_arguments():
parser = argparse.ArgumentParser(description='Molecular autoencoder network')
parser.add_argument('data', type=str, help='File of latent representation tensors for decoding.')
parser.add_argument('model', type=str, help='Trained Keras model to use.')
parser.add_argument('--save_h5', type=str, help='Name of a file to write HDF5 output to.')
parser.add_argument('--target', type=str, default=TARGET,
help='What model to sample from: autoencoder, encoder, decoder.')
parser.add_argument('--latent_dim', type=int, metavar='N', default=LATENT_DIM,
help='Dimensionality of the latent representation.')
return parser.parse_args()
def read_latent_data(filename):
h5f = h5py.File(filename, 'r')
data = h5f['latent_vectors'][:]
charset = h5f['charset'][:]
h5f.close()
return (data, charset)
def autoencoder(args, model):
latent_dim = args.latent_dim
data, charset = load_dataset(args.data, split = False)
if os.path.isfile(args.model):
model.load(charset, args.model, latent_rep_size = latent_dim)
else:
raise ValueError("Model file %s doesn't exist" % args.model)
sampled = model.autoencoder.predict(data[0].reshape(1, 120, len(charset))).argmax(axis=2)[0]
mol = decode_smiles_from_indexes(map(from_one_hot_array, data[0]), charset)
sampled = decode_smiles_from_indexes(sampled, charset)
print(mol)
print(sampled)
def decoder(args, model):
latent_dim = args.latent_dim
data, charset = read_latent_data(args.data)
if os.path.isfile(args.model):
model.load(charset, args.model, latent_rep_size = latent_dim)
else:
raise ValueError("Model file %s doesn't exist" % args.model)
sampled = model.decoder.predict(data[0].reshape(1, latent_dim)).argmax(axis=2)[0]
sampled = decode_smiles_from_indexes(sampled, charset)
print(sampled)
def encoder(args, model):
latent_dim = args.latent_dim
data, charset = load_dataset(args.data, split = False)
if os.path.isfile(args.model):
model.load(charset, args.model, latent_rep_size = latent_dim)
else:
raise ValueError("Model file %s doesn't exist" % args.model)
x_latent = model.encoder.predict(data)
if args.save_h5:
h5f = h5py.File(args.save_h5, 'w')
h5f.create_dataset('charset', data = charset)
h5f.create_dataset('latent_vectors', data = x_latent)
h5f.close()
else:
np.savetxt(sys.stdout, x_latent, delimiter = '\t')
def main():
args = get_arguments()
model = MoleculeVAE()
if args.target == 'autoencoder':
autoencoder(args, model)
elif args.target == 'encoder':
encoder(args, model)
elif args.target == 'decoder':
decoder(args, model)
if __name__ == '__main__':
main()
|
codigo/Live112/locust_demo/demo_server/flask-server.py | cassiasamp/live-de-python | 572 | 12740838 | from random import randint
from flask import Flask, request, jsonify, redirect, make_response
app = Flask(__name__)
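# A single random token generated at startup stands in for a real auth credential in this demo.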
auth = randint(100, 50000)
@app.route('/get-auth', methods=['POST'])
def get_auth_cookie():
req = request.get_json()
if req['pass'] == '<PASSWORD>':
res = make_response(jsonify({'auth': str(auth)}))
res.set_cookie('auth', str(auth))
else:
res = make_response(jsonify({'erro': 'nao autorizado'}), 401)
res.set_cookie('auth', '0')
return res
@app.route('/get-complex-object', methods=['GET'])
def get_complex_object():
print(bool(request.args.get('returnObject')))
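    # Note: query-string values are strings, so any non-empty value
    # (including "false") is treated as truthy here.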
if bool(request.args.get('returnObject')):
return_object = {
"complexObj":
[
{
"id": "0001",
"type": "donut",
"name": "Cake",
"ppu": 0.55,
"batters":
{
"batter":
[
{"id": "1001", "type": "Regular"},
{"id": "1002", "type": "Chocolate"},
{"id": "1003", "type": "Blueberry"},
{"id": "1004", "type": "Devil's Food"}
]
},
"topping":
[
{"id": "5001", "type": "None"},
{"id": "5002", "type": "Glazed"},
{"id": "5005", "type": "Sugar"},
{"id": "5007", "type": "Powdered Sugar"},
{"id": "5006", "type": "Chocolate with Sprinkles"},
{"id": "5003", "type": "Chocolate"},
{"id": "5004", "type": "Maple"}
]
},
{
"id": "0002",
"type": "donut",
"name": "Raised",
"ppu": 0.55,
"batters":
{
"batter":
[
{"id": "1001", "type": "Regular"}
]
},
"topping":
[
{"id": "5001", "type": "None"},
{"id": "5002", "type": "Glazed"},
{"id": "5005", "type": "Sugar"},
{"id": "5003", "type": "Chocolate"},
{"id": "5004", "type": "Maple"}
]
},
{
"id": "0003",
"type": "donut",
"name": "Old Fashioned",
"ppu": 0.55,
"batters":
{
"batter":
[
{"id": "1001", "type": "Regular"},
{"id": "1002", "type": "Chocolate"}
]
},
"topping":
[
{"id": "5001", "type": "None"},
{"id": "5002", "type": "Glazed"},
{"id": "5003", "type": "Chocolate"},
{"id": "5004", "type": "Maple"}
]
}
]
}
return jsonify(return_object)
return jsonify({"erro": "erro"})
@app.route('/nao-autorizado-param', methods=['GET'])
def get_redirect():
if request.args.get('auth') and int(request.args.get('auth')) == auth:
return jsonify({'redirected': False})
return redirect("http://localhost:5000/redirected", code=302)
@app.route('/nao-autorizado-cookie', methods=['GET'])
def get_redirect_cookie():
if 'auth' in request.cookies and request.cookies['auth'] == str(auth):
return jsonify({'redirected': False})
return redirect("http://localhost:5000/redirected", code=302)
@app.route('/redirected', methods=['GET'])
def redirected():
return jsonify([{"redirected": True}])
if __name__ == '__main__':
app.run(debug=True, host="0.0.0.0") # run app in debug mode on port 5000.
|
widgets/toolbar/lisp_codegen.py | ardovm/wxGlade | 225 | 12740850 | <gh_stars>100-1000
"""\
Lisp generator functions for wxToolBar objects
@copyright: 2002-2004 <NAME>. aka crazyinsomniac on sourceforge
@copyright: 2014-2016 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import common
import wcodegen
from .tool import *
#from .codegen import ToolsHandler
class LispCodeGenerator(wcodegen.LispWidgetCodeWriter):
def get_properties_code(self, obj):
prop = obj.properties
out = []
append = out.append
obj_name = '(slot-%s obj)' % self.codegen._format_name(obj.name)
if obj.properties["bitmapsize"].is_active():
w, h = obj.properties["bitmapsize"].get_tuple()
append( '(wxToolBar_SetToolBitmapSize %s %s %s)\n' % (obj_name, w, h) )
if obj.properties["margins"].is_active():
w, h = obj.properties["margins"].get_tuple()
append( '(wxToolBar_SetMargins %s %s %s)\n' % (obj_name, w, h) )
if obj.properties["packing"].is_active():
append( '(wxToolBar_SetToolPacking %s %s)\n' % (obj_name, obj.packing) )
if obj.properties["separation"].is_active():
append( '(wxToolBar_SetToolSeparation %s %s)\n' % (obj_name, obj.separation) )
return out
def get_init_code(self, obj):
out = []
append = out.append
ids = []
obj_name = self.format_widget_access(obj)
for tool in obj.tools:
if tool.id == '---': # item is a separator
append( '(wxToolBar_AddSeparator %s)\n' % obj_name )
else:
name, val = self.codegen.generate_code_id(None, tool.id)
if not name and (not val or val == '-1'):
wid = 'Wx::NewId()'
else:
if name:
ids.append(name)
wid = val
kinds = ['wxITEM_NORMAL', 'wxITEM_CHECK', 'wxITEM_RADIO']
try:
kind = kinds[int(tool.type)]
except (IndexError, ValueError):
kind = 'wxITEM_NORMAL'
bmp1 = self.generate_code_bitmap(tool.bitmap1)
bmp2 = self.generate_code_bitmap(tool.bitmap2)
# append('%s->AddLabelTool(%s, %s, %s, %s, %s, %s, %s);\n' %
append( '(wxToolBar_AddTool %s %s %s %s %s %s %s %s)\n' %
(obj_name, wid, self.codegen.quote_str(tool.label),
bmp1, bmp2, kind,
self.codegen.quote_str(tool.short_help),
self.codegen.quote_str(tool.long_help)) )
return ids + out
def get_code(self, obj):
"function that generates Lisp code for the toolbar of a wxFrame"
style = obj.properties['style'].get_string_value()
if not style:
style = 'wxTB_HORIZONTAL'
else:
style += "|wxTB_HORIZONTAL"
style = self.cn_f(style)
parent = self.format_widget_access(obj.parent_window)
obj_name = self.codegen._format_name(obj.name)
init = [ ';;; Tool Bar\n',
'(setf (slot-%s obj) (wxToolBar_Create %s -1 -1 -1 -1 -1 %s))\n' % (obj_name, parent, style),
] + self.get_init_code(obj) + self.get_properties_code(obj) + [
'(wxFrame_SetToolBar (slot-top-window obj) (slot-%s obj))\n' % obj_name,
'(wxToolBar_Realize %s)\n' % self.format_widget_access(obj),
';;; Tool Bar end\n']
return init, []
def get_layout_code(self, obj):
obj_name = '(slot-%s obj)' % self.codegen._format_name(obj.name)
return ['(wxToolBar_Realize %s)\n' % obj_name]
def initialize():
klass = 'wxToolBar'
common.class_names['EditToolBar'] = klass
common.register('lisp', klass, LispCodeGenerator(klass) )#, 'tools', ToolsHandler)
|
tests/test_speed.py | dcslagel/lasio | 285 | 12740879 | import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import glob
import fnmatch
import traceback
import logging
import numpy
import pytest
import lasio
test_dir = os.path.dirname(__file__)
egfn = lambda fn: os.path.join(os.path.dirname(__file__), "examples", fn)
stegfn = lambda vers, fn: os.path.join(os.path.dirname(__file__), "examples", vers, fn)
logger = logging.getLogger(__name__)
def read_file():
las = lasio.read(stegfn("1.2", "sample_big.las"))
def test_read_v12_sample_big(benchmark):
benchmark(read_file)
|
modules/nltk_contrib/toolbox/errors.py | h4ck3rm1k3/NLP-project | 123 | 12740897 | <filename>modules/nltk_contrib/toolbox/errors.py
# Natural Language Toolkit: Shoebox Errors
#
# Copyright (C) 2001-2006 NLTK Project
# Author: <NAME> <<EMAIL>>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
This module provides Shoebox exceptions.
"""
# ---------------------------------------------------------------------
# CLASS: ShoeboxError
# DESC: ???
# ---------------------------------------------------------------------
class ShoeboxError(Exception):
"""
This is the base class for all Shoebox errors.
"""
def __init__(self):
self._msg = ""
# ---------------------------------------------
# CLASS: ValidationError
# DESC: ???
# ---------------------------------------------
class NonUniqueEntryError(ShoeboxError):
"""
    Raised when a Shoebox entry that is expected to be unique occurs more than once.
"""
def __init__(self) :
pass
class ValidationError(ShoeboxError):
def __init__(self):
pass
def setField(self, field):
self._field = field
def getField(self):
return self._field
# ---------------------------------------------
# CLASS: NoMetadataFound
# DESC: ???
# ---------------------------------------------
class NoMetadataFound(ValidationError):
def __init__(self, field):
self._field = field
class FieldError(ShoeboxError):
def __init__(self):
pass
    def __str__(self):
        return getattr(self, "_msg", self.__class__.__name__)
class NonUniqueFieldError(FieldError):
"""
Error raised when an attempt is made to retrieve a unique field which has more than one value
"""
def __init__(self, entry):
self._entry = entry
def setEntry(self, entry):
self._entry = entry
def getEntry(self):
return self._entry
# ---------------------------------------------
# CLASS: BadFieldValue
# DESC: ???
# ---------------------------------------------
class BadFieldValueError(ValidationError, FieldError):
FIELD_VALUE_ERROR_RANGE_SET = '1'
FIELD_VALUE_ERROR_NO_WORD_WRAP = '2'
FIELD_VALUE_ERROR_EMPTY_VALUE = '3'
FIELD_VALUE_ERROR_SINGLE_WORD = '4'
errorTypes = {
'1': "Range Set",
'2': "No Word Wrap",
'3': "Empty Value",
'4': "Single Word"
}
def __init__(self, errorType, entry, field, fmMetadata):
self._entry = entry
self._errorType = errorType
self._field = field
self._fmMetadata = fmMetadata
def __str__(self):
e = self.getEntry()
f = self.getField()
typ = self.getErrorDescription()
s = "'%s' error in '\\%s' field of record %i!\nRecord:\n%s" % (typ, f.getMarker(), e.getNumber(), e.getRawText())
return s
def getFieldMarkerMetadata(self):
return self._fmMetadata
def setFieldMarkerMetadata(self, fmMetadata):
self._fmMetadata = fmMetadata
def getErrorDescription(self):
try:
return self.errorTypes[self.getErrorType()]
except:
return None
def getErrorType(self):
return self._errorType
def setErrorType(self, errorType):
self._errorType = errorType
def getEntry(self):
return self._entry
def setEntry(self, entry):
self._entry = entry
|
dev/Gems/CloudGemComputeFarm/v1/Harness/main.py | BadDevCode/lumberyard | 1,738 | 12740900 | import argparse
import boto3
import json
import os
import sys
from six.moves import urllib
import uuid
import traceback
from botocore.exceptions import ClientError
from dictionary_sorter import divide
from dictionary_sorter import merge
from dictionary_sorter import build
from harness import config
from harness import decider
from harness import worker
from harness import cloudwatch
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--domain", required=True)
parser.add_argument("-t", "--task-list", required=True)
parser.add_argument("--div-task", required=True)
parser.add_argument("--div-task-version", default="1.0")
parser.add_argument("--merge-task", required=True)
parser.add_argument("--merge-task-version", default="1.0")
parser.add_argument("--build-task", default=None)
parser.add_argument("--build-task-version", default="1.0")
parser.add_argument("-rd", "--run-decider", action="store_true")
parser.add_argument("--region", default=None)
parser.add_argument("--config-bucket", default=None)
parser.add_argument("--log-group", default=None)
parser.add_argument("--log-db", default=None)
parser.add_argument("--kvs-db", default=None)
parser.add_argument("--profile", default=None)
parser.add_argument("--role-arn", default=None)
parser.add_argument("--stdout", default=None)
args = parser.parse_args()
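    # Example invocation (illustrative only; the domain, task list and task
    # names below are placeholders, not values shipped with the harness):
    #   python main.py -d my-swf-domain -t my-task-list \
    #       --div-task DivideTask --merge-task MergeTask --run-decider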
try:
# Fetch instance identity: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
with urllib.request.urlopen('http://169.254.169.254/latest/dynamic/instance-identity/document') as response:
info = json.load(response)
ec2_region = info['region']
identity = info['instanceId']
print("Running on EC2 instance {} in region {}".format(identity, ec2_region))
    except Exception:
ec2_region = "us-east-1"
identity = os.environ.get("COMPUTERNAME", "<unavailable>")
print("Couldn't load EC2 instance data from environment, using computer hostname {}".format(identity))
if not args.region:
args.region = ec2_region
# You can supply a profile to use if you are testing locally.
session = boto3.Session(region_name=args.region, profile_name=args.profile)
# You can supply a role arn to use if you are testing locally.
if args.role_arn:
sts_result = session.client('sts').assume_role(
DurationSeconds=3600,
RoleSessionName="Harness-" + str(uuid.uuid4()),
RoleArn=args.role_arn
)['Credentials']
session = boto3.Session(
region_name=args.region,
aws_access_key_id=sts_result['AccessKeyId'],
aws_secret_access_key=sts_result['SecretAccessKey'],
aws_session_token=sts_result['SessionToken']
)
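        # Note: the assumed-role credentials above are only valid for
        # DurationSeconds (one hour here); a long-running local session would
        # need to re-assume the role once they expire.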
if args.stdout:
if args.stdout == 'cloudwatch':
writeHandler = cloudwatch.OutputHandler('HARNESS-DEBUG', session, args.region, identity, 'decider' if args.run_decider else 'worker')
else:
fp = open(args.stdout, "w")
sys.stdout = fp
sys.stderr = fp
divide_task = config.TaskConfig(args.div_task, args.div_task_version, divide.handler)
merge_task = config.TaskConfig(args.merge_task, args.merge_task_version, merge.handler)
build_task = config.TaskConfig(args.build_task, args.build_task_version, build.handler) if args.build_task else merge_task
harness_config = config.Config(session, args.region, args.domain, args.task_list, divide_task, build_task,
merge_task, args.log_group, args.log_db, args.kvs_db, args.config_bucket, identity)
try:
if args.run_decider:
decider.run_decider(harness_config)
else:
worker.run_worker(harness_config)
except Exception as e:
message = "Error - " + str(e) + "\n" + traceback.format_exc()
print(message)
|
test/integration/component/test_add_remove_network.py | ycyun/ablestack-cloud | 1,131 | 12740903 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
P1 tests for Add Remove Network to VM
Test Plan: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Add+Remove+networks+to+VM+Test+cases
Issue Link: https://issues.apache.org/jira/browse/CLOUDSTACK-645
Feature Specifications: https://cwiki.apache.org/confluence/display/CLOUDSTACK/Add+Remove+Networks+to+VMs
"""
import random
import time
import unittest
from ddt import ddt, data
from marvin.cloudstackAPI import (addNicToVirtualMachine,
removeNicFromVirtualMachine,
updateDefaultNicForVirtualMachine)
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.codes import PASS
from marvin.lib.base import (
Account,
Domain,
ServiceOffering,
VirtualMachine,
NetworkOffering,
Network,
VpcOffering,
VPC,
PublicIPAddress,
FireWallRule,
NATRule
)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_virtual_machines,
list_events,
list_zones,
get_free_vlan,
update_resource_limit,
list_nat_rules
)
from marvin.lib.utils import (validateList,
random_gen,
get_hypervisor_type)
# Import Local Modules
from nose.plugins.attrib import attr
class Services:
"""Test Add Remove Network Services
"""
def __init__(self):
self.services = {
"sleep": 60,
"ostype": "CentOS 5.3 (64-bit)",
# Cent OS 5.3 (64 bit)
"isolated_network_offering": {
"name": 'Test Isolated Network offering',
"displaytext": 'Test Isolated Network offering',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding',
"traffictype": 'GUEST',
"availability": 'Optional',
"serviceProviderList": {
"Dhcp": 'VirtualRouter',
"Dns": 'VirtualRouter',
"SourceNat": 'VirtualRouter',
"PortForwarding": 'VirtualRouter',
},
},
"shared_network_offering": {
"name": 'Test Shared Network Offering',
"displaytext": 'Test Shared Network Offering',
"guestiptype": 'Shared',
"supportedservices": 'Dhcp,Dns,UserData',
"specifyVlan": "True",
"specifyIpRanges": "True",
"traffictype": 'GUEST',
"serviceProviderList": {
"Dhcp": 'VirtualRouter',
"Dns": 'VirtualRouter',
"UserData": 'VirtualRouter'
},
},
"shared_network": {
"name": "Test Shared Network",
"displaytext": "Test Shared Network",
"gateway": "172.16.17.1",
"netmask": "255.255.255.0",
"startip": "172.16.17.2",
"endip": "172.16.17.20",
},
"shared_network_2": {
"name": "Test Shared Network",
"displaytext": "Test Shared Network",
"gateway": "172.16.18.1",
"netmask": "255.255.255.0",
"startip": "172.16.18.2",
"endip": "172.16.18.20",
},
"isolated_network": {
"name": "Test Isolated Network",
"displaytext": "Test Isolated Network",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
# in MHz
"memory": 256,
# In MBs
},
"account": {
"email": "<EMAIL>",
"firstname": "Test_add_remove_network_vm",
"lastname": "User",
"username": "test_add_remove_network_vm",
"password": "password",
},
"domain": {
"name": "Domain_add_nw_to_vm",
},
"virtual_machine": {
"displayname": "testserver",
"username": "root", # VM creds for SSH
"password": "password",
"ssh_port": 22,
"hypervisor": 'XenServer',
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"vpc_offering": {
"name": 'VPC off add remove network',
"displaytext": 'VPC off add remove network',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat,NetworkACL',
},
"vpc": {
"name": "TestVPC add remove network",
"displaytext": "TestVPC add remove network",
"cidr": '10.0.0.1/24'
},
"natrule": {
"privateport": 22,
"publicport": 22,
"protocol": "TCP"
},
}
@ddt
class TestAddNetworkToVirtualMachine(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestAddNetworkToVirtualMachine, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
hypervisor = get_hypervisor_type(cls.api_client)
if hypervisor.lower() not in ["xenserver", "kvm"]:
raise unittest.SkipTest("This feature is supported only on XenServer and KVM")
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
# Set Zones and disk offerings
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
# Create Accounts & networks
cls.services["isolated_network"]["zoneid"] = cls.zone.id
cls.services["shared_network"]["zoneid"] = cls.zone.id
cls._cleanup = []
cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id)
cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(cls.api_client, cls.services["service_offering"])
cls._cleanup.append(cls.service_offering)
cls.virtual_machine = VirtualMachine.create(cls.api_client, cls.services["virtual_machine"], accountid=cls.account.name,
domainid=cls.account.domainid, serviceofferingid=cls.service_offering.id,
mode=cls.zone.networktype)
cls._cleanup.append(cls.virtual_machine)
cls.defaultNetworkId = cls.virtual_machine.nic[0].networkid
cls.isolated_network_offering = NetworkOffering.create(cls.api_client, cls.services["isolated_network_offering"])
cls._cleanup.append(cls.isolated_network_offering)
cls.isolated_network_offering.update(cls.api_client, state='Enabled')
cls.shared_network_offering = NetworkOffering.create(cls.api_client, cls.services["shared_network_offering"])
cls._cleanup.append(cls.shared_network_offering)
cls.shared_network_offering.update(cls.api_client, state='Enabled')
cls.isolated_network = Network.create(cls.api_client, cls.services["isolated_network"], cls.account.name,
cls.account.domainid, networkofferingid=cls.isolated_network_offering.id)
cls._cleanup.append(cls.isolated_network)
cls.services["shared_network"]["vlan"] = get_free_vlan(cls.api_client, cls.zone.id)[1]
shared_network_subnet_number = random.randrange(1, 254)
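        # Pick a random 172.16.<n>.0/24 range for the shared network so repeated
        # runs are unlikely to clash with ranges already configured in the zone
        # (setUp below re-randomizes the range for each test for the same reason).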
cls.services["shared_network"]["gateway"] = "172.16." + str(shared_network_subnet_number) + ".1"
cls.services["shared_network"]["startip"] = "172.16." + str(shared_network_subnet_number) + ".2"
cls.services["shared_network"]["endip"] = "172.16." + str(shared_network_subnet_number) + ".20"
cls.shared_nw_endip = cls.services["shared_network"]["endip"]
cls.shared_network = Network.create(cls.api_client, cls.services["shared_network"], cls.account.name,
cls.account.domainid, networkofferingid=cls.shared_network_offering.id)
cls._cleanup.append(cls.shared_network)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.addednics = []
shared_network_subnet_number = random.randrange(1, 254)
self.services["shared_network"]["gateway"] = "172.16." + str(shared_network_subnet_number) + ".1"
self.services["shared_network"]["startip"] = "172.16." + str(shared_network_subnet_number) + ".2"
self.services["shared_network"]["endip"] = "172.16." + str(shared_network_subnet_number) + ".20"
self.services["shared_network_2"]["gateway"] = "172.16." + str(shared_network_subnet_number + 1) + ".1"
self.services["shared_network_2"]["startip"] = "172.16." + str(shared_network_subnet_number + 1) + ".2"
self.services["shared_network_2"]["endip"] = "172.16." + str(shared_network_subnet_number + 1) + ".20"
self.cleanup = []
def tearDown(self):
try:
for nic in self.addednics:
self.virtual_machine.remove_nic(self.apiclient, nic.id)
except Exception as e:
self.debug("Exception during removal of nics : %s" % e)
super(TestAddNetworkToVirtualMachine, self).tearDown()
@classmethod
def tearDownClass(cls):
try:
# Disable Network Offerings
cls.isolated_network_offering.update(cls.api_client, state='Disabled')
cls.shared_network_offering.update(cls.api_client, state='Disabled')
except Exception as e:
cls.debug("Exception during disable of networks : %s" % e)
super(TestAddNetworkToVirtualMachine, cls).tearDownClass()
def addNetworkToVm(self, network, vm, ipaddress=None):
"""Add network to VM and check if new nic added in the VM"""
self.debug("Adding %s Network: %s to virtual machine %s" %
(network.type, network.id, vm.id))
vm.add_nic(self.apiclient, network.id, ipaddress=ipaddress)
vm_list = list_virtual_machines(self.apiclient, id=vm.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
self.debug("virtual machine nics: %s" % vm_list[0].nic)
nics = [x for x in vm_list[0].nic if x.networkid == network.id]
self.debug("Filtered nics list: %s:" % nics)
        # Only nics added to self.virtual_machine are tracked in this list.
        # They are removed before the next test case runs because the same
        # virtual machine is reused across test cases, so it must contain
        # only its default nic whenever a new test case starts.
if vm.id == self.virtual_machine.id:
self.addednics.append(nics[-1])
        self.assertTrue(len(nics) == 1, "nics list should contain the nic of the added network,\
the number of nics for the network should be 1, instead they are %s" %
len(nics))
if ipaddress is not None:
self.assertEqual(nics[0].ipaddress, ipaddress, "The ip address of nic does not match with \
the ip address passed while adding network to vm. ip address of nic is %s \
while passed ip address is %s" % (nics[0].ipaddress, ipaddress))
return
@attr(tags=["advanced", "dvs"])
@data("isolated", "shared")
def test_01_add_nw_running_vm(self, value):
"""Add network to running VM"""
# 1. Deploy VM in an account
# 2. Add isolated/shared network to the VM which is in running state
# Validate the following:
# 1. New nic is generated for the added network
# 2. Event NIC.CREATE is generated
network = None # The network which we are adding to the vm
if value == "isolated":
network = self.isolated_network
elif value == "shared":
network = self.shared_network
if network is None:
self.skipTest("Network should not be none. Case not handled for Network of type %s" % value)
self.addNetworkToVm(network, self.virtual_machine)
self.debug("Retrieving the list of events matching 'NIC.CREATE' in account: %s" % self.account.name)
events = list_events(self.apiclient, account=self.account.name, domainid=self.account.domainid,
type='NIC.CREATE')
event_list_validation_result = validateList(events)
self.assertEqual(event_list_validation_result[0], PASS, "event list validation failed due to %s" %
event_list_validation_result[2])
self.debug("Events list contains event NIC.CREATE")
return
@attr(tags=["advanced", "dvs"])
@data("isolated", "shared")
def test_02_add_nw_stopped_vm(self, value):
"""Add network to stopped VM"""
# 1. Deploy VM in an account
# 2. Stop the VM
# 3. Add isolated/shared network to the stopped VM
# Validate the following:
# 1. New nic is generated for the added network
try:
self.virtual_machine.stop(self.apiclient)
except Exception as e:
self.fail("Failed to stop VM: %s" % e)
network = None # The network which we are adding to the vm
if value == "isolated":
network = self.isolated_network
elif value == "shared":
network = self.shared_network
if network is None:
self.skipTest("Network should not be none. Case not handled for Network of type %s" % value)
self.addNetworkToVm(network, self.virtual_machine)
self.debug("Starting Virtual Machine: %s" % self.virtual_machine.id)
self.virtual_machine.start(self.apiclient)
return
@attr(tags=["advanced", "dvs"])
@data("isolated", "shared")
def test_03_add_nw_multiple_times(self, value):
"""Add same network multiple times to running VM"""
# 1. Deploy VM in an account
# 2. Add isolated/shared network to the VM
# 3. Try Adding same network again to the VM
# Validate the following:
# 1. Adding same network to vm multiple times fails
network = None # The network which we are adding to the vm
if value == "isolated":
network = self.isolated_network
elif value == "shared":
network = self.shared_network
if network is None:
self.skipTest("Network should not be none. Case not handled for Network of type %s" % value)
try:
virtual_machine = VirtualMachine.create(
self.api_client, self.services["virtual_machine"],
accountid=self.account.name, domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype,
networkids=[self.defaultNetworkId])
self.cleanup.append(virtual_machine)
except Exception as e:
self.fail("Failed to deply virtual machine: %s" % e)
# Adding network to vm for the first time
self.addNetworkToVm(network, virtual_machine)
# Trying to add same network to vm for the second time
with self.assertRaises(Exception) as e:
self.addNetworkToVm(network, virtual_machine)
self.debug("Adding same network again failed with exception: %s" % e.exception)
return
@attr(tags=["advanced", "dvs"])
@data("isolated")
def test_04_vpc_nw_running_vm(self, value):
"""Add VPC network to running VM belonging to isolated network"""
# 1. Deploy VM in an account
# 2. Add isolated network to the VM
# 3. Create VPC
# 4. Try adding VPC to the VM
# Validate the following:
# 1. Adding VPC to vm should fail
try:
virtual_machine = VirtualMachine.create(
self.api_client, self.services["virtual_machine"],
accountid=self.account.name, domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype,
networkids=[self.defaultNetworkId])
self.cleanup.append(virtual_machine)
except Exception as e:
self.fail("Failed to deply virtual machine: %s" % e)
network = self.isolated_network
self.addNetworkToVm(network, virtual_machine)
self.debug("Creating VPC offering")
vpc_off = VpcOffering.create(self.api_client, self.services["vpc_offering"])
self.cleanup.append(vpc_off)
self.debug("Created VPC offering: %s" % vpc_off.id)
self.debug("Enabling the VPC offering")
vpc_off.update(self.apiclient, state='Enabled')
self.debug("Creating VPC")
vpc = VPC.create(self.apiclient, self.services["vpc"], vpcofferingid=vpc_off.id, zoneid=self.zone.id,
account=self.account.name, domainid=self.account.domainid)
self.cleanup.append(vpc)
self.debug("Trying to add VPC to vm belonging to isolated network, this should fail")
with self.assertRaises(Exception):
virtual_machine.add_nic(self.apiclient, vpc.id)
self.debug("Disabling vpc offering: %s" % vpc_off.id)
vpc_off.update(self.apiclient, state='Disabled')
return
@attr(tags=["advanced", "dvs"])
@data("isolated")
def test_05_add_vpc_nw_stopped_vm(self, value):
"""Add VPC network to stopped VM belonging to isolated network"""
# 1. Deploy VM in an account
# 2. Stop the VM
# 3. Add isolated network to the VM
# 4. Create VPC
# 5. Try adding VPC to the stopped VM
# Validate the following:
# 1. Adding VPC to vm should fail
try:
self.virtual_machine.stop(self.apiclient)
except Exception as e:
self.fail("Failed to stop virtual machine: %s" % e)
self.addNetworkToVm(self.isolated_network, self.virtual_machine)
self.debug("Creating VPC offering")
vpc_off = VpcOffering.create(self.api_client, self.services["vpc_offering"])
self.cleanup.append(vpc_off)
self.debug("Created VPC offering: %s" % vpc_off.id)
self.debug("Enabling the VPC offering")
vpc_off.update(self.apiclient, state='Enabled')
self.debug("Creating VPC")
vpc = VPC.create(self.apiclient, self.services["vpc"], vpcofferingid=vpc_off.id, zoneid=self.zone.id,
account=self.account.name, domainid=self.account.domainid)
self.cleanup.append(vpc)
self.debug("Trying to add VPC to vm belonging to isolated network, this should fail")
with self.assertRaises(Exception):
self.virtual_machine.add_nic(self.apiclient, vpc.id)
self.debug("Starting virtual machine")
self.virtual_machine.start(self.apiclient)
self.debug("Disabling vpc offering: %s" % vpc_off.id)
vpc_off.update(self.apiclient, state='Disabled')
return
@attr(tags=["advanced", "dvs"])
def test_06_add_nw_ipaddress_running_vm(self):
"""Add network and ip address to running VM"""
# 1. Deploy VM in an account
# 2. Add shared network and ip address to this VM
# Validate the following:
# 1. New nic gets added for the shared network
# 2. The newly added nic has the ip address same as
# that passed while adding the network
try:
virtual_machine = VirtualMachine.create(
self.api_client, self.services["virtual_machine"],
accountid=self.account.name, domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype,
networkids=[self.defaultNetworkId])
self.cleanup.append(virtual_machine)
except Exception as e:
self.fail("Failed to deply virtual machine: %s" % e)
ipaddress = self.shared_nw_endip
self.debug("Adding network to vm with ip address %s: " % ipaddress)
self.addNetworkToVm(self.shared_network, virtual_machine, ipaddress=ipaddress)
return
@attr(tags=["advanced", "dvs"])
def test_10_add_nw_invalid_ipaddress_running_vm(self):
"""Add network with invalid ip address to running VM"""
# 1. Deploy VM in an account
# 2. Add shared network with invalid ip address to this VM
# Validate the following:
# 1. Adding network to VM should fail because of invalid ip address
ipaddress = "257.257.257.257" # Invalid ip address
self.debug("Adding network to vm with ip address %s: " % ipaddress)
with self.assertRaises(Exception) as e:
self.addNetworkToVm(self.shared_network, self.virtual_machine,
ipaddress=ipaddress)
self.debug("API failed with exception: %s" % e.exception)
return
# was tags=["advanced", "dvs"],
# the apiclient that is being used to test this has to much rights?
@attr(tags=["TODO"])
@data("isolated", "shared")
def test_14_add_nw_different_account(self, value):
"""Add network to running VM"""
# 1. Deploy VM in an account
# 2. Create new account under same domain and create network in that account
# 3. Add isolated/shared network belonging to other account to the VM in first account
# Validate the following:
# 1. Adding network should fail
network = None # The network which we are adding to the vm
account = Account.create(self.apiclient, self.services["account"], domainid=self.domain.id)
self.cleanup.append(account)
if value == "isolated":
network = Network.create(self.api_client, self.services["isolated_network"], account.name,
account.domainid, networkofferingid=self.isolated_network_offering.id)
self.cleanup.append(network)
elif value == "shared":
self.services["shared_network_2"]["zoneid"] = self.zone.id
self.services["shared_network_2"]["vlan"] = get_free_vlan(self.apiclient, self.zone.id)[1]
network = Network.create(self.api_client, self.services["shared_network_2"], account.name,
account.domainid, networkofferingid=self.shared_network_offering.id)
self.cleanup.append(network)
if network is None:
self.skipTest("Network should not be none. Case not handled for Network of type %s" % value)
self.debug("Trying to %s network in account %s to a vm in account %s, This should fail" %
(network.type, account.name, self.account.name))
try:
vm_with_nic = self.virtual_machine.add_nic(self.apiclient, network.id)
nics = [x for x in vm_with_nic.nic if x.networkid == network.id]
self.addednics.append(nics[-1])
except Exception:
pass
else:
self.fail("User was able to add NIC, test failed! This issue has been hit: CLOUDSTACK-10071")
return
@attr(tags=["advanced", "dvs"])
def test_24_add_nw_different_domain(self):
"""Add network to running VM"""
# 1. Create two domains
# 2. Create network in one domain and create virtual machine in other domain
# 3. Ad isolated/shared network belonging to one domain to the vm belonging to other domain
# Validate the following:
# 1. Adding network should fail
network = None # The network which we are adding to the vm
try:
self.child_domain_1 = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
self.cleanup.append(self.child_domain_1)
self.child_do_admin_1 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.child_domain_1.id
)
self.cleanup.append(self.child_do_admin_1)
self.child_domain_2 = Domain.create(self.apiclient,
services=self.services["domain"],
parentdomainid=self.domain.id)
self.cleanup.append(self.child_domain_2)
self.child_do_admin_2 = Account.create(
self.apiclient,
self.services["account"],
admin=True,
domainid=self.child_domain_2.id)
self.cleanup.append(self.child_do_admin_2)
except Exception as e:
self.fail(e)
network = Network.create(self.api_client, self.services["isolated_network"], self.child_do_admin_1.name,
self.child_do_admin_1.domainid, networkofferingid=self.isolated_network_offering.id)
self.cleanup.append(network)
virtual_machine = VirtualMachine.create(self.apiclient, self.services["virtual_machine"], accountid=self.child_do_admin_2.name,
domainid=self.child_do_admin_2.domainid, serviceofferingid=self.service_offering.id,
mode=self.zone.networktype)
self.cleanup.append(virtual_machine)
time.sleep(self.services["sleep"])
self.debug("Trying to %s network in domain %s to a vm in domain %s, This should fail" %
(network.type, self.child_domain_1.name, self.child_domain_2.name))
with self.assertRaises(Exception) as e:
virtual_machine.add_nic(self.apiclient, network.id)
self.debug("Operation failed with exception %s" % e.exception)
return
@attr(tags=["advanced", "dvs"])
def test_25_add_nw_above_account_limit(self):
"""Add network to VM with maximum network limit reached"""
# 1. Create an account and create maximum allowed networks in the account
# 2. Deploy VM in this account
# 3. Create a network in other account and add to this VM
# Validate the following:
# 1. Adding network should fail
self.debug("Creating account 1")
account_1 = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup.append(account_1)
self.debug("setting network limit of account: %s as 1" % account_1.name)
update_resource_limit(
self.apiclient,
6, # Network
max=1,
account=account_1.name,
domainid=account_1.domainid
)
self.debug("Creating isolated network in account: %s" % account_1.name)
network_1 = Network.create(self.api_client, self.services["isolated_network"], account_1.name,
account_1.domainid, networkofferingid=self.isolated_network_offering.id)
self.cleanup.append(network_1)
self.debug("created network %s" % network_1.name)
self.debug("Deploying virtual machine in account: %s" % account_1.name)
virtual_machine = VirtualMachine.create(self.apiclient, self.services["virtual_machine"], accountid=account_1.name,
domainid=account_1.domainid, serviceofferingid=self.service_offering.id,
mode=self.zone.networktype)
self.cleanup.append(virtual_machine)
self.debug("Deployed virtual machine : %s" % virtual_machine.id)
self.debug("Creating another account")
account_2 = Account.create(
self.apiclient,
self.services["account"],
domainid=self.domain.id
)
self.cleanup.append(account_2)
self.debug("Created account %s" % account_2.name)
self.debug("Creating network in account %s" % account_2.name)
network_2 = Network.create(self.api_client, self.services["isolated_network"], account_2.name,
account_2.domainid, networkofferingid=self.isolated_network_offering.id)
self.cleanup.append(network_2)
self.debug("Created network %s" % network_2.name)
self.debug("Trying to add netwrok %s to VM %s, this should fail" %
(network_2.name, virtual_machine.id))
with self.assertRaises(Exception) as e:
virtual_machine.add_nic(self.apiclient, network_2.id)
self.debug("Operation failed with exception %s" % e.exception)
return
class TestRemoveNetworkFromVirtualMachine(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestRemoveNetworkFromVirtualMachine, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
hypervisor = get_hypervisor_type(cls.api_client)
if hypervisor.lower() not in ["xenserver", "kvm"]:
raise unittest.SkipTest("This feature is supported only on XenServer and KVM")
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
# Set Zones and disk offerings
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
# Create Accounts & networks
cls.services["isolated_network"]["zoneid"] = cls.zone.id
cls.services["shared_network"]["zoneid"] = cls.zone.id
cls._cleanup = []
cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id)
cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(cls.api_client, cls.services["service_offering"])
cls._cleanup.append(cls.service_offering)
cls.virtual_machine = VirtualMachine.create(cls.api_client, cls.services["virtual_machine"], accountid=cls.account.name,
domainid=cls.account.domainid, serviceofferingid=cls.service_offering.id,
mode=cls.zone.networktype)
cls._cleanup.append(cls.virtual_machine)
# Create Shared Network Offering
cls.isolated_network_offering = NetworkOffering.create(cls.api_client, cls.services["isolated_network_offering"])
cls._cleanup.append(cls.isolated_network_offering)
# Enable Isolated Network offering
cls.isolated_network_offering.update(cls.api_client, state='Enabled')
cls.isolated_network = Network.create(cls.api_client, cls.services["isolated_network"], cls.account.name,
cls.account.domainid, networkofferingid=cls.isolated_network_offering.id)
cls._cleanup.append(cls.isolated_network)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
self.addednics = []
def tearDown(self):
try:
for nic in self.addednics:
self.virtual_machine.remove_nic(self.apiclient, nic.id)
except Exception as e:
self.debug("Exception during removal of nics : %s" % e)
super(TestRemoveNetworkFromVirtualMachine, self).tearDown()
@classmethod
def tearDownClass(cls):
try:
cls.isolated_network_offering.update(cls.api_client, state='Disabled')
except Exception as e:
cls.debug("Exception during disabling network offering : %s" % e)
super(TestRemoveNetworkFromVirtualMachine, cls).tearDownClass()
def addNetworkToVm(self, network, vm):
"""Add network to VM and check if new nic added in the VM"""
self.debug("Adding %s Network: %s to virtual machine %s" %
(network.type, network.id, vm.id))
vm.add_nic(self.apiclient, network.id)
vm_list = list_virtual_machines(self.apiclient, id=vm.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
self.debug("virtual machine nics: %s" % vm_list[0].nic)
# Add nic of network to list so that it can be deleted later accessing its id from this list
self.nics = [x for x in vm_list[0].nic if x.networkid == network.id]
self.debug("Filtered nics list: %s:" % self.nics)
self.assertTrue(len(self.nics) == 1, "nics list should contain the nic of added isolated network,\
the number of nics for the network should be 1, instead they are %s" %
len(self.nics))
return self.nics
@attr(tags=["advanced", "dvs"])
def test_07_remove_nic_running_vm(self):
"""Remove nic from running VM"""
# 1. Deploy Vm in account
# 2. Add network to VM
# 3. Remove the nic added by the newly added network
# Validate the following:
# 1. Newly added nic is removed
# 2. Event NIC.DELETE is generated
self.addNetworkToVm(self.isolated_network, self.virtual_machine)
        # Access the nic of the added network from the self.nics list, which is filled
        # in the addNetworkToVm function
self.debug("Removing added nic %s from vm %s" %
(self.nics[0].id, self.virtual_machine.id))
self.virtual_machine.remove_nic(self.apiclient, self.nics[0].id)
vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
self.debug("virtual machine nics: %s" % vm_list[0].nic)
# Verify the nic is removed from the virtual machine
self.debug("Verifying the nic is removed from the virtual machine")
self.assertFalse(any(x.networkid == self.isolated_network.id for x in vm_list[0].nic),
"nic still present in the virtual machine nic list")
self.debug("nic removed successfully")
self.debug("Retrieving events list matching events 'NIC.DELETE'")
events = list_events(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
type='NIC.DELETE'
)
event_list_validation_result = validateList(events)
self.assertEqual(event_list_validation_result[0], PASS, "vm list validation failed due to %s" %
event_list_validation_result[2])
self.debug("Events list contains event NIC.DELETE")
self.debug("events: %s" % events)
return
@attr(tags=["advanced", "dvs"])
def test_08_remove_default_nic(self):
"""Test Remove default nic of running VM"""
# 1. Deploy Vm in account
# 2. Try to remove the default nic of the VM
# Validate the following:
# 1. Default nic of vm is not removed
vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
self.debug("virtual machine nics: %s" % vm_list[0].nic)
self.assertEqual(len(vm_list[0].nic), 1, "There should only be default nic present in the vm")
self.debug("Trying to remove the default nic of vm : %s, this should fail" %
self.virtual_machine.id)
with self.assertRaises(Exception):
self.virtual_machine.remove_nic(self.apiclient, vm_list[0].nic[0].id)
self.debug("Removing default nic of vm failed")
return
@attr(tags=["advanced", "dvs"])
def test_09_remove_foreign_nic(self):
"""Remove nic which does not belong to VM"""
# 1. Add VM in an account
# 1. Add new account and deploy vm in it
# 2. Try to remove nic of the new vm from first vm
# Validate the following:
# 1. Nic remove operation should fail
self.debug("Creating new account")
account = Account.create(
self.api_client,
self.services["account"],
domainid=self.domain.id
)
self.cleanup.append(account)
self.debug("created new account : %s" % account.name)
self.debug("Deploying virtual machine in this account")
virtual_machine = VirtualMachine.create(self.apiclient, self.services["virtual_machine"], accountid=account.name,
domainid=account.domainid, serviceofferingid=self.service_offering.id,
mode=self.zone.networktype)
self.debug("Deployed virtual machine: %s" % virtual_machine.id)
self.debug("Trying to remove nic of new virtual machine from existing virtual machine, This \
operation should fail")
with self.assertRaises(Exception) as e:
self.virtual_machine.remove_nic(self.apiclient, virtual_machine.nic[0].id)
self.debug("Operation failed with exception: %s" % e.exception)
return
@attr(tags=["advanced"], required_hardware="true")
def test_29_remove_nic_CS22503(self):
"""Test to verify remove nic from vm if the nic ip is same as another vm ip in another network"""
# 1. Deploy vm v1 with networks n1 and n2
# 2. Check the ip address of nic in n2 say ip1
# 3. Deployed vm v2 in another network say n3 with same IP address as ip1 using
# 'deployVirtualMachine' api with 'ipaddress' as one of the parameters.
# 4. Acquire public IP in n3 network.
# 5. Configure PF on the acquired IP and assign it to vm v2
        # 6. Try to remove nic n2 from v1. Should be successful
        # There was a bug where the nic could not be removed because both vms had the same ip address
vm1 = self.virtual_machine
nic2 = self.addNetworkToVm(self.isolated_network, vm1)
self.addednics.append(nic2)
# get the ip address of the nic added in 2nd network
vm1_ip = nic2[0].ipaddress
self.assertIsNotNone(vm1_ip, "New nic did not get the ip address")
# Create network n3
self.network3 = Network.create(
self.api_client,
self.services["isolated_network"],
self.account.name,
self.account.domainid,
networkofferingid=self.isolated_network_offering.id
)
self.cleanup.append(self.network3)
self.vm2 = VirtualMachine.create(
self.api_client,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[self.network3.id],
ipaddress=vm1_ip,
mode=self.zone.networktype
)
self.cleanup.append(self.vm2)
vm2 = VirtualMachine.list(
self.api_client,
id=self.vm2.id
)
self.assertEqual(validateList(vm2)[0], PASS, "list vms returned invalid response")
self.assertIsNotNone(vm2[0].nic[0].ipaddress, "vm2 didn't get the ip address")
self.assertEqual(
vm1_ip,
vm2[0].nic[0].ipaddress,
"vm2 did not get the ip address passed while deploying vm"
)
ip_address = PublicIPAddress.create(
self.apiclient,
self.account.name,
self.zone.id,
self.account.domainid,
self.services["virtual_machine"],
self.network3.id
)
self.cleanup.append(ip_address)
# Open up firewall port for SSH
FireWallRule.create(
self.apiclient,
ipaddressid=ip_address.ipaddress.id,
protocol=self.services["natrule"]["protocol"],
cidrlist=['0.0.0.0/0'],
startport=self.services["natrule"]["publicport"],
endport=self.services["natrule"]["publicport"]
)
# Create NAT rule
nat_rule = NATRule.create(
self.apiclient,
self.vm2,
self.services["natrule"],
ip_address.ipaddress.id
)
list_nat_rule_response = list_nat_rules(
self.apiclient,
id=nat_rule.id
)
self.assertEqual(
validateList(list_nat_rule_response)[0],
PASS,
"Check list response returns a valid list"
)
self.assertEqual(
list_nat_rule_response[0].id,
nat_rule.id,
"Check Correct Port forwarding Rule is returned"
)
# Try to remove nic 2 from vm1
try:
vm1.remove_nic(self.apiclient, self.nics[0].id)
vm1_res = VirtualMachine.list(self.apiclient, id=vm1.id)
self.assertEqual(validateList(vm1_res)[0], PASS, "invalid listvm response")
self.assertEqual(
len(vm1_res[0].nic),
1,
"VM has more than one nic even after removing the 2nd nic"
)
except Exception as e:
self.fail("Failed to delete the nic from vm")
return
@attr(tags=["advanced"], required_hardware="true")
def test_30_remove_nic_reattach(self):
"""
Test to verify vm start after NIC removal and reattach
# 1.Create vm which has 3 nics(e.g. #0,#1,#2)
# 2.Stop the vm
# 3.Remove second nic(#1)
# 4.Add/Reattach same network(#1)
# 5.Start the instance
"""
self.ntwk2 = Network.create(
self.apiclient,
self.services["isolated_network"],
self.account.name,
self.account.domainid,
networkofferingid=self.isolated_network_offering.id
)
self.cleanup.append(self.ntwk2)
self.ntwk3 = Network.create(
self.apiclient,
self.services["isolated_network"],
self.account.name,
self.account.domainid,
networkofferingid=self.isolated_network_offering.id
)
self.cleanup.append(self.ntwk3)
self.test_vm = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype,
networkids=[self.isolated_network.id, self.ntwk2.id, self.ntwk3.id]
)
self.cleanup.append(self.test_vm)
self.assertIsNotNone(self.test_vm, "Failed to create vm with 3 nics")
vm_res = VirtualMachine.list(
self.apiclient,
id=self.test_vm.id
)
self.assertEqual(validateList(vm_res)[0], PASS, "Invalid list vm response")
self.nics = vm_res[0].nic
self.assertEqual(
validateList(self.nics)[0],
PASS,
"vm response does not contain nics info"
)
self.assertEqual(len(self.nics), 3, "Not all nics found in vm response")
self.test_vm.stop(self.apiclient)
vm_res2 = VirtualMachine.list(
self.apiclient,
id=self.test_vm.id
)
self.assertEqual(validateList(vm_res2)[0], PASS, "Invalid response")
self.assertEqual(
vm_res2[0].state,
"Stopped",
"VM did not stop properly"
)
"""
        get the network id of the nic which we remove from the vm, so that we can
        use the same network id to reattach it
"""
nic_to_attach = [x for x in [self.isolated_network, self.ntwk2, self.ntwk3] \
if x.id == self.nics[1].networkid]
self.assertEqual(validateList(nic_to_attach)[0], PASS, "No matching nics")
self.assertEqual(len(nic_to_attach), 1, "More than one nic in same network")
try:
self.test_vm.remove_nic(self.apiclient, nicId=self.nics[1].id)
self.test_vm.add_nic(
self.apiclient,
nic_to_attach[0].id
)
self.test_vm.start(self.apiclient)
except Exception as e:
self.fail("Failed to start vm after nic removal and attachment")
vm_res3 = VirtualMachine.list(self.apiclient, id=self.test_vm.id)
self.assertEqual(
validateList(vm_res3)[0],
PASS,
"Invalid listvm response after nic detach and attach"
)
self.assertEqual(
vm_res3[0].state,
"Running",
"VM didn't come to running state after nic detach and attach"
)
vm_nics = vm_res3[0].nic
self.assertEqual(validateList(vm_nics)[0], PASS, "Invalid nics after vm stop/start")
self.assertEqual(
len(vm_nics),
3,
"Nic is not attached/detected"
)
self.addednics.extend(vm_nics)
return
class TestUpdateVirtualMachineNIC(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestUpdateVirtualMachineNIC, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
hypervisor = get_hypervisor_type(cls.api_client)
if hypervisor.lower() not in ["xenserver", "kvm"]:
raise unittest.SkipTest("This feature is supported only on XenServer and KVM")
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
# Set Zones and disk offerings
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
# Create Accounts & networks
cls.services["isolated_network"]["zoneid"] = cls.zone.id
cls.services["shared_network"]["zoneid"] = cls.zone.id
cls._cleanup = []
cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id)
cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(cls.api_client, cls.services["service_offering"])
cls._cleanup.append(cls.service_offering)
cls.virtual_machine = VirtualMachine.create(cls.api_client, cls.services["virtual_machine"],
accountid=cls.account.name, domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id,
mode=cls.zone.networktype)
cls._cleanup.append(cls.virtual_machine)
cls.isolated_network_offering = NetworkOffering.create(cls.api_client, cls.services["isolated_network_offering"])
cls._cleanup.append(cls.isolated_network_offering)
cls.isolated_network_offering.update(cls.api_client, state='Enabled')
cls.isolated_network = Network.create(cls.api_client, cls.services["isolated_network"], cls.account.name,
cls.account.domainid, networkofferingid=cls.isolated_network_offering.id)
cls._cleanup.append(cls.isolated_network)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
self.addednics = []
def tearDown(self):
try:
for nic in self.addednics:
self.virtual_machine.remove_nic(self.apiclient, nic.id)
except Exception as e:
self.debug("Exception during removal of nics : %s" % e)
super(TestUpdateVirtualMachineNIC, self).tearDown()
@classmethod
def tearDownClass(cls):
try:
cls.isolated_network_offering.update(cls.api_client, state='Disabled')
except Exception as e:
cls.debug("Exception during disable of network offering : %s" % e)
super(TestUpdateVirtualMachineNIC, cls).tearDownClass()
def addNetworkToVm(self, network, vm):
"""Add network to VM and check if new nic added in the VM"""
self.debug("Adding %s Network: %s to virtual machine %s" %
(network.type, network.id, vm.id))
vm.add_nic(self.apiclient, network.id)
vm_list = list_virtual_machines(self.apiclient, id=vm.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
self.debug("virtual machine nics: %s" % vm_list[0].nic)
# Add nic of network to list so that it can be deleted later accessing its id from this list
self.nics = [x for x in vm_list[0].nic if x.networkid == network.id]
self.debug("Filtered nics list: %s:" % self.nics)
self.assertTrue(len(self.nics) == 1, "nics list should contain the nic of added isolated network,\
the number of nics for the network should be 1, instead they are %s" %
len(self.nics))
self.addednics.append(self.nics[0])
return
@attr(tags=["advanced", "dvs"])
def test_11_update_nic_running_vm(self):
"""update default nic of running VM"""
# 1. Deploy Vm in account
# 2. Add network to VM
# 3. Update default nic of VM (Make the newly added NIC as default)
# Validate the following:
# 1. Default nic is updated
# 2. Previous default nic is now non-default
# 3. Event NIC.UPDATE is generated
self.addNetworkToVm(self.isolated_network, self.virtual_machine)
self.debug("Listing virtual machine so that to retrive the list of non-default and default nic")
vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
if len(vm_list[0].nic) != 2:
self.fail("VM should have exactly two NICs")
defaultNicIdBeforeUpdate = None
nonDefaultNicIdBeforeUpdate = None
for nic in vm_list[0].nic:
if nic.isdefault:
defaultNicIdBeforeUpdate = nic.id
else:
nonDefaultNicIdBeforeUpdate = nic.id
self.debug("Default nic of VM is %s and non default nic of VM is %s"
% (defaultNicIdBeforeUpdate, nonDefaultNicIdBeforeUpdate))
self.debug("Making non default nic as default nic")
self.virtual_machine.update_default_nic(self.apiclient, nicId=nonDefaultNicIdBeforeUpdate)
self.debug("Again listing the NIC list of VM to verify the update operation was successful")
vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
if len(vm_list[0].nic) != 2:
self.fail("VM should have exactly two NICs")
for nic in vm_list[0].nic:
if nic.isdefault:
defaultNicIdAfterUpdate = nic.id
self.assertEqual(nonDefaultNicIdBeforeUpdate, defaultNicIdAfterUpdate, "old non default NIC not made\
default one, update_default_nic API failed")
self.debug("Retrieving events list matching events 'NIC.UPDATE'")
events = list_events(
self.apiclient,
account=self.account.name,
domainid=self.account.domainid,
type='NIC.UPDATE'
)
event_list_validation_result = validateList(events)
self.assertEqual(event_list_validation_result[0], PASS, "event list validation failed due to %s" %
event_list_validation_result[2])
self.debug("Events list contains event NIC.UPDATE")
self.debug("events: %s" % events)
return
@attr(tags=["advanced", "dvs"])
def test_12_make_default_nic_as_default(self):
"""Try to set default nic of vm again as default"""
# 1. Deploy Vm in account
# 2. Set default nic of vm again as default
# Validate the following:
# 1. updateDefaultNic API fails
self.debug("Listing virtual machine to get default nic")
vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
defaultNicId = None
for nic in vm_list[0].nic:
if nic.isdefault:
defaultNicId = nic.id
self.debug("Trying to set default nic again as default nic, This should fail")
with self.assertRaises(Exception) as e:
self.virtual_machine.update_default_nic(self.apiclient, nicId=defaultNicId)
self.debug("updateDefaultNic operation failed as expected with exception: %s" %
e.exception)
return
@attr(tags=["advanced", "dvs"])
def test_13_set_foreign_nic_as_default(self):
"""set nic which does not belong to VM as its default one"""
# 1. Add VM in an account
# 1. Add new account and deploy vm in it
# 2. Try to set nic of the new vm as default nic of first vm
# Validate the following:
# 1. updateDefaultNic operation should fail
self.debug("Creating new account")
account = Account.create(self.api_client, self.services["account"], domainid=self.domain.id)
self.cleanup.append(account)
self.debug("created new account : %s" % account.name)
self.debug("Deploying virtual machine in this account")
virtual_machine = VirtualMachine.create(self.apiclient, self.services["virtual_machine"],
accountid=account.name, domainid=account.domainid,
serviceofferingid=self.service_offering.id, mode=self.zone.networktype)
self.cleanup.append(virtual_machine)
time.sleep(self.services["sleep"])
self.debug("Deployed virtual machine: %s" % virtual_machine.id)
foreignNicId = virtual_machine.nic[0].id
self.debug("Trying to set nic of new virtual machine as default nic of existing virtual machine, This \
operation should fail")
with self.assertRaises(Exception) as e:
self.virtual_machine.update_default_nic(self.apiclient, nicId=foreignNicId)
self.debug("updateDefaultNic operation failed as expected with exception: %s" %
e.exception)
return
class TestFailureScenariosAddNetworkToVM(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestFailureScenariosAddNetworkToVM, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
hypervisor = get_hypervisor_type(cls.api_client)
if hypervisor.lower() not in ["xenserver", "kvm"]:
raise unittest.SkipTest("This feature is supported only on XenServer and KVM")
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
# Set Zones and disk offerings
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
# Create Accounts & networks
cls.services["isolated_network"]["zoneid"] = cls.zone.id
cls._cleanup = []
cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id)
cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(cls.api_client, cls.services["service_offering"])
cls._cleanup.append(cls.service_offering)
cls.virtual_machine = VirtualMachine.create(cls.api_client, cls.services["virtual_machine"],
accountid=cls.account.name, domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id, mode=cls.zone.networktype)
cls._cleanup.append(cls.virtual_machine)
cls.isolated_network_offering = NetworkOffering.create(cls.api_client, cls.services["isolated_network_offering"], )
cls._cleanup.append(cls.isolated_network_offering)
cls.isolated_network_offering.update(cls.api_client, state='Enabled')
cls.isolated_network = Network.create(cls.api_client, cls.services["isolated_network"], cls.account.name,
cls.account.domainid, networkofferingid=cls.isolated_network_offering.id)
cls._cleanup.append(cls.isolated_network)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
super(TestFailureScenariosAddNetworkToVM, self).tearDown()
@classmethod
def tearDownClass(cls):
try:
cls.isolated_network_offering.update(cls.api_client, state='Disabled')
except Exception as e:
cls.debug("Exception during disabling network offering : %s" % e)
super(TestFailureScenariosAddNetworkToVM, cls).tearDownClass()
@attr(tags=["advanced", "dvs"])
def test_15_add_nic_wrong_vm_id(self):
"""Add network to vm with wrong vm id"""
# 1. Call add network to VM API with correct network id but wrong vm id
# Validate the following:
# 1. API should throw exception saying unable to find virtual machine
cmd = addNicToVirtualMachine.addNicToVirtualMachineCmd()
cmd.virtualmachineid = random_gen(id="virtual_machine", size=30)
cmd.networkid = self.isolated_network.id
with self.assertRaises(Exception) as e:
self.apiclient.addNicToVirtualMachine(cmd)
self.debug("addNicToVirtualMachine API failed with exception: %s" % e.exception)
return
@attr(tags=["advanced", "dvs"])
def test_16_add_nic_wrong_network_id(self):
"""Add network to vm with wrong network id"""
        # 1. Call add network to VM API with correct vm id but wrong network id
# Validate the following:
# 1. API should throw exception saying unable to find a network
cmd = addNicToVirtualMachine.addNicToVirtualMachineCmd()
cmd.virtualmachineid = self.virtual_machine.id
cmd.networkid = random_gen(id="network_id", size=30)
with self.assertRaises(Exception) as e:
self.apiclient.addNicToVirtualMachine(cmd)
self.debug("addNicToVirtualMachine API failed with exception: %s" % e.exception)
return
@attr(tags=["advanced", "dvs"])
def test_17_add_nic_different_zone(self):
"""Add network to vm where both belong to different zones"""
# 1. Deploy a VM in zone 1
# 2. Create a network in zone 2
# 3. Try to add this network to the VM (both belong to different zones)
# Validate the following:
# 1. API should throw exception vminstance is in zone<id>, but network is in zone <id>
foreignZoneId = None
zones = list_zones(self.apiclient, available=True)
list_zones_validation_result = validateList(zones)
self.assertEqual(list_zones_validation_result[0], PASS, "list zones validation failed due to: %s" %
list_zones_validation_result[2])
if len(zones) >= 2:
for zone in zones:
if zone.id != self.zone.id:
foreignZoneId = zone.id
break
else:
self.skipTest("This test requires at least two zones to be present in the setup")
self.services["isolated_network"]["zoneid"] = foreignZoneId
self.debug("Creating isolated network in zone %s which is foreign to VM" %
foreignZoneId)
isolated_network = Network.create(self.apiclient, self.services["isolated_network"],
self.account.name, self.account.domainid,
networkofferingid=self.isolated_network_offering.id)
self.cleanup.append(isolated_network)
self.debug("Created isolated network %s in zone %s" %
(isolated_network.id, foreignZoneId))
self.debug("Trying to add network to VM, both belonging to different zones")
cmd = addNicToVirtualMachine.addNicToVirtualMachineCmd()
cmd.virtualmachineid = self.virtual_machine.id
cmd.networkid = isolated_network.id
with self.assertRaises(Exception) as e:
time.sleep(5)
self.apiclient.addNicToVirtualMachine(cmd)
self.debug("addNicToVirtualMachine API failed with exception: %s" % e.exception)
return
@attr(tags=["invalid"])
def test_18_add_nic_basic_zone(self):
"""Add network to vm in basic zone"""
# 1. Deploy a vm and create network in basic zone
# 2. Try adding network to vm
# Validate following
# 1. API should throw exception saying Can't add a new nic to vm in basic network
basicZone = None
zones = list_zones(self.apiclient, available=True)
list_zones_validation_result = validateList(zones)
self.assertEqual(list_zones_validation_result[0], PASS, "list zones validation failed due to: %s" %
list_zones_validation_result[2])
for zone in zones:
            if zone.networktype.lower() == 'basic':
                basicZone = zone
break
if basicZone is None:
self.skipTest("This test requires at least one basic zone to be present in the setup")
self.services["isolated_network"]["zoneid"] = basicZone.id
self.debug("Creating isolated network in basic zone: %s" % basicZone.id)
isolated_network = Network.create(self.apiclient, self.services["isolated_network"],
networkofferingid=self.isolated_network_offering.id)
self.cleanup.append(isolated_network)
self.debug("Created isolated network %s:" % isolated_network.id)
self.services["virtual_machine"]["zoneid"] = basicZone.id
self.debug("Deploying virtual machine in basic zone: %s" % basicZone.id)
virtual_machine = VirtualMachine.create(self.apiclient, self.services["virtual_machine"],
serviceofferingid=self.service_offering.id,
mode=basicZone.networktype)
self.cleanup.append(virtual_machine)
time.sleep(self.services["sleep"])
self.debug("Deployed virtual machine %s: " % virtual_machine.id)
cmd = addNicToVirtualMachine.addNicToVirtualMachineCmd()
cmd.virtualmachineid = virtual_machine.id
cmd.networkid = isolated_network.id
self.dedbug("Trying to add isolated network to VM (both in basic zone,\
this operation should fail")
with self.assertRaises(Exception) as e:
time.sleep(5)
self.apiclient.addNicToVirtualMachine(cmd)
return
@attr(tags=["advanced", "dvs"])
def test_26_add_nic_insufficient_permission(self):
"""Try to add network to vm with insufficient permission"""
# 1. Call add network to VM API with api client of other account
# Validate the following:
# 1. API should throw exception saying insufficient permission
cmd = addNicToVirtualMachine.addNicToVirtualMachineCmd()
cmd.virtualmachineid = self.virtual_machine.id
cmd.networkid = self.isolated_network.id
self.debug("Creating new account")
account = Account.create(self.apiclient, self.services["account"], domainid=self.domain.id)
self.cleanup.append(account)
self.debug("Created account %s" % account.name)
self.debug("creating user api client for account: %s" % account.name)
api_client = self.testClient.getUserApiClient(UserName=account.name, DomainName=self.account.domain)
self.debug("Trying to add network to vm with this api client, this should fail due to \
insufficient permission")
with self.assertRaises(Exception) as e:
time.sleep(5)
api_client.addNicToVirtualMachine(cmd)
return
class TestFailureScenariosRemoveNicFromVM(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestFailureScenariosRemoveNicFromVM, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
hypervisor = get_hypervisor_type(cls.api_client)
if hypervisor.lower() not in ["xenserver", "kvm"]:
raise unittest.SkipTest("This feature is supported only on XenServer and KVM")
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
# Set Zones and disk offerings
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
# Create Accounts & networks
cls.services["isolated_network"]["zoneid"] = cls.zone.id
cls.services["shared_network"]["zoneid"] = cls.zone.id
cls._cleanup = []
cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id)
cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(cls.api_client, cls.services["service_offering"])
cls._cleanup.append(cls.service_offering)
cls.virtual_machine = VirtualMachine.create(cls.api_client, cls.services["virtual_machine"],
accountid=cls.account.name, domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id,
mode=cls.zone.networktype)
cls._cleanup.append(cls.virtual_machine)
cls.isolated_network_offering = NetworkOffering.create(cls.api_client, cls.services["isolated_network_offering"], )
cls._cleanup.append(cls.isolated_network_offering)
cls.isolated_network_offering.update(cls.api_client, state='Enabled')
cls.isolated_network = Network.create(cls.api_client, cls.services["isolated_network"], cls.account.name,
cls.account.domainid, networkofferingid=cls.isolated_network_offering.id)
cls._cleanup.append(cls.isolated_network)
cls.virtual_machine.add_nic(cls.api_client, cls.isolated_network.id)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
super(TestFailureScenariosRemoveNicFromVM, self).tearDown()
@classmethod
def tearDownClass(cls):
try:
cls.isolated_network_offering.update(cls.api_client, state='Disabled')
except Exception as e:
cls.debug("Exception during disabling of network offering : %s" % e)
super(TestFailureScenariosRemoveNicFromVM, cls).tearDownClass()
@attr(tags=["advanced", "dvs"])
def test_19_remove_nic_wrong_vm_id(self):
"""Try to remove nic from a vm providing wrong vm id to API"""
# (First two steps are performed in setUpClass)
# 1. Deploy Vm in account
# 2. Add network to VM
# 3. Remove the nic added by the newly added network providing wrong vm id to the API
# Validate the following:
# 1. API throws exception unable to find a virtual machine with id
vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
vm = vm_list_validation_result[1]
nics = [x for x in vm.nic if x.networkid == self.isolated_network.id]
self.assertEqual(len(nics), 1, "There should be exactly one nic corresponding to the isolated\
network %s" % self.isolated_network.id)
cmd = removeNicFromVirtualMachine.removeNicFromVirtualMachineCmd()
cmd.virtualmachineid = self.virtual_machine.id + random_gen()
cmd.nicid = nics[0].id
with self.assertRaises(Exception) as e:
self.apiclient.removeNicFromVirtualMachine(cmd)
self.debug("removeNicFromVirtualMachine API failed with exception: %s" % e.exception)
return
@attr(tags=["advanced", "dvs"])
def test_20_remove_nic_wrong_nic_id(self):
"""Try to remove nic from a vm providing wrong nic id to API"""
# (First two steps are performed in setUpClass)
# 1. Deploy Vm in account
# 2. Add network to VM
# 3. Remove the nic added by the newly added network providing wrong nic id to the API
# Validate the following:
# 1. API throws exception unable to find nic with id
vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
vm = vm_list_validation_result[1]
nics = [x for x in vm.nic if x.networkid == self.isolated_network.id]
self.assertEqual(len(nics), 1, "There should be exactly one nic corresponding to the isolated\
network %s" % self.isolated_network.id)
cmd = removeNicFromVirtualMachine.removeNicFromVirtualMachineCmd()
cmd.virtualmachineid = self.virtual_machine.id
cmd.nicid = nics[0].id + random_gen()
with self.assertRaises(Exception) as e:
self.apiclient.removeNicFromVirtualMachine(cmd)
self.debug("removeNicFromVirtualMachine API failed with exception: %s" % e.exception)
return
@attr(tags=["advanced", "dvs"])
def test_27_remove_nic_insufficient_permission(self):
"""Try to remove nic from vm with insufficient permission"""
# 1. Call remove network from VM API with api client of other account
# Validate the following:
# 1. API should throw exception saying insufficient permission
vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
vm = vm_list_validation_result[1]
nics = [x for x in vm.nic if x.networkid == self.isolated_network.id]
self.assertEqual(len(nics), 1, "There should be exactly one nic corresponding to the isolated\
network %s" % self.isolated_network.id)
cmd = removeNicFromVirtualMachine.removeNicFromVirtualMachineCmd()
cmd.virtualmachineid = self.virtual_machine.id
cmd.nicid = nics[0].id
self.debug("Creating new account")
account = Account.create(self.apiclient, self.services["account"], domainid=self.domain.id)
self.cleanup.append(account)
self.debug("Created account %s" % account.name)
self.debug("creating user api client for account: %s" % account.name)
api_client = self.testClient.getUserApiClient(UserName=account.name, DomainName=self.account.domain)
self.debug("Trying to add network to vm with this api client, this should fail due to \
insufficient permission")
with self.assertRaises(Exception) as e:
api_client.removeNicFromVirtualMachine(cmd)
self.debug("removeNicFromVirtualMachine API failed with exception: %s" % e.exception)
self.apiclient.removeNicFromVirtualMachine(cmd)
return
class TestFailureScenariosUpdateVirtualMachineNIC(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.testClient = super(TestFailureScenariosUpdateVirtualMachineNIC, cls).getClsTestClient()
cls.api_client = cls.testClient.getApiClient()
cls.services = Services().services
hypervisor = get_hypervisor_type(cls.api_client)
if hypervisor.lower() not in ["xenserver", "kvm"]:
raise unittest.SkipTest("This feature is supported only on XenServer and KVM")
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client)
cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
template = get_template(cls.api_client, cls.zone.id, cls.services["ostype"])
# Set Zones and disk offerings
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = template.id
# Create Accounts & networks
cls.services["isolated_network"]["zoneid"] = cls.zone.id
cls.services["shared_network"]["zoneid"] = cls.zone.id
cls._cleanup = []
cls.addednics = []
cls.account = Account.create(cls.api_client, cls.services["account"], domainid=cls.domain.id)
cls._cleanup.append(cls.account)
cls.service_offering = ServiceOffering.create(cls.api_client, cls.services["service_offering"])
cls._cleanup.append(cls.service_offering)
cls.virtual_machine = VirtualMachine.create(cls.api_client, cls.services["virtual_machine"],
accountid=cls.account.name, domainid=cls.account.domainid,
serviceofferingid=cls.service_offering.id, mode=cls.zone.networktype)
cls._cleanup.append(cls.virtual_machine)
cls.defaultNetworkId = cls.virtual_machine.nic[0].networkid
# Create Shared Network Offering
cls.isolated_network_offering = NetworkOffering.create(cls.api_client, cls.services["isolated_network_offering"], )
cls._cleanup.append(cls.isolated_network_offering)
# Enable Isolated Network offering
cls.isolated_network_offering.update(cls.api_client, state='Enabled')
cls.isolated_network = Network.create(cls.api_client, cls.services["isolated_network"],
cls.account.name, cls.account.domainid,
networkofferingid=cls.isolated_network_offering.id)
cls._cleanup.append(cls.isolated_network)
vm_with_nic = cls.virtual_machine.add_nic(cls.api_client, cls.isolated_network.id)
nics = [x for x in vm_with_nic.nic if x.networkid == cls.isolated_network.id]
cls.addednics.append(nics[-1])
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
def tearDown(self):
super(TestFailureScenariosUpdateVirtualMachineNIC, self).tearDown()
@classmethod
def tearDownClass(cls):
try:
for nic in cls.addednics:
cls.virtual_machine.remove_nic(cls.api_client, nic.id)
except Exception as e:
cls.debug("Exception during removal of nics : %s" % e)
try:
cls.isolated_network_offering.update(cls.api_client, state='Disabled')
except Exception as e:
cls.debug("Exception during disabling of network offering : %s" % e)
super(TestFailureScenariosUpdateVirtualMachineNIC, cls).tearDownClass()
@attr(tags=["advanced", "dvs"])
def test_21_update_nic_wrong_vm_id(self):
"""update default nic of vm providing wrong vm id to the API"""
# (First two steps are performed in setupClass)
# 1. Deploy Vm in account
# 2. Add network to VM
# 3. Update default nic of VM (Make the newly added NIC as default) by providing wrong
# vm id to the API
# Validate the following:
# 1. API throws exception saying can't find the virtual machine
self.debug("Listing virtual machine so that to retrive the list of non-default and default nic")
vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
if len(vm_list[0].nic) != 2:
self.fail("VM should have exactly two NICs")
defaultNicIdBeforeUpdate = None
nonDefaultNicIdBeforeUpdate = None
for nic in vm_list[0].nic:
if nic.isdefault:
defaultNicIdBeforeUpdate = nic.id
else:
nonDefaultNicIdBeforeUpdate = nic.id
self.debug("Default nic of VM is %s and non default nic of VM is %s"
% (defaultNicIdBeforeUpdate, nonDefaultNicIdBeforeUpdate))
self.debug("Making non default nic as default nic")
cmd = updateDefaultNicForVirtualMachine.updateDefaultNicForVirtualMachineCmd()
cmd.virtualmachineid = self.virtual_machine.id + random_gen()
cmd.nicid = nonDefaultNicIdBeforeUpdate
with self.assertRaises(Exception) as e:
self.apiclient.updateDefaultNicForVirtualMachine(cmd)
self.debug("updateDefaultNicForVirtualMachine API failed with exception: %s" %
e.exception)
return
@attr(tags=["advanced", "dvs"])
def test_22_update_nic_wrong_nic_id(self):
"""update default nic of vm providing wrong nic id to the API"""
# (First two steps are performed in setupClass)
# 1. Deploy Vm in account
# 2. Add network to VM
# 3. Update default nic of VM (Make the newly added NIC as default) by providing wrong
# nic id to the API
# Validate the following:
# 1. API throws exception saying can't find the nic with id
self.debug("Listing virtual machine so that to retrive the list of non-default and default nic")
vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
if len(vm_list[0].nic) != 2:
self.fail("VM should have exactly two NICs")
defaultNicIdBeforeUpdate = None
nonDefaultNicIdBeforeUpdate = None
for nic in vm_list[0].nic:
if nic.isdefault:
defaultNicIdBeforeUpdate = nic.id
else:
nonDefaultNicIdBeforeUpdate = nic.id
self.debug("Default nic of VM is %s and non default nic of VM is %s"
% (defaultNicIdBeforeUpdate, nonDefaultNicIdBeforeUpdate))
self.debug("Making non default nic as default nic")
cmd = updateDefaultNicForVirtualMachine.updateDefaultNicForVirtualMachineCmd()
cmd.virtualmachineid = self.virtual_machine.id
cmd.nicid = nonDefaultNicIdBeforeUpdate + random_gen()
with self.assertRaises(Exception) as e:
self.apiclient.updateDefaultNicForVirtualMachine(cmd)
self.debug("updateDefaultNicForVirtualMachine API failed with exception: %s" %
e.exception)
return
@attr(tags=["advanced", "dvs"])
def test_23_update_nic_incorrect_vm_state(self):
"""update default nic of vm when vm is state is not Running or Stopped"""
# (First two steps are performed in setupClass)
# 1. Deploy Vm in account
# 2. Add network to VM
# 3. Destroy virtual machine so that the VM state becomes Destroyed or Expunging
# 4. Update default nic of VM (Make the newly added NIC as default)
# Validate the following:
# 1. API throws exception instance is not Running or Stopped
self.debug("Creating new account")
account = Account.create(self.apiclient, self.services["account"], domainid=self.domain.id)
self.cleanup.append(account)
self.debug("Creating virtual machine in the account %s" % account.name)
virtual_machine = VirtualMachine.create(self.api_client, self.services["virtual_machine"],
accountid=account.name, domainid=account.domainid,
serviceofferingid=self.service_offering.id,
mode=self.zone.networktype)
time.sleep(self.services["sleep"])
self.debug("Created virtual machine %s" % virtual_machine.id)
self.debug("Creating isolated network in account %s" % account.name)
isolated_network = Network.create(self.apiclient, self.services["isolated_network"], account.name,
account.domainid, networkofferingid=self.isolated_network_offering.id)
self.debug("Created isolated network %s" % isolated_network.id)
self.debug("Adding isolated network %s to vm %s" % (isolated_network.id, virtual_machine.id))
virtual_machine.add_nic(self.apiclient, isolated_network.id)
self.debug("Listing virtual machine so that to retrive the list of non-default and default nic")
vm_list = list_virtual_machines(self.apiclient, id=virtual_machine.id, listall=True)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
if len(vm_list[0].nic) != 2:
self.fail("VM should have exactly two NICs")
defaultNicIdBeforeUpdate = None
nonDefaultNicIdBeforeUpdate = None
for nic in vm_list[0].nic:
if nic.isdefault:
defaultNicIdBeforeUpdate = nic.id
else:
nonDefaultNicIdBeforeUpdate = nic.id
self.debug("Default nic of VM is %s and non default nic of VM is %s"
% (defaultNicIdBeforeUpdate, nonDefaultNicIdBeforeUpdate))
self.debug("Destroying VM %s" % virtual_machine.id)
virtual_machine.delete(self.apiclient, expunge=False)
self.debug("Making non default nic as default nic")
cmd = updateDefaultNicForVirtualMachine.updateDefaultNicForVirtualMachineCmd()
cmd.virtualmachineid = virtual_machine.id
cmd.nicid = nonDefaultNicIdBeforeUpdate
with self.assertRaises(Exception) as e:
self.apiclient.updateDefaultNicForVirtualMachine(cmd)
self.debug("updateDefaultNicForVirtualMachine API failed with exception: %s" %
e.exception)
return
@attr(tags=["advanced", "dvs"])
def test_28_update_nic_insufficient_permission(self):
"""Try to update default nic of vm with insufficient permission"""
# 1. Call update nic of VM API with api client of other account
# Validate the following:
# 1. API should throw exception saying insufficient permission
account = Account.create(self.apiclient, self.services["account"], domainid=self.domain.id)
self.cleanup.append(account)
self.debug("Created account %s" % account.name)
self.debug("creating user api client for account: %s" % account.name)
api_client = self.testClient.getUserApiClient(UserName=account.name, DomainName=self.account.domain)
self.debug("Listing virtual machine so that to retrive the list of non-default and default nic")
vm_list = list_virtual_machines(self.apiclient, id=self.virtual_machine.id)
vm_list_validation_result = validateList(vm_list)
self.assertEqual(vm_list_validation_result[0], PASS, "vm list validation failed due to %s" %
vm_list_validation_result[2])
if len(vm_list[0].nic) != 2:
self.fail("VM should have exactly two NICs")
defaultNicIdBeforeUpdate = None
nonDefaultNicIdBeforeUpdate = None
for nic in vm_list[0].nic:
if nic.isdefault:
defaultNicIdBeforeUpdate = nic.id
else:
nonDefaultNicIdBeforeUpdate = nic.id
self.debug("Default nic of VM is %s and non default nic of VM is %s"
% (defaultNicIdBeforeUpdate, nonDefaultNicIdBeforeUpdate))
self.debug("Making non default nic as default nic")
cmd = updateDefaultNicForVirtualMachine.updateDefaultNicForVirtualMachineCmd()
cmd.virtualmachineid = self.virtual_machine.id
cmd.nicid = nonDefaultNicIdBeforeUpdate
with self.assertRaises(Exception) as e:
api_client.updateDefaultNicForVirtualMachine(cmd)
return
|
Solutions/Module11WriteFileGuestListChallenge.py | amihaita/GeekTraine | 957 | 12740908 |
#Declare variables to hold the file name and access mode
fileName = "GuestList.txt"
accessMode = "w"
#Open the file for writing
myFile = open(fileName, accessMode)
#Write the guest names and ages to the file
#I can write an entire record in one write statement
myFile.write("<NAME>,27\n")
myFile.write("<NAME>,25\n")
myFile.write("<NAME>,32\n")
#I could write the name and age in separate write statements
myFile.write("<NAME>")
myFile.write(",36\n")
myFile.write("<NAME>")
myFile.write(",26\n")
#Close the file
myFile.close()
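#Illustrative alternative (not part of the original solution; guestFile is just an
#example name): a "with" block closes the file automatically, even if a write fails:
#
#    with open(fileName, accessMode) as guestFile:
#        guestFile.write("<NAME>,27\n")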
|
seahub/organizations/api/admin/user_repos.py | samuelduann/seahub | 420 | 12740922 | <filename>seahub/organizations/api/admin/user_repos.py
# Copyright (c) 2012-2019 Seafile Ltd.
import logging
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.authentication import SessionAuthentication
from django.utils.translation import ugettext as _
from seaserv import ccnet_api, seafile_api
from seahub.api2.permissions import IsProVersion, IsOrgAdminUser
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.utils import api_error
from seahub.base.accounts import User
from seahub.base.templatetags.seahub_tags import email2nickname, email2contact_email
from seahub.utils.timeutils import timestamp_to_isoformat_timestr
from seahub.utils.repo import normalize_repo_status_code
from seahub.api2.endpoints.group_owned_libraries import get_group_id_by_repo_owner
from seahub.group.utils import group_id_to_name
logger = logging.getLogger(__name__)
class OrgAdminUserRepos(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
throttle_classes = (UserRateThrottle,)
permission_classes = (IsProVersion, IsOrgAdminUser)
def get(self, request, org_id, email):
"""Org admin list user owned repos
"""
# resource check
org_id = int(org_id)
if not ccnet_api.get_org_by_id(org_id):
error_msg = 'Organization %s not found.' % org_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
err_msg = 'User %s not found.' % email
return api_error(status.HTTP_404_NOT_FOUND, err_msg)
# permission check
if not ccnet_api.org_user_exists(org_id, email):
err_msg = _('User %s not found in organization.') % email
return api_error(status.HTTP_404_NOT_FOUND, err_msg)
# list repos
repo_info_list = list()
owned_repos = seafile_api.get_org_owned_repo_list(org_id, email)
for r in owned_repos:
# do not return virtual repos
if r.is_virtual:
continue
repo_info = {
"repo_id": r.id,
"repo_name": r.name,
"owner_email": email,
"owner_name": email2nickname(email),
"owner_contact_email": email2contact_email(email),
"last_modified": timestamp_to_isoformat_timestr(r.last_modify),
"modifier_email": r.last_modifier,
"size": r.size,
"encrypted": r.encrypted,
"permission": 'rw', # Always have read-write permission to owned repo
"status": normalize_repo_status_code(r.status),
}
repo_info_list.append(repo_info)
return Response({'repo_list': repo_info_list})
class OrgAdminUserBesharedRepos(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
throttle_classes = (UserRateThrottle,)
permission_classes = (IsProVersion, IsOrgAdminUser)
def get(self, request, org_id, email):
"""Org admin list repos by shared to user
"""
# resource check
org_id = int(org_id)
if not ccnet_api.get_org_by_id(org_id):
error_msg = 'Organization %s not found.' % org_id
return api_error(status.HTTP_404_NOT_FOUND, error_msg)
try:
user = User.objects.get(email=email)
except User.DoesNotExist:
err_msg = 'User %s not found.' % email
return api_error(status.HTTP_404_NOT_FOUND, err_msg)
# permission check
if not ccnet_api.org_user_exists(org_id, email):
err_msg = _('User %s not found in organization.') % email
return api_error(status.HTTP_404_NOT_FOUND, err_msg)
# list beshared repos
repo_info_list = list()
beshared_repos = seafile_api.get_org_share_in_repo_list(org_id, email, -1, -1)
for r in beshared_repos:
owner_email = r.user
group_name = ''
is_group_owned_repo = False
if '@seafile_group' in owner_email:
is_group_owned_repo = True
group_id = get_group_id_by_repo_owner(owner_email)
group_name = group_id_to_name(group_id)
owner_name = group_name if is_group_owned_repo else \
email2nickname(owner_email)
owner_contact_email = '' if is_group_owned_repo else \
email2contact_email(owner_email)
repo_info = {
"repo_id": r.repo_id,
"repo_name": r.repo_name,
"last_modified": timestamp_to_isoformat_timestr(r.last_modify),
"modifier_email": r.last_modifier,
"owner_email": owner_email,
"owner_name": owner_name,
"owner_contact_email": owner_contact_email,
"size": r.size,
"encrypted": r.encrypted,
"permission": r.permission,
"status": normalize_repo_status_code(r.status),
}
repo_info_list.append(repo_info)
return Response({'repo_list': repo_info_list})
|
pecan/log.py | antlarr/pecan | 114 | 12740928 | <filename>pecan/log.py
import logging
from logutils.colorize import ColorizingStreamHandler
class DefaultColorizer(ColorizingStreamHandler):
level_map = {
logging.DEBUG: (None, 'blue', True),
logging.INFO: (None, None, True),
logging.WARNING: (None, 'yellow', True),
logging.ERROR: (None, 'red', True),
logging.CRITICAL: (None, 'red', True),
}
class ColorFormatter(logging.Formatter):
"""
A very basic logging formatter that not only applies color to the
levels of the output but can also add padding to the level names so that
they do not alter the visuals of logging when presented on the terminal.
The padding is provided by a convenient keyword that adds padding to the
``levelname`` so that log output is easier to follow::
%(padded_color_levelname)s
Which would result in log level output that looks like::
[INFO ]
[WARNING ]
[ERROR ]
[DEBUG ]
[CRITICAL]
If colored output is not supported, it falls back to non-colored output
without any extra settings.
"""
def __init__(self, _logging=None, colorizer=None, *a, **kw):
self.logging = _logging or logging
self.color = colorizer or DefaultColorizer()
logging.Formatter.__init__(self, *a, **kw)
def format(self, record):
levelname = record.levelname
padded_level = '%-8s' % levelname
record.color_levelname = self.color.colorize(levelname, record)
record.padded_color_levelname = self.color.colorize(
padded_level,
record
)
return self.logging.Formatter.format(self, record)
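# Minimal usage sketch (illustrative, not part of the original module; it only uses the
# classes defined above): attach ColorFormatter to the colorizing handler so the padded,
# colored level names show up in terminal output.
#
#     handler = DefaultColorizer()
#     handler.setFormatter(ColorFormatter(fmt='[%(padded_color_levelname)s] %(message)s'))
#     root = logging.getLogger()
#     root.addHandler(handler)
#     root.setLevel(logging.DEBUG)
#     root.warning("colored, padded output")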
|
vendor/python/asttokens/mark_tokens.py | hixio-mh/plugin-python | 362 | 12740963 | # Copyright 2016 <NAME>, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import numbers
import token
from . import util
# Mapping of matching braces. To find a token here, look up token[:2].
_matching_pairs_left = {
(token.OP, '('): (token.OP, ')'),
(token.OP, '['): (token.OP, ']'),
(token.OP, '{'): (token.OP, '}'),
}
_matching_pairs_right = {
(token.OP, ')'): (token.OP, '('),
(token.OP, ']'): (token.OP, '['),
(token.OP, '}'): (token.OP, '{'),
}
class MarkTokens(object):
"""
Helper that visits all nodes in the AST tree and assigns .first_token and .last_token attributes
to each of them. This is the heart of the token-marking logic.
"""
def __init__(self, code):
self._code = code
self._methods = util.NodeMethods()
self._iter_children = None
def visit_tree(self, node):
self._iter_children = util.iter_children_func(node)
util.visit_tree(node, self._visit_before_children, self._visit_after_children)
def _visit_before_children(self, node, parent_token):
col = getattr(node, 'col_offset', None)
token = self._code.get_token_from_utf8(node.lineno, col) if col is not None else None
if not token and util.is_module(node):
# We'll assume that a Module node starts at the start of the source code.
token = self._code.get_token(1, 0)
# Use our own token, or our parent's if we don't have one, to pass to child calls as
# parent_token argument. The second value becomes the token argument of _visit_after_children.
return (token or parent_token, token)
def _visit_after_children(self, node, parent_token, token):
# This processes the node generically first, after all children have been processed.
# Get the first and last tokens that belong to children. Note how this doesn't assume that we
# iterate through children in order that corresponds to occurrence in source code. This
# assumption can fail (e.g. with return annotations).
first = token
last = None
for child in self._iter_children(node):
if not first or child.first_token.index < first.index:
first = child.first_token
if not last or child.last_token.index > last.index:
last = child.last_token
# If we don't have a first token from _visit_before_children, and there were no children, then
# use the parent's token as the first token.
first = first or parent_token
# If no children, set last token to the first one.
last = last or first
# Statements continue to before NEWLINE. This helps cover a few different cases at once.
if util.is_stmt(node):
last = self._find_last_in_line(last)
# Capture any unmatched brackets.
first, last = self._expand_to_matching_pairs(first, last, node)
# Give a chance to node-specific methods to adjust.
nfirst, nlast = self._methods.get(self, node.__class__)(node, first, last)
if (nfirst, nlast) != (first, last):
# If anything changed, expand again to capture any unmatched brackets.
nfirst, nlast = self._expand_to_matching_pairs(nfirst, nlast, node)
node.first_token = nfirst
node.last_token = nlast
def _find_last_in_line(self, start_token):
try:
newline = self._code.find_token(start_token, token.NEWLINE)
except IndexError:
newline = self._code.find_token(start_token, token.ENDMARKER)
return self._code.prev_token(newline)
def _iter_non_child_tokens(self, first_token, last_token, node):
"""
Generates all tokens in [first_token, last_token] range that do not belong to any children of
node. E.g. `foo(bar)` has children `foo` and `bar`, but we would yield the `(`.
"""
tok = first_token
for n in self._iter_children(node):
for t in self._code.token_range(tok, self._code.prev_token(n.first_token)):
yield t
if n.last_token.index >= last_token.index:
return
tok = self._code.next_token(n.last_token)
for t in self._code.token_range(tok, last_token):
yield t
def _expand_to_matching_pairs(self, first_token, last_token, node):
"""
Scan tokens in [first_token, last_token] range that are between node's children, and for any
unmatched brackets, adjust first/last tokens to include the closing pair.
"""
# We look for opening parens/braces among non-child tokens (i.e. tokens between our actual
# child nodes). If we find any closing ones, we match them to the opens.
to_match_right = []
to_match_left = []
for tok in self._iter_non_child_tokens(first_token, last_token, node):
tok_info = tok[:2]
if to_match_right and tok_info == to_match_right[-1]:
to_match_right.pop()
elif tok_info in _matching_pairs_left:
to_match_right.append(_matching_pairs_left[tok_info])
elif tok_info in _matching_pairs_right:
to_match_left.append(_matching_pairs_right[tok_info])
# Once done, extend `last_token` to match any unclosed parens/braces.
for match in reversed(to_match_right):
last = self._code.next_token(last_token)
# Allow for a trailing comma before the closing delimiter.
if util.match_token(last, token.OP, ','):
last = self._code.next_token(last)
# Now check for the actual closing delimiter.
if util.match_token(last, *match):
last_token = last
# And extend `first_token` to match any unclosed opening parens/braces.
for match in to_match_left:
first = self._code.prev_token(first_token)
if util.match_token(first, *match):
first_token = first
return (first_token, last_token)
#----------------------------------------------------------------------
# Node visitors. Each takes a preliminary first and last tokens, and returns the adjusted pair
# that will actually be assigned.
def visit_default(self, node, first_token, last_token):
# pylint: disable=no-self-use
# By default, we don't need to adjust the token we computed earlier.
return (first_token, last_token)
def handle_comp(self, open_brace, node, first_token, last_token):
# For list/set/dict comprehensions, we only get the token of the first child, so adjust it to
# include the opening brace (the closing brace will be matched automatically).
before = self._code.prev_token(first_token)
util.expect_token(before, token.OP, open_brace)
return (before, last_token)
def visit_listcomp(self, node, first_token, last_token):
return self.handle_comp('[', node, first_token, last_token)
if six.PY2:
# We shouldn't do this on PY3 because its SetComp/DictComp already have a correct start.
def visit_setcomp(self, node, first_token, last_token):
return self.handle_comp('{', node, first_token, last_token)
def visit_dictcomp(self, node, first_token, last_token):
return self.handle_comp('{', node, first_token, last_token)
def visit_comprehension(self, node, first_token, last_token):
# The 'comprehension' node starts with 'for' but we only get first child; we search backwards
# to find the 'for' keyword.
first = self._code.find_token(first_token, token.NAME, 'for', reverse=True)
return (first, last_token)
def handle_attr(self, node, first_token, last_token):
# Attribute node has ".attr" (2 tokens) after the last child.
dot = self._code.find_token(last_token, token.OP, '.')
name = self._code.next_token(dot)
util.expect_token(name, token.NAME)
return (first_token, name)
visit_attribute = handle_attr
visit_assignattr = handle_attr
visit_delattr = handle_attr
def handle_doc(self, node, first_token, last_token):
# With astroid, nodes that start with a doc-string can have an empty body, in which case we
# need to adjust the last token to include the doc string.
if not node.body and getattr(node, 'doc', None):
last_token = self._code.find_token(last_token, token.STRING)
return (first_token, last_token)
visit_classdef = handle_doc
visit_funcdef = handle_doc
def visit_call(self, node, first_token, last_token):
# A function call isn't over until we see a closing paren. Remember that last_token is at the
# end of all children, so we are not worried about encountering a paren that belongs to a
# child.
return (first_token, self._code.find_token(last_token, token.OP, ')'))
def visit_subscript(self, node, first_token, last_token):
# A subscript operation isn't over until we see a closing bracket. Similar to function calls.
return (first_token, self._code.find_token(last_token, token.OP, ']'))
def visit_tuple(self, node, first_token, last_token):
# A tuple doesn't include parens; if there is a trailing comma, make it part of the tuple.
try:
maybe_comma = self._code.next_token(last_token)
if util.match_token(maybe_comma, token.OP, ','):
last_token = maybe_comma
except IndexError:
pass
return (first_token, last_token)
def visit_str(self, node, first_token, last_token):
return self.handle_str(first_token, last_token)
def visit_joinedstr(self, node, first_token, last_token):
return self.handle_str(first_token, last_token)
def handle_str(self, first_token, last_token):
# Multiple adjacent STRING tokens form a single string.
last = self._code.next_token(last_token)
while util.match_token(last, token.STRING):
last_token = last
last = self._code.next_token(last_token)
return (first_token, last_token)
def visit_num(self, node, first_token, last_token):
# A constant like '-1' gets turned into two tokens; this will skip the '-'.
while util.match_token(last_token, token.OP):
last_token = self._code.next_token(last_token)
return (first_token, last_token)
# In Astroid, the Num and Str nodes are replaced by Const.
def visit_const(self, node, first_token, last_token):
if isinstance(node.value, numbers.Number):
return self.visit_num(node, first_token, last_token)
elif isinstance(node.value, six.string_types):
return self.visit_str(node, first_token, last_token)
return (first_token, last_token)
def visit_keyword(self, node, first_token, last_token):
if node.arg is not None:
equals = self._code.find_token(first_token, token.OP, '=', reverse=True)
name = self._code.prev_token(equals)
util.expect_token(name, token.NAME, node.arg)
first_token = name
return (first_token, last_token)
def visit_starred(self, node, first_token, last_token):
# Astroid has 'Starred' nodes (for "foo(*bar)" type args), but they need to be adjusted.
if not util.match_token(first_token, token.OP, '*'):
star = self._code.prev_token(first_token)
if util.match_token(star, token.OP, '*'):
first_token = star
return (first_token, last_token)
def visit_assignname(self, node, first_token, last_token):
# Astroid may turn 'except' clause into AssignName, but we need to adjust it.
if util.match_token(first_token, token.NAME, 'except'):
colon = self._code.find_token(last_token, token.OP, ':')
first_token = last_token = self._code.prev_token(colon)
return (first_token, last_token)
if six.PY2:
# No need for this on Python3, which already handles 'with' nodes correctly.
def visit_with(self, node, first_token, last_token):
first = self._code.find_token(first_token, token.NAME, 'with', reverse=True)
return (first, last_token)
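# Minimal usage sketch (illustrative; assumes MarkTokens is driven through the public
# asttokens.ASTTokens entry point rather than instantiated directly):
#
#     import asttokens
#     atok = asttokens.ASTTokens("foo(bar + 1)", parse=True)
#     call = atok.tree.body[0].value      # the ast.Call node
#     print(atok.get_text(call))          # -> "foo(bar + 1)"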
|
scripts/top_down_stress_tester.py | Unknoob/buck | 8,027 | 12740964 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import itertools
import json
import logging
import os
import subprocess
import sys
import tempfile
import zipfile
CACHE_DIR = "buck-cache"
class CacheEntry(object):
pass
def get_cache_entry(path):
with zipfile.ZipFile(path) as f:
entry_map = {os.path.basename(n): n for n in f.namelist()}
entry = CacheEntry()
entry.target = f.read(entry_map["TARGET"]).strip()
entry.rule_key = f.read(entry_map["RULE_KEY"]).strip()
entry.deps = json.loads(f.read(entry_map["DEPS"]))
entry.path = path
return entry
def get_cache_inventory():
inventory = {}
for item in os.listdir(CACHE_DIR):
entry = get_cache_entry(os.path.join(CACHE_DIR, item))
inventory[entry.target] = entry
return inventory
def get_missing_cache_entries(inventory):
"""
Find and return all entries missing in the cache.
"""
missing_entries = {}
for entry in inventory.itervalues():
if not os.path.exists(entry.path):
missing_entries[entry.target] = entry
return missing_entries
def clear_cache():
subprocess.check_call(["rm", "-rf", CACHE_DIR])
def clear_output():
subprocess.check_call(["rm", "-rf", "buck-out"])
def run_buck(buck, *args):
logging.info("Running {} {}".format(buck, " ".join(args)))
# Always create a temp file, in case we need to serialize the
# arguments to it.
with tempfile.NamedTemporaryFile() as f:
# Point cache to a known location. Note that ``args`` arrives as a tuple,
# so convert it to a list before appending to it.
args = list(args)
args.append("--config")
args.append("cache.dir=" + CACHE_DIR)
# If the command would be too long, put the args into a file and
# execute that.
if len(args) > 30:
for arg in args:
f.write(arg)
f.write(os.linesep)
f.flush()
args = ["@" + f.name]
return subprocess.check_output([buck] + list(args))
def preorder_traversal(roots, deps, callback):
"""
Execute the given callback during a preorder traversal of the graph.
"""
# Keep track of all the nodes processed.
seen = set()
def traverse(node, callback, chain):
# Make sure we only visit nodes once.
if node in seen:
return
seen.add(node)
# Run the callback with the current node and the chain of parent nodes we
# traversed to find it.
callback(node, chain)
# Recurse on dependencies, making sure to update the visitor chain.
for dep in deps[node]:
traverse(dep, callback, chain=chain + [node])
# Traverse starting from all the roots.
for root in roots:
traverse(root, callback, [])
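# Illustrative sketch (not part of the original script): with a toy dependency map the
# callback fires once per node and receives the chain of parents used to reach it.
#
#     deps = {"//:a": ["//:b", "//:c"], "//:b": ["//:c"], "//:c": []}
#     preorder_traversal(["//:a"], deps, lambda node, chain: print(node, chain))
#     # //:a []
#     # //:b ['//:a']
#     # //:c ['//:a', '//:b']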
def build(buck, targets):
"""
Verify that each of the actions that run when building the given targets
run correctly using a top-down build.
"""
# Now run a build to populate the cache.
logging.info("Running a build to populate the cache")
run_buck(buck, "build", *targets)
# Find all targets reachable via the UI.
out = run_buck(buck, "audit", "dependencies", "--transitive", *targets)
ui_targets = set(out.splitlines())
ui_targets.update(targets)
# Grab an inventory of the cache and use it to form a dependency map.
cache_inventory = get_cache_inventory()
dependencies = {n.target: n.deps for n in cache_inventory.itervalues()}
# Keep track of all the processed nodes so we can print progress info.
processed = set()
# The callback to run for each build rule.
def handle(current, chain):
logging.info(
"Processing {} ({}/{})".format(
current, len(processed), len(dependencies.keys())
)
)
processed.add(current)
# Empty the previous builds output.
logging.info("Removing output from previous build")
clear_output()
# Remove the cache entry for this target.
entry = cache_inventory[current]
os.remove(entry.path)
logging.info(" removed {} => {}".format(current, entry.path))
# Now run the build using the closest UI visible ancestor target.
logging.info("Running the build to check " + current)
for node in itertools.chain([current], reversed(chain)):
if node in ui_targets:
run_buck(buck, "build", "--just-build", current, node)
break
else:
assert False, "couldn't find target in UI: " + node
# We should *always* end with a full cache.
logging.info("Verifying cache...")
missing = get_missing_cache_entries(cache_inventory)
assert len(missing) == 0, "\n".join(sorted(missing.keys()))
preorder_traversal(targets, dependencies, handle)
def test(buck, targets):
"""
Test that we can run tests when pulling from the cache.
"""
# Find all test targets.
test_targets = set()
out = run_buck(buck, "targets", "--json", *targets)
for info in json.loads(out):
if info["buck.type"].endswith("_test"):
test_targets.add("//" + info["buck.base_path"] + ":" + info["name"])
if not test_targets:
raise Exception("no test targets")
# Now run a build to populate the cache.
logging.info("Running a build to populate the cache")
run_buck(buck, "build", *test_targets)
# Empty the build output.
logging.info("Removing output from build")
clear_output()
# Now run the test
run_buck(buck, "test", *test_targets)
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("--buck", default="buck")
parser.add_argument("command", choices=("build", "test"))
parser.add_argument("targets", metavar="target", nargs="+")
args = parser.parse_args(argv[1:])
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
)
# Resolve any aliases in the top-level targets.
out = run_buck(args.buck, "targets", *args.targets)
targets = set(out.splitlines())
# Clear the cache and output directories to start with a clean slate.
logging.info("Clearing output and cache")
run_buck(args.buck, "clean")
clear_output()
clear_cache()
# Run the subcommand
if args.command == "build":
build(args.buck, targets)
elif args.command == "test":
test(args.buck, targets)
else:
raise Exception("unknown command: " + args.command)
sys.exit(main(sys.argv))
|
codes/models/__init__.py | DanKDorda/BasicSR | 106 | 12741013 | import logging
logger = logging.getLogger('base')
def create_model(opt):
model = opt['model']
if model == 'sr':
from .SR_model import SRModel as M
elif model == 'srgan':
from .SRGAN_model import SRGANModel as M
elif model == 'srragan':
from .SRRaGAN_model import SRRaGANModel as M
elif model == 'sftgan':
from .SFTGAN_ACD_model import SFTGAN_ACD_Model as M
else:
raise NotImplementedError('Model [{:s}] not recognized.'.format(model))
m = M(opt)
logger.info('Model [{:s}] is created.'.format(m.__class__.__name__))
return m
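# Usage sketch (illustrative): only opt['model'] is inspected here to pick the class; the
# rest of the opt dict is passed straight to the chosen model's constructor, e.g.
# create_model({'model': 'sr', ...}) returns an SRModel built from the full options dict.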
|
libsaas/services/pipedrive/files.py | MidtownFellowship/libsaas | 155 | 12741015 | from libsaas import http, parsers
from libsaas.services import base
class FilesResource(base.RESTResource):
path = 'files'
def create(self, *args, **kwargs):
raise base.MethodNotSupported()
class Files(FilesResource):
@base.apimethod
def get(self, start=None, limit=None):
"""
Returns data about all files.
Upstream documentation:
https://developers.pipedrive.com/v1#methods-Files
"""
params = base.get_params(None, locals())
return http.Request('GET', self.get_url(), params), parsers.parse_json
class File(FilesResource):
pass
|
ss_baselines/savi/models/visual_cnn.py | tynguyen/sound-spaces | 171 | 12741029 | <gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import torch.nn as nn
from ss_baselines.common.utils import Flatten
from habitat_sim.utils.common import d3_40_colors_rgb
class VisualCNN(nn.Module):
r"""A Simple 3-Conv CNN followed by a fully connected layer
Takes in observations and produces an embedding of the rgb and/or depth components
Args:
observation_space: The observation_space of the agent
output_size: The size of the embedding vector
"""
def __init__(self, observation_space, output_size, extra_rgb=False):
super().__init__()
self._output_size = output_size
if "rgb" in observation_space.spaces and not extra_rgb:
self._n_input_rgb = observation_space.spaces["rgb"].shape[2]
else:
self._n_input_rgb = 0
if "depth" in observation_space.spaces:
self._n_input_depth = observation_space.spaces["depth"].shape[2]
else:
self._n_input_depth = 0
if "semantic" in observation_space.spaces:
self._n_input_semantic = 6
else:
self._n_input_semantic = 0
# kernel size for different CNN layers
self._cnn_layers_kernel_size = [(8, 8), (4, 4), (3, 3)]
# strides for different CNN layers
self._cnn_layers_stride = [(4, 4), (2, 2), (2, 2)]
if self._n_input_rgb > 0:
cnn_dims = np.array(
observation_space.spaces["rgb"].shape[:2], dtype=np.float32
)
elif self._n_input_depth > 0:
cnn_dims = np.array(
observation_space.spaces["depth"].shape[:2], dtype=np.float32
)
elif self._n_input_semantic > 0:
cnn_dims = np.array(
observation_space.spaces["semantic"].shape[:2], dtype=np.float32
)
if self.is_blind:
self.cnn = nn.Sequential()
else:
self._input_shape = (self._n_input_rgb + self._n_input_depth + self._n_input_semantic,
int(cnn_dims[0]), int(cnn_dims[1]))
for kernel_size, stride in zip(
self._cnn_layers_kernel_size, self._cnn_layers_stride
):
cnn_dims = self._conv_output_dim(
dimension=cnn_dims,
padding=np.array([0, 0], dtype=np.float32),
dilation=np.array([1, 1], dtype=np.float32),
kernel_size=np.array(kernel_size, dtype=np.float32),
stride=np.array(stride, dtype=np.float32),
)
self.cnn = nn.Sequential(
nn.Conv2d(
in_channels=self._n_input_rgb + self._n_input_depth + self._n_input_semantic,
out_channels=32,
kernel_size=self._cnn_layers_kernel_size[0],
stride=self._cnn_layers_stride[0],
),
nn.ReLU(True),
nn.Conv2d(
in_channels=32,
out_channels=64,
kernel_size=self._cnn_layers_kernel_size[1],
stride=self._cnn_layers_stride[1],
),
nn.ReLU(True),
nn.Conv2d(
in_channels=64,
out_channels=64,
kernel_size=self._cnn_layers_kernel_size[2],
stride=self._cnn_layers_stride[2],
),
# nn.ReLU(True),
Flatten(),
nn.Linear(64 * cnn_dims[0] * cnn_dims[1], output_size),
nn.ReLU(True),
)
self.layer_init()
def _conv_output_dim(
self, dimension, padding, dilation, kernel_size, stride
):
r"""Calculates the output height and width based on the input
height and width to the convolution layer.
ref: https://pytorch.org/docs/master/nn.html#torch.nn.Conv2d
"""
assert len(dimension) == 2
out_dimension = []
for i in range(len(dimension)):
out_dimension.append(
int(
np.floor(
(
(
dimension[i]
+ 2 * padding[i]
- dilation[i] * (kernel_size[i] - 1)
- 1
)
/ stride[i]
)
+ 1
)
)
)
return tuple(out_dimension)
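# Worked example (illustrative): for a 128x128 input and the kernel/stride settings above,
# the spatial size shrinks as 128 -> 31 (8x8 kernel, stride 4) -> 14 (4x4, stride 2)
# -> 6 (3x3, stride 2), so Flatten feeds 64 * 6 * 6 = 2304 features into the final
# Linear layer.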
def layer_init(self):
for layer in self.cnn:
if isinstance(layer, (nn.Conv2d, nn.Linear)):
nn.init.kaiming_normal_(
layer.weight, nn.init.calculate_gain("relu")
)
if layer.bias is not None:
nn.init.constant_(layer.bias, val=0)
@property
def is_blind(self):
return self._n_input_rgb + self._n_input_depth + self._n_input_semantic == 0
@property
def input_shape(self):
return self._input_shape
@property
def output_shape(self):
return 1, self._output_size
@property
def feature_dims(self):
return self._output_size
def forward(self, observations):
cnn_input = []
if self._n_input_rgb > 0:
rgb_observations = observations["rgb"]
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT X WIDTH]
rgb_observations = rgb_observations.permute(0, 3, 1, 2)
rgb_observations = rgb_observations / 255.0 # normalize RGB
cnn_input.append(rgb_observations)
if self._n_input_depth > 0:
depth_observations = observations["depth"]
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT X WIDTH]
depth_observations = depth_observations.permute(0, 3, 1, 2)
cnn_input.append(depth_observations)
if self._n_input_semantic > 0:
semantic_observations = convert_semantics_to_rgb(observations["semantic"]).float()
semantic_object_observations = observations["semantic_object"].float()
# permute tensor to dimension [BATCH x CHANNEL x HEIGHT X WIDTH]
semantic_observations = torch.cat([semantic_observations, semantic_object_observations], dim=-1)
semantic_observations = semantic_observations.permute(0, 3, 1, 2) / 255.0
cnn_input.append(semantic_observations)
cnn_input = torch.cat(cnn_input, dim=1)
return self.cnn(cnn_input)
def convert_semantics_to_rgb(semantics):
r"""Converts semantic IDs to RGB images.
"""
semantics = semantics.long() % 40
mapping_rgb = torch.from_numpy(d3_40_colors_rgb).to(semantics.device)
semantics_r = torch.take(mapping_rgb[:, 0], semantics)
semantics_g = torch.take(mapping_rgb[:, 1], semantics)
semantics_b = torch.take(mapping_rgb[:, 2], semantics)
semantics_rgb = torch.stack([semantics_r, semantics_g, semantics_b], -1)
return semantics_rgb |
tests/transforms/test_tape_expand.py | therooler/pennylane | 539 | 12741046 | <gh_stars>100-1000
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for tape expansion stopping criteria and expansion functions.
"""
import pytest
import numpy as np
import pennylane as qml
from pennylane.wires import Wires
class TestCreateExpandFn:
"""Test creating expansion functions from stopping criteria."""
crit_0 = (~qml.operation.is_trainable) | (qml.operation.has_gen & qml.operation.is_trainable)
doc_0 = "Test docstring."
with qml.tape.QuantumTape() as tape:
qml.RX(0.2, wires=0)
qml.RY(qml.numpy.array(2.1, requires_grad=True), wires=1)
qml.Rot(*qml.numpy.array([0.5, 0.2, -0.1], requires_grad=True), wires=0)
def test_create_expand_fn(self):
"""Test creation of expand_fn."""
expand_fn = qml.transforms.create_expand_fn(
depth=10,
stop_at=self.crit_0,
docstring=self.doc_0,
)
assert expand_fn.__doc__ == "Test docstring."
def test_create_expand_fn_expansion(self):
"""Test expansion with created expand_fn."""
expand_fn = qml.transforms.create_expand_fn(depth=10, stop_at=self.crit_0)
new_tape = expand_fn(self.tape)
assert new_tape.operations[0] == self.tape.operations[0]
assert new_tape.operations[1] == self.tape.operations[1]
assert [op.name for op in new_tape.operations[2:]] == ["RZ", "RY", "RZ"]
assert np.allclose([op.data for op in new_tape.operations[2:]], [[0.5], [0.2], [-0.1]])
assert [op.wires for op in new_tape.operations[2:]] == [qml.wires.Wires(0)] * 3
def test_create_expand_fn_dont_expand(self):
"""Test expansion is skipped with depth=0."""
expand_fn = qml.transforms.create_expand_fn(depth=0, stop_at=self.crit_0)
new_tape = expand_fn(self.tape)
assert new_tape.operations == self.tape.operations
def test_device_and_stopping_expansion(self, mocker):
"""Test that passing a device alongside a stopping condition ensures
that all operations are expanded to match the devices default gate
set"""
dev = qml.device("default.qubit", wires=1)
expand_fn = qml.transforms.create_expand_fn(device=dev, depth=10, stop_at=self.crit_0)
with qml.tape.QuantumTape() as tape:
qml.U1(0.2, wires=0)
qml.Rot(*qml.numpy.array([0.5, 0.2, -0.1], requires_grad=True), wires=0)
spy_device = mocker.spy(dev, "supports_operation")
new_tape = expand_fn(tape)
spy_device.assert_called()
assert new_tape.operations[0].name == "PhaseShift"
assert [op.name for op in new_tape.operations[1:]] == ["RZ", "RY", "RZ"]
def test_device_only_expansion(self, mocker):
"""Test that passing a device ensures that all operations are expanded
to match the device's default gate set"""
dev = qml.device("default.qubit", wires=1)
expand_fn = qml.transforms.create_expand_fn(device=dev, depth=10)
with qml.tape.QuantumTape() as tape:
qml.U1(0.2, wires=0)
qml.Rot(*qml.numpy.array([0.5, 0.2, -0.1], requires_grad=True), wires=0)
spy_device = mocker.spy(dev, "supports_operation")
new_tape = expand_fn(tape)
spy_device.assert_called()
assert len(new_tape.operations) == 2
assert new_tape.operations[0].name == "PhaseShift"
assert new_tape.operations[1].name == "Rot"
def test_depth_only_expansion(self):
"""Test that passing a depth simply expands to that depth"""
dev = qml.device("default.qubit", wires=0)
with qml.tape.QuantumTape() as tape:
qml.RX(0.2, wires=0)
qml.RY(qml.numpy.array(2.1, requires_grad=True), wires=1)
qml.Rot(*qml.numpy.array([0.5, 0.2, -0.1], requires_grad=True), wires=0)
qml.templates.StronglyEntanglingLayers(
qml.numpy.ones([2, 2, 3], requires_grad=True), wires=[0, 1]
)
expand_fn = qml.transforms.create_expand_fn(depth=0)
new_tape = expand_fn(tape)
assert new_tape is tape
expand_fn = qml.transforms.create_expand_fn(depth=10)
new_tape = expand_fn(tape)
assert new_tape.operations[0] == tape.operations[0]
assert new_tape.operations[1] == tape.operations[1]
assert [op.name for op in new_tape.operations[2:5]] == ["RZ", "RY", "RZ"]
assert len(new_tape.operations[6:]) == 15
class TestExpandMultipar:
"""Test the expansion of multi-parameter gates."""
def test_expand_multipar(self):
"""Test that a multi-parameter gate is decomposed correctly.
And that single-parameter gates are not decomposed."""
dev = qml.device("default.qubit", wires=3)
class _CRX(qml.CRX):
name = "_CRX"
@staticmethod
def decomposition(theta, wires):
raise NotImplementedError()
with qml.tape.QuantumTape() as tape:
qml.RX(1.5, wires=0)
qml.Rot(-2.1, 0.2, -0.418, wires=1)
_CRX(1.5, wires=[0, 2])
new_tape = qml.transforms.expand_multipar(tape)
new_ops = new_tape.operations
assert [op.name for op in new_ops] == ["RX", "RZ", "RY", "RZ", "_CRX"]
def test_no_generator_expansion(self):
"""Test that a gate is decomposed correctly if it has
generator[0]==None."""
dev = qml.device("default.qubit", wires=3)
class _CRX(qml.CRX):
def generator(self):
raise qml.operation.GeneratorUndefinedError()
with qml.tape.QuantumTape() as tape:
qml.RX(1.5, wires=0)
qml.RZ(-2.1, wires=1)
qml.RY(0.2, wires=1)
qml.RZ(-0.418, wires=1)
_CRX(1.5, wires=[0, 2])
new_tape = qml.transforms.expand_multipar(tape)
new_ops = new_tape.operations
expected = ["RX", "RZ", "RY", "RZ", "RZ", "RY", "CNOT", "RY", "CNOT", "RZ"]
assert [op.name for op in new_ops] == expected
class TestExpandNonunitaryGen:
"""Test the expansion of operations without a unitary generator."""
def test_do_not_expand(self):
"""Test that a tape with single-parameter operations with
unitary generators and non-parametric operations is not touched."""
with qml.tape.QuantumTape() as tape:
qml.RX(0.2, wires=0)
qml.Hadamard(0)
qml.PauliRot(0.9, "XY", wires=[0, 1])
qml.SingleExcitationPlus(-1.2, wires=[1, 0])
new_tape = qml.transforms.expand_nonunitary_gen(tape)
assert tape.operations == new_tape.operations
def test_expand_multi_par(self):
"""Test that a tape with single-parameter operations with
unitary generators and non-parametric operations is not touched."""
with qml.tape.QuantumTape() as tape:
qml.RX(0.2, wires=0)
qml.Hadamard(0)
qml.Rot(0.9, 1.2, -0.6, wires=0)
qml.SingleExcitationPlus(-1.2, wires=[1, 0])
new_tape = qml.transforms.expand_nonunitary_gen(tape)
expanded = [
qml.RZ(0.9, wires=0),
qml.RY(1.2, wires=0),
qml.RZ(-0.6, wires=0),
]
assert tape.operations[:2] == new_tape.operations[:2]
assert all(exp.name == new.name for exp, new in zip(expanded, new_tape.operations[2:5]))
assert all(exp.data == new.data for exp, new in zip(expanded, new_tape.operations[2:5]))
assert all(exp.wires == new.wires for exp, new in zip(expanded, new_tape.operations[2:5]))
assert tape.operations[3:] == new_tape.operations[5:]
def test_expand_missing_generator(self):
"""Test that a tape with single-parameter operations with
unitary generators and non-parametric operations is not touched."""
class _PhaseShift(qml.PhaseShift):
def generator(self):
return None
with qml.tape.QuantumTape() as tape:
qml.RX(0.2, wires=0)
qml.Hadamard(0)
_PhaseShift(2.1, wires=1)
qml.SingleExcitationPlus(-1.2, wires=[1, 0])
new_tape = qml.transforms.expand_nonunitary_gen(tape)
assert tape.operations[:2] == new_tape.operations[:2]
exp_op = new_tape.operations[2]
assert exp_op.name == "RZ" and exp_op.data == [2.1] and exp_op.wires == qml.wires.Wires(1)
assert tape.operations[3:] == new_tape.operations[3:]
def test_expand_nonunitary_generator(self):
"""Test that a tape with single-parameter operations with
unitary generators and non-parametric operations is not touched."""
with qml.tape.QuantumTape() as tape:
qml.RX(0.2, wires=0)
qml.Hadamard(0)
qml.PhaseShift(2.1, wires=1)
qml.SingleExcitationPlus(-1.2, wires=[1, 0])
new_tape = qml.transforms.expand_nonunitary_gen(tape)
assert tape.operations[:2] == new_tape.operations[:2]
exp_op = new_tape.operations[2]
assert exp_op.name == "RZ" and exp_op.data == [2.1] and exp_op.wires == qml.wires.Wires(1)
assert tape.operations[3:] == new_tape.operations[3:]
class TestExpandInvalidTrainable:
"""Tests for the gradient expand function"""
def test_no_expansion(self, mocker):
"""Test that a circuit with differentiable
operations is not expanded"""
x = qml.numpy.array(0.2, requires_grad=True)
y = qml.numpy.array(0.1, requires_grad=True)
with qml.tape.QuantumTape() as tape:
qml.RX(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
spy = mocker.spy(tape, "expand")
new_tape = qml.transforms.expand_invalid_trainable(tape)
assert new_tape is tape
spy.assert_not_called()
def test_trainable_nondiff_expansion(self, mocker):
"""Test that a circuit with non-differentiable
trainable operations is expanded"""
x = qml.numpy.array(0.2, requires_grad=True)
y = qml.numpy.array(0.1, requires_grad=True)
class NonDiffPhaseShift(qml.PhaseShift):
grad_method = None
with qml.tape.QuantumTape() as tape:
NonDiffPhaseShift(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
spy = mocker.spy(tape, "expand")
new_tape = qml.transforms.expand_invalid_trainable(tape)
assert new_tape is not tape
spy.assert_called()
        assert new_tape.operations[0].name == "RZ"
        assert new_tape.operations[0].grad_method == "A"
        assert new_tape.operations[1].name == "RY"
        assert new_tape.operations[2].name == "CNOT"
def test_nontrainable_nondiff(self, mocker):
"""Test that a circuit with non-differentiable
non-trainable operations is not expanded"""
x = qml.numpy.array(0.2, requires_grad=False)
y = qml.numpy.array(0.1, requires_grad=True)
class NonDiffPhaseShift(qml.PhaseShift):
grad_method = None
with qml.tape.QuantumTape() as tape:
NonDiffPhaseShift(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
params = tape.get_parameters(trainable_only=False)
tape.trainable_params = qml.math.get_trainable_indices(params)
assert tape.trainable_params == [1]
spy = mocker.spy(tape, "expand")
new_tape = qml.transforms.expand_invalid_trainable(tape)
assert new_tape is tape
spy.assert_not_called()
def test_trainable_numeric(self, mocker):
"""Test that a circuit with numeric differentiable
trainable operations is *not* expanded"""
x = qml.numpy.array(0.2, requires_grad=True)
y = qml.numpy.array(0.1, requires_grad=True)
class NonDiffPhaseShift(qml.PhaseShift):
grad_method = "F"
with qml.tape.QuantumTape() as tape:
NonDiffPhaseShift(x, wires=0)
qml.RY(y, wires=1)
qml.CNOT(wires=[0, 1])
qml.expval(qml.PauliZ(0))
spy = mocker.spy(tape, "expand")
new_tape = qml.transforms.expand_invalid_trainable(tape)
assert new_tape is tape
spy.assert_not_called()
# Custom decomposition functions for testing.
def custom_cnot(wires):
return [
qml.Hadamard(wires=wires[1]),
qml.CZ(wires=[wires[0], wires[1]]),
qml.Hadamard(wires=wires[1]),
]
def custom_hadamard(wires):
return [qml.RZ(np.pi, wires=wires), qml.RY(np.pi / 2, wires=wires)]
# Incorrect, for testing purposes only
def custom_rx(params, wires):
return [qml.RY(params, wires=wires), qml.Hadamard(wires=wires)]
# To test the gradient; use circuit identity RY(theta) = X RY(-theta) X
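# (This holds because X Y X = -Y, so conjugating RY(-theta) by X gives RY(theta).)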
def custom_rot(phi, theta, omega, wires):
return [
qml.RZ(phi, wires=wires),
qml.PauliX(wires=wires),
qml.RY(-theta, wires=wires),
qml.PauliX(wires=wires),
qml.RZ(omega, wires=wires),
]
# Decompose a template into another template
def custom_basic_entangler_layers(weights, wires, **kwargs):
return [
qml.AngleEmbedding(weights[0], wires=wires),
qml.broadcast(qml.CNOT, pattern="ring", wires=wires),
]
class TestCreateCustomDecompExpandFn:
"""Tests for the gradient expand function"""
def test_no_custom_decomp(self):
"""Test that sending an empty dictionary results in no decompositions."""
def circuit():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
original_dev = qml.device("default.qubit", wires=3)
decomp_dev = qml.device("default.qubit", wires=3, custom_decomps={})
original_qnode = qml.QNode(circuit, original_dev, expansion_strategy="device")
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
original_res = original_qnode()
decomp_res = decomp_qnode()
assert np.isclose(original_res, decomp_res)
        assert all(
            orig_op.name == decomp_op.name
            for orig_op, decomp_op in zip(
                original_qnode.qtape.operations, decomp_qnode.qtape.operations
            )
        )
def test_no_custom_decomp_template(self):
"""Test that sending an empty dictionary results in no decomposition
when a template is involved, except the decomposition expected from the device."""
def circuit():
qml.BasicEntanglerLayers([[0.1, 0.2]], wires=[0, 1])
return qml.expval(qml.PauliZ(0))
original_dev = qml.device("default.qubit", wires=3)
decomp_dev = qml.device("default.qubit", wires=3, custom_decomps={})
original_qnode = qml.QNode(circuit, original_dev, expansion_strategy="device")
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
original_res = original_qnode()
decomp_res = decomp_qnode()
assert np.isclose(original_res, decomp_res)
        assert all(
            orig_op.name == decomp_op.name
            for orig_op, decomp_op in zip(
                original_qnode.qtape.operations, decomp_qnode.qtape.operations
            )
        )
@pytest.mark.parametrize("device_name", ["default.qubit", "lightning.qubit"])
def test_one_custom_decomp(self, device_name):
"""Test that specifying a single custom decomposition works as expected."""
def circuit():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
custom_decomps = {"Hadamard": custom_hadamard}
decomp_dev = qml.device(device_name, wires=2, custom_decomps=custom_decomps)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 3
assert decomp_ops[0].name == "RZ"
assert np.isclose(decomp_ops[0].parameters[0], np.pi)
assert decomp_ops[1].name == "RY"
assert np.isclose(decomp_ops[1].parameters[0], np.pi / 2)
assert decomp_ops[2].name == "CNOT"
def test_no_decomp_with_depth_zero(self):
"""Test that specifying a single custom decomposition works as expected."""
def circuit():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
custom_decomps = {"Hadamard": custom_hadamard, "CNOT": custom_cnot}
decomp_dev = qml.device(
"default.qubit", wires=2, custom_decomps=custom_decomps, decomp_depth=0
)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 2
assert decomp_ops[0].name == "Hadamard"
assert decomp_ops[1].name == "CNOT"
def test_one_custom_decomp_gradient(self):
"""Test that gradients are still correctly computed after a decomposition
that performs transpilation."""
def circuit(x):
qml.Hadamard(wires=0)
qml.Rot(x[0], x[1], x[2], wires=0)
qml.Hadamard(wires=0)
return qml.expval(qml.PauliZ(0))
original_dev = qml.device("default.qubit", wires=3)
decomp_dev = qml.device("default.qubit", wires=3, custom_decomps={"Rot": custom_rot})
original_qnode = qml.QNode(circuit, original_dev, expansion_strategy="device")
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
x = qml.numpy.array([0.2, 0.3, 0.4], requires_grad=True)
original_res = original_qnode(x)
decomp_res = decomp_qnode(x)
assert np.allclose(original_res, decomp_res)
original_grad = qml.grad(original_qnode)(x)
decomp_grad = qml.grad(decomp_qnode)(x)
assert np.allclose(original_grad, decomp_grad)
expected_ops = ["Hadamard", "RZ", "PauliX", "RY", "PauliX", "RZ", "Hadamard"]
assert all(
[op.name == name for op, name in zip(decomp_qnode.qtape.operations, expected_ops)]
)
def test_nested_custom_decomp(self):
"""Test that specifying two custom decompositions that have interdependence
works as expected."""
def circuit():
qml.Hadamard(wires=0)
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(0))
custom_decomps = {"Hadamard": custom_hadamard, qml.CNOT: custom_cnot}
decomp_dev = qml.device("default.qubit", wires=2, custom_decomps=custom_decomps)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 7
# Check the RZ gates are in the correct place
for idx in [0, 2, 5]:
assert decomp_ops[idx].name == "RZ"
assert np.isclose(decomp_ops[idx].parameters[0], np.pi)
assert decomp_ops[0].wires == Wires(0)
assert decomp_ops[2].wires == Wires(1)
assert decomp_ops[5].wires == Wires(1)
# Check RY are in the correct place
for idx in [1, 3, 6]:
assert decomp_ops[idx].name == "RY"
assert np.isclose(decomp_ops[idx].parameters[0], np.pi / 2)
assert decomp_ops[1].wires == Wires(0)
assert decomp_ops[3].wires == Wires(1)
assert decomp_ops[6].wires == Wires(1)
assert decomp_ops[4].name == "CZ"
def test_nested_custom_decomp_with_template(self):
"""Test that specifying two custom decompositions that have interdependence
works as expected even when there is a template."""
def circuit():
# -RX(0.1)-C- -> -RX(0.1)---C--- -> -RX(0.1)-----------------C----------------
# -RX(0.2)-X- -> -RX(0.2)-H-Z-H- -> -RX(0.2)-RZ(pi)-RY(pi/2)-Z-RY(pi/2)-RZ(pi)-
qml.BasicEntanglerLayers([[0.1, 0.2]], wires=[0, 1])
return qml.expval(qml.PauliZ(0))
custom_decomps = {"Hadamard": custom_hadamard, qml.CNOT: custom_cnot}
decomp_dev = qml.device("default.qubit", wires=2, custom_decomps=custom_decomps)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 7
assert decomp_ops[0].name == "RX"
assert decomp_ops[0].parameters[0] == 0.1
assert decomp_ops[0].wires == Wires(0)
assert decomp_ops[1].name == "RX"
assert decomp_ops[1].parameters[0] == 0.2
assert decomp_ops[1].wires == Wires(1)
assert decomp_ops[2].name == "RZ"
assert np.isclose(decomp_ops[2].parameters[0], np.pi)
assert decomp_ops[2].wires == Wires(1)
assert decomp_ops[3].name == "RY"
assert np.isclose(decomp_ops[3].parameters[0], np.pi / 2)
assert decomp_ops[3].wires == Wires(1)
assert decomp_ops[4].name == "CZ"
assert decomp_ops[4].wires == Wires([0, 1])
assert decomp_ops[5].name == "RZ"
assert np.isclose(decomp_ops[5].parameters[0], np.pi)
assert decomp_ops[5].wires == Wires(1)
assert decomp_ops[6].name == "RY"
assert np.isclose(decomp_ops[6].parameters[0], np.pi / 2)
assert decomp_ops[6].wires == Wires(1)
def test_custom_decomp_template_to_template(self):
"""Test that decomposing a template into another template and some
gates yields the correct results."""
def circuit():
qml.BasicEntanglerLayers([[0.1, 0.2]], wires=[0, 1])
return qml.expval(qml.PauliZ(0))
# BasicEntanglerLayers custom decomposition involves AngleEmbedding
custom_decomps = {"BasicEntanglerLayers": custom_basic_entangler_layers, "RX": custom_rx}
decomp_dev = qml.device("default.qubit", wires=2, custom_decomps=custom_decomps)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 5
assert decomp_ops[0].name == "RY"
assert decomp_ops[0].parameters[0] == 0.1
assert decomp_ops[0].wires == Wires(0)
assert decomp_ops[1].name == "Hadamard"
assert decomp_ops[1].wires == Wires(0)
assert decomp_ops[2].name == "RY"
assert np.isclose(decomp_ops[2].parameters[0], 0.2)
assert decomp_ops[2].wires == Wires(1)
assert decomp_ops[3].name == "Hadamard"
assert decomp_ops[3].wires == Wires(1)
assert decomp_ops[4].name == "CNOT"
assert decomp_ops[4].wires == Wires([0, 1])
def test_custom_decomp_different_depth(self):
"""Test that alternative expansion depths can be specified."""
def circuit():
qml.BasicEntanglerLayers([[0.1, 0.2]], wires=[0, 1])
return qml.expval(qml.PauliZ(0))
# BasicEntanglerLayers custom decomposition involves AngleEmbedding. If
# expansion depth is 2, the AngleEmbedding will still be decomposed into
# RX (since it's not a supported operation on the device), but the RX will
# not be further decomposed even though the custom decomposition is specified.
custom_decomps = {"BasicEntanglerLayers": custom_basic_entangler_layers, "RX": custom_rx}
decomp_dev = qml.device(
"default.qubit", wires=2, custom_decomps=custom_decomps, decomp_depth=2
)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 3
assert decomp_ops[0].name == "RX"
assert np.isclose(decomp_ops[0].parameters[0], 0.1)
assert decomp_ops[0].wires == Wires(0)
assert decomp_ops[1].name == "RX"
assert np.isclose(decomp_ops[1].parameters[0], 0.2)
assert decomp_ops[1].wires == Wires(1)
assert decomp_ops[2].name == "CNOT"
assert decomp_ops[2].wires == Wires([0, 1])
def test_custom_decomp_with_adjoint(self):
"""Test that applying an adjoint in the circuit results in the adjoint
undergoing the custom decomposition."""
def circuit():
# Adjoint is RX(-0.2), so expect RY(-0.2) H
qml.adjoint(qml.RX)(0.2, wires="a")
return qml.expval(qml.PauliZ("a"))
custom_decomps = {qml.RX: custom_rx}
decomp_dev = qml.device("default.qubit", wires="a", custom_decomps=custom_decomps)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 2
assert decomp_ops[0].name == "RY"
assert decomp_ops[0].parameters[0] == -0.2
assert decomp_ops[0].wires == Wires("a")
assert decomp_ops[1].name == "Hadamard"
assert decomp_ops[1].wires == Wires("a")
def test_custom_decomp_with_control(self):
"""Test that applying a controlled version of a gate results in the
controlled version of a decomposition."""
def circuit():
qml.ctrl(qml.Hadamard, control=0)(wires=1)
return qml.expval(qml.PauliZ(0))
custom_decomps = {qml.Hadamard: custom_hadamard}
decomp_dev = qml.device("default.qubit", wires=2, custom_decomps=custom_decomps)
decomp_qnode = qml.QNode(circuit, decomp_dev, expansion_strategy="device")
_ = decomp_qnode()
decomp_ops = decomp_qnode.qtape.operations
assert len(decomp_ops) == 2
assert decomp_ops[0].name == "CRZ"
assert np.isclose(decomp_ops[0].parameters[0], np.pi)
assert decomp_ops[0].wires == Wires([0, 1])
assert decomp_ops[1].name == "CRY"
assert np.isclose(decomp_ops[1].parameters[0], np.pi / 2)
assert decomp_ops[1].wires == Wires([0, 1])
def test_custom_decomp_in_separate_context(self):
"""Test that the set_decomposition context manager works."""
dev = qml.device("default.qubit", wires=2)
@qml.qnode(dev, expansion_strategy="device")
def circuit():
qml.CNOT(wires=[0, 1])
return qml.expval(qml.PauliZ(wires=0))
# Initial test
_ = circuit()
assert len(circuit.qtape.operations) == 1
assert circuit.qtape.operations[0].name == "CNOT"
assert dev.custom_expand_fn is None
# Test within the context manager
with qml.transforms.set_decomposition({qml.CNOT: custom_cnot}, dev):
_ = circuit()
ops_in_context = circuit.qtape.operations
assert dev.custom_expand_fn is not None
assert len(ops_in_context) == 3
assert ops_in_context[0].name == "Hadamard"
assert ops_in_context[1].name == "CZ"
assert ops_in_context[2].name == "Hadamard"
# Check that afterwards, the device has gone back to normal
_ = circuit()
assert len(circuit.qtape.operations) == 1
assert circuit.qtape.operations[0].name == "CNOT"
assert dev.custom_expand_fn is None
def test_custom_decomp_used_twice(self):
"""Test that creating a custom decomposition includes overwriting the
correct method under the hood and produces expected results."""
res = []
for i in range(2):
custom_decomps = {"MultiRZ": qml.MultiRZ.compute_decomposition}
dev = qml.device("lightning.qubit", wires=2, custom_decomps=custom_decomps)
@qml.qnode(dev, diff_method="adjoint")
def cost(theta):
qml.Hadamard(wires=0)
qml.Hadamard(wires=1)
qml.MultiRZ(theta, wires=[1, 0])
return qml.expval(qml.PauliX(1))
x = np.array(0.5)
res.append(cost(x))
assert res[0] == res[1]
|
tests/test_functionhandler.py | NAnnamalai/gramex | 130 | 12741048 | import json
import gramex.cache
import pandas as pd
from . import TestGramex
from gramex.http import FOUND
from pandas.util.testing import assert_frame_equal as afe
class TestFunctionHandler(TestGramex):
def test_args(self):
etag = {'headers': {'Etag': True}}
text = '{"args": [0, 1], "kwargs": {"a": "a", "b": "b"}}'
self.check('/func/args', text=text, **etag)
self.check('/func/args-split', text=text, **etag)
text = '{"args": ["abc", 1], "kwargs": {"a": "abc", "b": 1}}'
self.check('/func/args-variable', text=text, **etag)
self.check('/func/handler', text='{"args": ["Handler"], "kwargs": {}', **etag)
self.check('/func/handler-null', text='{"args": [], "kwargs": {}', **etag)
self.check('/func/composite',
text='{"args": [0, "Handler"], "kwargs": {"a": "a", "handler": "Handler"}}',
**etag)
text = '{"args": [0, "Handler"], "kwargs": {"a": {"b": 1}, "handler": "Handler"}}'
self.check('/func/compositenested', text=text, **etag)
self.check('/func/compositenested-split', text=text, **etag)
self.check('/func/compositenested-variable', text=text, **etag)
self.check('/func/dumpx?x=1&x=2', text='{"args": [["1", "2"]], "kwargs": {}}', **etag)
def test_async(self):
etag = {'headers': {'Etag': True}}
text = '{"args": [0, 1], "kwargs": {"a": "a", "b": "b"}}'
self.check('/func/async/args', text=text, **etag)
self.check('/func/async/args-split', text=text, **etag)
self.check('/func/async/http', text='{"args": [["1", "2"]], "kwargs": {}}', **etag)
self.check('/func/async/http2',
text='{"args": [["1"]], "kwargs": {}}{"args": [["2"]], "kwargs": {}}', **etag)
self.check('/func/async/calc',
text='[[250,250,250],[250,250,250],[250,250,250],[250,250,250]]', **etag)
def test_json(self):
self.check('/func/numpytypes')
def test_iterator(self):
no_etag = {'headers': {'Etag': False}}
self.check('/func/iterator?x=1&x=2&x=3', text='123', **no_etag)
self.check('/func/iterator/async?x=1&x=2&x=3', text='123', **no_etag)
def test_redirect(self):
r = self.get('/func/redirect', allow_redirects=False)
self.assertEqual(r.headers.get('Location'), '/dir/index/')
self.assertEqual(r.headers.get('Increment'), '1')
r = self.get('/func/redirect?next=/abc', allow_redirects=False)
self.assertEqual(r.headers.get('Location'), '/abc')
self.assertEqual(r.headers.get('Increment'), '2')
r = self.get('/func/redirect', headers={'NEXT': '/abc'}, allow_redirects=False)
self.assertEqual(r.headers.get('Location'), '/abc')
self.assertEqual(r.headers.get('Increment'), '3')
r = self.get('/func/redirect?next=/def', headers={'NEXT': '/abc'}, allow_redirects=False)
self.assertEqual(r.headers.get('Location'), '/def')
self.assertEqual(r.headers.get('Increment'), '4')
def test_path_args(self):
self.check('/func/path_args/高/兴', text='["\\u9ad8", "\\u5174"]')
def test_methods(self):
self.check('/func/methods', method='get', code=405)
self.check('/func/methods', method='delete', code=405)
for method in ['post', 'put']:
r = self.get('/func/methods', method=method,
headers={'NEXT': '/abc'}, allow_redirects=False)
self.assertEqual(r.status_code, FOUND)
self.assertEqual(r.headers.get('Location'), '/abc')
class TestWrapper(TestGramex):
def test_config_kwargs(self):
self.check('/func/power?y=3', text='9.0')
self.check('/func/power?y=3&x=3', text='27.0')
def test_yielder(self):
self.check('/func/yielder?i=a&i=b&i=c', text='abc')
def test_add_handler_get(self):
self.check('/func/total/40/2', text='42.0')
self.check('/func/total/40/2?items=10', text='52.0')
self.check('/func/total/40/2?items=10&items=10', text='62.0')
self.check('/func/name_age/johndoe/age/42', text='johndoe is 42 years old.')
self.check('/func/name_age', text='alpha is 10 years old.')
self.check('/func/name_age?name=johndoe&age=42', text='johndoe is 42 years old.')
        # When the same parameter is passed multiple times, the last value is picked
self.check('/func/name_age?name=x&name=y&age=1&age=2', text='y is 2 years old.')
# When type hints are violated:
self.check('/func/hints?name=johndoe&age=42.3', code=500)
# When multiple arguments are passed:
self.check('/func/total?items=1&items=2&items=3', text='6.0')
self.check('/func/multilist?items=1&items=2&items=3&start=1', text='7.0')
# Positional args with types
self.check('/func/strtotal?items=a&items=b&items=c', text='abc')
# Test native types. Note: "i=false" won't work -- use "i=" since it's a np.bool8
# Note: datetimes must be quoted, since they'll be read as JSON usually.
self.check(
'/func/nativetypes?a=3&b=1.5&c=false&d=d&e=null&f=3&g=1.5&h=h&i=',
text=''.join(['3', '1.5', 'false', 'd', '', '3', '1.5', 'h', 'false',
'"2020-01-01T00:00:00+00:00"', '{"a":3,"b":1.5}', '[3,1.5]']))
self.check('/func/greet', text='Hello, Stranger!')
self.check('/func/greet?name=gramex', text='Hello, gramex!')
self.check('/func/multilist?items=1&items=2&items=3&start=1', text='7.0')
sales = self.check('/func/sales').json()
afe(pd.DataFrame(sales), gramex.cache.open('sales.xlsx', rel=True))
self.check('/func/content/003.json',
text='{"x":3}',
headers={'Content-Type': 'application/json'})
self.check('/func/content/003.txt',
text='x=3',
headers={'Content-Type': 'text/plain'})
def test_add_handler_post(self):
self.check(
'/func/name_age', method='post', data={'name': 'johndoe', 'age': '42'},
text='johndoe is 42 years old.')
self.check(
'/func/name_age', method='post', data=json.dumps({'name': 'johndoe', 'age': '42'}),
request_headers={'Content-Type': 'application/json'},
text='johndoe is 42 years old.')
# When type hints are violated:
self.check('/func/hints', method='post', data={'name': 'johndoe', 'age': '42.3'},
code=500)
# Check typecasting
self.check(
'/func/nativetypes', method='post',
data=json.dumps({'a': 3, 'b': 1.5, 'c': False, 'd': 'd', 'e': None, 'f': 3,
'g': 1.5, 'h': 'h', 'i': False}),
request_headers={'Content-Type': 'application/json'},
text=''.join(['3', '1.5', 'false', 'd', '', '3', '1.5', 'h', 'false',
'"2020-01-01T00:00:00+00:00"', '{"a":3,"b":1.5}', '[3,1.5]']))
self.check('/func/greet', text='Hello, Stranger!')
# Check if POSTing url params and path args works
self.check('/func/name_age?name=johndoe&age=42', method='post',
text='johndoe is 42 years old.')
self.check('/func/name_age/johndoe/age/42', text='johndoe is 42 years old.')
def test_add_handler_delete(self):
self.check('/func/total/40/2?items=10&items=20', text='72.0', method='delete')
|
pixel_cnn_pp/nn.py | eyalbetzalel/pixelsnail-public | 133 | 12741068 | """
Various tensorflow utilities
"""
import numpy as np
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.python.ops import variables
import functools
def passthrough(obj, value): return value
try:
    variables.Variable._build_initializer_expr = passthrough
except: # older versions of TF don't have this
pass
def int_shape(x):
return list(map(int, x.get_shape()))
def concat_elu(x):
""" like concatenated ReLU (http://arxiv.org/abs/1603.05201), but then with ELU """
axis = len(x.get_shape()) - 1
return tf.nn.elu(tf.concat([x, -x], axis))
def log_sum_exp(x):
""" numerically stable log_sum_exp implementation that prevents overflow """
axis = len(x.get_shape()) - 1
m = tf.reduce_max(x, axis)
m2 = tf.reduce_max(x, axis, keep_dims=True)
return m + tf.log(tf.reduce_sum(tf.exp(x - m2), axis))
def log_prob_from_logits(x):
""" numerically stable log_softmax implementation that prevents overflow """
axis = len(x.get_shape()) - 1
m = tf.reduce_max(x, axis, keep_dims=True)
return x - m - tf.log(tf.reduce_sum(tf.exp(x - m), axis, keep_dims=True))
def discretized_mix_logistic_loss(x, l, sum_all=True):
""" log-likelihood for mixture of discretized logistics, assumes the data has been rescaled to [-1,1] interval """
    xs = int_shape(x)  # true image (i.e. labels) to regress to, e.g. (B,32,32,3)
ls = int_shape(l) # predicted distribution, e.g. (B,32,32,100)
# here and below: unpacking the params of the mixture of logistics
nr_mix = int(ls[-1] / 10)
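    # 10 channels per mixture component: 1 mixture logit, 3 means,
    # 3 log-scales and 3 coefficients coupling the RGB sub-pixels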
logit_probs = l[:, :, :, :nr_mix]
l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])
means = l[:, :, :, :, :nr_mix]
log_scales = tf.maximum(l[:, :, :, :, nr_mix:2 * nr_mix], -7.)
coeffs = tf.nn.tanh(l[:, :, :, :, 2 * nr_mix:3 * nr_mix])
# here and below: getting the means and adjusting them based on preceding
# sub-pixels
x = tf.reshape(x, xs + [1]) + tf.zeros(xs + [nr_mix])
m2 = tf.reshape(means[:, :, :, 1, :] + coeffs[:, :, :, 0, :]
* x[:, :, :, 0, :], [xs[0], xs[1], xs[2], 1, nr_mix])
m3 = tf.reshape(means[:, :, :, 2, :] + coeffs[:, :, :, 1, :] * x[:, :, :, 0, :] +
coeffs[:, :, :, 2, :] * x[:, :, :, 1, :], [xs[0], xs[1], xs[2], 1, nr_mix])
means = tf.concat([tf.reshape(means[:, :, :, 0, :], [
xs[0], xs[1], xs[2], 1, nr_mix]), m2, m3], 3)
centered_x = x - means
inv_stdv = tf.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1. / 255.)
cdf_plus = tf.nn.sigmoid(plus_in)
min_in = inv_stdv * (centered_x - 1. / 255.)
cdf_min = tf.nn.sigmoid(min_in)
# log probability for edge case of 0 (before scaling)
log_cdf_plus = plus_in - tf.nn.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
log_one_minus_cdf_min = -tf.nn.softplus(min_in)
cdf_delta = cdf_plus - cdf_min # probability for all other cases
mid_in = inv_stdv * centered_x
# log probability in the center of the bin, to be used in extreme cases
# (not actually used in our code)
log_pdf_mid = mid_in - log_scales - 2. * tf.nn.softplus(mid_in)
# now select the right output: left edge case, right edge case, normal
# case, extremely low prob case (doesn't actually happen for us)
# this is what we are really doing, but using the robust version below for extreme cases in other applications and to avoid NaN issue with tf.select()
# log_probs = tf.select(x < -0.999, log_cdf_plus, tf.select(x > 0.999, log_one_minus_cdf_min, tf.log(cdf_delta)))
# robust version, that still works if probabilities are below 1e-5 (which never happens in our code)
# tensorflow backpropagates through tf.select() by multiplying with zero instead of selecting: this requires use to use some ugly tricks to avoid potential NaNs
# the 1e-12 in tf.maximum(cdf_delta, 1e-12) is never actually used as output, it's purely there to get around the tf.select() gradient issue
# if the probability on a sub-pixel is below 1e-5, we use an approximation
# based on the assumption that the log-density is constant in the bin of
# the observed sub-pixel value
log_probs = tf.where(x < -0.999, log_cdf_plus, tf.where(x > 0.999, log_one_minus_cdf_min,
tf.where(cdf_delta > 1e-5, tf.log(tf.maximum(cdf_delta, 1e-12)), log_pdf_mid - np.log(127.5))))
log_probs = tf.reduce_sum(log_probs, 3) + log_prob_from_logits(logit_probs)
if sum_all:
return -tf.reduce_sum(log_sum_exp(log_probs))
else:
return -tf.reduce_sum(log_sum_exp(log_probs), [1, 2])
def discretized_mix_logistic_loss_per_chn(x, lr, lg, lb, sum_all=True):
""" log-likelihood for mixture of discretized logistics, assumes the data has been rescaled to [-1,1] interval """
xs = int_shape(x) # true image (i.e. labels) to regress to, e.g. (B,32,32,3)
ls = int_shape(lr) # predicted distribution, e.g. (B,32,32,100)
# here and below: unpacking the params of the mixture of logistics
nr_mix = int(ls[-1] / 3)
logit_probs = lr[:, :, :, :nr_mix]
means = tf.concat([lr[:, :, :, None, nr_mix:nr_mix*2], lg[:, :, :, None, nr_mix:nr_mix*2], lb[:, :, :, None, nr_mix:nr_mix*2],], axis=-2)
log_scales = tf.concat([lr[:, :, :, None, nr_mix*2:nr_mix*3], lg[:, :, :, None, nr_mix*2:nr_mix*3], lb[:, :, :, None, nr_mix*2:nr_mix*3],], axis=-2)
log_scales = tf.maximum(log_scales, -7.)
x = tf.reshape(x, xs + [1]) + tf.zeros(xs + [nr_mix])
centered_x = x - means
inv_stdv = tf.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1. / 255.)
cdf_plus = tf.nn.sigmoid(plus_in)
min_in = inv_stdv * (centered_x - 1. / 255.)
cdf_min = tf.nn.sigmoid(min_in)
# log probability for edge case of 0 (before scaling)
log_cdf_plus = plus_in - tf.nn.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
log_one_minus_cdf_min = -tf.nn.softplus(min_in)
cdf_delta = cdf_plus - cdf_min # probability for all other cases
mid_in = inv_stdv * centered_x
# log probability in the center of the bin, to be used in extreme cases
# (not actually used in our code)
log_pdf_mid = mid_in - log_scales - 2. * tf.nn.softplus(mid_in)
# now select the right output: left edge case, right edge case, normal
# case, extremely low prob case (doesn't actually happen for us)
# this is what we are really doing, but using the robust version below for extreme cases in other applications and to avoid NaN issue with tf.select()
# log_probs = tf.select(x < -0.999, log_cdf_plus, tf.select(x > 0.999, log_one_minus_cdf_min, tf.log(cdf_delta)))
# robust version, that still works if probabilities are below 1e-5 (which never happens in our code)
# tensorflow backpropagates through tf.select() by multiplying with zero instead of selecting: this requires use to use some ugly tricks to avoid potential NaNs
# the 1e-12 in tf.maximum(cdf_delta, 1e-12) is never actually used as output, it's purely there to get around the tf.select() gradient issue
# if the probability on a sub-pixel is below 1e-5, we use an approximation
# based on the assumption that the log-density is constant in the bin of
# the observed sub-pixel value
log_probs = tf.where(x < -0.999, log_cdf_plus, tf.where(x > 0.999, log_one_minus_cdf_min,
tf.where(cdf_delta > 1e-5, tf.log(tf.maximum(cdf_delta, 1e-12)), log_pdf_mid - np.log(127.5))))
log_probs = tf.reduce_sum(log_probs, 3) + log_prob_from_logits(logit_probs)
if sum_all:
return -tf.reduce_sum(log_sum_exp(log_probs))
else:
return -tf.reduce_sum(log_sum_exp(log_probs), [1, 2])
def sample_from_discretized_mix_logistic(l, nr_mix):
ls = int_shape(l)
xs = ls[:-1] + [3]
# unpack parameters
logit_probs = l[:, :, :, :nr_mix]
l = tf.reshape(l[:, :, :, nr_mix:], xs + [nr_mix * 3])
# sample mixture indicator from softmax
sel = tf.one_hot(tf.argmax(logit_probs - tf.log(-tf.log(tf.random_uniform(
logit_probs.get_shape(), minval=1e-5, maxval=1. - 1e-5))), 3), depth=nr_mix, dtype=tf.float32)
sel = tf.reshape(sel, xs[:-1] + [1, nr_mix])
# select logistic parameters
means = tf.reduce_sum(l[:, :, :, :, :nr_mix] * sel, 4)
log_scales = tf.maximum(tf.reduce_sum(
l[:, :, :, :, nr_mix:2 * nr_mix] * sel, 4), -7.)
coeffs = tf.reduce_sum(tf.nn.tanh(
l[:, :, :, :, 2 * nr_mix:3 * nr_mix]) * sel, 4)
# sample from logistic & clip to interval
# we don't actually round to the nearest 8bit value when sampling
u = tf.random_uniform(means.get_shape(), minval=1e-5, maxval=1. - 1e-5)
x = means + tf.exp(log_scales) * (tf.log(u) - tf.log(1. - u))
x0 = tf.minimum(tf.maximum(x[:, :, :, 0], -1.), 1.)
x1 = tf.minimum(tf.maximum(
x[:, :, :, 1] + coeffs[:, :, :, 0] * x0, -1.), 1.)
x2 = tf.minimum(tf.maximum(
x[:, :, :, 2] + coeffs[:, :, :, 1] * x0 + coeffs[:, :, :, 2] * x1, -1.), 1.)
return tf.concat([tf.reshape(x0, xs[:-1] + [1]), tf.reshape(x1, xs[:-1] + [1]), tf.reshape(x2, xs[:-1] + [1])], 3)
def get_var_maybe_avg(var_name, ema, **kwargs):
''' utility for retrieving polyak averaged params '''
v = tf.get_variable(var_name, **kwargs)
if ema is not None:
v = ema.average(v)
return v
def get_vars_maybe_avg(var_names, ema, **kwargs):
''' utility for retrieving polyak averaged params '''
vars = []
for vn in var_names:
vars.append(get_var_maybe_avg(vn, ema, **kwargs))
return vars
def adam_updates(params, cost_or_grads, lr=0.001, mom1=0.9, mom2=0.999, eps=1e-8):
''' Adam optimizer '''
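    # Standard Adam with bias correction:
    #   v_t  = mom1 * v  + (1 - mom1) * g        (first-moment estimate)
    #   mg_t = mom2 * mg + (1 - mom2) * g^2      (second-moment estimate)
    #   p   <- p - lr * v_hat / sqrt(mg_hat + eps), with v_hat, mg_hat bias-corrected via t
    # Note that eps is added inside the square root below, a small deviation from the paper.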
updates = []
if type(cost_or_grads) is not list:
grads = tf.gradients(cost_or_grads, params)
else:
grads = cost_or_grads
t = tf.Variable(1., 'adam_t')
for p, g in zip(params, grads):
mg = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adam_mg')
if mom1 > 0:
v = tf.Variable(tf.zeros(p.get_shape()), p.name + '_adam_v')
v_t = mom1 * v + (1. - mom1) * g
v_hat = v_t / (1. - tf.pow(mom1, t))
updates.append(v.assign(v_t))
else:
v_hat = g
mg_t = mom2 * mg + (1. - mom2) * tf.square(g)
mg_hat = mg_t / (1. - tf.pow(mom2, t))
g_t = v_hat / tf.sqrt(mg_hat + eps)
p_t = p - lr * g_t
updates.append(mg.assign(mg_t))
updates.append(p.assign(p_t))
updates.append(t.assign_add(1))
return tf.group(*updates)
def get_name(layer_name, counters):
''' utlity for keeping track of layer names '''
    if layer_name not in counters:
counters[layer_name] = 0
name = layer_name + '_' + str(counters[layer_name])
counters[layer_name] += 1
return name
@add_arg_scope
def dense(x, num_units, nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
''' fully connected layer '''
name = get_name('dense', counters)
with tf.variable_scope(name):
if init:
# data based initialization of parameters
V = tf.get_variable('V', [int(x.get_shape()[
1]), num_units], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
V_norm = tf.nn.l2_normalize(V.initialized_value(), [0])
x_init = tf.matmul(x, V_norm)
m_init, v_init = tf.nn.moments(x_init, [0])
scale_init = init_scale / tf.sqrt(v_init + 1e-10)
g = tf.get_variable('g', dtype=tf.float32,
initializer=scale_init, trainable=True)
b = tf.get_variable('b', dtype=tf.float32,
initializer=-m_init * scale_init, trainable=True)
x_init = tf.reshape(
scale_init, [1, num_units]) * (x_init - tf.reshape(m_init, [1, num_units]))
if nonlinearity is not None:
x_init = nonlinearity(x_init)
return x_init
else:
V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
# tf.assert_variables_initialized([V, g, b])
# use weight normalization (Salimans & Kingma, 2016)
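            # Effectively computes x @ W + b with W = g * V / ||V||_2 (per column);
            # the normalisation is applied by rescaling the matmul output with
            # `scaler` below rather than materialising W explicitly.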
x = tf.matmul(x, V)
scaler = g / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
x = tf.reshape(scaler, [1, num_units]) * \
x + tf.reshape(b, [1, num_units])
# apply nonlinearity
if nonlinearity is not None:
x = nonlinearity(x)
return x
@add_arg_scope
def conv2d(x, num_filters, filter_size=[3, 3], stride=[1, 1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
''' convolutional layer '''
name = get_name('conv2d', counters)
with tf.variable_scope(name):
if init:
# data based initialization of parameters
V = tf.get_variable('V', filter_size + [int(x.get_shape()[-1]), num_filters],
tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
V_norm = tf.nn.l2_normalize(V.initialized_value(), [0, 1, 2])
x_init = tf.nn.conv2d(x, V_norm, [1] + stride + [1], pad)
m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
scale_init = init_scale / tf.sqrt(v_init + 1e-8)
g = tf.get_variable('g', dtype=tf.float32,
initializer=scale_init, trainable=True)
b = tf.get_variable('b', dtype=tf.float32,
initializer=-m_init * scale_init, trainable=True)
x_init = tf.reshape(scale_init, [
1, 1, 1, num_filters]) * (x_init - tf.reshape(m_init, [1, 1, 1, num_filters]))
if nonlinearity is not None:
x_init = nonlinearity(x_init)
return x_init
else:
V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
# tf.assert_variables_initialized([V, g, b])
# use weight normalization (Salimans & Kingma, 2016)
W = tf.reshape(g, [1, 1, 1, num_filters]) * \
tf.nn.l2_normalize(V, [0, 1, 2])
# calculate convolutional layer output
x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1] + stride + [1], pad), b)
# apply nonlinearity
if nonlinearity is not None:
x = nonlinearity(x)
return x
@add_arg_scope
def deconv2d(x, num_filters, filter_size=[3, 3], stride=[1, 1], pad='SAME', nonlinearity=None, init_scale=1., counters={}, init=False, ema=None, **kwargs):
''' transposed convolutional layer '''
name = get_name('deconv2d', counters)
xs = int_shape(x)
if pad == 'SAME':
target_shape = [xs[0], xs[1] * stride[0],
xs[2] * stride[1], num_filters]
else:
target_shape = [xs[0], xs[1] * stride[0] + filter_size[0] -
1, xs[2] * stride[1] + filter_size[1] - 1, num_filters]
with tf.variable_scope(name):
if init:
# data based initialization of parameters
V = tf.get_variable('V', filter_size + [num_filters, int(x.get_shape(
)[-1])], tf.float32, tf.random_normal_initializer(0, 0.05), trainable=True)
V_norm = tf.nn.l2_normalize(V.initialized_value(), [0, 1, 3])
x_init = tf.nn.conv2d_transpose(x, V_norm, target_shape, [
1] + stride + [1], padding=pad)
m_init, v_init = tf.nn.moments(x_init, [0, 1, 2])
scale_init = init_scale / tf.sqrt(v_init + 1e-8)
g = tf.get_variable('g', dtype=tf.float32,
initializer=scale_init, trainable=True)
b = tf.get_variable('b', dtype=tf.float32,
initializer=-m_init * scale_init, trainable=True)
x_init = tf.reshape(scale_init, [
1, 1, 1, num_filters]) * (x_init - tf.reshape(m_init, [1, 1, 1, num_filters]))
if nonlinearity is not None:
x_init = nonlinearity(x_init)
return x_init
else:
V, g, b = get_vars_maybe_avg(['V', 'g', 'b'], ema)
# tf.assert_variables_initialized([V, g, b])
# use weight normalization (Salimans & Kingma, 2016)
W = tf.reshape(g, [1, 1, num_filters, 1]) * \
tf.nn.l2_normalize(V, [0, 1, 3])
# calculate convolutional layer output
x = tf.nn.conv2d_transpose(
x, W, target_shape, [1] + stride + [1], padding=pad)
x = tf.nn.bias_add(x, b)
# apply nonlinearity
if nonlinearity is not None:
x = nonlinearity(x)
return x
@add_arg_scope
def nin(x, num_units, **kwargs):
""" a network in network layer (1x1 CONV) """
s = int_shape(x)
x = tf.reshape(x, [np.prod(s[:-1]), s[-1]])
x = dense(x, num_units, **kwargs)
return tf.reshape(x, s[:-1] + [num_units])
''' meta-layer consisting of multiple base layers '''
@add_arg_scope
def gated_resnet(x, a=None, h=None, nonlinearity=concat_elu, conv=conv2d, init=False, counters={}, ema=None, dropout_p=0., **kwargs):
xs = int_shape(x)
num_filters = xs[-1]
c1 = conv(nonlinearity(x), num_filters)
if a is not None: # add short-cut connection if auxiliary input 'a' is given
c1 += nin(nonlinearity(a), num_filters)
c1 = nonlinearity(c1)
if dropout_p > 0:
c1 = tf.nn.dropout(c1, keep_prob=1. - dropout_p)
c2 = conv(c1, num_filters * 2, init_scale=0.1)
# add projection of h vector if included: conditional generation
if h is not None:
with tf.variable_scope(get_name('conditional_weights', counters)):
hw = get_var_maybe_avg('hw', ema, shape=[int_shape(h)[-1], 2 * num_filters], dtype=tf.float32,
initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
if init:
hw = hw.initialized_value()
c2 += tf.reshape(tf.matmul(h, hw), [xs[0], 1, 1, 2 * num_filters])
    # split the 2*num_filters channels into two halves along the channel axis (axis 3)
    # and apply a GLU-style gate
a, b = tf.split(c2, 2, 3)
c3 = a * tf.nn.sigmoid(b)
return x + c3
''' utilities for shifting the image around, efficient alternative to masking convolutions '''
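# Example: for a (B, H, W, C) tensor, down_shift pads a row of zeros at the top and
# drops the bottom row, so output[:, i] == input[:, i - step]; right_shift/left_shift
# do the same along the width axis. Combining these shifts with ordinary convolutions
# preserves the raster-scan causality of PixelCNN without explicitly masked kernels.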
def down_shift(x, step=1):
xs = int_shape(x)
return tf.concat([tf.zeros([xs[0], step, xs[2], xs[3]]), x[:, :xs[1] - step, :, :]], 1)
def right_shift(x, step=1):
xs = int_shape(x)
return tf.concat([tf.zeros([xs[0], xs[1], step, xs[3]]), x[:, :, :xs[2] - step, :]], 2)
def left_shift(x, step=1):
xs = int_shape(x)
return tf.concat([x[:, :, step:, :], tf.zeros([xs[0], xs[1], step, xs[3]]),], 2)
@add_arg_scope
def down_shifted_conv2d(x, num_filters, filter_size=[2, 3], stride=[1, 1], **kwargs):
x = tf.pad(x, [[0, 0], [filter_size[0] - 1, 0],
[int((filter_size[1] - 1) / 2), int((filter_size[1] - 1) / 2)], [0, 0]])
return conv2d(x, num_filters, filter_size=filter_size, pad='VALID', stride=stride, **kwargs)
@add_arg_scope
def down_shifted_deconv2d(x, num_filters, filter_size=[2, 3], stride=[1, 1], **kwargs):
x = deconv2d(x, num_filters, filter_size=filter_size,
pad='VALID', stride=stride, **kwargs)
xs = int_shape(x)
return x[:, :(xs[1] - filter_size[0] + 1), int((filter_size[1] - 1) / 2):(xs[2] - int((filter_size[1] - 1) / 2)), :]
@add_arg_scope
def down_right_shifted_conv2d(x, num_filters, filter_size=[2, 2], stride=[1, 1], **kwargs):
x = tf.pad(x, [[0, 0], [filter_size[0] - 1, 0],
[filter_size[1] - 1, 0], [0, 0]])
return conv2d(x, num_filters, filter_size=filter_size, pad='VALID', stride=stride, **kwargs)
@add_arg_scope
def down_right_shifted_deconv2d(x, num_filters, filter_size=[2, 2], stride=[1, 1], **kwargs):
x = deconv2d(x, num_filters, filter_size=filter_size,
pad='VALID', stride=stride, **kwargs)
xs = int_shape(x)
return x[:, :(xs[1] - filter_size[0] + 1):, :(xs[2] - filter_size[1] + 1), :]
def causal_shift_nin(x, num_filters, **kwargs):
chns = int_shape(x)[-1]
assert chns % 4 == 0
left, upleft, up, upright = tf.split(x, 4, axis=-1)
return nin(
tf.concat(
            [right_shift(left), right_shift(down_shift(upleft)), down_shift(up), down_shift(left_shift(upright))],
axis=-1
),
num_filters,
**kwargs
)
from tensorflow.python.framework import function
@add_arg_scope
def mem_saving_causal_shift_nin(x, num_filters, init, counters, **kwargs):
if init:
return causal_shift_nin(x, num_filters, init=init, counters=counters, **kwargs)
shps = int_shape(x)
@function.Defun(tf.float32)
def go(ix):
tf.get_variable_scope().reuse_variables()
ix.set_shape(shps)
return causal_shift_nin(ix, num_filters, init=init, counters=counters, **kwargs)
temp = go(x)
temp.set_shape([shps[0], shps[1], shps[2], num_filters])
return temp
import functools
@functools.lru_cache(maxsize=32)
def get_causal_mask(canvas_size, rate=1):
causal_mask = np.zeros([canvas_size, canvas_size], dtype=np.float32)
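    # strictly lower-triangular mask: entry (i, j) is 1 only for j < i, so each
    # flattened spatial position may attend to earlier positions but not to itself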
for i in range(canvas_size):
causal_mask[i, :i] = 1.
causal_mask = tf.constant(causal_mask, dtype=tf.float32)
if rate > 1:
dim = int(np.sqrt(canvas_size))
causal_mask = tf.reshape(causal_mask, [canvas_size, dim, dim, 1])
causal_mask = -tf.nn.max_pool(-causal_mask, [1, rate, rate, 1], [1, rate, rate, 1], 'SAME')
causal_mask = tf.reshape(causal_mask, [1, canvas_size, -1])
return causal_mask
def causal_attention(key, mixin, query, downsample=1, use_pos_enc=False):
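    # key/query/mixin are NHWC feature maps; attention is computed over the flattened
    # spatial positions with the causal mask above, so position i only aggregates
    # `mixin` values from positions j < i (optionally on a `downsample`-pooled grid).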
bs, nr_chns = int_shape(key)[0], int_shape(key)[-1]
if downsample > 1:
pool_shape = [1, downsample, downsample, 1]
key = tf.nn.max_pool(key, pool_shape, pool_shape, 'SAME')
mixin = tf.nn.max_pool(mixin, pool_shape, pool_shape, 'SAME')
xs = int_shape(mixin)
if use_pos_enc:
pos1 = tf.range(0., xs[1]) / xs[1]
pos2 = tf.range(0., xs[2]) / xs[1]
mixin = tf.concat([
mixin,
tf.tile(pos1[None, :, None, None], [xs[0], 1, xs[2], 1]),
tf.tile(pos2[None, None, :, None], [xs[0], xs[2], 1, 1]),
], axis=3)
mixin_chns = int_shape(mixin)[-1]
canvas_size = int(np.prod(int_shape(key)[1:-1]))
canvas_size_q = int(np.prod(int_shape(query)[1:-1]))
causal_mask = get_causal_mask(canvas_size_q, downsample)
dot = tf.matmul(
tf.reshape(query, [bs, canvas_size_q, nr_chns]),
tf.reshape(key, [bs, canvas_size, nr_chns]),
transpose_b=True
) - (1. - causal_mask) * 1e10
dot = dot - tf.reduce_max(dot, axis=-1, keep_dims=True)
causal_exp_dot = tf.exp(dot / np.sqrt(nr_chns).astype(np.float32)) * causal_mask
causal_probs = causal_exp_dot / (tf.reduce_sum(causal_exp_dot, axis=-1, keep_dims=True) + 1e-6)
mixed = tf.matmul(
causal_probs,
tf.reshape(mixin, [bs, canvas_size, mixin_chns])
)
return tf.reshape(mixed, int_shape(query)[:-1] + [mixin_chns])
def non_cached_get_causal_mask(canvas_size, causal_unit):
assert causal_unit == 1
ones = tf.ones([canvas_size, canvas_size], dtype=tf.float32)
lt = tf.matrix_band_part(ones, -1, 0) - tf.matrix_diag(tf.ones([canvas_size,], dtype=tf.float32))
return lt[None, ...]
def mem_saving_causal_attention(_key, _mixin, _query, causal_unit=1):
# @function.Defun(tf.float32, tf.float32, tf.float32)
def go(key, mixin, query,):
key.set_shape(int_shape(_key))
mixin.set_shape(int_shape(_mixin))
query.set_shape(int_shape(_query))
bs, nr_chns = int_shape(key)[0], int_shape(key)[-1]
mixin_chns = int_shape(mixin)[-1]
canvas_size = int(np.prod(int_shape(key)[1:-1]))
causal_mask = non_cached_get_causal_mask(canvas_size, causal_unit=causal_unit)
dot = tf.matmul(
tf.reshape(query, [bs, canvas_size, nr_chns]),
tf.reshape(key, [bs, canvas_size, nr_chns]),
transpose_b=True
) - (1. - causal_mask) * 1e10
dot = dot - tf.reduce_max(dot, axis=-1, keep_dims=True)
causal_exp_dot = tf.exp(dot / np.sqrt(nr_chns).astype(np.float32)) * causal_mask
causal_probs = causal_exp_dot / (tf.reduce_sum(causal_exp_dot, axis=-1, keep_dims=True) + 1e-6)
mixed = tf.matmul(
causal_probs,
tf.reshape(mixin, [bs, canvas_size, mixin_chns])
)
return tf.reshape(mixed, int_shape(mixin))
temp = go(_key, _mixin, _query)
temp.set_shape(int_shape(_mixin))
return temp
|
tests/unit/server/bundle_manager/schedule_run_bundles_test.py | kl-chou/codalab-worksheets | 236 | 12741075 | <filename>tests/unit/server/bundle_manager/schedule_run_bundles_test.py
from codalab.worker.bundle_state import State
from freezegun import freeze_time
from tests.unit.server.bundle_manager import BaseBundleManagerTest
class BundleManagerScheduleRunBundlesTest(BaseBundleManagerTest):
def test_no_bundles(self):
"""With no bundles available, nothing should happen."""
self.bundle_manager._schedule_run_bundles()
def test_no_workers(self):
"""When no workers are available, no bundles should be scheduled."""
bundle = self.create_run_bundle()
self.save_bundle(bundle)
self.bundle_manager._schedule_run_bundles()
bundle = self.bundle_manager._model.get_bundle(bundle.uuid)
self.assertEqual(bundle.state, State.CREATED)
def test_stage_single_bundle(self):
"""When a worker with the right specs is available, a bundle should be staged."""
bundle = self.create_run_bundle(
state=State.STAGED,
metadata=dict(request_memory="0", request_time="", request_cpus=1, request_gpus=0),
)
self.save_bundle(bundle)
self.mock_worker_checkin(cpus=1, user_id=self.user_id)
self.bundle_manager._schedule_run_bundles()
bundle = self.bundle_manager._model.get_bundle(bundle.uuid)
self.assertEqual(bundle.state, State.STARTING)
@freeze_time("2020-02-01", as_kwarg='frozen_time')
def test_cleanup_dead_workers(self, frozen_time):
"""If workers don't check in for a long enough time period, they should be removed."""
self.mock_worker_checkin(cpus=1, user_id=self.user_id)
self.assertEqual(len(self.bundle_manager._worker_model.get_workers()), 1)
frozen_time.move_to("2020-02-12")
self.bundle_manager._schedule_run_bundles()
self.assertEqual(len(self.bundle_manager._worker_model.get_workers()), 0)
def test_restage_stuck_starting_bundles(self):
"""No workers are currently running a bundle, it should be restaged."""
bundle = self.create_run_bundle(State.STARTING)
self.save_bundle(bundle)
self.bundle_manager._schedule_run_bundles()
bundle = self.bundle_manager._model.get_bundle(bundle.uuid)
self.assertEqual(bundle.state, State.STAGED)
def test_bring_offline_stuck_running_bundles(self):
"""If no workers exist to claim a bundle, it should go to the WORKER_OFFLINE state."""
bundle = self.create_run_bundle(State.RUNNING)
self.save_bundle(bundle)
self.bundle_manager._schedule_run_bundles()
bundle = self.bundle_manager._model.get_bundle(bundle.uuid)
self.assertEqual(bundle.state, State.WORKER_OFFLINE)
def test_finalizing_bundle_goes_offline_if_no_worker_claims(self):
"""If no worker claims a FINALIZING bundle, it should go to the WORKER_OFFLINE_STATE."""
bundle = self.create_run_bundle(State.FINALIZING)
self.save_bundle(bundle)
self.bundle_manager._schedule_run_bundles()
bundle = self.bundle_manager._model.get_bundle(bundle.uuid)
self.assertEqual(bundle.state, State.WORKER_OFFLINE)
def test_finalizing_bundle_gets_finished(self):
"""If a worker checks in with a "finalizing" message, the bundle should transition
to the FINALIZING and then FINISHED state."""
bundle = self.create_run_bundle(State.STAGED)
self.save_bundle(bundle)
worker_id = self.mock_worker_checkin(cpus=1, user_id=self.user_id)
# Bundle is assigned to worker
self.bundle_manager._schedule_run_bundles()
bundle = self.bundle_manager._model.get_bundle(bundle.uuid)
self.assertEqual(bundle.state, State.STARTING)
# Worker sends back a "finalizing" message
bundle.state = State.FINALIZING
self.mock_bundle_checkin(bundle, worker_id)
# Bundle is finished
self.bundle_manager._schedule_run_bundles()
bundle = self.bundle_manager._model.get_bundle(bundle.uuid)
self.assertEqual(bundle.state, State.READY)
self.assertEqual(
self.bundle_manager._model.get_bundle_metadata([bundle.uuid], "time_preparing")[
bundle.uuid
],
'5',
)
self.assertEqual(
self.bundle_manager._model.get_bundle_metadata([bundle.uuid], "time_running")[
bundle.uuid
],
'5',
)
self.assertEqual(
self.bundle_manager._model.get_bundle_metadata([bundle.uuid], "time_uploading_results")[
bundle.uuid
],
'5',
)
self.assertEqual(
self.bundle_manager._model.get_bundle_metadata([bundle.uuid], "time_cleaning_up")[
bundle.uuid
],
'5',
)
|
colour/colorimetry/datasets/lefs.py | rift-labs-developer/colour | 1,380 | 12741085 | # -*- coding: utf-8 -*-
"""
Spectral Distributions of the Luminous Efficiency Functions
===========================================================
Defines the spectral distributions of the luminous efficiency functions.
The luminous efficiency data is in the form of a *dict* of
:class:`colour.SpectralDistribution` classes as follows::
{'name': SpectralDistribution, ..., 'name': SpectralDistribution}
The following luminous efficiency functions are available:
- CIE 1924 Photopic Standard Observer
- Judd Modified CIE 1951 Photopic Standard Observer
- Judd-Vos Modified CIE 1978 Photopic Standard Observer
- CIE 1964 Photopic 10 Degree Standard Observer
- CIE 2008 2 Degree Physiologically Relevant LEF
- CIE 2008 10 Degree Physiologically Relevant LEF
- CIE 1951 Scotopic Standard Observer
Notes
-----
- The luminous efficiency functions are provided at 1 nm interval.
- The mesopic luminous efficiency function is calculated using the
*CIE 1924 Photopic Standard Observer* and
*CIE 1951 Scotopic Standard Observer* luminous efficiency functions with
the :func:`colour.sd_mesopic_luminous_efficiency_function` definition and
the data from :attr:`colour.colorimetry.datasets.lefs.DATA_MESOPIC_X`
attribute that defines weighting factors dependent on the photopic
luminance :math:`L_p`.
References
----------
- :cite:`CVRLq` : CVRL. (n.d.). Luminous efficiency. Retrieved April 19,
2014, from http://www.cvrl.org/lumindex.htm
- :cite:`CVRLs` : CVRL. (n.d.). Older CIE Standards. Retrieved February 24,
2014, from http://cvrl.ioo.ucl.ac.uk/cie.htm
- :cite:`Wikipedia2005d` : Wikipedia. (2005). Mesopic weighting function.
Retrieved June 20, 2014, from
http://en.wikipedia.org/wiki/Mesopic_vision#Mesopic_weighting_function
"""
from functools import partial
from colour.colorimetry import SpectralDistribution
from colour.utilities import CaseInsensitiveMapping, LazyCaseInsensitiveMapping
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'DATA_LEFS_PHOTOPIC', 'SDS_LEFS_PHOTOPIC', 'DATA_LEFS_SCOTOPIC',
'SDS_LEFS_SCOTOPIC', 'SDS_LEFS', 'DATA_MESOPIC_X'
]
DATA_LEFS_PHOTOPIC = {
'CIE 1924 Photopic Standard Observer': {
360: 0.0000039170000,
361: 0.0000043935810,
362: 0.0000049296040,
363: 0.0000055321360,
364: 0.0000062082450,
365: 0.0000069650000,
366: 0.0000078132190,
367: 0.0000087673360,
368: 0.0000098398440,
369: 0.0000110432300,
370: 0.0000123900000,
371: 0.0000138864100,
372: 0.0000155572800,
373: 0.0000174429600,
374: 0.0000195837500,
375: 0.0000220200000,
376: 0.0000248396500,
377: 0.0000280412600,
378: 0.0000315310400,
379: 0.0000352152100,
380: 0.0000390000000,
381: 0.0000428264000,
382: 0.0000469146000,
383: 0.0000515896000,
384: 0.0000571764000,
385: 0.0000640000000,
386: 0.0000723442100,
387: 0.0000822122400,
388: 0.0000935081600,
389: 0.0001061361000,
390: 0.0001200000000,
391: 0.0001349840000,
392: 0.0001514920000,
393: 0.0001702080000,
394: 0.0001918160000,
395: 0.0002170000000,
396: 0.0002469067000,
397: 0.0002812400000,
398: 0.0003185200000,
399: 0.0003572667000,
400: 0.0003960000000,
401: 0.0004337147000,
402: 0.0004730240000,
403: 0.0005178760000,
404: 0.0005722187000,
405: 0.0006400000000,
406: 0.0007245600000,
407: 0.0008255000000,
408: 0.0009411600000,
409: 0.0010698800000,
410: 0.0012100000000,
411: 0.0013620910000,
412: 0.0015307520000,
413: 0.0017203680000,
414: 0.0019353230000,
415: 0.0021800000000,
416: 0.0024548000000,
417: 0.0027640000000,
418: 0.0031178000000,
419: 0.0035264000000,
420: 0.0040000000000,
421: 0.0045462400000,
422: 0.0051593200000,
423: 0.0058292800000,
424: 0.0065461600000,
425: 0.0073000000000,
426: 0.0080865070000,
427: 0.0089087200000,
428: 0.0097676800000,
429: 0.0106644300000,
430: 0.0116000000000,
431: 0.0125731700000,
432: 0.0135827200000,
433: 0.0146296800000,
434: 0.0157150900000,
435: 0.0168400000000,
436: 0.0180073600000,
437: 0.0192144800000,
438: 0.0204539200000,
439: 0.0217182400000,
440: 0.0230000000000,
441: 0.0242946100000,
442: 0.0256102400000,
443: 0.0269585700000,
444: 0.0283512500000,
445: 0.0298000000000,
446: 0.0313108300000,
447: 0.0328836800000,
448: 0.0345211200000,
449: 0.0362257100000,
450: 0.0380000000000,
451: 0.0398466700000,
452: 0.0417680000000,
453: 0.0437660000000,
454: 0.0458426700000,
455: 0.0480000000000,
456: 0.0502436800000,
457: 0.0525730400000,
458: 0.0549805600000,
459: 0.0574587200000,
460: 0.0600000000000,
461: 0.0626019700000,
462: 0.0652775200000,
463: 0.0680420800000,
464: 0.0709110900000,
465: 0.0739000000000,
466: 0.0770160000000,
467: 0.0802664000000,
468: 0.0836668000000,
469: 0.0872328000000,
470: 0.0909800000000,
471: 0.0949175500000,
472: 0.0990458400000,
473: 0.1033674000000,
474: 0.1078846000000,
475: 0.1126000000000,
476: 0.1175320000000,
477: 0.1226744000000,
478: 0.1279928000000,
479: 0.1334528000000,
480: 0.1390200000000,
481: 0.1446764000000,
482: 0.1504693000000,
483: 0.1564619000000,
484: 0.1627177000000,
485: 0.1693000000000,
486: 0.1762431000000,
487: 0.1835581000000,
488: 0.1912735000000,
489: 0.1994180000000,
490: 0.2080200000000,
491: 0.2171199000000,
492: 0.2267345000000,
493: 0.2368571000000,
494: 0.2474812000000,
495: 0.2586000000000,
496: 0.2701849000000,
497: 0.2822939000000,
498: 0.2950505000000,
499: 0.3085780000000,
500: 0.3230000000000,
501: 0.3384021000000,
502: 0.3546858000000,
503: 0.3716986000000,
504: 0.3892875000000,
505: 0.4073000000000,
506: 0.4256299000000,
507: 0.4443096000000,
508: 0.4633944000000,
509: 0.4829395000000,
510: 0.5030000000000,
511: 0.5235693000000,
512: 0.5445120000000,
513: 0.5656900000000,
514: 0.5869653000000,
515: 0.6082000000000,
516: 0.6293456000000,
517: 0.6503068000000,
518: 0.6708752000000,
519: 0.6908424000000,
520: 0.7100000000000,
521: 0.7281852000000,
522: 0.7454636000000,
523: 0.7619694000000,
524: 0.7778368000000,
525: 0.7932000000000,
526: 0.8081104000000,
527: 0.8224962000000,
528: 0.8363068000000,
529: 0.8494916000000,
530: 0.8620000000000,
531: 0.8738108000000,
532: 0.8849624000000,
533: 0.8954936000000,
534: 0.9054432000000,
535: 0.9148501000000,
536: 0.9237348000000,
537: 0.9320924000000,
538: 0.9399226000000,
539: 0.9472252000000,
540: 0.9540000000000,
541: 0.9602561000000,
542: 0.9660074000000,
543: 0.9712606000000,
544: 0.9760225000000,
545: 0.9803000000000,
546: 0.9840924000000,
547: 0.9874182000000,
548: 0.9903128000000,
549: 0.9928116000000,
550: 0.9949501000000,
551: 0.9967108000000,
552: 0.9980983000000,
553: 0.9991120000000,
554: 0.9997482000000,
555: 1.0000000000000,
556: 0.9998567000000,
557: 0.9993046000000,
558: 0.9983255000000,
559: 0.9968987000000,
560: 0.9950000000000,
561: 0.9926005000000,
562: 0.9897426000000,
563: 0.9864444000000,
564: 0.9827241000000,
565: 0.9786000000000,
566: 0.9740837000000,
567: 0.9691712000000,
568: 0.9638568000000,
569: 0.9581349000000,
570: 0.9520000000000,
571: 0.9454504000000,
572: 0.9384992000000,
573: 0.9311628000000,
574: 0.9234576000000,
575: 0.9154000000000,
576: 0.9070064000000,
577: 0.8982772000000,
578: 0.8892048000000,
579: 0.8797816000000,
580: 0.8700000000000,
581: 0.8598613000000,
582: 0.8493920000000,
583: 0.8386220000000,
584: 0.8275813000000,
585: 0.8163000000000,
586: 0.8047947000000,
587: 0.7930820000000,
588: 0.7811920000000,
589: 0.7691547000000,
590: 0.7570000000000,
591: 0.7447541000000,
592: 0.7324224000000,
593: 0.7200036000000,
594: 0.7074965000000,
595: 0.6949000000000,
596: 0.6822192000000,
597: 0.6694716000000,
598: 0.6566744000000,
599: 0.6438448000000,
600: 0.6310000000000,
601: 0.6181555000000,
602: 0.6053144000000,
603: 0.5924756000000,
604: 0.5796379000000,
605: 0.5668000000000,
606: 0.5539611000000,
607: 0.5411372000000,
608: 0.5283528000000,
609: 0.5156323000000,
610: 0.5030000000000,
611: 0.4904688000000,
612: 0.4780304000000,
613: 0.4656776000000,
614: 0.4534032000000,
615: 0.4412000000000,
616: 0.4290800000000,
617: 0.4170360000000,
618: 0.4050320000000,
619: 0.3930320000000,
620: 0.3810000000000,
621: 0.3689184000000,
622: 0.3568272000000,
623: 0.3447768000000,
624: 0.3328176000000,
625: 0.3210000000000,
626: 0.3093381000000,
627: 0.2978504000000,
628: 0.2865936000000,
629: 0.2756245000000,
630: 0.2650000000000,
631: 0.2547632000000,
632: 0.2448896000000,
633: 0.2353344000000,
634: 0.2260528000000,
635: 0.2170000000000,
636: 0.2081616000000,
637: 0.1995488000000,
638: 0.1911552000000,
639: 0.1829744000000,
640: 0.1750000000000,
641: 0.1672235000000,
642: 0.1596464000000,
643: 0.1522776000000,
644: 0.1451259000000,
645: 0.1382000000000,
646: 0.1315003000000,
647: 0.1250248000000,
648: 0.1187792000000,
649: 0.1127691000000,
650: 0.1070000000000,
651: 0.1014762000000,
652: 0.0961886400000,
653: 0.0911229600000,
654: 0.0862648500000,
655: 0.0816000000000,
656: 0.0771206400000,
657: 0.0728255200000,
658: 0.0687100800000,
659: 0.0647697600000,
660: 0.0610000000000,
661: 0.0573962100000,
662: 0.0539550400000,
663: 0.0506737600000,
664: 0.0475496500000,
665: 0.0445800000000,
666: 0.0417587200000,
667: 0.0390849600000,
668: 0.0365638400000,
669: 0.0342004800000,
670: 0.0320000000000,
671: 0.0299626100000,
672: 0.0280766400000,
673: 0.0263293600000,
674: 0.0247080500000,
675: 0.0232000000000,
676: 0.0218007700000,
677: 0.0205011200000,
678: 0.0192810800000,
679: 0.0181206900000,
680: 0.0170000000000,
681: 0.0159037900000,
682: 0.0148371800000,
683: 0.0138106800000,
684: 0.0128347800000,
685: 0.0119200000000,
686: 0.0110683100000,
687: 0.0102733900000,
688: 0.0095333110000,
689: 0.0088461570000,
690: 0.0082100000000,
691: 0.0076237810000,
692: 0.0070854240000,
693: 0.0065914760000,
694: 0.0061384850000,
695: 0.0057230000000,
696: 0.0053430590000,
697: 0.0049957960000,
698: 0.0046764040000,
699: 0.0043800750000,
700: 0.0041020000000,
701: 0.0038384530000,
702: 0.0035890990000,
703: 0.0033542190000,
704: 0.0031340930000,
705: 0.0029290000000,
706: 0.0027381390000,
707: 0.0025598760000,
708: 0.0023932440000,
709: 0.0022372750000,
710: 0.0020910000000,
711: 0.0019535870000,
712: 0.0018245800000,
713: 0.0017035800000,
714: 0.0015901870000,
715: 0.0014840000000,
716: 0.0013844960000,
717: 0.0012912680000,
718: 0.0012040920000,
719: 0.0011227440000,
720: 0.0010470000000,
721: 0.0009765896000,
722: 0.0009111088000,
723: 0.0008501332000,
724: 0.0007932384000,
725: 0.0007400000000,
726: 0.0006900827000,
727: 0.0006433100000,
728: 0.0005994960000,
729: 0.0005584547000,
730: 0.0005200000000,
731: 0.0004839136000,
732: 0.0004500528000,
733: 0.0004183452000,
734: 0.0003887184000,
735: 0.0003611000000,
736: 0.0003353835000,
737: 0.0003114404000,
738: 0.0002891656000,
739: 0.0002684539000,
740: 0.0002492000000,
741: 0.0002313019000,
742: 0.0002146856000,
743: 0.0001992884000,
744: 0.0001850475000,
745: 0.0001719000000,
746: 0.0001597781000,
747: 0.0001486044000,
748: 0.0001383016000,
749: 0.0001287925000,
750: 0.0001200000000,
751: 0.0001118595000,
752: 0.0001043224000,
753: 0.0000973356000,
754: 0.0000908458700,
755: 0.0000848000000,
756: 0.0000791466700,
757: 0.0000738580000,
758: 0.0000689160000,
759: 0.0000643026700,
760: 0.0000600000000,
761: 0.0000559818700,
762: 0.0000522256000,
763: 0.0000487184000,
764: 0.0000454474700,
765: 0.0000424000000,
766: 0.0000395610400,
767: 0.0000369151200,
768: 0.0000344486800,
769: 0.0000321481600,
770: 0.0000300000000,
771: 0.0000279912500,
772: 0.0000261135600,
773: 0.0000243602400,
774: 0.0000227246100,
775: 0.0000212000000,
776: 0.0000197785500,
777: 0.0000184528500,
778: 0.0000172168700,
779: 0.0000160645900,
780: 0.0000149900000,
781: 0.0000139872800,
782: 0.0000130515500,
783: 0.0000121781800,
784: 0.0000113625400,
785: 0.0000106000000,
786: 0.0000098858770,
787: 0.0000092173040,
788: 0.0000085923620,
789: 0.0000080091330,
790: 0.0000074657000,
791: 0.0000069595670,
792: 0.0000064879950,
793: 0.0000060486990,
794: 0.0000056393960,
795: 0.0000052578000,
796: 0.0000049017710,
797: 0.0000045697200,
798: 0.0000042601940,
799: 0.0000039717390,
800: 0.0000037029000,
801: 0.0000034521630,
802: 0.0000032183020,
803: 0.0000030003000,
804: 0.0000027971390,
805: 0.0000026078000,
806: 0.0000024312200,
807: 0.0000022665310,
808: 0.0000021130130,
809: 0.0000019699430,
810: 0.0000018366000,
811: 0.0000017122300,
812: 0.0000015962280,
813: 0.0000014880900,
814: 0.0000013873140,
815: 0.0000012934000,
816: 0.0000012058200,
817: 0.0000011241430,
818: 0.0000010480090,
819: 0.0000009770578,
820: 0.0000009109300,
821: 0.0000008492513,
822: 0.0000007917212,
823: 0.0000007380904,
824: 0.0000006881098,
825: 0.0000006415300,
826: 0.0000005980895,
827: 0.0000005575746,
828: 0.0000005198080,
829: 0.0000004846123,
830: 0.0000004518100
},
'Judd Modified CIE 1951 Photopic Standard Observer': {
370: 0.0001,
380: 0.0004,
390: 0.0015,
400: 0.0045,
410: 0.0093,
420: 0.0175,
430: 0.0273,
440: 0.0379,
450: 0.0468,
460: 0.0600,
470: 0.0910,
480: 0.1390,
490: 0.2080,
500: 0.3230,
510: 0.5030,
520: 0.7100,
530: 0.8620,
540: 0.9540,
550: 0.9950,
560: 0.9950,
570: 0.9520,
580: 0.8700,
590: 0.7570,
600: 0.6310,
610: 0.5030,
620: 0.3810,
630: 0.2650,
640: 0.1750,
650: 0.1070,
660: 0.0610,
670: 0.0320,
680: 0.0170,
690: 0.0082,
700: 0.0041,
710: 0.0021,
720: 0.0011,
730: 0.0005,
740: 0.0002,
750: 0.0001,
760: 0.0001,
770: 0.0000
},
'Judd-Vos Modified CIE 1978 Photopic Standard Observer': {
380: 0.0002000000,
381: 0.0002282100,
382: 0.0002610900,
383: 0.0002993600,
384: 0.0003438700,
385: 0.0003955600,
386: 0.0004554400,
387: 0.0005246200,
388: 0.0006042800,
389: 0.0006956500,
390: 0.0008000000,
391: 0.0009163500,
392: 0.0010477000,
393: 0.0011955000,
394: 0.0013611000,
395: 0.0015457000,
396: 0.0017508000,
397: 0.0018776000,
398: 0.0022273000,
399: 0.0025011000,
400: 0.0028000000,
401: 0.0031159000,
402: 0.0034576000,
403: 0.0038268000,
404: 0.0042256000,
405: 0.0046562000,
406: 0.0051216000,
407: 0.0056248000,
408: 0.0061695000,
409: 0.0067597000,
410: 0.0074000000,
411: 0.0081451000,
412: 0.0089555000,
413: 0.0098322000,
414: 0.0107740000,
415: 0.0117790000,
416: 0.0128420000,
417: 0.0139560000,
418: 0.0151110000,
419: 0.0162970000,
420: 0.0175000000,
421: 0.0185820000,
422: 0.0196450000,
423: 0.0206830000,
424: 0.0216940000,
425: 0.0226780000,
426: 0.0236360000,
427: 0.0245720000,
428: 0.0254900000,
429: 0.0263970000,
430: 0.0273000000,
431: 0.0283350000,
432: 0.0293830000,
433: 0.0304420000,
434: 0.0315100000,
435: 0.0325840000,
436: 0.0336610000,
437: 0.0347350000,
438: 0.0358030000,
439: 0.0368600000,
440: 0.0379000000,
441: 0.0388380000,
442: 0.0397520000,
443: 0.0406460000,
444: 0.0415240000,
445: 0.0423910000,
446: 0.0432520000,
447: 0.0441160000,
448: 0.0449900000,
449: 0.0458810000,
450: 0.0468000000,
451: 0.0477430000,
452: 0.0487330000,
453: 0.0497850000,
454: 0.0509100000,
455: 0.0521220000,
456: 0.0534350000,
457: 0.0548640000,
458: 0.0564240000,
459: 0.0581310000,
460: 0.0600000000,
461: 0.0626019700,
462: 0.0652775200,
463: 0.0680420800,
464: 0.0709110900,
465: 0.0739000000,
466: 0.0770160000,
467: 0.0802664000,
468: 0.0836668000,
469: 0.0872328000,
470: 0.0909800000,
471: 0.0949175500,
472: 0.0990458400,
473: 0.1033674000,
474: 0.1078846000,
475: 0.1126000000,
476: 0.1175320000,
477: 0.1226744000,
478: 0.1279928000,
479: 0.1334528000,
480: 0.1390200000,
481: 0.1446764000,
482: 0.1504693000,
483: 0.1564619000,
484: 0.1627177000,
485: 0.1693000000,
486: 0.1762431000,
487: 0.1835581000,
488: 0.1912735000,
489: 0.1994180000,
490: 0.2080200000,
491: 0.2171199000,
492: 0.2267345000,
493: 0.2368571000,
494: 0.2474812000,
495: 0.2586000000,
496: 0.2701849000,
497: 0.2822939000,
498: 0.2950505000,
499: 0.3085780000,
500: 0.3230000000,
501: 0.3384021000,
502: 0.3546858000,
503: 0.3716986000,
504: 0.3892875000,
505: 0.4073000000,
506: 0.4256299000,
507: 0.4443096000,
508: 0.4633944000,
509: 0.4829395000,
510: 0.5030000000,
511: 0.5235693000,
512: 0.5445120000,
513: 0.5656900000,
514: 0.5869653000,
515: 0.6082000000,
516: 0.6293456000,
517: 0.6503068000,
518: 0.6708752000,
519: 0.6908424000,
520: 0.7100000000,
521: 0.7281852000,
522: 0.7454636000,
523: 0.7619694000,
524: 0.7778368000,
525: 0.7932000000,
526: 0.8081104000,
527: 0.8224962000,
528: 0.8363068000,
529: 0.8494916000,
530: 0.8620000000,
531: 0.8738108000,
532: 0.8849624000,
533: 0.8954936000,
534: 0.9054432000,
535: 0.9148501000,
536: 0.9237348000,
537: 0.9320924000,
538: 0.9399226000,
539: 0.9472252000,
540: 0.9540000000,
541: 0.9602561000,
542: 0.9660074000,
543: 0.9712606000,
544: 0.9760225000,
545: 0.9803000000,
546: 0.9840924000,
547: 0.9874182000,
548: 0.9903128000,
549: 0.9928116000,
550: 0.9949501000,
551: 0.9967108000,
552: 0.9980983000,
553: 0.9991120000,
554: 0.9997482000,
555: 1.0000000000,
556: 0.9998567000,
557: 0.9993046000,
558: 0.9983255000,
559: 0.9968987000,
560: 0.9950000000,
561: 0.9926005000,
562: 0.9897426000,
563: 0.9864444000,
564: 0.9827241000,
565: 0.9786000000,
566: 0.9740837000,
567: 0.9691712000,
568: 0.9638568000,
569: 0.9581349000,
570: 0.9520000000,
571: 0.9454504000,
572: 0.9384992000,
573: 0.9311628000,
574: 0.9234576000,
575: 0.9154000000,
576: 0.9070064000,
577: 0.8982772000,
578: 0.8892048000,
579: 0.8797816000,
580: 0.8700000000,
581: 0.8598613000,
582: 0.8493920000,
583: 0.8386220000,
584: 0.8275813000,
585: 0.8163000000,
586: 0.8047947000,
587: 0.7930820000,
588: 0.7811920000,
589: 0.7691547000,
590: 0.7570000000,
591: 0.7447541000,
592: 0.7324224000,
593: 0.7200036000,
594: 0.7074965000,
595: 0.6949000000,
596: 0.6822192000,
597: 0.6694716000,
598: 0.6566744000,
599: 0.6438448000,
600: 0.6310000000,
601: 0.6181555000,
602: 0.6053144000,
603: 0.5924756000,
604: 0.5796379000,
605: 0.5668000000,
606: 0.5539611000,
607: 0.5411372000,
608: 0.5283528000,
609: 0.5156323000,
610: 0.5030000000,
611: 0.4904688000,
612: 0.4780304000,
613: 0.4656776000,
614: 0.4534032000,
615: 0.4412000000,
616: 0.4290800000,
617: 0.4170360000,
618: 0.4050320000,
619: 0.3930320000,
620: 0.3810000000,
621: 0.3689184000,
622: 0.3568272000,
623: 0.3447768000,
624: 0.3328176000,
625: 0.3210000000,
626: 0.3093381000,
627: 0.2978504000,
628: 0.2865936000,
629: 0.2756245000,
630: 0.2650000000,
631: 0.2547632000,
632: 0.2448896000,
633: 0.2353344000,
634: 0.2260528000,
635: 0.2170000000,
636: 0.2081616000,
637: 0.1995488000,
638: 0.1911552000,
639: 0.1829744000,
640: 0.1750000000,
641: 0.1672235000,
642: 0.1596464000,
643: 0.1522776000,
644: 0.1451259000,
645: 0.1382000000,
646: 0.1315003000,
647: 0.1250248000,
648: 0.1187792000,
649: 0.1127691000,
650: 0.1070000000,
651: 0.1014762000,
652: 0.0961886400,
653: 0.0911229600,
654: 0.0862648500,
655: 0.0816000000,
656: 0.0771206400,
657: 0.0728255200,
658: 0.0687100800,
659: 0.0647697600,
660: 0.0610000000,
661: 0.0573962100,
662: 0.0539550400,
663: 0.0506737600,
664: 0.0475496500,
665: 0.0445800000,
666: 0.0417587200,
667: 0.0390849600,
668: 0.0365638400,
669: 0.0342004800,
670: 0.0320000000,
671: 0.0299626100,
672: 0.0280766400,
673: 0.0263293600,
674: 0.0247080500,
675: 0.0232000000,
676: 0.0218007700,
677: 0.0205011200,
678: 0.0192810800,
679: 0.0181206900,
680: 0.0170000000,
681: 0.0159037900,
682: 0.0148371800,
683: 0.0138106800,
684: 0.0128347800,
685: 0.0119200000,
686: 0.0110683100,
687: 0.0102733900,
688: 0.0095333110,
689: 0.0088461570,
690: 0.0082100000,
691: 0.0076237810,
692: 0.0070854240,
693: 0.0065914760,
694: 0.0061384850,
695: 0.0057230000,
696: 0.0053430590,
697: 0.0049957960,
698: 0.0046764040,
699: 0.0043800750,
700: 0.0041020000,
701: 0.0038384530,
702: 0.0035890990,
703: 0.0033542190,
704: 0.0031340930,
705: 0.0029290000,
706: 0.0027381390,
707: 0.0025598760,
708: 0.0023932440,
709: 0.0022372750,
710: 0.0020910000,
711: 0.0019535870,
712: 0.0018245800,
713: 0.0017035800,
714: 0.0015901870,
715: 0.0014840000,
716: 0.0013844960,
717: 0.0012912680,
718: 0.0012040920,
719: 0.0011227440,
720: 0.0010470000,
721: 0.0009765896,
722: 0.0009111088,
723: 0.0008501332,
724: 0.0007932384,
725: 0.0007400000,
726: 0.0006900827,
727: 0.0006433100,
728: 0.0005994960,
729: 0.0005584547,
730: 0.0005200000,
731: 0.0004839136,
732: 0.0004500528,
733: 0.0004183452,
734: 0.0003887184,
735: 0.0003611000,
736: 0.0003353835,
737: 0.0003114404,
738: 0.0002891656,
739: 0.0002684539,
740: 0.0002492000,
741: 0.0002313019,
742: 0.0002146856,
743: 0.0001992884,
744: 0.0001850475,
745: 0.0001719000,
746: 0.0001597781,
747: 0.0001486044,
748: 0.0001383016,
749: 0.0001287925,
750: 0.0001200000,
751: 0.0001118595,
752: 0.0001043224,
753: 0.0000973356,
754: 0.0000908459,
755: 0.0000848000,
756: 0.0000791467,
757: 0.0000738580,
758: 0.0000689160,
759: 0.0000643027,
760: 0.0000600000,
761: 0.0000559819,
762: 0.0000522256,
763: 0.0000487184,
764: 0.0000454475,
765: 0.0000424000,
766: 0.0000395610,
767: 0.0000369151,
768: 0.0000344487,
769: 0.0000321482,
770: 0.0000300000,
771: 0.0000279913,
772: 0.0000261136,
773: 0.0000243602,
774: 0.0000227246,
775: 0.0000212000,
776: 0.0000197786,
777: 0.0000184529,
778: 0.0000172169,
779: 0.0000160646,
780: 0.0000149900
},
'CIE 1964 Photopic 10 Degree Standard Observer': {
360: 0.000000013398,
361: 0.000000020294,
362: 0.000000030560,
363: 0.000000045740,
364: 0.000000068050,
365: 0.000000100650,
366: 0.000000147980,
367: 0.000000216270,
368: 0.000000314200,
369: 0.000000453700,
370: 0.000000651100,
371: 0.000000928800,
372: 0.000001317500,
373: 0.000001857200,
374: 0.000002602000,
375: 0.000003625000,
376: 0.000005019000,
377: 0.000006907000,
378: 0.000009449000,
379: 0.000012848000,
380: 0.000017364000,
381: 0.000023327000,
382: 0.000031150000,
383: 0.000041350000,
384: 0.000054560000,
385: 0.000071560000,
386: 0.000093300000,
387: 0.000120870000,
388: 0.000155640000,
389: 0.000199200000,
390: 0.000253400000,
391: 0.000320200000,
392: 0.000402400000,
393: 0.000502300000,
394: 0.000623200000,
395: 0.000768500000,
396: 0.000941700000,
397: 0.001147800000,
398: 0.001390300000,
399: 0.001674000000,
400: 0.002004400000,
401: 0.002386000000,
402: 0.002822000000,
403: 0.003319000000,
404: 0.003880000000,
405: 0.004509000000,
406: 0.005209000000,
407: 0.005985000000,
408: 0.006833000000,
409: 0.007757000000,
410: 0.008756000000,
411: 0.009816000000,
412: 0.010918000000,
413: 0.012058000000,
414: 0.013237000000,
415: 0.014456000000,
416: 0.015717000000,
417: 0.017025000000,
418: 0.018399000000,
419: 0.019848000000,
420: 0.021391000000,
421: 0.022992000000,
422: 0.024598000000,
423: 0.026213000000,
424: 0.027841000000,
425: 0.029497000000,
426: 0.031195000000,
427: 0.032927000000,
428: 0.034738000000,
429: 0.036654000000,
430: 0.038676000000,
431: 0.040792000000,
432: 0.042946000000,
433: 0.045114000000,
434: 0.047333000000,
435: 0.049602000000,
436: 0.051934000000,
437: 0.054337000000,
438: 0.056822000000,
439: 0.059399000000,
440: 0.062077000000,
441: 0.064737000000,
442: 0.067285000000,
443: 0.069764000000,
444: 0.072218000000,
445: 0.074704000000,
446: 0.077272000000,
447: 0.079979000000,
448: 0.082874000000,
449: 0.086000000000,
450: 0.089456000000,
451: 0.092947000000,
452: 0.096275000000,
453: 0.099535000000,
454: 0.102829000000,
455: 0.106256000000,
456: 0.109901000000,
457: 0.113835000000,
458: 0.118167000000,
459: 0.122932000000,
460: 0.128201000000,
461: 0.133457000000,
462: 0.138323000000,
463: 0.143042000000,
464: 0.147787000000,
465: 0.152761000000,
466: 0.158102000000,
467: 0.163941000000,
468: 0.170362000000,
469: 0.177425000000,
470: 0.185190000000,
471: 0.193025000000,
472: 0.200313000000,
473: 0.207156000000,
474: 0.213644000000,
475: 0.219940000000,
476: 0.226170000000,
477: 0.232467000000,
478: 0.239025000000,
479: 0.245997000000,
480: 0.253589000000,
481: 0.261876000000,
482: 0.270643000000,
483: 0.279645000000,
484: 0.288694000000,
485: 0.297665000000,
486: 0.306469000000,
487: 0.315035000000,
488: 0.323335000000,
489: 0.331366000000,
490: 0.339133000000,
491: 0.347860000000,
492: 0.358326000000,
493: 0.370001000000,
494: 0.382464000000,
495: 0.395379000000,
496: 0.408482000000,
497: 0.421588000000,
498: 0.434619000000,
499: 0.447601000000,
500: 0.460777000000,
501: 0.474340000000,
502: 0.488200000000,
503: 0.502340000000,
504: 0.516740000000,
505: 0.531360000000,
506: 0.546190000000,
507: 0.561180000000,
508: 0.576290000000,
509: 0.591500000000,
510: 0.606741000000,
511: 0.622150000000,
512: 0.637830000000,
513: 0.653710000000,
514: 0.669680000000,
515: 0.685660000000,
516: 0.701550000000,
517: 0.717230000000,
518: 0.732570000000,
519: 0.747460000000,
520: 0.761757000000,
521: 0.775340000000,
522: 0.788220000000,
523: 0.800460000000,
524: 0.812140000000,
525: 0.823330000000,
526: 0.834120000000,
527: 0.844600000000,
528: 0.854870000000,
529: 0.865040000000,
530: 0.875211000000,
531: 0.885370000000,
532: 0.895370000000,
533: 0.905150000000,
534: 0.914650000000,
535: 0.923810000000,
536: 0.932550000000,
537: 0.940810000000,
538: 0.948520000000,
539: 0.955600000000,
540: 0.961988000000,
541: 0.967540000000,
542: 0.972230000000,
543: 0.976170000000,
544: 0.979460000000,
545: 0.982200000000,
546: 0.984520000000,
547: 0.986520000000,
548: 0.988320000000,
549: 0.990020000000,
550: 0.991761000000,
551: 0.993530000000,
552: 0.995230000000,
553: 0.996770000000,
554: 0.998090000000,
555: 0.999110000000,
556: 0.999770000000,
557: 1.000000000000,
558: 0.999710000000,
559: 0.998850000000,
560: 0.997340000000,
561: 0.995260000000,
562: 0.992740000000,
563: 0.989750000000,
564: 0.986300000000,
565: 0.982380000000,
566: 0.977980000000,
567: 0.973110000000,
568: 0.967740000000,
569: 0.961890000000,
570: 0.955552000000,
571: 0.948601000000,
572: 0.940981000000,
573: 0.932798000000,
574: 0.924158000000,
575: 0.915175000000,
576: 0.905954000000,
577: 0.896608000000,
578: 0.887249000000,
579: 0.877986000000,
580: 0.868934000000,
581: 0.860164000000,
582: 0.851519000000,
583: 0.842963000000,
584: 0.834393000000,
585: 0.825623000000,
586: 0.816764000000,
587: 0.807544000000,
588: 0.797947000000,
589: 0.787893000000,
590: 0.777405000000,
591: 0.766490000000,
592: 0.755309000000,
593: 0.743845000000,
594: 0.732190000000,
595: 0.720353000000,
596: 0.708281000000,
597: 0.696055000000,
598: 0.683621000000,
599: 0.671048000000,
600: 0.658341000000,
601: 0.645545000000,
602: 0.632718000000,
603: 0.619815000000,
604: 0.606887000000,
605: 0.593878000000,
606: 0.580781000000,
607: 0.567653000000,
608: 0.554490000000,
609: 0.541228000000,
610: 0.527963000000,
611: 0.514634000000,
612: 0.501363000000,
613: 0.488124000000,
614: 0.474935000000,
615: 0.461834000000,
616: 0.448823000000,
617: 0.435917000000,
618: 0.423153000000,
619: 0.410526000000,
620: 0.398057000000,
621: 0.385835000000,
622: 0.373951000000,
623: 0.362311000000,
624: 0.350863000000,
625: 0.339554000000,
626: 0.328309000000,
627: 0.317118000000,
628: 0.305936000000,
629: 0.294737000000,
630: 0.283493000000,
631: 0.272222000000,
632: 0.260990000000,
633: 0.249877000000,
634: 0.238946000000,
635: 0.228254000000,
636: 0.217853000000,
637: 0.207780000000,
638: 0.198072000000,
639: 0.188748000000,
640: 0.179828000000,
641: 0.171285000000,
642: 0.163059000000,
643: 0.155151000000,
644: 0.147535000000,
645: 0.140211000000,
646: 0.133170000000,
647: 0.126400000000,
648: 0.119892000000,
649: 0.113640000000,
650: 0.107633000000,
651: 0.101870000000,
652: 0.096347000000,
653: 0.091063000000,
654: 0.086010000000,
655: 0.081187000000,
656: 0.076583000000,
657: 0.072198000000,
658: 0.068024000000,
659: 0.064052000000,
660: 0.060281000000,
661: 0.056697000000,
662: 0.053292000000,
663: 0.050059000000,
664: 0.046998000000,
665: 0.044096000000,
666: 0.041345000000,
667: 0.038750700000,
668: 0.036297800000,
669: 0.033983200000,
670: 0.031800400000,
671: 0.029739500000,
672: 0.027791800000,
673: 0.025955100000,
674: 0.024226300000,
675: 0.022601700000,
676: 0.021077900000,
677: 0.019650500000,
678: 0.018315300000,
679: 0.017068600000,
680: 0.015905100000,
681: 0.014818300000,
682: 0.013800800000,
683: 0.012849500000,
684: 0.011960700000,
685: 0.011130300000,
686: 0.010355500000,
687: 0.009633200000,
688: 0.008959900000,
689: 0.008332400000,
690: 0.007748800000,
691: 0.007204600000,
692: 0.006697500000,
693: 0.006225100000,
694: 0.005785000000,
695: 0.005375100000,
696: 0.004994100000,
697: 0.004639200000,
698: 0.004309300000,
699: 0.004002800000,
700: 0.003717740000,
701: 0.003452620000,
702: 0.003205830000,
703: 0.002976230000,
704: 0.002762810000,
705: 0.002564560000,
706: 0.002380480000,
707: 0.002209710000,
708: 0.002051320000,
709: 0.001904490000,
710: 0.001768470000,
711: 0.001642360000,
712: 0.001525350000,
713: 0.001416720000,
714: 0.001315950000,
715: 0.001222390000,
716: 0.001135550000,
717: 0.001054940000,
718: 0.000980140000,
719: 0.000910660000,
720: 0.000846190000,
721: 0.000786290000,
722: 0.000730680000,
723: 0.000678990000,
724: 0.000631010000,
725: 0.000586440000,
726: 0.000545110000,
727: 0.000506720000,
728: 0.000471110000,
729: 0.000438050000,
730: 0.000407410000,
731: 0.000378962000,
732: 0.000352543000,
733: 0.000328001000,
734: 0.000305208000,
735: 0.000284041000,
736: 0.000264375000,
737: 0.000246109000,
738: 0.000229143000,
739: 0.000213376000,
740: 0.000198730000,
741: 0.000185115000,
742: 0.000172454000,
743: 0.000160678000,
744: 0.000149730000,
745: 0.000139550000,
746: 0.000130086000,
747: 0.000121290000,
748: 0.000113106000,
749: 0.000105501000,
750: 0.000098428000,
751: 0.000091853000,
752: 0.000085738000,
753: 0.000080048000,
754: 0.000074751000,
755: 0.000069819000,
756: 0.000065222000,
757: 0.000060939000,
758: 0.000056942000,
759: 0.000053217000,
760: 0.000049737000,
761: 0.000046491000,
762: 0.000043464000,
763: 0.000040635000,
764: 0.000038000000,
765: 0.000035540500,
766: 0.000033244800,
767: 0.000031100600,
768: 0.000029099000,
769: 0.000027230700,
770: 0.000025486000,
771: 0.000023856100,
772: 0.000022333200,
773: 0.000020910400,
774: 0.000019580800,
775: 0.000018338400,
776: 0.000017177700,
777: 0.000016093400,
778: 0.000015080000,
779: 0.000014133600,
780: 0.000013249000,
781: 0.000012422600,
782: 0.000011649900,
783: 0.000010927700,
784: 0.000010251900,
785: 0.000009619600,
786: 0.000009028100,
787: 0.000008474000,
788: 0.000007954800,
789: 0.000007468600,
790: 0.000007012800,
791: 0.000006585800,
792: 0.000006185700,
793: 0.000005810700,
794: 0.000005459000,
795: 0.000005129800,
796: 0.000004820600,
797: 0.000004531200,
798: 0.000004259100,
799: 0.000004004200,
800: 0.000003764730,
801: 0.000003539950,
802: 0.000003329140,
803: 0.000003131150,
804: 0.000002945290,
805: 0.000002770810,
806: 0.000002607050,
807: 0.000002453290,
808: 0.000002308940,
809: 0.000002173380,
810: 0.000002046130,
811: 0.000001926620,
812: 0.000001814400,
813: 0.000001708950,
814: 0.000001609880,
815: 0.000001516770,
816: 0.000001429210,
817: 0.000001346860,
818: 0.000001269450,
819: 0.000001196620,
820: 0.000001128090,
821: 0.000001063680,
822: 0.000001003130,
823: 0.000000946220,
824: 0.000000892630,
825: 0.000000842160,
826: 0.000000794640,
827: 0.000000749780,
828: 0.000000707440,
829: 0.000000667480,
830: 0.000000629700
},
'CIE 2008 2 Degree Physiologically Relevant LEF': {
390: 4.14616e-04,
391: 5.02833e-04,
392: 6.08499e-04,
393: 7.34444e-04,
394: 8.83739e-04,
395: 1.05965e-03,
396: 1.26553e-03,
397: 1.50475e-03,
398: 1.78049e-03,
399: 2.09557e-03,
400: 2.45219e-03,
401: 2.85222e-03,
402: 3.29912e-03,
403: 3.79747e-03,
404: 4.35277e-03,
405: 4.97172e-03,
406: 5.66101e-03,
407: 6.42161e-03,
408: 7.25031e-03,
409: 8.14017e-03,
410: 9.07986e-03,
411: 1.00561e-02,
412: 1.10646e-02,
413: 1.21052e-02,
414: 1.31801e-02,
415: 1.42938e-02,
416: 1.54500e-02,
417: 1.66409e-02,
418: 1.78530e-02,
419: 1.90702e-02,
420: 2.02737e-02,
421: 2.14481e-02,
422: 2.26004e-02,
423: 2.37479e-02,
424: 2.49125e-02,
425: 2.61211e-02,
426: 2.73992e-02,
427: 2.87499e-02,
428: 3.01691e-02,
429: 3.16514e-02,
430: 3.31904e-02,
431: 3.47791e-02,
432: 3.64149e-02,
433: 3.80957e-02,
434: 3.98184e-02,
435: 4.15794e-02,
436: 4.33710e-02,
437: 4.51718e-02,
438: 4.69542e-02,
439: 4.86872e-02,
440: 5.03366e-02,
441: 5.18761e-02,
442: 5.33222e-02,
443: 5.47060e-02,
444: 5.60634e-02,
445: 5.74339e-02,
446: 5.88511e-02,
447: 6.03081e-02,
448: 6.17864e-02,
449: 6.32657e-02,
450: 6.47235e-02,
451: 6.61475e-02,
452: 6.75726e-02,
453: 6.90493e-02,
454: 7.06328e-02,
455: 7.23834e-02,
456: 7.43596e-02,
457: 7.65938e-02,
458: 7.91144e-02,
459: 8.19535e-02,
460: 8.51482e-02,
461: 8.87266e-02,
462: 9.26601e-02,
463: 9.68972e-02,
464: 1.01375e-01,
465: 1.06014e-01,
466: 1.10738e-01,
467: 1.15511e-01,
468: 1.20312e-01,
469: 1.25116e-01,
470: 1.29896e-01,
471: 1.34630e-01,
472: 1.39331e-01,
473: 1.44023e-01,
474: 1.48737e-01,
475: 1.53507e-01,
476: 1.58364e-01,
477: 1.63320e-01,
478: 1.68376e-01,
479: 1.73537e-01,
480: 1.78805e-01,
481: 1.84182e-01,
482: 1.89656e-01,
483: 1.95210e-01,
484: 2.00826e-01,
485: 2.06483e-01,
486: 2.12183e-01,
487: 2.18028e-01,
488: 2.24159e-01,
489: 2.30730e-01,
490: 2.37916e-01,
491: 2.45871e-01,
492: 2.54602e-01,
493: 2.64076e-01,
494: 2.74249e-01,
495: 2.85068e-01,
496: 2.96484e-01,
497: 3.08501e-01,
498: 3.21139e-01,
499: 3.34418e-01,
500: 3.48354e-01,
501: 3.62960e-01,
502: 3.78228e-01,
503: 3.94136e-01,
504: 4.10658e-01,
505: 4.27760e-01,
506: 4.45399e-01,
507: 4.63540e-01,
508: 4.82138e-01,
509: 5.01143e-01,
510: 5.20497e-01,
511: 5.40139e-01,
512: 5.60021e-01,
513: 5.80097e-01,
514: 6.00317e-01,
515: 6.20626e-01,
516: 6.40940e-01,
517: 6.61077e-01,
518: 6.80813e-01,
519: 6.99904e-01,
520: 7.18089e-01,
521: 7.35159e-01,
522: 7.51182e-01,
523: 7.66314e-01,
524: 7.80735e-01,
525: 7.94645e-01,
526: 8.08207e-01,
527: 8.21382e-01,
528: 8.34070e-01,
529: 8.46171e-01,
530: 8.57580e-01,
531: 8.68241e-01,
532: 8.78306e-01,
533: 8.87991e-01,
534: 8.97521e-01,
535: 9.07135e-01,
536: 9.16995e-01,
537: 9.26929e-01,
538: 9.36673e-01,
539: 9.45948e-01,
540: 9.54468e-01,
541: 9.61983e-01,
542: 9.68439e-01,
543: 9.73829e-01,
544: 9.78152e-01,
545: 9.81411e-01,
546: 9.83667e-01,
547: 9.85208e-01,
548: 9.86381e-01,
549: 9.87536e-01,
550: 9.89023e-01,
551: 9.91081e-01,
552: 9.93491e-01,
553: 9.95917e-01,
554: 9.98021e-01,
555: 9.99461e-01,
556: 9.99993e-01,
557: 9.99756e-01,
558: 9.98984e-01,
559: 9.97912e-01,
560: 9.96774e-01,
561: 9.95736e-01,
562: 9.94711e-01,
563: 9.93553e-01,
564: 9.92116e-01,
565: 9.90255e-01,
566: 9.87860e-01,
567: 9.84932e-01,
568: 9.81504e-01,
569: 9.77603e-01,
570: 9.73261e-01,
571: 9.68476e-01,
572: 9.63137e-01,
573: 9.57106e-01,
574: 9.50254e-01,
575: 9.42457e-01,
576: 9.33690e-01,
577: 9.24289e-01,
578: 9.14671e-01,
579: 9.05233e-01,
580: 8.96361e-01,
581: 8.88307e-01,
582: 8.80846e-01,
583: 8.73645e-01,
584: 8.66376e-01,
585: 8.58720e-01,
586: 8.50430e-01,
587: 8.41505e-01,
588: 8.32011e-01,
589: 8.22015e-01,
590: 8.11587e-01,
591: 8.00787e-01,
592: 7.89652e-01,
593: 7.78205e-01,
594: 7.66473e-01,
595: 7.54479e-01,
596: 7.42247e-01,
597: 7.29823e-01,
598: 7.17252e-01,
599: 7.04582e-01,
600: 6.91855e-01,
601: 6.79101e-01,
602: 6.66285e-01,
603: 6.53359e-01,
604: 6.40281e-01,
605: 6.27007e-01,
606: 6.13515e-01,
607: 5.99849e-01,
608: 5.86068e-01,
609: 5.72226e-01,
610: 5.58375e-01,
611: 5.44554e-01,
612: 5.30767e-01,
613: 5.17013e-01,
614: 5.03289e-01,
615: 4.89595e-01,
616: 4.75944e-01,
617: 4.62396e-01,
618: 4.49015e-01,
619: 4.35862e-01,
620: 4.22990e-01,
621: 4.10415e-01,
622: 3.98036e-01,
623: 3.85730e-01,
624: 3.73391e-01,
625: 3.60924e-01,
626: 3.48286e-01,
627: 3.35570e-01,
628: 3.22896e-01,
629: 3.10370e-01,
630: 2.98086e-01,
631: 2.86116e-01,
632: 2.74482e-01,
633: 2.63195e-01,
634: 2.52263e-01,
635: 2.41690e-01,
636: 2.31481e-01,
637: 2.21638e-01,
638: 2.12162e-01,
639: 2.03054e-01,
640: 1.94312e-01,
641: 1.85923e-01,
642: 1.77827e-01,
643: 1.69965e-01,
644: 1.62284e-01,
645: 1.54740e-01,
646: 1.47308e-01,
647: 1.40017e-01,
648: 1.32901e-01,
649: 1.25991e-01,
650: 1.19312e-01,
651: 1.12882e-01,
652: 1.06711e-01,
653: 1.00805e-01,
654: 9.51665e-02,
655: 8.97959e-02,
656: 8.46904e-02,
657: 7.98401e-02,
658: 7.52337e-02,
659: 7.08606e-02,
660: 6.67104e-02,
661: 6.27736e-02,
662: 5.90418e-02,
663: 5.55070e-02,
664: 5.21614e-02,
665: 4.89970e-02,
666: 4.60058e-02,
667: 4.31788e-02,
668: 4.05075e-02,
669: 3.79838e-02,
670: 3.55998e-02,
671: 3.33486e-02,
672: 3.12233e-02,
673: 2.92178e-02,
674: 2.73260e-02,
675: 2.55422e-02,
676: 2.38612e-02,
677: 2.22786e-02,
678: 2.07902e-02,
679: 1.93919e-02,
680: 1.80794e-02,
681: 1.68482e-02,
682: 1.56919e-02,
683: 1.46045e-02,
684: 1.35806e-02,
685: 1.26157e-02,
686: 1.17070e-02,
687: 1.08561e-02,
688: 1.00648e-02,
689: 9.33338e-03,
690: 8.66128e-03,
691: 8.04605e-03,
692: 7.48113e-03,
693: 6.95999e-03,
694: 6.47707e-03,
695: 6.02768e-03,
696: 5.60817e-03,
697: 5.21669e-03,
698: 4.85179e-03,
699: 4.51201e-03,
700: 4.19594e-03,
701: 3.90206e-03,
702: 3.62837e-03,
703: 3.37301e-03,
704: 3.13432e-03,
705: 2.91086e-03,
706: 2.70153e-03,
707: 2.50580e-03,
708: 2.32323e-03,
709: 2.15333e-03,
710: 1.99556e-03,
711: 1.84932e-03,
712: 1.71398e-03,
713: 1.58890e-03,
714: 1.47345e-03,
715: 1.36702e-03,
716: 1.26895e-03,
717: 1.17842e-03,
718: 1.09464e-03,
719: 1.01694e-03,
720: 9.44727e-04,
721: 8.77517e-04,
722: 8.15044e-04,
723: 7.57076e-04,
724: 7.03376e-04,
725: 6.53705e-04,
726: 6.07805e-04,
727: 5.65344e-04,
728: 5.26005e-04,
729: 4.89506e-04,
730: 4.55597e-04,
731: 4.24055e-04,
732: 3.94686e-04,
733: 3.67318e-04,
734: 3.41794e-04,
735: 3.17974e-04,
736: 2.95744e-04,
737: 2.75056e-04,
738: 2.55864e-04,
739: 2.38114e-04,
740: 2.21745e-04,
741: 2.06671e-04,
742: 1.92747e-04,
743: 1.79831e-04,
744: 1.67802e-04,
745: 1.56557e-04,
746: 1.46017e-04,
747: 1.36153e-04,
748: 1.26945e-04,
749: 1.18367e-04,
750: 1.10393e-04,
751: 1.02991e-04,
752: 9.61184e-05,
753: 8.97332e-05,
754: 8.37969e-05,
755: 7.82744e-05,
756: 7.31331e-05,
757: 6.83414e-05,
758: 6.38704e-05,
759: 5.96939e-05,
760: 5.57886e-05,
761: 5.21351e-05,
762: 4.87218e-05,
763: 4.55385e-05,
764: 4.25744e-05,
765: 3.98188e-05,
766: 3.72588e-05,
767: 3.48747e-05,
768: 3.26477e-05,
769: 3.05614e-05,
770: 2.86018e-05,
771: 2.67584e-05,
772: 2.50294e-05,
773: 2.34137e-05,
774: 2.19091e-05,
775: 2.05126e-05,
776: 1.92190e-05,
777: 1.80180e-05,
778: 1.68990e-05,
779: 1.58531e-05,
780: 1.48724e-05,
781: 1.39509e-05,
782: 1.30853e-05,
783: 1.22733e-05,
784: 1.15123e-05,
785: 1.08000e-05,
786: 1.01336e-05,
787: 9.50992e-06,
788: 8.92563e-06,
789: 8.37785e-06,
790: 7.86392e-06,
791: 7.38154e-06,
792: 6.92910e-06,
793: 6.50514e-06,
794: 6.10822e-06,
795: 5.73694e-06,
796: 5.38983e-06,
797: 5.06527e-06,
798: 4.76167e-06,
799: 4.47756e-06,
800: 4.21160e-06,
801: 3.96246e-06,
802: 3.72867e-06,
803: 3.50888e-06,
804: 3.30187e-06,
805: 3.10656e-06,
806: 2.92212e-06,
807: 2.74821e-06,
808: 2.58456e-06,
809: 2.43087e-06,
810: 2.28679e-06,
811: 2.15191e-06,
812: 2.02566e-06,
813: 1.90746e-06,
814: 1.79679e-06,
815: 1.69315e-06,
816: 1.59603e-06,
817: 1.50490e-06,
818: 1.41925e-06,
819: 1.33860e-06,
820: 1.26256e-06,
821: 1.19077e-06,
822: 1.12303e-06,
823: 1.05915e-06,
824: 9.98951e-07,
825: 9.42251e-07,
826: 8.88880e-07,
827: 8.38669e-07,
828: 7.91454e-07,
829: 7.47077e-07,
830: 7.05386e-07
},
'CIE 2008 10 Degree Physiologically Relevant LEF': {
390: 4.07678e-04,
391: 4.97777e-04,
392: 6.06475e-04,
393: 7.37004e-04,
394: 8.92939e-04,
395: 1.07817e-03,
396: 1.29682e-03,
397: 1.55316e-03,
398: 1.85146e-03,
399: 2.19579e-03,
400: 2.58977e-03,
401: 3.03680e-03,
402: 3.54193e-03,
403: 4.11142e-03,
404: 4.75262e-03,
405: 5.47421e-03,
406: 6.28503e-03,
407: 7.18807e-03,
408: 8.18179e-03,
409: 9.26042e-03,
410: 1.04130e-02,
411: 1.16264e-02,
412: 1.28988e-02,
413: 1.42344e-02,
414: 1.56408e-02,
415: 1.71297e-02,
416: 1.87127e-02,
417: 2.03839e-02,
418: 2.21294e-02,
419: 2.39299e-02,
420: 2.57613e-02,
421: 2.76016e-02,
422: 2.94551e-02,
423: 3.13388e-02,
424: 3.32758e-02,
425: 3.52955e-02,
426: 3.74271e-02,
427: 3.96714e-02,
428: 4.20200e-02,
429: 4.44617e-02,
430: 4.69823e-02,
431: 4.95674e-02,
432: 5.22122e-02,
433: 5.49139e-02,
434: 5.76692e-02,
435: 6.04743e-02,
436: 6.33220e-02,
437: 6.61927e-02,
438: 6.90619e-02,
439: 7.19019e-02,
440: 7.46829e-02,
441: 7.73845e-02,
442: 8.00360e-02,
443: 8.26852e-02,
444: 8.53875e-02,
445: 8.82054e-02,
446: 9.11893e-02,
447: 9.43104e-02,
448: 9.75135e-02,
449: 1.00735e-01,
450: 1.03903e-01,
451: 1.06964e-01,
452: 1.09968e-01,
453: 1.12999e-01,
454: 1.16154e-01,
455: 1.19539e-01,
456: 1.23250e-01,
457: 1.27305e-01,
458: 1.31696e-01,
459: 1.36418e-01,
460: 1.41459e-01,
461: 1.46800e-01,
462: 1.52400e-01,
463: 1.58202e-01,
464: 1.64140e-01,
465: 1.70137e-01,
466: 1.76123e-01,
467: 1.82090e-01,
468: 1.88046e-01,
469: 1.94006e-01,
470: 1.99986e-01,
471: 2.06005e-01,
472: 2.12098e-01,
473: 2.18304e-01,
474: 2.24669e-01,
475: 2.31243e-01,
476: 2.38074e-01,
477: 2.45180e-01,
478: 2.52568e-01,
479: 2.60248e-01,
480: 2.68227e-01,
481: 2.76501e-01,
482: 2.85004e-01,
483: 2.93647e-01,
484: 3.02332e-01,
485: 3.10944e-01,
486: 3.19410e-01,
487: 3.27868e-01,
488: 3.36526e-01,
489: 3.45618e-01,
490: 3.55402e-01,
491: 3.66089e-01,
492: 3.77586e-01,
493: 3.89696e-01,
494: 4.02195e-01,
495: 4.14823e-01,
496: 4.27354e-01,
497: 4.39821e-01,
498: 4.52336e-01,
499: 4.65030e-01,
500: 4.78048e-01,
501: 4.91517e-01,
502: 5.05422e-01,
503: 5.19706e-01,
504: 5.34301e-01,
505: 5.49134e-01,
506: 5.64130e-01,
507: 5.79242e-01,
508: 5.94426e-01,
509: 6.09639e-01,
510: 6.24830e-01,
511: 6.39966e-01,
512: 6.55094e-01,
513: 6.70290e-01,
514: 6.85638e-01,
515: 7.01229e-01,
516: 7.17110e-01,
517: 7.33092e-01,
518: 7.48904e-01,
519: 7.64253e-01,
520: 7.78820e-01,
521: 7.92341e-01,
522: 8.04851e-01,
523: 8.16475e-01,
524: 8.27352e-01,
525: 8.37636e-01,
526: 8.47465e-01,
527: 8.56887e-01,
528: 8.65924e-01,
529: 8.74604e-01,
530: 8.82955e-01,
531: 8.91027e-01,
532: 8.98949e-01,
533: 9.06875e-01,
534: 9.14965e-01,
535: 9.23386e-01,
536: 9.32232e-01,
537: 9.41286e-01,
538: 9.50238e-01,
539: 9.58765e-01,
540: 9.66532e-01,
541: 9.73250e-01,
542: 9.78842e-01,
543: 9.83287e-01,
544: 9.86572e-01,
545: 9.88689e-01,
546: 9.89706e-01,
547: 9.89985e-01,
548: 9.89962e-01,
549: 9.90073e-01,
550: 9.90750e-01,
551: 9.92283e-01,
552: 9.94384e-01,
553: 9.96622e-01,
554: 9.98565e-01,
555: 9.99778e-01,
556: 9.99944e-01,
557: 9.99220e-01,
558: 9.97879e-01,
559: 9.96193e-01,
560: 9.94430e-01,
561: 9.92783e-01,
562: 9.91158e-01,
563: 9.89392e-01,
564: 9.87329e-01,
565: 9.84813e-01,
566: 9.81725e-01,
567: 9.78071e-01,
568: 9.73886e-01,
569: 9.69203e-01,
570: 9.64055e-01,
571: 9.58441e-01,
572: 9.52238e-01,
573: 9.45297e-01,
574: 9.37477e-01,
575: 9.28649e-01,
576: 9.18795e-01,
577: 9.08301e-01,
578: 8.97635e-01,
579: 8.87240e-01,
580: 8.77536e-01,
581: 8.68792e-01,
582: 8.60747e-01,
583: 8.53023e-01,
584: 8.45253e-01,
585: 8.37084e-01,
586: 8.28241e-01,
587: 8.18732e-01,
588: 8.08635e-01,
589: 7.98030e-01,
590: 7.86995e-01,
591: 7.75604e-01,
592: 7.63900e-01,
593: 7.51916e-01,
594: 7.39683e-01,
595: 7.27231e-01,
596: 7.14588e-01,
597: 7.01793e-01,
598: 6.88887e-01,
599: 6.75910e-01,
600: 6.62904e-01,
601: 6.49891e-01,
602: 6.36841e-01,
603: 6.23709e-01,
604: 6.10454e-01,
605: 5.97037e-01,
606: 5.83440e-01,
607: 5.69704e-01,
608: 5.55889e-01,
609: 5.42047e-01,
610: 5.28230e-01,
611: 5.14475e-01,
612: 5.00788e-01,
613: 4.87169e-01,
614: 4.73616e-01,
615: 4.60131e-01,
616: 4.46726e-01,
617: 4.33459e-01,
618: 4.20392e-01,
619: 4.07581e-01,
620: 3.95076e-01,
621: 3.82889e-01,
622: 3.70919e-01,
623: 3.59045e-01,
624: 3.47162e-01,
625: 3.35179e-01,
626: 3.23056e-01,
627: 3.10886e-01,
628: 2.98784e-01,
629: 2.86853e-01,
630: 2.75181e-01,
631: 2.63834e-01,
632: 2.52833e-01,
633: 2.42183e-01,
634: 2.31890e-01,
635: 2.21956e-01,
636: 2.12383e-01,
637: 2.03170e-01,
638: 1.94318e-01,
639: 1.85825e-01,
640: 1.77688e-01,
641: 1.69893e-01,
642: 1.62382e-01,
643: 1.55099e-01,
644: 1.47992e-01,
645: 1.41020e-01,
646: 1.34161e-01,
647: 1.27440e-01,
648: 1.20889e-01,
649: 1.14534e-01,
650: 1.08400e-01,
651: 1.02501e-01,
652: 9.68459e-02,
653: 9.14394e-02,
654: 8.62832e-02,
655: 8.13769e-02,
656: 7.67171e-02,
657: 7.22940e-02,
658: 6.80970e-02,
659: 6.41155e-02,
660: 6.03398e-02,
661: 5.67605e-02,
662: 5.33699e-02,
663: 5.01603e-02,
664: 4.71241e-02,
665: 4.42538e-02,
666: 4.15421e-02,
667: 3.89804e-02,
668: 3.65609e-02,
669: 3.42760e-02,
670: 3.21185e-02,
671: 3.00819e-02,
672: 2.81600e-02,
673: 2.63470e-02,
674: 2.46373e-02,
675: 2.30257e-02,
676: 2.15074e-02,
677: 2.00784e-02,
678: 1.87347e-02,
679: 1.74727e-02,
680: 1.62884e-02,
681: 1.51777e-02,
682: 1.41347e-02,
683: 1.31541e-02,
684: 1.22309e-02,
685: 1.13611e-02,
686: 1.05419e-02,
687: 9.77505e-03,
688: 9.06196e-03,
689: 8.40296e-03,
690: 7.79746e-03,
691: 7.24323e-03,
692: 6.73438e-03,
693: 6.26500e-03,
694: 5.83009e-03,
695: 5.42539e-03,
696: 5.04763e-03,
697: 4.69514e-03,
698: 4.36659e-03,
699: 4.06069e-03,
700: 3.77614e-03,
701: 3.51158e-03,
702: 3.26521e-03,
703: 3.03534e-03,
704: 2.82050e-03,
705: 2.61937e-03,
706: 2.43096e-03,
707: 2.25480e-03,
708: 2.09049e-03,
709: 1.93759e-03,
710: 1.79560e-03,
711: 1.66399e-03,
712: 1.54220e-03,
713: 1.42964e-03,
714: 1.32575e-03,
715: 1.22998e-03,
716: 1.14173e-03,
717: 1.06027e-03,
718: 9.84885e-04,
719: 9.14970e-04,
720: 8.49990e-04,
721: 7.89516e-04,
722: 7.33304e-04,
723: 6.81146e-04,
724: 6.32829e-04,
725: 5.88138e-04,
726: 5.46839e-04,
727: 5.08635e-04,
728: 4.73240e-04,
729: 4.40402e-04,
730: 4.09893e-04,
731: 3.81514e-04,
732: 3.55090e-04,
733: 3.30467e-04,
734: 3.07503e-04,
735: 2.86072e-04,
736: 2.66072e-04,
737: 2.47459e-04,
738: 2.30192e-04,
739: 2.14223e-04,
740: 1.99495e-04,
741: 1.85934e-04,
742: 1.73407e-04,
743: 1.61786e-04,
744: 1.50964e-04,
745: 1.40847e-04,
746: 1.31364e-04,
747: 1.22490e-04,
748: 1.14206e-04,
749: 1.06489e-04,
750: 9.93144e-05,
751: 9.26551e-05,
752: 8.64722e-05,
753: 8.07278e-05,
754: 7.53872e-05,
755: 7.04188e-05,
756: 6.57934e-05,
757: 6.14825e-05,
758: 5.74601e-05,
759: 5.37027e-05,
760: 5.01893e-05,
761: 4.69024e-05,
762: 4.38317e-05,
763: 4.09678e-05,
764: 3.83012e-05,
765: 3.58222e-05,
766: 3.35190e-05,
767: 3.13742e-05,
768: 2.93707e-05,
769: 2.74938e-05,
770: 2.57308e-05,
771: 2.40725e-05,
772: 2.25170e-05,
773: 2.10635e-05,
774: 1.97099e-05,
775: 1.84535e-05,
776: 1.72898e-05,
777: 1.62093e-05,
778: 1.52026e-05,
779: 1.42617e-05,
780: 1.33795e-05,
781: 1.25504e-05,
782: 1.17717e-05,
783: 1.10412e-05,
784: 1.03566e-05,
785: 9.71580e-06,
786: 9.11632e-06,
787: 8.55520e-06,
788: 8.02956e-06,
789: 7.53677e-06,
790: 7.07442e-06,
791: 6.64046e-06,
792: 6.23344e-06,
793: 5.85204e-06,
794: 5.49496e-06,
795: 5.16095e-06,
796: 4.84869e-06,
797: 4.55671e-06,
798: 4.28358e-06,
799: 4.02799e-06,
800: 3.78873e-06,
801: 3.56460e-06,
802: 3.35428e-06,
803: 3.15656e-06,
804: 2.97033e-06,
805: 2.79463e-06,
806: 2.62870e-06,
807: 2.47225e-06,
808: 2.32503e-06,
809: 2.18677e-06,
810: 2.05715e-06,
811: 1.93581e-06,
812: 1.82224e-06,
813: 1.71591e-06,
814: 1.61636e-06,
815: 1.52311e-06,
816: 1.43575e-06,
817: 1.35377e-06,
818: 1.27671e-06,
819: 1.20417e-06,
820: 1.13576e-06,
821: 1.07118e-06,
822: 1.01024e-06,
823: 9.52778e-07,
824: 8.98622e-07,
825: 8.47617e-07,
826: 7.99605e-07,
827: 7.54436e-07,
828: 7.11962e-07,
829: 6.72042e-07,
830: 6.34538e-07
}
}
SDS_LEFS_PHOTOPIC = LazyCaseInsensitiveMapping({
'CIE 1924 Photopic Standard Observer':
partial(
SpectralDistribution,
DATA_LEFS_PHOTOPIC['CIE 1924 Photopic Standard Observer'],
name='CIE 1924 Photopic Standard Observer'),
'Judd Modified CIE 1951 Photopic Standard Observer':
partial(
SpectralDistribution,
DATA_LEFS_PHOTOPIC[
'Judd Modified CIE 1951 Photopic Standard Observer'],
name='Judd Modified CIE 1951 Photopic Standard Observer'),
'Judd-Vos Modified CIE 1978 Photopic Standard Observer':
partial(
SpectralDistribution,
DATA_LEFS_PHOTOPIC[
'Judd-Vos Modified CIE 1978 Photopic Standard Observer'],
name='Judd-Vos Modified CIE 1978 Photopic Standard Observer'),
'CIE 1964 Photopic 10 Degree Standard Observer':
partial(
SpectralDistribution,
DATA_LEFS_PHOTOPIC[
'CIE 1964 Photopic 10 Degree Standard Observer'],
name='CIE 1964 Photopic 10 Degree Standard Observer',
strict_name='CIE 1964 Photopic 10$^\\circ$ Standard Observer'),
'CIE 2008 2 Degree Physiologically Relevant LEF':
partial(
SpectralDistribution,
DATA_LEFS_PHOTOPIC[
'CIE 2008 2 Degree Physiologically Relevant LEF'],
name='CIE 2008 2 Degree Physiologically Relevant LEF',
strict_name='CIE 2008 2$^\\circ$ Physiologically Relevant LEF'),
'CIE 2008 10 Degree Physiologically Relevant LEF':
partial(
SpectralDistribution,
DATA_LEFS_PHOTOPIC[
'CIE 2008 10 Degree Physiologically Relevant LEF'],
name='CIE 2008 10 Degree Physiologically Relevant LEF',
strict_name='CIE 2008 10$^\\circ$ Physiologically Relevant LEF')
})
SDS_LEFS_PHOTOPIC.__doc__ = """
Spectral distributions of the photopic luminous efficiency functions.
References
----------
:cite:`CVRLq`, :cite:`CVRLs`
SDS_LEFS_PHOTOPIC : LazyCaseInsensitiveMapping
**{'CIE 1924 Photopic Standard Observer',
'Judd Modified CIE 1951 Photopic Standard Observer',
'Judd-Vos Modified CIE 1978 Photopic Standard Observer',
'CIE 1964 Photopic 10 Degree Standard Observer',
'CIE 2008 2 Degree Physiologically Relevant LEF',
'CIE 2008 10 Degree Physiologically Relevant LEF'}**
Aliases:
- 'cie_2_1924': 'CIE 1924 Photopic Standard Observer'
- 'cie_10_1964': 'CIE 1964 Photopic 10 Degree Standard Observer'
"""
SDS_LEFS_PHOTOPIC['cie_2_1924'] = (
SDS_LEFS_PHOTOPIC['CIE 1924 Photopic Standard Observer'])
SDS_LEFS_PHOTOPIC['cie_10_1964'] = (
SDS_LEFS_PHOTOPIC['CIE 1964 Photopic 10 Degree Standard Observer'])
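# Usage sketch (assumption: LazyCaseInsensitiveMapping resolves the stored
# partial() factories on access, so indexing yields a SpectralDistribution):
#
#     >>> sd = SDS_LEFS_PHOTOPIC['CIE 1964 Photopic 10 Degree Standard Observer']
#     >>> sd[555]  # doctest: +SKIP
#     0.99911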
DATA_LEFS_SCOTOPIC = {
'CIE 1951 Scotopic Standard Observer': {
380: 0.0005890000,
381: 0.0006650000,
382: 0.0007520000,
383: 0.0008540000,
384: 0.0009720000,
385: 0.0011080000,
386: 0.0012680000,
387: 0.0014530000,
388: 0.0016680000,
389: 0.0019180000,
390: 0.0022090000,
391: 0.0025470000,
392: 0.0029390000,
393: 0.0033940000,
394: 0.0039210000,
395: 0.0045300000,
396: 0.0052400000,
397: 0.0060500000,
398: 0.0069800000,
399: 0.0080600000,
400: 0.0092900000,
401: 0.0107000000,
402: 0.0123100000,
403: 0.0141300000,
404: 0.0161900000,
405: 0.0185200000,
406: 0.0211300000,
407: 0.0240500000,
408: 0.0273000000,
409: 0.0308900000,
410: 0.0348400000,
411: 0.0391600000,
412: 0.0439000000,
413: 0.0490000000,
414: 0.0545000000,
415: 0.0604000000,
416: 0.0668000000,
417: 0.0736000000,
418: 0.0808000000,
419: 0.0885000000,
420: 0.0966000000,
421: 0.1052000000,
422: 0.1141000000,
423: 0.1235000000,
424: 0.1334000000,
425: 0.1436000000,
426: 0.1541000000,
427: 0.1651000000,
428: 0.1764000000,
429: 0.1879000000,
430: 0.1998000000,
431: 0.2119000000,
432: 0.2243000000,
433: 0.2369000000,
434: 0.2496000000,
435: 0.2625000000,
436: 0.2755000000,
437: 0.2886000000,
438: 0.3017000000,
439: 0.3149000000,
440: 0.3281000000,
441: 0.3412000000,
442: 0.3543000000,
443: 0.3673000000,
444: 0.3803000000,
445: 0.3931000000,
446: 0.4060000000,
447: 0.4180000000,
448: 0.4310000000,
449: 0.4430000000,
450: 0.4550000000,
451: 0.4670000000,
452: 0.4790000000,
453: 0.4900000000,
454: 0.5020000000,
455: 0.5130000000,
456: 0.5240000000,
457: 0.5350000000,
458: 0.5460000000,
459: 0.5570000000,
460: 0.5670000000,
461: 0.5780000000,
462: 0.5880000000,
463: 0.5990000000,
464: 0.6100000000,
465: 0.6200000000,
466: 0.6310000000,
467: 0.6420000000,
468: 0.6530000000,
469: 0.6640000000,
470: 0.6760000000,
471: 0.6870000000,
472: 0.6990000000,
473: 0.7100000000,
474: 0.7220000000,
475: 0.7340000000,
476: 0.7450000000,
477: 0.7570000000,
478: 0.7690000000,
479: 0.7810000000,
480: 0.7930000000,
481: 0.8050000000,
482: 0.8170000000,
483: 0.8280000000,
484: 0.8400000000,
485: 0.8510000000,
486: 0.8620000000,
487: 0.8730000000,
488: 0.8840000000,
489: 0.8940000000,
490: 0.9040000000,
491: 0.9140000000,
492: 0.9230000000,
493: 0.9320000000,
494: 0.9410000000,
495: 0.9490000000,
496: 0.9570000000,
497: 0.9640000000,
498: 0.9700000000,
499: 0.9760000000,
500: 0.9820000000,
501: 0.9860000000,
502: 0.9900000000,
503: 0.9940000000,
504: 0.9970000000,
505: 0.9980000000,
506: 1.0000000000,
507: 1.0000000000,
508: 1.0000000000,
509: 0.9980000000,
510: 0.9970000000,
511: 0.9940000000,
512: 0.9900000000,
513: 0.9860000000,
514: 0.9810000000,
515: 0.9750000000,
516: 0.9680000000,
517: 0.9610000000,
518: 0.9530000000,
519: 0.9440000000,
520: 0.9350000000,
521: 0.9250000000,
522: 0.9150000000,
523: 0.9040000000,
524: 0.8920000000,
525: 0.8800000000,
526: 0.8670000000,
527: 0.8540000000,
528: 0.8400000000,
529: 0.8260000000,
530: 0.8110000000,
531: 0.7960000000,
532: 0.7810000000,
533: 0.7650000000,
534: 0.7490000000,
535: 0.7330000000,
536: 0.7170000000,
537: 0.7000000000,
538: 0.6830000000,
539: 0.6670000000,
540: 0.6500000000,
541: 0.6330000000,
542: 0.6160000000,
543: 0.5990000000,
544: 0.5810000000,
545: 0.5640000000,
546: 0.5480000000,
547: 0.5310000000,
548: 0.5140000000,
549: 0.4970000000,
550: 0.4810000000,
551: 0.4650000000,
552: 0.4480000000,
553: 0.4330000000,
554: 0.4170000000,
555: 0.4020000000,
556: 0.3864000000,
557: 0.3715000000,
558: 0.3569000000,
559: 0.3427000000,
560: 0.3288000000,
561: 0.3151000000,
562: 0.3018000000,
563: 0.2888000000,
564: 0.2762000000,
565: 0.2639000000,
566: 0.2519000000,
567: 0.2403000000,
568: 0.2291000000,
569: 0.2182000000,
570: 0.2076000000,
571: 0.1974000000,
572: 0.1876000000,
573: 0.1782000000,
574: 0.1690000000,
575: 0.1602000000,
576: 0.1517000000,
577: 0.1436000000,
578: 0.1358000000,
579: 0.1284000000,
580: 0.1212000000,
581: 0.1143000000,
582: 0.1078000000,
583: 0.1015000000,
584: 0.0956000000,
585: 0.0899000000,
586: 0.0845000000,
587: 0.0793000000,
588: 0.0745000000,
589: 0.0699000000,
590: 0.0655000000,
591: 0.0613000000,
592: 0.0574000000,
593: 0.0537000000,
594: 0.0502000000,
595: 0.0469000000,
596: 0.0438000000,
597: 0.0409000000,
598: 0.0381600000,
599: 0.0355800000,
600: 0.0331500000,
601: 0.0308700000,
602: 0.0287400000,
603: 0.0267400000,
604: 0.0248700000,
605: 0.0231200000,
606: 0.0214700000,
607: 0.0199400000,
608: 0.0185100000,
609: 0.0171800000,
610: 0.0159300000,
611: 0.0147700000,
612: 0.0136900000,
613: 0.0126900000,
614: 0.0117500000,
615: 0.0108800000,
616: 0.0100700000,
617: 0.0093200000,
618: 0.0086200000,
619: 0.0079700000,
620: 0.0073700000,
621: 0.0068200000,
622: 0.0063000000,
623: 0.0058200000,
624: 0.0053800000,
625: 0.0049700000,
626: 0.0045900000,
627: 0.0042400000,
628: 0.0039130000,
629: 0.0036130000,
630: 0.0033350000,
631: 0.0030790000,
632: 0.0028420000,
633: 0.0026230000,
634: 0.0024210000,
635: 0.0022350000,
636: 0.0020620000,
637: 0.0019030000,
638: 0.0017570000,
639: 0.0016210000,
640: 0.0014970000,
641: 0.0013820000,
642: 0.0012760000,
643: 0.0011780000,
644: 0.0010880000,
645: 0.0010050000,
646: 0.0009280000,
647: 0.0008570000,
648: 0.0007920000,
649: 0.0007320000,
650: 0.0006770000,
651: 0.0006260000,
652: 0.0005790000,
653: 0.0005360000,
654: 0.0004960000,
655: 0.0004590000,
656: 0.0004250000,
657: 0.0003935000,
658: 0.0003645000,
659: 0.0003377000,
660: 0.0003129000,
661: 0.0002901000,
662: 0.0002689000,
663: 0.0002493000,
664: 0.0002313000,
665: 0.0002146000,
666: 0.0001991000,
667: 0.0001848000,
668: 0.0001716000,
669: 0.0001593000,
670: 0.0001480000,
671: 0.0001375000,
672: 0.0001277000,
673: 0.0001187000,
674: 0.0001104000,
675: 0.0001026000,
676: 0.0000954000,
677: 0.0000888000,
678: 0.0000826000,
679: 0.0000769000,
680: 0.0000715000,
681: 0.0000666000,
682: 0.0000620000,
683: 0.0000578000,
684: 0.0000538000,
685: 0.0000501000,
686: 0.0000467000,
687: 0.0000436000,
688: 0.0000406000,
689: 0.0000378900,
690: 0.0000353300,
691: 0.0000329500,
692: 0.0000307500,
693: 0.0000287000,
694: 0.0000267900,
695: 0.0000250100,
696: 0.0000233600,
697: 0.0000218200,
698: 0.0000203800,
699: 0.0000190500,
700: 0.0000178000,
701: 0.0000166400,
702: 0.0000155600,
703: 0.0000145400,
704: 0.0000136000,
705: 0.0000127300,
706: 0.0000119100,
707: 0.0000111400,
708: 0.0000104300,
709: 0.0000097600,
710: 0.0000091400,
711: 0.0000085600,
712: 0.0000080200,
713: 0.0000075100,
714: 0.0000070400,
715: 0.0000066000,
716: 0.0000061800,
717: 0.0000058000,
718: 0.0000054400,
719: 0.0000051000,
720: 0.0000047800,
721: 0.0000044900,
722: 0.0000042100,
723: 0.0000039510,
724: 0.0000037090,
725: 0.0000034820,
726: 0.0000032700,
727: 0.0000030700,
728: 0.0000028840,
729: 0.0000027100,
730: 0.0000025460,
731: 0.0000023930,
732: 0.0000022500,
733: 0.0000021150,
734: 0.0000019890,
735: 0.0000018700,
736: 0.0000017590,
737: 0.0000016550,
738: 0.0000015570,
739: 0.0000014660,
740: 0.0000013790,
741: 0.0000012990,
742: 0.0000012230,
743: 0.0000011510,
744: 0.0000010840,
745: 0.0000010220,
746: 0.0000009620,
747: 0.0000009070,
748: 0.0000008550,
749: 0.0000008060,
750: 0.0000007600,
751: 0.0000007160,
752: 0.0000006750,
753: 0.0000006370,
754: 0.0000006010,
755: 0.0000005670,
756: 0.0000005350,
757: 0.0000005050,
758: 0.0000004770,
759: 0.0000004500,
760: 0.0000004250,
761: 0.0000004010,
762: 0.0000003790,
763: 0.0000003580,
764: 0.0000003382,
765: 0.0000003196,
766: 0.0000003021,
767: 0.0000002855,
768: 0.0000002699,
769: 0.0000002552,
770: 0.0000002413,
771: 0.0000002282,
772: 0.0000002159,
773: 0.0000002042,
774: 0.0000001932,
775: 0.0000001829,
776: 0.0000001731,
777: 0.0000001638,
778: 0.0000001551,
779: 0.0000001468,
780: 0.0000001390,
}
}
SDS_LEFS_SCOTOPIC = LazyCaseInsensitiveMapping({
'CIE 1951 Scotopic Standard Observer':
partial(
SpectralDistribution,
DATA_LEFS_SCOTOPIC['CIE 1951 Scotopic Standard Observer'],
name='CIE 1951 Scotopic Standard Observer')
})
SDS_LEFS_SCOTOPIC.__doc__ = """
Spectral distributions of the scotopic luminous efficiency functions.
References
----------
:cite:`CVRLs`
SDS_LEFS_SCOTOPIC : LazyCaseInsensitiveMapping
**{'CIE 1951 Scotopic Standard Observer', }**
Aliases:
- 'cie_1951': 'CIE 1951 Scotopic Standard Observer'
"""
SDS_LEFS_SCOTOPIC['cie_1951'] = (
SDS_LEFS_SCOTOPIC['CIE 1951 Scotopic Standard Observer'])
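# Usage sketch (same lazy-resolution assumption as for the photopic mapping;
# the tabulated scotopic values equal 1.0 at 506-508 nm):
#
#     >>> SDS_LEFS_SCOTOPIC['cie_1951'][507]  # doctest: +SKIP
#     1.0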
SDS_LEFS = LazyCaseInsensitiveMapping(SDS_LEFS_PHOTOPIC)
SDS_LEFS.__doc__ = """
Spectral distributions of the luminous efficiency functions.
References
----------
:cite:`CVRLq`, :cite:`CVRLs`, :cite:`Wikipedia2005d`
SDS_LEFS : LazyCaseInsensitiveMapping
**{'CIE 1924 Photopic Standard Observer',
'Judd Modified CIE 1951 Photopic Standard Observer',
'Judd-Vos Modified CIE 1978 Photopic Standard Observer',
'CIE 1964 Photopic 10 Degree Standard Observer',
'CIE 2008 2 Degree Physiologically Relevant LEF',
'CIE 2008 10 Degree Physiologically Relevant LEF',
'CIE 1951 Scotopic Standard Observer'}**
"""
SDS_LEFS.update(SDS_LEFS_SCOTOPIC)
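# After the update above, SDS_LEFS exposes the photopic and scotopic functions
# (and their aliases) through a single case-insensitive mapping, e.g. (sketch):
#
#     >>> 'CIE 1951 Scotopic Standard Observer' in SDS_LEFS  # doctest: +SKIP
#     True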
DATA_MESOPIC_X = {
0.01:
CaseInsensitiveMapping({
'Blue Heavy': CaseInsensitiveMapping({
'MOVE': 0.13,
'LRC': 0.04
}),
'Red Heavy': CaseInsensitiveMapping({
'MOVE': 0.00,
'LRC': 0.01
})
}),
0.1:
CaseInsensitiveMapping({
'Blue Heavy': CaseInsensitiveMapping({
'MOVE': 0.42,
'LRC': 0.28
}),
'Red Heavy': CaseInsensitiveMapping({
'MOVE': 0.34,
'LRC': 0.11
})
}),
1.0:
CaseInsensitiveMapping({
'Blue Heavy': CaseInsensitiveMapping({
'MOVE': 0.70,
'LRC': 1.00
}),
'Red Heavy': CaseInsensitiveMapping({
'MOVE': 0.68,
'LRC': 1.00
})
}),
10:
CaseInsensitiveMapping({
'Blue Heavy': CaseInsensitiveMapping({
'MOVE': 0.98,
'LRC': 1.00
}),
'Red Heavy': CaseInsensitiveMapping({
'MOVE': 0.98,
'LRC': 1.00
})
})
}
"""
Weighting factors for the mesopic luminous efficiency function calculation.
DATA_MESOPIC_X : CaseInsensitiveMapping
"""
|
site/tests/unittests/test/test_shelve.py | martinphellwig/brython_wf | 652 | 12741102 | <filename>site/tests/unittests/test/test_shelve.py
import unittest
import shelve
import glob
from test import support
from collections.abc import MutableMapping
from test.test_dbm import dbm_iterator
def L1(s):
return s.decode("latin-1")
class byteskeydict(MutableMapping):
"Mapping that supports bytes keys"
def __init__(self):
self.d = {}
def __getitem__(self, key):
return self.d[L1(key)]
def __setitem__(self, key, value):
self.d[L1(key)] = value
def __delitem__(self, key):
del self.d[L1(key)]
def __len__(self):
return len(self.d)
def iterkeys(self):
for k in self.d.keys():
yield k.encode("latin-1")
__iter__ = iterkeys
def keys(self):
return list(self.iterkeys())
def copy(self):
# byteskeydict() takes no constructor arguments, so copy the backing dict directly
new = byteskeydict()
new.d = self.d.copy()
return new
class TestCase(unittest.TestCase):
fn = "shelftemp.db"
def tearDown(self):
for f in glob.glob(self.fn+"*"):
support.unlink(f)
def test_close(self):
d1 = {}
s = shelve.Shelf(d1, protocol=2, writeback=False)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
self.assertEqual(len(s), 1)
s.close()
self.assertRaises(ValueError, len, s)
try:
s['key1']
except ValueError:
pass
else:
self.fail('Closed shelf should not find a key')
def test_ascii_file_shelf(self):
s = shelve.open(self.fn, protocol=0)
try:
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
finally:
s.close()
def test_binary_file_shelf(self):
s = shelve.open(self.fn, protocol=1)
try:
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
finally:
s.close()
def test_proto2_file_shelf(self):
s = shelve.open(self.fn, protocol=2)
try:
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
finally:
s.close()
def test_in_memory_shelf(self):
d1 = byteskeydict()
s = shelve.Shelf(d1, protocol=0)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
d2 = byteskeydict()
s = shelve.Shelf(d2, protocol=1)
s['key1'] = (1,2,3,4)
self.assertEqual(s['key1'], (1,2,3,4))
s.close()
self.assertEqual(len(d1), 1)
self.assertEqual(len(d2), 1)
self.assertNotEqual(d1.items(), d2.items())
def test_mutable_entry(self):
d1 = byteskeydict()
s = shelve.Shelf(d1, protocol=2, writeback=False)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
s['key1'].append(5)
self.assertEqual(s['key1'], [1,2,3,4])
s.close()
d2 = byteskeydict()
s = shelve.Shelf(d2, protocol=2, writeback=True)
s['key1'] = [1,2,3,4]
self.assertEqual(s['key1'], [1,2,3,4])
s['key1'].append(5)
self.assertEqual(s['key1'], [1,2,3,4,5])
s.close()
self.assertEqual(len(d1), 1)
self.assertEqual(len(d2), 1)
def test_keyencoding(self):
d = {}
key = 'Pöp'
# the default keyencoding is utf-8
shelve.Shelf(d)[key] = [1]
self.assertIn(key.encode('utf-8'), d)
# but a different one can be given
shelve.Shelf(d, keyencoding='latin-1')[key] = [1]
self.assertIn(key.encode('latin-1'), d)
# with all consequences
s = shelve.Shelf(d, keyencoding='ascii')
self.assertRaises(UnicodeEncodeError, s.__setitem__, key, [1])
def test_writeback_also_writes_immediately(self):
# Issue 5754
d = {}
key = 'key'
encodedkey = key.encode('utf-8')
s = shelve.Shelf(d, writeback=True)
s[key] = [1]
p1 = d[encodedkey] # Will give a KeyError if backing store not updated
s['key'].append(2)
s.close()
p2 = d[encodedkey]
self.assertNotEqual(p1, p2) # Write creates new object in store
from test import mapping_tests
class TestShelveBase(mapping_tests.BasicTestMappingProtocol):
fn = "shelftemp.db"
counter = 0
def __init__(self, *args, **kw):
self._db = []
mapping_tests.BasicTestMappingProtocol.__init__(self, *args, **kw)
type2test = shelve.Shelf
def _reference(self):
return {"key1":"value1", "key2":2, "key3":(1,2,3)}
def _empty_mapping(self):
if self._in_mem:
x= shelve.Shelf(byteskeydict(), **self._args)
else:
self.counter+=1
x= shelve.open(self.fn+str(self.counter), **self._args)
self._db.append(x)
return x
def tearDown(self):
for db in self._db:
db.close()
self._db = []
if not self._in_mem:
for f in glob.glob(self.fn+"*"):
support.unlink(f)
class TestAsciiFileShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = False
class TestBinaryFileShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = False
class TestProto2FileShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = False
class TestAsciiMemShelve(TestShelveBase):
_args={'protocol':0}
_in_mem = True
class TestBinaryMemShelve(TestShelveBase):
_args={'protocol':1}
_in_mem = True
class TestProto2MemShelve(TestShelveBase):
_args={'protocol':2}
_in_mem = True
def test_main():
for module in dbm_iterator():
support.run_unittest(
TestAsciiFileShelve,
TestBinaryFileShelve,
TestProto2FileShelve,
TestAsciiMemShelve,
TestBinaryMemShelve,
TestProto2MemShelve,
TestCase
)
if __name__ == "__main__":
test_main()
|
pyflux/ssm/dar.py | ThomasHoppe/pyflux | 2,091 | 12741120 | import copy
import sys
if sys.version_info < (3,):
range = xrange
import numpy as np
import pandas as pd
import scipy.stats as ss
from patsy import dmatrices, dmatrix, demo_data
from .. import families as fam
from .. import tsm as tsm
from .. import data_check as dc
from .kalman import *
class DAR(tsm.TSM):
""" Inherits time series methods from TSM class.
**** DYNAMIC AUTOREGRESSIVE MODEL ****
Parameters
----------
data : pd.DataFrame or np.ndarray
Field to specify the time series data that will be used
ar : int
Number of autoregressive lags
integ : int (default : 0)
How many times to difference the time series before estimation
target : str (default : None)
Which column of a DataFrame to use as the dependent variable
"""
def __init__(self, data, ar, integ=0, target=None):
# Initialize TSM object
super(DAR, self).__init__('DAR')
# Latent Variable information
self.ar = ar
self.integ = integ
self.target = target
self.model_name = "DAR(" + str(self.ar) + ", integrated=" + str(self.integ) + ")"
self.max_lag = self.ar
self._z_hide = 0 # Whether to cutoff latent variables from results table
self.supported_methods = ["MLE", "PML", "Laplace", "M-H", "BBVI"]
self.default_method = "MLE"
self.multivariate_model = False
# Format the data
self.data_original = data.copy()
self.data, self.data_name, self.is_pandas, self.index = dc.data_check(data,target)
self.data = self.data.astype(float) # treat as float for Cython
self.data_original_nondf = self.data.copy()
# Difference data
for order in range(0, self.integ):
self.data = np.diff(self.data)
self.data_name = "Differenced " + self.data_name
self.X = self._ar_matrix()
self.data = self.data[self.max_lag:]
self.y = self.data
self.y_name = self.data_name
self._create_latent_variables()
self.z_no = len(self.latent_variables.z_list)
def _ar_matrix(self):
""" Creates Autoregressive matrix
Returns
----------
X : np.ndarray
Autoregressive Matrix
"""
Y = np.array(self.data[self.max_lag:self.data.shape[0]])
X = np.ones(Y.shape[0])
if self.ar != 0:
for i in range(0, self.ar):
X = np.vstack((X,self.data[(self.max_lag-i-1):-i-1]))
return X.T
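# Shape note (sketch): with n = len(self.data) and p = self.ar, the returned
# design matrix has n - p rows and 1 + p columns: a leading column of ones for
# the constant followed by the lags y_{t-1}, ..., y_{t-p}.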
def _create_latent_variables(self):
""" Creates model latent variables
Returns
----------
None (changes model attributes)
"""
self.latent_variables.add_z('Sigma^2 irregular', fam.Flat(transform='exp'), fam.Normal(0,3))
self.latent_variables.add_z('Constant', fam.Flat(transform=None), fam.Normal(0,3))
for parm in range(1,self.ar+1):
self.latent_variables.add_z('Sigma^2 AR(' + str(parm) + ')', fam.Flat(transform='exp'), fam.Normal(0,3))
def _forecast_model(self,beta,Z,h):
""" Creates forecasted states and variances
Parameters
----------
beta : np.ndarray
Contains untransformed starting values for latent variables
Z : np.ndarray
Design matrix of regressors, including the rows appended for the forecast horizon
h : int
How many steps ahead to forecast
Returns
----------
a : np.ndarray
Forecasted states
P : np.ndarray
Variance of forecasted states
"""
T, _, R, Q, H = self._ss_matrices(beta)
return dl_univariate_kalman_fcst(self.data,Z,H,T,Q,R,0.0,h)
def _model(self,data,beta):
""" Creates the structure of the model
Parameters
----------
data : np.array
Contains the time series
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
a,P,K,F,v : np.array
Filtered states, filtered variances, Kalman gains, F matrix, residuals
"""
T, Z, R, Q, H = self._ss_matrices(beta)
return dl_univariate_kalman(data,Z,H,T,Q,R,0.0)
def _ss_matrices(self,beta):
""" Creates the state space matrices required
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
T, Z, R, Q, H : np.array
State space matrices used in KFS algorithm
"""
T = np.identity(self.z_no-1)
H = np.identity(1)*self.latent_variables.z_list[0].prior.transform(beta[0])
Z = self.X
R = np.identity(self.z_no-1)
Q = np.identity(self.z_no-1)
for i in range(0,self.z_no-1):
Q[i][i] = self.latent_variables.z_list[i+1].prior.transform(beta[i+1])
return T, Z, R, Q, H
def neg_loglik(self,beta):
""" Creates the negative log marginal likelihood of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
        The negative log-likelihood of the model
"""
_, _, _, F, v = self._model(self.y,beta)
loglik = 0.0
for i in range(0,self.y.shape[0]):
loglik += np.linalg.slogdet(F[:,:,i])[1] + np.dot(v[i],np.dot(np.linalg.pinv(F[:,:,i]),v[i]))
return -(-((self.y.shape[0]/2)*np.log(2*np.pi))-0.5*loglik.T[0].sum())
def plot_predict(self, h=5, past_values=20, intervals=True, **kwargs):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show 95% prediction intervals for the forecast?
Returns
----------
- Plot of the forecast
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
y_holder = self.y.copy() # holds past data and predicted data to create AR matrix
full_X = self.X.copy()
full_X = np.append(full_X,np.array([np.append(1.0, y_holder[-self.ar:][::-1])]), axis=0)
Z = full_X
# Construct Z matrix
for step in range(h):
a, P = self._forecast_model(self.latent_variables.get_z_values(),Z,step)
new_value = np.dot(Z[-1,:],a[:,self.y.shape[0]+step])
y_holder = np.append(y_holder, new_value)
Z = np.append(Z, np.array([np.append(1.0, y_holder[-self.ar:][::-1])]), axis=0)
# Retrieve data, dates and (transformed) latent variables
a, P = self._forecast_model(self.latent_variables.get_z_values(),Z,h)
smoothed_series = np.zeros(self.y.shape[0]+h)
series_variance = np.zeros(self.y.shape[0]+h)
for t in range(self.y.shape[0]+h):
smoothed_series[t] = np.dot(Z[t],a[:,t])
series_variance[t] = np.dot(np.dot(Z[t],P[:,:,t]),Z[t].T) + self.latent_variables.z_list[0].prior.transform(self.latent_variables.get_z_values()[0])
date_index = self.shift_dates(h)
plot_values = smoothed_series[-h-past_values:]
forecasted_values = smoothed_series[-h:]
lower = forecasted_values - 1.98*np.power(series_variance[-h:],0.5)
upper = forecasted_values + 1.98*np.power(series_variance[-h:],0.5)
lower = np.append(plot_values[-h-1],lower)
upper = np.append(plot_values[-h-1],upper)
plot_index = date_index[-h-past_values:]
plt.figure(figsize=figsize)
if intervals == True:
plt.fill_between(date_index[-h-1:], lower, upper, alpha=0.2)
plt.plot(plot_index,plot_values)
plt.title("Forecast for " + self.y_name)
plt.xlabel("Time")
plt.ylabel(self.y_name)
plt.show()
def plot_fit(self,intervals=False,**kwargs):
""" Plots the fit of the model
Parameters
----------
intervals : Boolean
Whether to plot 95% confidence interval of states
Returns
----------
None (plots data and the fit)
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
series_type = kwargs.get('series_type','Smoothed')
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
date_index = copy.deepcopy(self.index)
date_index = date_index[self.integ+self.ar:]
if series_type == 'Smoothed':
mu, V = self.smoothed_state(self.data,self.latent_variables.get_z_values())
elif series_type == 'Filtered':
mu, V, _, _, _ = self._model(self.data,self.latent_variables.get_z_values())
else:
mu, V = self.smoothed_state(self.data,self.latent_variables.get_z_values())
# Create smoothed/filtered aggregate series
_, Z, _, _, _ = self._ss_matrices(self.latent_variables.get_z_values())
smoothed_series = np.zeros(self.y.shape[0])
for t in range(0,self.y.shape[0]):
smoothed_series[t] = np.dot(Z[t],mu[:,t])
plt.figure(figsize=figsize)
plt.subplot(self.z_no+1, 1, 1)
plt.title(self.y_name + " Raw and " + series_type)
plt.plot(date_index,self.data,label='Data')
plt.plot(date_index,smoothed_series,label=series_type,c='black')
plt.legend(loc=2)
for coef in range(0,self.z_no-1):
V_coef = V[0][coef][:-1]
plt.subplot(self.z_no+1, 1, 2+coef)
plt.title("Beta " + self.latent_variables.z_list[1+coef].name)
if intervals == True:
alpha =[0.15*i/float(100) for i in range(50,12,-2)]
plt.fill_between(date_index[5:], mu[coef,0:mu.shape[1]-1][5:] + 1.98*np.sqrt(V_coef[5:]), mu[coef,0:mu.shape[1]-1][5:] - 1.98*np.sqrt(V_coef[5:]), alpha=0.15,label='95% C.I.')
plt.plot(date_index,mu[coef,0:mu.shape[1]-1],label='Data')
plt.legend(loc=2)
plt.subplot(self.z_no+1, 1, self.z_no+1)
plt.title("Measurement Error")
plt.plot(date_index,self.data-smoothed_series,label='Irregular')
plt.legend(loc=2)
plt.show()
def predict(self, h=5):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
Returns
----------
- pd.DataFrame with predictions
"""
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
y_holder = self.y.copy() # holds past data and predicted data to create AR matrix
full_X = self.X.copy()
full_X = np.append(full_X,np.array([np.append(1.0, y_holder[-self.ar:][::-1])]), axis=0)
Z = full_X
for step in range(h):
a, P = self._forecast_model(self.latent_variables.get_z_values(),Z,step)
new_value = np.dot(Z[-1,:],a[:,self.y.shape[0]+step])
y_holder = np.append(y_holder, new_value)
Z = np.append(Z, np.array([np.append(1.0, y_holder[-self.ar:][::-1])]), axis=0)
date_index = self.shift_dates(h)
result = pd.DataFrame(y_holder[-h:])
result.rename(columns={0:self.y_name}, inplace=True)
result.index = date_index[-h:]
return result
def predict_is(self, h=5, fit_once=True):
""" Makes dynamic in-sample predictions with the estimated model
Parameters
----------
h : int (default : 5)
How many steps would you like to forecast?
fit_once : boolean
(default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
Returns
----------
- pd.DataFrame with predicted values
"""
predictions = []
for t in range(0,h):
data1 = self.data_original_nondf[:-h+t]
x = DAR(data=data1, ar=self.ar, integ=self.integ)
if fit_once is False:
x.fit(printer=False)
if t == 0:
if fit_once is True:
x.fit(printer=False)
saved_lvs = x.latent_variables
predictions = x.predict(1)
else:
if fit_once is True:
x.latent_variables = saved_lvs
predictions = pd.concat([predictions,x.predict(1)])
predictions.rename(columns={0:self.y_name}, inplace=True)
predictions.index = self.index[-h:]
return predictions
def plot_predict_is(self, h=5, **kwargs):
""" Plots forecasts with the estimated model against data
(Simulated prediction with data)
Parameters
----------
h : int (default : 5)
How many steps to forecast
Returns
----------
- Plot of the forecast against data
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
plt.figure(figsize=figsize)
predictions = self.predict_is(h)
data = self.data[-h:]
plt.plot(predictions.index,data,label='Data')
plt.plot(predictions.index,predictions,label='Predictions',c='black')
plt.title(self.y_name)
plt.legend(loc=2)
plt.show()
def simulation_smoother(self,beta):
""" Koopman's simulation smoother - simulates from states given
model parameters and observations
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
- A simulated state evolution
"""
T, Z, R, Q, H = self._ss_matrices(beta)
# Generate e_t+ and n_t+
rnd_h = np.random.normal(0,np.sqrt(H),self.data.shape[0]+1)
q_dist = ss.multivariate_normal([0.0, 0.0], Q)
rnd_q = q_dist.rvs(self.data.shape[0]+1)
# Generate a_t+ and y_t+
a_plus = np.zeros((T.shape[0],self.data.shape[0]+1))
a_plus[0,0] = np.mean(self.data[0:5])
y_plus = np.zeros(self.data.shape[0])
for t in range(0,self.data.shape[0]+1):
if t == 0:
a_plus[:,t] = np.dot(T,a_plus[:,t]) + rnd_q[t,:]
y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]
else:
if t != self.data.shape[0]:
a_plus[:,t] = np.dot(T,a_plus[:,t-1]) + rnd_q[t,:]
y_plus[t] = np.dot(Z,a_plus[:,t]) + rnd_h[t]
alpha_hat, _ = self.smoothed_state(self.data,beta)
alpha_hat_plus, _ = self.smoothed_state(y_plus,beta)
alpha_tilde = alpha_hat - alpha_hat_plus + a_plus
return alpha_tilde
def smoothed_state(self,data,beta):
""" Creates the negative log marginal likelihood of the model
Parameters
----------
data : np.array
Data to be smoothed
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
- Smoothed states
"""
T, Z, R, Q, H = self._ss_matrices(beta)
alpha, V = dl_univariate_KFS(data,Z,H,T,Q,R,0.0)
return alpha, V
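
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# pyflux module). The synthetic series, lag order and horizon below are
# arbitrary examples; fit() is inherited from the parent TSM class and is
# assumed to fall back to the model's default_method ("MLE") when called
# without arguments.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Simulate a random-walk-like series and fit a 2-lag dynamic AR model.
    synthetic = pd.DataFrame({'y': np.cumsum(np.random.randn(200))})
    model = DAR(data=synthetic, ar=2, integ=0, target='y')
    model.fit()                  # assumed to use the default "MLE" method
    print(model.predict(h=5))    # 5-step-ahead forecasts as a pd.DataFrame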
|
atm/database.py | HDI-Project/ATM | 554 | 12741129 | <gh_stars>100-1000
from __future__ import absolute_import, unicode_literals
import hashlib
import json
import os
import pickle
from builtins import object
from datetime import datetime
from io import BytesIO
from operator import attrgetter
import boto3
import numpy as np
import pandas as pd
import pymysql
from sklearn.model_selection import train_test_split
from sqlalchemy import (
Column, DateTime, Enum, ForeignKey, Integer, MetaData, Numeric, String, Text, and_,
create_engine, func, inspect)
from sqlalchemy.engine.url import URL
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.orm.properties import ColumnProperty
from atm.constants import (
BUDGET_TYPES, CLASSIFIER_STATUS, DATARUN_STATUS, METRICS, PARTITION_STATUS, SCORE_TARGETS,
ClassifierStatus, PartitionStatus, RunStatus)
from atm.data import load_data
from atm.utilities import base_64_to_object, object_to_base_64
# The maximum number of errors allowed in a single hyperpartition. If more than
# this many classifiers using a hyperpartition error, the hyperpartition will be
# considered broken and ignored for the rest of the datarun.
MAX_HYPERPARTITION_ERRORS = 3
class DBSession(object):
def __init__(self, db, commit=False):
self.db = db
self.commit = commit
def __enter__(self):
self.db.session = self.db.get_session()
def __exit__(self, type, error, traceback):
if error is not None:
self.db.session.rollback()
elif self.commit:
self.db.session.commit()
self.db.session.close()
self.db.session = None
def try_with_session(commit=False):
"""
Decorator for instance methods on Database that need a sqlalchemy session.
This wrapping function checks if the Database has an active session yet. If
not, it wraps the function call in a ``with DBSession():`` block.
"""
def wrap(func):
def call(db, *args, **kwargs):
# if the Database has an active session, don't create a new one
if db.session is not None:
result = func(db, *args, **kwargs)
if commit:
db.session.commit()
else:
# otherwise, use the session generator
with DBSession(db, commit=commit):
result = func(db, *args, **kwargs)
return result
return call
return wrap
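
# Usage sketch (illustrative only, added for clarity): besides decorating the
# Database methods below, DBSession can be used directly when a caller wants
# to group several calls into a single session/commit, e.g.
#
#     db = Database('sqlite', 'atm.db')      # constructor arguments are placeholders
#     with DBSession(db, commit=True):
#         datarun = db.get_datarun(1)
#         db.mark_datarun_running(datarun.id)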
class Database(object):
    def __init__(self, dialect, database, username=None, password=None,
                 host=None, port=None, query=None):
"""
Accepts configuration for a database connection, and defines SQLAlchemy
ORM objects for all the tables in the database.
"""
# Prepare environment for pymysql
pymysql.install_as_MySQLdb()
pymysql.converters.encoders[np.float64] = pymysql.converters.escape_float
pymysql.converters.conversions = pymysql.converters.encoders.copy()
pymysql.converters.conversions.update(pymysql.converters.decoders)
db_url = URL(drivername=dialect, database=database, username=username,
password=password, host=host, port=port, query=query)
self.engine = create_engine(db_url)
self.session = None
self.get_session = sessionmaker(bind=self.engine,
expire_on_commit=False)
# create ORM objects for the tables
self._define_tables()
def _define_tables(self):
"""
Define the SQLAlchemy ORM class for each table in the ModelHub database.
These must be defined after the Database class is initialized so that
the database metadata is available (at runtime).
If the database does not already exist, it will be created. If it does
exist, it will not be updated with new schema -- after schema changes,
        the database must be destroyed and reinitialized.
"""
metadata = MetaData(bind=self.engine)
Base = declarative_base(metadata=metadata)
db = self
class Dataset(Base):
__tablename__ = 'datasets'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(100), nullable=False)
# columns necessary for loading/processing data
class_column = Column(String(100), nullable=False)
train_path = Column(String(200), nullable=False)
test_path = Column(String(200))
description = Column(String(1000))
# metadata columns, for convenience
n_examples = Column(Integer, nullable=False)
k_classes = Column(Integer, nullable=False)
d_features = Column(Integer, nullable=False)
majority = Column(Numeric(precision=10, scale=9), nullable=False)
size_kb = Column(Integer, nullable=False)
def load(self, test_size=0.3, random_state=0,
aws_access_key=None, aws_secret_key=None):
data = load_data(self.name, self.train_path, aws_access_key, aws_secret_key)
if self.test_path:
if self.name.endswith('.csv'):
test_name = self.name.replace('.csv', '_test.csv')
else:
test_name = self.name + '_test'
test_data = load_data(test_name, self.test_path,
aws_access_key, aws_secret_key)
return data, test_data
else:
return train_test_split(data, test_size=test_size, random_state=random_state)
def _add_extra_fields(self, aws_access_key=None, aws_secret_key=None):
data = load_data(self.name, self.train_path, aws_access_key, aws_secret_key)
if self.n_examples is None:
self.n_examples = len(data)
if self.k_classes is None:
self.k_classes = len(np.unique(data[self.class_column]))
if self.d_features is None:
total_features = data.shape[1] - 1
for column in data.columns:
if data[column].dtype == 'object':
total_features += len(np.unique(data[column])) - 1
self.d_features = total_features
if self.majority is None:
counts = data[self.class_column].value_counts()
self.majority = float(max(counts)) / float(sum(counts))
if self.size_kb is None:
self.size_kb = int(np.array(data).nbytes / 1000)
@staticmethod
def _make_name(path):
md5 = hashlib.md5(path.encode('utf-8'))
return md5.hexdigest()
def __init__(self, train_path, test_path=None, name=None, description=None,
class_column=None, n_examples=None, majority=None, k_classes=None,
size_kb=None, d_features=None, id=None, aws_access_key=None,
aws_secret_key=None):
self.train_path = train_path
self.test_path = test_path
self.name = name or self._make_name(train_path)
self.description = description or self.name
self.class_column = class_column
self.id = id
self.n_examples = n_examples
self.d_features = d_features
self.majority = majority
self.k_classes = k_classes
self.size_kb = size_kb
self._add_extra_fields(aws_access_key, aws_secret_key)
def __repr__(self):
base = "<%s: %s, %d classes, %d features, %d rows>"
return base % (self.name, self.description, self.k_classes,
self.d_features, self.n_examples)
class Datarun(Base):
__tablename__ = 'dataruns'
# relational columns
id = Column(Integer, primary_key=True, autoincrement=True)
dataset_id = Column(Integer, ForeignKey('datasets.id'))
dataset = relationship('Dataset', back_populates='dataruns')
description = Column(String(200), nullable=False)
priority = Column(Integer)
# hyperparameter selection and tuning settings
selector = Column(String(200), nullable=False)
k_window = Column(Integer)
tuner = Column(String(200), nullable=False)
gridding = Column(Integer, nullable=False)
r_minimum = Column(Integer)
# budget settings
budget_type = Column(Enum(*BUDGET_TYPES))
budget = Column(Integer)
deadline = Column(DateTime)
# which metric to use for judgment, and how to compute it
metric = Column(Enum(*METRICS))
score_target = Column(Enum(*[s + '_judgment_metric' for s in
SCORE_TARGETS]))
# variables that store the status of the datarun
start_time = Column(DateTime)
end_time = Column(DateTime)
status = Column(Enum(*DATARUN_STATUS), default=RunStatus.PENDING)
def __repr__(self):
base = "<ID = %d, dataset ID = %s, strategy = %s, budget = %s (%s), status: %s>"
return base % (self.id, self.dataset_id, self.description,
self.budget_type, self.budget, self.status)
@property
def completed_classifiers(self):
return len(self.get_complete_classifiers())
def get_scores(self):
columns = [
'id',
'cv_judgment_metric',
'cv_judgment_metric_stdev',
'test_judgment_metric',
]
classifiers = db.get_classifiers(datarun_id=self.id)
scores = [
{
key: value
for key, value in vars(classifier).items()
if key in columns
}
for classifier in classifiers
]
scores = pd.DataFrame(scores)
scores.sort_values(by='cv_judgment_metric', ascending=False, inplace=True)
scores['rank'] = scores['cv_judgment_metric'].rank(ascending=0)
return scores.reset_index(drop=True)
def get_best_classifier(self):
return db.get_best_classifier(self.score_target, datarun_id=self.id)
def get_complete_classifiers(self):
return db.get_classifiers(datarun_id=self.id, status=ClassifierStatus.COMPLETE)
def export_best_classifier(self, path, force=False):
if os.path.exists(path) and not force:
                    print('The indicated path already exists. Use `force=True` to overwrite.')
                    return
base_path = os.path.dirname(path)
if base_path and not os.path.exists(base_path):
os.makedirs(base_path)
classifier = self.get_best_classifier()
model = classifier.load_model()
with open(path, 'wb') as pickle_file:
pickle.dump(model, pickle_file)
print("Classifier {} saved as {}".format(classifier.id, path))
def describe(self):
dataset = db.get_dataset(self.dataset_id)
elapsed = self.end_time - self.start_time if self.end_time else 'Not finished yet.'
to_print = [
'Datarun {} summary:'.format(self.id),
"\tDataset: '{}'".format(dataset.train_path),
"\tColumn Name: '{}'".format(dataset.class_column),
"\tJudgment Metric: '{}'".format(self.metric),
'\tClassifiers Tested: {}'.format(len(db.get_classifiers(datarun_id=self.id))),
'\tElapsed Time: {}'.format(elapsed),
]
print('\n'.join(to_print))
Dataset.dataruns = relationship('Datarun', order_by='Datarun.id',
back_populates='dataset')
class Hyperpartition(Base):
__tablename__ = 'hyperpartitions'
# relational columns
id = Column(Integer, primary_key=True, autoincrement=True)
datarun_id = Column(Integer, ForeignKey('dataruns.id'))
datarun = relationship('Datarun', back_populates='hyperpartitions')
# name of or path to a configured classification method
method = Column(String(255))
# list of categorical parameters whose values are fixed to define
# this hyperpartition
categorical_hyperparameters_64 = Column(Text)
# list of continuous parameters which are not fixed; their values
# must be selected by a Tuner
tunable_hyperparameters_64 = Column(Text)
# list of categorical or continuous parameters whose values are
# always fixed. These do not define the hyperpartition, but their
# values must be passed on to the method. Here for convenience.
constant_hyperparameters_64 = Column(Text)
# has the partition had too many errors, or is gridding done?
status = Column(Enum(*PARTITION_STATUS),
default=PartitionStatus.INCOMPLETE)
@property
def categoricals(self):
"""
A list of categorical variables along with the fixed values
which define this hyperpartition.
Each element is a ('name', HyperParameter) tuple.
"""
return base_64_to_object(self.categorical_hyperparameters_64)
@categoricals.setter
def categoricals(self, value):
self.categorical_hyperparameters_64 = object_to_base_64(value)
@property
def tunables(self):
"""
A list of parameters which are unspecified and must be selected
with a Tuner. Each element is a ('name', HyperParameter) tuple.
"""
return base_64_to_object(self.tunable_hyperparameters_64)
@tunables.setter
def tunables(self, value):
self.tunable_hyperparameters_64 = object_to_base_64(value)
@property
def constants(self):
return base_64_to_object(self.constant_hyperparameters_64)
@constants.setter
def constants(self, value):
self.constant_hyperparameters_64 = object_to_base_64(value)
def __repr__(self):
return "<%s: %s>" % (self.method, self.categoricals)
Datarun.hyperpartitions = relationship('Hyperpartition',
order_by='Hyperpartition.id',
back_populates='datarun')
class Classifier(Base):
__tablename__ = 'classifiers'
# relational columns
id = Column(Integer, primary_key=True, autoincrement=True)
datarun_id = Column(Integer, ForeignKey('dataruns.id'))
datarun = relationship('Datarun', back_populates='classifiers')
hyperpartition_id = Column(Integer, ForeignKey('hyperpartitions.id'))
hyperpartition = relationship('Hyperpartition',
back_populates='classifiers')
# name of the host where the model was trained
host = Column(String(50))
# these columns point to where the output is stored
model_location = Column(String(300))
metrics_location = Column(String(300))
# base 64 encoding of the hyperparameter names and values
hyperparameter_values_64 = Column(Text, nullable=False)
# performance metrics
cv_judgment_metric = Column(Numeric(precision=20, scale=10))
cv_judgment_metric_stdev = Column(Numeric(precision=20, scale=10))
test_judgment_metric = Column(Numeric(precision=20, scale=10))
start_time = Column(DateTime)
end_time = Column(DateTime)
status = Column(Enum(*CLASSIFIER_STATUS), nullable=False)
error_message = Column(Text)
@property
def hyperparameter_values(self):
return base_64_to_object(self.hyperparameter_values_64)
@hyperparameter_values.setter
def hyperparameter_values(self, value):
self.hyperparameter_values_64 = object_to_base_64(value)
@property
def mu_sigma_judgment_metric(self):
# compute the lower confidence bound on the cross-validated
# judgment metric
if self.cv_judgment_metric is None:
return None
return (self.cv_judgment_metric - 2 * self.cv_judgment_metric_stdev)
def __repr__(self):
params = '\n'.join(
[
'\t{}: {}'.format(name, value)
for name, value in self.hyperparameter_values.items()
]
)
to_print = [
'Classifier id: {}'.format(self.id),
'Classifier type: {}'.format(
db.get_hyperpartition(self.hyperpartition_id).method),
'Params chosen: \n{}'.format(params),
'Cross Validation Score: {:.3f} +- {:.3f}'.format(
self.cv_judgment_metric, self.cv_judgment_metric_stdev),
'Test Score: {:.3f}'.format(self.test_judgment_metric),
]
return '\n'.join(to_print)
def load_s3_data(self, s3_url, aws_access_key=None, aws_secret_key=None):
"""Returns raw data from S3"""
client = boto3.client(
's3',
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
)
bucket = s3_url.split('/')[2]
path_to_read = s3_url.replace('s3://{}/'.format(bucket), '')
                # Keep the buffer open for the caller: closing it inside a
                # ``with`` block would make the returned object unreadable.
                data = BytesIO()
                client.download_fileobj(bucket, path_to_read, data)
                data.seek(0)
                return data
def load_model(self):
"""Return the model's insntance."""
if self.model_location.startswith('s3'):
pickled = self.load_s3_data(
self.model_location,
self.aws_access_key,
self.aws_secret_key,
)
return pickle.load(pickled)
else:
with open(self.model_location, 'rb') as f:
return pickle.load(f)
def load_metrics(self):
"""Return the metrics"""
if self.metrics_location.startswith('s3'):
pickled = self.load_s3_data(
self.metrics_location,
self.aws_access_key,
self.aws_secret_key,
)
return json.load(pickled)
else:
with open(self.metrics_location, 'rb') as f:
return json.load(f)
Datarun.classifiers = relationship('Classifier',
order_by='Classifier.id',
back_populates='datarun')
Hyperpartition.classifiers = relationship('Classifier',
order_by='Classifier.id',
back_populates='hyperpartition')
self.Dataset = Dataset
self.Datarun = Datarun
self.Hyperpartition = Hyperpartition
self.Classifier = Classifier
Base.metadata.create_all(bind=self.engine)
# ##########################################################################
# # Save/load the database ###############################################
# ##########################################################################
@try_with_session()
def to_csv(self, path):
"""
Save the entire ModelHub database as a set of CSVs in the given
directory.
"""
for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:
df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)
df.to_csv(os.path.join(path, '%s.csv' % table), index=False)
@try_with_session(commit=True)
def from_csv(self, path):
"""
Load a snapshot of the ModelHub database from a set of CSVs in the given
directory.
"""
for model, table in [(self.Dataset, 'dataset'),
(self.Datarun, 'datarun'),
(self.Hyperpartition, 'hyperpartition'),
(self.Classifier, 'classifier')]:
df = pd.read_csv(os.path.join(path, '%ss.csv' % table))
# parse datetime columns. This is necessary because SQLAlchemy can't
# interpret strings as datetimes on its own.
# yes, this is the easiest way to do it
for c in inspect(model).attrs:
if not isinstance(c, ColumnProperty):
continue
col = c.columns[0]
if isinstance(col.type, DateTime):
df[c.key] = pd.to_datetime(df[c.key],
infer_datetime_format=True)
for _, r in df.iterrows():
# replace NaN and NaT with None
                for k, v in list(r.items()):
if pd.isnull(v):
r[k] = None
# insert the row into the database
create_func = getattr(self, 'create_%s' % table)
create_func(**r)
# ##########################################################################
# # Standard query methods ###############################################
# ##########################################################################
@try_with_session()
def get_dataset(self, dataset_id):
""" Get a specific dataset. """
return self.session.query(self.Dataset).get(dataset_id)
@try_with_session()
def get_datarun(self, datarun_id):
""" Get a specific datarun. """
return self.session.query(self.Datarun).get(datarun_id)
@try_with_session()
def get_dataruns(self, ignore_pending=False, ignore_running=False,
ignore_complete=True, include_ids=None, exclude_ids=None,
max_priority=True):
"""
Get a list of all dataruns matching the chosen filters.
Args:
ignore_pending: if True, ignore dataruns that have not been started
ignore_running: if True, ignore dataruns that are already running
ignore_complete: if True, ignore completed dataruns
include_ids: only include ids from this list
exclude_ids: don't return any ids from this list
max_priority: only return dataruns which have the highest priority
of any in the filtered set
"""
query = self.session.query(self.Datarun)
if ignore_pending:
query = query.filter(self.Datarun.status != RunStatus.PENDING)
if ignore_running:
query = query.filter(self.Datarun.status != RunStatus.RUNNING)
if ignore_complete:
query = query.filter(self.Datarun.status != RunStatus.COMPLETE)
if include_ids:
exclude_ids = exclude_ids or []
ids = [i for i in include_ids if i not in exclude_ids]
query = query.filter(self.Datarun.id.in_(ids))
elif exclude_ids:
query = query.filter(self.Datarun.id.notin_(exclude_ids))
dataruns = query.all()
if not len(dataruns):
return None
if max_priority:
mp = max(dataruns, key=attrgetter('priority')).priority
dataruns = [d for d in dataruns if d.priority == mp]
return dataruns
@try_with_session()
def get_hyperpartition(self, hyperpartition_id):
""" Get a specific classifier. """
return self.session.query(self.Hyperpartition).get(hyperpartition_id)
@try_with_session()
def get_hyperpartitions(self, dataset_id=None, datarun_id=None, method=None,
ignore_gridding_done=True, ignore_errored=True):
"""
Return all the hyperpartitions in a given datarun by id.
By default, only returns incomplete hyperpartitions.
"""
query = self.session.query(self.Hyperpartition)
if dataset_id is not None:
query = query.join(self.Datarun)\
.filter(self.Datarun.dataset_id == dataset_id)
if datarun_id is not None:
query = query.filter(self.Hyperpartition.datarun_id == datarun_id)
if method is not None:
query = query.filter(self.Hyperpartition.method == method)
if ignore_gridding_done:
query = query.filter(self.Hyperpartition.status != PartitionStatus.GRIDDING_DONE)
if ignore_errored:
query = query.filter(self.Hyperpartition.status != PartitionStatus.ERRORED)
return query.all()
@try_with_session()
def get_classifier(self, classifier_id):
""" Get a specific classifier. """
return self.session.query(self.Classifier).get(classifier_id)
@try_with_session()
def get_classifiers(self, dataset_id=None, datarun_id=None, method=None,
hyperpartition_id=None, status=None):
""" Get a set of classifiers, filtered by the passed-in arguments. """
query = self.session.query(self.Classifier)
if dataset_id is not None:
query = query.join(self.Datarun)\
.filter(self.Datarun.dataset_id == dataset_id)
if datarun_id is not None:
query = query.filter(self.Classifier.datarun_id == datarun_id)
if method is not None:
query = query.join(self.Hyperpartition)\
.filter(self.Hyperpartition.method == method)
if hyperpartition_id is not None:
query = query.filter(self.Classifier.hyperpartition_id == hyperpartition_id)
if status is not None:
query = query.filter(self.Classifier.status == status)
return query.all()
# ##########################################################################
# # Special-purpose queries ##############################################
# ##########################################################################
@try_with_session()
def is_datatun_gridding_done(self, datarun_id):
"""
Check whether gridding is done for the entire datarun.
"""
datarun = self.get_datarun(datarun_id)
is_done = True
for hyperpartition in datarun.hyperpartitions:
            # If any hyperpartition has not finished gridding or errored out,
# gridding is not done for the datarun.
if hyperpartition.status == PartitionStatus.INCOMPLETE:
is_done = False
return is_done
@try_with_session()
def get_number_of_hyperpartition_errors(self, hyperpartition_id):
"""
Get the number of classifiers that have errored using a specified
hyperpartition.
"""
classifiers = self.session.query(self.Classifier)\
.filter(and_(self.Classifier.hyperpartition_id == hyperpartition_id,
self.Classifier.status == ClassifierStatus.ERRORED)).all()
return len(classifiers)
@try_with_session()
def get_methods(self, dataset_id=None, datarun_id=None,
ignore_errored=False, ignore_gridding_done=False):
""" Get all methods used in a particular datarun. """
hyperpartitions = self.get_hyperpartitions(dataset_id=dataset_id,
datarun_id=datarun_id,
ignore_gridding_done=False,
ignore_errored=False)
methods = set(f.method for f in hyperpartitions)
return list(methods)
@try_with_session()
def get_maximum_y(self, datarun_id, score_target):
""" Get the maximum value of a numeric column by name, or None. """
query = self.session.query(func.max(getattr(self.Classifier,
score_target)))
result = query.filter(self.Classifier.datarun_id == datarun_id).first()
        if result is not None and result[0] is not None:
            return float(result[0])
        return None
@try_with_session()
def get_best_classifier(self, score_target, dataset_id=None,
datarun_id=None, method=None,
hyperpartition_id=None):
"""
Get the classifier with the best judgment metric, as indicated by
score_target.
score_target: indicates the metric by which to judge the best classifier.
"""
classifiers = self.get_classifiers(dataset_id=dataset_id,
datarun_id=datarun_id,
method=method,
hyperpartition_id=hyperpartition_id,
status=ClassifierStatus.COMPLETE)
if '_judgment_metric' not in score_target:
score_target += '_judgment_metric'
if not classifiers:
return None
return max(classifiers, key=attrgetter(score_target))
@try_with_session()
def load_model(self, classifier_id):
clf = self.get_classifier(classifier_id)
with open(clf.model_location, 'rb') as f:
return pickle.load(f)
@try_with_session()
def load_metrics(self, classifier_id):
clf = self.get_classifier(classifier_id)
with open(clf.metrics_location, 'r') as f:
return json.load(f)
# ##########################################################################
# # Methods to update the database #######################################
# ##########################################################################
@try_with_session(commit=True)
def create_dataset(self, **kwargs):
dataset = self.Dataset(**kwargs)
self.session.add(dataset)
return dataset
@try_with_session(commit=True)
def create_datarun(self, **kwargs):
datarun = self.Datarun(**kwargs)
self.session.add(datarun)
return datarun
@try_with_session(commit=True)
def create_hyperpartition(self, **kwargs):
partition = self.Hyperpartition(**kwargs)
self.session.add(partition)
return partition
@try_with_session(commit=True)
def create_classifier(self, **kwargs):
classifier = self.Classifier(**kwargs)
self.session.add(classifier)
return classifier
@try_with_session(commit=True)
def start_classifier(self, hyperpartition_id, datarun_id, host,
hyperparameter_values):
"""
Save a new, fully qualified classifier object to the database.
Returns: the ID of the newly-created classifier
"""
classifier = self.Classifier(hyperpartition_id=hyperpartition_id,
datarun_id=datarun_id,
host=host,
hyperparameter_values=hyperparameter_values,
start_time=datetime.now(),
status=ClassifierStatus.RUNNING)
self.session.add(classifier)
return classifier
@try_with_session(commit=True)
def complete_classifier(self, classifier_id, model_location,
metrics_location, cv_score, cv_stdev, test_score):
"""
Set all the parameters on a classifier that haven't yet been set, and mark
it as complete.
"""
classifier = self.session.query(self.Classifier).get(classifier_id)
classifier.model_location = model_location
classifier.metrics_location = metrics_location
classifier.cv_judgment_metric = cv_score
classifier.cv_judgment_metric_stdev = cv_stdev
classifier.test_judgment_metric = test_score
classifier.end_time = datetime.now()
classifier.status = ClassifierStatus.COMPLETE
@try_with_session(commit=True)
def mark_classifier_errored(self, classifier_id, error_message):
"""
Mark an existing classifier as having errored and set the error message. If
        the classifier's hyperpartition has produced too many errored classifiers, mark it
as errored as well.
"""
classifier = self.session.query(self.Classifier).get(classifier_id)
classifier.error_message = error_message
classifier.status = ClassifierStatus.ERRORED
classifier.end_time = datetime.now()
noh_errors = self.get_number_of_hyperpartition_errors(classifier.hyperpartition_id)
if noh_errors > MAX_HYPERPARTITION_ERRORS:
self.mark_hyperpartition_errored(classifier.hyperpartition_id)
@try_with_session(commit=True)
def mark_hyperpartition_gridding_done(self, hyperpartition_id):
"""
        Mark a hyperpartition as having all of its possible grid points explored.
"""
hyperpartition = self.get_hyperpartition(hyperpartition_id)
hyperpartition.status = PartitionStatus.GRIDDING_DONE
@try_with_session(commit=True)
def mark_hyperpartition_errored(self, hyperpartition_id):
"""
        Mark a hyperpartition as having had too many classifier errors. This will
        prevent more classifiers from being trained on this hyperpartition in the
future.
"""
hyperpartition = self.get_hyperpartition(hyperpartition_id)
hyperpartition.status = PartitionStatus.ERRORED
@try_with_session(commit=True)
def mark_datarun_running(self, datarun_id):
"""
Set the status of the Datarun to RUNNING and set the 'start_time' field
to the current datetime.
"""
datarun = self.get_datarun(datarun_id)
if datarun.status == RunStatus.PENDING:
datarun.status = RunStatus.RUNNING
datarun.start_time = datetime.now()
@try_with_session(commit=True)
def mark_datarun_complete(self, datarun_id):
"""
Set the status of the Datarun to COMPLETE and set the 'end_time' field
to the current datetime.
"""
datarun = self.get_datarun(datarun_id)
datarun.status = RunStatus.COMPLETE
datarun.end_time = datetime.now()
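
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). The sqlite file name is an arbitrary placeholder; any dialect
# supported by SQLAlchemy can be used instead.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # Instantiating Database also creates the ModelHub tables if they do not
    # exist yet.
    example_db = Database(dialect='sqlite', database='atm_example.db')
    # Each query method opens its own session via the try_with_session
    # decorator defined above.
    print(example_db.get_dataruns(ignore_complete=False))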
|
wechat_django/tests/test_utils_decorator.py | UltraVacuum/wechat-django | 166 | 12741137 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import response
from django.urls import reverse
from django.urls.resolvers import get_ns_resolver
from .base import WeChatTestCase
class UtilDecoratorTestCase(WeChatTestCase):
pass
|
sdk/python/pulumi_aws/codecommit/_inputs.py | alexbowers/pulumi-aws | 260 | 12741140 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'TriggerTriggerArgs',
]
@pulumi.input_type
class TriggerTriggerArgs:
def __init__(__self__, *,
destination_arn: pulumi.Input[str],
events: pulumi.Input[Sequence[pulumi.Input[str]]],
name: pulumi.Input[str],
branches: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
custom_data: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] destination_arn: The ARN of the resource that is the target for a trigger. For example, the ARN of a topic in Amazon Simple Notification Service (SNS).
:param pulumi.Input[Sequence[pulumi.Input[str]]] events: The repository events that will cause the trigger to run actions in another service, such as sending a notification through Amazon Simple Notification Service (SNS). If no events are specified, the trigger will run for all repository events. Event types include: `all`, `updateReference`, `createReference`, `deleteReference`.
:param pulumi.Input[str] name: The name of the trigger.
:param pulumi.Input[Sequence[pulumi.Input[str]]] branches: The branches that will be included in the trigger configuration. If no branches are specified, the trigger will apply to all branches.
:param pulumi.Input[str] custom_data: Any custom data associated with the trigger that will be included in the information sent to the target of the trigger.
"""
pulumi.set(__self__, "destination_arn", destination_arn)
pulumi.set(__self__, "events", events)
pulumi.set(__self__, "name", name)
if branches is not None:
pulumi.set(__self__, "branches", branches)
if custom_data is not None:
pulumi.set(__self__, "custom_data", custom_data)
@property
@pulumi.getter(name="destinationArn")
def destination_arn(self) -> pulumi.Input[str]:
"""
The ARN of the resource that is the target for a trigger. For example, the ARN of a topic in Amazon Simple Notification Service (SNS).
"""
return pulumi.get(self, "destination_arn")
@destination_arn.setter
def destination_arn(self, value: pulumi.Input[str]):
pulumi.set(self, "destination_arn", value)
@property
@pulumi.getter
def events(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
The repository events that will cause the trigger to run actions in another service, such as sending a notification through Amazon Simple Notification Service (SNS). If no events are specified, the trigger will run for all repository events. Event types include: `all`, `updateReference`, `createReference`, `deleteReference`.
"""
return pulumi.get(self, "events")
@events.setter
def events(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "events", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the trigger.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def branches(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The branches that will be included in the trigger configuration. If no branches are specified, the trigger will apply to all branches.
"""
return pulumi.get(self, "branches")
@branches.setter
def branches(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "branches", value)
@property
@pulumi.getter(name="customData")
def custom_data(self) -> Optional[pulumi.Input[str]]:
"""
Any custom data associated with the trigger that will be included in the information sent to the target of the trigger.
"""
return pulumi.get(self, "custom_data")
@custom_data.setter
def custom_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "custom_data", value)
|
DQM/BeamMonitor/test/beamspotdip_dqm_sourceclient-file_cfg.py | malbouis/cmssw | 852 | 12741148 | from __future__ import print_function
import FWCore.ParameterSet.Config as cms
#
process = cms.Process("BeamSpotDipServer")
process.load("DQMServices.Core.DQM_cfg")
# message logger
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
default = cms.untracked.PSet(
limit = cms.untracked.int32(1000)
),
BeamSpotDipServer = cms.untracked.PSet(
limit = cms.untracked.int32(1000)
)
)
# source
process.source = cms.Source("PoolSource",
fileNames=cms.untracked.vstring(
'file:/tmp/sikler/b.root' # lxplus7101
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
# beamspot from database
process.load("CondCore.CondDB.CondDB_cfi")
process.load("DQM.Integration.config.FrontierCondition_GT_cfi")
process.GlobalTag.toGet = cms.VPSet(
cms.PSet(
record = cms.string("BeamSpotOnlineLegacyObjectsRcd"),
refreshTime = cms.uint64(1)
),
)
# module
process.load("DQM.BeamMonitor.BeamSpotDipServer_cff")
process.beamSpotDipServer.verbose = True
process.beamSpotDipServer.testing = True
process.beamSpotDipServer.readFromNFS = True
process.beamSpotDipServer.sourceFile = "../../../../../BeamFitResults.txt"
process.beamSpotDipServer.sourceFile1 = "../../../../../TkStatus.txt"
# process customizations
from DQM.Integration.config.online_customizations_cfi import *
process = customise(process)
# path
process.p = cms.Path( process.beamSpotDipServer )
|
create_pretraining_data.py | alexa/bort | 469 | 12741149 | <reponame>alexa/bort
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocessing utils for pretraining, mostly taken from:
https://github.com/dmlc/gluon-nlp/tree/v0.9.x/scripts/bert/data
but modified to work with Bort.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import io
import os
import glob
import collections
import random
import time
import numpy as np
import gluonnlp as nlp
from multiprocessing import Pool
class TrainingInstance(object):
"""A single training instance (sentence pair)."""
def __init__(self, tokens, segment_ids, masked_lm_positions,
masked_lm_labels, is_random_next, vocab):
self.tokens = tokens
self.segment_ids = segment_ids
self.is_random_next = is_random_next
self.masked_lm_positions = masked_lm_positions
self.masked_lm_labels = masked_lm_labels
self.vocab = vocab
def __str__(self):
tks = self.vocab.to_tokens(self.tokens)
mask_tks = self.vocab.to_tokens(self.masked_lm_labels)
s = ''
s += 'tokens: %s\n' % (' '.join(tks))
s += 'segment_ids: %s\n' % (' '.join(
[str(x) for x in self.segment_ids]))
s += 'is_random_next: %s\n' % self.is_random_next
s += 'masked_lm_positions: %s\n' % (' '.join(
[str(x) for x in self.masked_lm_positions]))
s += 'masked_lm_labels: %s\n' % (' '.join(mask_tks))
s += '\n'
return s
def __repr__(self):
return self.__str__()
def transform(instance, max_seq_length):
"""Transform instance to inputs for MLM and NSP."""
input_ids = instance.tokens
assert len(input_ids) <= max_seq_length
segment_ids = instance.segment_ids
masked_lm_positions = instance.masked_lm_positions
valid_lengths = len(input_ids)
masked_lm_ids = instance.masked_lm_labels
masked_lm_weights = [1.0] * len(masked_lm_ids)
next_sentence_label = 1 if instance.is_random_next else 0
features = {}
features['input_ids'] = input_ids
features['segment_ids'] = segment_ids
features['masked_lm_positions'] = masked_lm_positions
features['masked_lm_ids'] = masked_lm_ids
features['masked_lm_weights'] = masked_lm_weights
features['next_sentence_labels'] = [next_sentence_label]
features['valid_lengths'] = [valid_lengths]
return features
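
# Illustrative note (added for clarity): for a single instance the dictionary
# built by transform() has the shape
#
#     {'input_ids': [...],            # token ids, length <= max_seq_length
#      'segment_ids': [0, ..., 1],    # 0 for sentence A, 1 for sentence B
#      'masked_lm_positions': [...],  # indices that were masked
#      'masked_lm_ids': [...],        # original ids at those indices
#      'masked_lm_weights': [1.0, ...],
#      'next_sentence_labels': [0 or 1],
#      'valid_lengths': [len(input_ids)]}
#
# The concrete values depend entirely on the tokenizer and vocabulary in use.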
def print_example(instance, features):
logging.debug('*** Example Instance ***')
logging.debug('\n%s', instance)
for feature_name in features.keys():
feature = features[feature_name]
logging.debug('Generated %s: %s', feature_name, feature)
def write_to_files_np(features, tokenizer, max_seq_length,
max_predictions_per_seq, output_files):
# pylint: disable=unused-argument
"""Write to numpy files from `TrainingInstance`s."""
next_sentence_labels = []
valid_lengths = []
    assert len(output_files) == 1, 'numpy format only supports a single output file'
output_file = output_files[0]
(input_ids, segment_ids, masked_lm_positions, masked_lm_ids,
masked_lm_weights, next_sentence_labels, valid_lengths) = features
total_written = len(next_sentence_labels)
# store variable length numpy array object directly.
outputs = collections.OrderedDict()
outputs['input_ids'] = np.array(input_ids, dtype=object)
outputs['segment_ids'] = np.array(segment_ids, dtype=object)
outputs['masked_lm_positions'] = np.array(
masked_lm_positions, dtype=object)
outputs['masked_lm_ids'] = np.array(masked_lm_ids, dtype=object)
outputs['masked_lm_weights'] = np.array(masked_lm_weights, dtype=object)
outputs['next_sentence_labels'] = np.array(
next_sentence_labels, dtype='int32')
outputs['valid_lengths'] = np.array(valid_lengths, dtype='int32')
try:
np.savez_compressed(output_file, **outputs)
except RuntimeError as e:
logging.error(f"Runtime error: {e}, attempting to save half the data")
halfway = len(outputs['input_ids']) // 2
output1 = {k: v[:halfway] for k, v in outputs.items()}
np.savez_compressed(f"{output_file}_1.npz", **output1)
output2 = {k: v[halfway:] for k, v in outputs.items()}
np.savez_compressed(f"{output_file}_2.npz", **output2)
logging.info('Wrote %d total instances', total_written)
def tokenize_lines_fn(x):
"""Worker function to tokenize lines based on the tokenizer, and perform vocabulary lookup."""
lines, tokenizer, vocab = x
results = []
for line in lines:
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
results.append([])
else:
tokens = vocab[[vocab.bos_token] +
vocab[tokenizer(line)] + [vocab.eos_token]]
if tokens:
results.append(tokens)
return results
def convert_to_npz(instances, max_seq_length):
"""Create masked language model and next sentence prediction samples as numpy arrays."""
input_ids = []
segment_ids = []
masked_lm_positions = []
masked_lm_ids = []
masked_lm_weights = []
next_sentence_labels = []
valid_lengths = []
for inst_index, instance in enumerate(instances):
features = transform(instance, max_seq_length)
input_id = features['input_ids']
segment_id = features['segment_ids']
masked_lm_position = features['masked_lm_positions']
masked_lm_id = features['masked_lm_ids']
masked_lm_weight = features['masked_lm_weights']
next_sentence_label = features['next_sentence_labels'][0]
valid_length = features['valid_lengths'][0]
input_ids.append(np.ascontiguousarray(input_id, dtype='int32'))
segment_ids.append(np.ascontiguousarray(segment_id, dtype='int32'))
masked_lm_positions.append(np.ascontiguousarray(
masked_lm_position, dtype='int32'))
masked_lm_ids.append(np.ascontiguousarray(masked_lm_id, dtype='int32'))
masked_lm_weights.append(np.ascontiguousarray(
masked_lm_weight, dtype='float32'))
next_sentence_labels.append(next_sentence_label)
valid_lengths.append(valid_length)
# debugging information
if inst_index < 1:
print_example(instance, features)
return input_ids, masked_lm_ids, masked_lm_positions, masked_lm_weights,\
next_sentence_labels, segment_ids, valid_lengths
def create_training_instances(x):
"""Create `TrainingInstance`s from raw text.
The expected input file format is the following:
(1) One sentence per line. These should ideally be actual sentences, not
entire paragraphs or arbitrary spans of text. (Because we use the
sentence boundaries for the "next sentence prediction" task).
(2) Blank lines between documents. Document boundaries are needed so
that the "next sentence prediction" task doesn't span between documents.
The function expect arguments packed in a tuple as described below.
Parameters
----------
input_files : list of str
List of paths to input text files.
tokenizer : Tokenizer
The tokenizer
max_seq_length : int
The hard limit of maximum sequence length of sentence pairs
dupe_factor : int
Duplication factor.
short_seq_prob : float
The probability of sampling sequences shorter than the max_seq_length.
masked_lm_prob : float
The probability of replacing texts with masks/random words/original words.
max_predictions_per_seq : int
The hard limit of the number of predictions for masked words
whole_word_mask : bool
Whether to do masking for whole words
vocab : Vocab
The vocab for the model
nworker : int
The number of processes to help processing texts in parallel
worker_pool : multiprocessing.Pool
Must be provided if nworker > 1. The caller is responsible for the destruction of
the worker pool.
output_file : str or None
Path to the output file. If None, the result is not serialized. If provided,
results are stored in the order of (input_ids, segment_ids, masked_lm_positions,
masked_lm_ids, masked_lm_weights, next_sentence_labels, valid_lengths).
Returns
-------
A tuple of np.ndarray : input_ids, masked_lm_ids, masked_lm_positions, masked_lm_weights
next_sentence_labels, segment_ids, valid_lengths
"""
(input_files, tokenizer, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, whole_word_mask, vocab,
dupe_factor, nworker, worker_pool, output_file) = x
time_start = time.time()
if nworker > 1:
assert worker_pool is not None
all_documents = [[]]
for input_file in input_files:
with io.open(input_file, 'r', encoding='utf-8') as reader:
lines = reader.readlines()
num_lines = len(lines)
num_lines_per_worker = (num_lines + nworker - 1) // nworker
process_args = []
# tokenize in parallel
for worker_idx in range(nworker):
start = worker_idx * num_lines_per_worker
end = min((worker_idx + 1) * num_lines_per_worker, num_lines)
process_args.append((lines[start:end], tokenizer, vocab))
if worker_pool:
tokenized_results = worker_pool.map(
tokenize_lines_fn, process_args)
else:
tokenized_results = [tokenize_lines_fn(process_args[0])]
for tokenized_result in tokenized_results:
for line in tokenized_result:
if not line:
if all_documents[-1]:
all_documents.append([])
else:
all_documents[-1].append(line)
# remove the last empty document if any
if not all_documents[-1]:
all_documents = all_documents[:-1]
# generate training instances
instances = []
if worker_pool:
process_args = []
for document_index in range(len(all_documents)):
process_args.append((all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, whole_word_mask,
vocab, tokenizer))
for _ in range(dupe_factor):
instances_results = worker_pool.map(
create_instances_from_document, process_args)
for instances_result in instances_results:
instances.extend(instances_result)
npz_instances = worker_pool.apply(
convert_to_npz, (instances, max_seq_length))
else:
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
(all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, whole_word_mask,
vocab, tokenizer)))
npz_instances = convert_to_npz(instances, max_seq_length)
(input_ids, masked_lm_ids, masked_lm_positions, masked_lm_weights,
next_sentence_labels, segment_ids, valid_lengths) = npz_instances
# write output to files. Used when pre-generating files
if output_file:
features = (input_ids, segment_ids, masked_lm_positions, masked_lm_ids,
masked_lm_weights, next_sentence_labels, valid_lengths)
logging.debug('*** Writing to output file %s ***', output_file)
write_to_files_np(features, tokenizer, max_seq_length,
max_predictions_per_seq, [output_file])
features = None
else:
features = (input_ids, masked_lm_ids, masked_lm_positions, masked_lm_weights,
next_sentence_labels, segment_ids, valid_lengths)
time_end = time.time()
logging.debug('Process %d files took %.1f s',
len(input_files), time_end - time_start)
return features
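
# Illustrative call sketch (added for clarity; every literal below is a
# placeholder rather than a value prescribed by the script -- main() derives
# the real values from its command-line arguments):
#
#     features = create_training_instances((
#         ['corpus/part-000.txt'],   # input_files
#         tokenizer, 512,            # tokenizer, max_seq_length
#         0.1,                       # short_seq_prob
#         0.15, 80,                  # masked_lm_prob, max_predictions_per_seq
#         False, vocab,              # whole_word_mask, vocab
#         1,                         # dupe_factor
#         1, None,                   # nworker, worker_pool (None if nworker == 1)
#         'out/part-000.npz'))       # output_file (None to skip serialization)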
def create_instances_from_document(x):
"""Creates `TrainingInstance`s for a single document."""
(all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, whole_word_mask, vocab, tokenizer) = x
document = all_documents[document_index]
_MASK_TOKEN = vocab[vocab.mask_token]
_CLS_TOKEN = vocab[vocab.cls_token]
_SEP_TOKEN = vocab[vocab.sep_token]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# According to the original tensorflow implementation:
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1, 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if random.random() < short_seq_prob:
target_seq_length = random.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document): # pylint: disable=R1702
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = random.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or random.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# randomly choose a document other than itself
random_document_index = random.randint(
0, len(all_documents) - 2)
if random_document_index >= document_index:
random_document_index += 1
random_document = all_documents[random_document_index]
random_start = random.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we 'put them back' so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append(_CLS_TOKEN)
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append(_SEP_TOKEN)
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append(_SEP_TOKEN)
segment_ids.append(1)
(tokens, masked_lm_positions,
masked_lm_labels) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq,
whole_word_mask, vocab, tokenizer,
_MASK_TOKEN, _CLS_TOKEN, _SEP_TOKEN)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels,
vocab=vocab)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
MaskedLmInstance = collections.namedtuple('MaskedLmInstance',
['index', 'label'])
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq,
whole_word_mask, vocab, tokenizer,
_MASK_TOKEN, _CLS_TOKEN, _SEP_TOKEN):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
if token in [_CLS_TOKEN, _SEP_TOKEN]:
continue
# Whole Word Masking means that if we mask all of the subwords
# corresponding to an original word. When a word has been split into
        # subwords, the first token does not have any marker and any subsequent
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each subword independently, softmaxed
# over the entire vocabulary.
if whole_word_mask and len(cand_indexes) >= 1 and \
not tokenizer.is_first_subword(vocab.idx_to_token[token]):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
random.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if random.random() < 0.8:
masked_token = _MASK_TOKEN
else:
# 10% of the time, keep original
if random.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
# generate a random word in [0, vocab_size - 1]
masked_token = random.randint(0, len(vocab) - 1)
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(
index=index, label=tokens[index]))
assert len(masked_lms) <= num_to_predict
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels)
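# A worked example of the masking budget above (the 128-token sequence length is
# an assumed value; 0.15 and 80 are the script defaults declared further below):
#   num_to_predict = min(80, max(1, round(128 * 0.15))) = 19
# Of those 19 positions, roughly 80% are replaced by the mask token, 10% keep
# the original token, and 10% are replaced by a random vocabulary id.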
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if random.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
def main():
"""Main function."""
time_start = time.time()
# random seed
random.seed(args.random_seed)
# create output dir
output_dir = os.path.expanduser(args.output_dir)
nlp.utils.mkdir(output_dir)
vocab = nlp.data.utils._load_pretrained_vocab(
args.dataset_name, root=output_dir, cls=nlp.vocab.BERTVocab)
tokenizer = nlp.data.GPT2BPETokenizer()
# count the number of input files
input_files = []
for input_pattern in args.input_file.split(','):
input_files.extend(glob.glob(os.path.expanduser(input_pattern)))
for input_file in input_files:
logging.info('\t%s', input_file)
num_inputs = len(input_files)
num_outputs = min(args.num_outputs, len(input_files))
logging.info('*** Reading from %d input files ***', num_inputs)
# calculate the number of splits
file_splits = []
split_size = (num_inputs + num_outputs - 1) // num_outputs
for i in range(num_outputs):
split_start = i * split_size
split_end = min(num_inputs, (i + 1) * split_size)
file_splits.append(input_files[split_start:split_end])
# prepare workload
count = 0
process_args = []
for i, file_split in enumerate(file_splits):
output_file = os.path.join(
output_dir, 'part-{}.npz'.format(str(i).zfill(3)))
count += len(file_split)
process_args.append((file_split, tokenizer, args.max_seq_length, args.short_seq_prob,
args.masked_lm_prob, args.max_predictions_per_seq,
args.whole_word_mask,
vocab, args.dupe_factor, 1, None, output_file))
# sanity check
assert count == len(input_files)
# dispatch to workers
nworker = args.num_workers
if nworker > 1:
pool = Pool(nworker)
pool.map(create_training_instances, process_args)
else:
for process_arg in process_args:
create_training_instances(process_arg)
time_end = time.time()
logging.info('Time cost=%.1f', time_end - time_start)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Pre-training data generator for Bort',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--input_file',
type=str,
required=True,
help='Input files, separated by comma. For example, "~/data/*.txt"')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Output directory.')
parser.add_argument(
'--dataset_name',
type=str,
default='openwebtext_ccnews_stories_books_cased',
choices=['book_corpus_wiki_en_uncased', 'book_corpus_wiki_en_cased',
'wiki_multilingual_uncased', 'wiki_multilingual_cased', 'wiki_cn_cased',
'openwebtext_ccnews_stories_books_cased'],
        help='The dataset name for the vocab file the Bort model was trained on')
parser.add_argument(
'--whole_word_mask',
action='store_true',
help='Whether to use whole word masking rather than per-subword masking.')
parser.add_argument(
'--max_seq_length', type=int, default=512, help='Maximum sequence length.')
parser.add_argument(
'--max_predictions_per_seq',
type=int,
default=80,
help='Maximum number of masked LM predictions per sequence. ')
parser.add_argument(
'--random_seed',
type=int,
default=12345,
help='Random seed for data generation.')
parser.add_argument(
'--dupe_factor',
type=int,
default=1,
help='Number of times to duplicate the input data (with different masks).')
parser.add_argument(
'--masked_lm_prob',
type=float,
default=0.15,
help='Masked LM probability.')
parser.add_argument(
'--short_seq_prob',
type=float,
default=0.1,
help='Probability of creating sequences which are shorter than the '
'maximum length. ')
parser.add_argument(
'--verbose',
action='store_true',
help='Print debug information')
parser.add_argument(
'--num_workers',
type=int,
default=8,
help='Number of workers for parallel processing, where each generates an output file.')
parser.add_argument(
'--num_outputs',
type=int,
default=1,
help='Number of desired output files, where each is processed independently by a worker.')
args = parser.parse_args()
logging.getLogger().setLevel(logging.DEBUG if args.verbose else logging.INFO)
logging.info(args)
main()
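# Example invocation (the script filename and paths are placeholders, not taken
# from the repository):
#   python create_pretraining_data.py \
#       --input_file '~/corpus/part1/*.txt,~/corpus/part2/*.txt' \
#       --output_dir ~/bort_pretrain --whole_word_mask --num_workers 8 --num_outputs 8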
|
velocyto/commands/_run.py | subercui/velocyto.py | 119 | 12741169 | <gh_stars>100-1000
import sys
import os
import glob
import re
import gzip
import array
import loompy
import numpy as np
import random
import string
import subprocess
import multiprocessing
import csv
import itertools
from collections import defaultdict
import logging
import h5py
from typing import *
import velocyto as vcy
def id_generator(size: int=6, chars: str=string.ascii_uppercase + string.digits) -> str:
return ''.join(random.choice(chars) for _ in range(size))
def _run(*, bamfile: Tuple[str], gtffile: str,
bcfile: str, outputfolder: str,
sampleid: str, metadatatable: str,
repmask: str, onefilepercell: bool, logic: str,
without_umi: str, umi_extension: str, multimap: bool, test: bool,
samtools_threads: int, samtools_memory: int, loom_numeric_dtype: str, dump: bool, verbose: int,
additional_ca: dict={}) -> None:
"""Runs the velocity analysis outputing a loom file
BAMFILE or [BAMFILES] one or several bam files with position-sorted
GTFFILE annotation file
NOTE: it is keyword only argument function
"""
########################
# Resolve Inputs #
########################
logging.basicConfig(stream=sys.stdout, format='%(asctime)s - %(levelname)s - %(message)s',
level=[logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG][verbose])
if isinstance(bamfile, tuple) and len(bamfile) > 1 and bamfile[-1][-4:] in [".bam", ".sam"]:
multi = True
elif isinstance(bamfile, tuple) and len(bamfile) == 1:
multi = False
else:
raise IOError(f"Something went wrong in the argument parsing. You passed as bamfile: {bamfile}")
if onefilepercell and multi:
if bcfile is not None:
raise ValueError("Inputs incompatibility. --bcfile/-b option was used together with --onefilepercell/-c option.")
logging.warning("Each bam file will be interpreted as a DIFFERENT cell")
elif not onefilepercell and multi:
logging.warning("Several input files but --onefilepercell is False. Each bam file will be interpreted as containing a SET of cells!!!")
if sampleid is None:
assert metadatatable is None, "--metadatatable was specified but cannot fetch sample metadata without valid sampleid"
if multi:
logging.warning(f"When using mutliple files you may want to use --sampleid option to specify the name of the output file")
if multi and not onefilepercell:
full_name = "_".join([os.path.basename(bamfile[i]).split(".")[0] for i in range(len(bamfile))])
if len(full_name) > 50:
sampleid = f'multi_input_{os.path.basename(bamfile[0]).split(".")[0]}_{id_generator(5)}'
else:
sampleid = f'multi_input_{full_name}_and_others_{id_generator(5)}'
elif multi and onefilepercell:
sampleid = f'onefilepercell_{os.path.basename(bamfile[0]).split(".")[0]}_and_others_{id_generator(5)}'
else:
sampleid = f'{os.path.basename(bamfile[0]).split(".")[0]}_{id_generator(5)}'
logging.info(f"No SAMPLEID specified, the sample will be called {sampleid} (last 5 digits are a random-id to avoid overwriting some other file by mistake)")
# Create an output folder inside the cell ranger output folder
if outputfolder is None:
outputfolder = os.path.join(os.path.split(bamfile[0])[0], "velocyto")
logging.info(f"No OUTPUTFOLDER specified, find output files inside {outputfolder}")
if not os.path.exists(outputfolder):
os.mkdir(outputfolder)
logic_class = getattr(vcy, logic)
if not issubclass(logic_class, vcy.Logic):
raise ValueError(f"{logic} is not a valid logic. Choose one among {', '.join([k for k, v in vcy.logic.__dict__.items() if issubclass(v, vcy.Logic)])}")
else:
logging.debug(f"Using logic: {logic}")
logic_obj = logic_class()
if bcfile is None:
logging.debug("Cell barcodes will be determined while reading the .bam file")
valid_bcset = None
else:
# Get valid cell barcodes
valid_bcs_list = (gzip.open(bcfile).read().decode() if bcfile.endswith(".gz") else open(bcfile).read()).rstrip().split()
valid_cellid_list = np.array([f"{sampleid}:{v_bc}" for v_bc in valid_bcs_list]) # with sample id and with -1
if len(set(bc.split('-')[0] for bc in valid_bcs_list)) == 1:
gem_grp = f"-{valid_bcs_list[0].split('-')[-1]}"
else:
gem_grp = "x"
valid_bcset = set(bc.split('-')[0] for bc in valid_bcs_list) # without -1
logging.info(f"Read {len(valid_bcs_list)} cell barcodes from {bcfile}")
logging.debug(f"Example of barcode: {valid_bcs_list[0].split('-')[0]} and cell_id: {valid_cellid_list[0]}")
# Get metadata from sample sheet
if metadatatable:
try:
sample_metadata = vcy.MetadataCollection(metadatatable)
sample = sample_metadata.where("SampleID", sampleid)
if len(sample) == 0:
logging.error(f"Sample ID {sampleid} not found in sample sheet")
# schema = [] # type: List
sample = {}
elif len(sample) > 1:
logging.error(f"Sample ID {sampleid} has multiple lines in sample sheet")
sys.exit(1)
else:
# schema = sample[0].types
sample = sample[0].dict
logging.debug(f"Collecting column attributes from {metadatatable}")
except (NameError, TypeError) as e:
logging.warn("SAMPLEFILE was not specified. add -s SAMPLEFILE to add metadata.")
sample = {}
else:
sample = {}
########################
# Start Analysis #
########################
# Initialize Exon-Intron Counter with the logic and valid barcodes (need to do it now to peek)
if without_umi:
if umi_extension != "no":
logging.warning("--umi-extension was specified but incompatible with --without-umi, it will be ignored!")
umi_extension = "without_umi"
exincounter = vcy.ExInCounter(sampleid=sampleid, logic=logic_class, valid_bcset=valid_bcset, umi_extension=umi_extension, onefilepercell=onefilepercell, dump_option=dump, outputfolder=outputfolder)
# Heuristic to chose the memory/cpu effort
try:
mb_available = int(subprocess.check_output('grep MemAvailable /proc/meminfo'.split()).split()[1]) / 1000
except subprocess.CalledProcessError:
logging.warning("Your system does not support calling `grep MemAvailable /proc/meminfo` so the memory effort for the samtools command could not be chosen appropriately. 32Gb will be assumed")
        mb_available = 32000  # 32Gb
threads_to_use = min(samtools_threads, multiprocessing.cpu_count())
mb_to_use = int(min(samtools_memory, mb_available / (len(bamfile) * threads_to_use)))
compression = vcy.BAM_COMPRESSION
    # I need to peek into the bam file to know which cell barcode flag should be used
if onefilepercell and without_umi:
tagname = "NOTAG"
elif onefilepercell:
logging.debug("The multi input option ")
tagname = "NOTAG"
exincounter.peek_umi_only(bamfile[0])
else:
exincounter.peek(bamfile[0])
tagname = exincounter.cellbarcode_str
if multi and onefilepercell:
bamfile_cellsorted = list(bamfile)
elif onefilepercell:
bamfile_cellsorted = [bamfile[0]]
else:
bamfile_cellsorted = [f"{os.path.join(os.path.dirname(bmf), 'cellsorted_' + os.path.basename(bmf))}" for bmf in bamfile]
sorting_process: Dict[int, Any] = {}
for ni, bmf_cellsorted in enumerate(bamfile_cellsorted):
# Start a subprocess that sorts the bam file
command = f"samtools sort -l {compression} -m {mb_to_use}M -t {tagname} -O BAM -@ {threads_to_use} -o {bmf_cellsorted} {bamfile[ni]}"
if os.path.exists(bmf_cellsorted):
# This should skip sorting in smartseq2
logging.warning(f"The file {bmf_cellsorted} already exists. The sorting step will be skipped and the existing file will be used.")
check_end_process = False
else:
sorting_process[ni] = subprocess.Popen(command.split(), stdout=subprocess.PIPE)
logging.info(f"Starting the sorting process of {bamfile[ni]} the output will be at: {bmf_cellsorted}")
logging.info(f"Command being run is: {command}")
logging.info(f"While the bam sorting happens do other things...")
check_end_process = True
# Load annotations
logging.info(f"Load the annotation from {gtffile}")
annotations_by_chrm_strand = exincounter.read_transcriptmodels(gtffile)
chrs = list(v for k, v in annotations_by_chrm_strand.items())
tms = list(itertools.chain.from_iterable((v.values() for v in chrs)))
ivls = list(itertools.chain.from_iterable(tms))
logging.debug(f"Generated {len(ivls)} features corresponding to {len(tms)} transcript models from {gtffile}")
del chrs, tms, ivls
# Load annotations
if repmask is not None:
logging.info(f"Load the repeat masking annotation from {repmask}")
mask_ivls_by_chromstrand = exincounter.read_repeats(repmask)
# Go through the bam files a first time to markup introns
logging.info(f"Scan {' '.join(bamfile)} to validate intron intervals")
    if test:  # NOTE: Remove this after finishing testing, the only purpose was to save 15min in the debugging process
logging.warning("This place is for developer only!")
import pickle
if os.path.exists("exincounter_dump.pickle"):
logging.debug("exincounter_dump.pickle is being loaded")
exincounter = pickle.load(open("exincounter_dump.pickle", "rb"))
else:
logging.debug("exincounter_dump.pickle was not found")
logging.debug("Dumping exincounter_dump.pickle BEFORE markup")
pickle.dump(exincounter, open("exincounter_dump.pickle", "wb"))
exincounter.mark_up_introns(bamfile=bamfile, multimap=multimap)
else:
exincounter.mark_up_introns(bamfile=bamfile, multimap=multimap)
# Wait for child process to terminate
if check_end_process:
logging.info(f"Now just waiting that the bam sorting process terminates")
for k in sorting_process.keys():
returncode = sorting_process[k].wait()
if returncode == 0:
logging.info(f"bam file #{k} has been sorted")
else:
raise MemoryError(f"bam file #{k} could not be sorted by cells.\n\
This is probably related to an old version of samtools, please install samtools >= 1.6.\
                    Alternatively, this could be a memory error; try to set the --samtools_memory option to a value compatible with your system. \
Otherwise sort manually by samtools ``sort -l [compression] -m [mb_to_use]M -t [tagname] -O BAM -@ [threads_to_use] -o cellsorted_[bamfile] [bamfile]``")
# Do the actual counting
logging.debug("Start molecule counting!")
results = exincounter.count(bamfile_cellsorted, multimap=multimap) # NOTE: we would avoid some millions of if statements evaluations if we write two function count and count_with output
dict_list_arrays, cell_bcs_order = results
########################
# Output #
########################
# Prepare the loom file output
if not exincounter.filter_mode:
valid_bcset = exincounter.valid_bcset # without -1
valid_bcs_list = list(valid_bcset) # without -1
gem_grp = ""
valid_cellid_list = np.array([f"{sampleid}:{v_bc}" for v_bc in valid_bcs_list]) # with sampleid and with -1
logging.debug(f"Example of barcode: {valid_bcs_list[0]} and cell_id: {valid_cellid_list[0]}")
ca = {"CellID": np.array([f"{sampleid}:{v_bc}{gem_grp}" for v_bc in cell_bcs_order])}
ca.update(additional_ca)
for key, value in sample.items():
ca[key] = np.full(len(cell_bcs_order), value)
# Save to loom file
outfile = os.path.join(outputfolder, f"{sampleid}.loom")
logging.debug(f"Generating output file {outfile}")
# row attributes
atr_table = (("Gene", "genename", str),
("Accession", "geneid", str),
("Chromosome", "chrom", str),
("Strand", "strand", str),
("Start", "start", int),
("End", "end", int))
logging.debug("Collecting row attributes")
ra = {}
for name_col_attr, name_obj_attr, dtyp in atr_table:
tmp_array = np.zeros((len(exincounter.genes),), dtype=object) # type: np.ndarray
for gene_id, gene_info in exincounter.genes.items():
tmp_array[exincounter.geneid2ix[gene_id]] = getattr(gene_info, name_obj_attr)
ra[name_col_attr] = tmp_array.astype(dtyp)
logging.debug("Generating data table")
layers: Dict[str, np.ndarray] = {}
for layer_name in logic_obj.layers:
layers[layer_name] = np.concatenate(dict_list_arrays[layer_name], axis=1)
del dict_list_arrays[layer_name]
for layer_name in logic_obj.layers:
total: np.ndarray # This is just a type annotation to avoid mypy complaints
try:
total += layers[layer_name]
except NameError:
total = np.array(layers[layer_name])
logging.debug("Writing loom file")
try:
ds = loompy.create(filename=outfile, matrix=total, row_attrs=ra, col_attrs=ca, dtype="float32")
for layer_name in logic_obj.layers:
ds.set_layer(name=layer_name, matrix=layers[layer_name], dtype=loom_numeric_dtype)
ds.attrs["velocyto.__version__"] = vcy.__version__
ds.attrs["velocyto.logic"] = logic
ds.close()
except TypeError:
# If user is using loompy2
# NOTE maybe this is not super efficient if the type and order are already correct
tmp_layers = {"": total.astype("float32", order="C", copy=False)}
tmp_layers.update({layer_name: layers[layer_name].astype(loom_numeric_dtype, order="C", copy=False) for layer_name in logic_obj.layers})
loompy.create(filename=outfile, layers=tmp_layers, row_attrs=ra, col_attrs=ca, file_attrs={"velocyto.__version__": vcy.__version__, "velocyto.logic": logic})
logging.debug("Terminated Succesfully!")
|
alipay/aop/api/response/AlipayOpenAppYufanlingsanyaowuYufalingsanyaowuQueryResponse.py | snowxmas/alipay-sdk-python-all | 213 | 12741203 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenAppYufanlingsanyaowuYufalingsanyaowuQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenAppYufanlingsanyaowuYufalingsanyaowuQueryResponse, self).__init__()
self._userid = None
@property
def userid(self):
return self._userid
@userid.setter
def userid(self, value):
self._userid = value
def parse_response_content(self, response_content):
response = super(AlipayOpenAppYufanlingsanyaowuYufalingsanyaowuQueryResponse, self).parse_response_content(response_content)
if 'userid' in response:
self.userid = response['userid']
|
features/steps/test_starter/{{ cookiecutter.repo_name }}/src/{{ cookiecutter.python_package }}/pipelines/data_science/nodes.py | daniel-falk/kedro | 2,047 | 12741210 | """Example code for the nodes in the example pipeline. This code is meant
just for illustrating basic Kedro features.
Delete this when you start working on your own Kedro project.
"""
# pylint: disable=invalid-name
import logging
from typing import Any, Dict
import numpy as np
import pandas as pd
def train_model(
train_x: pd.DataFrame, train_y: pd.DataFrame, parameters: Dict[str, Any]
) -> np.ndarray:
"""Node for training a simple multi-class logistic regression model. The
number of training iterations as well as the learning rate are taken from
conf/project/parameters.yml. All of the data as well as the parameters
will be provided to this function at the time of execution.
"""
num_iter = parameters["example_num_train_iter"]
lr = parameters["example_learning_rate"]
X = train_x.to_numpy()
Y = train_y.to_numpy()
# Add bias to the features
bias = np.ones((X.shape[0], 1))
X = np.concatenate((bias, X), axis=1)
weights = []
# Train one model for each class in Y
for k in range(Y.shape[1]):
# Initialise weights
theta = np.zeros(X.shape[1])
y = Y[:, k]
for _ in range(num_iter):
z = np.dot(X, theta)
h = _sigmoid(z)
gradient = np.dot(X.T, (h - y)) / y.size
theta -= lr * gradient
# Save the weights for each model
weights.append(theta)
# Return a joint multi-class model with weights for all classes
return np.vstack(weights).transpose()
def predict(model: np.ndarray, test_x: pd.DataFrame) -> np.ndarray:
"""Node for making predictions given a pre-trained model and a test set."""
X = test_x.to_numpy()
# Add bias to the features
bias = np.ones((X.shape[0], 1))
X = np.concatenate((bias, X), axis=1)
# Predict "probabilities" for each class
result = _sigmoid(np.dot(X, model))
# Return the index of the class with max probability for all samples
return np.argmax(result, axis=1)
def report_accuracy(predictions: np.ndarray, test_y: pd.DataFrame) -> None:
"""Node for reporting the accuracy of the predictions performed by the
previous node. Notice that this function has no outputs, except logging.
"""
# Get true class index
target = np.argmax(test_y.to_numpy(), axis=1)
# Calculate accuracy of predictions
accuracy = np.sum(predictions == target) / target.shape[0]
# Log the accuracy of the model
log = logging.getLogger(__name__)
log.info("Model accuracy on test set: %0.2f%%", accuracy * 100)
def _sigmoid(z):
"""A helper sigmoid function used by the training and the scoring nodes."""
return 1 / (1 + np.exp(-z))
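# Illustrative sketch of how these nodes compose outside a Kedro pipeline
# (the dataframes and parameter values below are assumptions, not project defaults):
#
#   parameters = {"example_num_train_iter": 100, "example_learning_rate": 0.1}
#   model = train_model(train_x, train_y, parameters)
#   predictions = predict(model, test_x)
#   report_accuracy(predictions, test_y)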
|
source/OneNote.popclipext/auth.py | cnstntn-kndrtv/PopClip-Extensions | 1,262 | 12741214 | <reponame>cnstntn-kndrtv/PopClip-Extensions
from __future__ import print_function
import constants, rauth, subprocess, os, json, base64, urlparse
"""
Auth module for OneNote.
Our strategy is to store only the refresh token and use it every time to get a new access token.
"""
def get_oauth_service():
client_id, client_secret = json.loads(base64.b64decode(constants.CLIENT_DATA))
return rauth.OAuth2Service(
client_id=client_id,
client_secret=client_secret,
access_token_url=constants.ENDPOINT_ACCESS,
authorize_url=constants.ENDPOINT_AUTHORIZE,
base_url=constants.ENDPOINT_BASE)
def get_session(stored_refresh_token):
""" called in main extension script to actually get a usable session """
service = get_oauth_service()
r = service.get_raw_access_token(data={
'refresh_token': stored_refresh_token,
'grant_type': 'refresh_token'
})
return service.get_session(r.json()['access_token'])
def access(authorization_code):
""" obtain the refresh token """
return get_oauth_service().get_raw_access_token(data={
'code': authorization_code,
'grant_type': 'authorization_code',
'redirect_uri': constants.CALLBACK
}).json()['refresh_token']
def authorize():
""" send user to the oauth autorization url in their browser """
subprocess.call(['open', get_oauth_service().get_authorize_url(**constants.AUTHORIZE_DATA)])
def main(callback_final=None):
""" this is called once (with no params) when the user clicks 'log in',
    and again (with params) when they click through the callback landing url """
if (callback_final):
print(json.dumps(access(urlparse.parse_qs(callback_final)['code'])), end='')
else:
print(json.dumps(authorize()), end='')
exit(4) # indicates to PopClip that a callback will follow
if __name__ == '__main__':
main(os.getenv('POPCLIP_AUTH_CALLBACK_FINAL'))
|
ch16-deployment/pages/tests.py | balazskiss1985/djangoforbeginners | 781 | 12741272 | from django.contrib.auth import get_user_model
from django.test import SimpleTestCase, TestCase
from django.urls import reverse
class HomePageTests(SimpleTestCase):
def test_home_page_status_code(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
def test_view_url_by_name(self):
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
def test_view_uses_correct_template(self):
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'home.html')
class SignupPageTests(TestCase):
username = 'newuser'
email = '<EMAIL>'
def test_signup_page_status_code(self):
response = self.client.get('/accounts/signup/')
self.assertEqual(response.status_code, 200)
def test_view_url_by_name(self):
response = self.client.get(reverse('signup'))
self.assertEqual(response.status_code, 200)
def test_view_uses_correct_template(self):
response = self.client.get(reverse('signup'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'registration/signup.html')
def test_signup_form(self):
new_user = get_user_model().objects.create_user(
self.username, self.email)
self.assertEqual(get_user_model().objects.all().count(), 1)
self.assertEqual(get_user_model().objects.all()
[0].username, self.username)
self.assertEqual(get_user_model().objects.all()
[0].email, self.email)
|
tools/misc/merge_imgs_flowmaps.py | ArlenCHEN/mmflow | 481 | 12741336 | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import cv2
import mmcv
import numpy as np
try:
import imageio
except ImportError:
imageio = None
def parse_args():
parser = argparse.ArgumentParser(
description='Merge images and visualized flow')
parser.add_argument(
'--img_dir', type=str, default=None, help='directory of images')
parser.add_argument(
'--flow_dir',
type=str,
default=None,
help='directory of visualized flow')
parser.add_argument(
'--resize_factor',
type=float,
default=0.5,
help='resize factor for gif')
parser.add_argument(
'--out_dir',
type=str,
default=None,
help='directory to save merged results')
args = parser.parse_args()
return args
def merge_imgs_flow(img_dir: str, flow_dir: str, out_dir: str) -> None:
"""Load images and visualized flow maps and merge them.
Args:
        img_dir (str): The directory of images.
flow_dir (str): The directory of flow maps.
out_dir (str): The directory to save the frames
"""
img_files = list(mmcv.scandir(img_dir))
flow_files = list(mmcv.scandir(flow_dir))
img_files.sort()
flow_files.sort()
# img is longer than flow
for i in range(len(img_files) - 1):
img = mmcv.imread(osp.join(img_dir, img_files[i]))
flow = mmcv.imread(osp.join(flow_dir, flow_files[i]))
frame = np.concatenate((img, flow), axis=1)
cv2.imwrite(osp.join(out_dir, flow_files[i]), frame)
def main():
args = parse_args()
merge_imgs_flow(args.img_dir, args.flow_dir, args.out_dir)
if __name__ == '__main__':
main()
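# Example invocation (paths are placeholders):
#   python merge_imgs_flowmaps.py --img_dir data/frames --flow_dir data/flow_vis \
#       --out_dir data/merged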
|
flexget/tests/test_npo_watchlist.py | Jeremiad/Flexget | 1,322 | 12741344 | # -*- coding: utf-8 -*-
import pytest
@pytest.mark.online
class TestNpoWatchlistInfo:
config = """
tasks:
test:
npo_watchlist:
email: '<EMAIL>'
password: '<PASSWORD>!'
"""
def test_npowatchlist_lookup(self, execute_task):
"""npo_watchlist: Test npo watchlist lookup (ONLINE)"""
task = execute_task('test')
entry = task.find_entry(
url='https://www.npostart.nl/zondag-met-lubach/09-11-2014/VPWON_1220631'
) # s01e01
assert entry['npo_id'] == 'VPWON_1220631'
assert entry['npo_url'] == 'https://www.npostart.nl/zondag-met-lubach/VPWON_1250334'
assert entry['npo_name'] == 'Zondag met Lubach'
assert (
entry['npo_description']
== 'Zeven dagen nieuws in dertig minuten, satirisch geremixt door Arjen Lubach. Nog actueler, nog satirischer en nog vaker nog het woord nog.'
)
assert entry['npo_runtime'] == '32'
assert entry['npo_premium'] is False
assert (
entry['npo_version'] == 'NPO.release-1.58.0'
) # specify for which version of NPO website we did run this unittest
        assert (
            task.find_entry(url='https://www.npostart.nl/14-01-2014/VARA_101348553') is None
        )  # episode with weird (and broken) URL that should be skipped
entry = task.find_entry(
url='https://www.npostart.nl/zembla/12-12-2013/VARA_101320582'
) # check that the next episode it there though
assert entry['npo_id'] == 'VARA_101320582'
assert entry['npo_url'] == 'https://www.npostart.nl/zembla/VARA_101377863'
assert entry['npo_name'] == 'ZEMBLA'
entry = task.find_entry(
url='https://www.npostart.nl/typisch-overvecht/24-05-2018/BV_101388144'
)
assert entry['npo_id'] == 'BV_101388144'
assert entry['npo_url'] == 'https://www.npostart.nl/typisch/BV_101386658'
assert entry['npo_name'] == 'Typisch'
entry = task.find_entry(
url='https://www.npostart.nl/zembla/14-10-2007/VARA_101153941'
) # episode without a running time
assert entry['npo_runtime'] == '0'
assert (
task.find_entry(url='https://www.npostart.nl/11-04-2014/KN_1656572') is None
) # episode without a name (and broken URL) that should be skipped
assert (
task.find_entry(
url='https://www.npostart.nl/zondag-met-lubach-westeros-the-series/04-09-2017/WO_VPRO_10651334'
)
is None
) # a trailer for the series, that should not be listed
@pytest.mark.online
class TestNpoWatchlistPremium:
config = """
tasks:
test:
npo_watchlist:
email: '<EMAIL>'
password: '<PASSWORD>!'
download_premium: yes
"""
def test_npowatchlist_lookup(self, execute_task):
"""npo_watchlist: Test npo watchlist lookup (ONLINE)"""
task = execute_task('test')
entry = task.find_entry(
url='https://www.npostart.nl/hollands-hoop/08-02-2020/BV_101396963'
) # a premium serie
assert entry['npo_id'] == 'BV_101396963'
assert entry['npo_url'] == 'https://www.npostart.nl/hollands-hoop/BV_101385153'
assert entry['npo_name'] == 'Hollands Hoop'
assert entry['npo_runtime'] == '53'
assert entry['npo_premium'] is True
@pytest.mark.online
class TestNpoWatchlistLanguageTheTVDBLookup:
config = """
tasks:
test:
npo_watchlist:
email: '<PASSWORD> <EMAIL>'
password: '<PASSWORD>!'
thetvdb_lookup: yes
"""
def test_tvdblang_lookup(self, execute_task):
"""npo_watchlist: Test npo_watchlist tvdb language lookup (ONLINE)"""
task = execute_task('test')
entry = task.find_entry(
url='https://www.npostart.nl/zondag-met-lubach/09-11-2014/VPWON_1220631'
) # s01e01
assert entry['npo_language'] == 'nl'
assert entry['language'] == 'nl'
assert entry['tvdb_id'] == 288799
assert entry['tvdb_language'] == 'nl'
|
tests/common/gcp_type/test_data/fake_groups.py | aarontp/forseti-security | 921 | 12741368 | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake groups data.
TODO: consolidate with other fake group test data.
"""
FAKE_GROUPS_DB_ROWS = [
{
'group_id': '1111aaaa1',
'member_role': 'OWNER',
'member_type': 'USER',
'member_email': '<EMAIL>'
},
{
'group_id': '2222bbbb2',
'member_role': 'MEMBER',
'member_type': 'GROUP',
'member_email': '<EMAIL>'
},
{
'group_id': '2222bbbb2',
'member_role': 'OWNER',
'member_type': 'GROUP',
'member_email': '<EMAIL>'
},
{
'group_id': '1111aaaa1',
'member_role': 'MEMBER',
'member_type': 'USER',
'member_email': '<EMAIL>'
},
{
'group_id': '1111aaaa1',
'member_role': 'MEMBER',
'member_type': 'USER',
'member_email': '<EMAIL>'
},
]
|
docs/_downloads/df7502a0f8c6d7dcbf3e27a329b05d03/deep_learning_tutorial.py | woojinsong/PyTorch-tutorials-kr | 221 | 12741370 | <reponame>woojinsong/PyTorch-tutorials-kr<gh_stars>100-1000
# -*- coding: utf-8 -*-
r"""
PyTorch를 이용한 딥러닝
**************************
**번역**: `황성수 <https://github.com/adonisues>`_
딥러닝 블록 구축 : 아핀 맵(affine maps), 비선형성, 객체
==========================================================================
딥러닝은 영리한 방법으로 비선형성을 가진 선형성을 구성하는 것으로
이루어집니다. 비선형성의 도입은 강력한 모델을 가능하게 합니다.
이 섹션에서 이 핵심 구성 요소를 다루고, 객체 함수를 만들고, 어떻게
모델이 학습되지는 살펴봅시다.
아핀 맵
~~~~~~~~~~~
딥러닝의 핵심 작업자 중 하나는 아핀 맵 입니다.
이 함수 :math:`f(x)` 는 다음과 같습니다.
.. math:: f(x) = Ax + b
여기서 :math:`A` 는 행렬, :math:`x, b` 는 벡터 입니다.
여기서 학습되는 변수는 :math:`A` 와 :math:`b` 입니다.
종종 :math:`b` 는 *편향(Bias)* 이라 불립니다.
PyTorch 와 대부분의 다른 딥러닝 프레임워크들은 고전적인 선형 대수학와
조금 다르게 동작합니다. 입력의 열 대신에 행으로 매핑합니다.
즉 주어진 :math:`A` 에서 출력의 :math:`i` 번째 행은
입력의 :math:`i` 번째 행에 매핑되고 편향(Bias)을 더합니다.
아래 예시를 살펴보십시오.
"""
# Author: <NAME>
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
######################################################################
lin = nn.Linear(5, 3) # maps from R^5 to R^3, parameters A, b
# data is 2x5. A maps from 5 to 3... can we map "data" under A?
data = torch.randn(2, 5)
print(lin(data)) # yes
######################################################################
# 비선형성
# ~~~~~~~~~
#
# 먼저 왜 비선형성이 필요한지 설명하는 다음 사실을 주목하십시오.
# :math:`f(x) = Ax + b` 와 :math:`g(x) = Cx + d` 두개의 아핀맵이 있다고 가정합니다.
# :math:`f(g(x))` 는 무엇일까요?
#
# .. math:: f(g(x)) = A(Cx + d) + b = ACx + (Ad + b)
#
# :math:`AC` 는 행렬이고 :math:`Ad + b` 는 벡터이므로 아핀맵 구성은
# 아핀맵이 주어집니다.
#
# 이것으로부터, 신경망이 아핀 구성의 긴 체인이 되길 원한다면,
# 단일 아핀 맵을 작성하는 것보다 이것이 모델에 추가하는 새로운 힘이
# 없다는 것을 알 수 있습니다.
#
# 아핀 계층 사이에 만약 비선형성을 적용한다면
# 이것은 위 경우와 달리 더욱 더 강력한 모델을 구축할 수 있습니다.
#
# 핵심적인 비선형성 :math:`\tanh(x), \sigma(x), \text{ReLU}(x)` 들이 가장
# 일반적입니다. 아마 의문이 생길겁니다 : "왜 이런 함수들이지? 나는 다른 많은
# 비선형성을 생각할 수 있는데". 그 이유는 그들이 변화도(gradient)를 계산하기 쉽고,
# 변화도 연산은 학습에 필수적이기 때문입니다.
# 예를 들어서
#
# .. math:: \frac{d\sigma}{dx} = \sigma(x)(1 - \sigma(x))
#
# 빠른 참고: AI 클래스에 대한 소개에서 일부 신경망을 배웠지만 :math:`\sigma(x)` 가 기본이었을 것입니다.
# 일반적으로 사람들은 실제로 그것을 사용하지 않고 피합니다.
# 이것은 변화도가 인수의 절대 값이 커짐에 따라 매우 빨리 *사라지기* 때문입니다.
# 작은 변화도는 학습하기 어렵다는 것을 의미합니다.
# 대부분의 사람들은 tanh 또는 ReLU를 기본값으로 사용합니다.
#
# Pytorch에서 대부분의 비선형성은 torch.functional에 있습니다 ( F 로 가져옵니다)
# 일반적으로 비선형성은 아핀맵과 같은 파라미터를 가지고 있지 않습니다.
# 즉, 학습 중에 업데이트되는 가중치가 없습니다.
data = torch.randn(2, 2)
print(data)
print(F.relu(data))
######################################################################
# Softmax 및 확률
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# 함수 :math:`\text{Softmax}(x)` 또한 단지 비선형성 이지만, 일반적으로 네트워크에서
# 마지막으로 수행되는 작업이라는 점에서 특별합니다.
# 이는 실수의 벡터를 취하여 확률 분포를 반환하기 때문입니다.
# 정의는 다음과 같습니다. :math:`x` 는 실수 벡터(음수, 양수 , 제약 없음)라고 하면,
# i번째 구성 요소는 :math:`\text{Softmax}(x)` 는
#
# .. math:: \frac{\exp(x_i)}{\sum_j \exp(x_j)}
#
# 출력은 확률 분포라는 것이 분명해야합니다:
# 각 요소는 음수가 아니며 모든 구성 요소의 합은 1입니다.
#
# 모두 음수가 아니게 하기 위해서 입력에 요소 단위의 지수 연산자를 적용한 다음
# 정규화 상수로 나누는 것도 생각할 수 있습니다.
#
# Softmax 도 torch.nn.functional 에 있습니다.
data = torch.randn(5)
print(data)
print(F.softmax(data, dim=0))
print(F.softmax(data, dim=0).sum()) # 확률 분포이기 때문에 합이 1 입니다!
print(F.log_softmax(data, dim=0)) # log_softmax 도 있습니다.
######################################################################
# 목적 함수(Objective Functions)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# 목적 함수는 네트워크가 최소화하도록 학습되는 함수입니다
# ( *손실 함수* 또는 *비용 함수* 라고 함).
# 먼저 학습 인스턴스를 선택하고 신경망을 통해 실행한 다음 출력의 손실을 계산합니다.
# 그런 다음 손실 함수의 미분을 취함으로써 모델의 파라미터가 업데이트됩니다.
# 직관적으로 모델이 자신의 대답에 완전히 확신하고 대답이 잘못되면 손실이 높아집니다.
# 답변에 자신이 있고 답변이 맞으면 손실이 적습니다.
#
# 학습 예제에서 손실 함수를 최소화하려는 아이디어는
# 네트워크가 잘 일반화되고 개발자 세트, 테스트 세트 또는 프로덕션에서
# 나타나지 않았던 예제(unseen examples)에 대해 작은 손실을 가지기를 바랍니다.
# 손실 함수의 예로 *음의 로그 우도 손실(negative log likelihood loss)* 있습니다.
# 이 것은 다중 클래스 분류에서 매우 자주 사용되는 목적 함수입니다.
# 감독 다중 클래스 분류의 경우에는 올바른 출력(정답을 맞춘 출력)의 음의 로그 확률을
# 최소화하도록 네트워크를 교육하는 것을 의미합니다.
# (또는 이와 동등하게 올바른 출력의 로그 확률을 최대화하십시오)
#
######################################################################
# 최적화와 학습
# =========================
#
# 그럼 인스턴스에 대해 손실 함수를 계산할 수 있다는 것은 무엇입니까? 그걸 어떻게 할까요?
# 우리는 이전에 Tensor가 그것을 계산하는데 사용된 것들에 해당하는 변화도를
# 계산하는 방법을 알고 있다는 것을 보았습니다.
# 손실은 Tensor이기 때문에 그것을 계산하는데 사용된 모든 파라미터와 관련하여
# 변화도를 계산할 수 있습니다! 그런 다음 표준 변화도 업데이트를 수행 할 수 있습니다.
# :math:`\theta` 가 우리의 파라미터라고 합시다.
# :math:`L(\theta)` 는 손실 함수, 그리고 :math:`\eta` 는 양의 러닝 레이트 입니다. 그러면
#
# .. math:: \theta^{(t+1)} = \theta^{(t)} - \eta \nabla_\theta L(\theta)
#
# 이 기본적인 그레디언트 업데이트 이상의 것을 하기 위해서 많은 알고리즘과
# 시도되고 있는 활발한 연구들이 있습니다.
# 많은 시도들은 학습 시간에 일어나는 것에 기반한 러닝 레이트를 변경해봅니다.
# 당신이 정말 관심이 없다면 특별히 이들 알고리즘이 무엇을 하는지 걱정할
# 필요가 없습니다. Torch는 torch.optim 패키지에서 많은 것을 제공하며
# 완전히 공개되어 있습니다. 가장 단순한 변화도 업데이트 사용은
# 더 복잡한 알고리즘을 사용하는 것과 동일합니다.
# 다른 업데이트 알고리즘과 업데이트 알고리즘을 위한 다른 파라미터(다른 초기 러닝 레이트)를
# 시도해 보는 것은 네트워크의 성능을 최적화하는데 중요합니다.
# 종종 기본 SGD를 Adam 또는 RMSprop 으로 교체하는 것이 눈에 띄게 성능
# 향상 시킵니다.
#
######################################################################
# Pytorch 에서 네트워크 구성요소 생성하기
# ==========================================
#
# NLP에 초점을 맞추기 전에, PyTorch에서 아핀 맵과 비선형성만을 사용하여
# 네트워크를 구축하는 주석 처리된 예제를 수행 할 수 있습니다.
# 또한 손실 함수를 계산하는 방법, PyTorch에 내장된 음의 로그 우도를 사용하는 방법,
# 역전파를 통해 매개 변수를 업데이트하는 방법을 볼 것입니다.
#
# 모든 네트워크 구성 요소는 nn.Module에서 상속 받아 forward() 메서드를 재정의해야합니다.
# 이것은 상용구에 관한 것입니다. nn.Module에서의 상속은 구성 요소에 기능을 제공합니다.
# 예를 들어 그것은 학습 가능한 파라미터를 추적하도록 만들고,
# ``.to(device)`` 로 CPU와 GPU 를 교환할수 있습니다.
# ``torch.device("cpu")`` 는 CPU 장치를 ``torch.device("cuda:0")`` 는 GPU 장치를 사용합니다.
#
# 희소한 Bag-of-Words Representation 을 받아서 두개의 레이블 "영어"와 "스페인어"의 확률 분포
# 출력하는 네트워크의 주석이 달린 예시를 작성해 봅시다.
# 이 모델은 단순한 논리 회귀 입니다.
#
######################################################################
# 예제: 논리 회귀 Bag-of-Words 분류기
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# 우리 모델은 희소한 BoW 표현을 레이블에 대한 로그 확률로 매핑합니다.
# 사전의 각 단어에 하나의 색인을 할당합니다.
# 예를 들어서 전체 사전이 각각 0과 1의 색인을 가진 두개의 단어 "hello" 와 "world" 라고 합시다.
# "hello hello hello hello" 문장의 BoW 벡터는 다음과 같습니다.
#
# .. math:: \left[ 4, 0 \right]
#
# "hello world world hello"는 다음과 같습니다.
#
# .. math:: \left[ 2, 2 \right]
#
# 일반화 하면 다음과 같습니다.
#
# .. math:: \left[ \text{Count}(\text{hello}), \text{Count}(\text{world}) \right]
#
# 이 BOW 벡터를 :math:`x` 라하면 네트워크의 출력은 다음과 같습니다:
#
# .. math:: \log \text{Softmax}(Ax + b)
#
# 즉, 아핀맵에 입력을 주고 그 다음 Log Softmax 를 합니다.
#
data = [("me gusta comer en la cafeteria".split(), "SPANISH"),
("Give it to me".split(), "ENGLISH"),
("No creo que sea una buena idea".split(), "SPANISH"),
("No it is not a good idea to get lost at sea".split(), "ENGLISH")]
test_data = [("Yo creo que si".split(), "SPANISH"),
("it is lost on me".split(), "ENGLISH")]
# word_to_ix 는 사전의 각 단어를 고유한 정수로 매핑하고
# 그것은 BoW 벡터에서 자신의 색인이 됩니다.
word_to_ix = {}
for sent, _ in data + test_data:
for word in sent:
if word not in word_to_ix:
word_to_ix[word] = len(word_to_ix)
print(word_to_ix)
VOCAB_SIZE = len(word_to_ix)
NUM_LABELS = 2
class BoWClassifier(nn.Module): # nn.Module로 부터 상속 받기 !
def __init__(self, num_labels, vocab_size):
        # nn.Module의 초기화 함수를 호출합니다.
        # 문법에 혼란스러워 하지 마시고 단지 항상 nn.Module 에서 수행하십시오.
super(BoWClassifier, self).__init__()
# 필요한 파라미터를 정의 하십시오. 이 경우에는 아핀 매핑의 매개 변수 인 A와 b가 필요합니다.
# Torch는 아핀 맵을 제공하는 nn.Linear()를 정의합니다
# 입력 차원이 vocab_size이고 출력이 num_labels 인 이유를 이해했는지 확인하십시오!
self.linear = nn.Linear(vocab_size, num_labels)
# 주의! 비선형성 Log Softmax에는 파라미터가 없습니다!
# 그래서 여기에 대해 걱정할 필요가 없습니다.
def forward(self, bow_vec):
# 선형 계층를 통해 입력을 전달한 다음 log_softmax로 전달합니다.
# 많은 비선형성 및 기타 기능이 torch.nn.functional 에 있습니다
return F.log_softmax(self.linear(bow_vec), dim=1)
def make_bow_vector(sentence, word_to_ix):
vec = torch.zeros(len(word_to_ix))
for word in sentence:
vec[word_to_ix[word]] += 1
return vec.view(1, -1)
def make_target(label, label_to_ix):
return torch.LongTensor([label_to_ix[label]])
model = BoWClassifier(NUM_LABELS, VOCAB_SIZE)
# 모델은 자신의 파라미터를 알고 있습니다. 아래에 있는 첫번째 출력은 A 두번째는 b 입니다.
# 모듈의 __init__ 함수에서 클래스 변수에 구성 요소를 할당 할 때마다 다음 행을 사용하여 완료합니다.
# self.linear = nn.Linear(...)
# 그런 다음 PyTorch 개발자의 Python 마법을 통해, 모듈(이 경우 BoWClassifier)은
# nn.Linear 파라미터에 대한 지식을 저장합니다
for param in model.parameters():
print(param)
# 모델을 실행하려면 BoW 벡터를 전달합니다.
# 여기서 우리는 학습 할 필요가 없으므로 코드는 torch.no_grad()로 싸여 있습니다.
with torch.no_grad():
sample = data[0]
bow_vector = make_bow_vector(sample[0], word_to_ix)
log_probs = model(bow_vector)
print(log_probs)
######################################################################
# 위의 값 중 어느 것이 ENGLISH와 SPANISH의 로그 확률에 해당하는 값일까요?
# 우리는 정의하지 않았지만, 학습하기를 원한다면 필요합니다.
#
label_to_ix = {"SPANISH": 0, "ENGLISH": 1}
######################################################################
# 그럼 학습을 해봅시다! 이를 위해 로그 확률을 얻고, 손실 함수를 계산하고,
# 손실 함수의 변화도를 계산한 다음 변화도 단계로 파라미터를
# 업데이트하기 위해 인스턴스를 통과시킵니다. 손실 기능은 nn 패키지의 Torch에서 제공합니다.
# nn.NLLLoss()는 원하는 음의 로그 우도 손실입니다. 또한 torch.optim에서 최적화 함수를 정의합니다.
# 여기서는 SGD 만 사용합니다.
#
# NLLLoss에 대한 *입력* 은 로그 확률의 벡터이고 목표 레이블입니다.
# 우리를 위한 로그 확률을 계산하지 않습니다. 이것이 네트워크의 마지막 계층이
# Log softmax 인 이유입니다. 손실 함수 nn.CrossEntropyLoss()는 Log softmax를 제외하고는 NLLLoss()와 같습니다.
#
# 훈련하기 전에 테스트 데이터를 실행하여 전후를 볼 수 있습니다.
with torch.no_grad():
for instance, label in test_data:
bow_vec = make_bow_vector(instance, word_to_ix)
log_probs = model(bow_vec)
print(log_probs)
# "creo"에 해당하는 행렬 열을 인쇄하십시오.
print(next(model.parameters())[:, word_to_ix["creo"]])
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
# 일반적으로 교육 데이터를 여러 번 전달 합니다.
# 100은 실제 데이터 세트보다 훨씬 더 크지 만 실제 데이터 세트는 두 개 이상의 인스턴스를 가집니다.
# 일반적으로 5 ~ 30 개 에포크가 적당합니다.
for epoch in range(100):
for instance, label in data:
# 1 단계. PyTorch는 그라데이션을 축적합니다.
# 각 인스턴스 전에 그들을 제거해야합니다.
model.zero_grad()
# 2 단계. BOW 벡터를 만들고 정수로 텐서로 대상을 싸야합니다.
# 예를 들어, 대상이 SPANISH이면 정수 0으로 합니다.
# 손실 함수는 로그 확률의 0번째 요소가 SPANISH에 해당하는 로그 확률임을 알 수 있습니다
bow_vec = make_bow_vector(instance, word_to_ix)
target = make_target(label, label_to_ix)
# 3 단계. 순전파를 실행합니다.
log_probs = model(bow_vec)
# 4 단계. optimizer.step()을 호출하여 손실, 변화도를 계산하고 파라미터를 업데이트합니다.
loss = loss_function(log_probs, target)
loss.backward()
optimizer.step()
with torch.no_grad():
for instance, label in test_data:
bow_vec = make_bow_vector(instance, word_to_ix)
log_probs = model(bow_vec)
print(log_probs)
# 스페인어에 해당하는 색인이 올라갑니다. 영어가 내려갑니다!!
print(next(model.parameters())[:, word_to_ix["creo"]])
######################################################################
# 정답을 얻었습니다! 첫 번째 예제에서는 스페인어의 로그 확률이 훨씬 높고
# 영어의 로그 확률은 테스트 데이터의 두 번째에서 훨씬 높다는 것을 알 수 있습니다.
#
# 이제 PyTorch 구성 요소를 만들고 이를 통해 일부 데이터를 전달하고
# 변화도 업데이트를 수행하는 방법을 살펴 보았습니다.
# 우리는 심도있는 NLP가 제공해야하는 것을 더 깊이 파고들 준비가되었습니다.
#
|
pose_estimation/datasets.py | arturohernandez10/pose-interpreter-networks | 110 | 12741381 | <filename>pose_estimation/datasets.py
import os
import numpy as np
import torch
import torch.utils.data
from skimage.draw import circle
from skimage.measure import find_contours
from PIL import Image
class RenderedPoseDataset(torch.utils.data.Dataset):
def __init__(self, data_root, objects, subset_num, transform):
self.transform = transform
# images
image_dirs = []
self.object_indices = []
for o in objects:
image_dirs.append(os.path.join(data_root, o, 'subset_{:08}'.format(subset_num)))
for image_dir in image_dirs:
assert os.path.exists(image_dir)
self.image_paths = []
for i, image_dir in enumerate(image_dirs):
image_names = sorted(os.listdir(image_dir))
self.image_paths.extend([os.path.join(image_dir, name) for name in image_names])
self.object_indices.extend(i * np.ones(len(image_names)))
self.object_indices = np.array(self.object_indices, dtype=np.int64)
assert len(self.object_indices) == len(self.image_paths)
# poses
poses_paths = []
for o in objects:
poses_paths.append(os.path.join(data_root, o, 'poses', 'subset_{:08}.txt'.format(subset_num)))
for poses_path in poses_paths:
assert os.path.exists(poses_path)
self.poses = []
for poses_path in poses_paths:
self.poses.extend(np.loadtxt(poses_path).astype(np.float32))
assert len(self.poses) == len(self.image_paths)
def __getitem__(self, index):
object_index = self.object_indices[index]
image = Image.open(self.image_paths[index])
image = self.transform(image)
# enforce quaternion [w, x, y, z] to have positive w
target_pose = self.poses[index]
if target_pose[3] < 0:
target_pose[3:] = -target_pose[3:]
return image, target_pose, object_index
def __len__(self):
return len(self.image_paths)
class OccludedRenderedPoseDataset(torch.utils.data.Dataset):
def __init__(self, data_root, objects, subset_num, transform, max_circle_size):
self.transform = transform
self.max_circle_size = max_circle_size
# images
image_dirs = []
self.object_indices = []
for o in objects:
image_dirs.append(os.path.join(data_root, o, 'subset_{:08}'.format(subset_num)))
for image_dir in image_dirs:
assert os.path.exists(image_dir)
self.image_paths = []
for i, image_dir in enumerate(image_dirs):
image_names = sorted(os.listdir(image_dir))
self.image_paths.extend([os.path.join(image_dir, name) for name in image_names])
self.object_indices.extend(i * np.ones(len(image_names)))
self.object_indices = np.array(self.object_indices, dtype=np.int64)
assert len(self.object_indices) == len(self.image_paths)
# poses
poses_paths = []
for o in objects:
poses_paths.append(os.path.join(data_root, o, 'poses', 'subset_{:08}.txt'.format(subset_num)))
for poses_path in poses_paths:
assert os.path.exists(poses_path)
self.poses = []
for poses_path in poses_paths:
self.poses.extend(np.loadtxt(poses_path).astype(np.float32))
assert len(self.poses) == len(self.image_paths)
def __getitem__(self, index):
object_index = self.object_indices[index]
image = Image.open(self.image_paths[index])
# if possible, occlude the object
np_image = np.array(image)
contours = find_contours(np_image.mean(axis=2) if np_image.ndim == 3 else np_image, 0)
if len(contours) > 0:
contour = sorted(contours, key=lambda x: -x.shape[0])[0]
if len(contour) > 0:
occluded_image = np_image.copy()
circle_center = contour[np.random.choice(len(contour))]
r, c = circle_center
circle_size = np.random.randint(self.max_circle_size + 1)
rr, cc = circle(r, c, circle_size, shape=np_image.shape)
occluded_image[rr, cc] = 0
image = Image.fromarray(occluded_image)
image = self.transform(image)
# enforce quaternion [w, x, y, z] to have positive w
target_pose = self.poses[index]
if target_pose[3] < 0:
target_pose[3:] = -target_pose[3:]
return image, target_pose, object_index
def __len__(self):
return len(self.image_paths)
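# Illustrative usage sketch (the data root, object name and transform are
# assumptions, not values from the repository):
#
#   import torchvision.transforms as T
#   dataset = RenderedPoseDataset('data/render', ['object_a'], 0, T.ToTensor())
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
#   image, target_pose, object_index = dataset[0]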
|
magnum/tests/unit/db/sqlalchemy/test_types.py | ajmadsen/magnum | 319 | 12741395 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for custom SQLAlchemy types via Magnum DB."""
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
import magnum.db.sqlalchemy.api as sa_api
from magnum.db.sqlalchemy import models
from magnum.tests.unit.db import base
class SqlAlchemyCustomTypesTestCase(base.DbTestCase):
def test_JSONEncodedDict_default_value(self):
# Create ClusterTemplate w/o labels
cluster_template1_id = uuidutils.generate_uuid()
self.dbapi.create_cluster_template({'uuid': cluster_template1_id})
cluster_template1 = sa_api.model_query(
models.ClusterTemplate).filter_by(uuid=cluster_template1_id).one()
self.assertEqual({}, cluster_template1.labels)
# Create ClusterTemplate with labels
cluster_template2_id = uuidutils.generate_uuid()
self.dbapi.create_cluster_template(
{'uuid': cluster_template2_id, 'labels': {'bar': 'foo'}})
cluster_template2 = sa_api.model_query(
models.ClusterTemplate).filter_by(uuid=cluster_template2_id).one()
self.assertEqual('foo', cluster_template2.labels['bar'])
def test_JSONEncodedDict_type_check(self):
self.assertRaises(db_exc.DBError,
self.dbapi.create_cluster_template,
{'labels':
['this is not a dict']})
def test_JSONEncodedList_default_value(self):
# Create nodegroup w/o node_addresses
nodegroup1_id = uuidutils.generate_uuid()
self.dbapi.create_nodegroup({'uuid': nodegroup1_id})
nodegroup1 = sa_api.model_query(
models.NodeGroup).filter_by(uuid=nodegroup1_id).one()
self.assertEqual([], nodegroup1.node_addresses)
# Create nodegroup with node_addresses
nodegroup2_id = uuidutils.generate_uuid()
self.dbapi.create_nodegroup({
'uuid': nodegroup2_id,
'node_addresses': ['mynode_address1',
'mynode_address2']
})
nodegroup2 = sa_api.model_query(
models.NodeGroup).filter_by(uuid=nodegroup2_id).one()
self.assertEqual(['mynode_address1', 'mynode_address2'],
nodegroup2.node_addresses)
def test_JSONEncodedList_type_check(self):
self.assertRaises(db_exc.DBError,
self.dbapi.create_nodegroup,
{'node_addresses':
{'this is not a list': 'test'}})
|
mmfashion/models/losses/cosine_embed_loss.py | RyanJiang0416/mmfashion | 952 | 12741400 | <gh_stars>100-1000
import torch.nn as nn
import torch.nn.functional as F
from ..registry import LOSSES
@LOSSES.register_module
class CosineEmbeddingLoss(nn.Module):
def __init__(self,
margin=0.,
size_average=None,
reduce=None,
reduction='mean'):
super(CosineEmbeddingLoss, self).__init__()
self.margin = margin
self.reduction = reduction
def forward(self, input1, input2, target):
return F.cosine_embedding_loss(
input1,
input2,
target,
margin=self.margin,
reduction=self.reduction)
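# Illustrative usage sketch (shapes and margin are assumptions):
#
#   loss_fn = CosineEmbeddingLoss(margin=0.2)
#   emb_a, emb_b = torch.randn(8, 128), torch.randn(8, 128)
#   target = torch.ones(8)   # +1 for matching pairs, -1 for non-matching pairs
#   loss = loss_fn(emb_a, emb_b, target)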
|
cli/sawtooth_cli/keygen.py | EddyKIL/sawtooth-core | 1,530 | 12741452 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
from __future__ import print_function
import getpass
import os
import sys
from sawtooth_signing import create_context
from sawtooth_cli.exceptions import CliException
def add_keygen_parser(subparsers, parent_parser):
parser = subparsers.add_parser(
'keygen',
help='Creates user signing keys',
description='Generates keys with which the user can sign '
'transactions and batches.',
epilog='The private and public key files are stored in '
'<key-dir>/<key-name>.priv and <key-dir>/<key-name>.pub. '
'<key-dir> defaults to ~/.sawtooth and <key-name> defaults to $USER.',
parents=[parent_parser])
parser.add_argument(
'key_name',
help='specify the name of the key to create',
nargs='?')
parser.add_argument(
'--key-dir',
help="specify the directory for the key files")
parser.add_argument(
'--force',
help="overwrite files if they exist",
action='store_true')
parser.add_argument(
'-q',
'--quiet',
help="do not display output",
action='store_true')
def do_keygen(args):
if args.key_name is not None:
key_name = args.key_name
else:
key_name = getpass.getuser()
if args.key_dir is not None:
key_dir = args.key_dir
if not os.path.exists(key_dir):
raise CliException('no such directory: {}'.format(key_dir))
else:
key_dir = os.path.join(os.path.expanduser('~'), '.sawtooth', 'keys')
if not os.path.exists(key_dir):
if not args.quiet:
print('creating key directory: {}'.format(key_dir))
try:
os.makedirs(key_dir, 0o755)
except IOError as e:
raise CliException('IOError: {}'.format(str(e))) from e
priv_filename = os.path.join(key_dir, key_name + '.priv')
pub_filename = os.path.join(key_dir, key_name + '.pub')
if not args.force:
file_exists = False
for filename in [priv_filename, pub_filename]:
if os.path.exists(filename):
file_exists = True
print('file exists: {}'.format(filename), file=sys.stderr)
if file_exists:
raise CliException(
'files exist, rerun with --force to overwrite existing files')
context = create_context('secp256k1')
private_key = context.new_random_private_key()
public_key = context.get_public_key(private_key)
try:
priv_exists = os.path.exists(priv_filename)
with open(priv_filename, 'w') as priv_fd:
if not args.quiet:
if priv_exists:
print('overwriting file: {}'.format(priv_filename))
else:
print('writing file: {}'.format(priv_filename))
priv_fd.write(private_key.as_hex())
priv_fd.write('\n')
# Set the private key u+rw g+r
os.chmod(priv_filename, 0o640)
pub_exists = os.path.exists(pub_filename)
with open(pub_filename, 'w') as pub_fd:
if not args.quiet:
if pub_exists:
print('overwriting file: {}'.format(pub_filename))
else:
print('writing file: {}'.format(pub_filename))
pub_fd.write(public_key.as_hex())
pub_fd.write('\n')
# Set the public key u+rw g+r o+r
os.chmod(pub_filename, 0o644)
except IOError as ioe:
raise CliException('IOError: {}'.format(str(ioe))) from ioe
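# Example invocation (the key name defaults to the current user when omitted;
# the directory below is a placeholder):
#   sawtooth keygen alice --key-dir /tmp/sawtooth-keys --force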
|
scripts/lib-python/files/create_operator_scorecard_bundle.py | juljog/community-operators | 449 | 12741469 | #!/usr/bin/python3
import sys
import getopt
import yaml
import os
import random
import string
def randomString(stringLength=10):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def main(argv):
"""Creates an operator group for the operator CSV if the CSV does not support AllNamespaces."""
crds_path = None
csv_path = None
namespace = None
bundle_path = None
proxy_image = None
deploy_dir = None
cdrd_path = None
try:
opts, args = getopt.getopt(argv, "c:v:n:b:p:d:r:", ["cdrd=", "crds=", "bundle=", "csvfile=", "namespace=", "proxy=", "deploy-dir="])
except getopt.GetoptError as e:
print(e)
sys.exit(2)
for opt, arg in opts:
if opt in ("-c", "--cdrd"):
cdrd_path = arg
elif opt in ("-r", "--crds"):
crds_path = arg
elif opt in ("-v", "--csvfile"):
csv_path = arg
elif opt in ("-n", "--namespace"):
namespace = arg
elif opt in ("-b", "--bundle"):
bundle_path = arg
elif opt in ("-p", "--proxy"):
proxy_image = arg
elif opt in ("-d", "--deploy-dir"):
deploy_dir = arg
crds = os.listdir(crds_path)
crds = [os.path.join(crds_path, filename) if filename.endswith("cr.yaml") else None for filename in crds]
crds = list(filter(lambda x: x is not None, crds))
for cr in list(crds):
scorecard_bundle = {
"scorecard": {
"output": "text",
"plugins": [
{"basic": {
"olm-deployed": True,
"namespace": namespace,
"crds-dir": cdrd_path,
"cr-manifest": [cr],
"proxy-image": proxy_image,
"bundle": deploy_dir,
"proxy-pull-policy": "Never",
"csv-path": csv_path,
"init-timeout": 180
}},
{"olm": {
"olm-deployed": True,
"namespace": namespace,
"crds-dir": cdrd_path,
"bundle": deploy_dir,
"cr-manifest": [cr],
"proxy-image": proxy_image,
"proxy-pull-policy": "Never",
"csv-path": csv_path,
"init-timeout": 180
}}
]
}
}
if scorecard_bundle is not None:
with open(os.path.join(bundle_path, randomString() + ".bundle.yaml"), 'w') as write_file:
print(yaml.safe_dump(scorecard_bundle, default_flow_style=False), file=write_file)
if crds is not None:
sys.exit(0)
else:
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
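# Illustrative invocation (every path, namespace and image below is a placeholder):
#   python create_operator_scorecard_bundle.py \
#       --crds deploy/crds --cdrd deploy/crds --csvfile deploy/olm-catalog/operator.csv.yaml \
#       --namespace scorecard-test --proxy scorecard-proxy:latest \
#       --bundle /tmp/scorecard-bundles --deploy-dir deploy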
|
packages/Python/lldbsuite/test/lang/swift/objc_runtime_ivars/TestObjCIvarDiscovery.py | xiaobai/swift-lldb | 765 | 12741480 | <reponame>xiaobai/swift-lldb<filename>packages/Python/lldbsuite/test/lang/swift/objc_runtime_ivars/TestObjCIvarDiscovery.py
# TestObjCIvarDiscovery.py
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2016 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ------------------------------------------------------------------------------
"""
Test that we can correctly see ivars from the Objective-C runtime
"""
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
import os
import os.path
import re
import unittest2
import shutil
class TestObjCIVarDiscovery(TestBase):
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@skipIf(debug_info=no_match("dsym"))
def test_nodbg(self):
self.build()
shutil.rmtree(self.getBuildArtifact("aTestFramework.framework/Versions/A/aTestFramework.dSYM"))
self.do_test(False)
@skipUnlessDarwin
@skipIf(debug_info=no_match("dsym"))
def test_dbg(self):
self.build()
self.do_test(True)
def prepare_value(self, value):
value.SetPreferDynamicValue(lldb.eDynamicCanRunTarget)
value.SetPreferSyntheticValue(True)
return value
def do_test(self, dbg):
"""Test that we can correctly see ivars from the Objective-C runtime"""
target = self.dbg.CreateTarget(self.getBuildArtifact("a.out"))
self.assertTrue(target, VALID_TARGET)
#self.registerSharedLibrariesWithTarget(target, ['aTestFramework.framework/aTestFramework'])
if lldb.remote_platform:
wd = lldb.remote_platform.GetWorkingDirectory()
directory = 'aTestFramework.framework/Versions/A/'
filename = directory + '/aTestFramework'
cur_dir = wd
for d in directory.split('/'):
err = lldb.remote_platform.MakeDirectory(
os.path.join(cur_dir, d))
self.assertFalse(err.Fail(), 'Failed to mkdir ' + d + ':' + str(err))
cur_dir = os.path.join(cur_dir, d)
err = lldb.remote_platform.Put(
lldb.SBFileSpec(self.getBuildArtifact(filename)),
lldb.SBFileSpec(os.path.join(wd, filename)))
self.assertFalse(err.Fail(), 'Failed to copy ' + filename + ':' + str(err))
# Launch the process, and do not stop at the entry point.
lldbutil.run_to_source_breakpoint(
self, 'Set breakpoint here', lldb.SBFileSpec('main.swift'))
if dbg:
self.expect(
"image list", "Contents/Resources/DWARF/aTestFramework")
else:
self.expect(
"image list",
"Contents/Resources/DWARF/aTestFramework",
matching=False)
self.runCmd("frame variable -d run --show-types --ptr-depth=1")
obj = self.prepare_value(self.frame().FindVariable("object"))
mysubclass = self.prepare_value(obj.GetChildAtIndex(0))
myclass = self.prepare_value(mysubclass.GetChildAtIndex(0))
m_pair = myclass.GetChildMemberWithName("m_pair")
m_pair_A = m_pair.GetChildMemberWithName("A")
m_pair_B = m_pair.GetChildMemberWithName("B")
self.assertEqual(m_pair_A.GetValueAsUnsigned(), 1)
self.assertEqual(m_pair_B.GetValueAsUnsigned(), 2)
m_derived = self.prepare_value(
myclass.GetChildMemberWithName("m_base"))
m_derivedX = m_derived.GetChildMemberWithName("m_DerivedX")
self.assertEqual(m_derivedX.GetValueAsUnsigned(), 1)
m_numbers = self.prepare_value(
myclass.GetChildMemberWithName("m_myclass_numbers"))
self.assertTrue(
m_numbers.GetSummary() == '3 elements',
"m_myclass_numbers != 3 elements")
m_subclass_ivar = mysubclass.GetChildMemberWithName("m_subclass_ivar")
self.assertTrue(
m_subclass_ivar.GetValueAsUnsigned() == 42,
"m_subclass_ivar != 42")
m_mysubclass_s = mysubclass.GetChildMemberWithName("m_mysubclass_s")
self.assertTrue(
m_mysubclass_s.GetSummary() == '"an NSString here"',
'm_subclass_s != "an NSString here"')
swiftivar = obj.GetChildMemberWithName("swiftivar")
self.assertTrue(
swiftivar.GetSummary() == '"Hey Swift!"', "swiftivar != Hey Swift")
silly = self.prepare_value(obj.GetChildMemberWithName("silly"))
silly_x = silly.GetChildMemberWithName("x")
silly_url = silly.GetChildMemberWithName("url")
self.assertTrue(silly_x.GetValueAsUnsigned() == 12, "x != 12")
self.assertTrue(
silly_url.GetSummary() == '"http://www.apple.com"',
"url != apple.com")
|
azure_provisioning_e2e/iothubservice20180630/models/job_request.py | olivakar/azure-iot-sdk-python | 366 | 12741481 | <filename>azure_provisioning_e2e/iothubservice20180630/models/job_request.py
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobRequest(Model):
"""JobRequest.
:param job_id: Job identifier
:type job_id: str
:param type: Required.
The type of job to execute. Possible values include: 'unknown', 'export',
'import', 'backup', 'readDeviceProperties', 'writeDeviceProperties',
'updateDeviceConfiguration', 'rebootDevice', 'factoryResetDevice',
'firmwareUpdate', 'scheduleDeviceMethod', 'scheduleUpdateTwin',
'restoreFromBackup', 'failoverDataCopy'
:type type: str or ~service20180630.models.enum
:param cloud_to_device_method: Required if jobType is cloudToDeviceMethod.
The method type and parameters.
:type cloud_to_device_method: ~service20180630.models.CloudToDeviceMethod
:param update_twin:
:type update_twin: ~service20180630.models.Twin
:param query_condition: Required if jobType is updateTwin or
cloudToDeviceMethod.
Condition for device query to get devices to execute the job on
:type query_condition: str
:param start_time: ISO 8601 date time to start the job
:type start_time: datetime
    :param max_execution_time_in_seconds: Max execution time in seconds (ttl
duration)
:type max_execution_time_in_seconds: long
"""
_attribute_map = {
"job_id": {"key": "jobId", "type": "str"},
"type": {"key": "type", "type": "str"},
"cloud_to_device_method": {"key": "cloudToDeviceMethod", "type": "CloudToDeviceMethod"},
"update_twin": {"key": "updateTwin", "type": "Twin"},
"query_condition": {"key": "queryCondition", "type": "str"},
"start_time": {"key": "startTime", "type": "iso-8601"},
"max_execution_time_in_seconds": {"key": "maxExecutionTimeInSeconds", "type": "long"},
}
def __init__(
self,
job_id=None,
type=None,
cloud_to_device_method=None,
update_twin=None,
query_condition=None,
start_time=None,
max_execution_time_in_seconds=None,
):
super(JobRequest, self).__init__()
self.job_id = job_id
self.type = type
self.cloud_to_device_method = cloud_to_device_method
self.update_twin = update_twin
self.query_condition = query_condition
self.start_time = start_time
self.max_execution_time_in_seconds = max_execution_time_in_seconds
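# Illustrative usage sketch (added for clarity; the job type, query and timing
# values below are hypothetical, not taken from the service definition):
#
#   from datetime import datetime, timedelta
#   job = JobRequest(
#       job_id="job-0001",
#       type="scheduleUpdateTwin",
#       query_condition="deviceId IN ['device-1', 'device-2']",
#       start_time=datetime.utcnow() + timedelta(minutes=5),
#       max_execution_time_in_seconds=3600,
#   )
#   # msrest serializes the instance using _attribute_map, so job_id becomes
#   # the wire-level key "jobId".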
|
demo/webcam_demo.py | Brym-Gyimah/mmdetection | 20,190 | 12741494 | <reponame>Brym-Gyimah/mmdetection
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import cv2
import torch
from mmdet.apis import inference_detector, init_detector
def parse_args():
parser = argparse.ArgumentParser(description='MMDetection webcam demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--camera-id', type=int, default=0, help='camera device id')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
args = parser.parse_args()
return args
def main():
args = parse_args()
device = torch.device(args.device)
model = init_detector(args.config, args.checkpoint, device=device)
camera = cv2.VideoCapture(args.camera_id)
print('Press "Esc", "q" or "Q" to exit.')
while True:
ret_val, img = camera.read()
result = inference_detector(model, img)
ch = cv2.waitKey(1)
if ch == 27 or ch == ord('q') or ch == ord('Q'):
break
model.show_result(
img, result, score_thr=args.score_thr, wait_time=1, show=True)
if __name__ == '__main__':
main()
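# Illustrative invocation (config and checkpoint paths are placeholders for
# whichever detector you have locally):
#   python demo/webcam_demo.py \
#       configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
#       checkpoints/faster_rcnn_r50_fpn_1x_coco.pth \
#       --device cuda:0 --camera-id 0 --score-thr 0.5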
|
algorithms/strings/reverse_vowel.py | zhengli0817/algorithms | 22,426 | 12741511 |
def reverse_vowel(s):
    """Return s with its vowels reversed: two pointers walk inward from both
    ends and swap the vowels they stop on."""
    vowels = "AEIOUaeiou"
i, j = 0, len(s)-1
s = list(s)
while i < j:
while i < j and s[i] not in vowels:
i += 1
while i < j and s[j] not in vowels:
j -= 1
s[i], s[j] = s[j], s[i]
i, j = i + 1, j - 1
return "".join(s)
|
DataUtils/handle_wordEmbedding2File.py | bamtercelboo/cnn-lstm-bilstm-deepcnn-cnnlstm | 935 | 12741515 | <gh_stars>100-1000
# coding=utf-8
# @Author : bamtercelboo
# @Datetime : 2018/07/19 22:35
# @File : handle_wordEmbedding2File.py
# @Last Modify Time : 2018/07/19 22:35
# @Contact : <EMAIL>
"""
handle external word embedding to file
"""
import os
import tqdm
class WordEmbedding2File:
def __init__(self, wordEmbedding_path, data_path, extract_path):
print("handling external word embedding to file")
self.wordEmbedding_path = wordEmbedding_path
self.data_path = data_path
self.extract_path = extract_path
self.data_dict = self.read_data(data_path)
self.extract_dict = {}
self.dim = 100
self.read_vectors(self.wordEmbedding_path)
self.write(self.extract_path, self.extract_dict)
# print(self.data_dict)
# print(self.extract_dict)
def read_data(self, path):
print("read data file {}".format(path))
data_list = []
with open(path, encoding="UTF-8") as f:
for line in f.readlines():
line = line.strip("\n").split(" ")[:-2]
data_list.extend(line)
return set(data_list)
def read_vectors(self, path):
print("read embedding path {}".format(path))
with open(path, encoding='utf-8') as f:
lines = f.readlines()
self.dim = len(lines[2].strip("\n").strip().split(" ")[1:-1])
# print(dim)
lines = tqdm.tqdm(lines)
for line in lines:
values = line.strip("\n").strip().split(" ")
if len(values) == 1 or len(values) == 2 or len(values) == 3:
continue
word, vector = values[0], values[1:-1]
if word in self.data_dict:
self.extract_dict[word] = vector
def write(self, path, dict):
print("writing to {}".format(path))
if os.path.exists(path):
os.remove(path)
file = open(path, encoding="UTF-8", mode="w")
all_words, dim = len(dict), self.dim
print(all_words, dim)
file.write(str(all_words) + " " + str(dim) + "\n")
for word in dict:
value = " ".join(dict[word])
v = word + " " + value + "\n"
file.write(v)
if __name__ == "__main__":
wordEmbedding_path = "GoogleNews_wordEmbedding/vectors.utf-8"
data_path = "./sst_all.txt"
extract_path = "./extract_googleNews_embed_sst.txt"
WordEmbedding2File(wordEmbedding_path=wordEmbedding_path, data_path=data_path, extract_path=extract_path)
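# Format note (inferred from read_vectors()/write() above, stated as an
# assumption rather than a spec): both files are word2vec-style text -- an
# optional "<count> <dim>" header line, then one "word v1 v2 ... vN" row per
# word (the reader drops the final split token, so rows are expected to end
# with a trailing space); lines with fewer than four tokens are skipped.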
|
megaman/setup.py | ffancheng/megaman | 303 | 12741550 | <reponame>ffancheng/megaman
# LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
import os
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('megaman', parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('datasets')
config.add_subpackage('embedding')
config.add_subpackage('embedding/tests')
config.add_subpackage('geometry')
config.add_subpackage('geometry/cyflann')
config.add_subpackage('geometry/tests')
config.add_subpackage('plotter')
config.add_subpackage('relaxation')
config.add_subpackage('relaxation/tests')
config.add_subpackage('utils')
config.add_subpackage('utils/tests')
config.add_data_files('geometry/tests/testmegaman_laplacian_rad0_2_lam1_5_n200.mat')
config.add_data_files('relaxation/tests/eps_halfdome.mat')
config.add_data_files('relaxation/tests/rloss_halfdome.mat')
config.add_data_files('datasets/megaman.png')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
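# Context note (added, not from the original file): configuration() follows the
# numpy.distutils convention -- a parent setup script can collect this package
# via Configuration.add_subpackage(), and each add_subpackage()/add_data_files()
# call above registers a subdirectory or data file relative to this package.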
|
ryu/tests/unit/ofproto/test_parser_v10.py | hiArvin/ryu | 269 | 12741559 | <reponame>hiArvin/ryu
# Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
import logging
from nose.tools import *
from ryu.ofproto.ofproto_v1_0_parser import *
from ryu.ofproto import ofproto_v1_0_parser
from ryu.lib import addrconv
LOG = logging.getLogger('test_ofproto_v10')
class TestOFPPhyPort(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPPhyPort
"""
# OFP_PHY_PORT_PACK_STR
# '!H6s16sIIIIII'... port_no, hw_addr, name, config, state
# curr, advertised, supported, peer
port_no = {'buf': '\xe7\x6b', 'val': 59243}
hw_addr = '52:54:54:10:20:99'
name = 'name'.ljust(16)
config = {'buf': '\x84\xb6\x8c\x53', 'val': 2226555987}
state = {'buf': '\x64\x07\xfb\xc9', 'val': 1678244809}
curr = {'buf': '\xa9\xe8\x0a\x2b', 'val': 2850556459}
advertised = {'buf': '\x78\xb9\x7b\x72', 'val': 2025421682}
supported = {'buf': '\x7e\x65\x68\xad', 'val': 2120575149}
peer = {'buf': '\xa4\x5b\x8b\xed', 'val': 2757463021}
buf = port_no['buf'] \
+ addrconv.mac.text_to_bin(hw_addr) \
+ name \
+ config['buf'] \
+ state['buf'] \
+ curr['buf'] \
+ advertised['buf'] \
+ supported['buf'] \
+ peer['buf']
c = OFPPhyPort(port_no['val'],
hw_addr,
name,
config['val'],
state['val'],
curr['val'],
advertised['val'],
supported['val'],
peer['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.port_no['val'], self.c.port_no)
eq_(self.hw_addr, self.c.hw_addr)
eq_(self.name, self.c.name)
eq_(self.config['val'], self.c.config)
eq_(self.state['val'], self.c.state)
eq_(self.curr['val'], self.c.curr)
eq_(self.advertised['val'], self.c.advertised)
eq_(self.supported['val'], self.c.supported)
eq_(self.peer['val'], self.c.peer)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.port_no['val'], res.port_no)
eq_(self.hw_addr, res.hw_addr)
eq_(self.name, res.name)
eq_(self.config['val'], res.config)
eq_(self.state['val'], res.state)
eq_(self.curr['val'], res.curr)
eq_(self.advertised['val'], res.advertised)
eq_(self.supported['val'], res.supported)
eq_(self.peer['val'], res.peer)
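# Illustrative note (not part of the original suite): the hand-assembled `buf`
# in TestOFPPhyPort is what struct.pack() yields for the same values with the
# documented format string, e.g.
#   struct.pack(ofproto.OFP_PHY_PORT_PACK_STR, 59243,
#               addrconv.mac.text_to_bin('52:54:54:10:20:99'),
#               'name'.ljust(16), 2226555987, 1678244809,
#               2850556459, 2025421682, 2120575149, 2757463021)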
class TestOFPMatch(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPMatch
"""
# OFP_MATCH_PACK_STR
# '!IH6s6sHBxHBB2xIIHH'...wildcards, in_port, dl_src, dl_dst, dl_vlan,
# dl_vlan_pcp, dl_type, nw_tos, nw_proto,
# nw_src, nw_dst, tp_src, tp_dst
wildcards = {'buf': '\xd2\x71\x25\x23', 'val': 3530630435}
in_port = {'buf': '\x37\x8b', 'val': 14219}
dl_src = '\x52\x54\x54\x10\x20\x99'
dl_dst = '\x61\x31\x50\x6d\xc9\xe5'
dl_vlan = {'buf': '\xc1\xf9', 'val': 49657}
dl_vlan_pcp = {'buf': '\x79', 'val': 121}
zfill0 = '\x00'
dl_type = {'buf': '\xa6\x9e', 'val': 42654}
nw_tos = {'buf': '\xde', 'val': 222}
nw_proto = {'buf': '\xe5', 'val': 229}
zfil11 = '\x00' * 2
nw_src = {'buf': '\x1b\x6d\x8d\x4b', 'val': 460164427}
nw_dst = {'buf': '\xab\x25\xe1\x20', 'val': 2871386400}
tp_src = {'buf': '\xd5\xc3', 'val': 54723}
tp_dst = {'buf': '\x78\xb9', 'val': 30905}
buf = wildcards['buf'] \
+ in_port['buf'] \
+ dl_src \
+ dl_dst \
+ dl_vlan['buf'] \
+ dl_vlan_pcp['buf'] \
+ zfill0 \
+ dl_type['buf'] \
+ nw_tos['buf'] \
+ nw_proto['buf'] \
+ zfil11 \
+ nw_src['buf'] \
+ nw_dst['buf'] \
+ tp_src['buf'] \
+ tp_dst['buf']
def _get_obj(self, dl_src, dl_dst):
c = OFPMatch(self.wildcards['val'],
self.in_port['val'],
dl_src,
dl_dst,
self.dl_vlan['val'],
self.dl_vlan_pcp['val'],
self.dl_type['val'],
self.nw_tos['val'],
self.nw_proto['val'],
self.nw_src['val'],
self.nw_dst['val'],
self.tp_src['val'],
self.tp_dst['val'])
return c
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
c = self._get_obj(self.dl_src, self.dl_dst)
eq_(self.wildcards['val'], c.wildcards)
eq_(self.in_port['val'], c.in_port)
eq_(self.dl_src, c.dl_src)
eq_(self.dl_dst, c.dl_dst)
eq_(self.dl_vlan['val'], c.dl_vlan)
eq_(self.dl_vlan_pcp['val'], c.dl_vlan_pcp)
eq_(self.dl_type['val'], c.dl_type)
eq_(self.nw_tos['val'], c.nw_tos)
eq_(self.nw_proto['val'], c.nw_proto)
eq_(self.nw_src['val'], c.nw_src)
eq_(self.nw_dst['val'], c.nw_dst)
eq_(self.tp_src['val'], c.tp_src)
eq_(self.tp_dst['val'], c.tp_dst)
def test_init_zero(self):
c = self._get_obj(0, 0)
eq_(mac.DONTCARE, c.dl_src)
eq_(mac.DONTCARE, c.dl_dst)
def test_parse(self):
c = self._get_obj(self.dl_src, self.dl_dst)
res = c.parse(self.buf, 0)
eq_(self.wildcards['val'], res.wildcards)
eq_(self.in_port['val'], res.in_port)
eq_(self.dl_src, res.dl_src)
eq_(self.dl_dst, res.dl_dst)
eq_(self.dl_vlan['val'], res.dl_vlan)
eq_(self.dl_vlan_pcp['val'], res.dl_vlan_pcp)
eq_(self.dl_type['val'], res.dl_type)
eq_(self.nw_tos['val'], res.nw_tos)
eq_(self.nw_proto['val'], res.nw_proto)
eq_(self.nw_src['val'], res.nw_src)
eq_(self.nw_dst['val'], res.nw_dst)
eq_(self.tp_src['val'], res.tp_src)
eq_(self.tp_dst['val'], res.tp_dst)
def test_serialize(self):
buf = bytearray()
c = self._get_obj(self.dl_src, self.dl_dst)
c.serialize(buf, 0)
fmt = ofproto.OFP_MATCH_PACK_STR
res = struct.unpack_from(fmt, buffer(buf))
eq_(self.wildcards['val'], res[0])
eq_(self.in_port['val'], res[1])
eq_(self.dl_src, res[2])
eq_(self.dl_dst, res[3])
eq_(self.dl_vlan['val'], res[4])
eq_(self.dl_vlan_pcp['val'], res[5])
eq_(self.dl_type['val'], res[6])
eq_(self.nw_tos['val'], res[7])
eq_(self.nw_proto['val'], res[8])
eq_(self.nw_src['val'], res[9])
eq_(self.nw_dst['val'], res[10])
eq_(self.tp_src['val'], res[11])
eq_(self.tp_dst['val'], res[12])
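# Round-trip sketch (illustrative, mirroring the serialize/parse pair above):
#   buf = bytearray()
#   c = OFPMatch(wildcards, in_port, dl_src, dl_dst, dl_vlan, dl_vlan_pcp,
#                dl_type, nw_tos, nw_proto, nw_src, nw_dst, tp_src, tp_dst)
#   c.serialize(buf, 0)
#   assert c.parse(str(buf), 0).in_port == in_port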
class TestOFPActionHeader(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionHeader
"""
# OFP_ACTION_HEADER_PACK_STR
# '!HH4x'...type, len, zfill
type = {'buf': '\x00\x02', 'val': ofproto.OFPAT_SET_VLAN_PCP}
len = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_HEADER_SIZE}
zfill = '\x00' * 4
buf = type['buf'] \
+ len['buf'] \
+ zfill
c = OFPActionHeader(type['val'], len['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type['val'], self.c.type)
eq_(self.len['val'], self.c.len)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.OFP_ACTION_HEADER_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type['val'], res[0])
eq_(self.len['val'], res[1])
class TestOFPActionOutput(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionOutput
"""
# OFP_ACTION_OUTPUT_PACK_STR
# '!HHHH'...type, len, port, max_len
type_ = {'buf': '\x00\x00', 'val': ofproto.OFPAT_OUTPUT}
len_ = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_OUTPUT_SIZE}
port = {'buf': '\x19\xce', 'val': 6606}
max_len = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_OUTPUT_SIZE}
buf = type_['buf'] \
+ len_['buf'] \
+ port['buf'] \
+ max_len['buf']
c = OFPActionOutput(port['val'], max_len['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.port['val'], self.c.port)
eq_(self.max_len['val'], self.c.max_len)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.port['val'], res.port)
eq_(self.max_len['val'], res.max_len)
@raises(AssertionError)
def test_parser_check_type(self):
type_ = {'buf': '\x00\x01', 'val': 1}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.port['buf'] \
+ self.max_len['buf']
self.c.parser(buf, 0)
@raises(AssertionError)
def test_parser_check_len(self):
len_ = {'buf': '\x00\x07', 'val': 7}
buf = self.type_['buf'] \
+ len_['buf'] \
+ self.port['buf'] \
+ self.max_len['buf']
self.c.parser(buf, 0)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.OFP_ACTION_OUTPUT_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.port['val'], res[2])
eq_(self.max_len['val'], res[3])
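# Usage sketch (illustrative, not from the original suite): outside the tests
# an output action is built directly from integers and packed into a buffer:
#   act = OFPActionOutput(6606, 65535)
#   buf = bytearray()
#   act.serialize(buf, 0)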
class TestOFPActionVlanVid(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionVlanVid
"""
# OFP_ACTION_VLAN_VID_PACK_STR
# '!HHH2x'...type, len, vlan_vid, zfill
type_ = {'buf': '\x00\x01', 'val': ofproto.OFPAT_SET_VLAN_VID}
len_ = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_VLAN_VID_SIZE}
vlan_vid = {'buf': '\x3c\x0e', 'val': 15374}
zfill = '\x00' * 2
buf = type_['buf'] \
+ len_['buf'] \
+ vlan_vid['buf'] \
+ zfill
c = OFPActionVlanVid(vlan_vid['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.vlan_vid['val'], self.c.vlan_vid)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.vlan_vid['val'], res.vlan_vid)
@raises(AssertionError)
def test_parser_check_type(self):
type_ = {'buf': '\x00\x02', 'val': 2}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.vlan_vid['buf'] \
+ self.zfill
self.c.parser(buf, 0)
@raises(AssertionError)
def test_parser_check_len(self):
len_ = {'buf': '\x00\x07', 'val': 7}
buf = self.type_['buf'] \
+ len_['buf'] \
+ self.vlan_vid['buf'] \
+ self.zfill
self.c.parser(buf, 0)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.OFP_ACTION_VLAN_VID_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vlan_vid['val'], res[2])
class TestOFPActionVlanPcp(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionVlanPcp
"""
# OFP_ACTION_VLAN_PCP_PACK_STR
# '!HHB3x'...type, len, vlan_pcp, zfill
type_ = {'buf': '\x00\x02', 'val': ofproto.OFPAT_SET_VLAN_PCP}
len_ = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_VLAN_PCP_SIZE}
vlan_pcp = {'buf': '\x1c', 'val': 28}
zfill = '\x00' * 3
buf = type_['buf'] \
+ len_['buf'] \
+ vlan_pcp['buf'] \
+ zfill
c = OFPActionVlanPcp(vlan_pcp['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.vlan_pcp['val'], self.c.vlan_pcp)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.vlan_pcp['val'], res.vlan_pcp)
@raises(AssertionError)
def test_parser_check_type(self):
type_ = {'buf': '\x00\x01', 'val': 1}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.vlan_pcp['buf'] \
+ self.zfill
self.c.parser(buf, 0)
@raises(AssertionError)
def test_parser_check_len(self):
len_ = {'buf': '\x00\x07', 'val': 7}
buf = self.type_['buf'] \
+ len_['buf'] \
+ self.vlan_pcp['buf'] \
+ self.zfill
self.c.parser(buf, 0)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.OFP_ACTION_VLAN_PCP_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vlan_pcp['val'], res[2])
class TestOFPActionStripVlan(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionStripVlan
"""
# OFP_ACTION_HEADER_PACK_STR
# '!HH4x'...type, len, zfill
type_ = {'buf': '\x00\x03', 'val': ofproto.OFPAT_STRIP_VLAN}
len_ = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_HEADER_SIZE}
zfill = '\x00' * 4
buf = type_['buf'] \
+ len_['buf'] \
+ zfill
c = OFPActionStripVlan()
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
ok_(self.c.parser(self.buf, 0))
@raises(AssertionError)
def test_parser_check_type(self):
type_ = {'buf': '\x00\x01', 'val': 1}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.zfill
self.c.parser(buf, 0)
@raises(AssertionError)
def test_parser_check_len(self):
len_ = {'buf': '\x00\x07', 'val': 7}
buf = self.type_['buf'] \
+ len_['buf'] \
+ self.zfill
self.c.parser(buf, 0)
class TestOFPActionSetDlSrc(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionSetDlSrc
"""
# OFP_ACTION_DL_ADDR_PACK_STR
# '!HH6s6x'...type, len, dl_addr, zfill
type_ = {'buf': '\x00\x04', 'val': ofproto.OFPAT_SET_DL_SRC}
len_ = {'buf': '\x00\x10', 'val': ofproto.OFP_ACTION_DL_ADDR_SIZE}
dl_addr = '\x0e\xde\x27\xce\xc6\xcf'
zfill = '\x00' * 6
buf = type_['buf'] \
+ len_['buf'] \
+ dl_addr \
+ zfill
c = OFPActionSetDlSrc(dl_addr)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.dl_addr, self.c.dl_addr)
def test_parser_type_src(self):
res = self.c.parser(self.buf, 0)
eq_(self.dl_addr, res.dl_addr)
def test_parser_type_dst(self):
type_ = {'buf': '\x00\x05', 'val': ofproto.OFPAT_SET_DL_DST}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.dl_addr \
+ self.zfill
res = self.c.parser(buf, 0)
eq_(self.dl_addr, res.dl_addr)
@raises(AssertionError)
def test_parser_check_type(self):
type_ = {'buf': '\x00\x06', 'val': 6}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.dl_addr \
+ self.zfill
res = self.c.parser(buf, 0)
@raises(AssertionError)
def test_parser_check_len(self):
len_ = {'buf': '\x00\x07', 'val': 7}
buf = self.type_['buf'] \
+ len_['buf'] \
+ self.dl_addr \
+ self.zfill
res = self.c.parser(buf, 0)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.OFP_ACTION_DL_ADDR_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.dl_addr, res[2])
class TestOFPActionSetDlDst(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionSetDlDst
"""
# OFP_ACTION_DL_ADDR_PACK_STR
# '!HH6s6x'...type, len, dl_addr, zfill
type_ = {'buf': '\x00\x05', 'val': ofproto.OFPAT_SET_DL_DST}
len_ = {'buf': '\x00\x10', 'val': ofproto.OFP_ACTION_DL_ADDR_SIZE}
dl_addr = '\x37\x48\x38\x9a\xf4\x28'
zfill = '\x00' * 6
buf = type_['buf'] \
+ len_['buf'] \
+ dl_addr \
+ zfill
c = OFPActionSetDlDst(dl_addr)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.dl_addr, self.c.dl_addr)
def test_parser_type_dst(self):
res = self.c.parser(self.buf, 0)
eq_(self.dl_addr, res.dl_addr)
def test_parser_type_src(self):
type_ = {'buf': '\x00\x04', 'val': ofproto.OFPAT_SET_DL_SRC}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.dl_addr \
+ self.zfill
res = self.c.parser(buf, 0)
eq_(self.dl_addr, res.dl_addr)
@raises(AssertionError)
def test_parser_check_type(self):
type_ = {'buf': '\x00\x06', 'val': 6}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.dl_addr \
+ self.zfill
res = self.c.parser(buf, 0)
@raises(AssertionError)
def test_parser_check_len(self):
len_ = {'buf': '\x00\x07', 'val': 7}
buf = self.type_['buf'] \
+ len_['buf'] \
+ self.dl_addr \
+ self.zfill
res = self.c.parser(buf, 0)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.OFP_ACTION_DL_ADDR_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.dl_addr, res[2])
class TestOFPActionSetNwSrc(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionSetNwSrc
"""
# OFP_ACTION_NW_ADDR_PACK_STR
# '!HHI'...type, len, nw_addr
type_ = {'buf': '\x00\x06', 'val': ofproto.OFPAT_SET_NW_SRC}
len_ = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_NW_ADDR_SIZE}
nw_addr = {'buf': '\xc0\xa8\x7a\x0a', 'val': 3232266762}
buf = type_['buf'] \
+ len_['buf'] \
+ nw_addr['buf']
c = OFPActionSetNwSrc(nw_addr['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.nw_addr['val'], self.c.nw_addr)
def test_parser_src(self):
res = self.c.parser(self.buf, 0)
eq_(self.nw_addr['val'], res.nw_addr)
def test_parser_dst(self):
type_ = {'buf': '\x00\x07', 'val': ofproto.OFPAT_SET_NW_DST}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.nw_addr['buf']
res = self.c.parser(buf, 0)
eq_(self.nw_addr['val'], res.nw_addr)
@raises(AssertionError)
def test_parser_check_type(self):
type_ = {'buf': '\x00\x05', 'val': 5}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.nw_addr['buf']
self.c.parser(buf, 0)
@raises(AssertionError)
def test_parser_check_len(self):
len_ = {'buf': '\x00\x10', 'val': 16}
buf = self.type_['buf'] \
+ len_['buf'] \
+ self.nw_addr['buf']
self.c.parser(buf, 0)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.OFP_ACTION_NW_ADDR_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.nw_addr['val'], res[2])
class TestOFPActionSetNwDst(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionSetNwDst
"""
# OFP_ACTION_NW_ADDR_PACK_STR
# '!HHI'...type, len, nw_addr
type_ = {'buf': '\x00\x07', 'val': ofproto.OFPAT_SET_NW_DST}
len_ = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_NW_ADDR_SIZE}
nw_addr = {'buf': '\xc0\xa8\x7a\x0a', 'val': 3232266762}
buf = type_['buf'] \
+ len_['buf'] \
+ nw_addr['buf']
c = OFPActionSetNwDst(nw_addr['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.nw_addr['val'], self.c.nw_addr)
def test_parser_dst(self):
res = self.c.parser(self.buf, 0)
eq_(self.nw_addr['val'], res.nw_addr)
def test_parser_src(self):
type_ = {'buf': '\x00\x06', 'val': ofproto.OFPAT_SET_NW_SRC}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.nw_addr['buf']
res = self.c.parser(buf, 0)
eq_(self.nw_addr['val'], res.nw_addr)
@raises(AssertionError)
def test_parser_check_type(self):
type_ = {'buf': '\x00\x05', 'val': 5}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.nw_addr['buf']
self.c.parser(buf, 0)
@raises(AssertionError)
def test_parser_check_len(self):
len_ = {'buf': '\x00\x10', 'val': 16}
buf = self.type_['buf'] \
+ len_['buf'] \
+ self.nw_addr['buf']
self.c.parser(buf, 0)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.OFP_ACTION_NW_ADDR_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.nw_addr['val'], res[2])
class TestOFPActionSetNwTos(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionSetNwTos
"""
# OFP_ACTION_NW_TOS_PACK_STR
# '!HHB3x'...type, len, tos, zfill
type_ = {'buf': '\x00\x08', 'val': ofproto.OFPAT_SET_NW_TOS}
len_ = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_NW_TOS_SIZE}
tos = {'buf': '\xb6', 'val': 182}
zfill = '\x00' * 3
buf = type_['buf'] \
+ len_['buf'] \
+ tos['buf'] \
+ zfill
c = OFPActionSetNwTos(tos['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.tos['val'], self.c.tos)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.tos['val'], res.tos)
@raises(AssertionError)
def test_parser_check_type(self):
type_ = {'buf': '\x00\x05', 'val': 5}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.tos['buf'] \
+ self.zfill
self.c.parser(buf, 0)
@raises(AssertionError)
def test_parser_check_len(self):
len_ = {'buf': '\x00\x07', 'val': 7}
buf = self.type_['buf'] \
+ len_['buf'] \
+ self.tos['buf'] \
+ self.zfill
self.c.parser(buf, 0)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.OFP_ACTION_NW_TOS_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.tos['val'], res[2])
class TestOFPActionSetTpSrc(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionSetTpSrc
"""
# OFP_ACTION_TP_PORT_PACK_STR
# '!HHH2x'...type, len, tp, zfill
type_ = {'buf': '\x00\x09', 'val': ofproto.OFPAT_SET_TP_SRC}
len_ = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_TP_PORT_SIZE}
tp = {'buf': '\x07\xf1', 'val': 2033}
zfill = '\x00' * 2
buf = type_['buf'] \
+ len_['buf'] \
+ tp['buf'] \
+ zfill
c = OFPActionSetTpSrc(tp['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.tp['val'], self.c.tp)
def test_parser_src(self):
res = self.c.parser(self.buf, 0)
eq_(self.tp['val'], res.tp)
def test_parser_dst(self):
type_ = {'buf': '\x00\x0a', 'val': ofproto.OFPAT_SET_TP_DST}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.tp['buf'] \
+ self.zfill
        res = self.c.parser(buf, 0)
eq_(self.tp['val'], res.tp)
@raises(AssertionError)
def test_parser_check_type(self):
type_ = {'buf': '\x00\x07', 'val': 7}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.tp['buf'] \
+ self.zfill
self.c.parser(buf, 0)
@raises(AssertionError)
def test_parser_check_len(self):
len_ = {'buf': '\x00\x07', 'val': 7}
buf = self.type_['buf'] \
+ len_['buf'] \
+ self.tp['buf'] \
+ self.zfill
self.c.parser(buf, 0)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.OFP_ACTION_TP_PORT_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.tp['val'], res[2])
class TestOFPActionSetTpDst(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionSetTpDst
"""
# OFP_ACTION_TP_PORT_PACK_STR
# '!HHH2x'...type, len, tp, zfill
type_ = {'buf': '\x00\x0a', 'val': ofproto.OFPAT_SET_TP_DST}
len_ = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_TP_PORT_SIZE}
tp = {'buf': '\x06\x6d', 'val': 1645}
zfill = '\x00' * 2
buf = type_['buf'] \
+ len_['buf'] \
+ tp['buf'] \
+ zfill
c = OFPActionSetTpDst(tp['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.tp['val'], self.c.tp)
def test_parser_dst(self):
res = self.c.parser(self.buf, 0)
eq_(self.tp['val'], res.tp)
def test_parser_src(self):
type_ = {'buf': '\x00\x09', 'val': ofproto.OFPAT_SET_TP_SRC}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.tp['buf'] \
+ self.zfill
res = self.c.parser(buf, 0)
eq_(self.tp['val'], res.tp)
@raises(AssertionError)
def test_parser_check_type(self):
type_ = {'buf': '\x00\x10', 'val': 16}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.tp['buf'] \
+ self.zfill
self.c.parser(buf, 0)
@raises(AssertionError)
def test_parser_check_len(self):
len_ = {'buf': '\x00\x07', 'val': 7}
buf = self.type_['buf'] \
+ len_['buf'] \
+ self.tp['buf'] \
+ self.zfill
self.c.parser(buf, 0)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.OFP_ACTION_TP_PORT_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.tp['val'], res[2])
class TestOFPActionEnqueue(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPActionEnqueue
"""
# OFP_ACTION_ENQUEUE_PACK_STR
# '!HHH6xI'...type_, len_, port, zfill, queue_id
type_ = {'buf': '\x00\x0b', 'val': ofproto.OFPAT_ENQUEUE}
len_ = {'buf': '\x00\x10', 'val': ofproto.OFP_ACTION_ENQUEUE_SIZE}
port = {'buf': '\x04\x55', 'val': 1109}
zfill = '\x00' * 6
queue_id = {'buf': '\x0a\x5b\x03\x5e', 'val': 173736798}
buf = type_['buf'] \
+ len_['buf'] \
+ port['buf'] \
+ zfill \
+ queue_id['buf']
c = OFPActionEnqueue(port['val'], queue_id['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.port['val'], self.c.port)
eq_(self.queue_id['val'], self.c.queue_id)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.port['val'], res.port)
eq_(self.queue_id['val'], res.queue_id)
@raises(AssertionError)
def test_parser_check_type(self):
type_ = {'buf': '\x00\x0a', 'val': 10}
buf = type_['buf'] \
+ self.len_['buf'] \
+ self.port['buf'] \
+ self.zfill \
+ self.queue_id['buf']
self.c.parser(buf, 0)
@raises(AssertionError)
def test_parser_check_len(self):
len_ = {'buf': '\x00\x05', 'val': 5}
buf = self.type_['buf'] \
+ len_['buf'] \
+ self.port['buf'] \
+ self.zfill \
+ self.queue_id['buf']
self.c.parser(buf, 0)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.OFP_ACTION_ENQUEUE_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.port['val'], res[2])
eq_(self.queue_id['val'], res[3])
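# Context note (added for clarity): the NXAction* cases below are Nicira
# vendor extensions.  On the wire they are ordinary OFPAT_VENDOR (0xffff)
# actions whose body starts with the Nicira vendor id 0x00002320 (8992)
# followed by a two-byte NXAST_* subtype, which is why every buf below begins
# with '\xff\xff', a length, '\x00\x00\x23\x20' and a subtype.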
class TestNXActionResubmit(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionResubmit
"""
# NX_ACTION_RESUBMIT_PACK_STR
# '!HHIHHB3x'...type, len, vendor, subtype, in_port, table, zfill
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x10', 'val': ofproto.NX_ACTION_RESUBMIT_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': 8992}
subtype = {'buf': '\x00\x01', 'val': 1}
in_port = {'buf': '\x0a\x4c', 'val': 2636}
table = {'buf': '\x52', 'val': 82}
zfill = '\x00' * 3
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ in_port['buf'] \
+ table['buf'] \
+ zfill
c = NXActionResubmit(in_port['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self.len_['val'], self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
eq_(self.in_port['val'], self.c.in_port)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.in_port['val'], res.in_port)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.NX_ACTION_RESUBMIT_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
eq_(self.in_port['val'], res[4])
class TestNXActionResubmitTable(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionResubmitTable
"""
# NX_ACTION_RESUBMIT_PACK_STR
# '!HHIHHB3x'...type, len, vendor, subtype, in_port, table, zfill
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x10', 'val': ofproto.NX_ACTION_RESUBMIT_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': 8992}
subtype = {'buf': '\x00\x0e', 'val': 14}
in_port = {'buf': '\x0a\x4c', 'val': 2636}
table = {'buf': '\x52', 'val': 82}
zfill = '\x00' * 3
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ in_port['buf'] \
+ table['buf'] \
+ zfill
c = NXActionResubmitTable(in_port['val'], table['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self.len_['val'], self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
eq_(self.in_port['val'], self.c.in_port)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.in_port['val'], res.in_port)
eq_(self.table['val'], res.table)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.NX_ACTION_RESUBMIT_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
eq_(self.in_port['val'], res[4])
class TestNXActionSetTunnel(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionSetTunnel
"""
# NX_ACTION_SET_TUNNEL_PACK_STR
# '!HHIH2xI'...type, len, vendor, subtype, zfill, tun_id
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x10', 'val': ofproto.NX_ACTION_SET_TUNNEL_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': 8992}
subtype = {'buf': '\x00\x02', 'val': 2}
zfill = '\x00' * 2
tun_id = {'buf': '\x01\x6f\x01\xd0', 'val': 24052176}
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ zfill \
+ tun_id['buf']
c = NXActionSetTunnel(tun_id['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self.len_['val'], self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
eq_(self.tun_id['val'], self.c.tun_id)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.tun_id['val'], res.tun_id)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.NX_ACTION_SET_TUNNEL_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
eq_(self.tun_id['val'], res[4])
class TestNXActionSetQueue(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionSetQueue
"""
# NX_ACTION_SET_QUEUE_PACK_STR
# '!HHIH2xI'...type, len, vendor, subtype, zfill, queue_id
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x10', 'val': ofproto.NX_ACTION_SET_TUNNEL_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': ofproto.NX_VENDOR_ID}
subtype = {'buf': '\x00\x04', 'val': ofproto.NXAST_SET_QUEUE}
zfill = '\x00' * 2
queue_id = {'buf': '\xde\xbe\xc5\x18', 'val': 3737044248}
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ zfill \
+ queue_id['buf']
c = NXActionSetQueue(queue_id['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self.len_['val'], self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
eq_(self.queue_id['val'], self.c.queue_id)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.queue_id['val'], res.queue_id)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.NX_ACTION_SET_QUEUE_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
eq_(self.queue_id['val'], res[4])
class TestNXActionPopQueue(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionPopQueue
"""
# NX_ACTION_POP_QUEUE_PACK_STR
# '!HHIH6x'...type, len, vendor, subtype, zfill
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x10', 'val': ofproto.NX_ACTION_SET_TUNNEL_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': ofproto.NX_VENDOR_ID}
subtype = {'buf': '\x00\x05', 'val': ofproto.NXAST_POP_QUEUE}
zfill = '\x00' * 6
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ zfill
c = NXActionPopQueue()
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self.len_['val'], self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.type_['val'], res.type)
eq_(self.len_['val'], res.len)
eq_(self.vendor['val'], res.vendor)
eq_(self.subtype['val'], res.subtype)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.NX_ACTION_POP_QUEUE_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
class TestNXActionRegMove(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionRegMove
"""
# NX_ACTION_REG_MOVE_PACK_STR
# '!HHIHHHHII'...type_, len_, vendor, subtype, n_bits,
# src_ofs, dst_ofs, src, dst
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x18', 'val': ofproto.NX_ACTION_REG_MOVE_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': ofproto.NX_VENDOR_ID}
subtype = {'buf': '\x00\x06', 'val': ofproto.NXAST_REG_MOVE}
n_bits = {'buf': '\x3d\x98', 'val': 15768}
src_ofs = {'buf': '\xf3\xa3', 'val': 62371}
dst_ofs = {'buf': '\xdc\x67', 'val': 56423}
src = {'buf': '\x15\x68\x60\xfd', 'val': 359162109}
dst = {'buf': '\x9f\x9f\x88\x26', 'val': 2678032422}
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ n_bits['buf'] \
+ src_ofs['buf'] \
+ dst_ofs['buf'] \
+ src['buf'] \
+ dst['buf']
c = NXActionRegMove(n_bits['val'],
src_ofs['val'],
dst_ofs['val'],
src['val'],
dst['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self.len_['val'], self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
eq_(self.n_bits['val'], self.c.n_bits)
eq_(self.src_ofs['val'], self.c.src_ofs)
eq_(self.dst_ofs['val'], self.c.dst_ofs)
eq_(self.src['val'], self.c.src)
eq_(self.dst['val'], self.c.dst)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.n_bits['val'], res.n_bits)
eq_(self.src_ofs['val'], res.src_ofs)
eq_(self.dst_ofs['val'], res.dst_ofs)
eq_(self.src['val'], res.src)
eq_(self.dst['val'], res.dst)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.NX_ACTION_REG_MOVE_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
eq_(self.n_bits['val'], res[4])
eq_(self.src_ofs['val'], res[5])
eq_(self.dst_ofs['val'], res[6])
eq_(self.src['val'], res[7])
eq_(self.dst['val'], res[8])
class TestNXActionRegLoad(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionRegLoad
"""
# NX_ACTION_REG_LOAD_PACK_STR
# '!HHIHHIQ'...type_, len_, vendor, subtype,
# ofs_nbits, dst, value
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x18', 'val': ofproto.NX_ACTION_REG_MOVE_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': ofproto.NX_VENDOR_ID}
subtype = {'buf': '\x00\x07', 'val': ofproto.NXAST_REG_LOAD}
ofs_nbits = {'buf': '\x3d\x98', 'val': 15768}
dst = {'buf': '\x9f\x9f\x88\x26', 'val': 2678032422}
value = {'buf': '\x33\x51\xcd\x43\x25\x28\x18\x99',
'val': 3697962457317775513}
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ ofs_nbits['buf'] \
+ dst['buf'] \
+ value['buf']
c = NXActionRegLoad(ofs_nbits['val'],
dst['val'],
value['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self.len_['val'], self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
eq_(self.ofs_nbits['val'], self.c.ofs_nbits)
eq_(self.dst['val'], self.c.dst)
eq_(self.value['val'], self.c.value)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.ofs_nbits['val'], res.ofs_nbits)
eq_(self.dst['val'], res.dst)
eq_(self.value['val'], res.value)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.NX_ACTION_REG_LOAD_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
eq_(self.ofs_nbits['val'], res[4])
eq_(self.dst['val'], res[5])
eq_(self.value['val'], res[6])
class TestNXActionSetTunnel64(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionSetTunnel64
"""
# NX_ACTION_SET_TUNNEL64_PACK_STR
# '!HHIH6xQ'...type, len, vendor, subtype, zfill, tun_id
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x18', 'val': ofproto.NX_ACTION_SET_TUNNEL64_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': ofproto.NX_VENDOR_ID}
subtype = {'buf': '\x00\x09', 'val': ofproto.NXAST_SET_TUNNEL64}
zfill = '\x00' * 6
tun_id = {'buf': '\x6e\x01\xa6\xea\x7e\x36\x1d\xd9',
'val': 7926800345218817497}
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ zfill \
+ tun_id['buf']
c = NXActionSetTunnel64(tun_id['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self.len_['val'], self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
eq_(self.tun_id['val'], self.c.tun_id)
def test_parser(self):
res = self.c.parser(self.buf, 0)
        eq_(self.tun_id['val'], res.tun_id)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.NX_ACTION_SET_TUNNEL64_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
eq_(self.tun_id['val'], res[4])
class TestNXActionMultipath(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionMultipath
"""
# NX_ACTION_MULTIPATH_PACK_STR
# '!HHIHHH2xHHI2xHI'...type, len, vendor, subtype, fields, basis, zfill
# algorithm, max_link, arg, zfill, ofs_nbits, dst
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x20', 'val': ofproto.NX_ACTION_MULTIPATH_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': ofproto.NX_VENDOR_ID}
subtype = {'buf': '\x00\x0a', 'val': ofproto.NXAST_MULTIPATH}
fields = {'buf': '\x6d\xf5', 'val': 28149}
basis = {'buf': '\x7c\x0a', 'val': 31754}
zfill0 = '\x00' * 2
algorithm = {'buf': '\x82\x1d', 'val': 33309}
max_link = {'buf': '\x06\x2b', 'val': 1579}
arg = {'buf': '\x18\x79\x41\xc8', 'val': 410599880}
zfill1 = '\x00' * 2
ofs_nbits = {'buf': '\xa9\x9a', 'val': 43418}
dst = {'buf': '\xb9\x2f\x16\x64', 'val': 3106870884}
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ fields['buf'] \
+ basis['buf'] \
+ zfill0 \
+ algorithm['buf'] \
+ max_link['buf'] \
+ arg['buf'] \
+ zfill1 \
+ ofs_nbits['buf'] \
+ dst['buf']
c = NXActionMultipath(fields['val'],
basis['val'],
algorithm['val'],
max_link['val'],
arg['val'],
ofs_nbits['val'],
dst['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.fields['val'], self.c.fields)
eq_(self.basis['val'], self.c.basis)
eq_(self.algorithm['val'], self.c.algorithm)
eq_(self.max_link['val'], self.c.max_link)
eq_(self.arg['val'], self.c.arg)
eq_(self.ofs_nbits['val'], self.c.ofs_nbits)
eq_(self.dst['val'], self.c.dst)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.fields['val'], res.fields)
eq_(self.basis['val'], res.basis)
eq_(self.algorithm['val'], res.algorithm)
eq_(self.max_link['val'], res.max_link)
eq_(self.arg['val'], res.arg)
eq_(self.ofs_nbits['val'], res.ofs_nbits)
eq_(self.dst['val'], res.dst)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.NX_ACTION_MULTIPATH_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
eq_(self.fields['val'], res[4])
eq_(self.basis['val'], res[5])
eq_(self.algorithm['val'], res[6])
eq_(self.max_link['val'], res[7])
eq_(self.arg['val'], res[8])
eq_(self.ofs_nbits['val'], res[9])
eq_(self.dst['val'], res[10])
class TestNXActionBundle(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionBundle
"""
# NX_ACTION_BUNDLE_PACK_STR
# '!HHIHHHHIHHI4x'...type, len, vendor, subtype, algorithm,
# fields, basis, slave_type, n_slaves,
# ofs_nbits, dst, zfill
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x20', 'val': ofproto.NX_ACTION_BUNDLE_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': ofproto.NX_VENDOR_ID}
subtype = {'buf': '\x00\x0c', 'val': ofproto.NXAST_BUNDLE}
algorithm = {'buf': '\x51\xa7', 'val': 20903}
fields = {'buf': '\xf8\xef', 'val': 63727}
basis = {'buf': '\xfd\x6f', 'val': 64879}
slave_type = {'buf': '\x7c\x51\x0f\xe0', 'val': 2085687264}
n_slaves = {'buf': '\x00\x02', 'val': 2}
ofs_nbits = {'buf': '\xec\xf7', 'val': 60663}
dst = {'buf': '\x50\x7c\x75\xfe', 'val': 1350333950}
zfill = '\x00' * 4
slaves_buf = ('\x00\x01', '\x00\x02')
slaves_val = (1, 2)
_len = len_['val'] + len(slaves_val) * 2
_len += (_len % 8)
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ algorithm['buf'] \
+ fields['buf'] \
+ basis['buf'] \
+ slave_type['buf'] \
+ n_slaves['buf'] \
+ ofs_nbits['buf'] \
+ dst['buf'] \
+ zfill \
+ slaves_buf[0] \
+ slaves_buf[1]
c = NXActionBundle(algorithm['val'],
fields['val'],
basis['val'],
slave_type['val'],
n_slaves['val'],
ofs_nbits['val'],
dst['val'],
slaves_val)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self._len, self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
eq_(self.algorithm['val'], self.c.algorithm)
eq_(self.fields['val'], self.c.fields)
eq_(self.basis['val'], self.c.basis)
eq_(self.slave_type['val'], self.c.slave_type)
eq_(self.n_slaves['val'], self.c.n_slaves)
eq_(self.ofs_nbits['val'], self.c.ofs_nbits)
eq_(self.dst['val'], self.c.dst)
# slaves
slaves = self.c.slaves
eq_(self.slaves_val[0], slaves[0])
eq_(self.slaves_val[1], slaves[1])
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.type_['val'], res.type)
eq_(self._len, res.len)
eq_(self.vendor['val'], res.vendor)
eq_(self.subtype['val'], res.subtype)
eq_(self.algorithm['val'], res.algorithm)
eq_(self.fields['val'], res.fields)
eq_(self.basis['val'], res.basis)
eq_(self.slave_type['val'], res.slave_type)
eq_(self.n_slaves['val'], res.n_slaves)
eq_(self.ofs_nbits['val'], res.ofs_nbits)
eq_(self.dst['val'], res.dst)
# slaves
slaves = res.slaves
eq_(self.slaves_val[0], slaves[0])
eq_(self.slaves_val[1], slaves[1])
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = '!' \
+ ofproto.NX_ACTION_BUNDLE_PACK_STR.replace('!', '') \
+ 'HH4x'
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self._len, res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
eq_(self.algorithm['val'], res[4])
eq_(self.fields['val'], res[5])
eq_(self.basis['val'], res[6])
eq_(self.slave_type['val'], res[7])
eq_(self.n_slaves['val'], res[8])
eq_(self.ofs_nbits['val'], res[9])
eq_(self.dst['val'], res[10])
class TestNXActionBundleLoad(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionBundleLoad
"""
# NX_ACTION_BUNDLE_PACK_STR
# '!HHIHHHHIHHI4x'...type, len, vendor, subtype, algorithm,
# fields, basis, slave_type, n_slaves,
# ofs_nbits, dst, zfill
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x20', 'val': ofproto.NX_ACTION_BUNDLE_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': ofproto.NX_VENDOR_ID}
subtype = {'buf': '\x00\x0d', 'val': ofproto.NXAST_BUNDLE_LOAD}
algorithm = {'buf': '\x83\x15', 'val': 33557}
fields = {'buf': '\xc2\x7a', 'val': 49786}
basis = {'buf': '\x86\x18', 'val': 34328}
slave_type = {'buf': '\x18\x42\x0b\x55', 'val': 406981461}
n_slaves = {'buf': '\x00\x02', 'val': 2}
ofs_nbits = {'buf': '\xd2\x9d', 'val': 53917}
dst = {'buf': '\x37\xfe\xb3\x60', 'val': 939438944}
zfill = '\x00' * 4
slaves_buf = ('\x00\x01', '\x00\x02')
slaves_val = (1, 2)
_len = len_['val'] + len(slaves_val) * 2
_len += (_len % 8)
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ algorithm['buf'] \
+ fields['buf'] \
+ basis['buf'] \
+ slave_type['buf'] \
+ n_slaves['buf'] \
+ ofs_nbits['buf'] \
+ dst['buf'] \
+ zfill \
+ slaves_buf[0] \
+ slaves_buf[1]
c = NXActionBundleLoad(algorithm['val'],
fields['val'],
basis['val'],
slave_type['val'],
n_slaves['val'],
ofs_nbits['val'],
dst['val'],
slaves_val)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self._len, self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
eq_(self.algorithm['val'], self.c.algorithm)
eq_(self.fields['val'], self.c.fields)
eq_(self.basis['val'], self.c.basis)
eq_(self.slave_type['val'], self.c.slave_type)
eq_(self.n_slaves['val'], self.c.n_slaves)
eq_(self.ofs_nbits['val'], self.c.ofs_nbits)
eq_(self.dst['val'], self.c.dst)
# slaves
slaves = self.c.slaves
eq_(self.slaves_val[0], slaves[0])
eq_(self.slaves_val[1], slaves[1])
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.type_['val'], res.type)
eq_(self._len, res.len)
eq_(self.vendor['val'], res.vendor)
eq_(self.subtype['val'], res.subtype)
eq_(self.algorithm['val'], res.algorithm)
eq_(self.fields['val'], res.fields)
eq_(self.basis['val'], res.basis)
eq_(self.slave_type['val'], res.slave_type)
eq_(self.n_slaves['val'], res.n_slaves)
eq_(self.ofs_nbits['val'], res.ofs_nbits)
eq_(self.dst['val'], res.dst)
# slaves
slaves = res.slaves
eq_(self.slaves_val[0], slaves[0])
eq_(self.slaves_val[1], slaves[1])
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = '!' \
+ ofproto.NX_ACTION_BUNDLE_PACK_STR.replace('!', '') \
+ 'HH4x'
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self._len, res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
eq_(self.algorithm['val'], res[4])
eq_(self.fields['val'], res[5])
eq_(self.basis['val'], res[6])
eq_(self.slave_type['val'], res[7])
eq_(self.n_slaves['val'], res[8])
eq_(self.ofs_nbits['val'], res[9])
eq_(self.dst['val'], res[10])
class TestNXActionAutopath(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionAutopath
"""
# NX_ACTION_AUTOPATH_PACK_STR
# '!HHIHHII4x'...type, len, vendor, subtype, ofs_nbits,
# dst, id_, zfill
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x20', 'val': ofproto.NX_ACTION_OUTPUT_REG_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': ofproto.NX_VENDOR_ID}
subtype = {'buf': '\x00\x0b', 'val': ofproto.NXAST_AUTOPATH}
ofs_nbits = {'buf': '\xfe\x78', 'val': 65144}
dst = {'buf': '\xf8\x55\x74\x95', 'val': 4166349973}
id_ = {'buf': '\x02\x2d\x37\xed', 'val': 36517869}
zfill = '\x00' * 4
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ ofs_nbits['buf'] \
+ dst['buf'] \
+ id_['buf'] \
+ zfill
c = NXActionAutopath(ofs_nbits['val'],
dst['val'],
id_['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self.len_['val'], self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
eq_(self.ofs_nbits['val'], self.c.ofs_nbits)
eq_(self.dst['val'], self.c.dst)
eq_(self.id_['val'], self.c.id)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.type_['val'], res.type)
eq_(self.len_['val'], res.len)
eq_(self.vendor['val'], res.vendor)
eq_(self.subtype['val'], res.subtype)
eq_(self.ofs_nbits['val'], res.ofs_nbits)
eq_(self.dst['val'], res.dst)
eq_(self.id_['val'], res.id)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.NX_ACTION_AUTOPATH_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
eq_(self.ofs_nbits['val'], res[4])
eq_(self.dst['val'], res[5])
eq_(self.id_['val'], res[6])
class TestNXActionOutputReg(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionOutputReg
"""
# NX_ACTION_OUTPUT_REG_PACK_STR
# '!HHIHHIH6x'...type, len, vendor, subtype, ofs_nbits,
# src, max_len, zfill
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x20', 'val': ofproto.NX_ACTION_OUTPUT_REG_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': ofproto.NX_VENDOR_ID}
subtype = {'buf': '\x00\x0f', 'val': ofproto.NXAST_OUTPUT_REG}
ofs_nbits = {'buf': '\xfe\x78', 'val': 65144}
src = {'buf': '\x5e\x3a\x04\x26', 'val': 1580860454}
max_len = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_OUTPUT_SIZE}
zfill = '\x00' * 6
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ ofs_nbits['buf'] \
+ src['buf'] \
+ max_len['buf'] \
+ zfill
c = NXActionOutputReg(ofs_nbits['val'],
src['val'],
max_len['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self.len_['val'], self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
eq_(self.ofs_nbits['val'], self.c.ofs_nbits)
eq_(self.src['val'], self.c.src)
eq_(self.max_len['val'], self.c.max_len)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.type_['val'], res.type)
eq_(self.len_['val'], res.len)
eq_(self.vendor['val'], res.vendor)
eq_(self.subtype['val'], res.subtype)
eq_(self.ofs_nbits['val'], res.ofs_nbits)
eq_(self.src['val'], res.src)
eq_(self.max_len['val'], res.max_len)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.NX_ACTION_OUTPUT_REG_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
eq_(self.ofs_nbits['val'], res[4])
eq_(self.src['val'], res[5])
eq_(self.max_len['val'], res[6])
class TestNXActionExit(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXActionExit
"""
# NX_ACTION_HEADER_PACK_STR
# '!HHIH'...type, len, vendor, subtype
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPAT_VENDOR}
len_ = {'buf': '\x00\x10', 'val': ofproto.NX_ACTION_HEADER_SIZE}
vendor = {'buf': '\x00\x00\x23\x20', 'val': ofproto.NX_VENDOR_ID}
subtype = {'buf': '\x00\x11', 'val': ofproto.NXAST_EXIT}
zfill = '\x00' * 6
buf = type_['buf'] \
+ len_['buf'] \
+ vendor['buf'] \
+ subtype['buf'] \
+ zfill
c = NXActionExit()
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.type_['val'], self.c.type)
eq_(self.len_['val'], self.c.len)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.subtype['val'], self.c.subtype)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.type_['val'], res.type)
eq_(self.len_['val'], res.len)
eq_(self.vendor['val'], res.vendor)
eq_(self.subtype['val'], res.subtype)
def test_serialize(self):
buf = bytearray()
self.c.serialize(buf, 0)
fmt = ofproto.NX_ACTION_HEADER_PACK_STR
res = struct.unpack(fmt, buffer(buf))
eq_(self.type_['val'], res[0])
eq_(self.len_['val'], res[1])
eq_(self.vendor['val'], res[2])
eq_(self.subtype['val'], res[3])
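# The *Stats tests that follow cover the bodies carried inside stats reply
# messages. Fixed-width string fields (the 256s/32s slots in the pack
# strings) are space-padded with ljust() so each hand-built field fills its
# full slot in the buffer.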
class TestOFPDescStats(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPDescStats
"""
# OFP_DESC_STATS_PACK_STR
# '!256s256s256s32s256s'...mfr_desc, hw_desc, sw_desc, serial_num, dp_desc
mfr_desc = 'mfr_desc'.ljust(256)
hw_desc = 'hw_desc'.ljust(256)
sw_desc = 'sw_desc'.ljust(256)
serial_num = 'serial_num'.ljust(32)
dp_desc = 'dp_desc'.ljust(256)
buf = mfr_desc \
+ hw_desc \
+ sw_desc \
+ serial_num \
+ dp_desc
c = OFPDescStats(mfr_desc, hw_desc, sw_desc, serial_num, dp_desc)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.mfr_desc, self.c.mfr_desc)
eq_(self.hw_desc, self.c.hw_desc)
eq_(self.sw_desc, self.c.sw_desc)
eq_(self.serial_num, self.c.serial_num)
eq_(self.dp_desc, self.c.dp_desc)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.mfr_desc, res.mfr_desc)
eq_(self.hw_desc, res.hw_desc)
eq_(self.sw_desc, res.sw_desc)
eq_(self.serial_num, res.serial_num)
eq_(self.dp_desc, res.dp_desc)
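# OFPFlowStats parsing is length-driven: _parser() below prepends either the
# bare body length or the length that accounts for one appended output
# action, and test_parser_append_actions checks that the action is only
# picked up in the latter case.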
class TestOFPFlowStats(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPFlowStats
"""
# OFP_FLOW_STATS_0_PACK_STR
# '!HBx'...length, table_id, zfill
length = {'buf': '\x00\x58', 'val': 88}
length_append_action = {'buf': '\x00\x60', 'val': 96}
table_id = {'buf': '\x51', 'val': 81}
zfill_0 = '\x00'
# OFP_MATCH_PACK_STR
# '!IH6s6sHBxHBB2xIIHH'...
match = '\x97\x7c\xa6\x1e' \
+ '\x5e\xa0' \
+ '\x7a\x3e\xed\x30\x4a\x90' \
+ '\x96\x8e\x67\xbe\x2f\xe2' \
+ '\xb1\x81' \
+ '\xbe' \
+ '\x00' \
+ '\x01\xab' \
+ '\x42' \
+ '\xfe' \
+ '\x00\x00' \
+ '\xa4\x5d\x5c\x42' \
+ '\xa2\x5c\x2e\x05' \
+ '\x5a\x94' \
+ '\x64\xd4'
# OFP_FLOW_STATS_1_PACK_STR
# '!IIHHH6xQQQ'...duration_sec, duration_nsec, priority,
# idle_timeout, hard_timeout, zfill,
# cookie, packet_count, byte_count
duration_sec = {'buf': '\x94\x19\xb3\xd2', 'val': 2484712402}
duration_nsec = {'buf': '\xee\x66\xcf\x7c', 'val': 3999715196}
priority = {'buf': '\xe1\xc0', 'val': 57792}
idle_timeout = {'buf': '\x8e\x10', 'val': 36368}
hard_timeout = {'buf': '\xd4\x99', 'val': 54425}
zfill_1 = '\x00\x00\x00\x00\x00\x00'
cookie = {'buf': '\x0b\x01\xe8\xe5\xf0\x84\x8a\xe0',
'val': 793171083674290912}
packet_count = {'buf': '\x47\x5c\xc6\x05\x28\xff\x7c\xdb',
'val': 5142202600015232219}
byte_count = {'buf': '\x24\xe9\x4b\xee\xcb\x57\xd9\xc3',
'val': 2659740543924820419}
# <action>_PACK_STR...type_, len_ [others...]
type = {'buf': '\x00\x00', 'val': ofproto.OFPAT_OUTPUT}
len = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_OUTPUT_SIZE}
port = {'buf': '\x59\x2a', 'val': 22826}
max_len = {'buf': '\x00\x08', 'val': ofproto.OFP_ACTION_OUTPUT_SIZE}
action = (type, len, port, max_len)
ACTION_TYPE = 0
ACTION_LEN = 1
ACTION_PORT = 2
ACTION_MAX_LEN = 3
c = OFPFlowStats()
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def _parser(self, action=None):
buf = self.table_id['buf'] \
+ self.zfill_0 \
+ self.match \
+ self.duration_sec['buf'] \
+ self.duration_nsec['buf'] \
+ self.priority['buf'] \
+ self.idle_timeout['buf'] \
+ self.hard_timeout['buf'] \
+ self.zfill_1 \
+ self.cookie['buf'] \
+ self.packet_count['buf'] \
+ self.byte_count['buf']
if not action:
buf = self.length['buf'] + buf
else:
buf = self.length_append_action['buf'] + buf
for a in self.action:
buf = buf + a['buf']
return self.c.parser(buf, 0)
def test_parser(self):
res = self._parser()
eq_(self.length['val'], res.length)
eq_(self.table_id['val'], res.table_id)
eq_(self.duration_sec['val'], res.duration_sec)
eq_(self.duration_nsec['val'], res.duration_nsec)
eq_(self.priority['val'], res.priority)
eq_(self.idle_timeout['val'], res.idle_timeout)
eq_(self.hard_timeout['val'], res.hard_timeout)
eq_(self.cookie['val'], res.cookie)
eq_(self.packet_count['val'], res.packet_count)
eq_(self.byte_count['val'], res.byte_count)
def test_parser_append_actions(self):
res = self._parser(True).actions[0]
eq_(self.action[self.ACTION_TYPE]['val'], res.type)
eq_(self.action[self.ACTION_LEN]['val'], res.len)
eq_(self.action[self.ACTION_PORT]['val'], res.port)
eq_(self.action[self.ACTION_MAX_LEN]['val'], res.max_len)
class TestOFPAggregateStats(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPAggregateStats
"""
# OFP_AGGREGATE_STATS_REPLY_PACK_STR
# '!QQI4x'...packet_count, byte_count, flow_count, zfill
packet_count = {'buf': '\x43\x95\x1b\xfb\x0f\xf6\xa7\xdd',
'val': 4869829337189623773}
byte_count = {'buf': '\x36\xda\x2d\x80\x2a\x95\x35\xdd',
'val': 3952521651464517085}
flow_count = {'buf': '\xc3\x0d\xc3\xed', 'val': 3272459245}
zfill = '\x00' * 4
buf = packet_count['buf'] \
+ byte_count['buf'] \
+ flow_count['buf'] \
+ zfill
c = OFPAggregateStats(packet_count['val'],
byte_count['val'],
flow_count['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.packet_count['val'], self.c.packet_count)
eq_(self.byte_count['val'], self.c.byte_count)
eq_(self.flow_count['val'], self.c.flow_count)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.packet_count['val'], res.packet_count)
eq_(self.byte_count['val'], res.byte_count)
eq_(self.flow_count['val'], res.flow_count)
class TestOFPTableStats(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPTableStats
"""
# OFP_TABLE_STATS_PACK_STR
# '!B3x32sIIIQQ'...table_id, zfill, name, wildcards, max_entries,
# active_count, lookup_count, matched_count
table_id = {'buf': '\x5b', 'val': 91}
zfill = '\x00' * 3
name = 'name'.ljust(32)
wildcards = {'buf': '\xc5\xaf\x6e\x12', 'val': 3316608530}
max_entries = {'buf': '\x95\x6c\x78\x4d', 'val': 2506913869}
active_count = {'buf': '\x78\xac\xa8\x1e', 'val': 2024581150}
lookup_count = {'buf': '\x40\x1d\x9c\x39\x19\xec\xd4\x1c',
'val': 4620020561814017052}
matched_count = {'buf': '\x27\x35\x02\xb6\xc5\x5e\x17\x65',
'val': 2825167325263435621}
buf = table_id['buf'] \
+ zfill \
+ name \
+ wildcards['buf'] \
+ max_entries['buf'] \
+ active_count['buf'] \
+ lookup_count['buf'] \
+ matched_count['buf']
c = OFPTableStats(table_id['val'],
name,
wildcards['val'],
max_entries['val'],
active_count['val'],
lookup_count['val'],
matched_count['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.table_id['val'], self.c.table_id)
eq_(self.name, self.c.name)
eq_(self.wildcards['val'], self.c.wildcards)
eq_(self.max_entries['val'], self.c.max_entries)
eq_(self.active_count['val'], self.c.active_count)
eq_(self.lookup_count['val'], self.c.lookup_count)
eq_(self.matched_count['val'], self.c.matched_count)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.table_id['val'], res.table_id)
eq_(self.name, res.name)
eq_(self.wildcards['val'], res.wildcards)
eq_(self.max_entries['val'], res.max_entries)
eq_(self.active_count['val'], res.active_count)
eq_(self.lookup_count['val'], res.lookup_count)
eq_(self.matched_count['val'], res.matched_count)
class TestOFPPortStats(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPPortStats
"""
# OFP_PORT_STATS_PACK_STR
# '!H6xQQQQQQQQQQQQ'... port_no, zfill, rx_packets, tx_packets,
# rx_bytes, tx_bytes, rx_dropped, tx_dropped,
# rx_errors, tx_errors, rx_frame_err,
# rx_over_err, rx_crc_err, collisions
port_no = {'buf': '\xe7\x6b', 'val': 59243}
zfill = '\x00' * 6
rx_packets = {'buf': '\x53\x44\x36\x61\xc4\x86\xc0\x37',
'val': 5999980397101236279}
tx_packets = {'buf': '\x27\xa4\x41\xd7\xd4\x53\x9e\x42',
'val': 2856480458895760962}
rx_bytes = {'buf': '\x55\xa1\x38\x60\x43\x97\x0d\x89',
'val': 6170274950576278921}
tx_bytes = {'buf': '\x77\xe1\xd5\x63\x18\xae\x63\xaa',
'val': 8638420181865882538}
rx_dropped = {'buf': '\x60\xe6\x20\x01\x24\xda\x4e\x5a',
'val': 6982303461569875546}
tx_dropped = {'buf': '\x09\x2d\x5d\x71\x71\xb6\x8e\xc7',
'val': 661287462113808071}
rx_errors = {'buf': '\x2f\x7e\x35\xb3\x66\x3c\x19\x0d',
'val': 3422231811478788365}
tx_errors = {'buf': '\x57\x32\x08\x2f\x88\x32\x40\x6b',
'val': 6283093430376743019}
rx_frame_err = {'buf': '\x0c\x28\x6f\xad\xce\x66\x6e\x8b',
'val': 876072919806406283}
rx_over_err = {'buf': '\x5a\x90\x8f\x9b\xfc\x82\x2e\xa0',
'val': 6525873760178941600}
rx_crc_err = {'buf': '\x73\x3a\x71\x17\xd6\x74\x69\x47',
'val': 8303073210207070535}
collisions = {'buf': '\x2f\x52\x0c\x79\x96\x03\x6e\x79',
'val': 3409801584220270201}
buf = port_no['buf'] \
+ zfill \
+ rx_packets['buf'] \
+ tx_packets['buf'] \
+ rx_bytes['buf'] \
+ tx_bytes['buf'] \
+ rx_dropped['buf'] \
+ tx_dropped['buf'] \
+ rx_errors['buf'] \
+ tx_errors['buf'] \
+ rx_frame_err['buf'] \
+ rx_over_err['buf'] \
+ rx_crc_err['buf'] \
+ collisions['buf']
c = OFPPortStats(port_no['val'],
rx_packets['val'],
tx_packets['val'],
rx_bytes['val'],
tx_bytes['val'],
rx_dropped['val'],
tx_dropped['val'],
rx_errors['val'],
tx_errors['val'],
rx_frame_err['val'],
rx_over_err['val'],
rx_crc_err['val'],
collisions['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.port_no['val'], self.c.port_no)
eq_(self.rx_packets['val'], self.c.rx_packets)
eq_(self.tx_packets['val'], self.c.tx_packets)
eq_(self.rx_bytes['val'], self.c.rx_bytes)
eq_(self.tx_bytes['val'], self.c.tx_bytes)
eq_(self.rx_dropped['val'], self.c.rx_dropped)
eq_(self.tx_dropped['val'], self.c.tx_dropped)
eq_(self.rx_errors['val'], self.c.rx_errors)
eq_(self.tx_errors['val'], self.c.tx_errors)
eq_(self.rx_frame_err['val'], self.c.rx_frame_err)
eq_(self.rx_over_err['val'], self.c.rx_over_err)
eq_(self.rx_crc_err['val'], self.c.rx_crc_err)
eq_(self.collisions['val'], self.c.collisions)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.port_no['val'], res.port_no)
eq_(self.rx_packets['val'], res.rx_packets)
eq_(self.tx_packets['val'], res.tx_packets)
eq_(self.rx_bytes['val'], res.rx_bytes)
eq_(self.tx_bytes['val'], res.tx_bytes)
eq_(self.rx_dropped['val'], res.rx_dropped)
eq_(self.tx_dropped['val'], res.tx_dropped)
eq_(self.rx_errors['val'], res.rx_errors)
eq_(self.tx_errors['val'], res.tx_errors)
eq_(self.rx_frame_err['val'], res.rx_frame_err)
eq_(self.rx_over_err['val'], res.rx_over_err)
eq_(self.rx_crc_err['val'], res.rx_crc_err)
eq_(self.collisions['val'], res.collisions)
class TestOFPQueueStats(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPQueueStats
"""
# OFP_QUEUE_STATS_PACK_STR
# '!H2xIQQQ'...port_no, queue_id, tx_bytes, tx_packets, tx_errors
port_no = {'buf': '\xe7\x6b', 'val': 59243}
zfill = '\x00' * 2
queue_id = {'buf': '\x2a\xa8\x7f\x32', 'val': 715685682}
tx_bytes = {'buf': '\x77\xe1\xd5\x63\x18\xae\x63\xaa',
'val': 8638420181865882538}
tx_packets = {'buf': '\x27\xa4\x41\xd7\xd4\x53\x9e\x42',
'val': 2856480458895760962}
tx_errors = {'buf': '\x57\x32\x08\x2f\x88\x32\x40\x6b',
'val': 6283093430376743019}
c = OFPQueueStats(port_no['val'],
queue_id['val'],
tx_bytes['val'],
tx_packets['val'],
tx_errors['val'])
buf = port_no['buf'] \
+ zfill \
+ queue_id['buf'] \
+ tx_bytes['buf'] \
+ tx_packets['buf'] \
+ tx_errors['buf']
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.port_no['val'], self.c.port_no)
eq_(self.queue_id['val'], self.c.queue_id)
eq_(self.tx_bytes['val'], self.c.tx_bytes)
eq_(self.tx_packets['val'], self.c.tx_packets)
eq_(self.tx_errors['val'], self.c.tx_errors)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.port_no['val'], res.port_no)
eq_(self.queue_id['val'], res.queue_id)
eq_(self.tx_bytes['val'], res.tx_bytes)
eq_(self.tx_packets['val'], res.tx_packets)
eq_(self.tx_errors['val'], res.tx_errors)
class TestOFPVendorStats(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPVendorStats
"""
specific_data = 'specific_data'
specific_data_after = 'data'
offset = specific_data.find(specific_data_after)
c = OFPVendorStats(specific_data)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.specific_data, self.c.specific_data)
def test_parser(self):
res = self.c.parser(self.specific_data, self.offset)
eq_(self.specific_data_after, res.specific_data)
class TestOFPQueuePropNone(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPQueuePropNone
"""
# OFP_QUEUE_PROP_HEADER_PACK_STR
# '!HH4x'...property_, len_
property = {'buf': '\x00\x00', 'val': ofproto.OFPQT_NONE}
len = {'buf': '\x00\x08', 'val': ofproto.OFP_QUEUE_PROP_HEADER_SIZE}
zfill = '\x00' * 4
c = OFPQueuePropNone()
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
cls = OFPQueuePropHeader._QUEUE_PROPERTIES[self.c.cls_prop_type]
eq_(self.property['val'], self.c.cls_prop_type)
eq_(self.property['val'], self.c.property)
eq_(self.property['val'], cls.cls_prop_type)
eq_(self.len['val'], self.c.cls_prop_len)
eq_(self.len['val'], self.c.len)
eq_(self.len['val'], cls.cls_prop_len)
def test_parser(self):
buf = self.property['buf'] \
+ self.len['buf'] \
+ self.zfill
ok_(self.c.parser(buf, 0))
class TestOFPQueuePropMinRate(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPQueuePropMinRate
"""
# OFP_QUEUE_PROP_MIN_RATE_PACK_STR
# '!H6x'...rate
rate = {'buf': '\x00\x01', 'val': ofproto.OFPQT_MIN_RATE}
len = {'buf': '\x00\x10', 'val': ofproto.OFP_QUEUE_PROP_MIN_RATE_SIZE}
zfill = '\x00' * 6
buf = rate['buf'] \
+ zfill
c = OFPQueuePropMinRate(rate['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
cls = OFPQueuePropHeader._QUEUE_PROPERTIES[self.c.cls_prop_type]
eq_(self.rate['val'], self.c.cls_prop_type)
eq_(self.rate['val'], self.c.rate)
eq_(self.rate['val'], cls.cls_prop_type)
eq_(self.len['val'], self.c.cls_prop_len)
eq_(self.len['val'], cls.cls_prop_len)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.rate['val'], res.rate)
class TestOFPPacketQueue(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPPacketQueue
"""
# OFP_PACKET_QUEUE_PQCK_STR
# '!IH2x'...queue_id, len_, zfill
queue_id = {'buf': '\x4d\x4b\x3a\xd1', 'val': 1296775889}
len_ = {'buf': '\x00\x08',
'val': ofproto.OFP_QUEUE_PROP_HEADER_SIZE}
zfill = '\x00' * 2
buf = queue_id['buf'] \
+ len_['buf'] \
+ zfill
c = OFPPacketQueue(queue_id['val'],
len_['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.queue_id['val'], self.c.queue_id)
eq_(self.len_['val'], self.c.len)
def test_parser(self):
res = self.c.parser(self.buf, 0)
eq_(self.queue_id['val'], res.queue_id)
eq_(self.len_['val'], res.len)
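# Variant with one min-rate property appended after the queue header: the
# parser is expected to turn it into the matching property instance and
# expose it through res.properties.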
def test_parser_append_prop(self):
# OFP_QUEUE_PROP_HEADER_PACK_STR + OFP_QUEUE_PROP_MIN_RATE_PACK_STR
# '!HH4xH6x'...type, len, zfill, rate, zfill
len_ = {'buf': '\x00\x10',
'val': ofproto.OFP_QUEUE_PROP_MIN_RATE_SIZE}
a_type = {'buf': '\x00\x01', 'val': ofproto.OFPQT_MIN_RATE}
a_len = {'buf': '\x00\x10',
'val': ofproto.OFP_QUEUE_PROP_MIN_RATE_SIZE}
a_zfill0 = '\x00' * 4
a_rate = {'buf': '\x00\x01', 'val': ofproto.OFPQT_MIN_RATE}
a_zfill1 = '\x00' * 6
buf = self.queue_id['buf'] \
+ len_['buf'] \
+ self.zfill \
+ a_type['buf'] \
+ a_len['buf'] \
+ a_zfill0 \
+ a_rate['buf'] \
+ a_zfill1
res = self.c.parser(buf, 0)
eq_(self.queue_id['val'], res.queue_id)
eq_(len_['val'], res.len)
append_cls = res.properties[0]
eq_(a_type['val'], append_cls.property)
eq_(a_len['val'], append_cls.len)
eq_(a_rate['val'], append_cls.rate)
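# From here on the tests exercise full OpenFlow messages rather than bare
# structures. test_parser feeds <Msg>.parser() a hand-packed header
# (version, msg_type, msg_len, xid) plus body, and test_serialize checks the
# buffer produced by serialize() with struct.unpack. The nested Datapath
# class is a minimal stand-in that only provides the ofproto /
# ofproto_parser attributes serialize() needs.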
class TestOFPHello(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPHello
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = ofproto.OFP_VERSION
msg_type = ofproto.OFPT_HELLO
msg_len = ofproto.OFP_HEADER_SIZE
xid = 2183948390
data = '\x00\x01\x02\x03'
fmt = ofproto.OFP_HEADER_PACK_STR
buf = struct.pack(fmt, version, msg_type, msg_len, xid) \
+ data
res = OFPHello.parser(object, version, msg_type, msg_len, xid,
bytearray(buf))
eq_(version, res.version)
eq_(msg_type, res.msg_type)
eq_(msg_len, res.msg_len)
eq_(xid, res.xid)
eq_(buffer(buf), res.buf)
def test_serialize(self):
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPHello(Datapath)
c.serialize()
eq_(ofproto.OFP_VERSION, c.version)
eq_(ofproto.OFPT_HELLO, c.msg_type)
eq_(0, c.xid)
class TestOFPErrorMsg(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPErrorMsg
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x01', 'val': ofproto.OFPT_ERROR}
msg_len = {'buf': '\x00\x0c',
'val': ofproto.OFP_ERROR_MSG_SIZE}
xid = {'buf': '\x87\x8b\x26\x7c', 'val': 2274043516}
type = {'buf': '\xab\x3e', 'val': 43838}
code = {'buf': '\x5d\x3c', 'val': 23868}
data = 'Error Message.'
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf'] \
+ type['buf'] \
+ code['buf'] \
+ data
res = OFPErrorMsg.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(type['val'], res.type)
eq_(code['val'], res.code)
eq_(data, res.data)
def test_serialize(self):
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
type = 1306
code = 13774
data = 'Error Message.'
c = OFPErrorMsg(Datapath)
c.type = type
c.code = code
c.data = data
c.serialize()
eq_(ofproto.OFP_VERSION, c.version)
eq_(ofproto.OFPT_ERROR, c.msg_type)
eq_(0, c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.OFP_ERROR_MSG_PACK_STR.replace('!', '') \
+ str(len(data)) + 's'
res = struct.unpack(fmt, str(c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_ERROR, res[1])
eq_(len(c.buf), res[2])
eq_(0, res[3])
eq_(type, res[4])
eq_(code, res[5])
eq_(data, res[6])
class TestOFPEchoRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPEchoRequest
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x02', 'val': ofproto.OFPT_ECHO_REQUEST}
msg_len = {'buf': '\x00\x08',
'val': ofproto.OFP_HEADER_SIZE}
xid = {'buf': '\x84\x47\xef\x3f', 'val': 2219306815}
data = 'Request Message.'
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf'] \
+ data
res = OFPEchoRequest.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(data, res.data)
def test_serialize(self):
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
data = 'Request Message.'
c = OFPEchoRequest(Datapath)
c.data = data
c.serialize()
eq_(ofproto.OFP_VERSION, c.version)
eq_(ofproto.OFPT_ECHO_REQUEST, c.msg_type)
eq_(0, c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ str(len(data)) + 's'
res = struct.unpack(fmt, str(c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_ECHO_REQUEST, res[1])
eq_(len(c.buf), res[2])
eq_(0, res[3])
eq_(data, res[4])
class TestOFPEchoReply(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPEchoReply
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x03', 'val': ofproto.OFPT_ECHO_REPLY}
msg_len = {'buf': '\x00\x08',
'val': ofproto.OFP_HEADER_SIZE}
xid = {'buf': '\x6e\x21\x3e\x62', 'val': 1847672418}
data = 'Reply Message.'
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf'] \
+ data
res = OFPEchoReply.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(data, res.data)
def test_serialize(self):
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
data = 'Reply Message.'
c = OFPEchoReply(Datapath)
c.data = data
c.serialize()
eq_(ofproto.OFP_VERSION, c.version)
eq_(ofproto.OFPT_ECHO_REPLY, c.msg_type)
eq_(0, c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ str(len(data)) + 's'
res = struct.unpack(fmt, str(c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_ECHO_REPLY, res[1])
eq_(len(c.buf), res[2])
eq_(0, res[3])
eq_(data, res[4])
class TestOFPVendor(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPVendor
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x04', 'val': ofproto.OFPT_VENDOR}
msg_len = {'buf': '\x00\x0c',
'val': ofproto.OFP_VENDOR_HEADER_SIZE}
xid = {'buf': '\x05\x45\xdf\x18', 'val': 88465176}
vendor = {'buf': '\x53\xea\x25\x3e', 'val': 1407853886}
data = 'Vendor Message.'
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf'] \
+ vendor['buf'] \
+ data
res = OFPVendor.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(vendor['val'], res.vendor)
eq_(data, res.data)
def test_serialize(self):
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
vendor = {'buf': '\x38\x4b\xf9\x6c', 'val': 944503148}
data = 'Reply Message.'
c = OFPVendor(Datapath)
c.vendor = vendor['val']
c.data = data
c.serialize()
eq_(ofproto.OFP_VERSION, c.version)
eq_(ofproto.OFPT_VENDOR, c.msg_type)
eq_(0, c.xid)
eq_(vendor['val'], c.vendor)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.OFP_VENDOR_HEADER_PACK_STR.replace('!', '') \
+ str(len(data)) + 's'
res = struct.unpack(fmt, str(c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_VENDOR, res[1])
eq_(len(c.buf), res[2])
eq_(0, res[3])
eq_(vendor['val'], res[4])
eq_(data, res[5])
# class TestNXTRequest(unittest.TestCase):
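# The NXT* tests in this group cover Nicira vendor request messages. They
# are serialized as OFPT_VENDOR messages carrying NX_VENDOR_ID and a
# subtype, which is why every test_serialize unpacks the OFP header, the
# Nicira header and then the subtype-specific payload.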
class TestNiciraHeader(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NiciraHeader
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
subtype = ofproto.NXT_FLOW_MOD_TABLE_ID
c = NiciraHeader(object, subtype)
eq_(subtype, c.subtype)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
data = 'Reply Message.'
subtype = ofproto.NXT_FLOW_MOD_TABLE_ID
c = NiciraHeader(Datapath, subtype)
c.data = data
c.serialize()
eq_(ofproto.OFP_VERSION, c.version)
eq_(ofproto.OFPT_VENDOR, c.msg_type)
eq_(0, c.xid)
eq_(ofproto.NX_VENDOR_ID, c.vendor)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.NICIRA_HEADER_PACK_STR.replace('!', '') \
+ str(len(data)) + 's'
res = struct.unpack(fmt, str(c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_VENDOR, res[1])
eq_(len(c.buf), res[2])
eq_(0, res[3])
eq_(ofproto.NX_VENDOR_ID, res[4])
eq_(subtype, res[5])
eq_(data, res[6])
class TestNXTSetFlowFormat(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXTSetFlowFormat
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
flow_format = {'buf': '\xdc\x6b\xf5\x24', 'val': 3698062628}
c = NXTSetFlowFormat(object, flow_format['val'])
eq_(flow_format['val'], c.format)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
flow_format = {'buf': '\x5a\x4e\x59\xad', 'val': 1515084205}
c = NXTSetFlowFormat(Datapath, flow_format['val'])
c.serialize()
eq_(ofproto.OFP_VERSION, c.version)
eq_(ofproto.OFPT_VENDOR, c.msg_type)
eq_(0, c.xid)
eq_(ofproto.NX_VENDOR_ID, c.vendor)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.NICIRA_HEADER_PACK_STR.replace('!', '') \
+ ofproto.NX_SET_FLOW_FORMAT_PACK_STR.replace('!', '')
res = struct.unpack(fmt, str(c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_VENDOR, res[1])
eq_(len(c.buf), res[2])
eq_(0, res[3])
eq_(ofproto.NX_VENDOR_ID, res[4])
eq_(ofproto.NXT_SET_FLOW_FORMAT, res[5])
eq_(flow_format['val'], res[6])
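# NXTFlowMod is built through _get_obj() so the same message can be
# serialized both with and without a trailing OFPActionOutput; the
# action-appended variant simply extends the unpack format with
# OFP_ACTION_OUTPUT_PACK_STR.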
class TestNXTFlowMod(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXTFlowMod
"""
# NX_FLOW_MOD_PACK_STR
# '!Q4HI3H6x'...cookie, command, idle_timeout, hard_timeout,
# priority, buffer_id, out_port, flags, rule, zfill
cookie = {'buf': '\x04\x56\x27\xad\xbd\x43\xd6\x83',
'val': 312480851306993283}
command = {'buf': '\x61\xaa', 'val': 25002}
idle_timeout = {'buf': '\x4e\xff', 'val': 20223}
hard_timeout = {'buf': '\x80\x16', 'val': 32790}
priority = {'buf': '\x70\x5f', 'val': 28767}
buffer_id = {'buf': '\x7b\x97\x3a\x09', 'val': 2073508361}
out_port = {'buf': '\x11\x7d', 'val': 4477}
flags = {'buf': '\x5c\xb9', 'val': 23737}
rule = nx_match.ClsRule()
zfill = '\x00' * 6
port = {'buf': '\x2a\xe0', 'val': 10976}
actions = [OFPActionOutput(port['val'])]
def _get_obj(self, append_action=False):
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
actions = None
if append_action:
actions = self.actions
c = NXTFlowMod(Datapath,
self.cookie['val'],
self.command['val'],
self.idle_timeout['val'],
self.hard_timeout['val'],
self.priority['val'],
self.buffer_id['val'],
self.out_port['val'],
self.flags['val'],
self.rule,
actions)
return c
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
c = self._get_obj()
eq_(self.cookie['val'], c.cookie)
eq_(self.command['val'], c.command)
eq_(self.idle_timeout['val'], c.idle_timeout)
eq_(self.hard_timeout['val'], c.hard_timeout)
eq_(self.priority['val'], c.priority)
eq_(self.buffer_id['val'], c.buffer_id)
eq_(self.out_port['val'], c.out_port)
eq_(self.flags['val'], c.flags)
eq_(self.rule.__hash__(), c.rule.__hash__())
def test_init_append_actions(self):
c = self._get_obj(True)
action = c.actions[0]
eq_(ofproto.OFPAT_OUTPUT, action.type)
eq_(ofproto.OFP_ACTION_OUTPUT_SIZE, action.len)
eq_(self.port['val'], action.port)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
c = self._get_obj()
c.serialize()
eq_(ofproto.OFP_VERSION, c.version)
eq_(ofproto.OFPT_VENDOR, c.msg_type)
eq_(0, c.xid)
eq_(ofproto.NX_VENDOR_ID, c.vendor)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.NICIRA_HEADER_PACK_STR.replace('!', '') \
+ ofproto.NX_FLOW_MOD_PACK_STR.replace('!', '')
res = struct.unpack(fmt, str(c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_VENDOR, res[1])
eq_(len(c.buf), res[2])
eq_(0, res[3])
eq_(ofproto.NX_VENDOR_ID, res[4])
eq_(ofproto.NXT_FLOW_MOD, res[5])
eq_(self.cookie['val'], res[6])
eq_(self.command['val'], res[7])
eq_(self.idle_timeout['val'], res[8])
eq_(self.hard_timeout['val'], res[9])
eq_(self.priority['val'], res[10])
eq_(self.buffer_id['val'], res[11])
eq_(self.out_port['val'], res[12])
eq_(self.flags['val'], res[13])
def test_serialize_append_actions(self):
c = self._get_obj(True)
c.serialize()
eq_(ofproto.OFP_VERSION, c.version)
eq_(ofproto.OFPT_VENDOR, c.msg_type)
eq_(0, c.xid)
eq_(ofproto.NX_VENDOR_ID, c.vendor)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.NICIRA_HEADER_PACK_STR.replace('!', '') \
+ ofproto.NX_FLOW_MOD_PACK_STR.replace('!', '') \
+ ofproto.OFP_ACTION_OUTPUT_PACK_STR.replace('!', '')
res = struct.unpack(fmt, str(c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_VENDOR, res[1])
eq_(len(c.buf), res[2])
eq_(0, res[3])
eq_(ofproto.NX_VENDOR_ID, res[4])
eq_(ofproto.NXT_FLOW_MOD, res[5])
eq_(self.cookie['val'], res[6])
eq_(self.command['val'], res[7])
eq_(self.idle_timeout['val'], res[8])
eq_(self.hard_timeout['val'], res[9])
eq_(self.priority['val'], res[10])
eq_(self.buffer_id['val'], res[11])
eq_(self.out_port['val'], res[12])
eq_(self.flags['val'], res[13])
# action
eq_(0, res[14])
eq_(ofproto.OFPAT_OUTPUT, res[15])
eq_(ofproto.OFP_ACTION_OUTPUT_SIZE, res[16])
eq_(self.port['val'], res[17])
eq_(0xffe5, res[18])
class TestNXTRoleRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXTRoleRequest
"""
# NX_ROLE_PACK_STR
# '!I'...role
role = {'buf': '\x62\x81\x27\x61', 'val': 1652631393}
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = NXTRoleRequest(Datapath, role['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.role['val'], self.c.role)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_VENDOR, self.c.msg_type)
eq_(0, self.c.xid)
eq_(ofproto.NX_VENDOR_ID, self.c.vendor)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.NICIRA_HEADER_PACK_STR.replace('!', '') \
+ ofproto.NX_ROLE_PACK_STR.replace('!', '')
res = struct.unpack(fmt, str(self.c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_VENDOR, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
eq_(ofproto.NX_VENDOR_ID, res[4])
eq_(ofproto.NXT_ROLE_REQUEST, res[5])
eq_(self.role['val'], res[6])
class TestNXTFlowModTableId(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.NXTFlowModTableId
"""
# NX_FLOW_MOD_TABLE_ID_PACK_STR
# '!B7x'...set_, zfill
set_ = {'buf': '\x71', 'val': 113}
zfill = '\x00' * 7
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = NXTFlowModTableId(Datapath, set_['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.set_['val'], self.c.set)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_VENDOR, self.c.msg_type)
eq_(0, self.c.xid)
eq_(ofproto.NX_VENDOR_ID, self.c.vendor)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.NICIRA_HEADER_PACK_STR.replace('!', '') \
+ ofproto.NX_FLOW_MOD_TABLE_ID_PACK_STR.replace('!', '')
res = struct.unpack(fmt, str(self.c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_VENDOR, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
eq_(ofproto.NX_VENDOR_ID, res[4])
eq_(ofproto.NXT_FLOW_MOD_TABLE_ID, res[5])
eq_(self.set_['val'], res[6])
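# The remaining message tests are parser-only: these are switch-to-controller
# messages, so test_serialize is left as a stub. Each buffer is the OFP
# header followed by the body laid out in the pack-string comments.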
class TestOFPSwitchFeatures(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPSwitchFeatures
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPSwitchFeatures(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x06', 'val': ofproto.OFPT_FEATURES_REPLY}
msg_len_val = ofproto.OFP_SWITCH_FEATURES_SIZE \
+ ofproto.OFP_PHY_PORT_SIZE
msg_len = {'buf': '\x00\x4c', 'val': msg_len_val}
xid = {'buf': '\xcc\x0a\x41\xd4', 'val': 3423224276}
# OFP_SWITCH_FEATURES_PACK_STR
# '!QIB3xII'...datapath_id, n_buffers, n_tables,
# zfill, capabilities, actions
datapath_id = {'buf': '\x11\xa3\x72\x63\x61\xde\x39\x81',
'val': 1270985291017894273}
n_buffers = {'buf': '\x80\x14\xd7\xf6', 'val': 2148849654}
n_tables = {'buf': '\xe4', 'val': 228}
zfill = '\x00' * 3
capabilities = {'buf': '\x69\x4f\xe4\xc2', 'val': 1766843586}
actions = {'buf': '\x78\x06\xd9\x0c', 'val': 2013714700}
# OFP_PHY_PORT_PACK_STR
# '!H6s16sIIIIII'...port_no, hw_addr, name, config, state,
#                   curr, advertised, supported, peer
port_no = {'buf': '\xe7\x6b', 'val': 59243}
hw_addr = '3c:d1:2b:8d:3f:d6'
name = 'name'.ljust(16)
config = {'buf': '\x84\xb6\x8c\x53', 'val': 2226555987}
state = {'buf': '\x64\x07\xfb\xc9', 'val': 1678244809}
curr = {'buf': '\xa9\xe8\x0a\x2b', 'val': 2850556459}
advertised = {'buf': '\x78\xb9\x7b\x72', 'val': 2025421682}
supported = {'buf': '\x7e\x65\x68\xad', 'val': 2120575149}
peer = {'buf': '\xa4\x5b\x8b\xed', 'val': 2757463021}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf'] \
+ datapath_id['buf'] \
+ n_buffers['buf'] \
+ n_tables['buf'] \
+ zfill \
+ capabilities['buf'] \
+ actions['buf'] \
+ port_no['buf'] \
+ addrconv.mac.text_to_bin(hw_addr) \
+ name \
+ config['buf'] \
+ state['buf'] \
+ curr['buf'] \
+ advertised['buf'] \
+ supported['buf'] \
+ peer['buf']
res = OFPSwitchFeatures.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(datapath_id['val'], res.datapath_id)
eq_(n_buffers['val'], res.n_buffers)
eq_(n_tables['val'], res.n_tables)
eq_(capabilities['val'], res.capabilities)
eq_(actions['val'], res.actions)
# port
port = res.ports[port_no['val']]
eq_(port_no['val'], port.port_no)
eq_(hw_addr, port.hw_addr)
eq_(name, port.name)
eq_(config['val'], port.config)
eq_(state['val'], port.state)
eq_(curr['val'], port.curr)
eq_(advertised['val'], port.advertised)
eq_(supported['val'], port.supported)
eq_(peer['val'], port.peer)
def test_serialize(self):
# Not used.
pass
class TestOFPPortStatus(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPPortStatus
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPPortStatus(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x0c', 'val': ofproto.OFPT_PORT_STATUS}
msg_len = {'buf': '\x00\x40',
'val': ofproto.OFP_PORT_STATUS_SIZE}
xid = {'buf': '\x06\x27\x8b\x7b', 'val': 103254907}
# OFP_PORT_STATUS_PACK_STR
# '!B7xH6s16sIIIIII'...reason, zfill, port_no, hw_addr,
# name, config, state, curr,
# advertised, supported, peer
reason = {'buf': '\x71', 'val': 113}
zfill = '\x00' * 7
port_no = {'buf': '\x48\xd8', 'val': 18648}
hw_addr = '41:f7:a3:52:8f:6b'
name = 'name'.ljust(16)
config = {'buf': '\xae\x73\x90\xec', 'val': 2926809324}
state = {'buf': '\x41\x37\x32\x1d', 'val': 1094136349}
curr = {'buf': '\xa9\x47\x13\x2c', 'val': 2840007468}
advertised = {'buf': '\xce\x6b\x4a\x87', 'val': 3463137927}
supported = {'buf': '\xb8\x06\x65\xa1', 'val': 3087426977}
peer = {'buf': '\x6a\x11\x52\x39', 'val': 1779520057}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf'] \
+ reason['buf'] \
+ zfill \
+ port_no['buf'] \
+ addrconv.mac.text_to_bin(hw_addr) \
+ name \
+ config['buf'] \
+ state['buf'] \
+ curr['buf'] \
+ advertised['buf'] \
+ supported['buf'] \
+ peer['buf']
res = OFPPortStatus.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(reason['val'], res.reason)
# desc
desc = res.desc
eq_(port_no['val'], desc.port_no)
eq_(hw_addr, desc.hw_addr)
eq_(name, desc.name)
eq_(config['val'], desc.config)
eq_(state['val'], desc.state)
eq_(curr['val'], desc.curr)
eq_(advertised['val'], desc.advertised)
eq_(supported['val'], desc.supported)
eq_(peer['val'], desc.peer)
def test_serialize(self):
# Not used.
pass
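# OFPPacketIn is parsed twice: once with the payload exactly 16 bytes long
# and once padded to 20 bytes. In both cases only the first 16 bytes are
# expected back in res.data (see the data[0:16] comparison below).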
class TestOFPPacketIn(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPPacketIn
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPPacketIn(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def _test_parser(self, padding=False):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x0a', 'val': ofproto.OFPT_PACKET_IN}
msg_len = {'buf': '\x00\x14',
'val': ofproto.OFP_PACKET_IN_SIZE}
xid = {'buf': '\xd0\x23\x8c\x34', 'val': 3491990580}
# OFP_PACKET_IN_PACK_STR
# '!IHHBx2x'...buffer_id, total_len,
# in_port, reason, zfill, data
buffer_id = {'buf': '\xae\x73\x90\xec', 'val': 2926809324}
total_len = {'buf': '\x00\x10', 'val': 16}
in_port = {'buf': '\x08\x42', 'val': 2114}
reason = {'buf': '\x43', 'val': 67}
zfill = '\x00' * 1
if padding:
data = 'PACKET IN'.ljust(20)
else:
data = 'PACKET IN'.ljust(16)
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf'] \
+ buffer_id['buf'] \
+ total_len['buf'] \
+ in_port['buf'] \
+ reason['buf'] \
+ zfill \
+ data
res = OFPPacketIn.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(buffer_id['val'], res.buffer_id)
eq_(total_len['val'], res.total_len)
eq_(in_port['val'], res.in_port)
eq_(reason['val'], res.reason)
eq_(data[0:16], res.data)
return True
def test_parser(self):
ok_(self._test_parser())
def test_parser_padding(self):
ok_(self._test_parser(True))
def test_serialize(self):
# Not used.
pass
class TestOFPGetConfigReply(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPGetConfigReply
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPGetConfigReply(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x0a', 'val': ofproto.OFPT_GET_CONFIG_REPLY}
msg_len = {'buf': '\x00\x14',
'val': ofproto.OFP_SWITCH_CONFIG_SIZE}
xid = {'buf': '\x94\xc4\xd2\xcd', 'val': 2495926989}
# OFP_SWITCH_CONFIG_PACK_STR
# '!HH'...flags, miss_send_len
flags = {'buf': '\xa0\xe2', 'val': 41186}
miss_send_len = {'buf': '\x36\x0e', 'val': 13838}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf'] \
+ flags['buf'] \
+ miss_send_len['buf']
res = OFPGetConfigReply.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(flags['val'], res.flags)
eq_(miss_send_len['val'], res.miss_send_len)
def test_serialize(self):
# Not used.
pass
class TestOFPBarrierReply(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPBarrierReply
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPBarrierReply(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x13', 'val': ofproto.OFPT_BARRIER_REPLY}
msg_len = {'buf': '\x00\x08',
'val': ofproto.OFP_HEADER_SIZE}
xid = {'buf': '\x66\xc4\xc3\xac', 'val': 1724171180}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf']
res = OFPBarrierReply.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
def test_serialize(self):
# Not used.
pass
class TestOFPFlowRemoved(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPFlowRemoved
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPFlowRemoved(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x0a', 'val': ofproto.OFPT_FLOW_REMOVED}
msg_len = {'buf': '\x00\x14',
'val': ofproto.OFP_FLOW_REMOVED_SIZE}
xid = {'buf': '\x94\xc4\xd2\xcd', 'val': 2495926989}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf']
# OFP_MATCH_PACK_STR
# '!IH6s6sHBxHBB2xIIHH'...wildcards, in_port, dl_src, dl_dst, dl_vlan,
# dl_vlan_pcp, dl_type, nw_tos, nw_proto,
# nw_src, nw_dst, tp_src, tp_dst
wildcards = {'buf': '\xd2\x71\x25\x23', 'val': 3530630435}
in_port = {'buf': '\x37\x8b', 'val': 14219}
dl_src = '\x7f\x85\xc4\x70\x12\xda'
dl_dst = '\x0a\x51\x17\x58\xb0\xbb'
dl_vlan = {'buf': '\xc1\xf9', 'val': 49657}
dl_vlan_pcp = {'buf': '\x79', 'val': 121}
zfill0 = '\x00'
dl_type = {'buf': '\xa6\x9e', 'val': 42654}
nw_tos = {'buf': '\xde', 'val': 222}
nw_proto = {'buf': '\xe5', 'val': 229}
zfill1 = '\x00' * 2
nw_src = {'buf': '\x1b\x6d\x8d\x4b', 'val': 460164427}
nw_dst = {'buf': '\xab\x25\xe1\x20', 'val': 2871386400}
tp_src = {'buf': '\xd5\xc3', 'val': 54723}
tp_dst = {'buf': '\x78\xb9', 'val': 30905}
buf += wildcards['buf'] \
+ in_port['buf'] \
+ dl_src \
+ dl_dst \
+ dl_vlan['buf'] \
+ dl_vlan_pcp['buf'] \
+ zfill0 \
+ dl_type['buf'] \
+ nw_tos['buf'] \
+ nw_proto['buf'] \
+ zfill1 \
+ nw_src['buf'] \
+ nw_dst['buf'] \
+ tp_src['buf'] \
+ tp_dst['buf']
# OFP_FLOW_REMOVED_PACK_STR0
# '!QHBxIIH2xQQ'...cookie, priority, reason, zfill,
# duration_sec, duration_nsec, idle_timeout,
# zfill, packet_count, byte_count
cookie = {'buf': '\x02\x79\xba\x00\xef\xab\xee\x44',
'val': 178378173441633860}
priority = {'buf': '\x02\xce', 'val': 718}
reason = {'buf': '\xa9', 'val': 169}
zfill0 = '\x00' * 1
duration_sec = {'buf': '\x86\x24\xa3\xba', 'val': 2250548154}
duration_nsec = {'buf': '\x94\x94\xc2\x23', 'val': 2492776995}
idle_timeout = {'buf': '\xeb\x7c', 'val': 60284}
zfill1 = '\x00' * 2
packet_count = {'buf': '\x5a\x0d\xf2\x03\x8e\x0a\xbb\x8d',
'val': 6489108735192644493}
byte_count = {'buf': '\x65\xc8\xd3\x72\x51\xb5\xbb\x7c',
'val': 7334344481123449724}
buf += cookie['buf'] \
+ priority['buf'] \
+ reason['buf'] \
+ zfill0 \
+ duration_sec['buf'] \
+ duration_nsec['buf'] \
+ idle_timeout['buf'] \
+ zfill1 \
+ packet_count['buf'] \
+ byte_count['buf']
res = OFPFlowRemoved.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(cookie['val'], res.cookie)
eq_(priority['val'], res.priority)
eq_(reason['val'], res.reason)
eq_(duration_sec['val'], res.duration_sec)
eq_(duration_nsec['val'], res.duration_nsec)
eq_(idle_timeout['val'], res.idle_timeout)
eq_(packet_count['val'], res.packet_count)
eq_(byte_count['val'], res.byte_count)
# match
match = res.match
eq_(wildcards['val'], match.wildcards)
eq_(in_port['val'], match.in_port)
eq_(dl_src, match.dl_src)
eq_(dl_dst, match.dl_dst)
eq_(dl_vlan['val'], match.dl_vlan)
eq_(dl_vlan_pcp['val'], match.dl_vlan_pcp)
eq_(dl_type['val'], match.dl_type)
eq_(nw_tos['val'], match.nw_tos)
eq_(nw_proto['val'], match.nw_proto)
eq_(nw_src['val'], match.nw_src)
eq_(nw_dst['val'], match.nw_dst)
eq_(tp_src['val'], match.tp_src)
eq_(tp_dst['val'], match.tp_dst)
def test_serialize(self):
# Not used.
pass
class TestOFPQueueGetConfigReply(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPQueueGetConfigReply
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPQueueGetConfigReply(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x0a',
'val': ofproto.OFPT_QUEUE_GET_CONFIG_REPLY}
msg_len_val = ofproto.OFP_QUEUE_GET_CONFIG_REPLY_SIZE \
+ ofproto.OFP_PACKET_QUEUE_SIZE
msg_len = {'buf': '\x00\x14', 'val': msg_len_val}
xid = {'buf': '\x94\xc4\xd2\xcd', 'val': 2495926989}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf']
# OFP_QUEUE_GET_CONFIG_REPLY_PACK_STR
# '!H6x'...port, zfill
port = {'buf': '\xfe\x66', 'val': 65126}
zfill = '\x00' * 6
buf += port['buf'] \
+ zfill
# OFP_PACKET_QUEUE_PQCK_STR
# '!IH2x'...queue_id, len_, zfill
queue_id = {'buf': '\x4d\x4b\x3a\xd1', 'val': 1296775889}
len_ = {'buf': '\x00\x08',
'val': ofproto.OFP_QUEUE_PROP_HEADER_SIZE}
zfill = '\x00' * 2
buf += queue_id['buf'] \
+ len_['buf'] \
+ zfill
res = OFPQueueGetConfigReply.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(port['val'], res.port)
# queue
queue = res.queues[0]
eq_(queue_id['val'], queue.queue_id)
eq_(len_['val'], queue.len)
def test_serialize(self):
# Not used.
pass
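# The *StatsReply tests wrap the per-structure bodies tested earlier in an
# OFP_STATS_MSG header (type_, flags) and check that parser() surfaces the
# decoded structures through res.body.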
class TestOFPDescStatsReply(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPDescStatsReply
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPDescStatsReply(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x11', 'val': ofproto.OFPT_STATS_REPLY}
msg_len_val = ofproto.OFP_STATS_MSG_SIZE \
+ ofproto.OFP_DESC_STATS_SIZE
msg_len = {'buf': '\x04\x38', 'val': msg_len_val}
xid = {'buf': '\x94\xc4\xd2\xcd', 'val': 2495926989}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf']
# OFP_STATS_MSG_PACK_STR
# '!HH'...type_, flags
type_ = {'buf': '\x00\x00', 'val': ofproto.OFPST_DESC}
flags = {'buf': '\x30\xd9', 'val': 12505}
buf += type_['buf'] \
+ flags['buf']
# stats_type_cls = OFPDescStats
# OFP_DESC_STATS_PACK_STR
# '!256s256s256s32s256s'...mfr_desc, hw_desc, sw_desc,
# serial_num, dp_desc
mfr_desc = 'mfr_desc'.ljust(256)
hw_desc = 'hw_desc'.ljust(256)
sw_desc = 'sw_desc'.ljust(256)
serial_num = 'serial_num'.ljust(32)
dp_desc = 'dp_desc'.ljust(256)
buf += mfr_desc \
+ hw_desc \
+ sw_desc \
+ serial_num \
+ dp_desc
res = OFPDescStatsReply.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(type_['val'], res.type)
eq_(flags['val'], res.flags)
# body
body = res.body
eq_(mfr_desc, body.mfr_desc)
eq_(hw_desc, body.hw_desc)
eq_(sw_desc, body.sw_desc)
eq_(serial_num, body.serial_num)
eq_(dp_desc, body.dp_desc)
def test_serialize(self):
# Not used.
pass
class TestOFPFlowStatsReply(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPFlowStatsReply
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPFlowStatsReply(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x11', 'val': ofproto.OFPT_STATS_REPLY}
msg_len_val = ofproto.OFP_STATS_MSG_SIZE \
+ ofproto.OFP_FLOW_STATS_SIZE
msg_len = {'buf': '\x00\x64', 'val': msg_len_val}
xid = {'buf': '\x94\xc4\xd2\xcd', 'val': 2495926989}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf']
# OFP_STATS_MSG_PACK_STR
# '!HH'...type_, flags
type_ = {'buf': '\x00\x01', 'val': ofproto.OFPST_FLOW}
flags = {'buf': '\x95\xf4', 'val': 38388}
buf += type_['buf'] \
+ flags['buf']
# stats_type_cls = OFPFlowStats
# OFP_FLOW_STATS_0_PACK_STR
# '!HBx'...length, table_id, zfill
length = {'buf': '\x00\x60', 'val': 96}
table_id = {'buf': '\x51', 'val': 81}
zfill = '\x00'
buf += length['buf'] \
+ table_id['buf'] \
+ zfill
# OFP_MATCH_PACK_STR
# '!IH6s6sHBxHBB2xIIHH'...
match = '\x97\x7c\xa6\x1e' \
+ '\x5e\xa0' \
+ '\x70\x17\xdc\x80\x59\x9e' \
+ '\x79\xc6\x56\x87\x92\x28' \
+ '\xb1\x81' \
+ '\xbe' \
+ '\x00' \
+ '\x01\xab' \
+ '\x42' \
+ '\xfe' \
+ '\x00\x00' \
+ '\xa4\x5d\x5c\x42' \
+ '\xa2\x5c\x2e\x05' \
+ '\x5a\x94' \
+ '\x64\xd4'
buf += match
# OFP_FLOW_STATS_1_PACK_STR
# '!IIHHH6xQQQ'...duration_sec, duration_nsec, priority,
# idle_timeout, hard_timeout, zfill,
# cookie, packet_count, byte_count
duration_sec = {'buf': '\x94\x19\xb3\xd2', 'val': 2484712402}
duration_nsec = {'buf': '\xee\x66\xcf\x7c', 'val': 3999715196}
priority = {'buf': '\xe1\xc0', 'val': 57792}
idle_timeout = {'buf': '\x8e\x10', 'val': 36368}
hard_timeout = {'buf': '\xd4\x99', 'val': 54425}
zfill = '\x00' * 6
cookie = {'buf': '\x0b\x01\xe8\xe5\xf0\x84\x8a\xe0',
'val': 793171083674290912}
packet_count = {'buf': '\x47\x5c\xc6\x05\x28\xff\x7c\xdb',
'val': 5142202600015232219}
byte_count = {'buf': '\x24\xe9\x4b\xee\xcb\x57\xd9\xc3',
'val': 2659740543924820419}
buf += duration_sec['buf']
buf += duration_nsec['buf']
buf += priority['buf']
buf += idle_timeout['buf']
buf += hard_timeout['buf']
buf += zfill
buf += cookie['buf']
buf += packet_count['buf']
buf += byte_count['buf']
# <action>_PACK_STR...type_, len_ [others...]
type = {'buf': '\x00\x00', 'val': ofproto.OFPAT_OUTPUT}
len = {'buf': '\x00\x08',
'val': ofproto.OFP_ACTION_OUTPUT_SIZE}
port = {'buf': '\x59\x2a', 'val': 22826}
max_len = {'buf': '\x00\x08',
'val': ofproto.OFP_ACTION_OUTPUT_SIZE}
buf += type['buf'] \
+ len['buf'] \
+ port['buf'] \
+ max_len['buf']
res = OFPFlowStatsReply.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(type_['val'], res.type)
eq_(flags['val'], res.flags)
# body
body = res.body[0]
eq_(length['val'], body.length)
eq_(table_id['val'], body.table_id)
eq_(duration_sec['val'], body.duration_sec)
eq_(duration_nsec['val'], body.duration_nsec)
eq_(priority['val'], body.priority)
eq_(idle_timeout['val'], body.idle_timeout)
eq_(hard_timeout['val'], body.hard_timeout)
eq_(cookie['val'], body.cookie)
eq_(packet_count['val'], body.packet_count)
eq_(byte_count['val'], body.byte_count)
# action
action = body.actions[0]
eq_(type['val'], action.type)
eq_(len['val'], action.len)
eq_(port['val'], action.port)
eq_(max_len['val'], action.max_len)
def test_serialize(self):
# Not used.
pass
class TestOFPAggregateStatsReply(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPAggregateStatsReply
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPAggregateStatsReply(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x11', 'val': ofproto.OFPT_STATS_REPLY}
msg_len_val = ofproto.OFP_STATS_MSG_SIZE \
+ ofproto.OFP_AGGREGATE_STATS_REPLY_SIZE
msg_len = {'buf': '\x00\x4c', 'val': msg_len_val}
xid = {'buf': '\xc6\xd6\xce\x38', 'val': 3335966264}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf']
# OFP_STATS_MSG_PACK_STR
# '!HH'...type_, flags
type_ = {'buf': '\x00\x02', 'val': ofproto.OFPST_AGGREGATE}
flags = {'buf': '\x65\x66', 'val': 25958}
buf += type_['buf'] \
+ flags['buf']
# stats_type_cls = OFPAggregateStats
# OFP_AGGREGATE_STATS_REPLY_PACK_STR
# '!QQI4x'...packet_count, byte_count, flow_count, zfill
packet_count = {'buf': '\x43\x95\x1b\xfb\x0f\xf6\xa7\xdd',
'val': 4869829337189623773}
byte_count = {'buf': '\x36\xda\x2d\x80\x2a\x95\x35\xdd',
'val': 3952521651464517085}
flow_count = {'buf': '\xc3\x0d\xc3\xed', 'val': 3272459245}
zfill = '\x00' * 4
buf += packet_count['buf'] \
+ byte_count['buf'] \
+ flow_count['buf'] \
+ zfill
res = OFPAggregateStatsReply.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(type_['val'], res.type)
eq_(flags['val'], res.flags)
# body
body = res.body[0]
eq_(packet_count['val'], body.packet_count)
eq_(byte_count['val'], body.byte_count)
eq_(flow_count['val'], body.flow_count)
def test_serialize(self):
# Not used.
pass
class TestOFPTableStatsReply(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPTableStatsReply
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPTableStatsReply(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x11', 'val': ofproto.OFPT_STATS_REPLY}
msg_len_val = ofproto.OFP_STATS_MSG_SIZE \
+ ofproto.OFP_TABLE_STATS_SIZE
msg_len = {'buf': '\x00\x4c', 'val': msg_len_val}
xid = {'buf': '\xd6\xb4\x8d\xe6', 'val': 3602157030}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf']
# OFP_STATS_MSG_PACK_STR
# '!HH'...type_, flags
type_ = {'buf': '\x00\x03', 'val': ofproto.OFPST_TABLE}
flags = {'buf': '\xb3\xf0', 'val': 46064}
buf += type_['buf'] \
+ flags['buf']
# stats_type_cls = OFPTableStats
# OFP_TABLE_STATS_PACK_STR
# '!B3x32sIIIQQ'...table_id, zfill, name, wildcards, max_entries,
# active_count, lookup_count, matched_count
table_id = {'buf': '\x5b', 'val': 91}
zfill = '\x00' * 3
name = 'name'.ljust(32)
wildcards = {'buf': '\xc5\xaf\x6e\x12', 'val': 3316608530}
max_entries = {'buf': '\x95\x6c\x78\x4d', 'val': 2506913869}
active_count = {'buf': '\x78\xac\xa8\x1e', 'val': 2024581150}
lookup_count = {'buf': '\x40\x1d\x9c\x39\x19\xec\xd4\x1c',
'val': 4620020561814017052}
matched_count = {'buf': '\x27\x35\x02\xb6\xc5\x5e\x17\x65',
'val': 2825167325263435621}
buf += table_id['buf'] \
+ zfill \
+ name \
+ wildcards['buf'] \
+ max_entries['buf'] \
+ active_count['buf'] \
+ lookup_count['buf'] \
+ matched_count['buf']
res = OFPTableStatsReply.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(type_['val'], res.type)
eq_(flags['val'], res.flags)
# body
body = res.body[0]
eq_(table_id['val'], body.table_id)
eq_(name, body.name)
eq_(wildcards['val'], body.wildcards)
eq_(max_entries['val'], body.max_entries)
eq_(active_count['val'], body.active_count)
eq_(lookup_count['val'], body.lookup_count)
eq_(matched_count['val'], body.matched_count)
def test_serialize(self):
# Not used.
pass
class TestOFPPortStatsReply(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPPortStatsReply
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPPortStatsReply(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x11', 'val': ofproto.OFPT_STATS_REPLY}
msg_len_val = ofproto.OFP_STATS_MSG_SIZE \
+ ofproto.OFP_PORT_STATS_SIZE
msg_len = {'buf': '\x00\x74', 'val': msg_len_val}
xid = {'buf': '\xc2\xaf\x3d\xff', 'val': 3266264575}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf']
# OFP_STATS_MSG_PACK_STR
# '!HH'...type_, flags
type_ = {'buf': '\x00\x04', 'val': ofproto.OFPST_PORT}
flags = {'buf': '\xda\xde', 'val': 56030}
buf += type_['buf'] \
+ flags['buf']
# stats_type_cls = OFPPortStats
# OFP_PORT_STATS_PACK_STR
# '!H6xQQQQQQQQQQQQ'... port_no, zfill, rx_packets, tx_packets,
# rx_bytes, tx_bytes, rx_dropped, tx_dropped,
# rx_errors, tx_errors, rx_frame_err,
# rx_over_err, rx_crc_err, collisions
port_no = {'buf': '\xe7\x6b', 'val': 59243}
zfill = '\x00' * 6
rx_packets = {'buf': '\x53\x44\x36\x61\xc4\x86\xc0\x37',
'val': 5999980397101236279}
tx_packets = {'buf': '\x27\xa4\x41\xd7\xd4\x53\x9e\x42',
'val': 2856480458895760962}
rx_bytes = {'buf': '\x55\xa1\x38\x60\x43\x97\x0d\x89',
'val': 6170274950576278921}
tx_bytes = {'buf': '\x77\xe1\xd5\x63\x18\xae\x63\xaa',
'val': 8638420181865882538}
rx_dropped = {'buf': '\x60\xe6\x20\x01\x24\xda\x4e\x5a',
'val': 6982303461569875546}
tx_dropped = {'buf': '\x09\x2d\x5d\x71\x71\xb6\x8e\xc7',
'val': 661287462113808071}
rx_errors = {'buf': '\x2f\x7e\x35\xb3\x66\x3c\x19\x0d',
'val': 3422231811478788365}
tx_errors = {'buf': '\x57\x32\x08\x2f\x88\x32\x40\x6b',
'val': 6283093430376743019}
rx_frame_err = {'buf': '\x0c\x28\x6f\xad\xce\x66\x6e\x8b',
'val': 876072919806406283}
rx_over_err = {'buf': '\x5a\x90\x8f\x9b\xfc\x82\x2e\xa0',
'val': 6525873760178941600}
rx_crc_err = {'buf': '\x73\x3a\x71\x17\xd6\x74\x69\x47',
'val': 8303073210207070535}
collisions = {'buf': '\x2f\x52\x0c\x79\x96\x03\x6e\x79',
'val': 3409801584220270201}
buf += port_no['buf'] \
+ zfill \
+ rx_packets['buf'] \
+ tx_packets['buf'] \
+ rx_bytes['buf'] \
+ tx_bytes['buf'] \
+ rx_dropped['buf'] \
+ tx_dropped['buf'] \
+ rx_errors['buf'] \
+ tx_errors['buf'] \
+ rx_frame_err['buf'] \
+ rx_over_err['buf'] \
+ rx_crc_err['buf'] \
+ collisions['buf']
res = OFPPortStatsReply.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(type_['val'], res.type)
eq_(flags['val'], res.flags)
# body
body = res.body[0]
eq_(port_no['val'], body.port_no)
eq_(rx_packets['val'], body.rx_packets)
eq_(tx_packets['val'], body.tx_packets)
eq_(rx_bytes['val'], body.rx_bytes)
eq_(tx_bytes['val'], body.tx_bytes)
eq_(rx_dropped['val'], body.rx_dropped)
eq_(tx_dropped['val'], body.tx_dropped)
eq_(rx_errors['val'], body.rx_errors)
eq_(tx_errors['val'], body.tx_errors)
eq_(rx_frame_err['val'], body.rx_frame_err)
eq_(rx_over_err['val'], body.rx_over_err)
eq_(rx_crc_err['val'], body.rx_crc_err)
eq_(collisions['val'], body.collisions)
def test_serialize(self):
# Not used.
pass
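# Note: the stats-reply parsers return their body as a list, which is why
# the checks above index res.body[0]; a reply carrying several port entries
# would simply yield several elements.  The 8-byte 'buf' strings correspond
# to the 'Q' (unsigned 64-bit) counters in OFP_PORT_STATS_PACK_STR.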
class TestOFPQueueStatsReply(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPQueueStatsReply
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPQueueStatsReply(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x11', 'val': ofproto.OFPT_STATS_REPLY}
msg_len_val = ofproto.OFP_STATS_MSG_SIZE \
+ ofproto.OFP_QUEUE_STATS_SIZE
msg_len = {'buf': '\x00\x2c', 'val': msg_len_val}
xid = {'buf': '\x19\xfc\x28\x6c', 'val': 435955820}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf']
# OFP_STATS_MSG_PACK_STR
# '!HH'...type_, flags
type_ = {'buf': '\x00\x05', 'val': ofproto.OFPST_QUEUE}
flags = {'buf': '\x3b\x2b', 'val': 15147}
buf += type_['buf'] \
+ flags['buf']
# stats_type_cls = OFPQueueStats
# OFP_QUEUE_STATS_PACK_STR
        # '!H2xIQQQ'...port_no, queue_id, tx_bytes, tx_packets, tx_errors
port_no = {'buf': '\xe7\x6b', 'val': 59243}
zfill = '\x00' * 2
queue_id = {'buf': '\x2a\xa8\x7f\x32', 'val': 715685682}
tx_bytes = {'buf': '\x77\xe1\xd5\x63\x18\xae\x63\xaa',
'val': 8638420181865882538}
tx_packets = {'buf': '\x27\xa4\x41\xd7\xd4\x53\x9e\x42',
'val': 2856480458895760962}
tx_errors = {'buf': '\x57\x32\x08\x2f\x88\x32\x40\x6b',
'val': 6283093430376743019}
buf += port_no['buf'] \
+ zfill \
+ queue_id['buf'] \
+ tx_bytes['buf'] \
+ tx_packets['buf'] \
+ tx_errors['buf']
res = OFPQueueStatsReply.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(type_['val'], res.type)
eq_(flags['val'], res.flags)
# body
body = res.body[0]
eq_(port_no['val'], body.port_no)
eq_(queue_id['val'], body.queue_id)
eq_(tx_bytes['val'], body.tx_bytes)
eq_(tx_packets['val'], body.tx_packets)
eq_(tx_errors['val'], body.tx_errors)
def test_serialize(self):
# Not used.
pass
class TestOFPVendorStatsReply(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPVendorStatsReply
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPVendorStatsReply(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
version = {'buf': '\x01', 'val': ofproto.OFP_VERSION}
msg_type = {'buf': '\x11', 'val': ofproto.OFPT_STATS_REPLY}
# ofproto.OFP_STATS_MSG_SIZE + len(specific_data)
msg_len = {'buf': '\x00\x18',
'val': ofproto.OFP_STATS_MSG_SIZE + 12}
xid = {'buf': '\x94\xc4\xd2\xcd', 'val': 2495926989}
buf = version['buf'] \
+ msg_type['buf'] \
+ msg_len['buf'] \
+ xid['buf']
# OFP_STATS_MSG_PACK_STR
# '!HH'...type_, flags
type_ = {'buf': '\xff\xff', 'val': ofproto.OFPST_VENDOR}
flags = {'buf': '\x30\xd9', 'val': 12505}
buf += type_['buf'] \
+ flags['buf']
# stats_type_cls = OFPVendorStats
specific_data = 'specific_data'
buf += specific_data
res = OFPVendorStatsReply.parser(object,
version['val'],
msg_type['val'],
msg_len['val'],
xid['val'],
buf)
eq_(version['val'], res.version)
eq_(msg_type['val'], res.msg_type)
eq_(msg_len['val'], res.msg_len)
eq_(xid['val'], res.xid)
eq_(type_['val'], res.type)
eq_(flags['val'], res.flags)
# body
body = res.body[0]
eq_(specific_data, body)
def test_serialize(self):
# Not used.
pass
class TestOFPFeaturesRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPFeaturesRequest
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPFeaturesRequest(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_FEATURES_REQUEST, self.c.msg_type)
eq_(0, self.c.xid)
fmt = ofproto.OFP_HEADER_PACK_STR
res = struct.unpack(fmt, str(self.c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_FEATURES_REQUEST, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
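# Note: the header checks in these serialize tests rely on the OpenFlow 1.0
# header layout -- one byte each for version and message type, a 2-byte
# length and a 4-byte xid (a '!BBHI' pack string) -- so unpacking with
# OFP_HEADER_PACK_STR always yields (version, msg_type, msg_len, xid),
# i.e. res[0] through res[3].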
class TestOFPGetConfigRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPGetConfigRequest
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPGetConfigRequest(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_GET_CONFIG_REQUEST, self.c.msg_type)
eq_(0, self.c.xid)
fmt = ofproto.OFP_HEADER_PACK_STR
res = struct.unpack(fmt, str(self.c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_GET_CONFIG_REQUEST, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
class TestOFPSetConfig(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPSetConfig
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
# OFP_SWITCH_CONFIG_PACK_STR
# '!HH'...flags, miss_send_len
flags = {'buf': '\xa0\xe2', 'val': 41186}
miss_send_len = {'buf': '\x36\x0e', 'val': 13838}
c = OFPSetConfig(Datapath,
flags['val'],
miss_send_len['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.flags['val'], self.c.flags)
eq_(self.miss_send_len['val'], self.c.miss_send_len)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_SET_CONFIG, self.c.msg_type)
eq_(0, self.c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.OFP_SWITCH_CONFIG_PACK_STR.replace('!', '')
res = struct.unpack(fmt, str(self.c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_SET_CONFIG, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
eq_(self.flags['val'], res[4])
eq_(self.miss_send_len['val'], res[5])
class TestOFPPacketOut(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPPacketOut
"""
port = 0x2ae0
actions = [OFPActionOutput(port, max_len=0)]
def setUp(self):
pass
def tearDown(self):
pass
def _get_obj(self, buffer_id, in_port, data=None):
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPPacketOut(Datapath,
buffer_id,
in_port,
self.actions,
data)
return c
def test_init(self):
buffer_id = 0xffffffff
in_port = 0x40455
data = 'Message'
c = self._get_obj(buffer_id, in_port, data)
eq_(buffer_id, c.buffer_id)
eq_(in_port, c.in_port)
eq_(data, c.data)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
buffer_id = 0xffffffff
in_port = 0x9e07
data = 'Message'
c = self._get_obj(buffer_id, in_port, data)
c.serialize()
eq_(ofproto.OFP_VERSION, c.version)
eq_(ofproto.OFPT_PACKET_OUT, c.msg_type)
eq_(0, c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.OFP_PACKET_OUT_PACK_STR.replace('!', '') \
+ ofproto.OFP_ACTION_OUTPUT_PACK_STR.replace('!', '') \
+ str(len(data)) + 's'
res = struct.unpack(fmt, str(c.buf))
# OFP_HEADER_PACK_STR
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_PACKET_OUT, res[1])
eq_(len(c.buf), res[2])
eq_(0, res[3])
# OFP_PACKET_OUT_PACK_STR
eq_(buffer_id, res[4])
eq_(in_port, res[5])
eq_(ofproto.OFP_ACTION_OUTPUT_SIZE, res[6])
# OFP_ACTION_OUTPUT_PACK_STR
eq_(ofproto.OFPAT_OUTPUT, res[7])
eq_(ofproto.OFP_ACTION_OUTPUT_SIZE, res[8])
eq_(self.port, res[9])
eq_(0, res[10])
# data
eq_(data, res[11])
@raises(AssertionError)
def test_serialize_check_buffer_id(self):
buffer_id = 0xffffff00
in_port = 0xaa92
data = 'Message'
c = self._get_obj(buffer_id, in_port, data)
c.serialize()
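# --- Illustrative sketch (not part of the original Ryu test suite) ---
# The constructor order exercised above is how a packet-out would be sent
# from a live OpenFlow 1.0 datapath.  The helper below is a sketch only: it
# assumes a connected `datapath` object and uses buffer_id 0xffffffff, the
# value the buffer-id assertion above expects when raw packet data is
# attached.
def _example_send_packet_out(datapath, in_port, out_port_no, data):
    actions = [datapath.ofproto_parser.OFPActionOutput(out_port_no, max_len=0)]
    out = datapath.ofproto_parser.OFPPacketOut(
        datapath, 0xffffffff, in_port, actions, data)
    datapath.send_msg(out)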
class TestOFPFlowMod(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPFlowMod
"""
# OFP_FLOW_MOD_PACK_STR0
# '!QHHHHIHH'...cookie, command, idle_timeout, hard_timeout,
# priority, buffer_id, out_port, flags
cookie = {'buf': '\x1d\x86\xce\x6e\x8d\xc0\xbe\xa8',
'val': 2127614848199081640}
command = {'buf': '\xe1\x55', 'val': 57685}
idle_timeout = {'buf': '\xf3\x6d', 'val': 62317}
hard_timeout = {'buf': '\x1c\xc5', 'val': 7365}
priority = {'buf': '\x9c\xe3', 'val': 40163}
buffer_id = {'buf': '\xf0\xa1\x80\x33', 'val': 4037115955}
out_port = {'buf': '\xfe\x0d', 'val': 65037}
flags = {'buf': '\x00\x87', 'val': 135}
# OFP_MATCH_PACK_STR
# '!IH6s6sHBxHBB2xIIHH'...wildcards, in_port, dl_src, dl_dst, dl_vlan,
# dl_vlan_pcp, dl_type, nw_tos, nw_proto,
# nw_src, nw_dst, tp_src, tp_dst
wildcards = {'buf': '\xd2\x71\x25\x23', 'val': 3530630435}
in_port = {'buf': '\x37\x8b', 'val': 14219}
dl_src = '\xdf\xcf\xe1\x5d\xcf\xc0'
dl_dst = '\x76\xb3\xfb\xc6\x21\x2f'
dl_vlan = {'buf': '\xc1\xf9', 'val': 49657}
dl_vlan_pcp = {'buf': '\x79', 'val': 121}
zfill0 = '\x00'
dl_type = {'buf': '\xa6\x9e', 'val': 42654}
nw_tos = {'buf': '\xde', 'val': 222}
nw_proto = {'buf': '\xe5', 'val': 229}
zfil11 = '\x00' * 2
nw_src = {'buf': '\x1b\x6d\x8d\x4b', 'val': 460164427}
nw_dst = {'buf': '\xab\x25\xe1\x20', 'val': 2871386400}
tp_src = {'buf': '\xd5\xc3', 'val': 54723}
tp_dst = {'buf': '\x78\xb9', 'val': 30905}
match = OFPMatch(wildcards['val'],
in_port['val'],
dl_src,
dl_dst,
dl_vlan['val'],
dl_vlan_pcp['val'],
dl_type['val'],
nw_tos['val'],
nw_proto['val'],
nw_src['val'],
nw_dst['val'],
tp_src['val'],
tp_dst['val'])
port = 0x2ae0
actions = [OFPActionOutput(port, max_len=1000)]
def setUp(self):
pass
def tearDown(self):
pass
def _get_obj(self, actions=None):
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPFlowMod(Datapath,
self.match,
self.cookie['val'],
self.command['val'],
self.idle_timeout['val'],
self.hard_timeout['val'],
self.priority['val'],
self.buffer_id['val'],
self.out_port['val'],
self.flags['val'],
actions)
return c
def test_init(self):
c = self._get_obj()
eq_(self.cookie['val'], c.cookie)
eq_(self.command['val'], c.command)
eq_(self.idle_timeout['val'], c.idle_timeout)
eq_(self.hard_timeout['val'], c.hard_timeout)
eq_(self.priority['val'], c.priority)
eq_(self.buffer_id['val'], c.buffer_id)
eq_(self.out_port['val'], c.out_port)
eq_(self.flags['val'], c.flags)
def test_init_actions(self):
c = self._get_obj(self.actions)
action = c.actions[0]
eq_(self.port, action.port)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
c = self._get_obj(self.actions)
c.serialize()
eq_(ofproto.OFP_VERSION, c.version)
eq_(ofproto.OFPT_FLOW_MOD, c.msg_type)
eq_(0, c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.OFP_MATCH_PACK_STR.replace('!', '') \
+ ofproto.OFP_FLOW_MOD_PACK_STR0.replace('!', '') \
+ ofproto.OFP_ACTION_OUTPUT_PACK_STR.replace('!', '')
res = struct.unpack(fmt, str(c.buf))
# OFP_HEADER_PACK_STR
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_FLOW_MOD, res[1])
eq_(len(c.buf), res[2])
eq_(0, res[3])
# OFP_MATCH_PACK_STR
eq_(self.wildcards['val'], res[4])
eq_(self.in_port['val'], res[5])
eq_(self.dl_src, res[6])
eq_(self.dl_dst, res[7])
eq_(self.dl_vlan['val'], res[8])
eq_(self.dl_vlan_pcp['val'], res[9])
eq_(self.dl_type['val'], res[10])
eq_(self.nw_tos['val'], res[11])
eq_(self.nw_proto['val'], res[12])
eq_(self.nw_src['val'], res[13])
eq_(self.nw_dst['val'], res[14])
eq_(self.tp_src['val'], res[15])
eq_(self.tp_dst['val'], res[16])
# OFP_FLOW_MOD_PACK_STR0
eq_(self.cookie['val'], res[17])
eq_(self.command['val'], res[18])
eq_(self.idle_timeout['val'], res[19])
eq_(self.hard_timeout['val'], res[20])
eq_(self.priority['val'], res[21])
eq_(self.buffer_id['val'], res[22])
eq_(self.out_port['val'], res[23])
eq_(self.flags['val'], res[24])
# OFP_ACTION_OUTPUT_PACK_STR
eq_(ofproto.OFPAT_OUTPUT, res[25])
eq_(ofproto.OFP_ACTION_OUTPUT_SIZE, res[26])
eq_(self.port, res[27])
eq_(1000, res[28])
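# --- Illustrative sketch (not part of the original Ryu test suite) ---
# The same positional argument order checked above (match, cookie, command,
# idle_timeout, hard_timeout, priority, buffer_id, out_port, flags, actions)
# is how a flow entry would be installed from a live datapath.  A minimal
# sketch, assuming a connected OpenFlow 1.0 `datapath` and an add command:
def _example_add_flow(datapath, match, actions, priority=0x8000):
    ofp = datapath.ofproto
    mod = datapath.ofproto_parser.OFPFlowMod(
        datapath, match, 0, ofp.OFPFC_ADD, 0, 0, priority,
        0xffffffff, ofp.OFPP_NONE, 0, actions)
    datapath.send_msg(mod)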
class TestOFPBarrierRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPBarrierRequest
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
c = OFPBarrierRequest(Datapath)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
pass
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_BARRIER_REQUEST, self.c.msg_type)
eq_(0, self.c.xid)
fmt = ofproto.OFP_HEADER_PACK_STR
res = struct.unpack(fmt, str(self.c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_BARRIER_REQUEST, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
class TestOFPQueueGetConfigRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPQueueGetConfigRequest
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
# OFP_QUEUE_GET_CONFIG_REQUEST_PACK_STR
# '!H2x'...port, zfill
port = {'buf': '\xa0\xe2', 'val': 41186}
zfill = '\x00' * 2
c = OFPQueueGetConfigRequest(Datapath,
port['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(self.port['val'], self.c.port)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_QUEUE_GET_CONFIG_REQUEST, self.c.msg_type)
eq_(0, self.c.xid)
a = ofproto.OFP_HEADER_PACK_STR.replace('!', '')
b = ofproto.OFP_QUEUE_GET_CONFIG_REQUEST_PACK_STR.replace('!', '')
fmt = '!' + a + b
res = struct.unpack(fmt, str(self.c.buf))
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_QUEUE_GET_CONFIG_REQUEST, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
eq_(self.port['val'], res[4])
class TestOFPDescStatsRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPDescStatsRequest
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
flags = {'buf': '\x00\x00', 'val': 0}
c = OFPDescStatsRequest(Datapath, flags['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(ofproto.OFPST_DESC, self.c.type)
eq_(self.flags['val'], self.c.flags)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_STATS_REQUEST, self.c.msg_type)
eq_(0, self.c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.OFP_STATS_MSG_PACK_STR.replace('!', '')
res = struct.unpack(fmt, str(self.c.buf))
# OFP_HEADER_PACK_STR
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_STATS_REQUEST, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
# OFP_STATS_MSG_PACK_STR
eq_(ofproto.OFPST_DESC, res[4])
eq_(self.flags['val'], res[5])
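# Note: every *StatsRequest serialize test below follows the same recipe --
# the expected unpack format is built by concatenating OFP_HEADER_PACK_STR,
# the shared '!HH' OFP_STATS_MSG_PACK_STR (stats type and flags) and the
# per-type body pack string, stripping the leading '!' from each piece and
# prepending a single '!'.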
class TestOFPFlowStatsRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPFlowStatsRequest
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
flags = {'buf': '\x00\x00', 'val': 0}
# OFP_MATCH_PACK_STR
# '!IH6s6sHBxHBB2xIIHH'...wildcards, in_port, dl_src, dl_dst, dl_vlan,
# dl_vlan_pcp, dl_type, nw_tos, nw_proto,
# nw_src, nw_dst, tp_src, tp_dst
wildcards = {'buf': '\xd2\x71\x25\x23', 'val': 3530630435}
in_port = {'buf': '\x37\x8b', 'val': 14219}
dl_src = '\x58\xd0\x8a\x69\xa4\xfc'
dl_dst = '\xb6\xe2\xef\xb1\xa6\x2d'
dl_vlan = {'buf': '\xc1\xf9', 'val': 49657}
dl_vlan_pcp = {'buf': '\x79', 'val': 121}
zfill0 = '\x00'
dl_type = {'buf': '\xa6\x9e', 'val': 42654}
nw_tos = {'buf': '\xde', 'val': 222}
nw_proto = {'buf': '\xe5', 'val': 229}
zfil11 = '\x00' * 2
nw_src = {'buf': '\x1b\x6d\x8d\x4b', 'val': 460164427}
nw_dst = {'buf': '\xab\x25\xe1\x20', 'val': 2871386400}
tp_src = {'buf': '\xd5\xc3', 'val': 54723}
tp_dst = {'buf': '\x78\xb9', 'val': 30905}
match = OFPMatch(wildcards['val'],
in_port['val'],
dl_src,
dl_dst,
dl_vlan['val'],
dl_vlan_pcp['val'],
dl_type['val'],
nw_tos['val'],
nw_proto['val'],
nw_src['val'],
nw_dst['val'],
tp_src['val'],
tp_dst['val'])
# OFP_FLOW_STATS_REQUEST_ID_PORT_STR
# '!BxH'...table_id, zfill, out_port
table_id = {'buf': '\xd1', 'val': 209}
zfill = '\x00' * 1
out_port = {'buf': '\xe4\x9a', 'val': 58522}
c = OFPFlowStatsRequest(Datapath,
flags['val'],
match,
table_id['val'],
out_port['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(ofproto.OFPST_FLOW, self.c.type)
eq_(self.flags['val'], self.c.flags)
eq_(self.table_id['val'], self.c.table_id)
eq_(self.out_port['val'], self.c.out_port)
# match
match = self.c.match
eq_(self.match.__hash__(), match.__hash__())
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_STATS_REQUEST, self.c.msg_type)
eq_(0, self.c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.OFP_STATS_MSG_PACK_STR.replace('!', '') \
+ ofproto.OFP_MATCH_PACK_STR.replace('!', '') \
+ ofproto.OFP_FLOW_STATS_REQUEST_ID_PORT_STR.replace('!', '')
res = struct.unpack(fmt, str(self.c.buf))
# OFP_HEADER_PACK_STR
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_STATS_REQUEST, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
# OFP_STATS_MSG_PACK_STR
eq_(ofproto.OFPST_FLOW, res[4])
eq_(self.flags['val'], res[5])
# OFP_MATCH_PACK_STR
eq_(self.wildcards['val'], res[6])
eq_(self.in_port['val'], res[7])
eq_(self.dl_src, res[8])
eq_(self.dl_dst, res[9])
eq_(self.dl_vlan['val'], res[10])
eq_(self.dl_vlan_pcp['val'], res[11])
eq_(self.dl_type['val'], res[12])
eq_(self.nw_tos['val'], res[13])
eq_(self.nw_proto['val'], res[14])
eq_(self.nw_src['val'], res[15])
eq_(self.nw_dst['val'], res[16])
eq_(self.tp_src['val'], res[17])
eq_(self.tp_dst['val'], res[18])
# OFP_FLOW_STATS_REQUEST_ID_PORT_STR
eq_(self.table_id['val'], res[19])
eq_(self.out_port['val'], res[20])
class TestOFPAggregateStatsRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPAggregateStatsRequest
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
flags = {'buf': '\x00\x00', 'val': 0}
# OFP_MATCH_PACK_STR
# '!IH6s6sHBxHBB2xIIHH'...wildcards, in_port, dl_src, dl_dst, dl_vlan,
# dl_vlan_pcp, dl_type, nw_tos, nw_proto,
# nw_src, nw_dst, tp_src, tp_dst
wildcards = {'buf': '\xea\x66\x4a\xd4', 'val': 3932572372}
in_port = {'buf': '\x64\xac', 'val': 25772}
dl_src = '\x90\x13\x60\x5e\x20\x4d'
dl_dst = '\xb5\x5d\x14\x5e\xb9\x22'
dl_vlan = {'buf': '\x8b\xeb', 'val': 35819}
dl_vlan_pcp = {'buf': '\xe8', 'val': 232}
zfill0 = '\x00'
    dl_type = {'buf': '\x62\xc9', 'val': 25289}
nw_tos = {'buf': '\xb5', 'val': 181}
nw_proto = {'buf': '\xc4', 'val': 196}
zfil11 = '\x00' * 2
nw_src = {'buf': '\xb7\xd1\xb7\xef', 'val': 3083974639}
nw_dst = {'buf': '\x7c\xc6\x18\x15', 'val': 2093357077}
tp_src = {'buf': '\x26\x9a', 'val': 9882}
tp_dst = {'buf': '\x7a\x89', 'val': 31369}
match = OFPMatch(wildcards['val'],
in_port['val'],
dl_src,
dl_dst,
dl_vlan['val'],
dl_vlan_pcp['val'],
dl_type['val'],
nw_tos['val'],
nw_proto['val'],
nw_src['val'],
nw_dst['val'],
tp_src['val'],
tp_dst['val'])
# OFP_FLOW_STATS_REQUEST_ID_PORT_STR
# '!BxH'...table_id, zfill, out_port
table_id = {'buf': '\xd1', 'val': 209}
zfill = '\x00' * 1
out_port = {'buf': '\xb5\xe8', 'val': 46568}
c = OFPAggregateStatsRequest(Datapath,
flags['val'],
match,
table_id['val'],
out_port['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(ofproto.OFPST_AGGREGATE, self.c.type)
eq_(self.flags['val'], self.c.flags)
eq_(self.table_id['val'], self.c.table_id)
eq_(self.out_port['val'], self.c.out_port)
# match
match = self.c.match
eq_(self.match.__hash__(), match.__hash__())
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_STATS_REQUEST, self.c.msg_type)
eq_(0, self.c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.OFP_STATS_MSG_PACK_STR.replace('!', '') \
+ ofproto.OFP_MATCH_PACK_STR.replace('!', '') \
+ ofproto.OFP_FLOW_STATS_REQUEST_ID_PORT_STR.replace('!', '')
res = struct.unpack(fmt, str(self.c.buf))
# OFP_HEADER_PACK_STR
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_STATS_REQUEST, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
# OFP_STATS_MSG_PACK_STR
eq_(ofproto.OFPST_AGGREGATE, res[4])
eq_(self.flags['val'], res[5])
# OFP_MATCH_PACK_STR
eq_(self.wildcards['val'], res[6])
eq_(self.in_port['val'], res[7])
eq_(self.dl_src, res[8])
eq_(self.dl_dst, res[9])
eq_(self.dl_vlan['val'], res[10])
eq_(self.dl_vlan_pcp['val'], res[11])
eq_(self.dl_type['val'], res[12])
eq_(self.nw_tos['val'], res[13])
eq_(self.nw_proto['val'], res[14])
eq_(self.nw_src['val'], res[15])
eq_(self.nw_dst['val'], res[16])
eq_(self.tp_src['val'], res[17])
eq_(self.tp_dst['val'], res[18])
# OFP_FLOW_STATS_REQUEST_ID_PORT_STR
eq_(self.table_id['val'], res[19])
eq_(self.out_port['val'], res[20])
class TestOFPTableStatsRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPTableStatsRequest
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
flags = {'buf': '\x00\x00', 'val': 0}
c = OFPTableStatsRequest(Datapath, flags['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(ofproto.OFPST_TABLE, self.c.type)
eq_(self.flags['val'], self.c.flags)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_STATS_REQUEST, self.c.msg_type)
eq_(0, self.c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.OFP_STATS_MSG_PACK_STR.replace('!', '')
res = struct.unpack(fmt, str(self.c.buf))
# OFP_HEADER_PACK_STR
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_STATS_REQUEST, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
# OFP_STATS_MSG_PACK_STR
eq_(ofproto.OFPST_TABLE, res[4])
eq_(self.flags['val'], res[5])
class TestOFPPortStatsRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPPortStatsRequest
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
flags = {'buf': '\x00\x00', 'val': 0}
# OFP_PORT_STATS_REQUEST_PACK_STR
# '!H6x'...port_no, zfill
port_no = {'buf': '\x6d\x27', 'val': 27943}
c = OFPPortStatsRequest(Datapath,
flags['val'],
port_no['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(ofproto.OFPST_PORT, self.c.type)
eq_(self.flags['val'], self.c.flags)
eq_(self.port_no['val'], self.c.port_no)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_STATS_REQUEST, self.c.msg_type)
eq_(0, self.c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.OFP_STATS_MSG_PACK_STR.replace('!', '') \
+ ofproto.OFP_PORT_STATS_REQUEST_PACK_STR.replace('!', '')
res = struct.unpack(fmt, str(self.c.buf))
# OFP_HEADER_PACK_STR
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_STATS_REQUEST, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
# OFP_STATS_MSG_PACK_STR
eq_(ofproto.OFPST_PORT, res[4])
eq_(self.flags['val'], res[5])
# OFP_PORT_STATS_REQUEST_PACK_STR
eq_(self.port_no['val'], res[6])
class TestOFPQueueStatsRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPQueueStatsRequest
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
flags = {'buf': '\x00\x00', 'val': 0}
# OFP_QUEUE_STATS_REQUEST_PACK_STR
# '!HxxI'...port_no, zfill, zfill, queue_id
port_no = {'buf': '\x0c\x2d', 'val': 3117}
queue_id = {'buf': '\x1b\xe6\xba\x36', 'val': 468105782}
c = OFPQueueStatsRequest(Datapath,
flags['val'],
port_no['val'],
queue_id['val'])
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(ofproto.OFPST_QUEUE, self.c.type)
eq_(self.flags['val'], self.c.flags)
eq_(self.port_no['val'], self.c.port_no)
eq_(self.queue_id['val'], self.c.queue_id)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_STATS_REQUEST, self.c.msg_type)
eq_(0, self.c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.OFP_STATS_MSG_PACK_STR.replace('!', '') \
+ ofproto.OFP_QUEUE_STATS_REQUEST_PACK_STR.replace('!', '')
res = struct.unpack(fmt, str(self.c.buf))
# OFP_HEADER_PACK_STR
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_STATS_REQUEST, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
# OFP_STATS_MSG_PACK_STR
eq_(ofproto.OFPST_QUEUE, res[4])
eq_(self.flags['val'], res[5])
# OFP_QUEUE_STATS_REQUEST_PACK_STR
eq_(self.port_no['val'], res[6])
eq_(self.queue_id['val'], res[7])
class TestOFPVendorStatsRequest(unittest.TestCase):
""" Test case for ofproto_v1_0_parser.OFPVendorStatsRequest
"""
class Datapath(object):
ofproto = ofproto # copy to class attribute
ofproto_parser = ofproto_v1_0_parser
flags = {'buf': '\x00\x00', 'val': 0}
# OFP_VENDOR_STATS_MSG_PACK_STR
# '!I'...vendor
vendor = {'buf': '\xff\xff\xff\xff', 'val': ofproto.OFPAT_VENDOR}
specific_data = 'specific_data'
c = OFPVendorStatsRequest(Datapath,
flags['val'],
vendor['val'],
specific_data)
def setUp(self):
pass
def tearDown(self):
pass
def test_init(self):
eq_(ofproto.OFPST_VENDOR, self.c.type)
eq_(self.flags['val'], self.c.flags)
eq_(self.vendor['val'], self.c.vendor)
eq_(self.specific_data, self.c.specific_data)
def test_parser(self):
# Not used.
pass
def test_serialize(self):
self.c.serialize()
eq_(ofproto.OFP_VERSION, self.c.version)
eq_(ofproto.OFPT_STATS_REQUEST, self.c.msg_type)
eq_(0, self.c.xid)
fmt = '!' \
+ ofproto.OFP_HEADER_PACK_STR.replace('!', '') \
+ ofproto.OFP_STATS_MSG_PACK_STR.replace('!', '') \
+ ofproto.OFP_VENDOR_STATS_MSG_PACK_STR.replace('!', '') \
+ str(len(self.specific_data)) + 's'
res = struct.unpack(fmt, str(self.c.buf))
# OFP_HEADER_PACK_STR
eq_(ofproto.OFP_VERSION, res[0])
eq_(ofproto.OFPT_STATS_REQUEST, res[1])
eq_(len(self.c.buf), res[2])
eq_(0, res[3])
# OFP_STATS_MSG_PACK_STR
eq_(ofproto.OFPST_VENDOR, res[4])
eq_(self.flags['val'], res[5])
# OFP_VENDOR_STATS_MSG_PACK_STR
eq_(self.vendor['val'], res[6])
# specific_data
eq_(self.specific_data, res[7])
|
third_party/catapult/dashboard/dashboard/common/utils_test.py | zipated/src | 2,151 | 12741575 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import unittest
import mock
from google.appengine.ext import ndb
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import graph_data
class UtilsTest(testing_common.TestCase):
def setUp(self):
super(UtilsTest, self).setUp()
testing_common.SetIsInternalUser('<EMAIL>', True)
testing_common.SetIsInternalUser('<EMAIL>', False)
def _AssertMatches(self, test_path, pattern):
"""Asserts that a test path matches a pattern with MatchesPattern."""
test_key = utils.TestKey(test_path)
self.assertTrue(utils.TestMatchesPattern(test_key, pattern))
def _AssertDoesntMatch(self, test_path, pattern):
"""Asserts that a test path doesn't match a pattern with MatchesPattern."""
test_key = utils.TestKey(test_path)
self.assertFalse(utils.TestMatchesPattern(test_key, pattern))
def testMatchesPattern_AllWildcards(self):
self._AssertMatches(
'ChromiumPerf/cros-one/dromaeo.top25/Total', '*/*/*/*')
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeo.top25/Total', '*/*/*')
def testMatchesPattern_SomeWildcards(self):
self._AssertMatches(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'ChromiumPerf/*/dromaeo.top25/*')
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'ChromiumPerf/*/dromaeo.another_page_set/*')
def testMatchesPattern_SomePartialWildcards(self):
self._AssertMatches(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'ChromiumPerf/cros-*/dromaeo.*/Total')
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeoXtop25/Total',
'ChromiumPerf/cros-*/dromaeo.*/Total')
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'OtherMaster/cros-*/dromaeo.*/Total')
def testMatchesPattern_MorePartialWildcards(self):
# Note that the wildcard matches zero or more characters.
self._AssertMatches(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'Chromium*/cros-one*/*.*/To*al')
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'Chromium*/linux-*/*.*/To*al')
def testMatchesPattern_RequiresFullMatchAtEnd(self):
# If there is no wildcard at the beginning or end of the
# test path part, then a part will only match if it matches
# right up to the beginning or end.
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'ChromiumPerf/cros-one/dromaeo.top25/*Tot')
self._AssertDoesntMatch(
'ChromiumPerf/cros-one/dromaeo.top25/Total',
'ChromiumPerf/cros-one/dromaeo.top25/otal*')
def testMostSpecificMatchingPattern_SpecificVsGeneral(self):
test_key = utils.TestKey('M/B/S/Total')
result = utils.MostSpecificMatchingPattern(
test_key,
[('*/*/*/*', 1), ('*/*/*/Total', 2), ('*/*/*/Foo', 3)])
self.assertEqual(2, result)
def testMostSpecificMatchingPattern_PartialVsGeneral(self):
test_key = utils.TestKey('M/B/S/Total')
result = utils.MostSpecificMatchingPattern(
test_key,
[('*/*/*/*', 1), ('*/*/*/To*al', 2), ('*/*/*/Foo', 3)])
self.assertEqual(2, result)
def testMostSpecificMatchingPattern_2ndLevel(self):
test_key = utils.TestKey('M/B/S/Total')
result = utils.MostSpecificMatchingPattern(
test_key,
[('*/*/*/*', 1), ('*/*/S/*', 2), ('*/*/*/Foo', 3)])
self.assertEqual(2, result)
def testMostSpecificMatchingPattern_TopLevelSpecificOverLowerSpecific(self):
test_key = utils.TestKey('M/B/S/Total')
result = utils.MostSpecificMatchingPattern(
test_key,
[('*/*/S/*', 1), ('*/*/*/Total', 2), ('*/*/*/Foo', 3)])
self.assertEqual(2, result)
def testMostSpecificMatchingPattern_TopLevelPartialOverLowerSpecific(self):
test_key = utils.TestKey('M/B/S/Total')
result = utils.MostSpecificMatchingPattern(
test_key,
[('*/*/S/*', 1), ('*/*/*/To*al', 2), ('*/*/*/Foo', 3)])
self.assertEqual(2, result)
def _PutEntitiesAllExternal(self):
"""Puts entities (none internal-only) and returns the keys."""
master = graph_data.Master(id='M').put()
graph_data.Bot(parent=master, id='b').put()
keys = [
graph_data.TestMetadata(id='M/b/a', internal_only=False).put(),
graph_data.TestMetadata(id='M/b/b', internal_only=False).put(),
graph_data.TestMetadata(id='M/b/c', internal_only=False).put(),
graph_data.TestMetadata(id='M/b/d', internal_only=False).put(),
]
return keys
def _PutEntitiesHalfInternal(self):
"""Puts entities (half internal-only) and returns the keys."""
master = graph_data.Master(id='M').put()
graph_data.Bot(parent=master, id='b').put()
keys = [
graph_data.TestMetadata(id='M/b/ax', internal_only=True).put(),
graph_data.TestMetadata(id='M/b/a', internal_only=False).put(),
graph_data.TestMetadata(id='M/b/b', internal_only=False).put(),
graph_data.TestMetadata(id='M/b/bx', internal_only=True).put(),
graph_data.TestMetadata(id='M/b/c', internal_only=False).put(),
graph_data.TestMetadata(id='M/b/cx', internal_only=True).put(),
graph_data.TestMetadata(id='M/b/d', internal_only=False).put(),
graph_data.TestMetadata(id='M/b/dx', internal_only=True).put(),
]
return keys
def testGetMulti_ExternalUser_ReturnsSomeEntities(self):
keys = self._PutEntitiesHalfInternal()
self.SetCurrentUser('<EMAIL>')
self.assertEqual(len(keys) / 2, len(utils.GetMulti(keys)))
def testGetMulti_InternalUser_ReturnsAllEntities(self):
keys = self._PutEntitiesHalfInternal()
self.SetCurrentUser('<EMAIL>')
self.assertEqual(len(keys), len(utils.GetMulti(keys)))
def testGetMulti_AllExternalEntities_ReturnsAllEntities(self):
keys = self._PutEntitiesAllExternal()
self.SetCurrentUser('<EMAIL>')
self.assertEqual(len(keys), len(utils.GetMulti(keys)))
def testTestPath_Test(self):
key = ndb.Key('Master', 'm', 'Bot', 'b', 'Test', 'suite', 'Test', 'metric')
self.assertEqual('m/b/suite/metric', utils.TestPath(key))
def testTestPath_TestMetadata(self):
key = ndb.Key('TestMetadata', 'm/b/suite/metric')
self.assertEqual('m/b/suite/metric', utils.TestPath(key))
def testTestPath_Container(self):
key = ndb.Key('TestContainer', 'm/b/suite/metric')
self.assertEqual('m/b/suite/metric', utils.TestPath(key))
def testTestMetadataKey_None(self):
key = utils.TestMetadataKey(None)
self.assertIsNone(key)
def testTestMetadataKey_Test(self):
key = utils.TestMetadataKey(
ndb.Key('Master', 'm', 'Bot', 'b', 'Test', 'suite', 'Test', 'metric'))
self.assertEqual('TestMetadata', key.kind())
self.assertEqual('m/b/suite/metric', key.id())
self.assertEqual(('TestMetadata', 'm/b/suite/metric'), key.flat())
def testTestMetadataKey_TestMetadata(self):
original_key = ndb.Key('TestMetadata', 'm/b/suite/metric')
key = utils.TestMetadataKey(original_key)
self.assertEqual(original_key, key)
def testTestMetadataKey_String(self):
key = utils.TestMetadataKey('m/b/suite/metric/page')
self.assertEqual('TestMetadata', key.kind())
self.assertEqual('m/b/suite/metric/page', key.id())
self.assertEqual(('TestMetadata', 'm/b/suite/metric/page'), key.flat())
def testOldStyleTestKey_None(self):
key = utils.OldStyleTestKey(None)
self.assertIsNone(key)
def testOldStyleTestKey_Test(self):
original_key = ndb.Key(
'Master', 'm', 'Bot', 'b', 'Test', 'suite', 'Test', 'metric')
key = utils.OldStyleTestKey(original_key)
self.assertEqual(original_key, key)
def testOldStyleTestKey_TestMetadata(self):
key = utils.OldStyleTestKey(ndb.Key('TestMetadata', 'm/b/suite/metric'))
self.assertEqual('Test', key.kind())
self.assertEqual('metric', key.id())
self.assertEqual(
('Master', 'm', 'Bot', 'b', 'Test', 'suite', 'Test', 'metric'),
key.flat())
def testOldStyleTestKey_String(self):
key = utils.OldStyleTestKey('m/b/suite/metric')
self.assertEqual('Test', key.kind())
self.assertEqual('metric', key.id())
self.assertEqual(
('Master', 'm', 'Bot', 'b', 'Test', 'suite', 'Test', 'metric'),
key.flat())
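  # Note: TestMetadataKey and OldStyleTestKey are converses -- the former
  # maps an old-style Test key or a "master/bot/suite/..." path to a
  # TestMetadata key, the latter maps a TestMetadata key or path back to
  # the nested Master/Bot/Test key, as the cases above show.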
def testTestSuiteName_Basic(self):
key = utils.TestKey('Master/bot/suite-foo/sub/x/y/z')
self.assertEqual('suite-foo', utils.TestSuiteName(key))
def testMinimumRange_Empty_ReturnsNone(self):
self.assertIsNone(utils.MinimumRange([]))
def testMinimumRange_NotOverlapping_ReturnsNone(self):
self.assertIsNone(utils.MinimumRange([(5, 10), (15, 20)]))
def testMinimumRange_OneRange_ReturnsSameRange(self):
self.assertEqual((5, 10), utils.MinimumRange([(5, 10)]))
def testMinimumRange_OverlapsForOneNumber_ReturnsRangeWithOneNumber(self):
self.assertEqual((5, 5), utils.MinimumRange([(2, 5), (5, 10)]))
def testMinimumRange_MoreThanTwoRanges_ReturnsIntersection(self):
self.assertEqual((6, 14), utils.MinimumRange(
[(3, 20), (5, 15), (6, 25), (3, 14)]))
def testValidate_StringNotInOptionList_Fails(self):
with self.assertRaises(ValueError):
utils.Validate(
['completed', 'pending', 'failed'], 'running')
def testValidate_InvalidType_Fails(self):
with self.assertRaises(ValueError):
utils.Validate(int, 'a string')
def testValidate_MissingProperty_Fails(self):
with self.assertRaises(ValueError):
utils.Validate(
{'status': str, 'try_job_id': int, 'required_property': int},
{'status': 'completed', 'try_job_id': 1234})
def testValidate_InvalidTypeInDict_Fails(self):
with self.assertRaises(ValueError):
utils.Validate(
{'status': int, 'try_job_id': int},
{'status': 'completed', 'try_job_id': 1234})
def testValidate_StringNotInNestedOptionList_Fails(self):
with self.assertRaises(ValueError):
utils.Validate(
{'values': {'nested_values': ['orange', 'banana']}},
{'values': {'nested_values': 'apple'}})
def testValidate_MissingPropertyInNestedDict_Fails(self):
with self.assertRaises(ValueError):
utils.Validate(
{'values': {'nested_values': ['orange', 'banana']}},
{'values': {}})
def testValidate_ExpectedValueIsNone_Passes(self):
utils.Validate(None, 'running')
def testValidate_StringInOptionList_Passes(self):
utils.Validate(str, 'a string')
def testValidate_HasExpectedProperties_Passes(self):
utils.Validate(
{'status': str, 'try_job_id': int},
{'status': 'completed', 'try_job_id': 1234})
def testValidate_StringInNestedOptionList_Passes(self):
utils.Validate(
{'values': {'nested_values': ['orange', 'banana']}},
{'values': {'nested_values': 'orange'}})
def testValidate_TypeConversion_Passes(self):
utils.Validate([1], '1')
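  # Note: the Validate cases above cover the three expectation forms the
  # helper accepts -- a type, a list of allowed values, or a (possibly
  # nested) dict mapping required keys to further expectations -- with
  # ValueError raised whenever the value does not fit.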
def testGetBuildDetailsFromStdioLink_InvalidLink(self):
base_url, master, bot, number, step = utils.GetBuildDetailsFromStdioLink(
'[Buildbot stdio](http://notquite/builders/whatever/234)')
self.assertIsNone(base_url)
self.assertIsNone(master)
self.assertIsNone(bot)
self.assertIsNone(number)
self.assertIsNone(step)
def testGetBuildDetailsFromStdioLink(self):
base_url, master, bot, number, step = utils.GetBuildDetailsFromStdioLink((
'[Buildbot stdio](https://build.chromium.org/p/chromium.perf/builders/'
'Android%20One%20Perf%20%282%29/builds/5365/steps/'
'blink_style.top_25/logs/stdio)'))
self.assertEqual('https://build.chromium.org/p/chromium.perf/builders/',
base_url)
self.assertEqual('chromium.perf', master)
self.assertEqual('Android One Perf (2)', bot)
self.assertEqual('5365', number)
self.assertEqual('blink_style.top_25', step)
def testGetBuildDetailsFromStdioLink_DifferentBaseUrl(self):
base_url, master, bot, number, step = utils.GetBuildDetailsFromStdioLink((
'[Buildbot stdio]('
'https://uberchromegw.corp.google.com/i/new.master/builders/Builder/'
'builds/3486/steps/new_test/logs/stdio)'))
self.assertEqual(
'https://uberchromegw.corp.google.com/i/new.master/builders/',
base_url)
self.assertEqual('new.master', master)
self.assertEqual('Builder', bot)
self.assertEqual('3486', number)
self.assertEqual('new_test', step)
def testGetBuildbotStatusPageUriFromStdioLink(self):
buildbot_status_page = utils.GetBuildbotStatusPageUriFromStdioLink((
'[Buildbot stdio](https://build.chromium.org/p/chromium.perf/builders/'
'Android%20One%20Perf%20%282%29/builds/5365/steps/'
'blink_style.top_25/logs/stdio)'))
self.assertEqual((
'https://build.chromium.org/p/chromium.perf/builders/'
'Android%20One%20Perf%20%282%29/builds/5365'), buildbot_status_page)
def testGetLogdogLogUriFromStdioLink(self):
logdog_uri = utils.GetLogdogLogUriFromStdioLink((
'[Buildbot stdio](https://build.chromium.org/p/chromium.perf/builders/'
'Android%20One%20Perf%20%282%29/builds/5365/steps/'
'blink_style.top_25/logs/stdio)'))
self.assertEqual((
'https://luci-logdog.appspot.com/v/?s='
'chrome%2Fbb%2Fchromium.perf%2FAndroid_One_Perf__2_%2F5365%2F%2B%2F'
'recipes%2Fsteps%2Fblink_style.top_25%2F0%2Fstdout'), logdog_uri)
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch('common.utils.discovery.build')
def testIsGroupMember_PositiveCase(self, mock_discovery_build):
mock_request = mock.MagicMock()
mock_request.execute = mock.MagicMock(return_value={'is_member': True})
mock_service = mock.MagicMock()
mock_service.membership = mock.MagicMock(
return_value=mock_request)
mock_discovery_build.return_value = mock_service
self.assertTrue(utils.IsGroupMember('<EMAIL>', 'group'))
mock_service.membership.assert_called_once_with(
identity='<EMAIL>', group='group')
@mock.patch.object(utils, 'ServiceAccountHttp', mock.MagicMock())
@mock.patch('logging.error')
@mock.patch('common.utils.discovery.build')
def testIsGroupMember_RequestFails_LogsErrorAndReturnsFalse(
self, mock_discovery_build, mock_logging_error):
mock_service = mock.MagicMock()
mock_service.membership = mock.MagicMock(
return_value={'error': 'Some error'})
mock_discovery_build.return_value = mock_service
self.assertFalse(utils.IsGroupMember('<EMAIL>', 'group'))
self.assertEqual(1, mock_logging_error.call_count)
def testGetSheriffForAutorollCommit_InvalidCommit_ReturnsNone(self):
self.assertIsNone(utils.GetSheriffForAutorollCommit(None))
self.assertIsNone(utils.GetSheriffForAutorollCommit({}))
self.assertIsNone(utils.GetSheriffForAutorollCommit({'author': {}}))
def testGetSheriffForAutorollCommit_NotAutoroll_ReturnsNone(self):
self.assertIsNone(utils.GetSheriffForAutorollCommit({
'author': {'email': '<EMAIL>'},
'message': 'TBR=<EMAIL>',
}))
self.assertIsNone(utils.GetSheriffForAutorollCommit({
'author': {'email': '<EMAIL>'},
'message': 'TBR=<EMAIL>',
}))
def testGetSheriffForAutorollCommit_AutoRoll_ReturnsSheriff(self):
self.assertEqual(
'<EMAIL>',
utils.GetSheriffForAutorollCommit({
'author': {
'email': '<EMAIL>',
},
'message': 'This is a roll.\n\nTBR=<EMAIL>,<EMAIL>\n\n',
}))
self.assertEqual(
'<EMAIL>',
utils.GetSheriffForAutorollCommit({
'author': {
'email': '<EMAIL>',
},
'message': 'TBR=<EMAIL>',
}))
self.assertEqual(
'<EMAIL>',
utils.GetSheriffForAutorollCommit({'tbr': '<EMAIL>'}))
def _MakeMockFetch(base64_encoded=True, status=200):
"""Returns a mock fetch object that returns a canned response."""
def _MockFetch(_):
response_text = json.dumps({'key': 'this is well-formed JSON.'})
if base64_encoded:
response_text = base64.b64encode(response_text)
return testing_common.FakeResponseObject(status, response_text)
return _MockFetch
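# Illustrative helper (not used by the tests above): a sketch, assuming the
# helpers behave as exercised in UtilsTest, of how the pattern utilities
# combine -- TestKey turns a "Master/bot/suite/metric" path into an ndb key,
# and MostSpecificMatchingPattern returns the value paired with the pattern
# that matches that key most specifically.
def _ExampleLookupConfig(test_path, pattern_value_pairs):
  test_key = utils.TestKey(test_path)
  return utils.MostSpecificMatchingPattern(test_key, pattern_value_pairs)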
if __name__ == '__main__':
unittest.main()
|