# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid import Program, program_guard
class TestDiagV2Op(OpTest):
def setUp(self):
self.op_type = "diag_v2"
self.x = np.random.rand(10, 10)
self.offset = 0
self.padding_value = 0.0
self.out = np.diag(self.x, self.offset)
self.init_config()
self.inputs = {'X': self.x}
self.attrs = {
'offset': self.offset,
'padding_value': self.padding_value
}
self.outputs = {'Out': self.out}
def test_check_output(self):
paddle.enable_static()
self.check_output(check_eager=True)
def init_config(self):
pass
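# The subclasses below override init_config() to vary the offset,
# padding_value, dtype, and input shape; the expected output is always
# rebuilt with np.diag, so diag_v2 is checked directly against NumPy.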
class TestDiagV2OpCase1(TestDiagV2Op):
def init_config(self):
self.offset = 1
self.out = np.diag(self.x, self.offset)
class TestDiagV2OpCase2(TestDiagV2Op):
def init_config(self):
self.offset = -1
self.out = np.diag(self.x, self.offset)
class TestDiagV2OpCase3(TestDiagV2Op):
def init_config(self):
self.x = np.random.randint(-10, 10, size=(10, 10))
self.out = np.diag(self.x, self.offset)
class TestDiagV2OpCase4(TestDiagV2Op):
def init_config(self):
self.x = np.random.rand(100)
self.padding_value = 8
n = self.x.size
self.out = self.padding_value * np.ones((n, n)) + np.diag(
self.x, self.offset) - np.diag(self.padding_value * np.ones(n))
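# Reference construction used above: start from an (n, n) matrix filled with
# padding_value, add np.diag(x) to place x on the main diagonal, and subtract
# the padding_value that the filled matrix already had on that diagonal.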
class TestDiagV2Error(unittest.TestCase):
def test_errors(self):
paddle.enable_static()
with program_guard(Program(), Program()):
def test_diag_v2_type():
x = [1, 2, 3]
output = paddle.diag(x)
self.assertRaises(TypeError, test_diag_v2_type)
x = paddle.static.data('data', [3, 3])
self.assertRaises(TypeError, paddle.diag, x, offset=2.5)
self.assertRaises(TypeError, paddle.diag, x, padding_value=[9])
x = paddle.static.data('data2', [3, 3, 3])
self.assertRaises(ValueError, paddle.diag, x)
class TestDiagV2API(unittest.TestCase):
def setUp(self):
self.input_np = np.random.random(size=(10, 10)).astype(np.float32)
self.expected0 = np.diag(self.input_np)
self.expected1 = np.diag(self.input_np, k=1)
self.expected2 = np.diag(self.input_np, k=-1)
self.input_np2 = np.random.rand(100)
self.offset = 0
self.padding_value = 8
n = self.input_np2.size
self.expected3 = self.padding_value * np.ones(
(n, n)) + np.diag(self.input_np2, self.offset) - np.diag(
self.padding_value * np.ones(n))
self.input_np3 = np.random.randint(-10, 10, size=(100)).astype(np.int64)
self.padding_value = 8.0
n = self.input_np3.size
self.expected4 = self.padding_value * np.ones(
(n, n)) + np.diag(self.input_np3, self.offset) - np.diag(
self.padding_value * np.ones(n))
self.padding_value = -8
self.expected5 = self.padding_value * np.ones(
(n, n)) + np.diag(self.input_np3, self.offset) - np.diag(
self.padding_value * np.ones(n))
self.input_np4 = np.random.random(size=(2000, 2000)).astype(np.float32)
self.expected6 = np.diag(self.input_np4)
self.expected7 = np.diag(self.input_np4, k=1)
self.expected8 = np.diag(self.input_np4, k=-1)
self.input_np5 = np.random.random(size=(2000)).astype(np.float32)
self.expected9 = np.diag(self.input_np5)
self.expected10 = np.diag(self.input_np5, k=1)
self.expected11 = np.diag(self.input_np5, k=-1)
self.input_np6 = np.random.random(size=(2000, 1500)).astype(np.float32)
self.expected12 = np.diag(self.input_np6, k=-1)
def run_imperative(self):
x = paddle.to_tensor(self.input_np)
y = paddle.diag(x)
self.assertTrue(np.allclose(y.numpy(), self.expected0))
y = paddle.diag(x, offset=1)
self.assertTrue(np.allclose(y.numpy(), self.expected1))
y = paddle.diag(x, offset=-1)
self.assertTrue(np.allclose(y.numpy(), self.expected2))
x = paddle.to_tensor(self.input_np2)
y = paddle.diag(x, padding_value=8)
self.assertTrue(np.allclose(y.numpy(), self.expected3))
x = paddle.to_tensor(self.input_np3)
y = paddle.diag(x, padding_value=8.0)
self.assertTrue(np.allclose(y.numpy(), self.expected4))
y = paddle.diag(x, padding_value=-8)
self.assertTrue(np.allclose(y.numpy(), self.expected5))
x = paddle.to_tensor(self.input_np4)
y = paddle.diag(x)
self.assertTrue(np.allclose(y.numpy(), self.expected6))
y = paddle.diag(x, offset=1)
self.assertTrue(np.allclose(y.numpy(), self.expected7))
y = paddle.diag(x, offset=-1)
self.assertTrue(np.allclose(y.numpy(), self.expected8))
x = paddle.to_tensor(self.input_np5)
y = paddle.diag(x)
self.assertTrue(np.allclose(y.numpy(), self.expected9))
y = paddle.diag(x, offset=1)
self.assertTrue(np.allclose(y.numpy(), self.expected10))
y = paddle.diag(x, offset=-1)
self.assertTrue(np.allclose(y.numpy(), self.expected11))
x = paddle.to_tensor(self.input_np6)
y = paddle.diag(x, offset=-1)
self.assertTrue(np.allclose(y.numpy(), self.expected12))
def run_static(self, use_gpu=False):
x = paddle.static.data(name='input', shape=[10, 10], dtype='float32')
x2 = paddle.static.data(name='input2', shape=[100], dtype='float64')
x3 = paddle.static.data(name='input3', shape=[100], dtype='int64')
x4 = paddle.static.data(
name='input4', shape=[2000, 2000], dtype='float32')
x5 = paddle.static.data(name='input5', shape=[2000], dtype='float32')
x6 = paddle.static.data(
name='input6', shape=[2000, 1500], dtype='float32')
result0 = paddle.diag(x)
result1 = paddle.diag(x, offset=1)
result2 = paddle.diag(x, offset=-1)
result3 = paddle.diag(x, name='aaa')
result4 = paddle.diag(x2, padding_value=8)
result5 = paddle.diag(x3, padding_value=8.0)
result6 = paddle.diag(x3, padding_value=-8)
result7 = paddle.diag(x4)
result8 = paddle.diag(x4, offset=1)
result9 = paddle.diag(x4, offset=-1)
result10 = paddle.diag(x5)
result11 = paddle.diag(x5, offset=1)
result12 = paddle.diag(x5, offset=-1)
result13 = paddle.diag(x6, offset=-1)
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
res0, res1, res2, res4, res5, res6, res7, res8, res9, res10, res11, res12, res13 = exe.run(
feed={
"input": self.input_np,
"input2": self.input_np2,
'input3': self.input_np3,
'input4': self.input_np4,
'input5': self.input_np5,
'input6': self.input_np6
},
fetch_list=[
result0, result1, result2, result4, result5, result6, result7,
result8, result9, result10, result11, result12, result13
])
self.assertTrue(np.allclose(res0, self.expected0))
self.assertTrue(np.allclose(res1, self.expected1))
self.assertTrue(np.allclose(res2, self.expected2))
self.assertTrue('aaa' in result3.name)
self.assertTrue(np.allclose(res4, self.expected3))
self.assertTrue(np.allclose(res5, self.expected4))
self.assertTrue(np.allclose(res6, self.expected5))
self.assertTrue(np.allclose(res7, self.expected6))
self.assertTrue(np.allclose(res8, self.expected7))
self.assertTrue(np.allclose(res9, self.expected8))
self.assertTrue(np.allclose(res10, self.expected9))
self.assertTrue(np.allclose(res11, self.expected10))
self.assertTrue(np.allclose(res12, self.expected11))
self.assertTrue(np.allclose(res13, self.expected12))
def test_cpu(self):
paddle.disable_static(place=paddle.fluid.CPUPlace())
self.run_imperative()
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
self.run_static()
def test_gpu(self):
if not fluid.core.is_compiled_with_cuda():
return
paddle.disable_static(place=paddle.fluid.CUDAPlace(0))
self.run_imperative()
paddle.enable_static()
with fluid.program_guard(fluid.Program()):
self.run_static(use_gpu=True)
if __name__ == "__main__":
unittest.main()
"""
Unit tests for lib/sqlcompare.py
test_lib_sqlcompare.py
Ken Kinder
2005-03-17
"""
from testing_common import *
import sqlcompare, MySQLdb
class TestSqlCompare(unittest.TestCase):
def setUp(self):
cnx = MySQLdb.connect(user='root', passwd='z0t0123')
c = cnx.cursor()
c.execute('drop database if exists unittest')
c.execute('create database unittest')
c.execute('use unittest')
self.cnx = cnx
self.cursor = c
def _get_create(self, table):
self.cursor.execute('show create table %s' % table)
(table, create) = self.cursor.fetchone()
return table, create
def _get_table(self, table):
return sqlcompare.MySQLTable(*self._get_create(table))
def test_basic_detection(self):
self.cursor.execute(
"""
create table test_basic_detection (
spam_id int(11),
spam_x varchar(10),
spam_text text,
primary key(spam_id),
index(spam_x),
unique(spam_id, spam_x),
fulltext key(spam_text)
)
""")
table = self._get_table('test_basic_detection')
self.failUnlessEqual(table.table_name, 'test_basic_detection')
self.failUnlessEqual(table.cols, {'spam_id': "int(11) NOT NULL default '0'", 'spam_x': 'varchar(10) default NULL', 'spam_text': 'text'})
self.failUnlessEqual(table.primary_key, '(spam_id)')
self.failUnlessEqual(table.indexes, {'spam_id': 'UNIQUE KEY spam_id (spam_id,spam_x)', 'spam_x': 'KEY spam_x (spam_x)', 'spam_text': 'FULLTEXT KEY spam_text (spam_text)'})
def test_add_missing_cols(self):
self.cursor.execute(
"""
create table test_add_missing_cols1 (
spam_id int(11)
)
""")
self.cursor.execute(
"""
create table test_add_missing_cols2 (
spam_id int(11),
spam_x varchar(10),
spam_text text
)
""")
table1 = self._get_table('test_add_missing_cols1')
table2 = self._get_table('test_add_missing_cols2')
for statement in table1.diff(table2):
self.failUnless('add column' in statement)
self.cursor.execute(statement)
table1 = self._get_table('test_add_missing_cols1')
table2 = self._get_table('test_add_missing_cols2')
self.failUnlessEqual(table1.diff(table2), [])
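    # Illustrative only (sqlcompare itself is not shown in this file): for the
    # two tables above, table1.diff(table2) is expected to yield ALTER TABLE
    # statements along the lines of
    # "alter table test_add_missing_cols1 add column spam_x varchar(10)".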
def test_remove_extra_cols(self):
self.cursor.execute(
"""
create table test_remove_extra_cols1 (
spam_id int(11),
spam_x varchar(10),
spam_text text
)
""")
self.cursor.execute(
"""
create table test_remove_extra_cols2 (
spam_id int(11)
)
""")
table1 = self._get_table('test_remove_extra_cols1')
table2 = self._get_table('test_remove_extra_cols2')
for statement in table1.diff(table2):
self.failUnless('drop column' in statement)
self.cursor.execute(statement)
table1 = self._get_table('test_remove_extra_cols1')
table2 = self._get_table('test_remove_extra_cols2')
self.failUnlessEqual(table1.diff(table2), [])
def test_drop_extra_indexes(self):
self.cursor.execute(
"""
create table test_drop_extra_indexes1 (
spam_id int(11) not null,
spam_x varchar(10),
spam_text text,
primary key(spam_id),
index(spam_x),
unique(spam_id, spam_x),
fulltext key(spam_text)
)
""")
self.cursor.execute(
"""
create table test_drop_extra_indexes2 (
spam_id int(11) not null,
spam_x varchar(10),
spam_text text
)
""")
table1 = self._get_table('test_drop_extra_indexes1')
table2 = self._get_table('test_drop_extra_indexes2')
for statement in table1.diff(table2):
self.cursor.execute(statement)
table1 = self._get_table('test_drop_extra_indexes1')
table2 = self._get_table('test_drop_extra_indexes2')
self.failUnlessEqual(table1.diff(table2), [])
def test_add_missing_indexes(self):
self.cursor.execute(
"""
create table test_add_missing_indexes1 (
spam_id int(11) not null,
spam_x varchar(10),
spam_text text
)
""")
self.cursor.execute(
"""
create table test_add_missing_indexes2 (
spam_id int(11) not null,
spam_x varchar(10),
spam_text text,
primary key(spam_id),
index(spam_x),
unique(spam_id, spam_x),
fulltext key(spam_text)
)
""")
table1 = self._get_table('test_add_missing_indexes1')
table2 = self._get_table('test_add_missing_indexes2')
for statement in table1.diff(table2):
self.cursor.execute(statement)
table1 = self._get_table('test_add_missing_indexes1')
table2 = self._get_table('test_add_missing_indexes2')
self.failUnlessEqual(table1.diff(table2), [])
def test_wildly_different(self):
self.cursor.execute(
"""
create table test_wildly_different1 (
test_wildly_different_id float,
text_col1 varchar(50) not null,
text_col2 varchar(25) not null,
some_number int(11) default 15,
some_number2 int(11) default 15,
some_number3 int(11) default 15,
primary key(test_wildly_different_id),
unique (some_number),
key (some_number3),
fulltext key(text_col1, text_col2)
)
""")
self.cursor.execute(
"""
create table test_wildly_different2 (
test_wildly_different_idx int(11) not null auto_increment,
text_col2 varchar(25) not null,
some_number int(11) default 15,
some_number2 float(11) default 15,
some_number3 int(11) default 10,
some_number4 int(11) default 10,
some_number5 int(11) default 10,
primary key(test_wildly_different_idx),
index (some_number),
key (some_number2),
fulltext key(text_col2)
)
""")
table1 = self._get_table('test_wildly_different1')
table2 = self._get_table('test_wildly_different2')
for statement in table1.diff(table2):
self.cursor.execute(statement)
table1 = self._get_table('test_wildly_different1')
table2 = self._get_table('test_wildly_different2')
self.failUnlessEqual(table1.diff(table2), [])
def test_add_primary_key(self):
self.cursor.execute(
"""
create table test_add_primary_key1 (
value varchar(10)
)
""")
self.cursor.execute("insert into test_add_primary_key1 values ('foo')")
self.cursor.execute("insert into test_add_primary_key1 values ('bar')")
self.cursor.execute(
"""
create table test_add_primary_key2 (
test_add_primary_key_id int(11) not null auto_increment,
value varchar(10),
primary key(test_add_primary_key_id)
)
""")
table1 = self._get_table('test_add_primary_key1')
table2 = self._get_table('test_add_primary_key2')
for statement in table1.diff(table2):
self.cursor.execute(statement)
table1 = self._get_table('test_add_primary_key1')
table2 = self._get_table('test_add_primary_key2')
self.failUnlessEqual(table1.diff(table2), [])
if __name__ == '__main__':
unittest.main()
# Romulus.py
#
SYSTEM_STATES = [
'BASE_APPS',
'BMC_STARTING',
'BMC_READY',
'HOST_POWERING_ON',
'HOST_POWERED_ON',
'HOST_BOOTING',
'HOST_BOOTED',
'HOST_POWERED_OFF',
]
EXIT_STATE_DEPEND = {
'BASE_APPS': {
'/org/openbmc/sensors': 0,
},
'BMC_STARTING': {
'/org/openbmc/control/chassis0': 0,
'/org/openbmc/control/power0': 0,
'/org/openbmc/control/flash/bios': 0,
},
}
INVENTORY_ROOT = '/org/openbmc/inventory'
FRU_INSTANCES = {
'<inventory_root>/system': {'fru_type': 'SYSTEM', 'is_fru': True, 'present': "True"},
'<inventory_root>/system/bios': {'fru_type': 'SYSTEM', 'is_fru': True, 'present': "True"},
'<inventory_root>/system/misc': {'fru_type': 'SYSTEM', 'is_fru': False, },
'<inventory_root>/system/chassis': {'fru_type': 'SYSTEM', 'is_fru': True, 'present': "True"},
'<inventory_root>/system/chassis/motherboard': {'fru_type': 'MAIN_PLANAR', 'is_fru': True, },
'<inventory_root>/system/systemevent': {'fru_type': 'SYSTEM_EVENT', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/refclock': {'fru_type': 'MAIN_PLANAR',
'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/pcieclock': {'fru_type': 'MAIN_PLANAR',
'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/todclock': {'fru_type': 'MAIN_PLANAR',
'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/apss': {'fru_type': 'MAIN_PLANAR',
'is_fru': False, },
'<inventory_root>/system/chassis/fan0': {'fru_type': 'FAN', 'is_fru': True, },
'<inventory_root>/system/chassis/fan1': {'fru_type': 'FAN', 'is_fru': True, },
'<inventory_root>/system/chassis/fan2': {'fru_type': 'FAN', 'is_fru': True, },
'<inventory_root>/system/chassis/fan3': {'fru_type': 'FAN', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/bmc': {'fru_type': 'BMC', 'is_fru': False,
'manufacturer': 'ASPEED'},
'<inventory_root>/system/chassis/motherboard/cpu0': {'fru_type': 'CPU', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/cpu1': {'fru_type': 'CPU', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/cpu0/core0': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core1': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core2': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core3': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core4': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core5': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core6': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core7': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core8': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core9': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core10': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core11': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core12': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core13': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core14': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core15': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core16': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core17': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core18': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core19': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core20': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core21': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core22': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu0/core23': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core0': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core1': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core2': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core3': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core4': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core5': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core6': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core7': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core8': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core9': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core10': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core11': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core12': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core13': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core14': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core15': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core16': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core17': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core18': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core19': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core20': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core21': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core22': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/cpu1/core23': {'fru_type': 'CORE', 'is_fru': False, },
'<inventory_root>/system/chassis/motherboard/dimm0': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm1': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm2': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm3': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm4': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm5': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm6': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm7': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm8': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm9': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm10': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm11': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm12': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm13': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm14': {'fru_type': 'DIMM', 'is_fru': True, },
'<inventory_root>/system/chassis/motherboard/dimm15': {'fru_type': 'DIMM', 'is_fru': True, },
}
ID_LOOKUP = {
'FRU': {
0x01: '<inventory_root>/system/chassis/motherboard/cpu0',
0x02: '<inventory_root>/system/chassis/motherboard/cpu1',
0x03: '<inventory_root>/system/chassis/motherboard',
0x04: '<inventory_root>/system/chassis/motherboard/dimm0',
0x05: '<inventory_root>/system/chassis/motherboard/dimm1',
0x06: '<inventory_root>/system/chassis/motherboard/dimm2',
0x07: '<inventory_root>/system/chassis/motherboard/dimm3',
0x08: '<inventory_root>/system/chassis/motherboard/dimm4',
0x09: '<inventory_root>/system/chassis/motherboard/dimm5',
0x0a: '<inventory_root>/system/chassis/motherboard/dimm6',
0x0b: '<inventory_root>/system/chassis/motherboard/dimm7',
0x0c: '<inventory_root>/system/chassis/motherboard/dimm8',
0x0d: '<inventory_root>/system/chassis/motherboard/dimm9',
0x0e: '<inventory_root>/system/chassis/motherboard/dimm10',
0x0f: '<inventory_root>/system/chassis/motherboard/dimm11',
0x10: '<inventory_root>/system/chassis/motherboard/dimm12',
0x11: '<inventory_root>/system/chassis/motherboard/dimm13',
0x12: '<inventory_root>/system/chassis/motherboard/dimm14',
0x13: '<inventory_root>/system/chassis/motherboard/dimm15',
},
'FRU_STR': {
'PRODUCT_0': '<inventory_root>/system/bios',
'BOARD_1': '<inventory_root>/system/chassis/motherboard/cpu0',
'BOARD_2': '<inventory_root>/system/chassis/motherboard/cpu1',
'CHASSIS_3': '<inventory_root>/system/chassis/motherboard',
'BOARD_3': '<inventory_root>/system/misc',
'PRODUCT_12': '<inventory_root>/system/chassis/motherboard/dimm0',
'PRODUCT_13': '<inventory_root>/system/chassis/motherboard/dimm1',
'PRODUCT_14': '<inventory_root>/system/chassis/motherboard/dimm2',
'PRODUCT_15': '<inventory_root>/system/chassis/motherboard/dimm3',
'PRODUCT_16': '<inventory_root>/system/chassis/motherboard/dimm4',
'PRODUCT_17': '<inventory_root>/system/chassis/motherboard/dimm5',
'PRODUCT_18': '<inventory_root>/system/chassis/motherboard/dimm6',
'PRODUCT_19': '<inventory_root>/system/chassis/motherboard/dimm7',
'PRODUCT_20': '<inventory_root>/system/chassis/motherboard/dimm8',
'PRODUCT_21': '<inventory_root>/system/chassis/motherboard/dimm9',
'PRODUCT_22': '<inventory_root>/system/chassis/motherboard/dimm10',
'PRODUCT_23': '<inventory_root>/system/chassis/motherboard/dimm11',
'PRODUCT_24': '<inventory_root>/system/chassis/motherboard/dimm12',
'PRODUCT_25': '<inventory_root>/system/chassis/motherboard/dimm13',
'PRODUCT_26': '<inventory_root>/system/chassis/motherboard/dimm14',
'PRODUCT_27': '<inventory_root>/system/chassis/motherboard/dimm15',
'PRODUCT_47': '<inventory_root>/system/misc',
},
'SENSOR': {
0x01: '/org/openbmc/sensors/host/HostStatus',
0x02: '/org/openbmc/sensors/host/BootProgress',
0x03: '/org/openbmc/sensors/host/cpu0/OccStatus',
0x04: '/org/openbmc/sensors/host/cpu1/OccStatus',
0x08: '<inventory_root>/system/chassis/motherboard/cpu0',
0x09: '<inventory_root>/system/chassis/motherboard/cpu1',
0x0b: '<inventory_root>/system/chassis/motherboard/dimm0',
0x0c: '<inventory_root>/system/chassis/motherboard/dimm1',
0x0d: '<inventory_root>/system/chassis/motherboard/dimm2',
0x0e: '<inventory_root>/system/chassis/motherboard/dimm3',
0x0f: '<inventory_root>/system/chassis/motherboard/dimm4',
0x10: '<inventory_root>/system/chassis/motherboard/dimm5',
0x11: '<inventory_root>/system/chassis/motherboard/dimm6',
0x12: '<inventory_root>/system/chassis/motherboard/dimm7',
0x13: '<inventory_root>/system/chassis/motherboard/dimm8',
0x14: '<inventory_root>/system/chassis/motherboard/dimm9',
0x15: '<inventory_root>/system/chassis/motherboard/dimm10',
0x16: '<inventory_root>/system/chassis/motherboard/dimm11',
0x17: '<inventory_root>/system/chassis/motherboard/dimm12',
0x18: '<inventory_root>/system/chassis/motherboard/dimm13',
0x19: '<inventory_root>/system/chassis/motherboard/dimm14',
0x1a: '<inventory_root>/system/chassis/motherboard/dimm15',
0x2b: '<inventory_root>/system/chassis/motherboard/cpu0/core0',
0x2c: '<inventory_root>/system/chassis/motherboard/cpu0/core1',
0x2d: '<inventory_root>/system/chassis/motherboard/cpu0/core2',
0x2e: '<inventory_root>/system/chassis/motherboard/cpu0/core3',
0x2f: '<inventory_root>/system/chassis/motherboard/cpu0/core4',
0x30: '<inventory_root>/system/chassis/motherboard/cpu0/core5',
0x31: '<inventory_root>/system/chassis/motherboard/cpu0/core6',
0x32: '<inventory_root>/system/chassis/motherboard/cpu0/core7',
0x33: '<inventory_root>/system/chassis/motherboard/cpu0/core8',
0x34: '<inventory_root>/system/chassis/motherboard/cpu0/core9',
0x35: '<inventory_root>/system/chassis/motherboard/cpu0/core10',
0x36: '<inventory_root>/system/chassis/motherboard/cpu0/core11',
0x37: '<inventory_root>/system/chassis/motherboard/cpu0/core12',
0x38: '<inventory_root>/system/chassis/motherboard/cpu0/core13',
0x39: '<inventory_root>/system/chassis/motherboard/cpu0/core14',
0x3a: '<inventory_root>/system/chassis/motherboard/cpu0/core15',
0x3b: '<inventory_root>/system/chassis/motherboard/cpu0/core16',
0x3c: '<inventory_root>/system/chassis/motherboard/cpu0/core17',
0x3d: '<inventory_root>/system/chassis/motherboard/cpu0/core18',
0x3e: '<inventory_root>/system/chassis/motherboard/cpu0/core19',
0x3f: '<inventory_root>/system/chassis/motherboard/cpu0/core20',
0x40: '<inventory_root>/system/chassis/motherboard/cpu0/core21',
0x41: '<inventory_root>/system/chassis/motherboard/cpu0/core22',
0x42: '<inventory_root>/system/chassis/motherboard/cpu0/core23',
0x43: '<inventory_root>/system/chassis/motherboard/cpu1/core0',
0x44: '<inventory_root>/system/chassis/motherboard/cpu1/core1',
0x45: '<inventory_root>/system/chassis/motherboard/cpu1/core2',
0x46: '<inventory_root>/system/chassis/motherboard/cpu1/core3',
0x47: '<inventory_root>/system/chassis/motherboard/cpu1/core4',
0x48: '<inventory_root>/system/chassis/motherboard/cpu1/core5',
0x49: '<inventory_root>/system/chassis/motherboard/cpu1/core6',
0x4a: '<inventory_root>/system/chassis/motherboard/cpu1/core7',
0x4b: '<inventory_root>/system/chassis/motherboard/cpu1/core8',
0x4c: '<inventory_root>/system/chassis/motherboard/cpu1/core9',
0x4d: '<inventory_root>/system/chassis/motherboard/cpu1/core10',
0x4e: '<inventory_root>/system/chassis/motherboard/cpu1/core11',
0x4f: '<inventory_root>/system/chassis/motherboard/cpu1/core12',
0x50: '<inventory_root>/system/chassis/motherboard/cpu1/core13',
0x51: '<inventory_root>/system/chassis/motherboard/cpu1/core14',
0x52: '<inventory_root>/system/chassis/motherboard/cpu1/core15',
0x53: '<inventory_root>/system/chassis/motherboard/cpu1/core16',
0x54: '<inventory_root>/system/chassis/motherboard/cpu1/core17',
0x55: '<inventory_root>/system/chassis/motherboard/cpu1/core18',
0x56: '<inventory_root>/system/chassis/motherboard/cpu1/core19',
0x57: '<inventory_root>/system/chassis/motherboard/cpu1/core20',
0x58: '<inventory_root>/system/chassis/motherboard/cpu1/core21',
0x59: '<inventory_root>/system/chassis/motherboard/cpu1/core22',
0x5a: '<inventory_root>/system/chassis/motherboard/cpu1/core23',
0x8b: '/org/openbmc/sensors/host/BootCount',
0x8c: '<inventory_root>/system/chassis/motherboard',
0x8d: '<inventory_root>/system/chassis/motherboard/refclock',
0x8e: '<inventory_root>/system/chassis/motherboard/pcieclock',
0x8f: '<inventory_root>/system/chassis/motherboard/todclock',
0x90: '<inventory_root>/system/systemevent',
0x91: '/org/openbmc/sensors/host/OperatingSystemStatus',
0x92: '<inventory_root>/system/chassis/motherboard/pcielink',
# 0x08 : '<inventory_root>/system/powerlimit',
# 0x10 : '<inventory_root>/system/chassis/motherboard/apss',
# 0x06 : '/org/openbmc/sensors/host/powercap',
},
'GPIO_PRESENT': {}
}
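# Example lookups; the '<inventory_root>' prefix is presumably substituted
# with INVENTORY_ROOT by the code that consumes these tables:
#   ID_LOOKUP['SENSOR'][0x02] -> '/org/openbmc/sensors/host/BootProgress'
#   ID_LOOKUP['FRU'][0x03]    -> '<inventory_root>/system/chassis/motherboard'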
GPIO_CONFIG = {}
GPIO_CONFIG['SOFTWARE_PGOOD'] = \
{'gpio_pin': 'R1', 'direction': 'out'}
GPIO_CONFIG['BMC_POWER_UP'] = \
{'gpio_pin': 'D1', 'direction': 'out'}
GPIO_CONFIG['SYS_PWROK_BUFF'] = \
{'gpio_pin': 'D2', 'direction': 'in'}
GPIO_CONFIG['BMC_WD_CLEAR_PULSE_N'] = \
{'gpio_pin': 'N5', 'direction': 'out'}
GPIO_CONFIG['CHECKSTOP'] = \
{'gpio_pin': 'J2', 'direction': 'falling'}
GPIO_CONFIG['BMC_CP0_RESET_N'] = \
{'gpio_pin': 'A1', 'direction': 'out'}
GPIO_CONFIG['BMC_CP0_PERST_ENABLE_R'] = \
{'gpio_pin': 'A3', 'direction': 'out'}
GPIO_CONFIG['FSI_DATA'] = \
{'gpio_pin': 'AA2', 'direction': 'out'}
GPIO_CONFIG['FSI_CLK'] = \
{'gpio_pin': 'AA0', 'direction': 'out'}
GPIO_CONFIG['FSI_ENABLE'] = \
{'gpio_pin': 'D0', 'direction': 'out'}
# DBG_CP0_MUX_SEL
GPIO_CONFIG['CRONUS_SEL'] = \
{'gpio_pin': 'A6', 'direction': 'out'}
GPIO_CONFIG['BMC_THROTTLE'] = \
{'gpio_pin': 'J3', 'direction': 'out'}
GPIO_CONFIG['IDBTN'] = \
{'gpio_pin': 'Q7', 'direction': 'out'}
# PM_FP_PWRBTN_IN_L
GPIO_CONFIG['POWER_BUTTON'] = \
{'gpio_pin': 'I3', 'direction': 'both'}
# PM_NMIBTN_IN_L
GPIO_CONFIG['RESET_BUTTON'] = \
{'gpio_pin': 'J1', 'direction': 'both'}
HWMON_CONFIG = {
'4-0050': {
'names': {
'caps_curr_powercap': {'object_path': 'powercap/curr_cap', 'poll_interval': 10000,
'scale': 1, 'units': 'W'},
'caps_curr_powerreading': {'object_path': 'powercap/system_power',
'poll_interval': 10000, 'scale': 1, 'units': 'W'},
'caps_max_powercap': {'object_path': 'powercap/max_cap', 'poll_interval': 10000,
'scale': 1, 'units': 'W'},
'caps_min_powercap': {'object_path': 'powercap/min_cap', 'poll_interval': 10000,
'scale': 1, 'units': 'W'},
'caps_norm_powercap': {'object_path': 'powercap/n_cap', 'poll_interval': 10000,
'scale': 1, 'units': 'W'},
'caps_user_powerlimit': {'object_path': 'powercap/user_cap', 'poll_interval': 10000,
'scale': 1, 'units': 'W'},
},
'labels': {
'176': {'object_path': 'temperature/cpu0/core0', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'177': {'object_path': 'temperature/cpu0/core1', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'178': {'object_path': 'temperature/cpu0/core2', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'179': {'object_path': 'temperature/cpu0/core3', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'180': {'object_path': 'temperature/cpu0/core4', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'181': {'object_path': 'temperature/cpu0/core5', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'182': {'object_path': 'temperature/cpu0/core6', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'183': {'object_path': 'temperature/cpu0/core7', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'184': {'object_path': 'temperature/cpu0/core8', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'185': {'object_path': 'temperature/cpu0/core9', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'186': {'object_path': 'temperature/cpu0/core10', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'187': {'object_path': 'temperature/cpu0/core11', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'102': {'object_path': 'temperature/dimm0', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'103': {'object_path': 'temperature/dimm1', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'104': {'object_path': 'temperature/dimm2', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'105': {'object_path': 'temperature/dimm3', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'106': {'object_path': 'temperature/dimm4', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'107': {'object_path': 'temperature/dimm5', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'108': {'object_path': 'temperature/dimm6', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'109': {'object_path': 'temperature/dimm7', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
}
},
'5-0050': {
'labels': {
'188': {'object_path': 'temperature/cpu1/core0', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'189': {'object_path': 'temperature/cpu1/core1', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'190': {'object_path': 'temperature/cpu1/core2', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'191': {'object_path': 'temperature/cpu1/core3', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'192': {'object_path': 'temperature/cpu1/core4', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'193': {'object_path': 'temperature/cpu1/core5', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'194': {'object_path': 'temperature/cpu1/core6', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'195': {'object_path': 'temperature/cpu1/core7', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'196': {'object_path': 'temperature/cpu1/core8', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'197': {'object_path': 'temperature/cpu1/core9', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'198': {'object_path': 'temperature/cpu1/core10', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'199': {'object_path': 'temperature/cpu1/core11', 'poll_interval': 5000, 'scale': -3,
'units': 'C',
'critical_upper': 100, 'critical_lower': -100, 'warning_upper': 90,
'warning_lower': -99, 'emergency_enabled': True},
'110': {'object_path': 'temperature/dimm8', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'111': {'object_path': 'temperature/dimm9', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'112': {'object_path': 'temperature/dimm10', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'113': {'object_path': 'temperature/dimm11', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'114': {'object_path': 'temperature/dimm12', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'115': {'object_path': 'temperature/dimm13', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'116': {'object_path': 'temperature/dimm14', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
'117': {'object_path': 'temperature/dimm15', 'poll_interval': 5000, 'scale': -3,
'units': 'C'},
}
},
}
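# Assumed semantics of the HWMON_CONFIG fields above, following the usual
# hwmon sysfs conventions: 'poll_interval' is in milliseconds, 'scale' is a
# power-of-ten exponent applied to the raw reading (e.g. -3 converts
# millidegrees C to degrees C), and the critical/warning bounds are expressed
# in the listed 'units'.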
GPIO_CONFIGS = {
'power_config': {
'power_good_in': 'SYS_PWROK_BUFF',
'power_up_outs': [
('SOFTWARE_PGOOD', True),
('BMC_POWER_UP', True),
],
'reset_outs': [
('BMC_CP0_RESET_N', False),
('BMC_CP0_PERST_ENABLE_R', False),
],
},
'hostctl_config': {
'fsi_data': 'FSI_DATA',
'fsi_clk': 'FSI_CLK',
'fsi_enable': 'FSI_ENABLE',
'cronus_sel': 'CRONUS_SEL',
'optionals': [
],
},
}
# Miscellaneous non-polling sensors with system-specific properties.
# The sensor ids are the same as those defined in ID_LOOKUP['SENSOR'].
MISC_SENSORS = {
0x8b: {'class': 'BootCountSensor'},
0x02: {'class': 'BootProgressSensor'},
# OCC active sensors aren't in the P9 XML yet. These are wrong.
0x03: {'class': 'OccStatusSensor',
'os_path': '/sys/bus/i2c/devices/3-0050/online'},
0x04: {'class': 'OccStatusSensor',
'os_path': '/sys/bus/i2c/devices/3-0051/online'},
0x91: {'class': 'OperatingSystemStatusSensor'},
# 0x06 : { 'class' : 'PowerCap',
# 'os_path' : '/sys/class/hwmon/hwmon3/user_powercap' },
}
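# Minimal self-check sketch (not part of the OpenBMC runtime): running this
# config directly verifies that every MISC_SENSORS id also appears in
# ID_LOOKUP['SENSOR'], as the comment above expects.
if __name__ == '__main__':
    missing = [hex(sensor_id) for sensor_id in MISC_SENSORS
               if sensor_id not in ID_LOOKUP['SENSOR']]
    if missing:
        print("MISC_SENSORS ids missing from ID_LOOKUP['SENSOR']: %s"
              % ", ".join(missing))
    else:
        print("All MISC_SENSORS ids are present in ID_LOOKUP['SENSOR'].")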
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
from __future__ import division, print_function, absolute_import
import os
import time
import inspect
import json
import traceback
from collections import defaultdict, OrderedDict
import numpy as np
try:
import scipy.optimize
from scipy.optimize.optimize import rosen, rosen_der, rosen_hess
from scipy.optimize import (leastsq, basinhopping, differential_evolution,
dual_annealing, OptimizeResult)
from scipy.optimize._minimize import MINIMIZE_METHODS
except ImportError:
pass
from . import test_functions as funcs
from . import go_benchmark_functions as gbf
from .common import Benchmark
from .lsq_problems import extract_lsq_problems
class _BenchOptimizers(Benchmark):
"""a framework for benchmarking the optimizer
Parameters
----------
function_name : string
fun : callable
der : callable
function that returns the derivative (jacobian, gradient) of fun
hess : callable
function that returns the hessian of fun
minimizer_kwargs : kwargs
additional keywords passed to the minimizer. e.g. tol, maxiter
"""
def __init__(self, function_name, fun, der=None, hess=None,
**minimizer_kwargs):
self.function_name = function_name
self.fun = fun
self.der = der
self.hess = hess
self.minimizer_kwargs = minimizer_kwargs
if "tol" not in minimizer_kwargs:
minimizer_kwargs["tol"] = 1e-4
self.results = []
@classmethod
def from_funcobj(cls, function_name, function, **minimizer_kwargs):
self = cls.__new__(cls)
self.function_name = function_name
self.function = function
self.fun = function.fun
if hasattr(function, 'der'):
self.der = function.der
self.bounds = function.bounds
self.minimizer_kwargs = minimizer_kwargs
self.results = []
return self
def reset(self):
self.results = []
def energy_gradient(self, x):
return self.fun(x), self.function.der(x)
def add_result(self, result, t, name):
"""add a result to the list"""
result.time = t
result.name = name
if not hasattr(result, "njev"):
result.njev = 0
if not hasattr(result, "nhev"):
result.nhev = 0
self.results.append(result)
def print_results(self):
"""print the current list of results"""
results = self.average_results()
results = sorted(results, key=lambda x: (x.nfail, x.mean_time))
if not results:
return
print("")
print("=========================================================")
print("Optimizer benchmark: %s" % (self.function_name))
print("dimensions: %d, extra kwargs: %s" % (results[0].ndim, str(self.minimizer_kwargs)))
print("averaged over %d starting configurations" % (results[0].ntrials))
print(" Optimizer nfail nfev njev nhev time")
print("---------------------------------------------------------")
for res in results:
print("%11s | %4d | %4d | %4d | %4d | %.6g" %
(res.name, res.nfail, res.mean_nfev, res.mean_njev, res.mean_nhev, res.mean_time))
def average_results(self):
"""group the results by minimizer and average over the runs"""
grouped_results = defaultdict(list)
for res in self.results:
grouped_results[res.name].append(res)
averaged_results = dict()
for name, result_list in grouped_results.items():
newres = scipy.optimize.OptimizeResult()
newres.name = name
newres.mean_nfev = np.mean([r.nfev for r in result_list])
newres.mean_njev = np.mean([r.njev for r in result_list])
newres.mean_nhev = np.mean([r.nhev for r in result_list])
newres.mean_time = np.mean([r.time for r in result_list])
newres.ntrials = len(result_list)
newres.nfail = len([r for r in result_list if not r.success])
newres.nsuccess = len([r for r in result_list if r.success])
try:
newres.ndim = len(result_list[0].x)
except TypeError:
newres.ndim = 1
averaged_results[name] = newres
return averaged_results
# for basinhopping
def accept_test(self, x_new=None, *args, **kwargs):
"""
        Does the new candidate vector lie within the bounds?
Returns
-------
accept_test : bool
            The candidate vector lies within the bounds
"""
if not hasattr(self.function, "xmin"):
return True
if np.any(x_new < self.function.xmin):
return False
if np.any(x_new > self.function.xmax):
return False
return True
def run_basinhopping(self):
"""
Do an optimization run for basinhopping
"""
kwargs = self.minimizer_kwargs
        if hasattr(self.function, "temperature"):
kwargs["T"] = self.function.temperature
        if hasattr(self.function, "stepsize"):
kwargs["stepsize"] = self.function.stepsize
minimizer_kwargs = {"method": "L-BFGS-B"}
x0 = self.function.initial_vector()
# basinhopping - no gradient
minimizer_kwargs['jac'] = False
self.function.nfev = 0
t0 = time.time()
res = basinhopping(
self.fun, x0, accept_test=self.accept_test,
minimizer_kwargs=minimizer_kwargs,
**kwargs)
t1 = time.time()
res.success = self.function.success(res.x)
res.nfev = self.function.nfev
self.add_result(res, t1 - t0, 'basinh.')
def run_differentialevolution(self):
"""
Do an optimization run for differential_evolution
"""
self.function.nfev = 0
t0 = time.time()
res = differential_evolution(self.fun,
self.bounds,
popsize=20)
t1 = time.time()
res.success = self.function.success(res.x)
res.nfev = self.function.nfev
self.add_result(res, t1 - t0, 'DE')
def run_dualannealing(self):
"""
Do an optimization run for dual_annealing
"""
self.function.nfev = 0
t0 = time.time()
        res = dual_annealing(self.fun,
                             self.bounds)
t1 = time.time()
res.success = self.function.success(res.x)
res.nfev = self.function.nfev
self.add_result(res, t1 - t0, 'DA')
def bench_run_global(self, numtrials=50, methods=None):
"""
Run the optimization tests for the required minimizers.
"""
if methods is None:
methods = ['DE', 'basinh.', 'DA']
method_fun = {'DE': self.run_differentialevolution,
'basinh.': self.run_basinhopping,
'DA': self.run_dualannealing,}
for i in range(numtrials):
for m in methods:
method_fun[m]()
def bench_run(self, x0, methods=None, **minimizer_kwargs):
"""do an optimization test starting at x0 for all the optimizers"""
kwargs = self.minimizer_kwargs
if methods is None:
methods = MINIMIZE_METHODS
# L-BFGS-B, BFGS, trust-constr can use gradients, but examine
# performance when numerical differentiation is used.
fonly_methods = ["COBYLA", 'Powell', 'nelder-mead', 'L-BFGS-B', 'BFGS',
'trust-constr']
for method in fonly_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
**kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
gradient_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
'trust-constr']
if self.der is not None:
for method in gradient_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
jac=self.der, **kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
hessian_methods = ["Newton-CG", 'dogleg', 'trust-ncg',
'trust-exact', 'trust-krylov', 'trust-constr']
if self.hess is not None:
for method in hessian_methods:
if method not in methods:
continue
t0 = time.time()
res = scipy.optimize.minimize(self.fun, x0, method=method,
jac=self.der, hess=self.hess,
**kwargs)
t1 = time.time()
self.add_result(res, t1-t0, method)
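# Example (sketch) of driving _BenchOptimizers by hand, outside of asv; this
# is the same pattern the run_* helpers in BenchSmoothUnbounded below use:
#
#     b = _BenchOptimizers("Rosenbrock function",
#                          fun=rosen, der=rosen_der, hess=rosen_hess)
#     for i in range(10):
#         b.bench_run(np.random.uniform(-3, 3, 3), methods=['L-BFGS-B'])
#     b.print_results()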
class BenchSmoothUnbounded(Benchmark):
"""Benchmark the optimizers with smooth, unbounded, functions"""
params = [
['rosenbrock_slow', 'rosenbrock_nograd', 'rosenbrock', 'rosenbrock_tight',
'simple_quadratic', 'asymmetric_quadratic',
'sin_1d', 'booth', 'beale', 'LJ'],
["COBYLA", 'Powell', 'nelder-mead',
'L-BFGS-B', 'BFGS', 'CG', 'TNC', 'SLSQP',
"Newton-CG", 'dogleg', 'trust-ncg', 'trust-exact',
'trust-krylov', 'trust-constr'],
["mean_nfev", "mean_time"]
]
param_names = ["test function", "solver", "result type"]
def setup(self, func_name, method_name, ret_val):
b = getattr(self, 'run_' + func_name)(methods=[method_name])
r = b.average_results().get(method_name)
if r is None:
raise NotImplementedError()
self.result = getattr(r, ret_val)
def track_all(self, func_name, method_name, ret_val):
return self.result
# SlowRosen has a 50us delay on each function evaluation. By comparing to
# rosenbrock_nograd it should be possible to figure out how much time a
# minimizer uses internally, compared to the time required for function
# evaluation.
def run_rosenbrock_slow(self, methods=None):
s = funcs.SlowRosen()
b = _BenchOptimizers("Rosenbrock function",
fun=s.fun)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
    # See how the solvers perform when numerical differentiation has to be
    # used.
def run_rosenbrock_nograd(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_rosenbrock(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen, der=rosen_der, hess=rosen_hess)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_rosenbrock_tight(self, methods=None):
b = _BenchOptimizers("Rosenbrock function",
fun=rosen, der=rosen_der, hess=rosen_hess,
tol=1e-8)
for i in range(10):
b.bench_run(np.random.uniform(-3, 3, 3), methods=methods)
return b
def run_simple_quadratic(self, methods=None):
s = funcs.SimpleQuadratic()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("simple quadratic function",
fun=s.fun, der=s.der, hess=s.hess)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 3), methods=methods)
return b
def run_asymmetric_quadratic(self, methods=None):
s = funcs.AsymmetricQuadratic()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("function sum(x**2) + x[0]",
fun=s.fun, der=s.der, hess=s.hess)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 3), methods=methods)
return b
def run_sin_1d(self, methods=None):
fun = lambda x: np.sin(x[0])
der = lambda x: np.array([np.cos(x[0])])
b = _BenchOptimizers("1d sin function",
fun=fun, der=der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, 1), methods=methods)
return b
def run_booth(self, methods=None):
s = funcs.Booth()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("Booth's function",
fun=s.fun, der=s.der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(0, 10, 2), methods=methods)
return b
def run_beale(self, methods=None):
s = funcs.Beale()
# print "checking gradient", scipy.optimize.check_grad(s.fun, s.der, np.array([1.1, -2.3]))
b = _BenchOptimizers("Beale's function",
fun=s.fun, der=s.der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(0, 10, 2), methods=methods)
return b
def run_LJ(self, methods=None):
s = funcs.LJ()
# print "checking gradient", scipy.optimize.check_grad(s.get_energy, s.get_gradient,
# np.random.uniform(-2,2,3*4))
natoms = 4
b = _BenchOptimizers("%d atom Lennard Jones potential" % (natoms),
fun=s.fun, der=s.der, hess=None)
for i in range(10):
b.bench_run(np.random.uniform(-2, 2, natoms*3), methods=methods)
return b
class BenchLeastSquares(Benchmark):
"""Class for benchmarking nonlinear least squares solvers."""
problems = extract_lsq_problems()
params = [
list(problems.keys()),
["average time", "nfev", "success"]
]
param_names = [
"problem", "result type"
]
def track_all(self, problem_name, result_type):
problem = self.problems[problem_name]
if problem.lb is not None or problem.ub is not None:
raise NotImplementedError
ftol = 1e-5
if result_type == 'average time':
n_runs = 10
t0 = time.time()
for _ in range(n_runs):
leastsq(problem.fun, problem.x0, Dfun=problem.jac, ftol=ftol,
full_output=True)
return (time.time() - t0) / n_runs
x, cov_x, info, message, ier = leastsq(
problem.fun, problem.x0, Dfun=problem.jac,
ftol=ftol, full_output=True
)
if result_type == 'nfev':
return info['nfev']
elif result_type == 'success':
return int(problem.check_answer(x, ftol))
else:
raise NotImplementedError
try:
    # The value of SCIPY_XSLOW controls how many repeats of each benchmark
    # function are run.
slow = int(os.environ.get('SCIPY_XSLOW', 0))
except ValueError:
    slow = 0
_func_names = os.environ.get('SCIPY_GLOBAL_BENCH', [])
if _func_names:
if not slow:
slow = 100
_func_names = [x.strip() for x in _func_names.split(',')]
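# Summary of the environment knobs above: SCIPY_XSLOW=<n> enables the global
# benchmarks with <n> trials per function, and SCIPY_GLOBAL_BENCH optionally
# restricts them to a comma-separated list of go_benchmark_functions (e.g.
# "AMGM,Adjiman"); when SCIPY_GLOBAL_BENCH is set but SCIPY_XSLOW is not,
# the trial count defaults to 100.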
class BenchGlobal(Benchmark):
"""
Benchmark the global optimizers using the go_benchmark_functions
suite
"""
timeout = 300
_functions = OrderedDict([
item for item in inspect.getmembers(gbf, inspect.isclass)
if (issubclass(item[1], gbf.Benchmark) and
item[0] not in ('Benchmark') and
not item[0].startswith('Problem'))
])
if _func_names:
_filtered_funcs = OrderedDict()
for name in _func_names:
if name in _functions:
_filtered_funcs[name] = _functions.get(name)
_functions = _filtered_funcs
if not slow:
_functions = {'AMGM': None}
params = [
list(_functions.keys()),
["success%", "<nfev>"],
['DE', 'basinh.', 'DA'],
]
param_names = ["test function", "result type", "solver"]
def __init__(self):
self.enabled = bool(slow)
self.numtrials = slow
self.dump_fn = os.path.join(os.path.dirname(__file__), '..', 'global-bench-results.json')
self.results = {}
def setup(self, name, ret_value, solver):
if not self.enabled:
print("BenchGlobal.track_all not enabled --- export SCIPY_XSLOW=slow to enable,\n"
"'slow' iterations of each benchmark will be run.\n"
"Note that it can take several hours to run; intermediate output\n"
"can be found under benchmarks/global-bench-results.json\n"
"You can specify functions to benchmark via SCIPY_GLOBAL_BENCH=AMGM,Adjiman,...")
raise NotImplementedError()
# load json backing file
with open(self.dump_fn, 'r') as f:
self.results = json.load(f)
def teardown(self, name, ret_value, solver):
with open(self.dump_fn, 'w') as f:
json.dump(self.results, f, indent=2, sort_keys=True)
def track_all(self, name, ret_value, solver):
if name in self.results and solver in self.results[name]:
# have we done the function, and done the solver?
# if so, then just return the ret_value
av_results = self.results[name]
if ret_value == 'success%':
return 100 * av_results[solver]['nsuccess'] / av_results[solver]['ntrials']
elif ret_value == '<nfev>':
return av_results[solver]['mean_nfev']
else:
raise ValueError()
klass = self._functions[name]
f = klass()
try:
b = _BenchOptimizers.from_funcobj(name, f)
with np.errstate(all='ignore'):
b.bench_run_global(methods=[solver],
numtrials=self.numtrials)
av_results = b.average_results()
if name not in self.results:
self.results[name] = {}
self.results[name][solver] = av_results[solver]
if ret_value == 'success%':
return 100 * av_results[solver]['nsuccess'] / av_results[solver]['ntrials']
elif ret_value == '<nfev>':
return av_results[solver]['mean_nfev']
else:
raise ValueError()
except Exception:
print("".join(traceback.format_exc()))
self.results[name] = "".join(traceback.format_exc())
def setup_cache(self):
if not self.enabled:
return
# create the logfile to start with
with open(self.dump_fn, 'w') as f:
json.dump({}, f, indent=2)
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os.path
from robot.errors import DataError
from robot.utils import (get_error_details, is_string, is_list_like,
is_dict_like, split_args_from_name_or_path,
type_name, Importer)
from .loggerhelper import AbstractLoggerProxy
from .logger import LOGGER
class _RecursionAvoidingMetaclass(type):
"""Metaclass to wrap listener methods so that they cannot cause recursion.
Recursion would otherwise happen if one listener logs something and that
    message is received and logged again by the log_message or message method.
"""
def __new__(cls, name, bases, dct):
for attr, value in dct.items():
if not attr.startswith('_') and inspect.isroutine(value):
dct[attr] = cls._wrap_listener_method(value)
dct['_calling_method'] = False
return type.__new__(cls, name, bases, dct)
@staticmethod
def _wrap_listener_method(method):
def wrapped(self, *args):
if not self._calling_method:
self._calling_method = True
method(self, *args)
self._calling_method = False
return wrapped
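# How the guard above plays out: if a wrapped listener method (for example
# log_message) ends up emitting another log message, the re-entrant call sees
# _calling_method == True and returns immediately instead of recursing.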
class Listeners(object):
__metaclass__ = _RecursionAvoidingMetaclass
_start_attrs = ('id', 'doc', 'starttime', 'longname')
_end_attrs = _start_attrs + ('endtime', 'elapsedtime', 'status', 'message')
_kw_extra_attrs = ('args', 'assign', 'kwname', 'libname',
'-id', '-longname', '-message')
def __init__(self, listeners):
self._listeners = self._import_listeners(listeners)
self._running_test = False
self._setup_or_teardown_type = None
def __nonzero__(self):
return bool(self._listeners)
def _import_listeners(self, listener_data):
listeners = []
for listener in listener_data:
try:
listeners.append(ListenerProxy(listener))
except DataError as err:
if not is_string(listener):
listener = type_name(listener)
LOGGER.error("Taking listener '%s' into use failed: %s"
% (listener, unicode(err)))
return listeners
def start_suite(self, suite):
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.start_suite, suite.name, suite.doc)
else:
attrs = self._get_start_attrs(suite, 'metadata')
attrs.update(self._get_suite_attrs(suite))
listener.call_method(listener.start_suite, suite.name, attrs)
def _get_suite_attrs(self, suite):
return {
'tests' : [t.name for t in suite.tests],
'suites': [s.name for s in suite.suites],
'totaltests': suite.test_count,
'source': suite.source or ''
}
def end_suite(self, suite):
for listener in self._listeners:
self._notify_end_suite(listener, suite)
def _notify_end_suite(self, listener, suite):
if listener.version == 1:
listener.call_method(listener.end_suite, suite.status,
suite.full_message)
else:
attrs = self._get_end_attrs(suite, 'metadata')
attrs['statistics'] = suite.stat_message
attrs.update(self._get_suite_attrs(suite))
listener.call_method(listener.end_suite, suite.name, attrs)
def start_test(self, test):
self._running_test = True
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.start_test, test.name, test.doc,
list(test.tags))
else:
attrs = self._get_start_attrs(test, 'tags')
attrs['critical'] = 'yes' if test.critical else 'no'
attrs['template'] = test.template or ''
listener.call_method(listener.start_test, test.name, attrs)
def end_test(self, test):
self._running_test = False
for listener in self._listeners:
self._notify_end_test(listener, test)
def _notify_end_test(self, listener, test):
if listener.version == 1:
listener.call_method(listener.end_test, test.status, test.message)
else:
attrs = self._get_end_attrs(test, 'tags')
attrs['critical'] = 'yes' if test.critical else 'no'
attrs['template'] = test.template or ''
listener.call_method(listener.end_test, test.name, attrs)
def start_keyword(self, kw):
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.start_keyword, kw.name, kw.args)
else:
attrs = self._get_start_attrs(kw, *self._kw_extra_attrs)
attrs['type'] = self._get_keyword_type(kw, start=True)
listener.call_method(listener.start_keyword, kw.name, attrs)
def end_keyword(self, kw):
for listener in self._listeners:
if listener.version == 1:
listener.call_method(listener.end_keyword, kw.status)
else:
attrs = self._get_end_attrs(kw, *self._kw_extra_attrs)
attrs['type'] = self._get_keyword_type(kw, start=False)
listener.call_method(listener.end_keyword, kw.name, attrs)
def _get_keyword_type(self, kw, start=True):
# When running setup or teardown, only the top level keyword has type
# set to setup/teardown but we want to pass that type also to all
# start/end_keyword listener methods called below that keyword.
if kw.type == 'kw':
return self._setup_or_teardown_type or 'Keyword'
kw_type = self._get_setup_or_teardown_type(kw)
self._setup_or_teardown_type = kw_type if start else None
return kw_type
def _get_setup_or_teardown_type(self, kw):
return '%s %s' % (('Test' if self._running_test else 'Suite'),
kw.type.title())
def imported(self, import_type, name, attrs):
for listener in self._listeners:
method = getattr(listener, '%s_import' % import_type.lower())
listener.call_method(method, name, attrs)
def log_message(self, msg):
for listener in self._listeners:
if listener.version == 2:
listener.call_method(listener.log_message, self._create_msg_dict(msg))
def message(self, msg):
for listener in self._listeners:
if listener.version == 2:
listener.call_method(listener.message, self._create_msg_dict(msg))
def _create_msg_dict(self, msg):
return {'timestamp': msg.timestamp, 'message': msg.message,
'level': msg.level, 'html': 'yes' if msg.html else 'no'}
def output_file(self, file_type, path):
for listener in self._listeners:
method = getattr(listener, '%s_file' % file_type.lower())
listener.call_method(method, path)
def close(self):
for listener in self._listeners:
listener.call_method(listener.close)
def _get_start_attrs(self, item, *extra):
return self._get_attrs(item, self._start_attrs, extra)
def _get_end_attrs(self, item, *extra):
return self._get_attrs(item, self._end_attrs, extra)
def _get_attrs(self, item, default, extra):
names = self._get_attr_names(default, extra)
return dict((n, self._get_attr_value(item, n)) for n in names)
def _get_attr_names(self, default, extra):
names = list(default)
for name in extra:
if not name.startswith('-'):
names.append(name)
elif name[1:] in names:
names.remove(name[1:])
return names
def _get_attr_value(self, item, name):
value = getattr(item, name)
return self._take_copy_of_mutable_value(value)
def _take_copy_of_mutable_value(self, value):
if is_dict_like(value):
return dict(value)
if is_list_like(value):
return list(value)
return value
class ListenerProxy(AbstractLoggerProxy):
_methods = ['start_suite', 'end_suite', 'start_test', 'end_test',
'start_keyword', 'end_keyword', 'log_message', 'message',
'output_file', 'report_file', 'log_file', 'debug_file',
'xunit_file', 'close', 'library_import', 'resource_import',
'variables_import']
def __init__(self, listener):
if is_string(listener):
name, args = split_args_from_name_or_path(listener)
listener = self._import_listener(name, args)
else:
name = type_name(listener)
AbstractLoggerProxy.__init__(self, listener)
self.name = name
self.version = self._get_version(listener)
if self.version == 1:
LOGGER.warn("Listener '%s' uses deprecated API version 1. "
"Switch to API version 2 instead." % self.name)
def _import_listener(self, name, args):
importer = Importer('listener')
return importer.import_class_or_module(os.path.normpath(name),
instantiate_with_args=args)
def _get_version(self, listener):
try:
return int(getattr(listener, 'ROBOT_LISTENER_API_VERSION', 1))
except ValueError:
return 1
def call_method(self, method, *args):
try:
method(*args)
except:
message, details = get_error_details()
LOGGER.error("Calling listener method '%s' of listener '%s' "
"failed: %s" % (method.__name__, self.name, message))
LOGGER.info("Details:\n%s" % details)
|
|
from functools import wraps
from threepio import logger
from django.utils import timezone
from rest_framework import exceptions, status
from rest_framework.decorators import detail_route
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet
from core import exceptions as core_exceptions
from core.models import IdentityMembership, CloudAdministrator
from core.models.status_type import StatusType
from api.permissions import (
ApiAuthOptional, ApiAuthRequired, EnabledUserRequired,
InMaintenance, CloudAdminRequired
)
from api.v2.views.mixins import MultipleFieldLookup
def unresolved_requests_only(fn):
"""
    Allow only staff or the request's creator to act; non-staff users may not
    modify a request once it has been resolved.
"""
@wraps(fn)
def wrapper(self, request, *args, **kwargs):
instance = self.get_object()
        staff_can_act = (
            request.user.is_staff or
            request.user.is_superuser or
            CloudAdministrator.objects.filter(user=request.user.id).exists())
user_can_act = request.user == getattr(instance, 'created_by')
if not (user_can_act or staff_can_act):
message = (
"Method '%s' not allowed: "
"Only staff members and the owner are authorized to make this request."
% self.request.method
)
raise exceptions.NotAuthenticated(detail=message)
if not staff_can_act and (hasattr(instance, "is_closed") and instance.is_closed()):
message = (
"Method '%s' not allowed: "
"the request has already been resolved "
"and cannot be modified by a non-staff user."
% self.request.method
)
raise exceptions.MethodNotAllowed(self.request.method,
detail=message)
else:
return fn(self, request, *args, **kwargs)
return wrapper
class AuthViewSet(ModelViewSet):
http_method_names = ['get', 'put', 'patch', 'post',
'delete', 'head', 'options', 'trace']
permission_classes = (InMaintenance,
EnabledUserRequired,
ApiAuthRequired,)
class AdminAuthViewSet(AuthViewSet):
permission_classes = (InMaintenance,
CloudAdminRequired,
EnabledUserRequired,
ApiAuthRequired,)
class AuthOptionalViewSet(ModelViewSet):
permission_classes = (InMaintenance,
ApiAuthOptional,)
class AuthReadOnlyViewSet(ReadOnlyModelViewSet):
permission_classes = (InMaintenance,
ApiAuthOptional,)
class OwnerUpdateViewSet(AuthViewSet):
"""
Base class ViewSet to handle the case where a normal user should see 'GET'
and an owner (or admin) should be allowed to PUT or PATCH
"""
http_method_names = ['get', 'put', 'patch', 'post',
'delete', 'head', 'options', 'trace']
@property
def allowed_methods(self):
raise Exception("The @property-method 'allowed_methods' should be"
" handled by the subclass of OwnerUpdateViewSet")
class BaseRequestViewSet(MultipleFieldLookup, AuthViewSet):
"""
Base class ViewSet to handle requests
"""
admin_serializer_class = None
model = None
lookup_fields = ("id", "uuid")
def get_queryset(self):
"""
Return users requests or all the requests if the user is an admin.
"""
assert self.model is not None, (
"%s should include a `model` attribute."
% self.__class__.__name__
)
if self.request.user.is_staff:
return self.model.objects.all().order_by('-start_date')
return self.model.objects.filter(created_by=self.request.user).order_by('-start_date')
def get_serializer_class(self):
"""
Return the `serializer_class` or `admin_serializer_class`
given the users privileges.
"""
assert self.admin_serializer_class is not None, (
"%s should include an `admin_serializer_class` attribute."
% self.__class__.__name__
)
http_method = self.request._request.method
if http_method != 'POST' and self.request.user.is_staff:
return self.admin_serializer_class
return self.serializer_class
def perform_create(self, serializer):
        # NOTE: An identity could possibly have multiple memberships
# It may be better to directly take membership rather than an identity
identity_id = serializer.initial_data.get("identity")
status, _ = StatusType.objects.get_or_create(name="pending")
try:
# NOTE: This is *NOT* going to be a sufficient query when sharing..
membership = IdentityMembership.objects.get(identity=identity_id)
instance = serializer.save(
membership=membership,
status=status,
created_by=self.request.user
)
if serializer.initial_data.get("admin_url"):
admin_url = serializer.initial_data.get("admin_url") + str(instance.id)
self.submit_action(instance, options={"admin_url": admin_url})
else:
self.submit_action(instance)
except (core_exceptions.ProviderLimitExceeded, # NOTE: DEPRECATED -- REMOVE SOON, USE BELOW.
core_exceptions.RequestLimitExceeded):
message = "Only one active request is allowed per provider."
raise exceptions.MethodNotAllowed('create', detail=message)
except core_exceptions.InvalidMembership:
message = (
"The user '%s' is not a valid member."
% self.request.user.username
)
raise exceptions.ParseError(detail=message)
except IdentityMembership.DoesNotExist:
message = (
"The identity '%s' does not have a membership"
% identity_id
)
raise exceptions.ParseError(detail=message)
except Exception as e:
message = str(e)
raise exceptions.ParseError(detail=message)
@unresolved_requests_only
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
try:
self.perform_destroy(instance)
return Response(status=status.HTTP_204_NO_CONTENT)
except Exception as e:
            message = (
                "An error was encountered when closing the request: %s" % e.message
            )
logger.exception(e)
raise exceptions.ParseError(detail=message)
@detail_route()
def approve(self, *args, **kwargs):
"""
See the deny docs
"""
request_obj = self.get_object()
SerializerCls = self.get_serializer_class()
serializer = SerializerCls(
request_obj, context={'request': self.request})
if not request_obj:
raise ValidationError(
"Request unknown. "
"Could not approve request."
)
if not serializer.is_valid():
raise ValidationError(
"Serializer could not be validated: %s"
"Could not approve request."
% (serializer.errors,)
)
approve_status = StatusType.objects.get(name='approved')
request_obj = serializer.save(status=approve_status)
self.approve_action(request_obj)
return Response(serializer.data)
@detail_route()
def deny(self, *args, **kwargs):
"""
        #FIXME: Both of these actions do something similar, and they also
        'create and abuse' serializers. Is there a better way to handle this?
        Let's look into how `create` vs `perform_create` is called in a normal
        DRF view.
"""
request_obj = self.get_object()
SerializerCls = self.get_serializer_class()
if not request_obj:
raise ValidationError(
"Request unknown. "
"Could not deny request."
)
# Mocking a validation of data...
serializer = SerializerCls(
request_obj, data={}, partial=True,
context={'request': self.request})
if not serializer.is_valid():
raise ValidationError(
"Serializer could not be validated: %s"
"Could not deny request."
% (serializer.errors,)
)
deny_status = StatusType.objects.get(name='denied')
request_obj = serializer.save(status=deny_status)
self.deny_action(request_obj)
return Response(serializer.data)
def perform_destroy(self, instance):
"""
Add an end date to a request and take no further action
"""
status, _ = StatusType.objects.get_or_create(name="closed")
instance.status = status
instance.end_date = timezone.now()
instance.save()
def perform_update(self, serializer):
"""
Updates the request and performs any update actions.
"""
        # NOTE: An identity could possibly have multiple memberships
# It may be better to directly take membership rather than an identity
identity = serializer.initial_data.get('identity', {})
membership = None
if isinstance(identity, dict):
identity_id = identity.get("id", None)
else:
identity_id = identity
try:
if identity_id is not None:
membership = IdentityMembership.objects.get(
identity=identity_id)
if membership:
instance = serializer.save(end_date=timezone.now(),
membership=membership)
else:
if self.request.method == "PATCH":
instance = serializer.save(status=StatusType.objects.get(id=serializer.initial_data['status']))
else:
instance = serializer.save()
if instance.is_approved():
self.approve_action(instance)
if instance.is_closed():
self.close_action(instance)
if instance.is_denied():
self.deny_action(instance)
except (core_exceptions.ProviderLimitExceeded, # NOTE: DEPRECATED -- REMOVE SOON, USE BELOW.
core_exceptions.RequestLimitExceeded):
message = "Only one active request is allowed per provider."
raise exceptions.MethodNotAllowed('create', detail=message)
except core_exceptions.InvalidMembership:
message = (
"The user '%s' is not a valid member."
% self.request.user.username
)
raise exceptions.ParseError(detail=message)
except IdentityMembership.DoesNotExist:
message = (
"The identity '%s' does not have a membership"
% identity_id
)
raise exceptions.ParseError(detail=message)
except Exception as e:
            message = (
                "An error was encountered when updating the request: %s" % e.message
            )
logger.exception(e)
raise exceptions.ParseError(detail=message)
@unresolved_requests_only
def update(self, request, *args, **kwargs):
"""
Update the request for the specific identifier
"""
return super(BaseRequestViewSet, self).update(request, *args, **kwargs)
def approve_action(self, instance):
"""
Perform the approved action for the request
"""
def deny_action(self, instance):
"""
Perform the denied action for the request
"""
    def submit_action(self, instance, options=None):
"""
Perform the submit action for a new request
"""
|
|
# @(#)root/pyroot:$Id$
# Author: Wim Lavrijsen ([email protected])
# Created: 02/20/03
# Last: 11/17/14
"""PyROOT user module.
o) install lazy ROOT class/variable lookup as appropriate
o) feed gSystem and gInterpreter for display updates
o) add readline completion (if supported by python build)
o) enable some ROOT/CINT style commands
o) handle a few special cases such as gPad, STL, etc.
o) execute rootlogon.py/.C scripts
"""
__version__ = '6.2.0'
__author__ = 'Wim Lavrijsen ([email protected])'
### system and interpreter setup ------------------------------------------------
import os, sys, types
import collections
## there's no version_info in 1.5.2
if sys.version[0:3] < '2.2':
raise ImportError( 'Python Version 2.2 or above is required.' )
## 2.2 has 10 instructions as default, > 2.3 has 100 ... make same
if sys.version[0:3] == '2.2':
sys.setcheckinterval( 100 )
## readline support, if available
try:
import rlcompleter, readline
class RootNameCompleter( rlcompleter.Completer ):
def file_matches( self, text ):
matches = []
path, name = os.path.split( text )
try:
for fn in os.listdir( path or os.curdir ):
if fn[:len(name)] == name:
full = os.path.join( path, fn )
matches.append( full )
if os.path.isdir( full ):
matches += [os.path.join( full, x ) for x in os.listdir( full )]
except OSError:
pass
return matches
def root_global_matches( self, text, prefix = '' ):
gClassTable = _root.GetRootGlobal( 'gClassTable' )
all = [ gClassTable.At(i) for i in range(gClassTable.Classes()) ]
all += [ g.GetName() for g in _root.gROOT.GetListOfGlobals() ]
matches = [x for x in all if x[:len(text)] == text]
return [prefix + x for x in matches]
def global_matches( self, text ):
matches = rlcompleter.Completer.global_matches( self, text )
if not matches: matches = []
matches += self.file_matches( text )
return matches
def attr_matches( self, text ):
matches = rlcompleter.Completer.attr_matches( self, text )
if not matches: matches = []
b = text.find('.')
try:
if 0 <= b and self.namespace[text[:b]].__name__ == 'ROOT':
matches += self.root_global_matches( text[b+1:], text[:b+1] )
except AttributeError: # not all objects have a __name__
pass
return matches
readline.set_completer( RootNameCompleter().complete )
readline.set_completer_delims(
readline.get_completer_delims().replace( os.sep , '' ) )
readline.parse_and_bind( 'tab: complete' )
readline.parse_and_bind( 'set show-all-if-ambiguous On' )
except:
# module readline typically doesn't exist on non-Unix platforms
pass
## special filter on MacOS X (warnings caused by linking that is still required)
if sys.platform == 'darwin':
import warnings
warnings.filterwarnings( action='ignore', category=RuntimeWarning, module='ROOT',\
message='class \S* already in TClassTable$' )
### load PyROOT C++ extension module, special case for linux and Sun ------------
needsGlobal = ( 0 <= sys.platform.find( 'linux' ) ) or\
( 0 <= sys.platform.find( 'sunos' ) )
if needsGlobal:
# change dl flags to load dictionaries from pre-linked .so's
dlflags = sys.getdlopenflags()
sys.setdlopenflags( 0x100 | 0x2 ) # RTLD_GLOBAL | RTLD_NOW
import libPyROOT as _root
# reset dl flags if needed
if needsGlobal:
sys.setdlopenflags( dlflags )
del needsGlobal
## convince 2.2 it's ok to use the expand function
if sys.version[0:3] == '2.2':
import copyreg
copyreg.constructor( _root._ObjectProxy__expand__ )
## convince inspect that PyROOT method proxies are possible drop-ins for python
## methods and classes for pydoc
import inspect
inspect._old_isfunction = inspect.isfunction
def isfunction( object ):
if type(object) == _root.MethodProxy and not object.__self__.__class__:
return True
return inspect._old_isfunction( object )
inspect.isfunction = isfunction
inspect._old_ismethod = inspect.ismethod
def ismethod( object ):
if type(object) == _root.MethodProxy:
return True
return inspect._old_ismethod( object )
inspect.ismethod = ismethod
del isfunction, ismethod
### configuration ---------------------------------------------------------------
class _Configuration( object ):
__slots__ = [ 'IgnoreCommandLineOptions', 'StartGuiThread', '_gts' ]
def __init__( self ):
self.IgnoreCommandLineOptions = 0
self.StartGuiThread = 1
self._gts = []
def __setGTS( self, value ):
for c in value:
if not isinstance( c, collections.Callable):
raise ValueError( '"%s" is not callable' % str(c) );
self._gts = value
def __getGTS( self ):
return self._gts
GUIThreadScheduleOnce = property( __getGTS, __setGTS )
PyConfig = _Configuration()
del _Configuration
### choose interactive-favored policies -----------------------------------------
_root.SetMemoryPolicy( _root.kMemoryHeuristics )
_root.SetSignalPolicy( _root.kSignalSafe )
### data ________________________________________________________________________
__pseudo__all__ = [ 'gROOT', 'gSystem', 'gInterpreter',
'AddressOf', 'MakeNullPointer', 'Template', 'std' ]
__all__ = []   # deliberately empty
_orig_ehook = sys.excepthook
## for setting memory and speed policies; not exported
_memPolicyAPI = [ 'SetMemoryPolicy', 'SetOwnership', 'kMemoryHeuristics', 'kMemoryStrict' ]
_sigPolicyAPI = [ 'SetSignalPolicy', 'kSignalFast', 'kSignalSafe' ]
### helpers ---------------------------------------------------------------------
def split( str ):
npos = str.find( ' ' )
if 0 <= npos:
return str[:npos], str[npos+1:]
else:
return str, ''
### template support ------------------------------------------------------------
class Template:
def __init__( self, name ):
self.__name__ = name
def __call__( self, *args ):
newargs = [ self.__name__[ 0 <= self.__name__.find( 'std::' ) and 5 or 0:] ]
for arg in args:
if type(arg) == str:
arg = ','.join( [x.strip() for x in arg.split(',')] )
newargs.append( arg )
result = _root.MakeRootTemplateClass( *newargs )
# special case pythonization (builtin_map is not available from the C-API)
if hasattr( result, 'push_back' ):
def iadd( self, ll ):
[ self.push_back(x) for x in ll ]
return self
result.__iadd__ = iadd
return result
_root.Template = Template
### scope place holder for STL classes ------------------------------------------
class _stdmeta( type ):
def __getattr__( cls, attr ): # for non-templated classes in std
klass = _root.MakeRootClass( attr, cls )
setattr( cls, attr, klass )
return klass
class std( object, metaclass=_stdmeta ):
stlclasses = ( 'complex', 'pair', \
'deque', 'list', 'queue', 'stack', 'vector', 'map', 'multimap', 'set', 'multiset' )
for name in stlclasses:
locals()[ name ] = Template( "std::%s" % name )
_root.std = std
sys.modules['ROOT.std'] = std
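## illustrative usage (an editorial addition, not part of the original module):
## a hedged sketch of how the Template proxy and the std namespace above are
## typically used; it needs a working ROOT installation, so it is only defined
## here and never called at import time.
def _example_std_usage():
   v = std.vector( 'double' )()        # same as Template( 'std::vector' )( 'double' )()
   v += [ 1., 2., 3. ]                 # uses the push_back-based __iadd__ added above
   m = std.map( 'int', 'double' )()    # multi-argument template instantiation
   return v, m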
### special cases for gPad, gVirtualX (are C++ macro's) -------------------------
class _ExpandMacroFunction( object ):
def __init__( self, klass, func ):
c = _root.MakeRootClass( klass )
self.func = getattr( c, func )
def __getattr__( self, what ):
return getattr( self.__dict__[ 'func' ](), what )
def __cmp__( self, other ):
return cmp( self.func(), other )
def __len__( self ):
if self.func():
return 1
return 0
def __repr__( self ):
return repr( self.func() )
def __str__( self ):
return str( self.func() )
_root.gPad = _ExpandMacroFunction( "TVirtualPad", "Pad" )
_root.gVirtualX = _ExpandMacroFunction( "TVirtualX", "Instance" )
_root.gDirectory = _ExpandMacroFunction( "TDirectory", "CurrentDirectory" )
_root.gFile = _ExpandMacroFunction( "TFile", "CurrentFile" )
_root.gInterpreter = _ExpandMacroFunction( "TInterpreter", "Instance" )
### special case pythonization --------------------------------------------------
def _TTree__iter__( self ):
i = 0
bytes_read = self.GetEntry(i)
while 0 < bytes_read:
yield self # TODO: not sure how to do this w/ C-API ...
i += 1
bytes_read = self.GetEntry(i)
if bytes_read == -1:
raise RuntimeError( "TTree I/O error" )
_root.MakeRootClass( "TTree" ).__iter__ = _TTree__iter__
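## illustrative usage (an editorial addition): the iterator defined above lets a
## TTree be traversed with a plain for-loop; the file and tree names below are
## hypothetical and a ROOT installation is required, so this is never called.
def _example_ttree_iteration():
   f = _root.MakeRootClass( 'TFile' ).Open( 'myfile.root' )
   tree = f.Get( 'mytree' )
   for entry in tree:        # each step calls GetEntry() via _TTree__iter__
      pass                   # branch values are read off `entry` (the tree itself)
   f.Close()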
### RINT command emulation ------------------------------------------------------
def _excepthook( exctype, value, traceb ):
# catch syntax errors only (they contain the full line)
if isinstance( value, SyntaxError ) and value.text:
cmd, arg = split( value.text[:-1] )
# mimic ROOT/CINT commands
if cmd == '.q':
sys.exit( 0 )
elif cmd == '.?' or cmd == '.help':
sys.stdout.write( """PyROOT emulation of CINT commands.
All emulated commands must be preceded by a . (dot).
===========================================================================
Help: ? : this help
help : this help
Shell: ![shell] : execute shell command
Evaluation: x [file] : load [file] and evaluate {statements} in the file
Load/Unload: L [lib] : load [lib]
Quit: q : quit python session
The standard python help system is available through a call to 'help()' or
'help(<id>)' where <id> is an identifier, e.g. a class or function such as
TPad or TPad.cd, etc.
""" )
return
elif cmd == '.!' and arg:
return os.system( arg )
elif cmd == '.x' and arg:
import __main__
fn = os.path.expanduser( os.path.expandvars( arg ) )
exec(compile(open( fn ).read(), fn, 'exec'), __main__.__dict__, __main__.__dict__)
return
elif cmd == '.L':
return _root.gSystem.Load( arg )
elif cmd == '.cd' and arg:
os.chdir( arg )
return
elif cmd == '.ls':
return sys.modules[ __name__ ].gDirectory.ls()
elif cmd == '.pwd':
return sys.modules[ __name__ ].gDirectory.pwd()
elif isinstance( value, SyntaxError ) and \
value.msg == "can't assign to function call":
sys.stdout.write( """Are you trying to assign a value to a reference return, for example to the
result of a call to "double& SMatrix<>::operator()(int,int)"? If so, then
please use operator[] instead, as in e.g. "mymatrix[i][j] = somevalue".
""" )
# normal exception processing
_orig_ehook( exctype, value, traceb )
if not '__IPYTHON__' in __builtins__:
# IPython has its own ways of executing shell commands etc.
sys.excepthook = _excepthook
### call EndOfLineAction after each interactive command (to update display etc.)
_orig_dhook = sys.displayhook
def _displayhook( v ):
_root.gInterpreter.EndOfLineAction()
return _orig_dhook( v )
### set import hook to be able to trigger auto-loading as appropriate
try:
import builtins
except ImportError:
   import __builtin__ as builtins # name change in p3
_orig_ihook = builtins.__import__
def _importhook( name, glbls = {}, lcls = {}, fromlist = [], level = -1 ):
if name[0:5] == 'ROOT.':
try:
sys.modules[ name ] = getattr( sys.modules[ 'ROOT' ], name[5:] )
except Exception:
pass
if 5 <= sys.version_info[1]: # minor
return _orig_ihook( name, glbls, lcls, fromlist, level )
return _orig_ihook( name, glbls, lcls, fromlist )
# builtins.__import__ = _importhook
### helper to prevent GUIs from starving
def _processRootEvents( controller ):
import time
gSystemProcessEvents = _root.gSystem.ProcessEvents
if sys.platform == 'win32':
import _thread
_root.gROOT.ProcessLineSync('((TGWin32 *)gVirtualX)->SetUserThreadId(%ld)' % (_thread.get_ident()))
while controller.keeppolling:
try:
gSystemProcessEvents()
if PyConfig.GUIThreadScheduleOnce:
for guicall in PyConfig.GUIThreadScheduleOnce:
guicall()
PyConfig.GUIThreadScheduleOnce = []
time.sleep( 0.01 )
except: # in case gSystem gets destroyed early on exit
pass
### allow loading ROOT classes as attributes ------------------------------------
class ModuleFacade( types.ModuleType ):
def __init__( self, module ):
types.ModuleType.__init__( self, 'ROOT' )
self.__dict__[ 'module' ] = module
self.__dict__[ '__doc__' ] = self.module.__doc__
self.__dict__[ '__name__' ] = self.module.__name__
self.__dict__[ '__file__' ] = self.module.__file__
self.__dict__[ 'keeppolling' ] = 0
self.__dict__[ 'PyConfig' ] = self.module.PyConfig
class gROOTWrapper( object ):
def __init__( self, gROOT, master ):
self.__dict__[ '_master' ] = master
self.__dict__[ '_gROOT' ] = gROOT
def __getattr__( self, name ):
if name != 'SetBatch' and self._master.__dict__[ 'gROOT' ] != self._gROOT:
self._master._ModuleFacade__finalSetup()
del self._master.__class__._ModuleFacade__finalSetup
return getattr( self._gROOT, name )
def __setattr__( self, name, value ):
return setattr( self._gROOT, name, value )
self.__dict__[ 'gROOT' ] = gROOTWrapper( _root.gROOT, self )
del gROOTWrapper
      # begin with startup getattr/setattr
self.__class__.__getattr__ = self.__class__.__getattr1
del self.__class__.__getattr1
self.__class__.__setattr__ = self.__class__.__setattr1
del self.__class__.__setattr1
def __setattr1( self, name, value ): # "start-up" setattr
# create application, thread etc.
self.__finalSetup()
del self.__class__.__finalSetup
# let "running" setattr handle setting
return setattr( self, name, value )
   def __setattr2( self, name, value ): # "running" setattr
# to allow assignments to ROOT globals such as ROOT.gDebug
if not name in self.__dict__:
try:
# assignment to an existing ROOT global (establishes proxy)
setattr( self.__class__, name, _root.GetRootGlobal( name ) )
except LookupError:
# allow a few limited cases where new globals can be set
if sys.hexversion >= 0x3000000:
pylong = int
else:
pylong = int
tcnv = { bool : 'bool %s = %d;',
int : 'int %s = %d;',
pylong : 'long %s = %d;',
float : 'double %s = %f;',
str : 'string %s = "%s";' }
try:
_root.gROOT.ProcessLine( tcnv[ type(value) ] % (name,value) );
setattr( self.__class__, name, _root.GetRootGlobal( name ) )
except KeyError:
pass # can still assign normally, to the module
# actual assignment through descriptor, or normal python way
return super( self.__class__, self ).__setattr__( name, value )
def __getattr1( self, name ): # "start-up" getattr
# special case, to allow "from ROOT import gROOT" w/o sending GUI events
if name == '__path__':
raise AttributeError( name )
# create application, thread etc.
self.__finalSetup()
del self.__class__.__finalSetup
# let "running" getattr handle lookup
return getattr( self, name )
def __getattr2( self, name ): # "running" getattr
# handle "from ROOT import *" ... can be called multiple times
if name == '__all__':
if '__IPYTHON__' in __builtins__:
import warnings
warnings.warn( '"from ROOT import *" is not supported under IPython' )
# continue anyway, just in case it works ...
caller = sys.modules[ sys._getframe( 1 ).f_globals[ '__name__' ] ]
# we may be calling in from __getattr1, verify and if so, go one frame up
if caller == self:
caller = sys.modules[ sys._getframe( 2 ).f_globals[ '__name__' ] ]
# setup the pre-defined globals
for name in self.module.__pseudo__all__:
caller.__dict__[ name ] = getattr( _root, name )
# install the hook
_root.SetRootLazyLookup( caller.__dict__ )
# return empty list, to prevent further copying
return self.module.__all__
# lookup into ROOT (which may cause python-side enum/class/global creation)
attr = _root.LookupRootEntity( name )
# the call above will raise AttributeError as necessary; so if we get here,
# attr is valid: cache as appropriate, so we don't come back
if type(attr) == _root.PropertyProxy:
setattr( self.__class__, name, attr ) # descriptor
return getattr( self, name )
else:
self.__dict__[ name ] = attr # normal member
return attr
# reaching this point means failure ...
raise AttributeError( name )
def __delattr__( self, name ):
      # this is for convenience, as typically lookup results are kept in two places
try:
delattr( self.module._root, name )
except AttributeError:
pass
return super( self.__class__, self ).__delattr__( name )
def __finalSetup( self ):
# prevent this method from being re-entered through the gROOT wrapper
self.__dict__[ 'gROOT' ] = _root.gROOT
      # switch to running getattr/setattr
self.__class__.__getattr__ = self.__class__.__getattr2
del self.__class__.__getattr2
self.__class__.__setattr__ = self.__class__.__setattr2
del self.__class__.__setattr2
# normally, you'll want a ROOT application; don't init any further if
# one pre-exists from some C++ code somewhere
hasargv = hasattr( sys, 'argv' )
if hasargv and PyConfig.IgnoreCommandLineOptions:
argv = sys.argv
sys.argv = []
appc = _root.MakeRootClass( 'PyROOT::TPyROOTApplication' )
if appc.CreatePyROOTApplication():
appc.InitROOTGlobals()
appc.InitCINTMessageCallback();
appc.InitROOTMessageCallback();
if hasargv and PyConfig.IgnoreCommandLineOptions:
sys.argv = argv
# now add 'string' to std so as to not confuse with module string
std.string = _root.MakeRootClass( 'string' )
# must be called after gApplication creation:
if '__IPYTHON__' in __builtins__:
# IPython's FakeModule hack otherwise prevents usage of python from CINT
_root.gROOT.ProcessLine( 'TPython::Exec( "" );' )
sys.modules[ '__main__' ].__builtins__ = __builtins__
# custom logon file (must be after creation of ROOT globals)
if hasargv and not '-n' in sys.argv:
rootlogon = os.path.expanduser( '~/.rootlogon.py' )
if os.path.exists( rootlogon ):
# could also have used execfile, but import is likely to give fewer surprises
import imp
imp.load_module( 'rootlogon', open( rootlogon, 'r' ), rootlogon, ('.py','r',1) )
del imp
else: # if the .py version of rootlogon exists, the .C is ignored (the user can
# load the .C from the .py, if so desired)
# system logon, user logon, and local logon (skip Rint.Logon)
name = '.rootlogon.C'
logons = [ os.path.join( str(self.gRootDir), 'etc', 'system' + name ),
os.path.expanduser( os.path.join( '~', name ) ) ]
if logons[-1] != os.path.join( os.getcwd(), name ):
logons.append( name )
for rootlogon in logons:
if os.path.exists( rootlogon ):
appc.ExecuteFile( rootlogon )
del rootlogon, logons
# use either the input hook or thread to send events to GUIs
if self.PyConfig.StartGuiThread and \
not ( self.keeppolling or _root.gROOT.IsBatch() ):
if self.PyConfig.StartGuiThread == 'inputhook' or\
_root.gSystem.InheritsFrom( 'TMacOSXSystem' ):
# new, PyOS_InputHook based mechanism
if PyConfig.GUIThreadScheduleOnce:
for guicall in PyConfig.GUIThreadScheduleOnce:
guicall()
PyConfig.GUIThreadScheduleOnce = []
_root.InstallGUIEventInputHook()
else:
# original, threading based approach
import threading
self.__dict__[ 'keeppolling' ] = 1
self.__dict__[ 'PyGUIThread' ] = \
threading.Thread( None, _processRootEvents, None, ( self, ) )
self.PyGUIThread.setDaemon( 1 )
self.PyGUIThread.start()
if threading.currentThread() != self.PyGUIThread:
while self.PyConfig.GUIThreadScheduleOnce:
self.PyGUIThread.join( 0.1 )
# store already available ROOT objects to prevent spurious lookups
for name in self.module.__pseudo__all__ + _memPolicyAPI + _sigPolicyAPI:
self.__dict__[ name ] = getattr( _root, name )
for name in std.stlclasses:
setattr( _root, name, getattr( std, name ) )
# set the display hook
sys.displayhook = _displayhook
sys.modules[ __name__ ] = ModuleFacade( sys.modules[ __name__ ] )
del ModuleFacade
### b/c of circular references, the facade needs explicit cleanup ---------------
import atexit
def cleanup():
return
# restore hooks
import sys
sys.displayhook = sys.__displayhook__
if not '__IPYTHON__' in __builtins__:
sys.excepthook = sys.__excepthook__
builtins.__import__ = _orig_ihook
facade = sys.modules[ __name__ ]
   # reset gRootModule on the C++ side to prevent further lookups
_root._ResetRootModule()
   # shutdown GUI thread, as appropriate (always safe to call)
_root.RemoveGUIEventInputHook()
# prevent further spurious lookups into ROOT libraries
del facade.__class__.__getattr__
del facade.__class__.__setattr__
# shutdown GUI thread, as appropriate
if hasattr( facade, 'PyGUIThread' ):
facade.keeppolling = 0
# if not shutdown from GUI (often the case), wait for it
import threading
if threading.currentThread() != facade.PyGUIThread:
facade.PyGUIThread.join( 3. ) # arbitrary
del threading
# remove otherwise (potentially) circular references
import types
items = list(facade.module.__dict__.items())
for k, v in items:
if type(v) == types.ModuleType:
facade.module.__dict__[ k ] = None
del v, k, items, types
# destroy facade
facade.__dict__.clear()
del facade
   # run part of the gROOT shutdown sequence ... running it here ensures that
# it is done before any ROOT libraries are off-loaded, with unspecified
# order of static object destruction;
gROOT = sys.modules[ 'libPyROOT' ].gROOT
gROOT.EndOfProcessCleanups(True)
del gROOT
# cleanup cached python strings
sys.modules[ 'libPyROOT' ]._DestroyPyStrings()
# destroy ROOT extension module and ROOT module
del sys.modules[ 'libPyROOT' ]
del sys.modules[ 'ROOT' ]
atexit.register( cleanup )
del cleanup, atexit
|
|
# -*- coding: utf-8 -*-
"""
tmdbsimple.people
~~~~~~~~~~~~~~~~~
This module implements the People, Credits, and Jobs functionality
of tmdbsimple.
Created by Celia Oakley on 2013-10-31.
:copyright: (c) 2013-2017 by Celia Oakley
:license: GPLv3, see LICENSE for more details
"""
from .base import TMDB
class People(TMDB):
"""
People functionality.
See: http://docs.themoviedb.apiary.io/#people
"""
BASE_PATH = 'person'
URLS = {
'info': '/{id}',
'movie_credits': '/{id}/movie_credits',
'tv_credits': '/{id}/tv_credits',
'combined_credits': '/{id}/combined_credits',
'external_ids': '/{id}/external_ids',
'images': '/{id}/images',
'changes': '/{id}/changes',
'popular': '/popular',
'latest': '/latest',
}
def __init__(self, id=0):
super(People, self).__init__()
self.id = id
def info(self, **kwargs):
"""
Get the general person information for a specific id.
Args:
append_to_response: (optional) Comma separated, any person method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def movie_credits(self, **kwargs):
"""
Get the movie credits for a specific person id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('movie_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def tv_credits(self, **kwargs):
"""
Get the TV credits for a specific person id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('tv_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def combined_credits(self, **kwargs):
"""
Get the combined (movie and TV) credits for a specific person id.
To get the expanded details for each TV record, call the /credit method
with the provided credit_id. This will provide details about which
episode and/or season the credit is for.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('combined_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids for a specific person id.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images for a specific person id.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def changes(self, **kwargs):
"""
Get the changes for a specific person id.
Changes are grouped by key, and ordered by date in descending order.
By default, only the last 24 hours of changes are returned. The maximum
number of days that can be returned in a single request is 14. The
language is present on fields that are translatable.
Args:
start_date: (optional) Expected format is 'YYYY-MM-DD'.
end_date: (optional) Expected format is 'YYYY-MM-DD'.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('changes')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def popular(self, **kwargs):
"""
Get the list of popular people on The Movie Database. This list
refreshes every day.
Args:
page: (optional) Minimum 1, maximum 1000.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('popular')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def latest(self, **kwargs):
"""
Get the latest person id.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('latest')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class Credits(TMDB):
"""
Credits functionality.
See: http://docs.themoviedb.apiary.io/#credits
"""
BASE_PATH = 'credit'
URLS = {
'info': '/{credit_id}',
}
def __init__(self, credit_id):
super(Credits, self).__init__()
self.credit_id = credit_id
def info(self, **kwargs):
"""
Get the detailed information about a particular credit record. This is
currently only supported with the new credit model found in TV. These
ids can be found from any TV credit response as well as the tv_credits
and combined_credits methods for people.
        The episodes object returns a list of episodes, which are generally
        going to be guest stars. The season array will return a list of season
numbers. Season credits are credits that were marked with the
"add to every season" option in the editing interface and are
assumed to be "season regulars".
Args:
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_credit_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class Jobs(TMDB):
"""
Jobs functionality.
See: http://docs.themoviedb.apiary.io/#jobs
"""
BASE_PATH = 'job'
URLS = {
'list': '/list',
}
def list(self, **kwargs):
"""
Get a list of valid jobs.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('list')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
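# Hedged usage sketch (an editorial addition, not part of tmdbsimple): typical
# use of the classes above. The API key assignment and the id value below are
# placeholders; a real key from themoviedb.org is required for any request.
def _example_people_usage():
    import tmdbsimple
    tmdbsimple.API_KEY = 'YOUR_API_KEY'   # placeholder key
    person = People(287)                  # hypothetical person id
    info = person.info(append_to_response='movie_credits,images')
    jobs = Jobs().list()
    return info, jobs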
|
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import io
import os
import platform
import re
import subprocess
import sys
class TestFailedError(Exception):
pass
def escapeCmdArg(arg):
if '"' in arg or ' ' in arg:
return '"%s"' % arg.replace('"', '\\"')
else:
return arg
def run_command(cmd):
if sys.version_info[0] < 3:
cmd = list(map(lambda s: s.encode('utf-8'), cmd))
print(' '.join([escapeCmdArg(arg) for arg in cmd]))
if sys.version_info[0] < 3 or platform.system() == 'Windows':
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
else:
return subprocess.check_output(list(map(lambda s: s.encode('utf-8'), cmd)),
stderr=subprocess.STDOUT)
def parseLine(line, line_no, test_case, incremental_edit_args, reparse_args,
current_reparse_start):
pre_edit_line = ""
post_edit_line = ""
# We parse one tag at a time in the line while eating away a prefix of the
# line
while line:
# The regular expression to match the template markers
subst_re = re.compile(r'^(.*?)<<(.*?)<(.*?)\|\|\|(.*?)>>>(.*\n?)')
reparse_re = re.compile(r'^(.*?)<(/?)reparse ?(.*?)>(.*\n?)')
subst_match = subst_re.match(line)
reparse_match = reparse_re.match(line)
if subst_match and reparse_match:
# If both regex match use the one with the shorter prefix
if len(subst_match.group(1)) < len(reparse_match.group(1)):
reparse_match = None
else:
subst_match = None
if subst_match:
prefix = subst_match.group(1)
match_test_case = subst_match.group(2)
pre_edit = subst_match.group(3)
post_edit = subst_match.group(4)
suffix = subst_match.group(5)
if match_test_case == test_case:
# Compute the -incremental-edit argument for swift-syntax-test
column = len(pre_edit_line) + len(prefix) + 1
edit_arg = '%d:%d-%d:%d=%s' % \
(line_no, column, line_no, column + len(pre_edit.encode('utf-8')),
post_edit)
incremental_edit_args.append('-incremental-edit')
incremental_edit_args.append(edit_arg)
pre_edit_line += prefix + pre_edit
post_edit_line += prefix + post_edit
else:
# For different test cases just take the pre-edit text
pre_edit_line += prefix + pre_edit
post_edit_line += prefix + pre_edit
line = suffix
elif reparse_match:
prefix = reparse_match.group(1)
is_closing = len(reparse_match.group(2)) > 0
match_test_case = reparse_match.group(3)
suffix = reparse_match.group(4)
if match_test_case == test_case:
column = len(post_edit_line) + len(prefix) + 1
if is_closing:
if not current_reparse_start:
raise TestFailedError('Closing unopened reparse tag '
'in line %d' % line_no)
reparse_args.append('-reparse-region')
reparse_args.append(
'%d:%d-%d:%d' % (current_reparse_start[0],
current_reparse_start[1],
line_no, column))
current_reparse_start = None
else:
if current_reparse_start:
raise TestFailedError('Opening nested reparse tags '
'for the same test case in line '
'%d' % line_no)
current_reparse_start = [line_no, column]
pre_edit_line += prefix
post_edit_line += prefix
line = suffix
else:
pre_edit_line += line
post_edit_line += line
# Nothing more to do
line = ''
return (pre_edit_line.encode('utf-8'),
post_edit_line.encode('utf-8'),
current_reparse_start)
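# Worked example (an editorial addition): feeding a line that contains the
# template marker through parseLine for test case 't1'. With the input below
# the pre-edit line keeps 'foo', the post-edit line gets 'bar', and the
# matching '-incremental-edit', '1:9-1:12=bar' arguments are recorded.
def exampleParseLine():
    edits, reparse = [], []
    pre, post, reparse_start = parseLine(
        'let x = <<t1<foo|||bar>>>\n', 1, 't1', edits, reparse, None)
    # pre  == b'let x = foo\n'
    # post == b'let x = bar\n'
    return pre, post, edits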
def prepareForIncrParse(test_file, test_case, pre_edit_file, post_edit_file,
incremental_edit_args, reparse_args):
with io.open(test_file, mode='r', encoding='utf-8',
newline='\n') as test_file_handle, \
io.open(pre_edit_file, mode='w+', encoding='utf-8',
newline='\n') as pre_edit_file_handle, \
io.open(post_edit_file, mode='w+', encoding='utf-8',
newline='\n') as post_edit_file_handle:
current_reparse_start = None
line_no = 1
for line in test_file_handle.readlines():
parseLineRes = parseLine(line, line_no, test_case,
incremental_edit_args,
reparse_args, current_reparse_start)
(pre_edit_line, post_edit_line, current_reparse_start) = \
parseLineRes
pre_edit_file_handle.write(pre_edit_line.decode('utf-8'))
post_edit_file_handle.write(post_edit_line.decode('utf-8'))
line_no += 1
if current_reparse_start:
raise TestFailedError('Unclosed reparse tag for test case %s' %
test_case)
def serializeIncrParseMarkupFile(test_file, test_case, mode,
omit_node_ids, output_file, diags_output_file,
temp_dir, swift_syntax_test,
print_visual_reuse_info):
test_file_name = os.path.basename(test_file)
pre_edit_file = temp_dir + '/' + test_file_name + '.' + test_case + \
'.pre.swift'
post_edit_file = temp_dir + '/' + test_file_name + '.' + test_case + \
'.post.swift'
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
# =========================================================================
# First generate the pre-edit and post-edit Swift file and gather the edits
# and expected reparse regions. This is the parser for the special edit
# markup for testing incremental parsing
# =========================================================================
    # Gather command line arguments for swift-syntax-test specifying the
# performed edits in this list
incremental_edit_args = []
reparse_args = []
prepareForIncrParse(test_file, test_case, pre_edit_file, post_edit_file,
incremental_edit_args, reparse_args)
# =========================================================================
# Now generate the requested serialized file
# =========================================================================
# Build the command to serialize the tree depending on the command line
# arguments
try:
command = [
swift_syntax_test,
'-serialize-raw-tree',
'-output-filename', output_file
]
if diags_output_file:
command.extend(['-diags-output-filename', diags_output_file])
if omit_node_ids:
command.extend(['-omit-node-ids'])
if mode == 'pre-edit':
command.extend(['-input-source-filename', pre_edit_file])
elif mode == 'post-edit':
command.extend(['-input-source-filename', post_edit_file])
elif mode == 'incremental':
# We need to build the syntax tree of the pre-edit file first so
# that we can pass it to swift-syntax-test to perform incremental
# parsing
pre_edit_tree_file = pre_edit_file + '.serialized.json'
run_command([swift_syntax_test] +
['-serialize-raw-tree'] +
['-input-source-filename', pre_edit_file] +
['-output-filename', pre_edit_tree_file])
# Then perform incremental parsing with the old syntax tree on the
# post-edit file
command.extend(['-input-source-filename', post_edit_file])
command.extend(['-old-syntax-tree-filename',
pre_edit_tree_file])
command.extend(['--old-source-filename', pre_edit_file])
command.extend(incremental_edit_args)
command.extend(reparse_args)
if print_visual_reuse_info:
command.extend([
'-print-visual-reuse-info',
'-force-colored-output'
])
else:
raise ValueError('Unknown mode "%s"' % mode)
output = run_command(command)
if print_visual_reuse_info:
print(output)
except subprocess.CalledProcessError as e:
raise TestFailedError(e.output.decode('utf-8'))
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description='Utility for testing incremental syntax parsing',
epilog='''
    This utility can parse a special markup that encodes a pre-edit and a
    post-edit version of a file simultaneously and generate a serialized version
of the libSyntax tree by parsing either the pre-edit file, the post-edit
file or the edits that are required to retrieve the post-edit file from the
pre-edit file incrementally.
To generate the pre-edit and the post-edit file from the template, it
operates on markers of the form:
<<test_case<pre|||post>>>
These placeholders are replaced by:
- 'pre' if a different test case than 'test_case' is run
- 'pre' for the pre-edit version of 'test_case'
- 'post' for the post-edit version of 'test_case''')
parser.add_argument(
'file', type=argparse.FileType(),
help='The template file to test')
parser.add_argument(
'--test-case', default='',
help='The test case to execute. If no test case is specified all \
unnamed substitutions are applied')
parser.add_argument(
'--mode', choices=['pre-edit', 'incremental', 'post-edit'],
required=True, help='''
The type of parsing to perform:
- pre-edit: Serialize the syntax tree when parsing the pre-edit file \
from scratch
- incremental: Serialize the syntax tree that results from parsing the \
edits between the pre-edit and post-edit file incrementally
- post-edit: Serialize the syntax tree that results from parsing the \
post-edit file from scratch
''')
parser.add_argument(
'--omit-node-ids', default=False, action='store_true',
help='Don\'t include the ids of the nodes in the serialized syntax \
tree')
parser.add_argument(
'--output-file', required=True,
help='The file to which the serialized tree shall be written.')
parser.add_argument(
'--temp-dir', required=True,
help='A temporary directory where pre-edit and post-edit files can be \
saved')
parser.add_argument(
'--swift-syntax-test', required=True,
help='The path to swift-syntax-test')
parser.add_argument(
'--print-visual-reuse-info', default=False, action='store_true',
help='Print visual reuse information about the incremental parse \
instead of diffing the syntax trees. This option is intended \
for debug purposes only.')
args = parser.parse_args(sys.argv[1:])
test_file = args.file.name
test_case = args.test_case
mode = args.mode
omit_node_ids = args.omit_node_ids
output_file = args.output_file
temp_dir = args.temp_dir
swift_syntax_test = args.swift_syntax_test
visual_reuse_info = args.print_visual_reuse_info
try:
serializeIncrParseMarkupFile(test_file=test_file,
test_case=test_case,
mode=mode,
omit_node_ids=omit_node_ids,
output_file=output_file,
diags_output_file=None,
temp_dir=temp_dir,
swift_syntax_test=swift_syntax_test,
print_visual_reuse_info=visual_reuse_info)
except TestFailedError as e:
        print(str(e), file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
|
|
import numpy as np
import tensorflow as tf
from tfsnippet.utils import (add_name_arg_doc, get_static_shape, concat_shapes,
get_shape, is_tensor_object, assert_deps,
InputSpec)
from .control_flows import smart_cond
from .assertions import assert_rank, assert_rank_at_least
__all__ = [
'prepend_dims',
'flatten_to_ndims',
'unflatten_from_ndims',
'broadcast_to_shape',
'broadcast_to_shape_strict',
'broadcast_concat',
'transpose_conv2d_axis',
'transpose_conv2d_channels_last_to_x',
'transpose_conv2d_channels_x_to_last',
'reshape_tail',
]
@add_name_arg_doc
def prepend_dims(x, ndims=1, name=None):
"""
Prepend `[1] * ndims` to the beginning of the shape of `x`.
Args:
x: The tensor `x`.
ndims: Number of `1` to prepend.
Returns:
tf.Tensor: The tensor with prepended dimensions.
"""
ndims = int(ndims)
if ndims < 0:
raise ValueError('`ndims` must be >= 0: got {}'.format(ndims))
x = tf.convert_to_tensor(x)
if ndims == 0:
return x
with tf.name_scope(name, default_name='prepend_dims', values=[x]):
static_shape = get_static_shape(x)
if static_shape is not None:
static_shape = tf.TensorShape([1] * ndims + list(static_shape))
dynamic_shape = concat_shapes([
[1] * ndims,
get_shape(x)
])
y = tf.reshape(x, dynamic_shape)
if static_shape is not None:
y.set_shape(static_shape)
return y
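# Hedged usage sketch (an editorial addition): `prepend_dims` adds `ndims`
# leading unit axes, e.g. a (3, 4) tensor becomes (1, 1, 3, 4) with ndims=2.
def _example_prepend_dims():
    x = tf.zeros([3, 4])
    return prepend_dims(x, ndims=2)  # static shape: (1, 1, 3, 4)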
@add_name_arg_doc
def flatten_to_ndims(x, ndims, name=None):
"""
Flatten the front dimensions of `x`, such that the resulting tensor
will have at most `ndims` dimensions.
Args:
x (Tensor): The tensor to be flatten.
ndims (int): The maximum number of dimensions for the resulting tensor.
Returns:
(tf.Tensor, tuple[int or None], tuple[int] or tf.Tensor) or (tf.Tensor, None, None):
(The flatten tensor, the static front shape, and the front shape),
or (the original tensor, None, None)
"""
x = tf.convert_to_tensor(x)
if ndims < 1:
        raise ValueError('`ndims` must be greater than or equal to 1.')
if not x.get_shape():
raise ValueError('`x` is required to have known number of '
'dimensions.')
shape = get_static_shape(x)
if len(shape) < ndims:
        raise ValueError('`ndims` is {}, but `x` only has rank {}.'.
format(ndims, len(shape)))
if len(shape) == ndims:
return x, None, None
with tf.name_scope(name, default_name='flatten', values=[x]):
if ndims == 1:
static_shape = shape
if None in shape:
shape = tf.shape(x)
return tf.reshape(x, [-1]), static_shape, shape
else:
front_shape, back_shape = shape[:-(ndims - 1)], shape[-(ndims - 1):]
static_front_shape = front_shape
static_back_shape = back_shape
if None in front_shape or None in back_shape:
dynamic_shape = tf.shape(x)
if None in front_shape:
front_shape = dynamic_shape[:-(ndims - 1)]
if None in back_shape:
back_shape = dynamic_shape[-(ndims - 1):]
if isinstance(back_shape, tuple):
x = tf.reshape(x, [-1] + list(back_shape))
else:
x = tf.reshape(x, tf.concat([[-1], back_shape], axis=0))
x.set_shape(tf.TensorShape([None] + list(static_back_shape)))
return x, static_front_shape, front_shape
@add_name_arg_doc
def unflatten_from_ndims(x, static_front_shape, front_shape, name=None):
"""
The inverse transformation of :func:`flatten`.
If both `static_front_shape` is None and `front_shape` is None,
`x` will be returned without any change.
Args:
x (Tensor): The tensor to be unflatten.
static_front_shape (tuple[int or None] or None): The static front shape.
front_shape (tuple[int] or tf.Tensor or None): The front shape.
Returns:
tf.Tensor: The unflatten x.
"""
x = tf.convert_to_tensor(x)
if static_front_shape is None and front_shape is None:
return x
if not x.get_shape():
raise ValueError('`x` is required to have known number of '
'dimensions.')
shape = get_static_shape(x)
if len(shape) < 1:
raise ValueError('`x` only has rank {}, required at least 1.'.
format(len(shape)))
if not is_tensor_object(front_shape):
front_shape = tuple(front_shape)
with tf.name_scope(name, default_name='unflatten', values=[x]):
back_shape = shape[1:]
static_back_shape = back_shape
if None in back_shape:
back_shape = tf.shape(x)[1:]
if isinstance(front_shape, tuple) and isinstance(back_shape, tuple):
x = tf.reshape(x, front_shape + back_shape)
else:
x = tf.reshape(x, tf.concat([front_shape, back_shape], axis=0))
x.set_shape(tf.TensorShape(list(static_front_shape) +
list(static_back_shape)))
return x
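# Hedged round-trip sketch (an editorial addition): flatten the leading axes so
# that a 2-D op can be applied, then restore the original front shape.
def _example_flatten_roundtrip():
    x = tf.zeros([2, 3, 4, 5])
    flat, static_front, front = flatten_to_ndims(x, 2)    # shape: (24, 5)
    y = flat * 2.                                         # any op on the 2-D view
    return unflatten_from_ndims(y, static_front, front)   # shape: (2, 3, 4, 5)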
def broadcast_to_shape(x, shape, name=None):
"""
Broadcast `x` to match `shape`.
If ``rank(x) > len(shape)``, only the tail dimensions will be broadcasted
to match `shape`.
Args:
x: A tensor.
shape (tuple[int] or tf.Tensor): Broadcast `x` to match this shape.
Returns:
tf.Tensor: The broadcasted tensor.
"""
# check the parameters
x = tf.convert_to_tensor(x)
x_shape = get_static_shape(x)
ns_values = [x]
if is_tensor_object(shape):
shape = tf.convert_to_tensor(shape)
ns_values.append(shape)
else:
shape = tuple(int(s) for s in shape)
with tf.name_scope(name=name or 'broadcast_to_shape', values=ns_values):
cannot_broadcast_msg = (
'`x` cannot be broadcasted to match `shape`: x {!r} vs shape {!r}'.
format(x, shape)
)
# fast routine: shape is tuple[int] and x_shape is all known,
# we can use reshape + tile to do the broadcast, which should be faster
# than using ``x * ones(shape)``.
if isinstance(shape, tuple) and x_shape is not None and \
all(s is not None for s in x_shape):
# reshape to have the same dimension
if len(x_shape) < len(shape):
x_shape = (1,) * (len(shape) - len(x_shape)) + x_shape
x = tf.reshape(x, x_shape)
# tile to have the same shape
tile = []
i = -1
while i > -len(shape) - 1:
a, b = x_shape[i], shape[i]
if a == 1 and b > 1:
tile.append(b)
elif a != b:
raise ValueError(cannot_broadcast_msg)
else:
tile.append(1)
i -= 1
tile = [1] * (len(x_shape) - len(shape)) + list(reversed(tile))
if any(s > 1 for s in tile):
x = tf.tile(x, tile)
return x
# slow routine: we may need ``x * ones(shape)`` to do the broadcast
assertions = []
post_assert_shape = False
static_shape = tf.TensorShape(None)
if isinstance(shape, tuple) and x_shape is not None:
need_multiply_ones = False
# it should always broadcast if len(x_shape) < len(shape)
if len(x_shape) < len(shape):
need_multiply_ones = True
# check the consistency of x and shape
static_shape_hint = [] # list to gather the static shape hint
axis_to_check = [] # list to gather the axis to check
i = -1
while i >= -len(shape) and i >= -len(x_shape):
a, b = x_shape[i], shape[i]
if a is None:
axis_to_check.append(i)
else:
if a != b:
if a == 1:
need_multiply_ones = True
else:
raise ValueError(cannot_broadcast_msg)
static_shape_hint.append(b)
i -= 1
# compose the static shape hint
if len(shape) < len(x_shape):
static_shape = x_shape[:-len(shape)]
elif len(shape) > len(x_shape):
static_shape = shape[:-len(x_shape)]
else:
static_shape = ()
static_shape = tf.TensorShape(
static_shape + tuple(reversed(static_shape_hint)))
# compose the assertion operations and the multiply flag
if axis_to_check:
need_multiply_flags = []
x_dynamic_shape = tf.shape(x)
for i in axis_to_check:
assertions.append(tf.assert_equal(
tf.logical_or(
tf.equal(x_dynamic_shape[i], shape[i]),
tf.equal(x_dynamic_shape[i], 1),
),
True,
message=cannot_broadcast_msg
))
if len(x_shape) >= len(shape):
need_multiply_flags.append(
tf.not_equal(x_dynamic_shape[i], shape[i]))
if not need_multiply_ones:
need_multiply_ones = \
tf.reduce_any(tf.stack(need_multiply_flags))
else:
            # we have no idea about what `shape` is here, thus we need to
# assert the shape after ``x * ones(shape)``.
need_multiply_ones = True
post_assert_shape = True
# do broadcast if `x_shape` != `shape`
def multiply_branch():
with assert_deps(assertions):
ones_template = tf.ones(shape, dtype=x.dtype.base_dtype)
try:
return x * ones_template
except ValueError: # pragma: no cover
raise ValueError(cannot_broadcast_msg)
def identity_branch():
with assert_deps(assertions) as asserted:
if asserted:
return tf.identity(x)
else: # pragma: no cover
return x
t = smart_cond(need_multiply_ones, multiply_branch, identity_branch)
t.set_shape(static_shape)
if post_assert_shape:
post_assert_op = tf.assert_equal(
tf.reduce_all(tf.equal(tf.shape(t)[-tf.size(shape):], shape)),
True,
message=cannot_broadcast_msg
)
with assert_deps([post_assert_op]) as asserted:
if asserted:
t = tf.identity(t)
return t
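# A minimal usage sketch of `broadcast_to_shape`, assuming the TensorFlow 1.x
# graph-mode conventions used throughout this module; the helper below is
# hypothetical and added only for illustration.
def _broadcast_to_shape_example():  # pragma: no cover
    x = tf.zeros([3, 1])
    # Fully static input and target shapes take the fast reshape + tile path.
    y = broadcast_to_shape(x, (2, 3, 4))
    assert y.get_shape().as_list() == [2, 3, 4]
    # A dynamic target shape falls back to the ``x * ones(shape)`` routine.
    z = broadcast_to_shape(x, tf.constant([2, 3, 4], dtype=tf.int32))
    return y, z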
@add_name_arg_doc
def broadcast_to_shape_strict(x, shape, name=None):
"""
Broadcast `x` to match `shape`.
This method requires `rank(x)` to be less than or equal to `len(shape)`.
You may use :func:`broadcast_to_shape` instead, to allow the cases where
``rank(x) > len(shape)``.
Args:
x: A tensor.
shape (tuple[int] or tf.Tensor): Broadcast `x` to match this shape.
Returns:
tf.Tensor: The broadcasted tensor.
"""
# check the parameters
x = tf.convert_to_tensor(x)
x_shape = get_static_shape(x)
ns_values = [x]
if is_tensor_object(shape):
shape = tf.convert_to_tensor(shape)
ns_values.append(shape)
else:
shape = tuple(int(s) for s in shape)
with tf.name_scope(name=name or 'broadcast_to_shape', values=ns_values):
cannot_broadcast_msg = (
'`x` cannot be broadcasted to match `shape`: x {!r} vs shape {!r}'.
format(x, shape)
)
# assert ``rank(x) <= len(shape)``
if isinstance(shape, tuple) and x_shape is not None:
if len(x_shape) > len(shape):
raise ValueError(cannot_broadcast_msg)
elif isinstance(shape, tuple):
with assert_deps([
tf.assert_less_equal(
tf.rank(x),
len(shape),
message=cannot_broadcast_msg
)
]) as asserted:
if asserted: # pragma: no cover
x = tf.identity(x)
else:
with assert_deps([
assert_rank(
shape,
1,
message=cannot_broadcast_msg
)
]) as asserted:
if asserted: # pragma: no cover
shape = tf.identity(shape)
with assert_deps([
tf.assert_less_equal(
tf.rank(x),
tf.size(shape),
message=cannot_broadcast_msg
)
]) as asserted:
if asserted: # pragma: no cover
x = tf.identity(x)
# do broadcast
return broadcast_to_shape(x, shape)
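# A minimal sketch of the strict variant, under the same TensorFlow 1.x
# graph-mode assumption; the helper below is illustrative only.
def _broadcast_to_shape_strict_example():  # pragma: no cover
    # rank(x) <= len(shape) is accepted and delegated to broadcast_to_shape
    y = broadcast_to_shape_strict(tf.zeros([3, 1]), (2, 3, 4))
    try:
        broadcast_to_shape_strict(tf.zeros([2, 3, 4]), (3, 4))
    except ValueError:
        pass  # rank(x) > len(shape) is rejected up front
    return y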
@add_name_arg_doc
def broadcast_concat(x, y, axis, name=None):
"""
Broadcast `x` and `y`, then concat them along `axis`.
This method cannot deal with all possible situations yet.
    `x` and `y` must have a known number of dimensions, and only the
    deterministic axes will be broadcasted. You must ensure the
    non-deterministic axes are properly broadcasted by yourself.
Args:
x: The tensor `x`.
y: The tensor `y`.
axis: The axis to be concatenated.
Returns:
tf.Tensor: The broadcast and concatenated tensor.
"""
x = tf.convert_to_tensor(x)
y = tf.convert_to_tensor(y)
# check the arguments
x_static_shape = get_static_shape(x)
if x_static_shape is None:
raise ValueError('`x` with non-deterministic shape is not supported.')
y_static_shape = get_static_shape(y)
if y_static_shape is None:
raise ValueError('`y` with non-deterministic shape is not supported.')
x_rank = len(x_static_shape)
y_rank = len(y_static_shape)
out_ndims = max(x_rank, y_rank)
min_axis = -out_ndims
max_axis = out_ndims - 1
if axis < min_axis or axis > max_axis:
raise ValueError('Invalid axis: must >= {} and <= {}, got {}'.
format(min_axis, max_axis, axis))
if axis >= 0:
axis = axis - out_ndims
# compute the broadcast shape
out_static_shape = [None] * out_ndims
x_tile = [1] * out_ndims
y_tile = [1] * out_ndims
assertions = []
dynamic_shape_cache = {}
def get_dynamic_shape(t):
if t not in dynamic_shape_cache:
dynamic_shape_cache[t] = get_shape(t)
return dynamic_shape_cache[t]
def broadcast_axis(i, a, b, a_tile, b_tile, a_tensor, b_tensor):
err_msg = ('`x` and `y` cannot be broadcast concat: {} vs {}'.
format(x, y))
        # validate whether a == b, or whether they can be broadcasted
if a is None and b is None:
# both dynamic, must be equal
a = get_dynamic_shape(a_tensor)[i]
b = get_dynamic_shape(b_tensor)[i]
assertions.append(tf.assert_equal(a, b, message=err_msg))
elif a is not None and b is not None:
# both static, check immediately
if a != 1 and b != 1 and a != b:
raise ValueError(err_msg)
if a == 1:
a_tile[i] = b
elif b == 1:
b_tile[i] = a
out_static_shape[i] = max(a, b)
elif a is None:
# a dynamic, b can be 1 or equal to a
a = get_dynamic_shape(a_tensor)[i]
if b == 1:
b_tile[i] = a
else:
assertions.append(tf.assert_equal(a, b, message=err_msg))
out_static_shape[i] = b
else:
broadcast_axis(i, b, a, b_tile, a_tile, b_tensor, a_tensor)
def maybe_prepend_dims(t, rank, name):
if rank < out_ndims:
t = prepend_dims(t, out_ndims - rank, name=name)
return t
def maybe_tile(t, tile, name):
if any(s != 1 for s in tile):
if any(is_tensor_object(s) for s in tile):
tile = tf.stack(tile, axis=0)
t = tf.tile(t, tile, name=name)
return t
with tf.name_scope(name, default_name='broadcast_concat', values=[x, y]):
# infer the configurations
for i in range(-1, -out_ndims - 1, -1):
a = x_static_shape[i] if i >= -x_rank else 1
b = y_static_shape[i] if i >= -y_rank else 1
if i != axis:
broadcast_axis(i, a, b, x_tile, y_tile, x, y)
else:
if a is not None and b is not None:
out_static_shape[i] = a + b
# do broadcast
x = maybe_tile(
maybe_prepend_dims(x, x_rank, name='prepend_dims_to_x'),
x_tile,
name='tile_x'
)
y = maybe_tile(
maybe_prepend_dims(y, y_rank, name='prepend_dims_to_y'),
y_tile,
name='tile_y'
)
with assert_deps(assertions) as asserted:
if asserted:
x = tf.identity(x)
y = tf.identity(y)
# do concat
ret = tf.concat([x, y], axis=axis)
ret.set_shape(tf.TensorShape(out_static_shape))
return ret
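# A minimal sketch of `broadcast_concat` (illustrative only): deterministic
# non-concat axes are broadcast first, then the tensors are concatenated.
def _broadcast_concat_example():  # pragma: no cover
    x = tf.zeros([2, 1, 3])
    y = tf.zeros([1, 4, 5])
    # The leading axes are broadcast to (2, 4); the concat axis becomes 3 + 5.
    z = broadcast_concat(x, y, axis=-1)
    assert z.get_shape().as_list() == [2, 4, 8]
    return z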
@add_name_arg_doc
def transpose_conv2d_axis(input, from_channels_last, to_channels_last,
name=None):
"""
Ensure the channels axis of `input` tensor to be placed at the desired axis.
Args:
input (tf.Tensor): The input tensor, at least 4-d.
        from_channels_last (bool): Whether or not the channels axis
            is the last axis in `input` (i.e., the data format is "NHWC").
        to_channels_last (bool): Whether or not the channels axis
            should be the last axis in the output tensor.
Returns:
tf.Tensor: The (maybe) transposed output tensor.
"""
if from_channels_last:
input_spec = InputSpec(shape=('...', '?', '?', '?', '*'))
else:
input_spec = InputSpec(shape=('...', '?', '*', '?', '?'))
input = input_spec.validate('input', input)
input_shape = get_static_shape(input)
sample_and_batch_axis = [i for i in range(len(input_shape) - 3)]
    # check whether or not the axis should be transposed
if from_channels_last and not to_channels_last:
transpose_axis = [-1, -3, -2]
elif not from_channels_last and to_channels_last:
transpose_axis = [-2, -1, -3]
else:
transpose_axis = None
# transpose the axis
if transpose_axis is not None:
transpose_axis = [i + len(input_shape) for i in transpose_axis]
input = tf.transpose(input, sample_and_batch_axis + transpose_axis,
name=name or 'transpose_conv2d_axis')
return input
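# A minimal sketch of `transpose_conv2d_axis` (illustrative only): convert an
# NHWC feature map to NCHW and back, assuming fully static shapes.
def _transpose_conv2d_axis_example():  # pragma: no cover
    nhwc = tf.zeros([8, 32, 32, 3])
    nchw = transpose_conv2d_axis(
        nhwc, from_channels_last=True, to_channels_last=False)
    assert nchw.get_shape().as_list() == [8, 3, 32, 32]
    back = transpose_conv2d_axis(
        nchw, from_channels_last=False, to_channels_last=True)
    assert back.get_shape().as_list() == [8, 32, 32, 3]
    return back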
@add_name_arg_doc
def transpose_conv2d_channels_last_to_x(input, channels_last, name=None):
"""
Ensure the channels axis (known to be the last axis) of `input` tensor
to be placed at the desired axis.
Args:
input (tf.Tensor): The input tensor, at least 4-d.
        channels_last (bool): Whether or not the channels axis
            should be the last axis in the output tensor.
Returns:
tf.Tensor: The (maybe) transposed output tensor.
"""
return transpose_conv2d_axis(
input, from_channels_last=True, to_channels_last=channels_last,
name=name
)
@add_name_arg_doc
def transpose_conv2d_channels_x_to_last(input, channels_last, name=None):
"""
Ensure the channels axis of `input` tensor to be placed at the last axis.
Args:
input (tf.Tensor): The input tensor, at least 4-d.
        channels_last (bool): Whether or not the channels axis
            is the last axis in the `input` tensor.
Returns:
tf.Tensor: The (maybe) transposed output tensor.
"""
return transpose_conv2d_axis(
input, from_channels_last=channels_last, to_channels_last=True,
name=name
)
@add_name_arg_doc
def reshape_tail(input, ndims, shape, name=None):
"""
Reshape the tail (last) `ndims` into specified `shape`.
Usage::
x = tf.zeros([2, 3, 4, 5, 6])
reshape_tail(x, 3, [-1]) # output: zeros([2, 3, 120])
reshape_tail(x, 1, [3, 2]) # output: zeros([2, 3, 4, 5, 3, 2])
Args:
        input (Tensor): The input tensor, with at least `ndims` dimensions.
        ndims (int): The number of trailing dimensions to reshape.
shape (Iterable[int] or tf.Tensor): The shape of the new tail.
Returns:
tf.Tensor: The reshaped tensor.
"""
input = tf.convert_to_tensor(input)
if not is_tensor_object(shape):
shape = list(int(s) for s in shape)
neg_one_count = 0
for s in shape:
if s <= 0:
if s == -1:
if neg_one_count > 0:
raise ValueError('`shape` is not a valid shape: at '
'most one `-1` can be specified.')
else:
neg_one_count += 1
else:
raise ValueError('`shape` is not a valid shape: {} is '
'not allowed.'.format(s))
with tf.name_scope(name or 'reshape_tail', values=[input]):
# assert the dimension
with assert_deps([
assert_rank_at_least(
input, ndims,
message='rank(input) must be at least ndims')
]) as asserted:
if asserted: # pragma: no cover
input = tf.identity(input)
# compute the static shape
static_input_shape = get_static_shape(input)
static_output_shape = None
if static_input_shape is not None:
if ndims > 0:
left_shape = static_input_shape[:-ndims]
right_shape = static_input_shape[-ndims:]
else:
left_shape = static_input_shape
right_shape = ()
# attempt to resolve "-1" in `shape`
if isinstance(shape, list):
if None not in right_shape:
shape_size = int(np.prod([s for s in shape if s != -1]))
right_shape_size = int(np.prod(right_shape))
if (-1 not in shape and shape_size != right_shape_size) or \
(-1 in shape and right_shape_size % shape_size != 0):
raise ValueError(
'Cannot reshape the tail dimensions of '
'`input` into `shape`: input {!r}, ndims '
'{}, shape {}.'.format(input, ndims, shape)
)
if -1 in shape:
pos = shape.index(-1)
shape[pos] = right_shape_size // shape_size
static_output_shape = left_shape + \
tuple(s if s != -1 else None for s in shape)
static_output_shape = tf.TensorShape(static_output_shape)
# compute the dynamic shape
input_shape = get_shape(input)
if ndims > 0:
output_shape = concat_shapes([input_shape[:-ndims], shape])
else:
output_shape = concat_shapes([input_shape, shape])
# do reshape
output = tf.reshape(input, output_shape)
output.set_shape(static_output_shape)
return output
|
|
"""
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
from ..exceptions import DataConversionWarning
from .deprecation import deprecated
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric", "indices_to_mask", "deprecated"]
class Bunch(dict):
"""Container object for datasets
Dictionary-like object that exposes its keys as attributes.
>>> b = Bunch(a=1, b=2)
>>> b['b']
2
>>> b.b
2
>>> b.a = 3
>>> b['a']
3
>>> b.c = 6
>>> b['c']
6
"""
def __init__(self, **kwargs):
super(Bunch, self).__init__(kwargs)
def __setattr__(self, key, value):
self[key] = value
def __dir__(self):
return self.keys()
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
def __setstate__(self, state):
        # Bunch pickles generated with scikit-learn 0.16.* have a non-empty
        # __dict__. This causes a surprising behaviour when loading these
        # pickles with scikit-learn 0.17: reading bunch.key uses __dict__,
        # but assigning to bunch.key uses __setattr__ and
# only changes bunch['key']. More details can be found at:
# https://github.com/scikit-learn/scikit-learn/issues/6196.
# Overriding __setstate__ to be a noop has the effect of
# ignoring the pickled __dict__
pass
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask : array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
    if np.issubdtype(mask.dtype, np.integer):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
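# Minimal usage sketch (illustrative only, not part of the public API):
# boolean masks are converted to integer indices when X is sparse, so the
# same call pattern works for dense and sparse inputs.
def _safe_mask_example():
    from scipy.sparse import csr_matrix
    mask = np.array([True, False, True])
    X_dense = np.arange(9).reshape(3, 3)
    X_sparse = csr_matrix(X_dense)
    dense_rows = X_dense[safe_mask(X_dense, mask)]     # boolean mask used as-is
    sparse_rows = X_sparse[safe_mask(X_sparse, mask)]  # integer indices [0, 2]
    return dense_rows, sparse_rows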
def axis0_safe_slice(X, mask, len_mask):
"""
    This function is safer than using safe_mask directly, since it returns
    an empty array when a sparse matrix is sliced with an all-False boolean
    mask, instead of raising an unhelpful error in older
    versions of SciPy.
See: https://github.com/scipy/scipy/issues/5361
    Also note that we could avoid the dot product in _huber_loss_and_gradient
    by checking whether len_mask is zero, but this is not going to be the
    bottleneck, since the numbers of outliers and non-outliers are typically
    non-zero, and the extra check would make the code harder to follow.
"""
if len_mask != 0:
return X[safe_mask(X, mask), :]
return np.zeros(shape=(0, X.shape[1]))
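# Minimal sketch (illustrative only): with an all-False mask on a sparse
# matrix, an empty (0, n_features) array comes back instead of an error.
def _axis0_safe_slice_example():
    from scipy.sparse import csr_matrix
    X = csr_matrix(np.eye(3))
    mask = np.zeros(3, dtype=bool)
    return axis0_safe_slice(X, mask, int(mask.sum()))  # shape (0, 3)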
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series.
Data from which to sample rows or items.
indices : array-like of int
Indices according to which X will be subsampled.
Returns
-------
subset
Subset of X on first axis
Notes
-----
CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
not supported.
"""
if hasattr(X, "iloc"):
# Work-around for indexing with read-only indices in pandas
indices = indices if indices.flags.writeable else indices.copy()
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
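# Minimal sketch (illustrative only): the same helper indexes lists, numpy
# arrays and pandas objects uniformly along the first axis.
def _safe_indexing_example():
    X_list = ['a', 'b', 'c', 'd']
    X_array = np.arange(8).reshape(4, 2)
    indices = np.array([0, 2])
    from_list = safe_indexing(X_list, indices)    # ['a', 'c']
    from_array = safe_indexing(X_array, indices)  # rows 0 and 2, via take()
    return from_list, from_array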
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
If replace is False it should not be larger than the length of
arrays.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
elif (max_n_samples > n_samples) and (not replace):
raise ValueError("Cannot sample %d out of arrays with dim %d "
"when replace is False" % (max_n_samples,
n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
    The last slice may contain fewer than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
% n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible.
Parameters
----------
x : iterable
"""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
def indices_to_mask(indices, mask_length):
"""Convert list of indices to boolean mask.
Parameters
----------
indices : list-like
List of integers treated as indices.
mask_length : int
Length of boolean mask to be generated.
Returns
-------
mask : 1d boolean nd-array
Boolean array that is True where indices are present, else False.
"""
if mask_length <= np.max(indices):
raise ValueError("mask_length must be greater than max(indices)")
    mask = np.zeros(mask_length, dtype=bool)
mask[indices] = True
return mask
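# Minimal sketch (illustrative only).
def _indices_to_mask_example():
    mask = indices_to_mask([1, 3], mask_length=5)
    # array([False,  True, False,  True, False])
    return mask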
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BusTiming',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('bus_route', models.CharField(max_length=512)),
('from_time', models.TimeField()),
('bus_no', models.CharField(unique=True, max_length=10)),
('working_day', models.BooleanField()),
],
),
migrations.CreateModel(
name='Day',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('day', models.CharField(unique=True, max_length=32)),
],
),
migrations.CreateModel(
name='EmployeeVehicle',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=255)),
('employee_no', models.IntegerField()),
('department', models.CharField(max_length=100)),
('date_of_birth', models.DateField()),
('block_number', models.CharField(max_length=5)),
('flat_number', models.CharField(max_length=5)),
('mobile_number', models.IntegerField()),
('user_photo', models.ImageField(upload_to='')),
('identity_card', models.FileField(null=True, upload_to='identity_card')),
('parking_slot_no', models.CharField(max_length=50)),
('vehicle_registration_number', models.CharField(max_length=100)),
('color', models.CharField(max_length=32)),
('make_and_model', models.CharField(max_length=100)),
('chassis_number', models.CharField(max_length=100)),
('engine_number', models.CharField(max_length=100)),
('registered_in_the_name_of', models.CharField(max_length=100)),
('vehicle_insurance_no', models.CharField(unique=True, max_length=100)),
('insurance_valid_upto', models.DateField()),
('vehicle_registration_card', models.FileField(upload_to='vehicle_registration_card')),
('vehicle_insurance', models.FileField(null=True, upload_to='vehicle_insurance')),
('vehicle_photo', models.ImageField(null=True, upload_to='')),
('driving_license_number', models.CharField(max_length=15)),
('driving_license_issue_date', models.DateField()),
('driving_license_expiry_date', models.DateField()),
('driving_license', models.FileField(null=True, upload_to='driving_license')),
('declaration', models.TextField(null=True, default='By submitting this form, I hereby declare that I will be obliged to the following terms and conditions:\n\n1) I will abide by the rules of Traffic,\n2) I will not cause inconvenience to other road users.', blank=True)),
('date_of_application', models.DateTimeField(null=True, blank=True)),
('registered_with_security_section', models.NullBooleanField(default=None)),
('vehicle_pass_no', models.CharField(null=True, max_length=32, unique=True, blank=True)),
('issue_date', models.DateField()),
('expiry_date', models.DateField()),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Gate',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('gate_name', models.CharField(unique=True, max_length=50)),
],
),
migrations.CreateModel(
name='Guard',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('guard_phone_number', models.IntegerField()),
('is_security', models.BooleanField(default=True)),
('guard_user', models.OneToOneField(related_name='guard_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='IITGUser',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('is_student', models.BooleanField(default=False, verbose_name='Is student', help_text='Designates whether the user is a student or a professor.')),
('user', models.OneToOneField(default=False, related_name='user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='OnDutyGuard',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('place', models.CharField(max_length=100)),
('is_gate', models.BooleanField()),
('guard', models.OneToOneField(related_name='guard', to='vms.Guard')),
],
),
migrations.CreateModel(
name='ParkingSlot',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('parking_area_name', models.CharField(unique=True, max_length=100)),
('total_slots', models.IntegerField(null=True, default=0, blank=True)),
('available_slots', models.IntegerField(null=True, default=0, blank=True)),
],
),
migrations.CreateModel(
name='PersonPass',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('old_card_reference', models.CharField(max_length=10)),
('pass_number', models.CharField(unique=True, max_length=10)),
('name', models.CharField(max_length=255)),
('user_photo', models.ImageField(upload_to='')),
('age', models.IntegerField()),
('identified_by', models.CharField(max_length=255)),
('work_area', models.CharField(max_length=255)),
('working_time', models.CharField(max_length=255)),
('nature_of_work', models.CharField(max_length=255)),
('issue_date', models.DateField()),
('expiry_date', models.DateField()),
('is_blocked', models.BooleanField()),
('reason', models.TextField(blank=True)),
],
),
migrations.CreateModel(
name='Place',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('place_name', models.CharField(unique=True, max_length=32)),
],
),
migrations.CreateModel(
name='ResidentLog',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('vehicle_pass_no', models.CharField(max_length=50)),
('in_time', models.DateTimeField(null=True, blank=True)),
('out_time', models.DateTimeField(null=True, blank=True)),
('in_gate', models.ForeignKey(null=True, related_name='resident_in_gate', to='vms.Gate')),
('out_gate', models.ForeignKey(null=True, related_name='resident_out_gate', to='vms.Gate')),
],
),
migrations.CreateModel(
name='StudentCycle',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('cycle_model', models.CharField(max_length=32)),
('cycle_color', models.CharField(max_length=32)),
('cycle_pass_no', models.CharField(max_length=10)),
('hostel', models.CharField(choices=[('Manas', 'Manas'), ('Dihing', 'Dihing'), ('Kameng', 'Kameng'), ('Umiam', 'Umiam'), ('Barak', 'Barak'), ('Brahmaputra', 'Brahmaputra'), ('Kapili', 'Kapili'), ('Siang', 'Siang'), ('Dibang', 'Dibang'), ('Lohit', 'Lohit'), ('Subansiri', 'Subansiri'), ('Dhansiri', 'Dhansiri')], max_length=50, blank=True)),
('room_number', models.CharField(max_length=5)),
('user', models.OneToOneField(related_name='cycle_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='StudentVehicle',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('name', models.CharField(max_length=255)),
('roll_number', models.IntegerField()),
('department', models.CharField(max_length=100)),
('programme', models.CharField(max_length=10)),
('date_of_birth', models.DateField()),
                ('hostel_name', models.CharField(choices=[('Manas', 'Manas'), ('Dihing', 'Dihing'), ('Kameng', 'Kameng'), ('Umiam', 'Umiam'), ('Barak', 'Barak'), ('Brahmaputra', 'Brahmaputra'), ('Kapili', 'Kapili'), ('Siang', 'Siang'), ('Dibang', 'Dibang'), ('Lohit', 'Lohit'), ('Subansiri', 'Subansiri'), ('Dhansiri', 'Dhansiri')], max_length=32)),
('room_number', models.CharField(max_length=5)),
('mobile_number', models.IntegerField()),
('user_photo', models.ImageField(upload_to='')),
('identity_card', models.FileField(null=True, upload_to='identity_card')),
('address_of_communication', models.TextField()),
('address_of_communication_district', models.CharField(max_length=100)),
('address_of_communication_state', models.CharField(max_length=100)),
('address_of_communication_pincode', models.IntegerField()),
('permanent_address', models.TextField()),
('permanent_address_district', models.CharField(max_length=100)),
('permanent_address_state', models.CharField(max_length=100)),
('permanent_address_pincode', models.IntegerField()),
('parents_contact_no', models.IntegerField()),
('parents_emailid', models.EmailField(max_length=75)),
('vehicle_registration_number', models.CharField(unique=True, max_length=100)),
('color', models.CharField(max_length=32)),
('make_and_model', models.CharField(max_length=100)),
('chassis_number', models.CharField(max_length=100)),
('engine_number', models.CharField(max_length=100)),
('registered_in_the_name_of', models.CharField(max_length=100)),
('relation_with_owner', models.CharField(max_length=32)),
('vehicle_insurance_no', models.CharField(unique=True, max_length=100)),
('insurance_valid_upto', models.DateField()),
('vehicle_registration_card', models.FileField(null=True, upload_to='vehicle_registration_card')),
('vehicle_insurance', models.FileField(null=True, upload_to='vehicle_insurance')),
('vehicle_photo', models.ImageField(upload_to='')),
('driving_license_number', models.CharField(max_length=15)),
('driving_license_issue_date', models.DateField()),
('driving_license_expiry_date', models.DateField()),
('driving_license', models.FileField(null=True, upload_to='driving_license')),
('declaration', models.TextField(null=True, default='By submitting this form, I hereby declare that I will be obliged to the following terms and conditions:\n\n1) I will abide by the rules of Traffic,\n2) I will not cause inconvenience to other road users.', blank=True)),
('date_of_application', models.DateTimeField(null=True, blank=True)),
('registered_with_security_section', models.NullBooleanField(default=None)),
('vehicle_pass_no', models.CharField(null=True, max_length=32, unique=True, blank=True)),
('issue_date', models.DateField()),
('expiry_date', models.DateField()),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SuspiciousVehicle',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('vehicle_number', models.CharField(unique=True, max_length=20)),
('vehicle_type', models.CharField(choices=[('bicycle', 'bicycle'), ('bike', 'bike'), ('car', 'car'), ('truck', 'truck'), ('courier', 'courier'), ('auto', 'auto'), ('other', 'other')], null=True, max_length=50, blank=True)),
('vehicle_model', models.CharField(null=True, max_length=100, blank=True)),
('vehicle_image', models.ImageField(null=True, blank=True, upload_to='suspicious_image')),
('remarks', models.TextField(null=True, max_length=1000, blank=True)),
('reporter', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='TheftReport',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('vehicle_pass_no', models.CharField(unique=True, max_length=50)),
('theft_time', models.DateTimeField(null=True)),
('theft_place', models.CharField(null=True, max_length=100)),
('remarks', models.TextField(null=True, max_length=1000, blank=True)),
('status', models.CharField(choices=[('Submitted', 'Submitted'), ('Received by Security Section', 'Received by Security Section'), ('Search in Progress', 'Search in Progress'), ('Vehicle Found', 'Vehicle Found'), ('Case Closed (Vehicle Not Found)', 'Case Closed (Vehicle Not Found)'), ('Vehicle Returned', 'Vehicle Returned')], default='Submitted', max_length=100)),
('emp_vehicle', models.ForeignKey(null=True, blank=True, to='vms.EmployeeVehicle')),
('reporter', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL)),
('stud_vehicle', models.ForeignKey(null=True, blank=True, to='vms.StudentVehicle')),
],
),
migrations.CreateModel(
name='VisitorLog',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('vehicle_number', models.CharField(max_length=20)),
('driver_name', models.CharField(null=True, max_length=255, blank=True)),
('license_number', models.CharField(null=True, max_length=20, blank=True)),
('place_to_visit', models.CharField(null=True, max_length=100, blank=True)),
('purpose_of_visit', models.TextField(null=True, max_length=1000, blank=True)),
('in_time', models.DateTimeField(null=True, blank=True)),
('vehicle_type', models.CharField(null=True, max_length=50, blank=True)),
('vehicle_model', models.CharField(null=True, max_length=100, blank=True)),
('out_time', models.DateTimeField(null=True, blank=True)),
('in_gate', models.ForeignKey(null=True, related_name='visitor_in_gate', to='vms.Gate')),
('out_gate', models.ForeignKey(null=True, related_name='visitor_out_gate', to='vms.Gate')),
],
),
migrations.AddField(
model_name='bustiming',
name='availability',
field=models.ManyToManyField(to='vms.Day'),
),
migrations.AddField(
model_name='bustiming',
name='ending_point',
field=models.ForeignKey(related_name='ending_point', to='vms.Place'),
),
migrations.AddField(
model_name='bustiming',
name='starting_point',
field=models.ForeignKey(related_name='starting_point', to='vms.Place'),
),
]
|
|
"""
This file is part of the everest project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Created on Jun 1, 2012.
"""
from pyramid.compat import urlparse
import pytest
from everest.resources.utils import resource_to_url
from everest.resources.utils import url_to_resource
from everest.tests.complete_app.interfaces import IMyEntityParent
from everest.tests.complete_app.resources import MyEntityMember
from everest.resources.service import Service
__docformat__ = 'reStructuredText en'
__all__ = ['TestUrlNoRdb',
'TestUrlRdb',
]
@pytest.mark.usefixtures("collection")
class BaseTestUrl(object):
package_name = 'everest.tests.complete_app'
app_url = 'http://0.0.0.0:6543'
base_url = '%s/my-entities/' % app_url
def test_resource_to_url_non_resource_object(self, member):
ent = member.get_entity()
with pytest.raises(TypeError) as cm:
resource_to_url(ent)
exc_msg = 'Can not generate URL for non-resource'
assert str(cm.value).startswith(exc_msg)
def test_resource_to_url_floating_member(self, member):
ent = member.get_entity()
mb = MyEntityMember.create_from_entity(ent)
with pytest.raises(ValueError) as cm:
resource_to_url(mb)
exc_msg = 'Can not generate URL for floating resource'
assert str(cm.value).startswith(exc_msg)
def test_resource_to_url_member(self, member):
self.__check_url(resource_to_url(member),
schema='http', path='/my-entities/0/', params='',
query='')
def test_resource_to_url_collection(self, collection):
self.__check_url(resource_to_url(collection),
schema='http', path='/my-entities/', params='',
query='')
def test_resource_to_url_with_slice(self, collection):
collection.slice = slice(0, 1)
self.__check_url(resource_to_url(collection),
schema='http', path='/my-entities/',
params='', query='start=0&size=1')
def test_resource_to_url_with_id_filter(self, collection,
filter_specification_factory):
flt_spec = filter_specification_factory.create_equal_to('id', 0)
collection.filter = flt_spec
self.__check_url(resource_to_url(collection),
schema='http', path='/my-entities/', params='',
query='q=id:equal-to:0')
def test_resource_to_url_with_resource_filter(self, resource_repo,
collection,
filter_specification_factory):
parent_coll = resource_repo.get_collection(IMyEntityParent)
parent = parent_coll['0']
parent_url = resource_to_url(parent)
flt_spec = \
filter_specification_factory.create_equal_to('parent', parent)
collection.filter = flt_spec
self.__check_url(resource_to_url(collection),
schema='http', path='/my-entities/', params='',
query='q=parent:equal-to:"%s"' % parent_url)
def test_resource_to_url_with_order(self, collection,
order_specification_factory):
ord_spec = order_specification_factory.create_ascending('id')
collection.order = ord_spec
self.__check_url(resource_to_url(collection),
schema='http', path='/my-entities/', params='',
query='sort=id:asc')
def test_resource_to_url_with_multiple_order(self,
collection,
order_specification_factory):
ord_spec_id = order_specification_factory.create_ascending('id')
ord_spec_text = order_specification_factory.create_descending('text')
ord_spec = \
order_specification_factory.create_conjunction(ord_spec_id,
ord_spec_text)
collection.order = ord_spec
self.__check_url(resource_to_url(collection),
schema='http', path='/my-entities/', params='',
query='sort=id:asc~text:desc')
def test_resource_to_url_nested(self, member, resource_repo):
child_root_coll = resource_repo.get_collection(type(member.children))
srvc = child_root_coll.__parent__
resource_repo.set_collection_parent(child_root_coll, None)
try:
coll = member.children
coll_url = resource_to_url(coll)
self.__check_url(coll_url, path='/my-entities/0/children/',
query='')
mb = coll['0']
mb_url = resource_to_url(mb)
self.__check_url(mb_url, path='/my-entities/0/children/0/',
query='')
finally:
resource_repo.set_collection_parent(child_root_coll, srvc)
def test_url_to_resource(self, collection):
coll_from_url = url_to_resource(self.base_url)
assert collection['0'] == coll_from_url['0']
@pytest.mark.parametrize('url,error,msg',
[('http://0.0.0.0:6543/my-foos/', KeyError,
'has no subelement my-foos'),
('http://0.0.0.0:6543/', ValueError,
'Traversal found non-resource object'),
(base_url + '?q=id|foo', ValueError,
'Expression parameters have errors'),
(base_url + '?sort=id|foo', ValueError,
'Expression parameters have errors'),
(base_url + '?start=0&size=a',
ValueError, 'must be a number.'),
(base_url + '?start=a&size=100',
ValueError, 'must be a number.'),
(base_url + '?start=-1&size=100',
ValueError,
'must be zero or a positive number.'),
(base_url + '?start=0&size=-100',
ValueError, 'must be a positive number.'),
])
def test_url_to_resource_invalid(self, url, error, msg):
with pytest.raises(error) as cm:
url_to_resource(url)
assert str(cm.value).find(msg) != -1
def test_url_to_resource_with_slice(self):
coll_from_url = url_to_resource(self.base_url + '?size=1&start=0')
# The length is not affected by the slice...
assert len(coll_from_url) == 2
# ... the actual number of members in the collection is.
assert len(list(coll_from_url)) == 1
@pytest.mark.parametrize('criterion,attr,value',
[('id:equal-to:0', 'id', 0),
('id:not-equal-to:0', 'id', 1),
('text:starts-with:"foo"', 'text', 'foo0'),
('text:ends-with:"o1"', 'text', 'too1'),
('text:contains:"o0"', 'text', 'foo0'),
('text:not-contains:"o0"', 'text', 'too1'),
('text:contained:"foo0"', 'text', 'foo0'),
('text:not-contained:"foo0"', 'text', 'too1'),
('id:less-than:1', 'id', 0),
('id:less-than-or-equal-to:0', 'id', 0),
('id:greater-than:0', 'id', 1),
('id:greater-than-or-equal-to:1', 'id', 1),
('id:in-range:0-0', 'id', 0)
])
def test_url_to_resource_with_filter(self, criterion, attr, value):
coll_from_url = url_to_resource(self.base_url + '?q=%s' % criterion)
mbs = list(coll_from_url)
assert len(mbs) == 1
assert getattr(mbs[0], attr) == value
def test_url_to_resource_with_filter_no_values_raises_error(self):
pytest.raises(ValueError,
url_to_resource, self.base_url + '?q=id:equal-to:')
def test_url_to_resource_with_complex_filter(self):
criterion = '(id:equal-to:0 and text:equal-to:"foo0") or ' \
'(id:equal-to:1 and text:equal-to:"too1")'
coll_from_url = \
url_to_resource(self.base_url + '?q=%s' % criterion)
mbs = list(coll_from_url)
assert len(mbs) == 2
def test_url_to_resource_with_order(self):
coll_from_url = url_to_resource(self.base_url + '?sort=id:asc')
assert len(coll_from_url) == 2
assert list(coll_from_url)[-1].id == 1
def test_url_to_resource_with_multiple_order(self):
coll_from_url = url_to_resource(self.base_url +
'?sort=id:asc~text:desc')
assert len(coll_from_url) == 2
assert list(coll_from_url)[-1].id == 1
def test_url_to_resource_with_multiple_filter(self):
criteria = 'id:less-than:1~id:less-than-or-equal-to:1'
coll_from_url = url_to_resource(self.base_url + '?q=%s' % criteria)
assert len(coll_from_url) == 1
def test_url_to_resource_with_multiple_criteria_one_empty(self):
criteria = 'id:less-than:1~'
coll_from_url = url_to_resource(self.base_url + '?q=%s' % criteria)
assert len(coll_from_url) == 1
def test_url_to_resource_with_multiple_values(self):
criteria = 'id:equal-to:0,1'
coll_from_url = url_to_resource(self.base_url + '?q=%s' % criteria)
assert len(coll_from_url) == 2
def test_url_to_resource_with_multiple_values_one_empty(self):
criteria = 'id:equal-to:0,'
coll_from_url = url_to_resource(self.base_url + '?q=%s' % criteria)
assert len(coll_from_url) == 1
def test_url_to_resource_with_multiple_string_values_one_empty(self):
criteria = 'text:starts-with:"foo",""'
coll_from_url = url_to_resource(self.base_url + '?q=%s' % criteria)
assert len(coll_from_url) == 1
def test_url_to_resource_with_link(self):
criterion = 'parent:equal-to:"%s/my-entity-parents/0/"' % self.app_url
coll_from_url = url_to_resource(self.base_url + '?q=%s' % criterion)
assert len(coll_from_url) == 1
def test_url_to_resource_with_link_and_other(self):
criterion1 = 'parent:equal-to:"%s/my-entity-parents/0/"' \
% self.app_url
criterion2 = 'id:equal-to:0'
coll_from_url = url_to_resource(self.base_url +
'?q=%s~%s' % (criterion1, criterion2))
assert len(coll_from_url) == 1
def test_two_urls(self):
par_url = self.app_url + '/my-entity-parents/'
criteria = 'parent:equal-to:"%s","%s"' \
% (par_url + '0/', par_url + '1/')
url = self.base_url + '?q=%s' % criteria
coll_from_url = url_to_resource(url)
assert len(coll_from_url) == 2
def test_url_to_resource_contained_with_simple_collection_link(self):
nested_url = self.app_url \
+ '/my-entity-parents/?q=id:less-than:1'
url = self.app_url + '/my-entities/?q=parent:contained:' \
+ '"' + nested_url + '"'
coll_from_url = url_to_resource(url)
assert len(coll_from_url) == 1
@pytest.mark.parametrize('op,crit',
[(' and ', 'id:greater-than:0'),
('~', 'text:not-equal-to:"foo0"')])
def test_url_to_resource_contained_with_complex_collection_link(self,
op, crit):
nested_url = self.app_url \
+ '/my-entity-parents/?q=id:less-than:2' \
+ op \
+ crit
url = self.app_url + '/my-entities/?q=parent:contained:' \
+ "'" + nested_url + "'"
coll_from_url = url_to_resource(url)
assert len(coll_from_url) == 1
def test_url_to_resource_contained_with_grouped_collection_link(self):
url = self.app_url + '/my-entities/' \
+ '?q=(parent:contained:"' \
+ self.app_url \
+ '/my-entity-parents/?q=id:less-than:3") ' \
+ 'and text:not-equal-to:"foo0"'
coll_from_url = url_to_resource(url)
assert len(coll_from_url) == 1
def test_nested_member_url_with_query_string_fail(self):
par_url = self.app_url + '/my-entity-parents/1/'
criteria = 'parent:equal-to:%s?q=id:equal-to:0' % par_url
url = self.base_url + '?q=%s' % criteria
pytest.raises(ValueError, url_to_resource, url)
def test_url_to_resource_invalid_traversal_object(self, monkeypatch):
monkeypatch.setattr(Service, '__getitem__',
classmethod(lambda cls, item: 1))
url = self.app_url + '/foo'
with pytest.raises(ValueError) as cm:
url_to_resource(url)
exc_msg = 'Traversal found non-resource object'
assert str(cm.value).startswith(exc_msg)
def __check_url(self, url,
schema=None, path=None, params=None, query=None):
urlp = urlparse.urlparse(url)
if not schema is None:
assert urlp.scheme == schema # pylint: disable=E1101
if not path is None:
assert urlp.path == path # pylint: disable=E1101
if not params is None:
assert urlp.params == params # pylint: disable=E1101
if not query is None:
# We can not rely on the order of query parameters returned by
# urlparse, so we compare the sets of parameters.
assert set(urlp.query.split('&')) == \
set(query.split('&')) # pylint: disable=E1101
class TestUrlNoRdb(BaseTestUrl):
config_file_name = 'configure_no_rdb.zcml'
@pytest.mark.usefixtures("rdb")
class TestUrlRdb(BaseTestUrl):
config_file_name = 'configure.zcml'
|
|
# -*- coding: utf-8 -*-
import inspect
import json
from unittest import TestCase
from requests import Session, Response
from mock import Mock, patch
from six import itervalues
from demands import HTTPServiceClient, HTTPServiceError
class PatchedSessionTests(TestCase):
def setUp(self):
# must patch inspect since it is used on Session.request, and when
# Session.request is mocked, inspect blows up
self.request_args = inspect.getargspec(Session.request)
self.inspect_patcher = patch('demands.inspect.getargspec')
self.patched_inspect = self.inspect_patcher.start()
self.patched_inspect.return_value = self.request_args
self.request_patcher = patch.object(Session, 'request')
self.request = self.request_patcher.start()
self.response = Mock(spec=Response(), status_code=200)
self.request.return_value = self.response
def tearDown(self):
self.request_patcher.stop()
self.inspect_patcher.stop()
class HttpServiceTests(PatchedSessionTests):
def setUp(self):
PatchedSessionTests.setUp(self)
self.service = HTTPServiceClient('http://service.com/')
def test_returning_responses_from_all_session_calls(self):
self.assertEqual(self.service.get('/path'), self.response)
self.assertEqual(self.service.put('/path'), self.response)
self.assertEqual(self.service.delete('/path'), self.response)
self.assertEqual(self.service.post('/path'), self.response)
self.assertEqual(self.service.patch('/path'), self.response)
self.assertEqual(self.service.options('/path'), self.response)
self.assertEqual(self.service.head('/path'), self.response)
def test_get_request_with_params(self):
"""GET request with url parameters"""
self.service.get('/get-endpoint', params={'foo': 'bar'})
self.request.assert_called_with(
method='GET', url='http://service.com/get-endpoint',
allow_redirects=True, params={'foo': 'bar'})
def test_minimal_post_request(self):
"""minimal POST request"""
self.service.post('/post-endpoint')
self.request.assert_called_with(
method='POST', url='http://service.com/post-endpoint', json=None,
data=None)
def test_minimal_put_request(self):
"""minimal PUT request"""
self.service.put('/put-endpoint')
self.request.assert_called_with(
method='PUT', url='http://service.com/put-endpoint', data=None)
def test_minimal_delete_request(self):
"""minimal DELETE request"""
self.service.delete('/delete-endpoint')
self.request.assert_called_with(
method='DELETE', url='http://service.com/delete-endpoint')
def test_unacceptable_response(self):
def get():
self.service.get('/get-endpoint')
acceptable = True
self.service.is_acceptable = lambda *args: acceptable
get()
acceptable = False
self.assertRaises(HTTPServiceError, get)
def test_headers_are_passed_and_overridable(self):
service = HTTPServiceClient(
url='http://localhost/',
headers={
'name': 'value',
'thomas': 'kittens'})
service.get('/')
self.request.assert_called_with(
headers={'thomas': 'kittens', 'name': 'value'},
method='GET', url='http://localhost/', allow_redirects=True)
service.get('/', headers={'thomas': 'homegirl'})
self.request.assert_called_with(
headers={'thomas': 'homegirl', 'name': 'value'},
method='GET', url='http://localhost/', allow_redirects=True)
def test_sets_authentication_when_provided(self):
service = HTTPServiceClient(
url='http://localhost/',
auth=('foo', 'bar'),
)
service.get('/authed-endpoint')
self.request.assert_called_with(
method='GET', url='http://localhost/authed-endpoint',
allow_redirects=True, auth=('foo', 'bar'))
@patch('demands.log')
def test_logs_authentication_when_provided(self, mock_log):
service = HTTPServiceClient(
url='http://localhost/',
auth=('foo', 'bar'),
)
service.get('/authed-endpoint')
debug_msgs = get_parsed_log_messages(mock_log, 'debug')
self.assertIn('Authentication', debug_msgs[2])
def test_client_identification_adds_user_agent_header(self):
"""client identification adds User-Agent header"""
service = HTTPServiceClient(
url='http://localhost/',
client_name='my_client',
client_version='1.2.3',
app_name='my_app',
headers={'Foo': 'Bar'},
)
service.get('/test')
self.request.assert_called_with(
method='GET', url='http://localhost/test', allow_redirects=True,
headers={'User-Agent': 'my_client 1.2.3 - my_app', 'Foo': 'Bar'})
def test_post_send_raise_exception_in_case_of_error(self):
self.response.configure_mock(url='http://broken/', status_code=500)
with self.assertRaises(HTTPServiceError):
self.service.request('METHOD', 'http://broken/')
def test_post_send_raises_exception_with_details_on_error(self):
self.response.configure_mock(
status_code=500, content='content', url='http://broken/')
with self.assertRaises(HTTPServiceError) as e:
self.service.request('METHOD', 'http://broken/')
self.assertEqual(e.exception.code, 500)
self.assertEqual(e.exception.details, 'content')
        self.assertTrue(str(e.exception).startswith(
'Unexpected response: url: '
'http://broken/, code: 500, details: '
))
def test_post_sends_no_exception_in_case_of_expected_response_code(self):
self.response.configure_mock(
status_code=404, content='content', url='http://notfound/')
self.service.request(
'METHOD', 'http://notfound/', expected_response_codes=(404,))
    def test_sanitization_of_request_parameters_removes_unknowns(self):
lots_of_params = {
'expected_response_codes': (404,),
'method': 'METHOD',
'something_odd': True
}
self.assertEqual(
self.service._sanitize_request_params(lots_of_params),
{'method': 'METHOD'})
    def test_sanitization_of_verify_parameters(self):
lots_of_params = {
'expected_response_codes': (404,),
'method': 'METHOD',
'verify_ssl': '/some/path',
}
self.assertEqual(
self.service._sanitize_request_params(lots_of_params),
{'method': 'METHOD', 'verify': '/some/path'})
def test_url_is_composed_properly(self):
service = HTTPServiceClient('http://service.com/some/path/')
service.get('/get-endpoint')
self.request.assert_called_with(
method='GET', url='http://service.com/some/path/get-endpoint',
allow_redirects=True
)
def test_url_is_composed_properly_if_path_is_empty(self):
service = HTTPServiceClient(
'http://service.com/some/path/get-endpoint')
service.get('')
self.request.assert_called_with(
method='GET', url='http://service.com/some/path/get-endpoint',
allow_redirects=True
)
def test_pre_send_sets_max_retries(self):
self.service.pre_send({'max_retries': 2})
for adapter in itervalues(self.service.adapters):
self.assertEqual(adapter.max_retries, 2)
def test_pre_send_defaults_max_retries_to_zero(self):
self.service.pre_send({'max_retries': 2})
self.service.pre_send({})
for adapter in itervalues(self.service.adapters):
self.assertEqual(adapter.max_retries, 0)
def get_parsed_log_messages(mock_log, log_level):
"""Return the parsed log message sent to a mock log call at log_level
Example:
log = Mock()
def do_something(data1, data2):
log.info('Doing something with %s and %s', data1, data2)
do_something('one', 'two')
        get_parsed_log_messages(log, 'info')
        # => ['Doing something with one and two']
"""
calls = getattr(mock_log, log_level).call_args_list
if not calls:
raise Exception('%s.%s was not called' % (mock_log, log_level))
messages = []
for call_args in calls:
args, kwargs = call_args
messages.append(args[0] % tuple(args[1:]))
return messages
|
|
"""Base and mixin classes for nearest neighbors."""
# Authors: Jake Vanderplas <[email protected]>
# Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <[email protected]>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
from functools import partial
import warnings
from abc import ABCMeta, abstractmethod
import numbers
import numpy as np
from scipy.sparse import csr_matrix, issparse
import joblib
from joblib import Parallel, effective_n_jobs
from ._ball_tree import BallTree
from ._kd_tree import KDTree
from ..base import BaseEstimator, MultiOutputMixin
from ..base import is_classifier
from ..metrics import pairwise_distances_chunked
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import (
check_array,
gen_even_slices,
_to_object_array,
)
from ..utils.deprecation import deprecated
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_is_fitted
from ..utils.validation import check_non_negative
from ..utils.fixes import delayed
from ..utils.fixes import parse_version
from ..exceptions import DataConversionWarning, EfficiencyWarning
VALID_METRICS = dict(
ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(
list(PAIRWISE_DISTANCE_FUNCTIONS.keys())
+ [
"braycurtis",
"canberra",
"chebyshev",
"correlation",
"cosine",
"dice",
"hamming",
"jaccard",
"kulsinski",
"mahalanobis",
"matching",
"minkowski",
"rogerstanimoto",
"russellrao",
"seuclidean",
"sokalmichener",
"sokalsneath",
"sqeuclidean",
"yule",
"wminkowski",
]
),
)
VALID_METRICS_SPARSE = dict(
ball_tree=[],
kd_tree=[],
brute=(PAIRWISE_DISTANCE_FUNCTIONS.keys() - {"haversine", "nan_euclidean"}),
)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights not in (None, "uniform", "distance") and not callable(weights):
raise ValueError(
"weights not recognized: should be 'uniform', "
"'distance', or a callable function"
)
return weights
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``.
Parameters
----------
dist : ndarray
The input distances.
weights : {'uniform', 'distance' or a callable}
The kind of weighting used.
Returns
-------
weights_arr : array of the same shape as ``dist``
If ``weights == 'uniform'``, then returns None.
"""
if weights in (None, "uniform"):
return None
elif weights == "distance":
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, "__contains__") and 0.0 in point_dist:
dist[point_dist_i] = point_dist == 0.0
else:
dist[point_dist_i] = 1.0 / point_dist
else:
with np.errstate(divide="ignore"):
dist = 1.0 / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError(
"weights not recognized: should be 'uniform', "
"'distance', or a callable function"
)
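# Illustrative sketch, not part of scikit-learn: what the 'distance' option
# does to a small float distance matrix. A row containing an exact zero
# distance collapses to a 0/1 indicator row; every other entry becomes 1/d.
def _demo_distance_weights():  # hypothetical helper for illustration only
    demo_dist = np.array([[0.0, 2.0], [1.0, 4.0]])
    return _get_weights(demo_dist, "distance")
    # -> array([[1.  , 0.  ],
    #           [1.  , 0.25]])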
def _is_sorted_by_data(graph):
"""Return whether the graph's non-zero entries are sorted by data.
The non-zero entries are stored in graph.data and graph.indices.
For each row (or sample), the non-zero entries can be either:
- sorted by indices, as after graph.sort_indices();
- sorted by data, as after _check_precomputed(graph);
- not sorted.
Parameters
----------
graph : sparse matrix of shape (n_samples, n_samples)
Neighbors graph as given by `kneighbors_graph` or
`radius_neighbors_graph`. Matrix should be of format CSR format.
Returns
-------
res : bool
Whether input graph is sorted by data.
"""
assert graph.format == "csr"
out_of_order = graph.data[:-1] > graph.data[1:]
line_change = np.unique(graph.indptr[1:-1] - 1)
line_change = line_change[line_change < out_of_order.shape[0]]
return out_of_order.sum() == out_of_order[line_change].sum()
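# Illustrative sketch, not part of scikit-learn: a tiny CSR graph whose
# entries are sorted by distance within each row passes the check above,
# even though the column indices themselves are unsorted.
def _demo_sorted_by_data():  # hypothetical helper for illustration only
    indptr = np.array([0, 2, 4])
    indices = np.array([1, 0, 0, 1])
    data = np.array([0.5, 2.0, 1.0, 3.0])  # ascending within each row
    graph = csr_matrix((data, indices, indptr), shape=(2, 2))
    return _is_sorted_by_data(graph)  # True for this layout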
def _check_precomputed(X):
"""Check precomputed distance matrix.
If the precomputed distance matrix is sparse, it checks that the non-zero
entries are sorted by distances. If not, the matrix is copied and sorted.
Parameters
----------
X : {sparse matrix, array-like}, (n_samples, n_samples)
Distance matrix to other samples. X may be a sparse matrix, in which
case only non-zero elements may be considered neighbors.
Returns
-------
X : {sparse matrix, array-like}, (n_samples, n_samples)
Distance matrix to other samples. X may be a sparse matrix, in which
case only non-zero elements may be considered neighbors.
"""
if not issparse(X):
X = check_array(X)
check_non_negative(X, whom="precomputed distance matrix.")
return X
else:
graph = X
if graph.format not in ("csr", "csc", "coo", "lil"):
raise TypeError(
"Sparse matrix in {!r} format is not supported due to "
"its handling of explicit zeros".format(graph.format)
)
copied = graph.format != "csr"
graph = check_array(graph, accept_sparse="csr")
check_non_negative(graph, whom="precomputed distance matrix.")
if not _is_sorted_by_data(graph):
warnings.warn(
"Precomputed sparse input was not sorted by data.", EfficiencyWarning
)
if not copied:
graph = graph.copy()
# if each sample has the same number of provided neighbors
row_nnz = np.diff(graph.indptr)
if row_nnz.max() == row_nnz.min():
n_samples = graph.shape[0]
distances = graph.data.reshape(n_samples, -1)
order = np.argsort(distances, kind="mergesort")
order += np.arange(n_samples)[:, None] * row_nnz[0]
order = order.ravel()
graph.data = graph.data[order]
graph.indices = graph.indices[order]
else:
for start, stop in zip(graph.indptr, graph.indptr[1:]):
order = np.argsort(graph.data[start:stop], kind="mergesort")
graph.data[start:stop] = graph.data[start:stop][order]
graph.indices[start:stop] = graph.indices[start:stop][order]
return graph
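# Illustrative sketch, not part of scikit-learn: a sparse precomputed matrix
# whose rows are sorted by index rather than by distance is re-sorted by
# _check_precomputed, which also emits an EfficiencyWarning.
def _demo_check_precomputed():  # hypothetical helper for illustration only
    indptr = np.array([0, 2, 4])
    indices = np.array([0, 1, 0, 1])
    data = np.array([2.0, 0.5, 3.0, 1.0])  # descending within each row
    graph = csr_matrix((data, indices, indptr), shape=(2, 2))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", EfficiencyWarning)
        sorted_graph = _check_precomputed(graph)
    return sorted_graph.data  # array([0.5, 2. , 1. , 3. ])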
def _kneighbors_from_graph(graph, n_neighbors, return_distance):
"""Decompose a nearest neighbors sparse graph into distances and indices.
Parameters
----------
graph : sparse matrix of shape (n_samples, n_samples)
Neighbors graph as given by `kneighbors_graph` or
`radius_neighbors_graph`. Matrix should be of format CSR format.
n_neighbors : int
Number of neighbors required for each sample.
return_distance : bool
Whether or not to return the distances.
Returns
-------
neigh_dist : ndarray of shape (n_samples, n_neighbors)
Distances to nearest neighbors. Only present if `return_distance=True`.
neigh_ind : ndarray of shape (n_samples, n_neighbors)
Indices of nearest neighbors.
"""
n_samples = graph.shape[0]
assert graph.format == "csr"
# number of neighbors by samples
row_nnz = np.diff(graph.indptr)
row_nnz_min = row_nnz.min()
if n_neighbors is not None and row_nnz_min < n_neighbors:
raise ValueError(
"%d neighbors per samples are required, but some samples have only"
" %d neighbors in precomputed graph matrix. Decrease number of "
"neighbors used or recompute the graph with more neighbors."
% (n_neighbors, row_nnz_min)
)
def extract(a):
# if each sample has the same number of provided neighbors
if row_nnz.max() == row_nnz_min:
return a.reshape(n_samples, -1)[:, :n_neighbors]
else:
idx = np.tile(np.arange(n_neighbors), (n_samples, 1))
idx += graph.indptr[:-1, None]
return a.take(idx, mode="clip").reshape(n_samples, n_neighbors)
if return_distance:
return extract(graph.data), extract(graph.indices)
else:
return extract(graph.indices)
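# Illustrative sketch, not part of scikit-learn: decompose a tiny precomputed
# 2-nearest-neighbor graph (three samples, distances already sorted within
# each row) back into the usual (distances, indices) pair.
def _demo_kneighbors_from_graph():  # hypothetical helper for illustration only
    indptr = np.array([0, 2, 4, 6])
    indices = np.array([1, 2, 0, 2, 0, 1])
    data = np.array([0.3, 0.9, 0.4, 1.1, 0.2, 0.5])
    graph = csr_matrix((data, indices, indptr), shape=(3, 3))
    # Returns a (3, 2) distance array and a (3, 2) neighbor-index array.
    return _kneighbors_from_graph(graph, n_neighbors=2, return_distance=True)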
def _radius_neighbors_from_graph(graph, radius, return_distance):
"""Decompose a nearest neighbors sparse graph into distances and indices.
Parameters
----------
graph : sparse matrix of shape (n_samples, n_samples)
Neighbors graph as given by `kneighbors_graph` or
`radius_neighbors_graph`. Matrix should be of format CSR format.
radius : float
Radius of neighborhoods which should be strictly positive.
return_distance : bool
Whether or not to return the distances.
Returns
-------
neigh_dist : ndarray of shape (n_samples,) of arrays
Distances to nearest neighbors. Only present if `return_distance=True`.
neigh_ind : ndarray of shape (n_samples,) of arrays
Indices of nearest neighbors.
"""
assert graph.format == "csr"
no_filter_needed = bool(graph.data.max() <= radius)
if no_filter_needed:
data, indices, indptr = graph.data, graph.indices, graph.indptr
else:
mask = graph.data <= radius
if return_distance:
data = np.compress(mask, graph.data)
indices = np.compress(mask, graph.indices)
indptr = np.concatenate(([0], np.cumsum(mask)))[graph.indptr]
indices = indices.astype(np.intp, copy=no_filter_needed)
if return_distance:
neigh_dist = _to_object_array(np.split(data, indptr[1:-1]))
neigh_ind = _to_object_array(np.split(indices, indptr[1:-1]))
if return_distance:
return neigh_dist, neigh_ind
else:
return neigh_ind
class NeighborsBase(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(
self,
n_neighbors=None,
radius=None,
algorithm="auto",
leaf_size=30,
metric="minkowski",
p=2,
metric_params=None,
n_jobs=None,
):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
def _check_algorithm_metric(self):
if self.algorithm not in ["auto", "brute", "kd_tree", "ball_tree"]:
raise ValueError("unrecognized algorithm: '%s'" % self.algorithm)
if self.algorithm == "auto":
if self.metric == "precomputed":
alg_check = "brute"
elif callable(self.metric) or self.metric in VALID_METRICS["ball_tree"]:
alg_check = "ball_tree"
else:
alg_check = "brute"
else:
alg_check = self.algorithm
if callable(self.metric):
if self.algorithm == "kd_tree":
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree does not support callable metric '%s'"
"Function call overhead will result"
"in very poor performance."
% self.metric
)
elif self.metric not in VALID_METRICS[alg_check]:
raise ValueError(
"Metric '%s' not valid. Use "
"sorted(sklearn.neighbors.VALID_METRICS['%s']) "
"to get valid options. "
"Metric can also be a callable function." % (self.metric, alg_check)
)
if self.metric_params is not None and "p" in self.metric_params:
if self.p is not None:
warnings.warn(
"Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.",
SyntaxWarning,
stacklevel=3,
)
effective_p = self.metric_params["p"]
else:
effective_p = self.p
if self.metric in ["wminkowski", "minkowski"] and effective_p < 1:
raise ValueError("p must be greater or equal to one for minkowski metric")
def _fit(self, X, y=None):
if self._get_tags()["requires_y"]:
if not isinstance(X, (KDTree, BallTree, NeighborsBase)):
X, y = self._validate_data(X, y, accept_sparse="csr", multi_output=True)
if is_classifier(self):
# Classification targets require a specific format
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn(
"A column-vector y was passed when a "
"1d array was expected. Please change "
"the shape of y to (n_samples,), for "
"example using ravel().",
DataConversionWarning,
stacklevel=2,
)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
self.classes_ = []
self._y = np.empty(y.shape, dtype=int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
else:
self._y = y
else:
if not isinstance(X, (KDTree, BallTree, NeighborsBase)):
X = self._validate_data(X, accept_sparse="csr")
self._check_algorithm_metric()
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get("p", self.p)
if self.metric in ["wminkowski", "minkowski"]:
self.effective_metric_params_["p"] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == "minkowski":
p = self.effective_metric_params_.pop("p", 2)
w = self.effective_metric_params_.pop("w", None)
if p < 1:
raise ValueError(
"p must be greater or equal to one for minkowski metric"
)
elif p == 1 and w is None:
self.effective_metric_ = "manhattan"
elif p == 2 and w is None:
self.effective_metric_ = "euclidean"
elif p == np.inf and w is None:
self.effective_metric_ = "chebyshev"
else:
# Use the generic minkowski metric, possibly weighted.
self.effective_metric_params_["p"] = p
self.effective_metric_params_["w"] = w
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
self.n_samples_fit_ = X.n_samples_fit_
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = "ball_tree"
self.n_samples_fit_ = X.data.shape[0]
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = "kd_tree"
self.n_samples_fit_ = X.data.shape[0]
return self
if self.metric == "precomputed":
X = _check_precomputed(X)
        # Precomputed matrix X must be square
if X.shape[0] != X.shape[1]:
raise ValueError(
"Precomputed matrix must be square."
" Input is a {}x{} matrix.".format(X.shape[0], X.shape[1])
)
self.n_features_in_ = X.shape[1]
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ("auto", "brute"):
warnings.warn("cannot use tree with sparse input: using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE[
"brute"
] and not callable(self.effective_metric_):
raise ValueError(
"Metric '%s' not valid for sparse input. "
"Use sorted(sklearn.neighbors."
"VALID_METRICS_SPARSE['brute']) "
"to get valid options. "
"Metric can also be a callable function." % (self.effective_metric_)
)
self._fit_X = X.copy()
self._tree = None
self._fit_method = "brute"
self.n_samples_fit_ = X.shape[0]
return self
self._fit_method = self.algorithm
self._fit_X = X
self.n_samples_fit_ = X.shape[0]
if self._fit_method == "auto":
# A tree approach is better for small number of neighbors or small
# number of features, with KDTree generally faster when available
if (
self.metric == "precomputed"
or self._fit_X.shape[1] > 15
or (
self.n_neighbors is not None
and self.n_neighbors >= self._fit_X.shape[0] // 2
)
):
self._fit_method = "brute"
else:
if self.effective_metric_ in VALID_METRICS["kd_tree"]:
self._fit_method = "kd_tree"
elif (
callable(self.effective_metric_)
or self.effective_metric_ in VALID_METRICS["ball_tree"]
):
self._fit_method = "ball_tree"
else:
self._fit_method = "brute"
if self._fit_method == "ball_tree":
self._tree = BallTree(
X,
self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_,
)
elif self._fit_method == "kd_tree":
self._tree = KDTree(
X,
self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_,
)
elif self._fit_method == "brute":
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized" % self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError("Expected n_neighbors > 0. Got %d" % self.n_neighbors)
elif not isinstance(self.n_neighbors, numbers.Integral):
raise TypeError(
"n_neighbors does not take %s value, enter integer value"
% type(self.n_neighbors)
)
return self
def _more_tags(self):
# For cross-validation routines to split data correctly
return {"pairwise": self.metric == "precomputed"}
# TODO: Remove in 1.1
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute `_pairwise` was deprecated in "
"version 0.24 and will be removed in 1.1 (renaming of 0.26)."
)
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == "precomputed"
def _tree_query_parallel_helper(tree, *args, **kwargs):
"""Helper for the Parallel calls in KNeighborsMixin.kneighbors.
The Cython method tree.query is not directly picklable by cloudpickle
under PyPy.
"""
return tree.query(*args, **kwargs)
class KNeighborsMixin:
"""Mixin for k-neighbors searches."""
def _kneighbors_reduce_func(self, dist, start, n_neighbors, return_distance):
"""Reduce a chunk of distances to the nearest neighbors.
Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`
Parameters
----------
dist : ndarray of shape (n_samples_chunk, n_samples)
The distance matrix.
start : int
The index in X which the first row of dist corresponds to.
n_neighbors : int
Number of neighbors required for each sample.
return_distance : bool
Whether or not to return the distances.
Returns
-------
dist : array of shape (n_samples_chunk, n_neighbors)
Returned only if `return_distance=True`.
neigh : array of shape (n_samples_chunk, n_neighbors)
The neighbors indices.
"""
sample_range = np.arange(dist.shape[0])[:, None]
neigh_ind = np.argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == "euclidean":
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
return result
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Find the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed', \
default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int, default=None
Number of neighbors required for each sample. The default is the
value passed to the constructor.
return_distance : bool, default=True
Whether or not to return the distances.
Returns
-------
neigh_dist : ndarray of shape (n_queries, n_neighbors)
Array representing the lengths to points, only present if
return_distance=True.
neigh_ind : ndarray of shape (n_queries, n_neighbors)
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NearestNeighbors
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples)
NearestNeighbors(n_neighbors=1)
>>> print(neigh.kneighbors([[1., 1., 1.]]))
(array([[0.5]]), array([[2]]))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False)
array([[1],
[2]]...)
"""
check_is_fitted(self)
if n_neighbors is None:
n_neighbors = self.n_neighbors
elif n_neighbors <= 0:
raise ValueError("Expected n_neighbors > 0. Got %d" % n_neighbors)
elif not isinstance(n_neighbors, numbers.Integral):
raise TypeError(
"n_neighbors does not take %s value, enter integer value"
% type(n_neighbors)
)
if X is not None:
query_is_train = False
if self.metric == "precomputed":
X = _check_precomputed(X)
else:
X = self._validate_data(X, accept_sparse="csr", reset=False)
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
n_samples_fit = self.n_samples_fit_
if n_neighbors > n_samples_fit:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" % (n_samples_fit, n_neighbors)
)
n_jobs = effective_n_jobs(self.n_jobs)
chunked_results = None
if self._fit_method == "brute" and self.metric == "precomputed" and issparse(X):
results = _kneighbors_from_graph(
X, n_neighbors=n_neighbors, return_distance=return_distance
)
elif self._fit_method == "brute":
reduce_func = partial(
self._kneighbors_reduce_func,
n_neighbors=n_neighbors,
return_distance=return_distance,
)
# for efficiency, use squared euclidean distances
if self.effective_metric_ == "euclidean":
kwds = {"squared": True}
else:
kwds = self.effective_metric_params_
chunked_results = list(
pairwise_distances_chunked(
X,
self._fit_X,
reduce_func=reduce_func,
metric=self.effective_metric_,
n_jobs=n_jobs,
**kwds,
)
)
elif self._fit_method in ["ball_tree", "kd_tree"]:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'"
% self._fit_method
)
old_joblib = parse_version(joblib.__version__) < parse_version("0.12")
if old_joblib:
# Deal with change of API in joblib
parallel_kwargs = {"backend": "threading"}
else:
parallel_kwargs = {"prefer": "threads"}
chunked_results = Parallel(n_jobs, **parallel_kwargs)(
delayed(_tree_query_parallel_helper)(
self._tree, X[s], n_neighbors, return_distance
)
for s in gen_even_slices(X.shape[0], n_jobs)
)
else:
raise ValueError("internal: _fit_method not recognized")
if chunked_results is not None:
if return_distance:
neigh_dist, neigh_ind = zip(*chunked_results)
results = np.vstack(neigh_dist), np.vstack(neigh_ind)
else:
results = np.vstack(chunked_results)
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
neigh_dist, neigh_ind = results
else:
neigh_ind = results
n_queries, _ = X.shape
sample_range = np.arange(n_queries)[:, None]
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(neigh_ind[sample_mask], (n_queries, n_neighbors - 1))
if return_distance:
neigh_dist = np.reshape(
neigh_dist[sample_mask], (n_queries, n_neighbors - 1)
)
return neigh_dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None, mode="connectivity"):
"""Compute the (weighted) graph of k-Neighbors for points in X.
Parameters
----------
X : array-like of shape (n_queries, n_features), \
or (n_queries, n_indexed) if metric == 'precomputed', \
default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
For ``metric='precomputed'`` the shape should be
(n_queries, n_indexed). Otherwise the shape should be
(n_queries, n_features).
n_neighbors : int, default=None
Number of neighbors for each sample. The default is the value
passed to the constructor.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are distances between points, type of distance
depends on the selected metric parameter in
NearestNeighbors class.
Returns
-------
A : sparse-matrix of shape (n_queries, n_samples_fit)
`n_samples_fit` is the number of samples in the fitted data.
`A[i, j]` gives the weight of the edge connecting `i` to `j`.
The matrix is of CSR format.
See Also
--------
NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph
of Neighbors for points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X)
NearestNeighbors(n_neighbors=2)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 1.],
[1., 0., 1.]])
"""
check_is_fitted(self)
if n_neighbors is None:
n_neighbors = self.n_neighbors
# check the input only in self.kneighbors
# construct CSR matrix representation of the k-NN graph
if mode == "connectivity":
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
n_queries = A_ind.shape[0]
A_data = np.ones(n_queries * n_neighbors)
elif mode == "distance":
A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode
)
n_queries = A_ind.shape[0]
n_samples_fit = self.n_samples_fit_
n_nonzero = n_queries * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
kneighbors_graph = csr_matrix(
(A_data, A_ind.ravel(), A_indptr), shape=(n_queries, n_samples_fit)
)
return kneighbors_graph
def _tree_query_radius_parallel_helper(tree, *args, **kwargs):
"""Helper for the Parallel calls in RadiusNeighborsMixin.radius_neighbors.
The Cython method tree.query_radius is not directly picklable by
cloudpickle under PyPy.
"""
return tree.query_radius(*args, **kwargs)
class RadiusNeighborsMixin:
"""Mixin for radius-based neighbors searches."""
def _radius_neighbors_reduce_func(self, dist, start, radius, return_distance):
"""Reduce a chunk of distances to the nearest neighbors.
Callback to :func:`sklearn.metrics.pairwise.pairwise_distances_chunked`
Parameters
----------
dist : ndarray of shape (n_samples_chunk, n_samples)
The distance matrix.
start : int
The index in X which the first row of dist corresponds to.
radius : float
The radius considered when making the nearest neighbors search.
return_distance : bool
Whether or not to return the distances.
Returns
-------
dist : list of ndarray of shape (n_samples_chunk,)
Returned only if `return_distance=True`.
neigh : list of ndarray of shape (n_samples_chunk,)
The neighbors indices.
"""
neigh_ind = [np.where(d <= radius)[0] for d in dist]
if return_distance:
if self.effective_metric_ == "euclidean":
dist = [np.sqrt(d[neigh_ind[i]]) for i, d in enumerate(dist)]
else:
dist = [d[neigh_ind[i]] for i, d in enumerate(dist)]
results = dist, neigh_ind
else:
results = neigh_ind
return results
def radius_neighbors(
self, X=None, radius=None, return_distance=True, sort_results=False
):
"""Find the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
        X : array-like of shape (n_samples, n_features), default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float, default=None
Limiting distance of neighbors to return. The default is the value
passed to the constructor.
return_distance : bool, default=True
Whether or not to return the distances.
sort_results : bool, default=False
If True, the distances and indices will be sorted by increasing
distances before being returned. If False, the results may not
be sorted. If `return_distance=False`, setting `sort_results=True`
will result in an error.
.. versionadded:: 0.22
Returns
-------
neigh_dist : ndarray of shape (n_samples,) of arrays
Array representing the distances to each point, only present if
`return_distance=True`. The distance values are computed according
to the ``metric`` constructor parameter.
neigh_ind : ndarray of shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples)
NearestNeighbors(radius=1.6)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0]))
[1.5 0.5]
>>> print(np.asarray(rng[1][0]))
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
"""
check_is_fitted(self)
if X is not None:
query_is_train = False
if self.metric == "precomputed":
X = _check_precomputed(X)
else:
X = self._validate_data(X, accept_sparse="csr", reset=False)
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
if self._fit_method == "brute" and self.metric == "precomputed" and issparse(X):
results = _radius_neighbors_from_graph(
X, radius=radius, return_distance=return_distance
)
elif self._fit_method == "brute":
# for efficiency, use squared euclidean distances
if self.effective_metric_ == "euclidean":
radius *= radius
kwds = {"squared": True}
else:
kwds = self.effective_metric_params_
reduce_func = partial(
self._radius_neighbors_reduce_func,
radius=radius,
return_distance=return_distance,
)
chunked_results = pairwise_distances_chunked(
X,
self._fit_X,
reduce_func=reduce_func,
metric=self.effective_metric_,
n_jobs=self.n_jobs,
**kwds,
)
if return_distance:
neigh_dist_chunks, neigh_ind_chunks = zip(*chunked_results)
neigh_dist_list = sum(neigh_dist_chunks, [])
neigh_ind_list = sum(neigh_ind_chunks, [])
neigh_dist = _to_object_array(neigh_dist_list)
neigh_ind = _to_object_array(neigh_ind_list)
results = neigh_dist, neigh_ind
else:
neigh_ind_list = sum(chunked_results, [])
results = _to_object_array(neigh_ind_list)
if sort_results:
if not return_distance:
raise ValueError(
"return_distance must be True if sort_results is True."
)
for ii in range(len(neigh_dist)):
order = np.argsort(neigh_dist[ii], kind="mergesort")
neigh_ind[ii] = neigh_ind[ii][order]
neigh_dist[ii] = neigh_dist[ii][order]
results = neigh_dist, neigh_ind
elif self._fit_method in ["ball_tree", "kd_tree"]:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'"
% self._fit_method
)
n_jobs = effective_n_jobs(self.n_jobs)
delayed_query = delayed(_tree_query_radius_parallel_helper)
if parse_version(joblib.__version__) < parse_version("0.12"):
# Deal with change of API in joblib
parallel_kwargs = {"backend": "threading"}
else:
parallel_kwargs = {"prefer": "threads"}
chunked_results = Parallel(n_jobs, **parallel_kwargs)(
delayed_query(
self._tree, X[s], radius, return_distance, sort_results=sort_results
)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
neigh_ind, neigh_dist = tuple(zip(*chunked_results))
results = np.hstack(neigh_dist), np.hstack(neigh_ind)
else:
results = np.hstack(chunked_results)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
neigh_dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
neigh_dist[ind] = neigh_dist[ind][mask]
if return_distance:
return neigh_dist, neigh_ind
return neigh_ind
def radius_neighbors_graph(
self, X=None, radius=None, mode="connectivity", sort_results=False
):
"""Compute the (weighted) graph of Neighbors for points in X.
        Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like of shape (n_samples, n_features), default=None
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float, default=None
Radius of neighborhoods. The default is the value passed to the
constructor.
mode : {'connectivity', 'distance'}, default='connectivity'
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are distances between points, type of distance
depends on the selected metric parameter in
NearestNeighbors class.
sort_results : bool, default=False
If True, in each row of the result, the non-zero entries will be
sorted by increasing distances. If False, the non-zero entries may
not be sorted. Only used with mode='distance'.
.. versionadded:: 0.22
Returns
-------
A : sparse-matrix of shape (n_queries, n_samples_fit)
`n_samples_fit` is the number of samples in the fitted data.
`A[i, j]` gives the weight of the edge connecting `i` to `j`.
The matrix is of CSR format.
See Also
--------
kneighbors_graph : Compute the (weighted) graph of k-Neighbors for
points in X.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X)
NearestNeighbors(radius=1.5)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[1., 0., 1.],
[0., 1., 0.],
[1., 0., 1.]])
"""
check_is_fitted(self)
# check the input only in self.radius_neighbors
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == "connectivity":
A_ind = self.radius_neighbors(X, radius, return_distance=False)
A_data = None
elif mode == "distance":
dist, A_ind = self.radius_neighbors(
X, radius, return_distance=True, sort_results=sort_results
)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode
)
n_queries = A_ind.shape[0]
n_samples_fit = self.n_samples_fit_
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int), np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr), shape=(n_queries, n_samples_fit))
|
|
import unittest
from six import BytesIO
from iterparse.parser import iterparse
from lxml.etree import XMLSyntaxError
class Iterparse(unittest.TestCase):
def assertElement(
self, element, name, text=None, num_children=0, num_attrib=0,
):
self.assertEqual(element.tag, name)
self.assertEqual(element.text, text)
self.assertEqual(element.tail, None)
self.assertEqual(len(element.getchildren()), num_children)
self.assertEqual(len(element.attrib), num_attrib)
def test_basic(self):
stream = BytesIO(
b"""
<root>
<unwanted>
<unwanted-0>foo</unwanted-0>
<unwanted-1>foo</unwanted-1>
<unwanted-2>foo</unwanted-2>
</unwanted>
<wanted>garbage
<wanted-0 key="value">foo</wanted-0>
<wanted-1>foo</wanted-1>junk
<wanted-2>foo</wanted-2><!-- comment -->
<wanted-3>
<wanted-3a>sub-sub
<wanted-3aa>deep</wanted-3aa>
</wanted-3a>
<wanted-3b>sup</wanted-3b>
</wanted-3>
<wanted-4/>
bullshit
</wanted>
</root>
"""
)
events = list(iterparse(stream, tag=['wanted'], debug=True))
self.assertEqual(len(events), 1)
action, element = events[0]
self.assertEqual(action, 'end')
self.assertElement(element, 'wanted', num_children=5)
self.assertElement(element[0], 'wanted-0', text='foo', num_attrib=1)
self.assertElement(element[1], 'wanted-1', text='foo')
self.assertElement(element[2], 'wanted-2', text='foo')
self.assertElement(element[3], 'wanted-3', num_children=2)
self.assertElement(element[3][0], 'wanted-3a', num_children=1)
self.assertElement(element[3][0][0], 'wanted-3aa', text='deep')
self.assertElement(element[3][1], 'wanted-3b', text='sup')
self.assertElement(element[4], 'wanted-4')
def test_exception_handling(self):
stream = BytesIO(b'<a>1</a>2</a>3')
events = iterparse(stream, tag=['a'], debug=True)
# We can process the first <a> without issue.
action, element = next(events)
self.assertEqual(action, 'end')
self.assertElement(element, 'a', text='1')
# Processing the second <a> should fail.
with self.assertRaises(XMLSyntaxError):
next(events)
def test_error_extra_content(self):
stream = BytesIO(b'<a><b></a></b>')
events = iterparse(stream, tag=['a'])
with self.assertRaises(XMLSyntaxError):
next(events)
def test_error_opening_ending_mismatch(self):
stream = BytesIO(b'</a>')
events = iterparse(stream, tag=['a'])
with self.assertRaises(XMLSyntaxError):
next(events)
def test_error_document_is_empty(self):
stream = BytesIO(b'0<a></a>')
events = iterparse(stream, tag=['a'])
with self.assertRaises(XMLSyntaxError):
next(events)
def test_return_order(self):
stream = BytesIO(
b"""
<root>
<wanted>
<wanted-0>foo</wanted-0>
</wanted>
</root>
"""
)
events = list(iterparse(stream, tag=['wanted', 'wanted-0']))
self.assertEqual(len(events), 2)
action, element = events[0]
self.assertEqual(action, 'end')
self.assertElement(element, 'wanted-0', text='foo')
action, element = events[1]
self.assertEqual(action, 'end')
self.assertElement(element, 'wanted', num_children=1)
self.assertElement(element[0], 'wanted-0', text='foo')
def test_namespaces(self):
text = b"""
<root xmlns:a1="example.com/a1" xmlns:a2="example.com/a2">
<a1:a>1</a1:a>
<a2:a>2</a2:a>
</root>
"""
# Make sure we can filter with namespaces.
events = list(iterparse(BytesIO(text), tag=['{example.com/a1}a']))
elements = [element for action, element in events]
        self.assertEqual(len(elements), 1)
self.assertElement(elements[0], '{example.com/a1}a', text='1')
events = list(iterparse(BytesIO(text), tag=['{example.com/a2}a']))
elements = [element for action, element in events]
        self.assertEqual(len(elements), 1)
self.assertElement(elements[0], '{example.com/a2}a', text='2')
# Make sure that we can filter while ignoring namespaces.
events = list(
iterparse(BytesIO(text), tag=['a'], ignore_namespace=True)
)
elements = [element for action, element in events]
        self.assertEqual(len(elements), 2)
self.assertElement(elements[0], '{example.com/a1}a', text='1')
self.assertElement(elements[1], '{example.com/a2}a', text='2')
# Make sure we can filter with namespaces and strip the result.
events = list(
iterparse(
BytesIO(text), tag=['{example.com/a1}a'],
strip_namespace=True,
)
)
elements = [element for action, element in events]
        self.assertEqual(len(elements), 1)
self.assertElement(elements[0], 'a', text='1')
# Combination of ignoring/striping namespaces.
events = list(
iterparse(
BytesIO(text), tag=['a'], strip_namespace=True,
ignore_namespace=True,
)
)
elements = [element for action, element in events]
        self.assertEqual(len(elements), 2)
self.assertElement(elements[0], 'a', text='1')
self.assertElement(elements[1], 'a', text='2')
def test_tag_none(self):
stream = BytesIO(b'<a>1</a>')
events = list(iterparse(stream, tag=None))
self.assertEqual(len(events), 1)
action, element = events[0]
self.assertEqual(action, 'end')
self.assertElement(element, 'a', text='1')
def test_start_and_end_events(self):
stream = BytesIO(b'<a>1</a>')
events = list(iterparse(stream, tag=None, events=['start', 'end']))
self.assertEqual(len(events), 2)
        # Note: elements at the time of the start event can only guarantee
# that the tag name and its attributes will exist.
action, element = events[0]
self.assertEqual(action, 'start')
self.assertEqual(element.tag, 'a')
action, element = events[1]
self.assertEqual(action, 'end')
self.assertElement(element, 'a', text='1')
if __name__ == '__main__':
unittest.main()
|
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.cloud.bigquery_datatransfer_v1.proto import (
datatransfer_pb2 as google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2,
)
from google.cloud.bigquery_datatransfer_v1.proto import (
transfer_pb2 as google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2,
)
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class DataTransferServiceStub(object):
"""The Google BigQuery Data Transfer Service API enables BigQuery users to
configure the transfer of their data from other Google Products into
BigQuery. This service contains methods that are end user exposed. It backs
up the frontend.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetDataSource = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/GetDataSource",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetDataSourceRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DataSource.FromString,
)
self.ListDataSources = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListDataSources",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesResponse.FromString,
)
self.CreateTransferConfig = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/CreateTransferConfig",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CreateTransferConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.FromString,
)
self.UpdateTransferConfig = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/UpdateTransferConfig",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.UpdateTransferConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.FromString,
)
self.DeleteTransferConfig = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/DeleteTransferConfig",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DeleteTransferConfigRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.GetTransferConfig = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/GetTransferConfig",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetTransferConfigRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.FromString,
)
self.ListTransferConfigs = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListTransferConfigs",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferConfigsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferConfigsResponse.FromString,
)
self.ScheduleTransferRuns = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/ScheduleTransferRuns",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ScheduleTransferRunsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ScheduleTransferRunsResponse.FromString,
)
self.StartManualTransferRuns = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/StartManualTransferRuns",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.StartManualTransferRunsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.StartManualTransferRunsResponse.FromString,
)
self.GetTransferRun = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/GetTransferRun",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetTransferRunRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferRun.FromString,
)
self.DeleteTransferRun = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/DeleteTransferRun",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DeleteTransferRunRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ListTransferRuns = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListTransferRuns",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferRunsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferRunsResponse.FromString,
)
self.ListTransferLogs = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/ListTransferLogs",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferLogsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferLogsResponse.FromString,
)
self.CheckValidCreds = channel.unary_unary(
"/google.cloud.bigquery.datatransfer.v1.DataTransferService/CheckValidCreds",
request_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CheckValidCredsRequest.SerializeToString,
response_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CheckValidCredsResponse.FromString,
)
class DataTransferServiceServicer(object):
"""The Google BigQuery Data Transfer Service API enables BigQuery users to
configure the transfer of their data from other Google Products into
BigQuery. This service contains methods that are end user exposed. It backs
up the frontend.
"""
def GetDataSource(self, request, context):
"""Retrieves a supported data source and returns its settings,
which can be used for UI rendering.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListDataSources(self, request, context):
"""Lists supported data sources and returns their settings,
which can be used for UI rendering.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CreateTransferConfig(self, request, context):
"""Creates a new data transfer configuration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def UpdateTransferConfig(self, request, context):
"""Updates a data transfer configuration.
All fields must be set, even if they are not updated.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteTransferConfig(self, request, context):
"""Deletes a data transfer configuration,
including any associated transfer runs and logs.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetTransferConfig(self, request, context):
"""Returns information about a data transfer config.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListTransferConfigs(self, request, context):
"""Returns information about all data transfers in the project.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ScheduleTransferRuns(self, request, context):
"""Creates transfer runs for a time range [start_time, end_time].
For each date - or whatever granularity the data source supports - in the
range, one transfer run is created.
Note that runs are created per UTC time in the time range.
DEPRECATED: use StartManualTransferRuns instead.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def StartManualTransferRuns(self, request, context):
"""Start manual transfer runs to be executed now with schedule_time equal to
current time. The transfer runs can be created for a time range where the
run_time is between start_time (inclusive) and end_time (exclusive), or for
a specific run_time.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def GetTransferRun(self, request, context):
"""Returns information about the particular transfer run.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def DeleteTransferRun(self, request, context):
"""Deletes the specified transfer run.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListTransferRuns(self, request, context):
"""Returns information about running and completed jobs.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def ListTransferLogs(self, request, context):
"""Returns user facing log messages for the data transfer run.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def CheckValidCreds(self, request, context):
"""Returns true if valid credentials exist for the given data source and
requesting user.
        Some data sources don't support service accounts, so we need to talk to
        them on behalf of the end user. This API just checks whether we have an
        OAuth token for the particular user, which is a prerequisite before the
        user can create a transfer config.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details("Method not implemented!")
raise NotImplementedError("Method not implemented!")
def add_DataTransferServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
"GetDataSource": grpc.unary_unary_rpc_method_handler(
servicer.GetDataSource,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetDataSourceRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DataSource.SerializeToString,
),
"ListDataSources": grpc.unary_unary_rpc_method_handler(
servicer.ListDataSources,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesResponse.SerializeToString,
),
"CreateTransferConfig": grpc.unary_unary_rpc_method_handler(
servicer.CreateTransferConfig,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CreateTransferConfigRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.SerializeToString,
),
"UpdateTransferConfig": grpc.unary_unary_rpc_method_handler(
servicer.UpdateTransferConfig,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.UpdateTransferConfigRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.SerializeToString,
),
"DeleteTransferConfig": grpc.unary_unary_rpc_method_handler(
servicer.DeleteTransferConfig,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DeleteTransferConfigRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"GetTransferConfig": grpc.unary_unary_rpc_method_handler(
servicer.GetTransferConfig,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetTransferConfigRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferConfig.SerializeToString,
),
"ListTransferConfigs": grpc.unary_unary_rpc_method_handler(
servicer.ListTransferConfigs,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferConfigsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferConfigsResponse.SerializeToString,
),
"ScheduleTransferRuns": grpc.unary_unary_rpc_method_handler(
servicer.ScheduleTransferRuns,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ScheduleTransferRunsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ScheduleTransferRunsResponse.SerializeToString,
),
"StartManualTransferRuns": grpc.unary_unary_rpc_method_handler(
servicer.StartManualTransferRuns,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.StartManualTransferRunsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.StartManualTransferRunsResponse.SerializeToString,
),
"GetTransferRun": grpc.unary_unary_rpc_method_handler(
servicer.GetTransferRun,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.GetTransferRunRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_transfer__pb2.TransferRun.SerializeToString,
),
"DeleteTransferRun": grpc.unary_unary_rpc_method_handler(
servicer.DeleteTransferRun,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.DeleteTransferRunRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
"ListTransferRuns": grpc.unary_unary_rpc_method_handler(
servicer.ListTransferRuns,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferRunsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferRunsResponse.SerializeToString,
),
"ListTransferLogs": grpc.unary_unary_rpc_method_handler(
servicer.ListTransferLogs,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferLogsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListTransferLogsResponse.SerializeToString,
),
"CheckValidCreds": grpc.unary_unary_rpc_method_handler(
servicer.CheckValidCreds,
request_deserializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CheckValidCredsRequest.FromString,
response_serializer=google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.CheckValidCredsResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
"google.cloud.bigquery.datatransfer.v1.DataTransferService", rpc_method_handlers
)
server.add_generic_rpc_handlers((generic_handler,))
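# Illustrative sketch, not part of the generated module: how the generated
# stub and servicer registration are typically wired together. The address,
# executor size, and use of the unimplemented base servicer are assumptions
# for illustration only; a real deployment registers a subclass that
# overrides the RPC methods above.
def _example_data_transfer_service_usage():
    from concurrent import futures
    # Server side: register a servicer with a gRPC server.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_DataTransferServiceServicer_to_server(DataTransferServiceServicer(), server)
    server.add_insecure_port("[::]:50051")
    server.start()
    # Client side: open a channel and call an RPC through the stub.
    channel = grpc.insecure_channel("localhost:50051")
    stub = DataTransferServiceStub(channel)
    request = (
        google_dot_cloud_dot_bigquery_dot_datatransfer__v1_dot_proto_dot_datatransfer__pb2.ListDataSourcesRequest()
    )
    # The base servicer raises UNIMPLEMENTED, so this call fails until a
    # concrete servicer is supplied.
    return stub.ListDataSources(request)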
|
|
from __future__ import division
# PyQT4 imports
from PyQt4 import QtGui, QtCore, QtOpenGL
from PyQt4.QtOpenGL import QGLWidget
# PyOpenGL imports
import OpenGL.GL as gl
import OpenGL.arrays.vbo as glvbo
from random import choice, randint
import numpy
import wave
from math import sin
from instruments import *
import copy
TAU = numpy.pi * 2
MAX_X = 32
MAX_Y = 16
COLOURS = { 'black' : (0, 0, 0),
'other-grey' : (0.25, 0.25, 0.25),
'grey' : (0.4, 0.4, 0.4),
'yellow' : (0.5, 0.5, 0.25),
'white' : (1, 1, 1)}
class Player(object):
def __init__(self, x, y, health=3):
self.x = x
self.y = y
self.health = health
self.is_exploded = False
self.color = COLOURS['grey']
class SoundFile(object):
def __init__(self, filename, length, sample_rate, number_of_channels, frame_rate, sample_width):
self.length = length
self.file = wave.open(filename, 'wb')
self.sample_rate = sample_rate
self.number_of_channels = number_of_channels
self.sample_width = sample_width
self.frame_rate = frame_rate
self.number_of_frames = (self.sample_rate * self.length * 2) / self.number_of_channels
def write(self, signal):
self.file.setparams((self.number_of_channels, self.sample_width, self.frame_rate, self.number_of_frames, 'NONE', 'noncompressed'))
self.file.writeframes(signal)
def close(self):
self.file.close()
def generate_frequency(n):
    return 440 * pow(2, (n / 12))
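# Illustrative sketch: generate_frequency maps a semitone offset from A4
# (440 Hz) onto the equal-tempered scale, so +12 semitones doubles the
# frequency and -12 halves it.
def _demo_equal_temperament():  # hypothetical helper for illustration only
    return [generate_frequency(n) for n in (-12, 0, 12)]  # [220.0, 440.0, 880.0]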
class GLPlotWidget(QGLWidget):
# default window size
width, height = 92, 64
player = Player(4,4)
eggs = {v : [] for v in COLOURS.values()}
frequencies = [[] for x in xrange(MAX_X)]
data_signals = {}
sample_rate = 44100 # Hz
omega = TAU / sample_rate
highlighted_x = 0
def __init__(self, instruments, *args, **kwargs):
QGLWidget.__init__(self, *args, **kwargs)
self.instruments = instruments
def initializeGL(self):
"""Initialize OpenGL, VBOs, upload data on the GPU, etc.
"""
# background color
gl.glClearColor(0,0,0,0)
gl.glViewport(0, 0, self.width, self.height)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
def draw_square(self, x, y, size=1):
gl.glRectf(x, y, x + size, y + size)
def add_egg(self, x, y, color=COLOURS['white']):
self.eggs[color].append((x, y))
y = y - 12
freq = generate_frequency(y)
if freq not in self.frequencies[x]:
self.frequencies[x].append(freq)
return
self.data_signals[self.frequencies[x][-1]] = self._generate_sound(self.frequencies[x][-1], self.sample_rate, self.omega)
def _generate_sound_with_frequencies(self, frequencies, sample_rate, omega):
volume = 0
period = sample_rate / (sum(frequencies) / len(frequencies))
        data = numpy.ones(int(period), dtype=numpy.float)
data_length = len(data)
instrument = self._current_instrument(data_length, 30000)
for frequency in frequencies:
temp_frequency = frequency
volume = 0
for i in xrange(data_length):
temp_frequency = instrument.variance(frequency, temp_frequency)
data[i] = data[i] + volume * sin(i * omega * temp_frequency)
volume = instrument.envelope(volume, i)
data = data / len(frequencies)
return data
def _generate_sound(self, frequency, sample_rate, omega):
if frequency in self.data_signals:
return self.data_signals[frequency]
volume = 0
period = sample_rate / frequency
        data = numpy.ones(int(period), dtype=numpy.float)
data_length = len(data)
instrument = self._current_instrument(data_length, 30000)
temp_frequency = copy.copy(frequency)
for i in xrange(data_length):
temp_frequency = instrument.variance(frequency, temp_frequency)
data[i] = volume * sin(i * omega * temp_frequency)
volume = instrument.envelope(volume, i)
return data
def _generate_silence(self, sample_rate, omega):
period = sample_rate / 128
        data = numpy.zeros(int(period), dtype=numpy.float)
return data
def make_wav(self):
duration = len(self.frequencies) # seconds
sample_rate = self.sample_rate
samples = duration * sample_rate
omega = self.omega
resizer = int(samples / duration)
the_sound_of_silence = self._generate_silence(sample_rate, omega)
the_sound_of_silence = numpy.resize(the_sound_of_silence, resizer)
for instrument in self.instruments:
self._current_instrument = instrument
out_data = None
for frequency in self.frequencies[1:-2]:
if len(frequency) == 0:
data = the_sound_of_silence
else:
if len(frequency) == 1:
data = self._generate_sound(frequency[0], sample_rate, omega)
else:
data = self._generate_sound_with_frequencies(frequency, sample_rate, omega)
data = numpy.resize(data, resizer)
if out_data is not None:
out_data = numpy.hstack((out_data, data))
else:
out_data = data
out_data = numpy.resize(out_data, (samples,))
            out_signal = ''.join(wave.struct.pack('h', int(out_data[i])) for i in xrange(len(out_data)))
location = "testing/"
sound_file = SoundFile('{}{}.wav'.format(location,instrument.__name__),
duration,
sample_rate,
1,
sample_rate,
2)
sound_file.write(out_signal)
sound_file.close()
def draw_eggs(self):
for color, items in self.eggs.iteritems():
r, g, b = color
gl.glColor3f(r, g, b)
for item in items:
x = item[0]
y = item[1]
self.draw_square(x, y, 1)
def draw_player(self):
r, g, b = self.player.color
gl.glColor3f(r, g, b)
self.draw_square(self.player.x, self.player.y)
def paintGL(self):
"""Paint the scene.
"""
# clear the buffer
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
# set yellow color for subsequent drawing rendering calls
# tell OpenGL that the VBO contains an array of vertices
gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
r, g, b = COLOURS['yellow']
gl.glColor3f(r, g, b)
for y in xrange(MAX_Y):
self.draw_square(self.highlighted_x, y)
self.draw_eggs()
self.draw_player()
def resizeGL(self, width, height):
"""Called upon window resizing: reinitialize the viewport.
"""
# update the window size
self.width, self.height = width, height
# paint within the whole window
gl.glViewport(0, 0, self.width, self.height)
# set orthographic projection (2D only)
gl.glMatrixMode(gl.GL_PROJECTION)
gl.glLoadIdentity()
# the window corner OpenGL coordinates are (-+1, -+1)
gl.glOrtho(0, MAX_X, 0, MAX_Y, -1, 1)
if __name__ == '__main__':
# import numpy for generating random data points
import sys
# define a QT window with an OpenGL widget inside it
class TestWindow(QtGui.QMainWindow):
def __init__(self):
super(TestWindow, self).__init__()
# initialize the GL widget
self.wav = None
self.widget = GLPlotWidget([Instrument, Flatter, Fierce, Organ])
self.color = COLOURS['white']
self.keys = []
self.widget.setGeometry(0, 0, self.widget.width, self.widget.height)
self.setCentralWidget(self.widget)
self.show()
self.paint_timer = QtCore.QTimer()
QtCore.QObject.connect(self.paint_timer, QtCore.SIGNAL("timeout()"), self.widget.updateGL)
self.button_timer = QtCore.QTimer()
QtCore.QObject.connect(self.button_timer, QtCore.SIGNAL("timeout()"), self.check)
self.sound_timer = QtCore.QTimer()
QtCore.QObject.connect(self.sound_timer, QtCore.SIGNAL("timeout()"), self.play_sweet_songs)
self.highlight_timer = QtCore.QTimer()
QtCore.QObject.connect(self.highlight_timer, QtCore.SIGNAL("timeout()"), self.move_highlight)
QtCore.QMetaObject.connectSlotsByName(self)
self.paint_timer.start(30)
self.sound_timer.start(1000 * 10)
self.highlight_timer.start(1000)
self.button_timer.start(50)
self.calculated = False
self.resize(500, 500)
def keyPressEvent(self, event):
self.keys.append(event.key())
def keyReleaseEvent(self, event):
self.keys.remove(event.key())
def move_highlight(self):
self.widget.highlighted_x += 1
if self.widget.highlighted_x > MAX_X:
if self.wav is not None:
self.wav.stop()
self.widget.highlighted_x = 0
if self.wav is not None:
self.wav.play()
def play_sweet_songs(self):
if not self.calculated:
return
self.wav = QtGui.QSound("my_wav.wav")
def check(self):
player = self.widget.player
for key in self.keys[:]:
if key == QtCore.Qt.Key_A:
if player.x > 0:
self.widget.player.x -= 1
elif key == QtCore.Qt.Key_D:
if player.x < MAX_X - 1:
self.widget.player.x += 1
elif key == QtCore.Qt.Key_W:
if player.y < MAX_Y - 1:
self.widget.player.y += 1
elif key == QtCore.Qt.Key_S:
if player.y > 0:
self.widget.player.y -= 1
elif key == QtCore.Qt.Key_Space:
self.widget.add_egg(self.widget.player.x, self.widget.player.y, COLOURS['white'])
elif key == QtCore.Qt.Key_T:
if self.wav is not None:
self.wav.stop()
self.calculated = False
self.widget.make_wav()
self.calculated = True
elif key == QtCore.Qt.Key_1:
self.widget.player.color = COLOURS['white']
elif key == QtCore.Qt.Key_2:
self.widget.player.color = COLOURS['grey']
elif key == QtCore.Qt.Key_3:
self.widget.player.color = COLOURS['other-grey']
# create the QT App and window
app = QtGui.QApplication(sys.argv)
window = TestWindow()
window.show()
app.exec_()
|
|
##########
### Game logic for actually running a game
##########
from uuid import uuid4
import random
from copy import copy, deepcopy
import communication
import config
from bottle import abort
import thread
from threading import Timer
import pusher
import time
pusher.app_id = config.PUSHER_APP_ID
pusher.key = config.PUSHER_KEY
pusher.secret = config.PUSHER_SECRET
rounds = {}
def push(game, subject, content):
game['pushes'].append([subject, content])
p = pusher.Pusher()
if config.LOW_BANDWIDTH_MODE:
if not game['id'] in rounds:
rounds[game['id']] = []
rounds[game['id']].append([subject, deepcopy(content)])
if subject == 'new-round' or subject == 'end':
# Dump the previous round
if len(rounds.get(game['id'], [])) > 0:
p['game-' + game['id']].trigger('dump-round', rounds[game['id']])
rounds[game['id']] = []
else:
p['game-' + game['id']].trigger(subject, content)
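# Example (sketch, illustrative only; the minimal game dict below is hypothetical):
# push() requires at least an 'id' and a 'pushes' list. With
# config.LOW_BANDWIDTH_MODE enabled, events are buffered per game and only
# flushed to Pusher as a single 'dump-round' message when a 'new-round' (or
# 'end') event arrives; otherwise every push() triggers immediately.
def _demo_push_buffering():
    game = {'id': 'demo-game', 'pushes': []}   # hypothetical minimal game dict
    push(game, 'purchase-generator', {'player': 'p1'})
    push(game, 'new-round', {'round': 2})      # flushes the buffered events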
def log_action(game, player, action, data=None):
    # avoid sharing a mutable default dict between calls
    if data is None:
        data = {}
    data['action'] = action
for p in game['players'].values():
if not p['id'] == player['id']:
# Log it for this user
if not player['id'] in p['actions']:
p['actions'][player['id']] = []
p['actions'][player['id']].append(data)
def setup_player(player):
in_game_player = {
"id": uuid4().hex,
"secret": uuid4().hex,
"player": player['id'],
"name": player['id'],
"endpoint": player['endpoint'],
"generators": copy(config.DEFAULT_GENERATORS),
"improved_generators": copy(config.DEFAULT_GENERATORS),
"resources": copy(config.DEFAULT_RESOURCES),
"pr": 0,
"customers": 2,
"actions": {}
}
return in_game_player
def run_generators(players):
awarded = {}
def award(player, generator, amount=1):
generated = config.GENERATORS[generator]
player['resources'][generated] += 1
if not player['id'] in awarded:
awarded[player['id']] = {"name": player['name'], "resources": {}}
if not generated in awarded[player['id']]["resources"]:
awarded[player['id']]["resources"][generated] = 0
awarded[player['id']]["resources"][generated] += amount
for player in players.values():
for generator in player['generators']:
for i in range(player['generators'][generator]):
if random.randint(1, 10) == 1:
award(player, generator)
for generator in player['improved_generators']:
for i in range(player['improved_generators'][generator]):
if random.randint(1, 10) == 1:
award(player, generator, 2)
return awarded
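# Example (sketch, illustrative only; the player dict is hypothetical): each
# plain generator pays out one unit of its resource with probability 1/10 per
# round, and each improved generator pays out two units at the same rate.
# run_generators returns only the players who actually received something.
def _demo_run_generators():
    player = setup_player({'id': 'demo', 'endpoint': 'http://localhost:9999'})
    return run_generators({player['id']: player})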
def start_game(db, players):
print "Starting game"
game_id = uuid4().hex
game = {
"id": game_id,
"players": {},
"player_order": [],
"turn": len(players),
"turn_id": None,
"round": 0,
"pushes": []
}
    used_names = []
for player in players:
p = setup_player(player)
i = 0
while p['name'] in used_names:
i += 1
p['name'] = p['player'] + ' %i' % i
used_names.append(p['name'])
game['players'][p['id']] = p
game['player_order'].append(p['id'])
generators_to_use = copy(config.GENERATORS.keys())
random.shuffle(generators_to_use)
p = 0
max_generators = 0
# First pass out all generators as evenly as possible
while (len(generators_to_use) > 0):
game['players'][game['player_order'][p]]['generators'][generators_to_use.pop()] += 1
total_generators = sum(game['players'][game['player_order'][p]]['generators'].values())
if total_generators > max_generators:
max_generators = total_generators
p += 1
if p == len(game['player_order']):
p = 0
# Now ensure everyone has an equal amount
generators_to_use = copy(config.GENERATORS.keys())
random.shuffle(generators_to_use)
for p in game['players']:
while sum(game['players'][p]['generators'].values()) < max_generators:
game['players'][p]['generators'][generators_to_use.pop()] += 1
started_players = []
for player in game['players'].values():
response, data = communication.request(player, "game/%s" % (player['id']), {"player": player, "endpoint": "http://localhost:8080/game/%s" % game_id}, 'PUT')
if response.status != 200:
for p in started_players:
                communication.request(p, "game/%s/cancel" % (p['id']), {"player": p})
return False
started_players.append(player)
db.save(game)
next_turn(db, game)
return game_id
def game_is_over(game):
if game['round'] >= config.MAX_ROUNDS:
return True
for player in game['players'].values():
if player['customers'] >= config.MAX_POINTS:
return True
return False
def next_turn(db, game):
turn_taken = False
if config.DELAY > 0:
time.sleep(config.DELAY)
while not turn_taken: # Find the next player ready to make a move
game['turn'] = game['turn'] + 1
if game['turn'] >= len(game['player_order']):
# Next round
game['round'] += 1
game['turn'] = 0
if game_is_over(game):
return end_game(game)
print "Starting round %i" % game['round']
game['player_order'].reverse()
generated = run_generators(game['players'])
if len(generated.keys()) > 0:
push(game, 'new-round', {'round': game['round'], 'players': copy(game['players']), "generated": generated})
else:
push(game, 'new-round', {'round': game['round'], 'players': copy(game['players'])})
player = game['players'][game['player_order'][game['turn']]] # Wow - nice line
response, data = communication.request(player, "game/%s/start_turn" % player['id'])
if response.status == 200:
turn_id = uuid4().hex
game['turn_id'] = turn_id
player['actions'] = {}
db.save(game)
turn_taken = True
push(game, 'start-turn', {'player': player, 'turn': game['turn'], 'round': game['round']})
def force_turn_end():
g = db.get(game['id'])
if g['turn_id'] == turn_id:
# The turn hasn't changed
print "Out of time"
end_turn(db, game, player, forced=True)
turn_timeout = Timer(config.TURN_TIMEOUT, force_turn_end)
turn_timeout.start()
else:
db.save(game)
log_action(game, player, 'turn-skipped')
push(game, 'turn-skipped', {'player': player, 'turn': game['turn'], 'round': game['round']})
def require_player_turn(f):
def inner_func(db, game, player, *args, **kwargs):
if player['id'] != game['player_order'][game['turn']]:
abort(400, 'It is not your turn')
return f(db, game, player, *args, **kwargs)
return inner_func
def has_enough_resources(player, resources):
for resource in resources:
if player['resources'][resource] < resources[resource]:
return False
return True
def require_resources(resources):
def require_resources_inner(f):
def inner_func(db, game, player, *args, **kwargs):
if not has_enough_resources(player, resources):
abort(400, 'Not enough resources')
return f(db, game, player, *args, **kwargs)
return inner_func
return require_resources_inner
def charge_resources(player, resources):
for resource in resources:
player['resources'][resource] -= resources[resource]
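# Example (sketch, illustrative only; example_action is not part of the real
# game API): how the turn and resource guards compose. config.PR_COST is reused
# here purely as a known {resource: amount} mapping; any such dict works.
@require_player_turn
@require_resources(config.PR_COST)
def example_action(db, game, player):
    charge_resources(player, config.PR_COST)   # spend what the guard checked for
    db.save(game)
    return {"player": player}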
@require_player_turn
def end_turn(db, game, player, forced=False):
def run_end_turn():
next_turn(db, game)
print "Ended turn"
game['turn_id'] = None
db.save(game)
if forced:
push(game, 'timeout', {'player': player, 'turn': game['turn'], 'round': game['round']})
communication.request(player, "game/%s/end_turn" % player['id'])
thread.start_new_thread(run_end_turn, ())
return {"status": "success"}
def end_game(game):
def sort_players(player_id):
return (int(game['players'][player_id]['customers']), sum(game['players'][player_id]['resources'].values()))
game['player_order'] = sorted(game['player_order'], key=sort_players, reverse=True)
push(game, 'end', {"players": [game['players'][p] for p in game['player_order']]})
for player in game['players'].values():
communication.request(player, "game/%s" % player['id'], method="DELETE")
@require_player_turn
@require_resources(config.PR_COST)
def purchase_pr(db, game, player):
    max_pr = reduce(lambda m, p: p['pr'] if p['pr'] > m else m, game['players'].values(), 0)
if max_pr <= (player['pr'] + 1):
for p in game['players'].values(): # Take away the bonuses
if p['pr'] == max_pr:
p['customers'] -= 2
charge_resources(player, config.PR_COST)
player['pr'] += 1
if max_pr <= (player['pr']):
for p in game['players'].values(): # Reapply bonus
if p['pr'] == player['pr']:
p['customers'] += 2
db.save(game)
log_action(game, player, 'purchase-pr')
push(game, 'purchase-pr', {"round": game['round'], "turn": game['turn'], "player": player})
return {"player": player, "highest_pr": (max_pr <= player['pr'])}
@require_player_turn
@require_resources(config.GENERATOR_COST)
def purchase_generator(db, game, player):
if sum(player['generators'].values()) >= config.MAX_RESOURCE_GENERATORS:
abort(400, "You can't build any more generators")
charge_resources(player, config.GENERATOR_COST)
generator = random.choice(config.GENERATORS.keys())
player['generators'][generator] += 1
player['customers'] += 1
db.save(game)
log_action(game, player, 'purchase-generator', {"generator_type": generator})
push(game, 'purchase-generator', {"round": game['round'], "turn": game['turn'], "player": player, 'generator_type': generator})
return {"player": player, 'generator_type': generator}
@require_player_turn
@require_resources(config.GENERATOR_IMPROVEMENT_COST)
def upgrade_generator(db, game, player, generator_type):
if sum(player['improved_generators'].values()) >= config.MAX_IMPROVED_RESOURCE_GENERATORS:
abort(400, "You can't build any more generators")
if player['generators'][generator_type] < 1:
abort(400, "You don't have enough %s" % generator_type)
charge_resources(player, config.GENERATOR_IMPROVEMENT_COST)
player['generators'][generator_type] -= 1
player['improved_generators'][generator_type] += 1
player['customers'] += 1
db.save(game)
log_action(game, player, 'upgrade-generator', {"generator_type": generator_type})
push(game, 'upgrade-generator', {"round": game['round'], "turn": game['turn'], "player": player, 'generator_type': generator_type})
return {"player": player, 'generator_type': generator_type}
@require_player_turn
def trade(db, game, player, offering, requesting):
if not has_enough_resources(player, offering):
abort(400, "You don't have enough stuff!")
players = [game['players'][p] for p in game['player_order'] if not p == player['id']]
random.shuffle(players) # Don't give one person first refusal
print "Player ", player['id'], " offering ", offering, " for ", requesting
trade_id = uuid4().hex
push(game, 'trade', {"round": game['round'], "turn": game['turn'], "player": player, 'offering': offering, 'requesting': requesting, "trade_id": trade_id})
if sum(offering.values()) >= (sum(requesting.values()) * config.BANK_TRADE_RATE):
# The bank will take the trade
charge_resources(player, offering)
for resource in requesting:
player['resources'][resource] += requesting[resource]
log_action(game, player, 'bank-trade', {"offer": offering, "request": requesting})
push(game, 'trade-bank-accepted', {"trade_id": trade_id})
db.save(game)
return {"player": player, 'accepted_by': 'bank'}
for p in players:
if has_enough_resources(p, requesting):
response, data = communication.request(p, "game/%s/trade" % p['id'], {"player": player['id'], "offering": offering, "requesting": requesting})
if response.status == 200:
charge_resources(player, offering)
charge_resources(p, requesting)
for resource in offering:
p['resources'][resource] += offering[resource]
for resource in requesting:
player['resources'][resource] += requesting[resource]
log_action(game, player, 'trade', {"offer": offering, "request": requesting, "traded_with": p['id']})
push(game, 'trade-accepted', {"trade_id": trade_id, "player": p})
db.save(game)
return {"player": player, 'accepted_by': p['id']}
log_action(game, player, 'trade-rejected', {"offer": offering, "request": requesting})
push(game, 'trade-rejected', {"trade_id": trade_id})
abort(500, "No bites")
@require_player_turn
def log(db, game, player, message):
push(game, 'log', {'player': player, 'message': message})
|
|
#!/usr/bin/env python3
import argparse
import re
from biocode import utils, gff, things
"""
Example with a match extending far past a gene
DEBUG: g8713.t1:(3529) overlaps (size:2893) nucleotide_to_protein_match.158742:(6245), match target id:jgi|Copci1|10482|CC1G_12482T0, length:1764
mRNA % cov: 81.97789742136582
target % cov: 164.00226757369614
NODE_19891_length_98951_cov_9.347667 AUGUSTUS mRNA 38709 42237 . + . ID=g8713.t1;Parent=g8713
NODE_19891_length_98951_cov_9.347667 nap nucleotide_to_protein_match 35357 41601 . + . ID=nucleotide_to_protein_match.158742;Target=jgi|Copci1|10482|CC1G_12482T0 317 504
NODE_19891_length_98951_cov_9.347667 nap match_part 35357 35498 . + . ID=match_part.493339;Parent=nucleotide_to_protein_match.158742;Target=jgi|Copci1|10482|CC1G_12482T0 317 364a
NODE_19891_length_98951_cov_9.347667 nap match_part 36352 36741 . + . ID=match_part.493340;Parent=nucleotide_to_protein_match.158742;Target=jgi|Copci1|10482|CC1G_12482T0 365 490
NODE_19891_length_98951_cov_9.347667 nap match_part 41560 41601 . + . ID=match_part.493341;Parent=nucleotide_to_protein_match.158742;Target=jgi|Copci1|10482|CC1G_12482T0 491 504
Test command:
~/git/biocode/sandbox/jorvis/custom.select_rhizopus_training_set.py -a 99-892.augustus.gff3 -p aat.fungi_jgi.match_and_parts.gff3 -aatdb fungi_jgi.faa -b augustus_blast_vs_99-880.btab -be 1e-30 -bpi 75 -ppc 80
"""
def main():
    parser = argparse.ArgumentParser( description='Select training gene candidates supported by AAT protein alignments and BLAST hits against a second organism')
parser.add_argument('-a', '--organism1_annotation', type=str, required=True, help='Annotation GFF for organism 1' )
parser.add_argument('-p', '--organism1_aat_alignments', type=str, required=True, help='Path to AAT GFF3 (match/match_part)' )
parser.add_argument('-aatdb', '--aat_fasta_db', type=str, required=True, help='Path to FASTA database that was used in AAT' )
    parser.add_argument('-b', '--organism1_blast_alignments', type=str, required=True, help='Path to BLASTp btab file vs. organism 2 proteins' )
    parser.add_argument('-be', '--blast_eval_cutoff', type=float, required=False, default=1e-5, help='BLAST e-value cutoff' )
    parser.add_argument('-bpi', '--blast_percent_identity_cutoff', type=float, required=False, default=0, help='BLAST percent identity cutoff' )
    parser.add_argument('-ppc', '--aat_percent_coverage_cutoff', type=float, required=False, default=0, help='Percent coverage of the query protein by the AAT match' )
parser.add_argument('-o', '--output_id_list', type=str, required=False, help='List of IDs from organism1 that passed' )
args = parser.parse_args()
debugging_transcript = None
## if the output file wasn't passed build one from the other parameters
if args.output_id_list is None:
args.output_id_list = "training_ids.be_{0}.bpi_{1}.ppc_{2}.list".format(args.blast_eval_cutoff, args.blast_percent_identity_cutoff, args.aat_percent_coverage_cutoff)
print("INFO: Parsing organism1 annotation")
(assemblies, features) = gff.get_gff3_features(args.organism1_annotation)
print("INFO: Parsing AAT FASTA database")
aat_seqs = utils.fasta_dict_from_file(args.aat_fasta_db)
# keys are assembly IDs, value for each is a list of matches on them
aat_matches = dict()
aat_match_count = 0
    current_match = None
    current_match_assembly = None
## IDs of features in organism 1 which overlap AAT
o1_with_aat = list()
o1_with_o2 = list()
print("INFO: Parsing organism1 AAT protein alignments")
for line in open(args.organism1_aat_alignments):
cols = line.split("\t")
if line.startswith('#') or len(cols) != 9:
continue
assembly_id = cols[0]
        # skip this match if there are no predicted genes on the same assembly
if assembly_id not in assemblies:
continue
if assembly_id not in aat_matches:
aat_matches[assembly_id] = list()
fmin = int(cols[3]) - 1
fmax = int(cols[4])
strand = cols[6]
feature_id = gff.column_9_value(cols[8], 'ID').replace('"', '')
target = gff.column_9_value(cols[8], 'Target')
        m = re.search(r"^(\S+)", target)
if m:
target = m.group(1)
        if cols[2] == 'nucleotide_to_protein_match':
            if current_match is not None:
                aat_matches[current_match_assembly].append(current_match)
                aat_match_count += 1
            current_match = things.Match(id=feature_id, target_id=target, subclass='nucleotide_to_protein_match', length=fmax - fmin)
            current_match_assembly = assembly_id
            current_match.locate_on( target=assemblies[assembly_id], fmin=fmin, fmax=fmax, strand=strand )
elif cols[2] == 'match_part':
parent_id = gff.column_9_value(cols[8], 'Parent').replace('"', '')
match_part = things.MatchPart(id=feature_id, parent=parent_id, length=fmax - fmin)
match_part.locate_on( target=assemblies[assembly_id], fmin=fmin, fmax=fmax, strand=strand )
current_match.add_part(match_part)
print("INFO: Parsed {0} protein alignment chains".format(aat_match_count))
print("INFO: Comparing organism1's mRNAs with AAT match coordinates")
for assembly_id in assemblies:
if assembly_id not in aat_matches:
continue
assembly = assemblies[assembly_id]
for gene in assembly.genes():
for mRNA in gene.mRNAs():
if debugging_transcript is not None:
if mRNA.id == debugging_transcript:
print("DEBUG: processing debugging transcript: {0}".format(mRNA.id))
else:
continue
for aat_match in aat_matches[assembly_id]:
#print("DEBUG: about to call overlap_size_with {0} and {1}, which has {2} segments".format(mRNA.id, aat_match.id, len(aat_match.parts)) )
overlap_size = mRNA.overlap_size_with(aat_match)
if overlap_size is not None:
#print("DEBUG: {0}:({1}) overlaps (size:{2}) {3}:({4})".format(mRNA.id, mRNA.length, overlap_size, aat_match.id, aat_match.length) )
# this shouldn't be possible, but check just in case
if overlap_size > mRNA.length:
raise Exception("ERROR: overlap size ({0}) > mRNA length ({1})".format(overlap_size, mRNA.length))
if aat_match.target_id not in aat_seqs:
raise Exception("ERROR: Found match with target ID ({0}) but didn't find a FASTA entry for it via -aatdb".format(aat_match.target_id))
# this is a protein length, so x3
match_target_length = len(aat_seqs[aat_match.target_id]['s']) * 3
(mRNA_percent_coverage, target_percent_coverage) = calculate_fragmented_coverage(mRNA, aat_match, match_target_length)
#print("DEBUG: mRNA_percent_coverage:{0}".format(mRNA_percent_coverage) )
#print("DEBUG: match_percent_coverage:{0}".format(target_percent_coverage) )
if mRNA_percent_coverage >= args.aat_percent_coverage_cutoff and target_percent_coverage >= args.aat_percent_coverage_cutoff:
o1_with_aat.append(mRNA.id)
#print("DEBUG: {0}:({1}) overlaps (size:{2}) {3}:({4}), match target id:{5}, length:{6}".format( \
# mRNA.id, mRNA.length, overlap_size, aat_match.id, aat_match.length, \
# aat_match.target_id, match_target_length) )
#print("\tmRNA % cov: {0}".format(mRNA_percent_coverage))
#print("\ttarget % cov: {0}".format(target_percent_coverage))
break # only need to see if one matched
print("INFO: Found {0} mRNAs in org1 with overlapping fungi AAT coordinates".format(len(o1_with_aat)))
# key=org1_transcript_id, value=org2_transcript_id
top_blast_hits = dict()
print("INFO: parsing BLAST results vs. org2")
for line in open(args.organism1_blast_alignments):
cols = line.split("\t")
if float(cols[19]) > args.blast_eval_cutoff:
continue
if float(cols[10]) < args.blast_percent_identity_cutoff:
continue
# if we survived until here, this one's good.
top_blast_hits[cols[0]] = cols[5]
print("INFO: Comparing overlap between AAT-matched proteins and BLAST ones")
for o1_mRNA_id in o1_with_aat:
if o1_mRNA_id in top_blast_hits:
o1_with_o2.append(o1_mRNA_id)
print("INFO: Found {0} mRNAs in org1 with overlapping AAT coordinates and BLAST hit to org2".format(len(o1_with_o2)))
id_list_fh = open(args.output_id_list, 'wt')
for mRNA_id in o1_with_o2:
id_list_fh.write("{0}\n".format(mRNA_id))
def calculate_fragmented_coverage( rna, match, match_part_length ):
"""
This will not return a correct value if there are overlapping segments of a
match, which shouldn't happen anyway.
"""
if not rna.overlaps_with( match ):
raise Exception("ERROR: {0} and {1} were expected to overlap but don't seem to".format(CDS.id, mp.id))
rna_part_length = 0
rna_parts_bases_covered = 0
match_parts_bases_covered = 0
for CDS in rna.CDSs():
rna_part_length += CDS.length
for mp in match.parts:
(rna_loc, match_loc) = rna.shared_molecule_locations_with( mp )
if rna_loc is None:
continue
overlap_size = CDS.overlap_size_with(mp)
if overlap_size is not None:
rna_parts_bases_covered += overlap_size
match_parts_bases_covered += overlap_size
#print("\tDEBUG: overlap: {0}:{1:.1f} - {2}:{3:.1f}".format(CDS.id, ((overlap_size/CDS.length)*100), mp.id, ((overlap_size/mp.length)*100)) )
break
#else:
# print("\tDEBUG: {0} doesn't seem to overlap {1}".format(CDS.id, mp.id) )
rna_covered = (rna_parts_bases_covered / rna_part_length) * 100
match_covered = (match_parts_bases_covered / match_part_length) * 100
# Some buffer seems to be necessary. I need to track this later.
#if rna_covered > 105 or match_covered > 105:
# raise Exception("ERROR: coverage % logically too high between {0}:{1} and {2}:{3}:length-{4}".format(rna.id, rna_covered, match.id, match_covered, match_part_length) )
return (rna_covered, match_covered)
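def _demo_coverage_math():
    # Example (sketch, hypothetical numbers; not part of the original script):
    # an mRNA whose CDS segments total 3,000 bases, 2,460 of which are covered
    # by match parts, aligned to an 820 aa protein target (820 * 3 = 2,460 nt).
    rna_covered = (2460 / 3000) * 100      # 82.0  -> % of the mRNA covered
    match_covered = (2460 / 2460) * 100    # 100.0 -> % of the target covered
    return rna_covered, match_covered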
if __name__ == '__main__':
main()
|
|
from sympy.core.backend import (S, sympify, expand, sqrt, Add, zeros,
ImmutableMatrix as Matrix)
from sympy import trigsimp
from sympy.core.compatibility import unicode
from sympy.utilities.misc import filldedent
__all__ = ['Vector']
class Vector(object):
"""The class used to define vectors.
    It, along with ReferenceFrame, is one of the building blocks used to
    describe a classical mechanics system in PyDy and sympy.physics.vector.
Attributes
==========
simp : Boolean
Let certain methods use trigsimp on their outputs
"""
simp = False
def __init__(self, inlist):
"""This is the constructor for the Vector class. You shouldn't be
calling this, it should only be used by other functions. You should be
        treating Vectors like you would if you were doing the math by
hand, and getting the first 3 from the standard basis vectors from a
ReferenceFrame.
The only exception is to create a zero vector:
zv = Vector(0)
"""
self.args = []
if inlist == 0:
inlist = []
while len(inlist) != 0:
added = 0
for i, v in enumerate(self.args):
if inlist[0][1] == self.args[i][1]:
self.args[i] = (self.args[i][0] + inlist[0][0],
inlist[0][1])
inlist.remove(inlist[0])
added = 1
break
if added != 1:
self.args.append(inlist[0])
inlist.remove(inlist[0])
i = 0
# This code is to remove empty frames from the list
while i < len(self.args):
if self.args[i][0] == Matrix([0, 0, 0]):
self.args.remove(self.args[i])
i -= 1
i += 1
def __hash__(self):
return hash(tuple(self.args))
def __add__(self, other):
"""The add operator for Vector. """
other = _check_vector(other)
return Vector(self.args + other.args)
def __and__(self, other):
"""Dot product of two vectors.
Returns a scalar, the dot product of the two Vectors
Parameters
==========
other : Vector
The Vector which we are dotting with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, dot
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = ReferenceFrame('N')
>>> dot(N.x, N.x)
1
>>> dot(N.x, N.y)
0
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
>>> dot(N.y, A.y)
cos(q1)
"""
from sympy.physics.vector.dyadic import Dyadic
if isinstance(other, Dyadic):
return NotImplemented
other = _check_vector(other)
out = S(0)
for i, v1 in enumerate(self.args):
for j, v2 in enumerate(other.args):
out += ((v2[0].T)
* (v2[1].dcm(v1[1]))
* (v1[0]))[0]
if Vector.simp:
return trigsimp(sympify(out), recursive=True)
else:
return sympify(out)
def __div__(self, other):
"""This uses mul and inputs self and 1 divided by other. """
return self.__mul__(sympify(1) / other)
__truediv__ = __div__
def __eq__(self, other):
"""Tests for equality.
        It is very important to note that this is only as good as the SymPy
equality test; False does not always mean they are not equivalent
Vectors.
If other is 0, and self is empty, returns True.
If other is 0 and self is not empty, returns False.
If none of the above, only accepts other as a Vector.
"""
if other == 0:
other = Vector(0)
try:
other = _check_vector(other)
except TypeError:
return False
if (self.args == []) and (other.args == []):
return True
elif (self.args == []) or (other.args == []):
return False
frame = self.args[0][1]
for v in frame:
if expand((self - other) & v) != 0:
return False
return True
def __mul__(self, other):
"""Multiplies the Vector by a sympifyable expression.
Parameters
==========
other : Sympifyable
The scalar to multiply this Vector with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import Symbol
>>> N = ReferenceFrame('N')
>>> b = Symbol('b')
>>> V = 10 * b * N.x
>>> print(V)
10*b*N.x
"""
newlist = [v for v in self.args]
for i, v in enumerate(newlist):
newlist[i] = (sympify(other) * newlist[i][0], newlist[i][1])
return Vector(newlist)
def __ne__(self, other):
return not self.__eq__(other)
def __neg__(self):
return self * -1
def __or__(self, other):
"""Outer product between two Vectors.
A rank increasing operation, which returns a Dyadic from two Vectors
Parameters
==========
other : Vector
The Vector to take the outer product with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer
>>> N = ReferenceFrame('N')
>>> outer(N.x, N.x)
(N.x|N.x)
"""
from sympy.physics.vector.dyadic import Dyadic
other = _check_vector(other)
ol = Dyadic(0)
for i, v in enumerate(self.args):
for i2, v2 in enumerate(other.args):
# it looks this way because if we are in the same frame and
# use the enumerate function on the same frame in a nested
# fashion, then bad things happen
ol += Dyadic([(v[0][0] * v2[0][0], v[1].x, v2[1].x)])
ol += Dyadic([(v[0][0] * v2[0][1], v[1].x, v2[1].y)])
ol += Dyadic([(v[0][0] * v2[0][2], v[1].x, v2[1].z)])
ol += Dyadic([(v[0][1] * v2[0][0], v[1].y, v2[1].x)])
ol += Dyadic([(v[0][1] * v2[0][1], v[1].y, v2[1].y)])
ol += Dyadic([(v[0][1] * v2[0][2], v[1].y, v2[1].z)])
ol += Dyadic([(v[0][2] * v2[0][0], v[1].z, v2[1].x)])
ol += Dyadic([(v[0][2] * v2[0][1], v[1].z, v2[1].y)])
ol += Dyadic([(v[0][2] * v2[0][2], v[1].z, v2[1].z)])
return ol
def _latex(self, printer=None):
"""Latex Printing method. """
from sympy.physics.vector.printing import VectorLatexPrinter
ar = self.args # just to shorten things
if len(ar) == 0:
return str(0)
ol = [] # output list, to be concatenated to a string
for i, v in enumerate(ar):
for j in 0, 1, 2:
# if the coef of the basis vector is 1, we skip the 1
if ar[i][0][j] == 1:
ol.append(' + ' + ar[i][1].latex_vecs[j])
# if the coef of the basis vector is -1, we skip the 1
elif ar[i][0][j] == -1:
ol.append(' - ' + ar[i][1].latex_vecs[j])
elif ar[i][0][j] != 0:
# If the coefficient of the basis vector is not 1 or -1;
# also, we might wrap it in parentheses, for readability.
arg_str = VectorLatexPrinter().doprint(ar[i][0][j])
if isinstance(ar[i][0][j], Add):
arg_str = "(%s)" % arg_str
if arg_str[0] == '-':
arg_str = arg_str[1:]
str_start = ' - '
else:
str_start = ' + '
ol.append(str_start + arg_str + ar[i][1].latex_vecs[j])
outstr = ''.join(ol)
if outstr.startswith(' + '):
outstr = outstr[3:]
elif outstr.startswith(' '):
outstr = outstr[1:]
return outstr
def _pretty(self, printer=None):
"""Pretty Printing method. """
from sympy.physics.vector.printing import VectorPrettyPrinter
from sympy.printing.pretty.stringpict import prettyForm
e = self
class Fake(object):
def render(self, *args, **kwargs):
ar = e.args # just to shorten things
if len(ar) == 0:
return unicode(0)
settings = printer._settings if printer else {}
vp = printer if printer else VectorPrettyPrinter(settings)
pforms = [] # output list, to be concatenated to a string
for i, v in enumerate(ar):
for j in 0, 1, 2:
# if the coef of the basis vector is 1, we skip the 1
if ar[i][0][j] == 1:
pform = vp._print(ar[i][1].pretty_vecs[j])
# if the coef of the basis vector is -1, we skip the 1
elif ar[i][0][j] == -1:
pform = vp._print(ar[i][1].pretty_vecs[j])
pform= prettyForm(*pform.left(" - "))
bin = prettyForm.NEG
pform = prettyForm(binding=bin, *pform)
elif ar[i][0][j] != 0:
# If the basis vector coeff is not 1 or -1,
# we might wrap it in parentheses, for readability.
if isinstance(ar[i][0][j], Add):
pform = vp._print(
ar[i][0][j]).parens()
else:
pform = vp._print(
ar[i][0][j])
pform = prettyForm(*pform.right(" ",
ar[i][1].pretty_vecs[j]))
else:
continue
pforms.append(pform)
pform = prettyForm.__add__(*pforms)
kwargs["wrap_line"] = kwargs.get("wrap_line")
kwargs["num_columns"] = kwargs.get("num_columns")
out_str = pform.render(*args, **kwargs)
mlines = [line.rstrip() for line in out_str.split("\n")]
return "\n".join(mlines)
return Fake()
def __ror__(self, other):
"""Outer product between two Vectors.
A rank increasing operation, which returns a Dyadic from two Vectors
Parameters
==========
other : Vector
The Vector to take the outer product with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, outer
>>> N = ReferenceFrame('N')
>>> outer(N.x, N.x)
(N.x|N.x)
"""
from sympy.physics.vector.dyadic import Dyadic
other = _check_vector(other)
ol = Dyadic(0)
for i, v in enumerate(other.args):
for i2, v2 in enumerate(self.args):
# it looks this way because if we are in the same frame and
# use the enumerate function on the same frame in a nested
# fashion, then bad things happen
ol += Dyadic([(v[0][0] * v2[0][0], v[1].x, v2[1].x)])
ol += Dyadic([(v[0][0] * v2[0][1], v[1].x, v2[1].y)])
ol += Dyadic([(v[0][0] * v2[0][2], v[1].x, v2[1].z)])
ol += Dyadic([(v[0][1] * v2[0][0], v[1].y, v2[1].x)])
ol += Dyadic([(v[0][1] * v2[0][1], v[1].y, v2[1].y)])
ol += Dyadic([(v[0][1] * v2[0][2], v[1].y, v2[1].z)])
ol += Dyadic([(v[0][2] * v2[0][0], v[1].z, v2[1].x)])
ol += Dyadic([(v[0][2] * v2[0][1], v[1].z, v2[1].y)])
ol += Dyadic([(v[0][2] * v2[0][2], v[1].z, v2[1].z)])
return ol
def __rsub__(self, other):
return (-1 * self) + other
def __str__(self, printer=None):
"""Printing method. """
from sympy.physics.vector.printing import VectorStrPrinter
ar = self.args # just to shorten things
if len(ar) == 0:
return str(0)
ol = [] # output list, to be concatenated to a string
for i, v in enumerate(ar):
for j in 0, 1, 2:
# if the coef of the basis vector is 1, we skip the 1
if ar[i][0][j] == 1:
ol.append(' + ' + ar[i][1].str_vecs[j])
# if the coef of the basis vector is -1, we skip the 1
elif ar[i][0][j] == -1:
ol.append(' - ' + ar[i][1].str_vecs[j])
elif ar[i][0][j] != 0:
# If the coefficient of the basis vector is not 1 or -1;
# also, we might wrap it in parentheses, for readability.
arg_str = VectorStrPrinter().doprint(ar[i][0][j])
if isinstance(ar[i][0][j], Add):
arg_str = "(%s)" % arg_str
if arg_str[0] == '-':
arg_str = arg_str[1:]
str_start = ' - '
else:
str_start = ' + '
ol.append(str_start + arg_str + '*' + ar[i][1].str_vecs[j])
outstr = ''.join(ol)
if outstr.startswith(' + '):
outstr = outstr[3:]
elif outstr.startswith(' '):
outstr = outstr[1:]
return outstr
def __sub__(self, other):
"""The subraction operator. """
return self.__add__(other * -1)
def __xor__(self, other):
"""The cross product operator for two Vectors.
Returns a Vector, expressed in the same ReferenceFrames as self.
Parameters
==========
other : Vector
The Vector which we are crossing with
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector
>>> from sympy import symbols
>>> q1 = symbols('q1')
>>> N = ReferenceFrame('N')
>>> N.x ^ N.y
N.z
>>> A = N.orientnew('A', 'Axis', [q1, N.x])
>>> A.x ^ N.y
N.z
>>> N.y ^ A.x
- sin(q1)*A.y - cos(q1)*A.z
"""
from sympy.physics.vector.dyadic import Dyadic
if isinstance(other, Dyadic):
return NotImplemented
other = _check_vector(other)
if other.args == []:
return Vector(0)
def _det(mat):
"""This is needed as a little method for to find the determinant
of a list in python; needs to work for a 3x3 list.
SymPy's Matrix won't take in Vector, so need a custom function.
You shouldn't be calling this.
"""
return (mat[0][0] * (mat[1][1] * mat[2][2] - mat[1][2] * mat[2][1])
+ mat[0][1] * (mat[1][2] * mat[2][0] - mat[1][0] *
mat[2][2]) + mat[0][2] * (mat[1][0] * mat[2][1] -
mat[1][1] * mat[2][0]))
outvec = Vector(0)
ar = other.args # For brevity
for i, v in enumerate(ar):
tempx = v[1].x
tempy = v[1].y
tempz = v[1].z
tempm = ([[tempx, tempy, tempz], [self & tempx, self & tempy,
self & tempz], [Vector([ar[i]]) & tempx,
Vector([ar[i]]) & tempy, Vector([ar[i]]) & tempz]])
outvec += _det(tempm)
return outvec
_sympystr = __str__
_sympyrepr = _sympystr
__repr__ = __str__
__radd__ = __add__
__rand__ = __and__
__rmul__ = __mul__
def separate(self):
"""
The constituents of this vector in different reference frames,
as per its definition.
Returns a dict mapping each ReferenceFrame to the corresponding
constituent Vector.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> R1 = ReferenceFrame('R1')
>>> R2 = ReferenceFrame('R2')
>>> v = R1.x + R2.x
>>> v.separate() == {R1: R1.x, R2: R2.x}
True
"""
components = {}
for x in self.args:
components[x[1]] = Vector([x])
return components
def dot(self, other):
return self & other
dot.__doc__ = __and__.__doc__
def cross(self, other):
return self ^ other
cross.__doc__ = __xor__.__doc__
def outer(self, other):
return self | other
outer.__doc__ = __or__.__doc__
def diff(self, var, frame, var_in_dcm=True):
"""Returns the partial derivative of the vector with respect to a
variable in the provided reference frame.
Parameters
==========
var : Symbol
What the partial derivative is taken with respect to.
frame : ReferenceFrame
The reference frame that the partial derivative is taken in.
var_in_dcm : boolean
If true, the differentiation algorithm assumes that the variable
may be present in any of the direction cosine matrices that relate
the frame to the frames of any component of the vector. But if it
is known that the variable is not present in the direction cosine
matrices, false can be set to skip full reexpression in the desired
frame.
Examples
========
>>> from sympy import Symbol
>>> from sympy.physics.vector import dynamicsymbols, ReferenceFrame
>>> from sympy.physics.vector import Vector
>>> Vector.simp = True
>>> t = Symbol('t')
>>> q1 = dynamicsymbols('q1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.y])
>>> A.x.diff(t, N)
- q1'*A.z
>>> B = ReferenceFrame('B')
>>> u1, u2 = dynamicsymbols('u1, u2')
>>> v = u1 * A.x + u2 * B.y
>>> v.diff(u2, N, var_in_dcm=False)
B.y
"""
from sympy.physics.vector.frame import _check_frame
var = sympify(var)
_check_frame(frame)
partial = Vector(0)
for vector_component in self.args:
measure_number = vector_component[0]
component_frame = vector_component[1]
if component_frame == frame:
partial += Vector([(measure_number.diff(var), frame)])
else:
# If the direction cosine matrix relating the component frame
# with the derivative frame does not contain the variable.
if not var_in_dcm or (frame.dcm(component_frame).diff(var) ==
zeros(3, 3)):
partial += Vector([(measure_number.diff(var),
component_frame)])
else: # else express in the frame
reexp_vec_comp = Vector([vector_component]).express(frame)
deriv = reexp_vec_comp.args[0][0].diff(var)
partial += Vector([(deriv, frame)]).express(component_frame)
return partial
def express(self, otherframe, variables=False):
"""
Returns a Vector equivalent to this one, expressed in otherframe.
Uses the global express method.
Parameters
==========
otherframe : ReferenceFrame
The frame for this Vector to be described in
variables : boolean
If True, the coordinate symbols(if present) in this Vector
            are re-expressed in terms of otherframe
Examples
========
>>> from sympy.physics.vector import ReferenceFrame, Vector, dynamicsymbols
>>> q1 = dynamicsymbols('q1')
>>> N = ReferenceFrame('N')
>>> A = N.orientnew('A', 'Axis', [q1, N.y])
>>> A.x.express(N)
cos(q1)*N.x - sin(q1)*N.z
"""
from sympy.physics.vector import express
return express(self, otherframe, variables=variables)
def to_matrix(self, reference_frame):
"""Returns the matrix form of the vector with respect to the given
frame.
Parameters
        ==========
reference_frame : ReferenceFrame
The reference frame that the rows of the matrix correspond to.
Returns
        =======
matrix : ImmutableMatrix, shape(3,1)
The matrix that gives the 1D vector.
Examples
========
>>> from sympy import symbols
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy.physics.mechanics.functions import inertia
>>> a, b, c = symbols('a, b, c')
>>> N = ReferenceFrame('N')
>>> vector = a * N.x + b * N.y + c * N.z
>>> vector.to_matrix(N)
Matrix([
[a],
[b],
[c]])
>>> beta = symbols('beta')
>>> A = N.orientnew('A', 'Axis', (beta, N.x))
>>> vector.to_matrix(A)
Matrix([
[ a],
[ b*cos(beta) + c*sin(beta)],
[-b*sin(beta) + c*cos(beta)]])
"""
return Matrix([self.dot(unit_vec) for unit_vec in
reference_frame]).reshape(3, 1)
def doit(self, **hints):
"""Calls .doit() on each term in the Vector"""
ov = Vector(0)
for i, v in enumerate(self.args):
ov += Vector([(v[0].applyfunc(lambda x: x.doit(**hints)), v[1])])
return ov
def dt(self, otherframe):
"""
Returns a Vector which is the time derivative of
the self Vector, taken in frame otherframe.
Calls the global time_derivative method
Parameters
==========
otherframe : ReferenceFrame
The frame to calculate the time derivative in
"""
from sympy.physics.vector import time_derivative
return time_derivative(self, otherframe)
def simplify(self):
"""Returns a simplified Vector."""
outvec = Vector(0)
for i in self.args:
outvec += Vector([(i[0].simplify(), i[1])])
return outvec
def subs(self, *args, **kwargs):
"""Substituion on the Vector.
Examples
========
>>> from sympy.physics.vector import ReferenceFrame
>>> from sympy import Symbol
>>> N = ReferenceFrame('N')
>>> s = Symbol('s')
>>> a = N.x * s
>>> a.subs({s: 2})
2*N.x
"""
ov = Vector(0)
for i, v in enumerate(self.args):
ov += Vector([(v[0].subs(*args, **kwargs), v[1])])
return ov
def magnitude(self):
"""Returns the magnitude (Euclidean norm) of self."""
return sqrt(self & self)
def normalize(self):
"""Returns a Vector of magnitude 1, codirectional with self."""
return Vector(self.args + []) / self.magnitude()
def applyfunc(self, f):
"""Apply a function to each component of a vector."""
if not callable(f):
raise TypeError("`f` must be callable.")
ov = Vector(0)
for v in self.args:
ov += Vector([(v[0].applyfunc(f), v[1])])
return ov
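def _demo_vector_usage():
    # Example (sketch, illustrative only; this helper is not part of the sympy
    # API): basic arithmetic with Vectors built from a ReferenceFrame's basis.
    from sympy.physics.vector import ReferenceFrame
    N = ReferenceFrame('N')
    v = 3 * N.x + 4 * N.y
    assert v.magnitude() == 5        # sqrt(v & v)
    assert (N.x & N.y) == 0          # dot product of orthogonal basis vectors
    assert (N.x ^ N.y) == N.z        # cross product follows the right-hand rule
    return v.normalize()             # (3/5)*N.x + (4/5)*N.y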
class VectorTypeError(TypeError):
def __init__(self, other, want):
msg = filldedent("Expected an instance of %s, but received object "
"'%s' of %s." % (type(want), other, type(other)))
super(VectorTypeError, self).__init__(msg)
def _check_vector(other):
if not isinstance(other, Vector):
raise TypeError('A Vector must be supplied')
return other
|
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import copy
import errno
import os
from os.path import join, isdir
import sys
try:
try:
from inspect import signature
except ImportError:
from funcsigs import signature
from jupyter_core.paths import jupyter_config_dir
from notebook.nbextensions import install_nbextension
from notebook.services.config import ConfigManager
except ImportError:
# when a Conda environment is removed, dependencies may be
# unlinked first. In this case, just bail. The Jupyter
# config where this extension is registered lives in the
# environment, so it will be destroyed anyway.
# (If we allow this to fail, it will show up as a build
# failure on anaconda.org CI system).
sys.exit(0)
class StaticPathNotFound(Exception):
pass
def mkdir_p(path):
""" 'mkdir -p' in Python """
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and isdir(path):
pass
else:
raise
class NBSetup(object):
extensions_map = {
'notebook': 'main.js',
'tree': 'tree.js',
'edit': 'edit.js'
}
def __init__(self, name, **kwargs):
self.name = name
self.prefix = kwargs.get('prefix', None)
self.kwargs = kwargs
if self.prefix is None:
self.path = jupyter_config_dir()
else:
self.path = join(self.prefix, "etc", "jupyter")
self.cm = ConfigManager(config_dir=join(self.path, 'nbconfig'))
self.cm_server = ConfigManager(config_dir=self.path)
def install(self):
"""
Install an extension (copy or symlinks)
"""
try:
install_nbextension(self.kwargs['static'], **self._install_params())
self._echo("Installing {}".format(self.name), 'ok')
except Exception as e:
self._echo(e, None)
self._echo("Installing {}".format(self.name), 'fail')
def enable(self):
mkdir_p(self.cm.config_dir)
self._enable_client_extensions()
try:
__import__(self.name)
self._enable_server_extensions()
except ImportError:
pass
self._echo('Enabling {}'.format(self.name), 'ok')
def disable(self):
# Client side
self._disable_client_extension()
self._disable_server_extension()
def _disable_client_extension(self):
for _type, filename in list(self.extensions_map.items()):
cfg = self.cm.get(_type)
try:
nb_key = "{}/{}".format(self.name, filename[:-3])
nb_extensions = list(cfg['load_extensions'].keys())
if nb_key in nb_extensions:
cfg['load_extensions'].pop(nb_key)
self.cm.set(_type, cfg)
self._echo("Disabling {} as {}".format(self.name, _type), 'ok')
except KeyError:
self._echo("{} wasn't enabled as a {}. Nothing to do.".format(self.name, _type))
def _disable_server_extension(self):
cfg = self.cm_server.get("jupyter_notebook_config")
try:
server_extensions = cfg["NotebookApp"]["server_extensions"]
if "{}.nbextension".format(self.name) in server_extensions:
server_extensions.remove("{}.nbextension".format(self.name))
self.cm_server.update("jupyter_notebook_config", cfg)
self._echo("{} was disabled as a server extension".format(self.name), 'ok')
except KeyError:
self._echo("{} was't enabled as a server extension. Nothing to do.".format(self.name))
def _install_params(self):
params = copy.deepcopy(self.kwargs)
params['destination'] = self.name
if params.get('verbose', False):
params['verbose'] = 2
else:
params['verbose'] = 0
for key in ['enable', 'static', 'version', 'main', 'path']:
try:
del params[key]
except KeyError:
pass
return params
def _echo(self, msg, status=None):
if status == 'ok':
print(' '.join([msg, '\033[92m', 'OK' + '\033[0m']))
elif status == 'fail':
print(' '.join([msg, '\033[91m', 'FAIL' + '\033[0m']))
else:
print(msg)
def _enable_client_extensions(self):
directory = self.kwargs['static']
for key, filename in list(self.extensions_map.items()):
if filename in os.listdir(directory):
self.cm.update(
key, {
"load_extensions": {
"{}/{}".format(self.name, filename[:-3]): True
}
}
)
def _enable_server_extensions(self):
cfg = self.cm_server.get("jupyter_notebook_config")
server_extensions = (
cfg.setdefault("NotebookApp", {})
.setdefault("server_extensions", [])
)
if "{}.nbextension".format(self.name) not in server_extensions:
cfg["NotebookApp"]["server_extensions"] += ["{}.nbextension".format(self.name)]
self.cm_server.update("jupyter_notebook_config", cfg)
def install_cmd(parser_args, setup_args):
params = dict(list(setup_args.items()) + list(parser_args.__dict__.items()))
name = params['name']
del params['name']
nb_setup = NBSetup(name, **params)
nb_setup.install()
if params['enable']:
nb_setup.enable()
def remove_cmd(parser_args, setup_args):
nb_setup = NBSetup(setup_args['name'], prefix=parser_args.prefix)
nb_setup.disable()
def create_parser():
parser = argparse.ArgumentParser(
description="Install and uninstall nbextension")
subparsers = parser.add_subparsers(title='subcommands')
install_parser = subparsers.add_parser(
"install",
description="Install nbextension",
help="Install nbextension"
)
install_parser.add_argument(
"-e", "--enable",
help="Automatically load server and nbextension on notebook launch",
action="store_true"
)
default_kwargs = {'action': 'store', 'nargs': '?'}
store_true_kwargs = {'action': 'store_true'}
store_true = ["symlink", "overwrite", "quiet", "user"]
install_kwargs = list(signature(install_nbextension).parameters)
    for arg in install_kwargs:
        install_parser.add_argument(
            "--{}".format(arg),
            **(store_true_kwargs if arg in store_true else default_kwargs)
        )
remove_parser = subparsers.add_parser(
"remove",
help="Remove an extension"
)
remove_parser.add_argument(
"--prefix",
action="store"
)
install_parser.set_defaults(main=install_cmd)
remove_parser.set_defaults(main=remove_cmd)
return parser
def find_static():
static_path = os.path.join(os.getcwd(), 'static')
if os.path.exists(static_path):
return static_path
else:
raise StaticPathNotFound
def setup(**kwargs):
parser = create_parser()
args = parser.parse_args()
args.main(args, kwargs)
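# Example (sketch, hypothetical values; not part of this module's API): a
# package's install script would call setup() with its nbextension metadata,
# and argparse then dispatches to install_cmd or remove_cmd depending on the
# subcommand given on the command line.
def _demo_setup_call():
    return setup(
        name='myextension',       # hypothetical package/extension name
        static=find_static(),     # directory holding main.js / tree.js / edit.js
        version='0.1.0',          # hypothetical version string
    )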
|
|
"""Script to set up test data for a Kvoti instance.
As before we tried to do this with migrations but ran into problems
early on with custom permissions not being created.
In any case, it's probably easier/better to have a single bootstrap
script instead of a bunch of data migrations.
"""
# Must install config and setup Django before importing models
from configurations import importer
importer.install()
import django
django.setup()
####################
import os
import random
from django.conf import settings
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
import casenotes.models
import chat.models
import configuration.models
import core
import dittoforms.models
from users.models import User
INTERACTIONS = ["Messaging"]
REG_FORM_SPEC = '[{"name":"Name","on":true,"fields":[{"name":"First name"},{"name":"Last name"}]},{"name":"Gender","on":true,"options":["Male","Female","Other"]},{"name":"Ethnicity","on":true,"options":["White British","Other"]},{"name":"How did you hear about us?","on":true,"multiple":true,"options":["Internet search","Magazine","Other"]}]'
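# Example (sketch, illustrative helper; not used by run() below): REG_FORM_SPEC
# is a JSON-encoded list of form sections, each with a name, an "on" flag and
# either nested fields or a list of options.
def _demo_reg_form_spec():
    import json
    sections = json.loads(REG_FORM_SPEC)
    return [section['name'] for section in sections]
    # -> ['Name', 'Gender', 'Ethnicity', 'How did you hear about us?']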
def run():
setup_guest_passwords()
setup_site()
setup_features()
setup_default_roles()
setup_permissions()
setup_interactions()
setup_admin_users()
setup_members()
setup_reg_form()
setup_configurable_values()
setup_chat_conf()
setup_case_notes()
setup_sessions()
def setup_guest_passwords():
global GUEST_PASSWORDS, DEFAULT_PASSWORD
if 'GUEST_PASSWORDS' in os.environ:
GUEST_PASSWORDS = os.environ['GUEST_PASSWORDS'].split()
# prob ok to reuse the same password for any extra test users?
DEFAULT_PASSWORD = GUEST_PASSWORDS[0]
else:
GUEST_PASSWORDS = None
def setup_site(name='Kvoti', domain='kvoti.technology'):
site = Site.objects.get_current()
site.name = name
domain = 'localhost:8000' if settings.DEBUG else domain
site.domain = domain
site.save()
def setup_features():
for slug, name, perms in (
('chatroom', 'Chatroom', [
('can_chat', 'Can chat'),
('configure_chatroom', 'Can configure chatrooms')
]),
('casenotes', 'Case notes', [
('add_casenote', 'Can add case notes'),
('view_casenote', 'Can view case notes'),
('manage_casenote', 'Can manage case notes')
]),
):
feature, _ = configuration.models.Feature.objects.get_or_create(
slug=slug, name=name)
content_type = ContentType.objects.get_for_model(configuration.models.Feature)
for codename, name in perms:
perm, _ = Permission.objects.get_or_create(
codename=codename,
defaults={'content_type': content_type})
perm.name = name
perm.save()
feature.permissions.add(perm)
def setup_default_roles():
for group in core.DEFAULT_ROLES:
group, _ = Group.objects.get_or_create(name=group)
# TODO split out the kvoti example network stuff
for group in ['Adviser', 'Counsellor']:
group, _ = Group.objects.get_or_create(name=group)
def setup_permissions():
content_type = ContentType.objects.get_for_model(User)
perm, _ = Permission.objects.get_or_create(
codename='can_admin',
content_type=content_type)
perm.name = 'Can administer'
perm.save()
Group.objects.get(name=core.ADMIN_ROLE).permissions.add(perm)
    admin_group = Group.objects.get(name=core.ADMIN_ROLE)
    for codename in ('invite_user', 'assign_role', 'configure_chatroom',
                     'add_casenote', 'view_casenote', 'manage_casenote'):
        admin_group.permissions.add(Permission.objects.get(codename=codename))
perm = Permission.objects.get(codename='guest')
Group.objects.get(name=core.GUEST_ROLE).permissions.add(perm)
perm = Permission.objects.get(codename='can_chat')
for group in Group.objects.all():
group.permissions.add(perm)
def setup_interactions():
for interaction in INTERACTIONS:
configuration.models.Interaction.objects.get_or_create(name=interaction)
def setup_admin_users():
_create_user('admin', core.ADMIN_ROLE)
_create_user('visitor', core.ADMIN_ROLE)
def setup_members():
for name, gender in [
('mark', 'Male'),
('sarah', 'Female'),
('ross', 'Male'),
('emma', 'Female'),
]:
_create_user(name, core.MEMBER_ROLE, gender)
for i in range(1, 4):
_create_user('member%s' % i, core.MEMBER_ROLE, 'Female')
for user, role in [
['adviser', 'Adviser'],
['counsellor', 'Counsellor']
]:
_create_user(user, role)
def _create_user(username, group_name, gender=None):
user, created = User.objects.get_or_create(
username=username,
# this email here only needed for django_comments
defaults={'email': '%[email protected]' % username}
)
user.emailaddress_set.get_or_create(
verified=1,
defaults={'email': '%[email protected]' % username})
if created:
if 'GUEST_PASSWORDS' in os.environ:
try:
password = GUEST_PASSWORDS.pop()
except IndexError:
password = DEFAULT_PASSWORD
else:
password = 'x'
user.set_password(password)
user.save()
if gender is not None:
user.custom_data.create(field_name="Gender",
field_value=gender)
user.groups.add(Group.objects.get(name=group_name))
def setup_reg_form():
for role in Group.objects.all():
form = dittoforms.models.FormSpec.objects.create(
slug='reg',
spec=REG_FORM_SPEC
)
configuration.models.RegForm.objects.create(
role=role,
form=form
)
def setup_configurable_values():
for role in Group.objects.all():
configuration.models.Values.objects.create(role=role)
def setup_chat_conf():
room = chat.models.Room.objects.create(
slug='main',
name='Main chatroom',
is_regular=True
)
for day in range(7):
chat.models.Slot.objects.create(
room=room,
day=day,
start=8,
end=18,
)
def setup_case_notes():
client = User.objects.get(username="mark")
author = User.objects.get(username="sarah")
for i in range(1, 5):
casenotes.models.CaseNote.objects.create(
author=author,
client=client,
title="Case note %s" % i,
text="Case note %s" % i
)
def setup_sessions():
for i in range(20):
session = chat.models.Session.objects.create(
session_id='session%s' % (i + 1)
)
user = random.choice(User.objects.all())
chat.models.SessionRating.objects.create(
user=user,
rating=3,
session=session
)
if __name__ == '__main__':
run()
|
|
"""
sentry.search.django.backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from django.db import router
from django.db.models import Q
from sentry.api.paginator import DateTimePaginator, Paginator
from sentry.search.base import ANY, EMPTY, SearchBackend
from sentry.search.django.constants import (
MSSQL_ENGINES, MSSQL_SORT_CLAUSES, MYSQL_SORT_CLAUSES, ORACLE_SORT_CLAUSES,
SORT_CLAUSES, SQLITE_SORT_CLAUSES
)
from sentry.utils.db import get_db_engine
class DjangoSearchBackend(SearchBackend):
def _tags_to_filter(self, project, tags):
        # Django doesn't support union, so we limit results and try to find
# reasonable matches
from sentry.models import GroupTagValue
# ANY matches should come last since they're the least specific and
# will provide the largest range of matches
tag_lookups = sorted(six.iteritems(tags), key=lambda x: x != ANY)
# get initial matches to start the filter
matches = None
# for each remaining tag, find matches contained in our
# existing set, pruning it down each iteration
for k, v in tag_lookups:
if v is EMPTY:
return None
elif v != ANY:
base_qs = GroupTagValue.objects.filter(
key=k,
value=v,
project_id=project.id,
)
else:
base_qs = GroupTagValue.objects.filter(
key=k,
project_id=project.id,
).distinct()
if matches:
base_qs = base_qs.filter(group_id__in=matches)
else:
# restrict matches to only the most recently seen issues
base_qs = base_qs.order_by('-last_seen')
matches = list(base_qs.values_list('group_id', flat=True)[:1000])
if not matches:
return None
return matches
def _build_queryset(self, project, query=None, status=None, tags=None,
bookmarked_by=None, assigned_to=None, first_release=None,
sort_by='date', unassigned=None, subscribed_by=None,
age_from=None, age_from_inclusive=True,
age_to=None, age_to_inclusive=True,
last_seen_from=None, last_seen_from_inclusive=True,
last_seen_to=None, last_seen_to_inclusive=True,
date_from=None, date_from_inclusive=True,
date_to=None, date_to_inclusive=True,
active_at_from=None, active_at_from_inclusive=True,
active_at_to=None, active_at_to_inclusive=True,
times_seen=None,
times_seen_lower=None, times_seen_lower_inclusive=True,
times_seen_upper=None, times_seen_upper_inclusive=True,
cursor=None, limit=None):
from sentry.models import Event, Group, GroupSubscription, GroupStatus
engine = get_db_engine('default')
queryset = Group.objects.filter(project=project)
if query:
# TODO(dcramer): if we want to continue to support search on SQL
# we should at least optimize this in Postgres so that it does
# the query filter **after** the index filters, and restricts the
# result set
queryset = queryset.filter(
Q(message__icontains=query) |
Q(culprit__icontains=query)
)
if status is None:
status_in = (
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
GroupStatus.PENDING_MERGE,
)
queryset = queryset.exclude(status__in=status_in)
else:
queryset = queryset.filter(status=status)
if bookmarked_by:
queryset = queryset.filter(
bookmark_set__project=project,
bookmark_set__user=bookmarked_by,
)
if assigned_to:
queryset = queryset.filter(
assignee_set__project=project,
assignee_set__user=assigned_to,
)
elif unassigned in (True, False):
queryset = queryset.filter(
assignee_set__isnull=unassigned,
)
if subscribed_by is not None:
queryset = queryset.filter(
id__in=GroupSubscription.objects.filter(
project=project,
user=subscribed_by,
is_active=True,
).values_list('group'),
)
if first_release:
if first_release is EMPTY:
return queryset.none()
queryset = queryset.filter(
first_release__organization_id=project.organization_id,
first_release__version=first_release,
)
if tags:
matches = self._tags_to_filter(project, tags)
if not matches:
return queryset.none()
queryset = queryset.filter(
id__in=matches,
)
if age_from or age_to:
params = {}
if age_from:
if age_from_inclusive:
params['first_seen__gte'] = age_from
else:
params['first_seen__gt'] = age_from
if age_to:
if age_to_inclusive:
params['first_seen__lte'] = age_to
else:
params['first_seen__lt'] = age_to
queryset = queryset.filter(**params)
if last_seen_from or last_seen_to:
params = {}
if last_seen_from:
if last_seen_from_inclusive:
params['last_seen__gte'] = last_seen_from
else:
params['last_seen__gt'] = last_seen_from
if last_seen_to:
if last_seen_to_inclusive:
params['last_seen__lte'] = last_seen_to
else:
params['last_seen__lt'] = last_seen_to
queryset = queryset.filter(**params)
if active_at_from or active_at_to:
params = {}
if active_at_from:
if active_at_from_inclusive:
params['active_at__gte'] = active_at_from
else:
params['active_at__gt'] = active_at_from
if active_at_to:
if active_at_to_inclusive:
params['active_at__lte'] = active_at_to
else:
params['active_at__lt'] = active_at_to
queryset = queryset.filter(**params)
if times_seen is not None:
queryset = queryset.filter(times_seen=times_seen)
if times_seen_lower is not None or times_seen_upper is not None:
params = {}
if times_seen_lower is not None:
if times_seen_lower_inclusive:
params['times_seen__gte'] = times_seen_lower
else:
params['times_seen__gt'] = times_seen_lower
if times_seen_upper is not None:
if times_seen_upper_inclusive:
params['times_seen__lte'] = times_seen_upper
else:
params['times_seen__lt'] = times_seen_upper
queryset = queryset.filter(**params)
if date_from or date_to:
params = {
'project_id': project.id,
}
if date_from:
if date_from_inclusive:
params['datetime__gte'] = date_from
else:
params['datetime__gt'] = date_from
if date_to:
if date_to_inclusive:
params['datetime__lte'] = date_to
else:
params['datetime__lt'] = date_to
event_queryset = Event.objects.filter(**params)
if query:
event_queryset = event_queryset.filter(message__icontains=query)
# limit to the first 1000 results
group_ids = event_queryset.distinct().values_list(
'group_id',
flat=True
)[:1000]
# if Event is not on the primary database remove Django's
# implicit subquery by coercing to a list
base = router.db_for_read(Group)
using = router.db_for_read(Event)
# MySQL also cannot do a LIMIT inside of a subquery
if base != using or engine.startswith('mysql'):
group_ids = list(group_ids)
queryset = queryset.filter(
id__in=group_ids,
)
if engine.startswith('sqlite'):
score_clause = SQLITE_SORT_CLAUSES[sort_by]
elif engine.startswith('mysql'):
score_clause = MYSQL_SORT_CLAUSES[sort_by]
elif engine.startswith('oracle'):
score_clause = ORACLE_SORT_CLAUSES[sort_by]
elif engine in MSSQL_ENGINES:
score_clause = MSSQL_SORT_CLAUSES[sort_by]
else:
score_clause = SORT_CLAUSES[sort_by]
queryset = queryset.extra(
select={'sort_value': score_clause},
)
return queryset
def query(self, project, **kwargs):
queryset = self._build_queryset(project=project, **kwargs)
sort_by = kwargs.get('sort_by', 'date')
limit = kwargs.get('limit', 100)
cursor = kwargs.get('cursor')
# HACK: don't sort by the same column twice
if sort_by == 'date':
paginator_cls = DateTimePaginator
sort_clause = '-last_seen'
elif sort_by == 'priority':
paginator_cls = Paginator
sort_clause = '-score'
elif sort_by == 'new':
paginator_cls = DateTimePaginator
sort_clause = '-first_seen'
elif sort_by == 'freq':
paginator_cls = Paginator
sort_clause = '-times_seen'
else:
paginator_cls = Paginator
sort_clause = '-sort_value'
queryset = queryset.order_by(sort_clause)
paginator = paginator_cls(queryset, sort_clause)
return paginator.get_result(limit, cursor)
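# Minimal usage sketch (the `project` object and the tag values are
# illustrative assumptions, not part of this module): run a text search
# restricted to one tag and sorted by event frequency.
#
#     backend = DjangoSearchBackend()
#     results = backend.query(
#         project,                        # a sentry Project instance
#         query='TypeError',
#         tags={'browser': 'Chrome'},
#         sort_by='freq',
#         limit=25,
#     )
#     # `results` should behave like a paginated list of Group objects.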
|
|
import functools
from operator import mul
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import configuration
from chainer import function_node
from chainer.functions.pooling import max_pooling_nd_kernel
from chainer.functions.pooling import pooling_nd
from chainer.utils import conv_nd
import chainerx
if cuda.cudnn_enabled:
_cudnn_version = cuda.cuda.cudnn.getVersion()
class MaxPoolingND(pooling_nd._PoolingND):
"""Max pooling over a set of N-dimensional planes.
.. warning::
This feature is experimental. The interface can change in the future.
"""
def __init__(self, ndim, ksize, stride=None, pad=0, cover_all=True,
return_indices=False):
super(MaxPoolingND, self).__init__(
ndim, ksize, stride=stride, pad=pad, cover_all=cover_all,
return_indices=return_indices)
def forward_chainerx(self, x):
# TODO(sonots): Support return_indices in ChainerX
if self.return_indices:
return chainer.Fallback
if x[0].device.backend.name == 'cuda':
# TODO(sonots): Support more ndim in ChainerX
if self.ndim not in [2, 3]:
return chainer.Fallback
return chainerx.max_pool(x[0], self.ksize, self.stride, self.pad,
self.cover_all),
def forward_cpu(self, x):
self._in_shape = x[0].shape
self._in_dtype = x[0].dtype
col = conv_nd.im2col_nd_cpu(
x[0], self.ksize, self.stride, self.pad, pval=-float('inf'),
cover_all=self.cover_all)
n, c = col.shape[:2]
mid = (len(col.shape) - 2) // 2 + 2
ksize = col.shape[2:mid]
outs = col.shape[mid:]
# (n, c, k_1 * k_2 * ... * k_N, out_1, out_2, ..., out_N)
col_shape = (n, c) + (functools.reduce(mul, ksize),) + outs
col = col.reshape(col_shape)
# We take the maximum twice (argmax, then max), since an implementation
# based on numpy.choose hits a NumPy bug when kh * kw >= 32.
self.indexes = col.argmax(axis=2)
y = col.max(axis=2)
return y,
def forward_gpu(self, x):
if chainer.should_use_cudnn('>=auto') and 2 <= self.ndim <= 3:
# With cuDNN v3 or greater, use the cuDNN implementation for inputs
# with two or three spatial dimensions.
return super(MaxPoolingND, self).forward_gpu(x)
self._in_shape = x[0].shape
self._in_dtype = x[0].dtype
n, c = x[0].shape[:2]
dims = x[0].shape[2:]
ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, self.cover_all)
for (d, k, s, p) in six.moves.zip(
dims, self.ksize, self.stride, self.pad))
# (n, c, y_1, y_2, ..., y_N)
y_shape = (n, c) + ys
y = cuda.cupy.empty(y_shape, dtype=x[0].dtype)
self.indexes = cuda.cupy.empty(y_shape, dtype=numpy.int32)
in_params, out_params, operation, name = \
max_pooling_nd_kernel.MaxPoolingNDKernelForward.generate(self.ndim)
cuda.elementwise(in_params, out_params, operation, name)(
x[0].reduced_view(),
*(dims + ys + self.ksize + self.stride + self.pad +
(y, self.indexes)))
return y,
def backward(self, indexes, gy):
return MaxPoolingNDGrad(self).apply(gy)
def _get_pool_mode(self):
if _cudnn_version >= 6000 and configuration.config.cudnn_deterministic:
return cuda.cuda.cudnn.CUDNN_POOLING_MAX_DETERMINISTIC
else:
return cuda.cuda.cudnn.CUDNN_POOLING_MAX
class MaxPoolingNDGrad(function_node.FunctionNode):
def __init__(self, mpoolnd):
self.ndim = mpoolnd.ndim
self.ksize = mpoolnd.ksize
self.stride = mpoolnd.stride
self.pad = mpoolnd.pad
self.cover_all = mpoolnd.cover_all
self._used_cudnn = mpoolnd._used_cudnn
if not self._used_cudnn:
self.indexes = mpoolnd.indexes
self._in_shape = mpoolnd._in_shape
self._in_dtype = mpoolnd._in_dtype
self.mpoolnd = mpoolnd
def forward_cpu(self, gy):
ndim = self.ndim
n, c = gy[0].shape[:2]
outs = gy[0].shape[2:]
dims = self._in_shape[2:]
prod_outs = functools.reduce(mul, outs)
prod_ksize = functools.reduce(mul, self.ksize)
gcol = numpy.zeros(
n * c * prod_outs * prod_ksize, dtype=self._in_dtype)
indexes = self.indexes.flatten()
indexes += numpy.arange(0, indexes.size * prod_ksize, prod_ksize)
gcol[indexes] = gy[0].ravel()
gcol_shape = (n, c) + outs + self.ksize
gcol = gcol.reshape(gcol_shape)
for i in six.moves.range(ndim):
gcol = numpy.swapaxes(gcol, 2 + i, ndim + 2 + i)
gx = conv_nd.col2im_nd_cpu(gcol, self.stride, self.pad, dims)
return gx,
def forward_gpu(self, gy):
if self._used_cudnn:
x = self.mpoolnd.get_retained_inputs()[0].array
return self.mpoolnd.backward_gpu((x,), gy)
n, c = self._in_shape[:2]
dims = self._in_shape[2:]
ys = gy[0].shape[2:]
gx = cuda.cupy.empty(self._in_shape, self._in_dtype)
ndim = self.ndim
in_params, out_params, operation, name = \
max_pooling_nd_kernel.MaxPoolingNDKernelBackward.generate(ndim)
cuda.elementwise(in_params, out_params, operation, name)(
gy[0].reduced_view(), self.indexes.reduced_view(),
*(dims + ys + self.ksize + self.stride + self.pad + (gx,)))
return gx,
def backward(self, indexes, ggx):
return MaxPoolingNDWithIndexes(self.mpoolnd).apply(ggx)
class MaxPoolingNDWithIndexes(function_node.FunctionNode):
def __init__(self, mpoolnd):
self.ndim = mpoolnd.ndim
self.ksize = mpoolnd.ksize
self.stride = mpoolnd.stride
self.pad = mpoolnd.pad
self.cover_all = mpoolnd.cover_all
self._used_cudnn = mpoolnd._used_cudnn
if not self._used_cudnn:
self.indexes = mpoolnd.indexes
else:
self.mpoolnd = mpoolnd
def forward_cpu(self, x):
col = conv_nd.im2col_nd_cpu(
x[0], self.ksize, self.stride, self.pad, pval=-float('inf'),
cover_all=self.cover_all)
n, c = col.shape[:2]
mid = (len(col.shape) - 2) // 2 + 2
ksize = col.shape[2:mid]
outs = col.shape[mid:]
# (n, c, k_1 * k_2 * ... * k_N, out_1, out_2, ..., out_N)
ksize_total = functools.reduce(mul, ksize)
col_shape = (n, c) + (ksize_total,) + outs
col = col.reshape(col_shape)
# (n, c, out_1, ..., out_N, k_1 * .. * k_N)
col_indexes = (0, 1) + tuple(six.moves.range(3, 3 + self.ndim)) + (2,)
col = col.transpose(col_indexes)
col = col.reshape(-1, ksize_total)
indexes = self.indexes.ravel()
col = col[numpy.arange(len(indexes)), indexes]
return col.reshape((n, c) + outs),
def forward_gpu(self, inputs):
if self._used_cudnn:
x = self.mpoolnd.get_retained_inputs()[0].array
return self._forward_gpu_compute_indexes_again((x, inputs[0]))
x, = inputs
self._in_shape = x.shape
self._in_dtype = x.dtype
n, c = x.shape[:2]
dims = x.shape[2:]
ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, self.cover_all)
for (d, k, s, p) in six.moves.zip(
dims, self.ksize, self.stride, self.pad))
# (n, c, y_1, y_2, ..., y_N)
y_shape = (n, c) + ys
y = cuda.cupy.empty(y_shape, dtype=x.dtype)
cls = max_pooling_nd_kernel.MaxPoolingNDKernelForwardWithIndexes
in_params, out_params, operation, name = cls.generate(self.ndim)
cuda.elementwise(in_params, out_params, operation, name)(
x.reduced_view(),
*(dims + ys + self.ksize + self.stride + self.pad +
(self.indexes.reduced_view(), y)))
return y,
def _forward_gpu_compute_indexes_again(self, inputs):
x, ggx = inputs
self._in_shape = x.shape
self._in_dtype = x.dtype
n, c = x.shape[:2]
dims = x.shape[2:]
ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, self.cover_all)
for (d, k, s, p) in six.moves.zip(
dims, self.ksize, self.stride, self.pad))
# (n, c, y_1, y_2, ..., y_N)
y_shape = (n, c) + ys
y = cuda.cupy.empty(y_shape, dtype=x.dtype)
cls = max_pooling_nd_kernel.MaxPoolingNDKernelForwardWithIndexes1
in_params, out_params, operation, name = cls.generate(self.ndim)
cuda.elementwise(in_params, out_params, operation, name)(
x.reduced_view(),
*(dims + ys + self.ksize + self.stride + self.pad +
(ggx.reduced_view(), y)))
return y,
def max_pooling_nd(x, ksize, stride=None, pad=0, cover_all=True,
return_indices=False):
"""N-dimensionally spatial max pooling function.
.. warning::
This feature is experimental. The interface can change in the future.
This function provides an N-dimensionally generalized version of
:func:`~chainer.functions.max_pooling_2d`. This acts similarly to
:func:`~chainer.functions.convolution_nd`, but it computes the maximum of
the input spatial patch for each channel without any parameter instead of
computing the inner products.
Args:
x (~chainer.Variable): Input variable.
ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent. If
``None`` is specified, then it uses the same stride as the pooling
window size.
pad (int or tuple of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
cover_all (bool): If ``True``, all spatial locations are pooled into
some output pixels. It may make the output size larger.
return_indices (bool): If ``True``, pooling indices array is returned
together with the output variable. The returned indices are
intended to be used with :func:`chainer.functions.upsampling_nd`.
Note that cuDNN will not be used for this function if
``return_indices`` is set to ``True``, as cuDNN does not return
indices information.
Returns:
~chainer.Variable or tuple:
When ``return_indices`` is ``False`` (default), returns the output
variable.
When ``True``, returns the tuple of the output variable and
pooling indices (:ref:`ndarray`). Pooling indices will be on the
same device as the input.
"""
ndim = len(x.shape[2:])
func = MaxPoolingND(ndim, ksize, stride, pad, cover_all, return_indices)
if return_indices:
with chainer.using_config('use_cudnn', 'never'):
out = func.apply((x,))[0]
return out, func.indexes
return func.apply((x,))[0]
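# Minimal usage sketch: 3-dimensional max pooling over a random batch laid out
# as (n, c, d_1, ..., d_N); the shapes below are illustrative.
#
#     import numpy as np
#     import chainer.functions as F
#
#     x = np.random.rand(2, 3, 8, 8, 8).astype(np.float32)
#     y = F.max_pooling_nd(x, ksize=2, stride=2)
#     # y.shape should be (2, 3, 4, 4, 4)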
def max_pooling_1d(x, ksize, stride=None, pad=0, cover_all=True,
return_indices=False):
"""1-dimensional spatial max pooling function.
.. warning::
This feature is experimental. The interface can change in the future.
.. note::
This function calls :func:`~chainer.functions.max_pooling_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.max_pooling_nd`.
"""
if len(x.shape[2:]) != 1:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 1. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return max_pooling_nd(x, ksize, stride, pad, cover_all, return_indices)
def max_pooling_3d(x, ksize, stride=None, pad=0, cover_all=True,
return_indices=False):
"""3-dimensional spatial max pooling function.
.. warning::
This feature is experimental. The interface can change in the future.
.. note::
This function calls :func:`~chainer.functions.max_pooling_nd`
internally, so see the details of the behavior in
the documentation of :func:`~chainer.functions.max_pooling_nd`.
"""
if len(x.shape[2:]) != 3:
raise ValueError(
'The number of dimensions under channel dimension of the input '
'\'x\' should be 3. But the actual ndim was {}.'.format(
len(x.shape[2:])))
return max_pooling_nd(x, ksize, stride, pad, cover_all, return_indices)
|
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein, Ant1, Marius van Voorden
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2gif
Provides functionality for reading and writing animated GIF images.
Use writeGif to write a series of numpy arrays or PIL images as an
animated GIF. Use readGif to read an animated gif as a series of numpy
arrays.
Note that since July 2004 all patents on the LZW compression algorithm have
expired. Therefore the GIF format may now be used freely.
Acknowledgements
----------------
Many thanks to Ant1 for:
* noting the use of "palette=PIL.Image.ADAPTIVE", which significantly
improves the results.
* the modifications to save each image with its own palette, or optionally
the global palette (if it's the same).
Many thanks to Marius van Voorden for porting the NeuQuant quantization
algorithm of Anthony Dekker to Python (See the NeuQuant class for its
license).
Many thanks to Alex Robinson for implementing the concept of subrectangles,
which (depending on image content) can give a very significant reduction in
file size.
This code is based on gifmaker (in the scripts folder of the source
distribution of PIL)
Useful links
-------------
* http://tronche.com/computer-graphics/gif/
* http://en.wikipedia.org/wiki/Graphics_Interchange_Format
* http://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
# todo: This module should be part of imageio (or at least based on)
import os
import time
try:
import PIL
from PIL import Image
from PIL.GifImagePlugin import getheader, getdata
except ImportError:
PIL = None
try:
import numpy as np
except ImportError:
np = None
def get_cKDTree():
try:
from scipy.spatial import cKDTree
except ImportError:
cKDTree = None
return cKDTree
# getheader gives a 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdatas()[1:] is the image data itself in chuncks of 256 bytes (well
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
def checkImages(images):
""" checkImages(images)
Check numpy images and correct intensity range etc.
The same for all movie formats.
"""
# Init results
images2 = []
for im in images:
if PIL and isinstance(im, PIL.Image.Image):
# We assume PIL images are all right
images2.append(im)
elif np and isinstance(im, np.ndarray):
# Check and convert dtype
if im.dtype == np.uint8:
images2.append(im) # Ok
elif im.dtype in [np.float32, np.float64]:
im = im.copy()
im[im < 0] = 0
im[im > 1] = 1
im *= 255
images2.append(im.astype(np.uint8))
else:
im = im.astype(np.uint8)
images2.append(im)
# Check size
if im.ndim == 2:
pass # ok
elif im.ndim == 3:
if im.shape[2] not in [3, 4]:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('Invalid image type: ' + str(type(im)))
# Done
return images2
def intToBin(i):
"""Integer to two bytes"""
# divide into two parts (bytes)
i1 = i % 256
i2 = int(i / 256)
# make string (little endian)
return chr(i1) + chr(i2)
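# Worked example: intToBin(1) gives '\x01\x00' and intToBin(256) gives
# '\x00\x01', i.e. the low byte comes first, matching the little-endian
# 16-bit fields used throughout the GIF format.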
class GifWriter:
""" GifWriter()
Class that contains methods for helping write the animated GIF file.
"""
def getheaderAnim(self, im):
""" getheaderAnim(im)
Get animation header. To replace PIL's getheader()[0]
"""
bb = "GIF89a"
bb += intToBin(im.size[0])
bb += intToBin(im.size[1])
bb += "\x87\x00\x00"
return bb
def getImageDescriptor(self, im, xy=None):
""" getImageDescriptor(im, xy=None)
Used for the local color table properties per image.
Otherwise the global color table applies to all frames, irrespective of
whether additional colors come into play that would require a redefined
palette. Still a maximum of 256 colors per frame, obviously.
Written by Ant1 on 2010-08-22
Modified by Alex Robinson in January 2011 to implement subrectangles.
"""
# Default: use full image and place it at the upper left
if xy is None:
xy = (0, 0)
# Image separator,
bb = '\x2C'
# Image position and size
bb += intToBin(xy[0]) # Left position
bb += intToBin(xy[1]) # Top position
bb += intToBin(im.size[0]) # image width
bb += intToBin(im.size[1]) # image height
# packed field: local color table flag = 1, interlace = 0, sorted table = 0,
# reserved = 00, lct size = 111 (i.e. 2**(7 + 1) = 256 colors).
bb += '\x87'
# The LZW minimum code size comes later, at the beginning of the [image data] blocks
return bb
def getAppExt(self, loops=float('inf')):
""" getAppExt(loops=float('inf'))
Application extension. This part specifies the number of loops.
If loops is 0 or inf, the animation loops infinitely.
"""
if loops == 0 or loops == float('inf'):
loops = 2 ** 16 - 1
#bb = "" # application extension should not be used
# (the extension interprets zero loops
# to mean an infinite number of loops)
# Mmm, does not seem to work
if True:
bb = "\x21\xFF\x0B" # application extension
bb += "NETSCAPE2.0"
bb += "\x03\x01"
bb += intToBin(loops)
bb += '\x00' # end
return bb
def getGraphicsControlExt(self, duration=0.1, dispose=2):
""" getGraphicsControlExt(duration=0.1, dispose=2)
Graphics Control Extension. A sort of header at the start of
each image. Specifies duration and transparency.
Dispose
-------
* 0 - No disposal specified.
* 1 - Do not dispose. The graphic is to be left in place.
* 2 - Restore to background color. The area used by the graphic
must be restored to the background color.
* 3 - Restore to previous. The decoder is required to restore the
area overwritten by the graphic with what was there prior to
rendering the graphic.
* 4-7 - To be defined.
"""
bb = '\x21\xF9\x04'
bb += chr((dispose & 3) << 2) # low bit 1 == transparency,
# 2nd bit 1 == user input, the next 3 bits (of which the low two are used)
# hold the disposal method.
bb += intToBin(int(duration * 100)) # in 100th of seconds
bb += '\x00' # no transparent color
bb += '\x00' # end
return bb
def handleSubRectangles(self, images, subRectangles):
""" handleSubRectangles(images)
Handle the sub-rectangle stuff. If the rectangles are given by the
user, the values are checked. Otherwise the subrectangles are
calculated automatically.
"""
if isinstance(subRectangles, (tuple, list)):
# xy given directly
# Check xy
xy = subRectangles
if xy is None:
xy = (0, 0)
if hasattr(xy, '__len__'):
if len(xy) == len(images):
xy = [xxyy for xxyy in xy]
else:
raise ValueError("len(xy) doesn't match amount of images.")
else:
xy = [xy for im in images]
xy[0] = (0, 0)
else:
# Calculate xy using some basic image processing
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to use auto-subRectangles.")
# First make numpy arrays if required
for i in range(len(images)):
im = images[i]
if isinstance(im, Image.Image):
tmp = im.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape) == 0:
raise MemoryError("Too little memory to convert PIL image to array")
images[i] = a
# Determine the sub rectangles
images, xy = self.getSubRectangles(images)
# Done
return images, xy
def getSubRectangles(self, ims):
""" getSubRectangles(ims)
Calculate the minimal rectangles that need updating each frame.
Returns a two-element tuple containing the cropped images and a
list of x-y positions.
Calculating the subrectangles takes extra time, obviously. However,
because the image sizes are reduced, the actual writing of the GIF
goes faster; in some cases applying this method therefore produces the GIF faster overall.
"""
# Check image count
if len(ims) < 2:
return ims, [(0, 0) for i in ims]
# We need numpy
if np is None:
raise RuntimeError("Need Numpy to calculate sub-rectangles. ")
# Prepare
ims2 = [ims[0]]
xy = [(0, 0)]
t0 = time.time()
# Iterate over images
prev = ims[0]
for im in ims[1:]:
# Get difference, sum over colors
diff = np.abs(im-prev)
if diff.ndim == 3:
diff = diff.sum(2)
# Get begin and end for both dimensions
X = np.argwhere(diff.sum(0))
Y = np.argwhere(diff.sum(1))
# Get rect coordinates
if X.size and Y.size:
x0, x1 = X[0], X[-1] + 1
y0, y1 = Y[0], Y[-1] + 1
else: # No change ... make it minimal
x0, x1 = 0, 2
y0, y1 = 0, 2
# Cut out and store
im2 = im[y0:y1, x0:x1]
prev = im
ims2.append(im2)
xy.append((x0, y0))
# Done
# print('%1.2f seconds to determine subrectangles of %i images' %
# (time.time()-t0, len(ims2)))
return ims2, xy
def convertImagesToPIL(self, images, dither, nq=0):
""" convertImagesToPIL(images, nq=0)
Convert images to paletted PIL images, which can then be
written to a single animated GIF.
"""
# Convert to PIL images
images2 = []
for im in images:
if isinstance(im, Image.Image):
images2.append(im)
elif np and isinstance(im, np.ndarray):
if im.ndim == 3 and im.shape[2] == 3:
im = Image.fromarray(im, 'RGB')
elif im.ndim == 3 and im.shape[2] == 4:
im = Image.fromarray(im[:, :, :3], 'RGB')
elif im.ndim == 2:
im = Image.fromarray(im, 'L')
images2.append(im)
# Convert to paletted PIL images
images, images2 = images2, []
if nq >= 1:
# NeuQuant algorithm
for im in images:
im = im.convert("RGBA") # NQ assumes RGBA
nqInstance = NeuQuant(im, int(nq)) # Learn colors from image
if dither:
im = im.convert("RGB").quantize(palette=nqInstance.paletteImage())
else:
# Use to quantize the image itself
im = nqInstance.quantize(im)
images2.append(im)
else:
# Adaptive PIL algorithm
AD = Image.ADAPTIVE
for im in images:
im = im.convert('P', palette=AD, dither=dither)
images2.append(im)
# Done
return images2
def writeGifToFile(self, fp, images, durations, loops, xys, disposes):
""" writeGifToFile(fp, images, durations, loops, xys, disposes)
Given a set of images writes the bytes to the specified stream.
"""
# Obtain palette for all images and count each occurrence
palettes, occur = [], []
for im in images:
palette = getheader(im)[1]
if not palette:
palette = PIL.ImagePalette.ImageColor
if isinstance(palette, type(os)):
# Older or newer? version of Pil(low)
data = PIL.ImagePalette.ImagePalette().getdata()
palette = data[0].encode('utf-8') + data[1]
# Arg this does not work. Go use imageio
raise RuntimeError('Cannot get palette. '
'Maybe you should try imageio instead.')
palettes.append(palette)
for palette in palettes:
occur.append(palettes.count(palette))
# Select most-used palette as the global one (or first in case no max)
globalPalette = palettes[ occur.index(max(occur)) ]
# Init
frames = 0
firstFrame = True
for im, palette in zip(images, palettes):
if firstFrame:
# Write header
# Gather info
header = self.getheaderAnim(im)
appext = self.getAppExt(loops)
# Write
fp.write(header.encode('utf-8'))
fp.write(globalPalette)
fp.write(appext.encode('utf-8'))
# Next frame is not the first
firstFrame = False
if True:
# Write palette and image data
# Gather info
data = getdata(im)
imdes, data = data[0], data[1:]
graphext = self.getGraphicsControlExt(durations[frames],
disposes[frames])
# Make image descriptor suitable for using 256 local color palette
lid = self.getImageDescriptor(im, xys[frames])
# Write local header
if (palette != globalPalette) or (disposes[frames] != 2):
# Use local color palette
fp.write(graphext.encode('utf-8'))
fp.write(lid.encode('utf-8')) # write suitable image descriptor
fp.write(palette) # write local color table
fp.write('\x08'.encode('utf-8')) # LZW minimum size code
else:
# Use global color palette
fp.write(graphext.encode('utf-8'))
fp.write(imdes) # write suitable image descriptor
# Write image data
for d in data:
fp.write(d)
# Prepare for next round
frames = frames + 1
fp.write(";".encode('utf-8')) # end gif
return frames
## Exposed functions
def writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None):
""" writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None)
Write an animated gif from the specified images.
Parameters
----------
filename : string
The name of the file to write the image to.
images : list
Should be a list consisting of PIL images or numpy arrays.
The latter should be between 0 and 255 for integer types, and
between 0 and 1 for float types.
duration : scalar or list of scalars
The duration for all frames, or (if a list) for each frame.
repeat : bool or integer
The number of loops. If True, loops infinitely.
dither : bool
Whether to apply dithering
nq : integer
If nonzero, applies the NeuQuant quantization algorithm to create
the color palette. This algorithm is superior, but slower than
the standard PIL algorithm. The value of nq is the quality
parameter. 1 represents the best quality. 10 is in general a
good tradeoff between quality and speed. When using this option,
better results are usually obtained when subRectangles is False.
subRectangles : False, True, or a list of 2-element tuples
Whether to use sub-rectangles. If True, the minimal rectangle that
is required to update each frame is automatically detected. This
can give significant reductions in file size, particularly if only
a part of the image changes. You can also give a list of x-y
coordinates if you want to do the cropping yourself. The default
is True.
dispose : int
How to dispose each frame. 1 means that each frame is to be left
in place. 2 means the background color should be restored after
each frame. 3 means the decoder should restore the previous frame.
If subRectangles==False, the default is 2, otherwise it is 1.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to write animated gif files.")
# Check images
images = checkImages(images)
# Instantiate writer object
gifWriter = GifWriter()
# Check loops
if repeat is False:
loops = 1
elif repeat is True:
loops = 0 # zero means infinite
else:
loops = int(repeat)
# Check duration
if hasattr(duration, '__len__'):
if len(duration) == len(images):
duration = [d for d in duration]
else:
raise ValueError("len(duration) doesn't match amount of images.")
else:
duration = [duration for im in images]
# Check subrectangles
if subRectangles:
images, xy = gifWriter.handleSubRectangles(images, subRectangles)
defaultDispose = 1 # Leave image in place
else:
# Normal mode
xy = [(0, 0) for im in images]
defaultDispose = 2 # Restore to background color.
# Check dispose
if dispose is None:
dispose = defaultDispose
if hasattr(dispose, '__len__'):
if len(dispose) != len(images):
raise ValueError("len(xy) doesn't match amount of images.")
else:
dispose = [dispose for im in images]
# Make images in a format that we can write easy
images = gifWriter.convertImagesToPIL(images, dither, nq)
# Write
fp = open(filename, 'wb')
try:
gifWriter.writeGifToFile(fp, images, duration, loops, xy, dispose)
finally:
fp.close()
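# Minimal usage sketch (file name and frame data are illustrative): write three
# random RGB frames with per-frame durations, NeuQuant quantization (nq=10) and
# sub-rectangle optimization disabled.
#
#     frames = [np.random.randint(0, 255, (64, 64, 3)).astype(np.uint8)
#               for _ in range(3)]
#     writeGif('random.gif', frames, duration=[0.2, 0.5, 1.0], nq=10,
#              subRectangles=False)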
def readGif(filename, asNumpy=True):
""" readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is False, a list of PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
# Check whether it exists
if not os.path.isfile(filename):
raise IOError('File not found: ' + str(filename))
# Load file using PIL
pilIm = PIL.Image.open(filename)
pilIm.seek(0)
# Read all images inside
images = []
try:
while True:
# Get image as numpy array
tmp = pilIm.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape) == 0:
raise MemoryError("Too little memory to convert PIL image to array")
# Store, and next
images.append(a)
pilIm.seek(pilIm.tell() + 1)
except EOFError:
pass
# Convert to normal PIL images if needed
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append(PIL.Image.fromarray(im))
# Done
return images
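# Minimal usage sketch (file name is illustrative): read the frames back as
# uint8 numpy arrays.
#
#     frames = readGif('random.gif', asNumpy=True)
#     # frames[0].shape is typically (height, width, 3) after palette conversion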
class NeuQuant:
""" NeuQuant(image, samplefac=10, colors=256)
samplefac should be an integer of 1 or higher, 1
being the highest quality, but the slowest performance.
With a value of 10, one tenth of all pixels is used during
training. This value seems a nice tradeoff between speed
and quality.
colors is the number of colors to reduce the image to. This
should preferably be a power of two.
See also:
http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
License of the NeuQuant Neural-Net Quantization Algorithm
---------------------------------------------------------
Copyright (c) 1994 Anthony Dekker
Ported to python by Marius van Voorden in 2010
NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
See "Kohonen neural networks for optimal colour quantization"
in "network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
for a discussion of the algorithm.
See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
Any party obtaining a copy of these files from the author, directly or
indirectly, is granted, free of charge, a full and unrestricted
irrevocable, world-wide, paid up, royalty-free, nonexclusive right and
license to deal in this software and documentation files (the "Software"),
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons who receive copies from any such party to do so, with
the only requirement being that this copyright notice remain intact.
"""
NCYCLES = None # Number of learning cycles
NETSIZE = None # Number of colours used
SPECIALS = None # Number of reserved colours used
BGCOLOR = None # Reserved background colour
CUTNETSIZE = None
MAXNETPOS = None
INITRAD = None # For 256 colours, radius starts at 32
RADIUSBIASSHIFT = None
RADIUSBIAS = None
INITBIASRADIUS = None
RADIUSDEC = None # Factor of 1/30 each cycle
ALPHABIASSHIFT = None
INITALPHA = None # biased by 10 bits
GAMMA = None
BETA = None
BETAGAMMA = None
network = None # The network itself
colormap = None # The color map derived from the network
netindex = None # For network lookup - really 256
bias = None # Bias and freq arrays for learning
freq = None
pimage = None
# Four primes near 500 - assume no image has a length so large
# that it is divisible by all four primes
PRIME1 = 499
PRIME2 = 491
PRIME3 = 487
PRIME4 = 503
MAXPRIME = PRIME4
pixels = None
samplefac = None
a_s = None
def setconstants(self, samplefac, colors):
self.NCYCLES = 100 # Number of learning cycles
self.NETSIZE = colors # Number of colours used
self.SPECIALS = 3 # Number of reserved colours used
self.BGCOLOR = self.SPECIALS-1 # Reserved background colour
self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
self.MAXNETPOS = self.NETSIZE - 1
self.INITRAD = self.NETSIZE/8 # For 256 colours, radius starts at 32
self.RADIUSBIASSHIFT = 6
self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
self.RADIUSDEC = 30 # Factor of 1/30 each cycle
self.ALPHABIASSHIFT = 10 # Alpha starts at 1
self.INITALPHA = 1 << self.ALPHABIASSHIFT # biased by 10 bits
self.GAMMA = 1024.0
self.BETA = 1.0/1024.0
self.BETAGAMMA = self.BETA * self.GAMMA
self.network = np.empty((self.NETSIZE, 3), dtype='float64') # The network itself
self.colormap = np.empty((self.NETSIZE, 4), dtype='int32') # The color map derived from the network
self.netindex = np.empty(256, dtype='int32') # For network lookup - really 256
self.bias = np.empty(self.NETSIZE, dtype='float64') # Bias and freq arrays for learning
self.freq = np.empty(self.NETSIZE, dtype='float64')
self.pixels = None
self.samplefac = samplefac
self.a_s = {}
def __init__(self, image, samplefac=10, colors=256):
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy for the NeuQuant algorithm.")
# Check image
if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
raise IOError("Image is too small")
if image.mode != "RGBA":
raise IOError("Image mode should be RGBA.")
# Initialize
self.setconstants(samplefac, colors)
self.pixels = np.fromstring(image.tostring(), np.uint32)
self.setUpArrays()
self.learn()
self.fix()
self.inxbuild()
def writeColourMap(self, rgb, outstream):
for i in range(self.NETSIZE):
bb = self.colormap[i, 0]
gg = self.colormap[i, 1]
rr = self.colormap[i, 2]
outstream.write(rr if rgb else bb)
outstream.write(gg)
outstream.write(bb if rgb else rr)
return self.NETSIZE
def setUpArrays(self):
self.network[0, 0] = 0.0 # Black
self.network[0, 1] = 0.0
self.network[0, 2] = 0.0
self.network[1, 0] = 255.0 # White
self.network[1, 1] = 255.0
self.network[1, 2] = 255.0
# RESERVED self.BGCOLOR # Background
for i in range(self.SPECIALS):
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
for i in range(self.SPECIALS, self.NETSIZE):
p = self.network[i]
p[:] = (255.0 * (i-self.SPECIALS)) / self.CUTNETSIZE
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
# Omitted: setPixels
def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b, g, r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha * (n[0] - b))
n[1] -= (alpha * (n[1] - g))
n[2] -= (alpha * (n[2] - r))
def geta(self, alpha, rad):
try:
return self.a_s[(alpha, rad)]
except KeyError:
length = rad * 2-1
mid = int(length//2)
q = np.array(list(range(mid-1, -1, -1)) + list(range(-1, mid)))
a = alpha * (rad * rad - q * q)/(rad * rad)
a[mid] = 0
self.a_s[(alpha, rad)] = a
return a
def alterneigh(self, alpha, rad, i, b, g, r):
if i-rad >= self.SPECIALS-1:
lo = i-rad
start = 0
else:
lo = self.SPECIALS-1
start = (self.SPECIALS-1 - (i-rad))
if i + rad <= self.NETSIZE:
hi = i + rad
end = rad * 2-1
else:
hi = self.NETSIZE
end = (self.NETSIZE - (i + rad))
a = self.geta(alpha, rad)[start:end]
p = self.network[lo + 1:hi]
p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)
#def contest(self, b, g, r):
# """ Search for biased BGR values
# Finds closest neuron (min dist) and updates self.freq
# finds best neuron (min dist-self.bias) and returns position
# for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
# self.bias[i] = self.GAMMA * ((1/self.NETSIZE)-self.freq[i])"""
#
# i, j = self.SPECIALS, self.NETSIZE
# dists = abs(self.network[i:j] - np.array([b, g, r])).sum(1)
# bestpos = i + np.argmin(dists)
# biasdists = dists - self.bias[i:j]
# bestbiaspos = i + np.argmin(biasdists)
# self.freq[i:j] -= self.BETA * self.freq[i:j]
# self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
# self.freq[bestpos] += self.BETA
# self.bias[bestpos] -= self.BETAGAMMA
# return bestbiaspos
def contest(self, b, g, r):
"""Search for biased BGR values
Finds closest neuron (min dist) and updates self.freq
finds best neuron (min dist-self.bias) and returns position
for frequently chosen neurons, self.freq[i] is high and self.bias[i]
is negative self.bias[i] = self.GAMMA * ((1/self.NETSIZE)-self.freq[i])
"""
i, j = self.SPECIALS, self.NETSIZE
dists = abs(self.network[i:j] - np.array([b, g, r])).sum(1)
bestpos = i + np.argmin(dists)
biasdists = dists - self.bias[i:j]
bestbiaspos = i + np.argmin(biasdists)
self.freq[i:j] *= (1-self.BETA)
self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
self.freq[bestpos] += self.BETA
self.bias[bestpos] -= self.BETAGAMMA
return bestbiaspos
def specialFind(self, b, g, r):
for i in range(self.SPECIALS):
n = self.network[i]
if n[0] == b and n[1] == g and n[2] == r:
return i
return -1
def learn(self):
biasRadius = self.INITBIASRADIUS
alphadec = 30 + ((self.samplefac-1)/3)
lengthcount = self.pixels.size
samplepixels = lengthcount / self.samplefac
delta = samplepixels / self.NCYCLES
alpha = self.INITALPHA
i = 0
rad = biasRadius * 2**self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print("Beginning 1D learning: samplepixels = %1.2f rad = %i" %
(samplepixels, rad))
step = 0
pos = 0
if lengthcount % NeuQuant.PRIME1 != 0:
step = NeuQuant.PRIME1
elif lengthcount % NeuQuant.PRIME2 != 0:
step = NeuQuant.PRIME2
elif lengthcount % NeuQuant.PRIME3 != 0:
step = NeuQuant.PRIME3
else:
step = NeuQuant.PRIME4
i = 0
printed_string = ''
while i < samplepixels:
if i % 100 == 99:
tmp = '\b' * len(printed_string)
printed_string = str((i + 1) * 100/samplepixels) + "%\n"
print(tmp + printed_string)
p = self.pixels[pos]
r = (p >> 16) & 0xff
g = (p >> 8) & 0xff
b = (p) & 0xff
if i == 0: # Remember background colour
self.network[self.BGCOLOR] = [b, g, r]
j = self.specialFind(b, g, r)
if j < 0:
j = self.contest(b, g, r)
if j >= self.SPECIALS: # Don't learn for specials
a = (1.0 * alpha) / self.INITALPHA
self.altersingle(a, j, b, g, r)
if rad > 0:
self.alterneigh(a, rad, j, b, g, r)
pos = (pos + step) % lengthcount
i += 1
if i % delta == 0:
alpha -= alpha / alphadec
biasRadius -= biasRadius / self.RADIUSDEC
rad = biasRadius * 2**self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
finalAlpha = (1.0 * alpha)/self.INITALPHA
print("Finished 1D learning: final alpha = %1.2f!" % finalAlpha)
def fix(self):
for i in range(self.NETSIZE):
for j in range(3):
x = int(0.5 + self.network[i, j])
x = max(0, x)
x = min(255, x)
self.colormap[i, j] = x
self.colormap[i, 3] = i
def inxbuild(self):
previouscol = 0
startpos = 0
for i in range(self.NETSIZE):
p = self.colormap[i]
q = None
smallpos = i
smallval = p[1] # Index on g
# Find smallest in i..self.NETSIZE-1
for j in range(i + 1, self.NETSIZE):
q = self.colormap[j]
if q[1] < smallval: # Index on g
smallpos = j
smallval = q[1] # Index on g
q = self.colormap[smallpos]
# Swap p (i) and q (smallpos) entries
if i != smallpos:
p[:], q[:] = q, p.copy()
# smallval entry is now in position i
if smallval != previouscol:
self.netindex[previouscol] = (startpos + i) >> 1
for j in range(previouscol + 1, smallval):
self.netindex[j] = i
previouscol = smallval
startpos = i
self.netindex[previouscol] = (startpos + self.MAXNETPOS) >> 1
for j in range(previouscol + 1, 256): # Really 256
self.netindex[j] = self.MAXNETPOS
def paletteImage(self):
"""PIL weird interface for making a paletted image: create an image
which already has the palette, and use that in Image.quantize. This
function returns this palette image."""
if self.pimage is None:
palette = []
for i in range(self.NETSIZE):
palette.extend(self.colormap[i][:3])
palette.extend([0] * (256-self.NETSIZE) * 3)
# a palette image to use for quant
self.pimage = Image.new("P", (1, 1), 0)
self.pimage.putpalette(palette)
return self.pimage
def quantize(self, image):
""" Use a kdtree to quickly find the closest palette colors for the pixels """
if get_cKDTree():
return self.quantize_with_scipy(image)
else:
print('Scipy not available, falling back to slower version.')
return self.quantize_without_scipy(image)
def quantize_with_scipy(self, image):
w, h = image.size
px = np.asarray(image).copy()
px2 = px[:, :, :3].reshape((w * h, 3))
cKDTree = get_cKDTree()
kdtree = cKDTree(self.colormap[:, :3], leafsize=10)
result = kdtree.query(px2)
colorindex = result[1]
print("Distance: %1.2f" % (result[0].sum()/(w * h)))
px2[:] = self.colormap[colorindex, :3]
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def quantize_without_scipy(self, image):
"""" This function can be used if no scipy is availabe.
It's 7 times slower though.
"""
w, h = image.size
px = np.asarray(image).copy()
memo = {}
for j in range(w):
for i in range(h):
key = (px[i, j, 0], px[i, j, 1], px[i, j, 2])
try:
val = memo[key]
except KeyError:
val = self.convert(*key)
memo[key] = val
px[i, j, 0], px[i, j, 1], px[i, j, 2] = val
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def convert(self, *color):
i = self.inxsearch(*color)
return self.colormap[i, :3]
def inxsearch(self, r, g, b):
"""Search for BGR values 0..255 and return colour index"""
dists = (self.colormap[:, :3] - np.array([r, g, b]))
a = np.argmin((dists * dists).sum(1))
return a
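# Minimal usage sketch (the file name is illustrative): learn a 256-colour
# palette from an RGBA image and quantize it. The image must be in RGBA mode
# and contain at least NeuQuant.MAXPRIME (503) pixels.
#
#     im = Image.open('photo.png').convert('RGBA')
#     nq = NeuQuant(im, samplefac=10, colors=256)
#     paletted = nq.quantize(im)   # paletted PIL image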
if __name__ == '__main__':
im = np.zeros((200, 200), dtype=np.uint8)
im[10: 30, :] = 100
im[:, 80: 120] = 255
im[-50: -40, :] = 50
images = [im * 1.0, im * 0.8, im * 0.6, im * 0.4, im * 0]
writeGif('lala3.gif', images, duration=0.5, dither=0)
|
|
'''ARMA process and estimation with scipy.signal.lfilter
2009-09-06: copied from try_signal.py
reparameterized same as signal.lfilter (positive coefficients)
Notes
-----
* pretty fast
* checked with Monte Carlo and cross comparison with statsmodels yule_walker;
  for AR the numbers are close but not identical to yule_walker;
not compared to other statistics packages, no degrees of freedom correction
* ARMA(2,2) estimation (in Monte Carlo) requires longer time series to estimate parameters
without large variance. There might be different ARMA parameters
with similar impulse response function that cannot be well
distinguished with small samples (e.g. 100 observations)
* good for one time calculations for entire time series, not for recursive
prediction
* class structure not very clean yet
* many one-liners with scipy.signal, but takes time to figure out usage
* missing result statistics, e.g. t-values, but standard errors in examples
* no criteria for choice of number of lags
* no constant term in ARMA process
* no integration, differencing for ARIMA
* written without a textbook; works, but not sure about everything.
  Briefly checked and it looks to be standard least squares, see below
* theoretical autocorrelation function of general ARMA
Done, relatively easy to guess solution, time consuming to get
theoretical test cases,
example file contains explicit formulas for acovf of MA(1), MA(2) and ARMA(1,1)
* two names for lag polynomials ar = rhoy, ma = rhoe ?
Properties:
Judge, ... (1985): The Theory and Practice of Econometrics
BigJudge p. 237ff:
If the time series process is a stationary ARMA(p,q), then
minimizing the sum of squares is asymptotically (as T -> inf)
equivalent to the exact Maximum Likelihood Estimator
Because Least Squares conditional on the initial information
does not use all information, in small samples exact MLE can
be better.
Without the normality assumption, the least squares estimator
is still consistent under suitable conditions, however not
efficient
Author: josefpktd
License: BSD
'''
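# Spelled out, with rhoy = [1, a_1, ..., a_p] and rhoe = [1, b_1, ..., b_q],
# the model rhoy(L) y_t = rhoe(L) eta_t used below reads
#     y_t + a_1*y_{t-1} + ... + a_p*y_{t-p} = eta_t + b_1*eta_{t-1} + ... + b_q*eta_{t-q}
# which is the positive-coefficient convention of scipy.signal.lfilter.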
import numpy as np
from scipy import signal, optimize, linalg
from statsmodels.base.model import LikelihoodModel
#this has been copied to new arma_mle.py - keep temporarily for easier lookup
class ARIMA(LikelihoodModel):
'''currently ARMA only, no differencing used - no I
parameterized as
rhoy(L) y_t = rhoe(L) eta_t
An instance of this class preserves state, so new class instances should
be created for different examples
'''
def __init__(self, endog, exog=None):
super(ARIMA, self).__init__(endog, exog)
if endog.ndim == 1:
endog = endog[:,None]
elif endog.ndim > 1 and endog.shape[1] != 1:
raise ValueError("Only the univariate case is implemented")
self.endog = endog # overwrite endog
if exog is not None:
raise ValueError("Exogenous variables are not yet supported.")
def fit(self, order=(0,0,0), method="ls", rhoy0=None, rhoe0=None):
'''
Estimate lag coefficients of an ARIMA process.
Parameters
----------
order : sequence
p,d,q where p is the number of AR lags, d is the number of
differences to induce stationarity, and q is the number of
MA lags to estimate.
method : str {"ls", "ssm"}
Method of estimation. LS is conditional least squares.
SSM is state-space model and the Kalman filter is used to
maximize the exact likelihood.
rhoy0, rhoe0 : array_like (optional)
starting values for estimation
Returns
-------
rh, cov_x, infodict, mesg, ier : output of scipy.optimize.leastsq
rh :
estimate of lag parameters, concatenated [rhoy, rhoe]
cov_x :
unscaled (!) covariance matrix of coefficient estimates
'''
if not hasattr(order, '__iter__'):
raise ValueError("order must be an iterable sequence. Got type \
%s instead" % type(order))
p,d,q = order
if d > 0:
raise ValueError("Differencing not implemented yet")
# assume no constant, ie mu = 0
# unless overwritten then use w_bar for mu
Y = np.diff(self.endog, d, axis=0)  # TODO: handle lags? (Y is currently unused)
x = self.endog.squeeze() # remove the squeeze might be needed later
def errfn( rho):
#rhoy, rhoe = rho
rhoy = np.concatenate(([1], rho[:p]))
rhoe = np.concatenate(([1], rho[p:]))
etahatr = signal.lfilter(rhoy, rhoe, x)
#print rho,np.sum(etahatr*etahatr)
return etahatr
if rhoy0 is None:
rhoy0 = 0.5 * np.ones(p)
if rhoe0 is None:
rhoe0 = 0.5 * np.ones(q)
method = method.lower()
if method == "ls":
rh, cov_x, infodict, mesg, ier = \
optimize.leastsq(errfn, np.r_[rhoy0, rhoe0],ftol=1e-10,full_output=True)
#TODO: integrate this into the MLE.fit framework?
elif method == "ssm":
pass
else:
# fmin_bfgs is slow or doesn't work yet
errfnsum = lambda rho : np.sum(errfn(rho)**2)
#xopt, {fopt, gopt, Hopt, func_calls, grad_calls
rh,fopt, gopt, cov_x, _,_, ier = \
optimize.fmin_bfgs(errfnsum, np.r_[rhoy0, rhoe0], maxiter=2, full_output=True)
infodict, mesg = None, None
self.rh = rh
self.rhoy = np.concatenate(([1], rh[:p]))
self.rhoe = np.concatenate(([1], rh[p:])) # rh[-q:] doesn't work for q=0
self.error_estimate = errfn(rh)
return rh, cov_x, infodict, mesg, ier
def errfn(self, rho=None, p=None, x=None):
''' duplicate -> remove one
'''
#rhoy, rhoe = rho
if not rho is None:
rhoy = np.concatenate(([1], rho[:p]))
rhoe = np.concatenate(([1], rho[p:]))
else:
rhoy = self.rhoy
rhoe = self.rhoe
etahatr = signal.lfilter(rhoy, rhoe, x)
#print rho,np.sum(etahatr*etahatr)
return etahatr
def predicted(self, rhoy=None, rhoe=None):
'''past predicted values of time series
just added, not checked yet
'''
if rhoy is None:
rhoy = self.rhoy
if rhoe is None:
rhoe = self.rhoe
return self.x + self.error_estimate
def forecast(self, ar=None, ma=None, nperiod=10):
eta = np.r_[self.error_estimate, np.zeros(nperiod)]
if ar is None:
ar = self.rhoy
if ma is None:
ma = self.rhoe
return signal.lfilter(ma, ar, eta)
#TODO: is this needed as a method at all?
@classmethod
def generate_sample(cls, ar, ma, nsample, std=1):
eta = std * np.random.randn(nsample)
return signal.lfilter(ma, ar, eta)
def arma_generate_sample(ar, ma, nsample, sigma=1, distrvs=np.random.randn, burnin=0):
'''generate a random sample of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
nsample : int
length of simulated time series
sigma : float
standard deviation of noise
distrvs : function, random number generator
function that generates the random numbers, and takes sample size
as argument
default: np.random.randn
TODO: change to size argument
burnin : integer (default: 0)
to reduce the effect of initial conditions, burnin observations at the
beginning of the sample are dropped
Returns
-------
sample : array
    simulated time series of the ARMA process given by ar, ma
'''
#TODO: unify with ArmaProcess method
eta = sigma * distrvs(nsample+burnin)
return signal.lfilter(ma, ar, eta)[burnin:]
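# Minimal usage sketch (coefficients are illustrative): simulate 250
# observations of the ARMA(1, 1) process
#     y_t = 0.8*y_{t-1} + eta_t + 0.4*eta_{t-1}
# dropping 50 burn-in observations.
#
#     y = arma_generate_sample([1., -0.8], [1., 0.4], 250, sigma=1., burnin=50)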
def arma_acovf(ar, ma, nobs=10):
'''theoretical autocovariance function of ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
Returns
-------
acovf : array
autocovariance of ARMA process given by ar, ma
See Also
--------
arma_acf
acovf
Notes
-----
Tries to do some crude numerical speed improvements for cases
with high persistence. However, this algorithm is slow if the process is
highly persistent and only a few autocovariances are desired.
'''
#increase length of impulse response for AR closer to 1
#maybe cheap/fast enough to always keep nobs for ir large
if np.abs(np.sum(ar)-1) > 0.9:
nobs_ir = max(1000, 2* nobs) #no idea right now how large it is needed
else:
nobs_ir = max(100, 2* nobs) #no idea right now
ir = arma_impulse_response(ar, ma, nobs=nobs_ir)
#better safe than sorry (?), I have no idea about the required precision
#only checked for AR(1)
while ir[-1] > 5*1e-5:
nobs_ir *= 10
ir = arma_impulse_response(ar, ma, nobs=nobs_ir)
#again no idea where the speed break points are:
if nobs_ir > 50000 and nobs < 1001:
acovf = np.array([np.dot(ir[:nobs-t], ir[t:nobs]) for t in range(nobs)])
else:
acovf = np.correlate(ir,ir,'full')[len(ir)-1:]
return acovf[:nobs]
def arma_acf(ar, ma, nobs=10):
'''theoretical autocorrelation function of an ARMA process
Parameters
----------
ar : array_like, 1d
coefficient for autoregressive lag polynomial, including zero lag
ma : array_like, 1d
coefficient for moving-average lag polynomial, including zero lag
Returns
-------
acf : array
    autocorrelation function of the ARMA process given by ar, ma
See Also
--------
arma_acovf
acf
acovf
'''
acovf = arma_acovf(ar, ma, nobs)
return acovf/acovf[0]
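# Worked example: for an AR(1) with coefficient 0.8 the theoretical ACF decays
# geometrically, so arma_acf([1., -0.8], [1.], nobs=5) is approximately
# [1.0, 0.8, 0.64, 0.512, 0.4096] (up to truncation of the impulse response).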
def arma_pacf(ar, ma, nobs=10):
'''partial autocorrelation function of an ARMA process
Notes
-----
solves yule-walker equation for each lag order up to nobs lags
not tested/checked yet
'''
apacf = np.zeros(nobs)
acov = arma_acf(ar,ma, nobs=nobs+1)
apacf[0] = 1.
for k in range(2,nobs+1):
r = acov[:k];
apacf[k-1] = linalg.solve(linalg.toeplitz(r[:-1]), r[1:])[-1]
return apacf
def arma_periodogram(ar, ma, worN=None, whole=0):
'''periodogram for ARMA process given by lag-polynomials ar and ma
Parameters
----------
ar : array_like
autoregressive lag-polynomial with leading 1 and lhs sign
ma : array_like
moving average lag-polynomial with leading 1
worN : {None, int}, optional
option for scipy.signal.freqz (read "w or N")
If None, then compute at 512 frequencies around the unit circle.
If a single integer, then compute at that many frequencies.
Otherwise, compute the response at frequencies given in worN
whole : {0,1}, optional
options for scipy.signal.freqz
Normally, frequencies are computed from 0 to pi (upper half of the
unit circle). If whole is non-zero, compute frequencies from 0 to 2*pi.
Returns
-------
w : array
frequencies
sd : array
periodogram, spectral density
Notes
-----
Normalization ?
This uses signal.freqz, which does not use fft. There is a fft version
somewhere.
'''
w, h = signal.freqz(ma, ar, worN=worN, whole=whole)
sd = np.abs(h)**2/np.sqrt(2*np.pi)
if np.sum(np.isnan(h)) > 0:
# this happens with a unit root or seasonal unit root
print('Warning: nan in frequency response h, maybe a unit root')
return w, sd
def arma_impulse_response(ar, ma, nobs=100):
'''get the impulse response function (MA representation) for ARMA process
Parameters
----------
ma : array_like, 1d
moving average lag polynomial
ar : array_like, 1d
auto regressive lag polynomial
nobs : int
number of observations to calculate
Returns
-------
ir : array, 1d
impulse response function with nobs elements
Notes
-----
This is the same as finding the MA representation of an ARMA(p,q).
By reversing the role of ar and ma in the function arguments, the
returned result is the AR representation of an ARMA(p,q), i.e
ma_representation = arma_impulse_response(ar, ma, nobs=100)
ar_representation = arma_impulse_response(ma, ar, nobs=100)
fully tested against matlab
Examples
--------
AR(1)
>>> arma_impulse_response([1.0, -0.8], [1.], nobs=10)
array([ 1. , 0.8 , 0.64 , 0.512 , 0.4096 ,
0.32768 , 0.262144 , 0.2097152 , 0.16777216, 0.13421773])
this is the same as
>>> 0.8**np.arange(10)
array([ 1. , 0.8 , 0.64 , 0.512 , 0.4096 ,
0.32768 , 0.262144 , 0.2097152 , 0.16777216, 0.13421773])
MA(2)
>>> arma_impulse_response([1.0], [1., 0.5, 0.2], nobs=10)
array([ 1. , 0.5, 0.2, 0. , 0. , 0. , 0. , 0. , 0. , 0. ])
ARMA(1,2)
>>> arma_impulse_response([1.0, -0.8], [1., 0.5, 0.2], nobs=10)
array([ 1. , 1.3 , 1.24 , 0.992 , 0.7936 ,
0.63488 , 0.507904 , 0.4063232 , 0.32505856, 0.26004685])
'''
impulse = np.zeros(nobs)
impulse[0] = 1.
return signal.lfilter(ma, ar, impulse)
#alias, easier to remember
arma2ma = arma_impulse_response
#alias, easier to remember
def arma2ar(ar, ma, nobs=100):
'''get the AR representation of an ARMA process
Parameters
----------
ar : array_like, 1d
auto regressive lag polynomial
ma : array_like, 1d
moving average lag polynomial
nobs : int
number of observations to calculate
Returns
-------
ar : array, 1d
coefficients of AR lag polynomial with nobs elements
Notes
-----
This is just an alias for
``ar_representation = arma_impulse_response(ma, ar, nobs=100)``
fully tested against matlab
Examples
--------
'''
return arma_impulse_response(ma, ar, nobs=nobs)
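# Illustrative sketch (not part of the original module, not called anywhere):
# for an MA(1) process x_t = (1 + 0.5 L) e_t the AR representation has
# coefficients (-0.5)**j, which arma2ar should reproduce.
def _example_arma2ar_ma1():
    ar_repr = arma2ar([1.], [1., 0.5], nobs=5)
    expected = (-0.5) ** np.arange(5)
    return np.max(np.abs(ar_repr - expected))  # expected to be tiny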
#moved from sandbox.tsa.try_fi
def ar2arma(ar_des, p, q, n=20, mse='ar', start=None):
'''find arma approximation to ar process
This finds the ARMA(p,q) coefficients that minimize the integrated
squared difference between the impulse_response functions
(MA representation) of the AR and the ARMA process. This does
currently not check whether the MA lagpolynomial of the ARMA
process is invertible, neither does it check the roots of the AR
lagpolynomial.
Parameters
----------
ar_des : array_like
coefficients of original AR lag polynomial, including lag zero
p, q : int
length of desired ARMA lag polynomials
n : int
        number of terms of the impulse_response function to include in the
        objective function for the approximation
    mse : str, 'ar'
        not used yet
Returns
-------
ar_app, ma_app : arrays
coefficients of the AR and MA lag polynomials of the approximation
res : tuple
result of optimize.leastsq
Notes
-----
Extension is possible if we want to match autocovariance instead
of impulse response function.
    TODO: convert MA lag polynomial, ma_app, to be invertible, by mirroring
    roots inside the unit circle to ones that are outside. How do we do
    this?
'''
#p,q = pq
def msear_err(arma, ar_des):
ar, ma = np.r_[1, arma[:p-1]], np.r_[1, arma[p-1:]]
ar_approx = arma_impulse_response(ma, ar, n)
## print ar,ma
## print ar_des.shape, ar_approx.shape
## print ar_des
## print ar_approx
return (ar_des - ar_approx) #((ar - ar_approx)**2).sum()
if start is None:
arma0 = np.r_[-0.9* np.ones(p-1), np.zeros(q-1)]
else:
arma0 = start
res = optimize.leastsq(msear_err, arma0, ar_des, maxfev=5000)#, full_output=True)
#print res
arma_app = np.atleast_1d(res[0])
    ar_app = np.r_[1, arma_app[:p-1]]
ma_app = np.r_[1, arma_app[p-1:]]
return ar_app, ma_app, res
def lpol2index(ar):
'''remove zeros from lagpolynomial, squeezed representation with index
Parameters
----------
ar : array_like
coefficients of lag polynomial
Returns
-------
coeffs : array
non-zero coefficients of lag polynomial
index : array
index (lags) of lagpolynomial with non-zero elements
'''
ar = np.asarray(ar)
index = np.nonzero(ar)[0]
coeffs = ar[index]
return coeffs, index
def index2lpol(coeffs, index):
'''expand coefficients to lag poly
Parameters
----------
coeffs : array
non-zero coefficients of lag polynomial
index : array
index (lags) of lagpolynomial with non-zero elements
Returns
-------
ar : array_like
coefficients of lag polynomial
'''
    n = max(index) + 1
    ar = np.zeros(n)
ar[index] = coeffs
return ar
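# Illustrative sketch (not part of the original module, not called anywhere):
# lpol2index and index2lpol are inverses of each other for lag polynomials
# whose last coefficient is non-zero, e.g. a sparse seasonal-type polynomial.
def _example_lpol_roundtrip():
    ar = np.array([1., 0., 0., -0.5])
    coeffs, index = lpol2index(ar)      # -> [1., -0.5], [0, 3]
    return index2lpol(coeffs, index)    # -> [1., 0., 0., -0.5]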
#moved from sandbox.tsa.try_fi
def lpol_fima(d, n=20):
'''MA representation of fractional integration
    .. math:: (1-L)^{-d}

    for :math:`|d| < 0.5` or perhaps :math:`|d| < 1` (not verified)
Parameters
----------
d : float
fractional power
n : int
number of terms to calculate, including lag zero
Returns
-------
ma : array
coefficients of lag polynomial
'''
#hide import inside function until we use this heavily
from scipy.special import gamma, gammaln
j = np.arange(n)
return np.exp(gammaln(d+j) - gammaln(j+1) - gammaln(d))
#moved from sandbox.tsa.try_fi
def lpol_fiar(d, n=20):
'''AR representation of fractional integration
    .. math:: (1-L)^{d}

    for :math:`|d| < 0.5` or perhaps :math:`|d| < 1` (not verified)
Parameters
----------
d : float
fractional power
n : int
number of terms to calculate, including lag zero
Returns
-------
ar : array
coefficients of lag polynomial
    Notes
    -----
    The first coefficient is 1; the remaining coefficients enter with negative
    sign, corresponding to the AR polynomial in ar(L) * x_t.
'''
#hide import inside function until we use this heavily
from scipy.special import gamma, gammaln
j = np.arange(n)
ar = - np.exp(gammaln(-d+j) - gammaln(j+1) - gammaln(-d))
ar[0] = 1
return ar
#moved from sandbox.tsa.try_fi
def lpol_sdiff(s):
'''return coefficients for seasonal difference (1-L^s)
just a trivial convenience function
Parameters
----------
s : int
number of periods in season
Returns
-------
sdiff : list, length s+1
'''
return [1] + [0]*(s-1) + [-1]
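# Illustrative sketch (not part of the original module, not called anywhere):
# the truncated AR and MA representations of fractional integration are
# inverse filters, so their convolution should be (approximately) a unit
# impulse in the first n terms; lpol_sdiff(12) is the usual monthly
# seasonal-difference filter 1 - L**12.
def _example_fractional_and_seasonal(d=0.3, n=20):
    prod = np.convolve(lpol_fiar(d, n=n), lpol_fima(d, n=n))[:n]
    unit = np.zeros(n)
    unit[0] = 1.
    return np.max(np.abs(prod - unit)), lpol_sdiff(12)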
def deconvolve(num, den, n=None):
"""Deconvolves divisor out of signal, division of polynomials for n terms
calculates den^{-1} * num
Parameters
----------
num : array_like
signal or lag polynomial
denom : array_like
coefficients of lag polynomial (linear filter)
n : None or int
number of terms of quotient
Returns
-------
quot : array
quotient or filtered series
rem : array
remainder
Notes
-----
If num is a time series, then this applies the linear filter den^{-1}.
If both num and den are both lagpolynomials, then this calculates the
quotient polynomial for n terms and also returns the remainder.
This is copied from scipy.signal.signaltools and added n as optional
parameter.
"""
num = np.atleast_1d(num)
den = np.atleast_1d(den)
N = len(num)
D = len(den)
if D > N and n is None:
        quot = []
        rem = num
else:
if n is None:
n = N-D+1
input = np.zeros(n, float)
input[0] = 1
quot = signal.lfilter(num, den, input)
num_approx = signal.convolve(den, quot, mode='full')
if len(num) < len(num_approx): # 1d only ?
num = np.concatenate((num, np.zeros(len(num_approx)-len(num))))
rem = num - num_approx
return quot, rem
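# Illustrative sketch (not part of the original module, not called anywhere):
# dividing a lag-polynomial product by one of its factors recovers the other
# factor with a (numerically) zero remainder.
def _example_deconvolve():
    num = np.convolve([1., -0.4], [1., 0.5])
    quot, rem = deconvolve(num, [1., 0.5])
    return quot, rem  # roughly [1., -0.4] and zeros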
class ArmaProcess(object):
'''represents an ARMA process for given lag-polynomials
This is a class to bring together properties of the process.
It does not do any estimation or statistical analysis.
maybe needs special handling for unit roots
'''
def __init__(self, ar, ma, nobs=None):
self.ar = np.asarray(ar)
self.ma = np.asarray(ma)
self.arcoefs = -self.ar[1:]
self.macoefs = self.ma[1:]
self.arpoly = np.polynomial.Polynomial(self.ar)
self.mapoly = np.polynomial.Polynomial(self.ma)
self.nobs = nobs
@classmethod
def from_coeffs(cls, arcoefs, macoefs, nobs=None):
'''create ArmaProcess instance from coefficients of the lag-polynomials
'''
return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)
@classmethod
def from_estimation(cls, model_results, nobs=None):
'''create ArmaProcess instance from estimation results
'''
arcoefs = model_results.params[:model_results.nar]
macoefs = model_results.params[model_results.nar:
model_results.nar+model_results.nma]
return cls(np.r_[1, -arcoefs], np.r_[1, macoefs], nobs=nobs)
def __mul__(self, oth):
if isinstance(oth, self.__class__):
ar = (self.arpoly * oth.arpoly).coef
ma = (self.mapoly * oth.mapoly).coef
else:
try:
aroth, maoth = oth
arpolyoth = np.polynomial.Polynomial(aroth)
mapolyoth = np.polynomial.Polynomial(maoth)
ar = (self.arpoly * arpolyoth).coef
ma = (self.mapoly * mapolyoth).coef
            except Exception:
                print('other is not a valid type')
                raise
return self.__class__(ar, ma, nobs=self.nobs)
def __repr__(self):
        return 'ArmaProcess(%r, %r, nobs=%s)' % (self.ar.tolist(), self.ma.tolist(),
                                                 self.nobs)
def __str__(self):
return 'ArmaProcess\nAR: %r\nMA: %r' % (self.ar.tolist(), self.ma.tolist())
def acovf(self, nobs=None):
nobs = nobs or self.nobs
return arma_acovf(self.ar, self.ma, nobs=nobs)
acovf.__doc__ = arma_acovf.__doc__
def acf(self, nobs=None):
nobs = nobs or self.nobs
return arma_acf(self.ar, self.ma, nobs=nobs)
acf.__doc__ = arma_acf.__doc__
def pacf(self, nobs=None):
nobs = nobs or self.nobs
return arma_pacf(self.ar, self.ma, nobs=nobs)
pacf.__doc__ = arma_pacf.__doc__
def periodogram(self, nobs=None):
nobs = nobs or self.nobs
return arma_periodogram(self.ar, self.ma, worN=nobs)
periodogram.__doc__ = arma_periodogram.__doc__
def impulse_response(self, nobs=None):
nobs = nobs or self.nobs
        return arma_impulse_response(self.ar, self.ma, nobs=nobs)
impulse_response.__doc__ = arma_impulse_response.__doc__
def arma2ma(self, nobs=None):
nobs = nobs or self.nobs
return arma2ma(self.ar, self.ma, nobs=nobs)
arma2ma.__doc__ = arma2ma.__doc__
def arma2ar(self, nobs=None):
nobs = nobs or self.nobs
return arma2ar(self.ar, self.ma, nobs=nobs)
arma2ar.__doc__ = arma2ar.__doc__
def ar_roots(self):
'''roots of autoregressive lag-polynomial
'''
return self.arpoly.roots()
def ma_roots(self):
'''roots of moving average lag-polynomial
'''
return self.mapoly.roots()
def isstationary(self):
'''Arma process is stationary if AR roots are outside unit circle
Returns
-------
isstationary : boolean
True if autoregressive roots are outside unit circle
'''
        return np.all(np.abs(self.ar_roots()) > 1)
def isinvertible(self):
'''Arma process is invertible if MA roots are outside unit circle
Returns
-------
isinvertible : boolean
True if moving average roots are outside unit circle
'''
        return np.all(np.abs(self.ma_roots()) > 1)
def invertroots(self, retnew=False):
'''make MA polynomial invertible by inverting roots inside unit circle
Parameters
----------
retnew : boolean
If False (default), then return the lag-polynomial as array.
If True, then return a new instance with invertible MA-polynomial
Returns
-------
manew : array
new invertible MA lag-polynomial, returned if retnew is false.
wasinvertible : boolean
True if the MA lag-polynomial was already invertible, returned if
retnew is false.
armaprocess : new instance of class
If retnew is true, then return a new instance with invertible
MA-polynomial
'''
pr = self.ma_roots()
insideroots = np.abs(pr)<1
if insideroots.any():
pr[np.abs(pr)<1] = 1./pr[np.abs(pr)<1]
            pnew = np.polynomial.Polynomial.fromroots(pr)
            mainv = pnew.coef / pnew.coef[0]
wasinvertible = False
else:
mainv = self.ma
wasinvertible = True
if retnew:
return self.__class__(self.ar, mainv, nobs=self.nobs)
else:
return mainv, wasinvertible
def generate_sample(self, size=100, scale=1, distrvs=None, axis=0, burnin=0):
'''generate ARMA samples
Parameters
----------
size : int or tuple of ints
If size is an integer, then this creates a 1d timeseries of length size.
            If size is a tuple, then the time series is along `axis`. All other
            axes have independent ARMA samples.
Returns
-------
rvs : ndarray
random sample(s) of arma process
Notes
-----
Should work for n-dimensional with time series along axis, but not tested
yet. Processes are sampled independently.
'''
if distrvs is None:
distrvs = np.random.normal
if np.ndim(size) == 0:
size = [size]
if burnin:
            #handle burn-in time for nd arrays
#maybe there is a better trick in scipy.fft code
newsize = list(size)
newsize[axis] += burnin
newsize = tuple(newsize)
fslice = [slice(None)]*len(newsize)
fslice[axis] = slice(burnin, None, None)
fslice = tuple(fslice)
else:
newsize = tuple(size)
            fslice = tuple([slice(None)] * len(newsize))
eta = scale * distrvs(size=newsize)
return signal.lfilter(self.ma, self.ar, eta, axis=axis)[fslice]
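# Illustrative sketch (not part of the original module, not called anywhere):
# typical use of ArmaProcess is to check stationarity/invertibility, look at
# the theoretical acf and draw a simulated sample.
def _example_arma_process():
    ap = ArmaProcess([1., -0.8], [1., 0.3], nobs=50)
    stationary = ap.isstationary()   # True: AR root 1.25 is outside the unit circle
    invertible = ap.isinvertible()   # True: MA root -10/3 is outside the unit circle
    acf_vals = ap.acf(10)
    sample = ap.generate_sample(size=200, burnin=100)
    return stationary, invertible, acf_vals, sample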
__all__ = ['arma_acf', 'arma_acovf', 'arma_generate_sample',
'arma_impulse_response', 'arma2ar', 'arma2ma', 'deconvolve',
'lpol2index', 'index2lpol']
if __name__ == '__main__':
# Simulate AR(1)
#--------------
# ar * y = ma * eta
ar = [1, -0.8]
ma = [1.0]
# generate AR data
eta = 0.1 * np.random.randn(1000)
yar1 = signal.lfilter(ar, ma, eta)
print "\nExample 0"
arest = ARIMA(yar1)
rhohat, cov_x, infodict, mesg, ier = arest.fit((1,0,1))
print rhohat
print cov_x
print "\nExample 1"
ar = [1.0, -0.8]
ma = [1.0, 0.5]
y1 = arest.generate_sample(ar,ma,1000,0.1)
arest = ARIMA(y1)
rhohat1, cov_x1, infodict, mesg, ier = arest.fit((1,0,1))
    print(rhohat1)
    print(cov_x1)
    err1 = arest.errfn(x=y1)
    print(np.var(err1))
    import statsmodels.api as sm
    print(sm.regression.yule_walker(y1, order=2, inv=True))
    print("\nExample 2")
nsample = 1000
ar = [1.0, -0.6, -0.1]
ma = [1.0, 0.3, 0.2]
y2 = ARIMA.generate_sample(ar,ma,nsample,0.1)
arest2 = ARIMA(y2)
rhohat2, cov_x2, infodict, mesg, ier = arest2.fit((1,0,2))
    print(rhohat2)
    print(cov_x2)
    err2 = arest.errfn(x=y2)
    print(np.var(err2))
    print(arest2.rhoy)
    print(arest2.rhoe)
    print("true")
    print(ar)
    print(ma)
    rhohat2a, cov_x2a, infodict, mesg, ier = arest2.fit((2,0,2))
    print(rhohat2a)
    print(cov_x2a)
    err2a = arest.errfn(x=y2)
    print(np.var(err2a))
    print(arest2.rhoy)
    print(arest2.rhoe)
    print("true")
    print(ar)
    print(ma)
    print(sm.regression.yule_walker(y2, order=2, inv=True))
    print("\nExample 20")
nsample = 1000
ar = [1.0]#, -0.8, -0.4]
ma = [1.0, 0.5, 0.2]
y3 = ARIMA.generate_sample(ar,ma,nsample,0.01)
arest20 = ARIMA(y3)
rhohat3, cov_x3, infodict, mesg, ier = arest20.fit((2,0,0))
    print(rhohat3)
    print(cov_x3)
    err3 = arest20.errfn(x=y3)
    print(np.var(err3))
    print(np.sqrt(np.dot(err3, err3) / nsample))
    print(arest20.rhoy)
    print(arest20.rhoe)
    print("true")
    print(ar)
    print(ma)
    rhohat3a, cov_x3a, infodict, mesg, ier = arest20.fit((0,0,2))
    print(rhohat3a)
    print(cov_x3a)
    err3a = arest20.errfn(x=y3)
    print(np.var(err3a))
    print(np.sqrt(np.dot(err3a, err3a) / nsample))
    print(arest20.rhoy)
    print(arest20.rhoe)
    print("true")
    print(ar)
    print(ma)
    print(sm.regression.yule_walker(y3, order=2, inv=True))
    print("\nExample 02")
nsample = 1000
ar = [1.0, -0.8, 0.4] #-0.8, -0.4]
ma = [1.0]#, 0.8, 0.4]
y4 = ARIMA.generate_sample(ar,ma,nsample)
arest02 = ARIMA(y4)
rhohat4, cov_x4, infodict, mesg, ier = arest02.fit((2,0,0))
    print(rhohat4)
    print(cov_x4)
    err4 = arest02.errfn(x=y4)
    print(np.var(err4))
    sige = np.sqrt(np.dot(err4, err4) / nsample)
    print(sige)
    print(sige * np.sqrt(np.diag(cov_x4)))
    print(np.sqrt(np.diag(cov_x4)))
    print(arest02.rhoy)
    print(arest02.rhoe)
    print("true")
    print(ar)
    print(ma)
    rhohat4a, cov_x4a, infodict, mesg, ier = arest02.fit((0,0,2))
    print(rhohat4a)
    print(cov_x4a)
    err4a = arest02.errfn(x=y4)
    print(np.var(err4a))
    sige = np.sqrt(np.dot(err4a, err4a) / nsample)
    print(sige)
    print(sige * np.sqrt(np.diag(cov_x4a)))
    print(np.sqrt(np.diag(cov_x4a)))
    print(arest02.rhoy)
    print(arest02.rhoe)
    print("true")
    print(ar)
    print(ma)
    print(sm.regression.yule_walker(y4, order=2, method='mle', inv=True))
import matplotlib.pyplot as plt
plt.plot(arest2.forecast()[-100:])
#plt.show()
ar1, ar2 = ([1, -0.4], [1, 0.5])
ar2 = [1, -1]
lagpolyproduct = np.convolve(ar1, ar2)
    print(deconvolve(lagpolyproduct, ar2, n=None))
    print(signal.deconvolve(lagpolyproduct, ar2))
    print(deconvolve(lagpolyproduct, ar2, n=10))
|
|
"""
:Author: Jonathan Karr <[email protected]>
:Date: 2017-05-08
:Copyright: 2017, Karr Lab
:License: MIT
"""
import abc
import datanator.config
import os
import requests
import requests_cache
import shutil
import sqlalchemy
import sqlalchemy.orm
from sqlalchemy_utils.functions import database_exists, create_database
from datanator.util.constants import DATA_CACHE_DIR, DATA_DUMP_PATH
import sys
import tarfile
import subprocess
import tempfile
import time
import wc_utils.quilt
class DataSource(object, metaclass=abc.ABCMeta):
""" Represents an external data source
Attributes:
name (:obj:`str`): name
"""
def __init__(self, name=None, verbose=False):
"""
Args:
name (:obj:`str`, optional): name
"""
if not name:
name = self.__class__.__name__
self.name = name
self.verbose = verbose
def vprint(self, str):
if self.verbose:
print(str)
class PostgresDataSource(DataSource):
""" Represents a Postgres database
Attributes:
name (:obj:`str`): name
max_entries (:obj:`float`): maximum number of entries to save locally
quilt_owner (:obj:`str`): owner of Quilt package to save data
quilt_package (:obj:`str`): identifier of Quilt package to save data
cache_dirname (:obj:`str`): directory to store the local copy of the data source
verbose (:obj:`bool`): if :obj:`True`, print status information to the standard output
engine (:obj:`sqlalchemy.engine.Engine`): SQLAlchemy engine
session (:obj:`sqlalchemy.orm.session.Session`): SQLAlchemy session
        base_model (:obj:`Base`): base ORM model for the database
"""
def __init__(self, name=None,
clear_content=False,
load_content=False, max_entries=float('inf'),
restore_backup_data=False, restore_backup_schema=False, restore_backup_exit_on_error=True,
quilt_owner=None, quilt_package=None, cache_dirname=None,
verbose=False):
"""
Args:
name (:obj:`str`, optional): name
clear_content (:obj:`bool`, optional): if :obj:`True`, clear the content of the sqlite local copy of the data source
load_content (:obj:`bool`, optional): if :obj:`True`, load the content of the local sqlite database from the external source
max_entries (:obj:`float`, optional): maximum number of entries to save locally
restore_backup_data (:obj:`bool`, optional): if :obj:`True`, download and restore data from dump in Quilt package
restore_backup_schema (:obj:`bool`, optional): if :obj:`True`, download and restore schema from dump in Quilt package
restore_backup_exit_on_error (:obj:`bool`, optional): if :obj:`True`, exit on errors in restoring backups
quilt_owner (:obj:`str`, optional): owner of Quilt package to save data
quilt_package (:obj:`str`, optional): identifier of Quilt package to save data
cache_dirname (:obj:`str`, optional): directory to store the local copy of the data source
verbose (:obj:`bool`, optional): if :obj:`True`, print status information to the standard output
"""
super(PostgresDataSource, self).__init__(name=name, verbose=verbose)
self.base_model.configure_mappers()
# max entries for loading content
self.max_entries = max_entries
# set Quilt configuration
quilt_config = datanator.config.get_config()['datanator']['quilt']
self.quilt_owner = quilt_owner or quilt_config['owner']
self.quilt_package = quilt_package or quilt_config['package']
# local directory for dump in Quilt package
if not cache_dirname:
cache_dirname = DATA_CACHE_DIR
self.cache_dirname = cache_dirname
# setup database and restore or load content
self.engine = self.get_engine()
if clear_content:
self.clear_content()
if restore_backup_data or restore_backup_schema:
self.restore_backup(restore_data=restore_backup_data,
restore_schema=restore_backup_schema,
exit_on_error=restore_backup_exit_on_error)
self.session = self.get_session()
if load_content:
self.load_content()
def get_engine(self):
""" Get an engine for the Postgres database. If the database doesn't exist, initialize its structure.
Returns:
:obj:`sqlalchemy.engine.Engine`: database engine
"""
engine = self.base_model.engine
if not database_exists(engine.url):
create_database(engine.url)
inspector = sqlalchemy.inspect(engine)
if not inspector.get_table_names():
self.base_model.metadata.create_all(engine)
return engine
def clear_content(self):
""" Clear the content of the database (i.e. drop and recreate all tables). """
self.base_model.drop_all()
self.base_model.create_all()
def get_session(self):
""" Get a session for the database
Returns:
:obj:`sqlalchemy.orm.session.Session`: database session
"""
return self.base_model.session
def upload_backup(self):
""" Dump and backup the database to Quilt """
# dump database
self.dump_database()
# create temporary directory to checkout package
tmp_dirname = tempfile.mkdtemp()
# install and export package
manager = wc_utils.quilt.QuiltManager(tmp_dirname, self.quilt_package, owner=self.quilt_owner)
manager.download(sym_links=True)
# copy new files to package
path = self._get_dump_path()
if os.path.exists(os.path.join(tmp_dirname, path)):
os.remove(os.path.join(tmp_dirname, path))
os.symlink(os.path.join(self.cache_dirname, path), os.path.join(tmp_dirname, path))
# build and push package
manager.upload()
# cleanup temporary directory
os.remove(os.path.join(tmp_dirname, path))
shutil.rmtree(tmp_dirname)
def restore_backup(self, restore_data=True, restore_schema=False, exit_on_error=True):
""" Download and restore the database from Quilt
Args:
restore_data (:obj:`bool`, optional): If :obj:`True`, restore data
restore_schema (:obj:`bool`, optional): If :obj:`True`, clear and restore schema
exit_on_error (:obj:`bool`, optional): If :obj:`True`, exit on errors
"""
# create temporary directory to checkout package
tmp_dirname = tempfile.mkdtemp()
# install and export dumped database from package
manager = wc_utils.quilt.QuiltManager(tmp_dirname, self.quilt_package, owner=self.quilt_owner)
path = self._get_dump_path()
manager.download(system_path=path, sym_links=True)
if not os.path.isdir(self.cache_dirname):
os.makedirs(self.cache_dirname)
if os.path.isfile(os.path.join(self.cache_dirname, path)):
os.remove(os.path.join(self.cache_dirname, path))
elif os.path.isdir(os.path.join(self.cache_dirname, path)):
shutil.rmtree(os.path.join(self.cache_dirname, path))
os.rename(os.path.join(tmp_dirname, path),
os.path.join(self.cache_dirname, path))
# cleanup temporary directory
shutil.rmtree(tmp_dirname)
# restore database
self.restore_database(restore_data=restore_data,
restore_schema=restore_schema,
exit_on_error=exit_on_error)
def dump_database(self):
""" Create a dump file of the Postgres database """
path = os.path.join(self.cache_dirname, self._get_dump_path())
if os.path.isfile(path):
os.remove(path)
cmd = [
'pg_dump',
'--dbname=' + str(self.base_model.engine.url),
'--no-owner',
'--no-privileges',
'--format=c',
'--file=' + path,
]
p = subprocess.Popen(cmd, stderr=subprocess.PIPE)
err = p.communicate()[1].decode()
if p.returncode != 0:
raise Exception(err)
if err:
print(err, file=sys.stderr)
def restore_database(self, restore_data=True, restore_schema=False, exit_on_error=True):
""" Restore a dump file of the Postgres database
Args:
restore_data (:obj:`bool`, optional): If :obj:`True`, restore data
restore_schema (:obj:`bool`, optional): If :obj:`True`, clear and restore schema
exit_on_error (:obj:`bool`, optional): If :obj:`True`, exit on errors
"""
cmd = [
'pg_restore',
'--dbname=' + str(self.base_model.engine.url),
'--no-owner', '--no-privileges',
os.path.join(self.cache_dirname, self._get_dump_path()),
]
if not restore_data:
cmd.append('--schema-only')
if restore_schema:
cmd.append('--clean')
else:
cmd.append('--data-only')
if exit_on_error:
cmd.append('--exit-on-error')
p = subprocess.Popen(cmd, stderr=subprocess.PIPE)
err = p.communicate()[1].decode()
# Return code is not checked because `pg_restore` exits with non-zero
# codes even without the `--exit-on-error` option. E.g. `pg_restore`
# exits with 1 when there are warnings for errors. See:
# https://stackoverflow.com/questions/32147653/how-do-i-reliably-determine-whether-pg-restore-succeeded-when-success-sometimes
#
# todo: try to uncomment below after implementing first migration and
# creating new database dump
#
# if p.returncode != 0:
# raise Exception(err)
if err:
print(err, file=sys.stderr)
def _get_dump_path(self):
""" Get the path where the dump of the database should be saved to or restored from
Returns:
:obj:`str`: path to the dump of the database
"""
return self.name + '.sql'
@abc.abstractmethod
def load_content(self):
""" Load the content of the local copy of the data source """
pass
def get_or_create_object(self, cls, **kwargs):
""" Get the SQLAlchemy object of type :obj:`cls` with attribute/value pairs specified by `**kwargs`. If
an object with these attribute/value pairs does not exist, create an object with these attribute/value pairs
and add it to the SQLAlchemy session.
Args:
cls (:obj:`class`): child class of :obj:`base_model`
**kwargs (:obj:`dict`, optional): attribute-value pairs of desired SQLAlchemy object of type :obj:`cls`
Returns:
:obj:`base_model`: SQLAlchemy object of type :obj:`cls`
"""
q = self.session.query(cls).filter_by(**kwargs)
self.session.flush()
if q.count():
return q.first()
else:
obj = cls(**kwargs)
self.session.add(obj)
return obj
class CachedDataSource(DataSource):
""" Represents an external data source that is cached locally in a sqlite database
Attributes:
filename (:obj:`str`): path to sqlite copy of the data source
cache_dirname (:obj:`str`): directory to store the local copy of the data source
engine (:obj:`sqlalchemy.engine.Engine`): SQLAlchemy engine
session (:obj:`sqlalchemy.orm.session.Session`): SQLAlchemy session
max_entries (:obj:`float`): maximum number of entries to save locally
commit_intermediate_results (:obj:`bool`): if :obj:`True`, commit the changes throughout the loading
process. This is particularly helpful for restarting this method when webservices go offline.
verbose (:obj:`bool`): if :obj:`True`, print status information to the standard output
quilt_owner (:obj:`str`): owner of Quilt package to save data
quilt_package (:obj:`str`): identifier of Quilt package to save data
        base_model (:obj:`Base`): base ORM model for the sqlite database
"""
def __init__(self, name=None, cache_dirname=None, clear_content=False, load_content=False, max_entries=float('inf'),
commit_intermediate_results=False, download_backups=True, verbose=False,
quilt_owner=None, quilt_package=None):
"""
Args:
name (:obj:`str`, optional): name
cache_dirname (:obj:`str`, optional): directory to store the local copy of the data source
clear_content (:obj:`bool`, optional): if :obj:`True`, clear the content of the sqlite local copy of the data source
load_content (:obj:`bool`, optional): if :obj:`True`, load the content of the local sqlite database from the external source
max_entries (:obj:`float`, optional): maximum number of entries to save locally
commit_intermediate_results (:obj:`bool`, optional): if :obj:`True`, commit the changes throughout the loading
process. This is particularly helpful for restarting this method when webservices go offline.
download_backups (:obj:`bool`, optional): if :obj:`True`, load the local copy of the data source from the Karr Lab server
verbose (:obj:`bool`, optional): if :obj:`True`, print status information to the standard output
quilt_owner (:obj:`str`, optional): owner of Quilt package to save data
quilt_package (:obj:`str`, optional): identifier of Quilt package to save data
"""
super(CachedDataSource, self).__init__(name=name, verbose=verbose)
""" Set settings """
# name
if not cache_dirname:
cache_dirname = DATA_CACHE_DIR
self.cache_dirname = cache_dirname
self.filename = os.path.join(cache_dirname, self.name + '.sqlite')
# loading
self.max_entries = max_entries
# committing
self.commit_intermediate_results = commit_intermediate_results
# set Quilt configuration
quilt_config = datanator.config.get_config()['datanator']['quilt']
self.quilt_owner = quilt_owner or quilt_config['owner']
self.quilt_package = quilt_package or quilt_config['package']
""" Create SQLAlchemy session and load content if necessary """
if os.path.isfile(self.filename):
self.engine = self.get_engine()
if clear_content:
self.clear_content()
self.session = self.get_session()
if load_content:
self.load_content()
elif download_backups:
self.download_backups()
self.engine = self.get_engine()
self.session = self.get_session()
if load_content:
self.load_content()
else:
self.engine = self.get_engine()
if clear_content:
self.clear_content()
self.session = self.get_session()
if load_content:
self.load_content()
def get_engine(self):
""" Get an engine for the sqlite database. If the database doesn't exist, initialize its structure.
Returns:
:obj:`sqlalchemy.engine.Engine`: database engine
"""
if not os.path.isdir(os.path.dirname(self.filename)):
os.makedirs(os.path.dirname(self.filename))
engine = sqlalchemy.create_engine('sqlite:///' + self.filename)
if not os.path.isfile(self.filename):
self.base_model.metadata.create_all(engine)
return engine
def clear_content(self):
""" Clear the content of the sqlite database (i.e. drop and recreate all tables). """
self.base_model.metadata.drop_all(self.engine)
self.base_model.metadata.create_all(self.engine)
def get_session(self):
""" Get a session for the sqlite database
Returns:
:obj:`sqlalchemy.orm.session.Session`: database session
"""
return sqlalchemy.orm.sessionmaker(bind=self.engine)()
def upload_backups(self):
""" Backup the local sqlite database to Quilt """
# create temporary directory to checkout package
tmp_dirname = tempfile.mkdtemp()
# install and export package
manager = wc_utils.quilt.QuiltManager(tmp_dirname, self.quilt_package, owner=self.quilt_owner)
manager.download(sym_links=True)
# copy new files to package
paths = self.get_paths_to_backup()
for path in paths:
if os.path.isfile(os.path.join(self.cache_dirname, path)):
if os.path.isfile(os.path.join(tmp_dirname, path)):
os.remove(os.path.join(tmp_dirname, path))
os.symlink(os.path.join(self.cache_dirname, path), os.path.join(tmp_dirname, path))
else:
if os.path.isdir(os.path.join(tmp_dirname, path)):
shutil.rmtree(os.path.join(tmp_dirname, path))
root_cache_path = os.path.join(self.cache_dirname, path)
root_tmp_path = os.path.join(tmp_dirname, path)
for abs_cache_dirname, subdirnames, filenames in os.walk(root_cache_path):
rel_dirname = os.path.relpath(abs_cache_dirname, root_cache_path)
for subdirname in subdirnames:
if rel_dirname == '.':
rel_subdirname = subdirname
else:
rel_subdirname = os.path.join(rel_dirname, subdirname)
os.makedirs(os.path.join(root_tmp_path, rel_subdirname))
for filename in filenames:
if rel_dirname == '.':
rel_filename = filename
else:
rel_filename = os.path.join(rel_dirname, filename)
os.symlink(os.path.join(root_cache_path, rel_filename),
os.path.join(root_tmp_path, rel_filename))
# build and push package
manager.upload()
# cleanup temporary directory
shutil.rmtree(tmp_dirname)
def download_backups(self):
""" Download the local sqlite database from Quilt """
# create temporary directory to checkout package
tmp_dirname = tempfile.mkdtemp()
# install and export package
manager = wc_utils.quilt.QuiltManager(tmp_dirname, self.quilt_package, owner=self.quilt_owner)
# copy requested files from package
paths = self.get_paths_to_backup(download=True)
for path in paths:
manager.download(system_path=path, sym_links=True)
if os.path.isfile(os.path.join(self.cache_dirname, path)):
os.remove(os.path.join(self.cache_dirname, path))
elif os.path.isdir(os.path.join(self.cache_dirname, path)):
shutil.rmtree(os.path.join(self.cache_dirname, path))
os.rename(os.path.join(tmp_dirname, path),
os.path.join(self.cache_dirname, path))
# cleanup temporary directory
shutil.rmtree(tmp_dirname)
def get_paths_to_backup(self, download=False):
""" Get a list of the files to backup/unpack
Args:
download (:obj:`bool`, optional): if :obj:`True`, prepare the files for uploading
Returns:
:obj:`list` of :obj:`str`: list of paths to backup
"""
paths = []
paths.append(self.name + '.sqlite')
return paths
@abc.abstractmethod
def load_content(self):
""" Load the content of the local copy of the data source """
pass
def get_or_create_object(self, cls, **kwargs):
""" Get the SQLAlchemy object of type :obj:`cls` with attribute/value pairs specified by `**kwargs`. If
an object with these attribute/value pairs does not exist, create an object with these attribute/value pairs
and add it to the SQLAlchemy session.
Args:
cls (:obj:`class`): child class of :obj:`base_model`
**kwargs (:obj:`dict`, optional): attribute-value pairs of desired SQLAlchemy object of type :obj:`cls`
Returns:
:obj:`base_model`: SQLAlchemy object of type :obj:`cls`
"""
q = self.session.query(cls).filter_by(**kwargs)
self.session.flush()
if q.count():
return q.first()
else:
obj = cls(**kwargs)
self.session.add(obj)
return obj
class FtpDataSource(CachedDataSource):
""" An external data source which can be obtained via a FTP interface
Attributes:
ENDPOINT_DOMAINS (:obj:`dict` of :obj:`str`, :obj:`str`): dictionary of domains to retry
"""
ENDPOINT_DOMAINS = {}
class HttpDataSource(CachedDataSource):
""" An external data source which can be obtained via a HTTP interface
Attributes:
requests_cache_filename (:obj:`str`): path to cache HTTP requests
requests_session (:obj:`requests_cache.core.CachedSession`): cache-enabled HTTP request session
ENDPOINT_DOMAINS (:obj:`dict` of :obj:`str`, :obj:`str`): dictionary of domains to retry
MAX_HTTP_RETRIES (:obj:`int`): maximum number of times to retry each HTTP request
"""
ENDPOINT_DOMAINS = {}
MAX_HTTP_RETRIES = 5
def __init__(self, name=None, cache_dirname=None, clear_content=False, load_content=False, max_entries=float('inf'),
commit_intermediate_results=False, download_backups=True, verbose=False,
clear_requests_cache=False, download_request_backup=False,
quilt_owner=None, quilt_package=None):
"""
Args:
name (:obj:`str`, optional): name
cache_dirname (:obj:`str`, optional): directory to store the local copy of the data source and the HTTP requests cache
clear_content (:obj:`bool`, optional): if :obj:`True`, clear the content of the sqlite local copy of the data source
load_content (:obj:`bool`, optional): if :obj:`True`, load the content of the local sqlite database from the external source
max_entries (:obj:`float`, optional): maximum number of entries to save locally
commit_intermediate_results (:obj:`bool`, optional): if :obj:`True`, commit the changes throughout the loading
process. This is particularly helpful for restarting this method when webservices go offline.
download_backups (:obj:`bool`, optional): if :obj:`True`, load the local copy of the data source from the Karr Lab server
verbose (:obj:`bool`, optional): if :obj:`True`, print status information to the standard output
clear_requests_cache (:obj:`bool`, optional): if :obj:`True`, clear the HTTP requests cache
download_request_backup (:obj:`bool`, optional): if :obj:`True`, download the request backup
quilt_owner (:obj:`str`, optional): owner of Quilt package to save data
quilt_package (:obj:`str`, optional): identifier of Quilt package to save data
"""
""" CachedDataSource settings """
if not name:
name = self.__class__.__name__
if not cache_dirname:
cache_dirname = DATA_CACHE_DIR
""" Request settings """
# todo (enhancement): avoid python version-specific requests cache; this currently is necessary because request_cache uses
# pickle which is not backwards compatible
self.requests_cache_filename = os.path.join(cache_dirname, name + '.requests.py{}.sqlite'.format(sys.version_info[0]))
self.requests_session = self.get_requests_session()
if clear_requests_cache:
self.clear_requests_cache()
self.download_request_backup = download_request_backup
""" Call superclass constructor which will optionally load content """
super(HttpDataSource, self).__init__(name=name, cache_dirname=cache_dirname,
clear_content=clear_content, load_content=load_content, max_entries=max_entries,
commit_intermediate_results=commit_intermediate_results,
download_backups=download_backups, verbose=verbose,
quilt_owner=quilt_owner, quilt_package=quilt_package)
def get_requests_session(self):
""" Setup an cache-enabled HTTP request session
Returns:
:obj:`requests_cache.core.CachedSession`: cached-enable session
"""
if not os.path.isdir(os.path.dirname(self.requests_cache_filename)):
os.makedirs(os.path.dirname(self.requests_cache_filename))
name, _, _ = self.requests_cache_filename.rpartition('.')
# create caching session
session = requests_cache.core.CachedSession(name, backend='sqlite', expire_after=None)
# setup retrying
for endpoint_domain in self.ENDPOINT_DOMAINS.values():
session.mount(endpoint_domain, requests.adapters.HTTPAdapter(max_retries=self.MAX_HTTP_RETRIES))
return session
def clear_requests_cache(self):
""" Clear the cache-enabled HTTP request session """
self.requests_session.cache.clear()
def get_paths_to_backup(self, download=False):
""" Get a list of the files to backup/unpack
Args:
download (:obj:`bool`, optional): if :obj:`True`, prepare the files for uploading
Returns:
:obj:`list` of :obj:`str`: paths to backup
"""
paths = super(HttpDataSource, self).get_paths_to_backup(download=download)
if not download or self.download_request_backup:
paths.append(self.name + '.requests.py{}.sqlite'.format(sys.version_info[0]))
return paths
class WebserviceDataSource(DataSource):
""" A data source that is a webservice
Attributes:
requests_session (:obj:`requests.Session`): cache-enabled HTTP request session
ENDPOINT_DOMAINS (:obj:`dict` of :obj:`str`, :obj:`str`): dictionary of domains to retry
MAX_HTTP_RETRIES (:obj:`int`): maximum number of times to retry each HTTP request
"""
ENDPOINT_DOMAINS = {}
MAX_HTTP_RETRIES = 5
def __init__(self):
self.requests_session = requests.Session()
for endpoint_domain in self.ENDPOINT_DOMAINS.values():
self.requests_session.mount(endpoint_domain, requests.adapters.HTTPAdapter(max_retries=self.MAX_HTTP_RETRIES))
class DataSourceWarning(UserWarning):
""" Data source warning """
pass
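# Illustrative sketch (not part of the original module): a concrete
# webservice-backed source only needs to declare its endpoint domains to get
# the retry-mounted session from WebserviceDataSource. The endpoint URL and
# record path below are purely hypothetical.
class ExampleWebserviceDataSource(WebserviceDataSource):
    """ Hypothetical data source that queries a REST endpoint with retries """
    ENDPOINT_DOMAINS = {
        'example': 'https://api.example.org',
    }

    def get_record(self, record_id):
        """ Fetch a single record from the (assumed) REST API """
        url = '{}/records/{}'.format(self.ENDPOINT_DOMAINS['example'], record_id)
        response = self.requests_session.get(url)
        response.raise_for_status()
        return response.json()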
|
|
#! /usr/bin/env python
# Copyright (c) 2015 Samuel Merritt <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import mock
from six import StringIO
import unittest
from test.unit import with_tempdir
from swift.cli.ring_builder_analyzer import parse_scenario, run_scenario
class TestRunScenario(unittest.TestCase):
@with_tempdir
def test_it_runs(self, tempdir):
builder_path = os.path.join(tempdir, 'test.builder')
scenario = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100],
['add', 'z2-3.4.5.6:7/sda9', 200],
['add', 'z2-3.4.5.6:7/sda10', 200],
['add', 'z2-3.4.5.6:7/sda11', 200]],
[['set_weight', 0, 150]],
[['remove', 1]],
[['save', builder_path]]]}
parsed = parse_scenario(json.dumps(scenario))
fake_stdout = StringIO()
with mock.patch('sys.stdout', fake_stdout):
run_scenario(parsed)
# Just test that it produced some output as it ran; the fact that
# this doesn't crash and produces output that resembles something
# useful is good enough.
self.assertIn('Rebalance', fake_stdout.getvalue())
self.assertTrue(os.path.exists(builder_path))
class TestParseScenario(unittest.TestCase):
def test_good(self):
scenario = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100],
['add', 'z2-3.4.5.6:7/sda9', 200]],
[['set_weight', 0, 150]],
[['remove', 1]]]}
parsed = parse_scenario(json.dumps(scenario))
self.assertEqual(parsed['replicas'], 3)
self.assertEqual(parsed['part_power'], 8)
self.assertEqual(parsed['random_seed'], 123)
self.assertEqual(parsed['overload'], 0)
self.assertEqual(parsed['rounds'], [
[['add', {'device': 'sda8',
'ip': '3.4.5.6',
'meta': '',
'port': 7,
'region': 1,
'replication_ip': '3.4.5.6',
'replication_port': 7,
'weight': 100.0,
'zone': 2}],
['add', {'device': u'sda9',
'ip': u'3.4.5.6',
'meta': '',
'port': 7,
'region': 1,
'replication_ip': '3.4.5.6',
'replication_port': 7,
'weight': 200.0,
'zone': 2}]],
[['set_weight', 0, 150.0]],
[['remove', 1]]])
# The rest of this test class is just a catalog of the myriad ways that
# the input can be malformed.
def test_invalid_json(self):
self.assertRaises(ValueError, parse_scenario, "{")
def test_json_not_object(self):
self.assertRaises(ValueError, parse_scenario, "[]")
self.assertRaises(ValueError, parse_scenario, "\"stuff\"")
def test_bad_replicas(self):
working_scenario = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100]]]}
busted = dict(working_scenario)
del busted['replicas']
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, replicas='blahblah')
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, replicas=-1)
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_part_power(self):
working_scenario = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100]]]}
busted = dict(working_scenario)
del busted['part_power']
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, part_power='blahblah')
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, part_power=0)
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, part_power=33)
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_random_seed(self):
working_scenario = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100]]]}
busted = dict(working_scenario)
del busted['random_seed']
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, random_seed='blahblah')
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_overload(self):
working_scenario = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0,
'rounds': [[['add', 'r1z2-3.4.5.6:7/sda8', 100]]]}
busted = dict(working_scenario)
del busted['overload']
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, overload='blahblah')
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(working_scenario, overload=-0.01)
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_rounds(self):
base = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0}
self.assertRaises(ValueError, parse_scenario, json.dumps(base))
busted = dict(base, rounds={})
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(base, rounds=[{}])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
busted = dict(base, rounds=[[['bork']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_add(self):
base = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0}
# no dev
busted = dict(base, rounds=[[['add']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# no weight
busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6200/d7']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# too many fields
busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6200/d7', 1, 2]]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# can't parse
busted = dict(base, rounds=[[['add', 'not a good value', 100]]])
# N.B. the ValueError's coming out of ring.utils.parse_add_value
# are already pretty good
expected = "Invalid device specifier (round 0, command 0): " \
"Invalid add value: not a good value"
try:
parse_scenario(json.dumps(busted))
except ValueError as err:
self.assertEqual(str(err), expected)
# negative weight
busted = dict(base, rounds=[[['add', 'r1z2-1.2.3.4:6200/d7', -1]]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_remove(self):
base = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0}
# no dev
busted = dict(base, rounds=[[['remove']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# bad dev id
busted = dict(base, rounds=[[['remove', 'not an int']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# too many fields
busted = dict(base, rounds=[[['remove', 1, 2]]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_set_weight(self):
base = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0}
# no dev
busted = dict(base, rounds=[[['set_weight']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# no weight
busted = dict(base, rounds=[[['set_weight', 0]]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# bad dev id
busted = dict(base, rounds=[[['set_weight', 'not an int', 90]]])
expected = "Invalid device ID in set_weight (round 0, command 0): " \
"invalid literal for int() with base 10: 'not an int'"
try:
parse_scenario(json.dumps(busted))
except ValueError as e:
self.assertEqual(str(e), expected)
# negative weight
busted = dict(base, rounds=[[['set_weight', 1, -1]]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
# bogus weight
busted = dict(base, rounds=[[['set_weight', 1, 'bogus']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
def test_bad_save(self):
base = {
'replicas': 3, 'part_power': 8, 'random_seed': 123, 'overload': 0}
# no builder name
busted = dict(base, rounds=[[['save']]])
self.assertRaises(ValueError, parse_scenario, json.dumps(busted))
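# Illustrative sketch (not part of the original tests, not called anywhere):
# the scenario format is a JSON object with ring parameters plus a list of
# "rounds" of commands, as exercised by the tests above; the values here are
# arbitrary.
def _example_minimal_scenario():
    scenario = {
        'replicas': 3, 'part_power': 8, 'random_seed': 1, 'overload': 0,
        'rounds': [[['add', 'r1z1-10.0.0.1:6200/sdb', 100]],
                   [['set_weight', 0, 50]]]}
    return parse_scenario(json.dumps(scenario))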
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Functions for computing the model, objective function, etc.
"""
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
def model(p, t):
"""The model.
The model has the functional form:
.. math::
m(t; p) = p_0 + p_1 \sin p_3 t + p_2 \cos p_3 t
Parameters
----------
p : array-like
List of parameters.
t : :class:`float` or :class:`~numpy.ndarray`
Time variable.
Returns
-------
:class:`float` or :class:`~numpy.ndarray`
The model evaluated for the inputs.
"""
return p[0] + p[1]*np.sin(p[3]*t) + p[2]*np.cos(p[3]*t)
def dmdp(p, t):
"""The gradient of the :func:`model` with respect to the parameters.
Given the :func:`model`, the gradient computed here is:
:math:`\partial m(t; p)/\partial p_i`.
Parameters
----------
p : array-like
List of parameters.
t : :class:`float` or :class:`~numpy.ndarray`
Time variable.
Returns
-------
:class:`~numpy.ndarray`
The gradient evaluated for the inputs.
"""
return np.vstack((
np.ones(t.shape, dtype=t.dtype),
np.sin(p[3]*t),
np.cos(p[3]*t),
t*p[1]*np.cos(p[3]*t)-t*p[2]*np.sin(p[3]*t)
))
def d2mdpdp(p, t):
"""The second derivative of the :func:`model` with respect to
the parameters.
Given the :func:`model`, the gradient computed here is:
:math:`\partial^2 m(t; p)/\partial p_i \partial p_j`.
Parameters
----------
p : array-like
List of parameters.
t : :class:`float` or :class:`~numpy.ndarray`
Time variable.
Returns
-------
:class:`~numpy.ndarray`
The second derivative evaluated for the inputs.
"""
H = np.zeros((p.size, p.size, t.size), dtype=p.dtype)
H[3, 3, :] = -t*t*p[1]*np.sin(p[3]*t) - t*t*p[2]*np.cos(p[3]*t)
H[1, 3, :] = H[3, 1, :] = t*np.cos(p[3]*t)
H[2, 3, :] = H[3, 2, :] = -t*np.sin(p[3]*t)
return H
def chin(p, x, t, s):
"""The value of :math:`\chi`. In some sense the residual at `x`.
Formally this is, :math:`\chi = (x - m(t; p))/\sigma`,
where :math:`x` are the (radial velocity) data values, and :math:`\sigma`
are the errors on those values.
Parameters
----------
p : array-like
List of parameters.
x : :class:`float` or :class:`~numpy.ndarray`
Independent (radial velocity) variable.
t : :class:`float` or :class:`~numpy.ndarray`
Time variable.
s : :class:`float` or :class:`~numpy.ndarray`
Error on `x`.
Returns
-------
:class:`~numpy.ndarray`
The residual.
"""
return (x - model(p, t))/s
def f2(p, x, t, s, Q=0):
"""Function of :math:`\chi` that will be summed to produce the final
objective function.
Parameters
----------
p : array-like
List of parameters.
x : :class:`float` or :class:`~numpy.ndarray`
Independent (radial velocity) variable.
t : :class:`float` or :class:`~numpy.ndarray`
Time variable.
s : :class:`float` or :class:`~numpy.ndarray`
Error on `x`.
Q : :class:`float`, optional
A regularization parameter, to suppress outliers.
Returns
-------
:class:`~numpy.ndarray`
A vector that when summed produces something like :math:`\chi^2`.
"""
chi = chin(p, x, t, s)
if Q == 0:
return chi**2
else:
return Q*chi**2/(chi**2 + Q)
def df2dp(p, x, t, s, Q=0):
"""Gradient of :func:`f2` with respect to the parameters.
Parameters
----------
p : array-like
List of parameters.
x : :class:`float` or :class:`~numpy.ndarray`
Independent (radial velocity) variable.
t : :class:`float` or :class:`~numpy.ndarray`
Time variable.
s : :class:`float` or :class:`~numpy.ndarray`
Error on `x`.
Q : :class:`float`, optional
A regularization parameter, to suppress outliers.
Returns
-------
:class:`~numpy.ndarray`
A vector that when summed produces something like
:math:`\partial \chi^2/\partial p_i`.
"""
chi = chin(p, x, t, s)
d = dmdp(p, t)
if Q == 0:
f = -2*chi/s
else:
f = -2*chi/s * (Q**2 / (chi**2 + Q)**2)
return f*d
def d2f2dpdp(p, x, t, s, Q=0):
"""Second derivative of :func:`f2` with respect to the parameters.
Parameters
----------
p : array-like
List of parameters.
x : :class:`float` or :class:`~numpy.ndarray`
Independent (radial velocity) variable.
t : :class:`float` or :class:`~numpy.ndarray`
Time variable.
s : :class:`float` or :class:`~numpy.ndarray`
Error on `x`.
Q : :class:`float`, optional
A regularization parameter, to suppress outliers.
Returns
-------
:class:`~numpy.ndarray`
A vector that when summed produces something like
:math:`\partial^2 \chi^2/\partial p_i \partial p_j`.
"""
H = np.zeros((p.size, p.size, t.size), dtype=p.dtype)
chi = chin(p, x, t, s)
d = dmdp(p, t)
dd = d2mdpdp(p, t)
for i in range(p.size):
for j in range(p.size):
H[i, j, :] = d[i, :] * d[j, :]
if Q == 0:
f = -2*chi/s
f2 = 2/s**2
else:
f = -2*chi/s * (Q**2 / (chi**2 + Q)**2)
f2 = 2*(Q**2/s**2)*((Q**2 - 3*chi**2)/(chi**2 + Q)**3)
return f2*H + f*dd
def obj(p, x, t, s, Q=0):
"""The objective function, that is the function to be minimized.
Parameters
----------
p : array-like
List of parameters.
x : :class:`float` or :class:`~numpy.ndarray`
Independent (radial velocity) variable.
t : :class:`float` or :class:`~numpy.ndarray`
Time variable.
s : :class:`float` or :class:`~numpy.ndarray`
Error on `x`.
Q : :class:`float`, optional
A regularization parameter, to suppress outliers.
Returns
-------
:class:`~numpy.ndarray`
The objective function.
"""
return f2(p, x, t, s, Q).sum()
def dobj(p, x, t, s, Q=0):
"""Gradient of the objective function with respect to the parameters.
Parameters
----------
p : array-like
List of parameters.
x : :class:`float` or :class:`~numpy.ndarray`
Independent (radial velocity) variable.
t : :class:`float` or :class:`~numpy.ndarray`
Time variable.
s : :class:`float` or :class:`~numpy.ndarray`
Error on `x`.
Q : :class:`float`, optional
A regularization parameter, to suppress outliers.
Returns
-------
:class:`~numpy.ndarray`
The first derivative of the objective function.
"""
return df2dp(p, x, t, s, Q).sum(1)
def d2obj(p, x, t, s, Q=0):
"""Second derivative of the objective function with respect to the
parameters.
Parameters
----------
p : array-like
List of parameters.
x : :class:`float` or :class:`~numpy.ndarray`
Independent (radial velocity) variable.
t : :class:`float` or :class:`~numpy.ndarray`
Time variable.
s : :class:`float` or :class:`~numpy.ndarray`
Error on `x`.
Q : :class:`float`, optional
A regularization parameter, to suppress outliers.
Returns
-------
:class:`~numpy.ndarray`
The second derivative of the objective function.
"""
return d2f2dpdp(p, x, t, s, Q).sum(2)
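# Illustrative sketch (not part of the original module): the objective and its
# analytic gradient defined above can be handed straight to a standard
# optimizer. The parameter values, noise level and optimizer choice below are
# arbitrary demonstration choices.
if __name__ == '__main__':
    from scipy.optimize import minimize
    rng = np.random.RandomState(42)
    p_true = np.array([1.0, 2.0, 0.5, 0.3])
    t = np.linspace(0.0, 50.0, 200)
    s = 0.1 * np.ones_like(t)
    x = model(p_true, t) + s * rng.standard_normal(t.size)
    p0 = np.array([0.5, 1.5, 1.0, 0.25])
    fit = minimize(obj, p0, args=(x, t, s, 0), jac=dobj, method='BFGS')
    print("true parameters:     ", p_true)
    print("recovered parameters:", fit.x)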
|
|
import json
import os
from datetime import datetime
from moto.core import ACCOUNT_ID, BaseBackend, BaseModel, CloudFormationModel
from moto.core.exceptions import RESTError
from moto.core.utils import BackendDict
from moto.sagemaker import validators
from moto.utilities.paginator import paginate
from .exceptions import (
MissingModel,
ValidationError,
AWSValidationException,
ResourceNotFound,
)
PAGINATION_MODEL = {
"list_experiments": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"limit_default": 100,
"unique_attribute": "experiment_arn",
"fail_on_invalid_token": True,
},
"list_trials": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"limit_default": 100,
"unique_attribute": "trial_arn",
"fail_on_invalid_token": True,
},
"list_trial_components": {
"input_token": "NextToken",
"limit_key": "MaxResults",
"limit_default": 100,
"unique_attribute": "trial_component_arn",
"fail_on_invalid_token": True,
},
}
class BaseObject(BaseModel):
def camelCase(self, key):
words = []
for word in key.split("_"):
words.append(word.title())
return "".join(words)
def update(self, details_json):
details = json.loads(details_json)
for k in details.keys():
setattr(self, k, details[k])
def gen_response_object(self):
response_object = dict()
for key, value in self.__dict__.items():
if "_" in key:
response_object[self.camelCase(key)] = value
else:
response_object[key[0].upper() + key[1:]] = value
return response_object
@property
def response_object(self):
return self.gen_response_object()
class FakeProcessingJob(BaseObject):
def __init__(
self,
app_specification,
experiment_config,
network_config,
processing_inputs,
processing_job_name,
processing_output_config,
processing_resources,
region_name,
role_arn,
stopping_condition,
):
self.processing_job_name = processing_job_name
self.processing_job_arn = FakeProcessingJob.arn_formatter(
processing_job_name, region_name
)
now_string = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.creation_time = now_string
self.last_modified_time = now_string
self.processing_end_time = now_string
self.role_arn = role_arn
self.app_specification = app_specification
self.experiment_config = experiment_config
self.network_config = network_config
self.processing_inputs = processing_inputs
self.processing_job_status = "Completed"
self.processing_output_config = processing_output_config
self.stopping_condition = stopping_condition
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"ProcessingJobArn": self.processing_job_arn}
@staticmethod
def arn_formatter(endpoint_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":processing-job/"
+ endpoint_name
)
class FakeTrainingJob(BaseObject):
def __init__(
self,
region_name,
training_job_name,
hyper_parameters,
algorithm_specification,
role_arn,
input_data_config,
output_data_config,
resource_config,
vpc_config,
stopping_condition,
tags,
enable_network_isolation,
enable_inter_container_traffic_encryption,
enable_managed_spot_training,
checkpoint_config,
debug_hook_config,
debug_rule_configurations,
tensor_board_output_config,
experiment_config,
):
self.training_job_name = training_job_name
self.hyper_parameters = hyper_parameters
self.algorithm_specification = algorithm_specification
self.role_arn = role_arn
self.input_data_config = input_data_config
self.output_data_config = output_data_config
self.resource_config = resource_config
self.vpc_config = vpc_config
self.stopping_condition = stopping_condition
self.tags = tags
self.enable_network_isolation = enable_network_isolation
self.enable_inter_container_traffic_encryption = (
enable_inter_container_traffic_encryption
)
self.enable_managed_spot_training = enable_managed_spot_training
self.checkpoint_config = checkpoint_config
self.debug_hook_config = debug_hook_config
self.debug_rule_configurations = debug_rule_configurations
self.tensor_board_output_config = tensor_board_output_config
self.experiment_config = experiment_config
self.training_job_arn = FakeTrainingJob.arn_formatter(
training_job_name, region_name
)
self.creation_time = self.last_modified_time = datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
)
self.model_artifacts = {
"S3ModelArtifacts": os.path.join(
self.output_data_config["S3OutputPath"],
self.training_job_name,
"output",
"model.tar.gz",
)
}
self.training_job_status = "Completed"
self.secondary_status = "Completed"
self.algorithm_specification["MetricDefinitions"] = [
{
"Name": "test:dcg",
"Regex": "#quality_metric: host=\\S+, test dcg <score>=(\\S+)",
}
]
now_string = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.creation_time = now_string
self.last_modified_time = now_string
self.training_start_time = now_string
self.training_end_time = now_string
self.secondary_status_transitions = [
{
"Status": "Starting",
"StartTime": self.creation_time,
"EndTime": self.creation_time,
"StatusMessage": "Preparing the instances for training",
}
]
self.final_metric_data_list = [
{
"MetricName": "train:progress",
"Value": 100.0,
"Timestamp": self.creation_time,
}
]
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"TrainingJobArn": self.training_job_arn}
@staticmethod
def arn_formatter(endpoint_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":training-job/"
+ endpoint_name
)
class FakeEndpoint(BaseObject, CloudFormationModel):
def __init__(
self,
region_name,
endpoint_name,
endpoint_config_name,
production_variants,
data_capture_config,
tags,
):
self.endpoint_name = endpoint_name
self.endpoint_arn = FakeEndpoint.arn_formatter(endpoint_name, region_name)
self.endpoint_config_name = endpoint_config_name
self.production_variants = production_variants
self.data_capture_config = data_capture_config
self.tags = tags or []
self.endpoint_status = "InService"
self.failure_reason = None
self.creation_time = self.last_modified_time = datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
)
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"EndpointArn": self.endpoint_arn}
@staticmethod
def arn_formatter(endpoint_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":endpoint/"
+ endpoint_name
)
@property
def physical_resource_id(self):
return self.endpoint_arn
@classmethod
def has_cfn_attr(cls, attribute):
return attribute in ["EndpointName"]
def get_cfn_attribute(self, attribute_name):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-endpoint.html#aws-resource-sagemaker-endpoint-return-values
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "EndpointName":
return self.endpoint_name
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-endpoint.html
return "AWS::SageMaker::Endpoint"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
sagemaker_backend = sagemaker_backends[region_name]
# Get required properties from provided CloudFormation template
properties = cloudformation_json["Properties"]
endpoint_config_name = properties["EndpointConfigName"]
endpoint = sagemaker_backend.create_endpoint(
endpoint_name=resource_name,
endpoint_config_name=endpoint_config_name,
tags=properties.get("Tags", []),
)
return endpoint
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
# Endpoint updates keep the same resource name, so recreate the endpoint under its existing name.
cls.delete_from_cloudformation_json(
original_resource.endpoint_arn, cloudformation_json, region_name
)
new_resource = cls.create_from_cloudformation_json(
original_resource.endpoint_name, cloudformation_json, region_name
)
return new_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# Get actual name because resource_name actually provides the ARN
# since the Physical Resource ID is the ARN despite SageMaker
# using the name for most of its operations.
endpoint_name = resource_name.split("/")[-1]
sagemaker_backends[region_name].delete_endpoint(endpoint_name)
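# Fake SageMaker endpoint configuration; production-variant instance types are validated
# against a fixed allow-list before the config is stored.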
class FakeEndpointConfig(BaseObject, CloudFormationModel):
def __init__(
self,
region_name,
endpoint_config_name,
production_variants,
data_capture_config,
tags,
kms_key_id,
):
self.validate_production_variants(production_variants)
self.endpoint_config_name = endpoint_config_name
self.endpoint_config_arn = FakeEndpointConfig.arn_formatter(
endpoint_config_name, region_name
)
self.production_variants = production_variants or []
self.data_capture_config = data_capture_config or {}
self.tags = tags or []
self.kms_key_id = kms_key_id
self.creation_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def validate_production_variants(self, production_variants):
for production_variant in production_variants:
self.validate_instance_type(production_variant["InstanceType"])
def validate_instance_type(self, instance_type):
VALID_INSTANCE_TYPES = [
"ml.r5d.12xlarge",
"ml.r5.12xlarge",
"ml.p2.xlarge",
"ml.m5.4xlarge",
"ml.m4.16xlarge",
"ml.r5d.24xlarge",
"ml.r5.24xlarge",
"ml.p3.16xlarge",
"ml.m5d.xlarge",
"ml.m5.large",
"ml.t2.xlarge",
"ml.p2.16xlarge",
"ml.m5d.12xlarge",
"ml.inf1.2xlarge",
"ml.m5d.24xlarge",
"ml.c4.2xlarge",
"ml.c5.2xlarge",
"ml.c4.4xlarge",
"ml.inf1.6xlarge",
"ml.c5d.2xlarge",
"ml.c5.4xlarge",
"ml.g4dn.xlarge",
"ml.g4dn.12xlarge",
"ml.c5d.4xlarge",
"ml.g4dn.2xlarge",
"ml.c4.8xlarge",
"ml.c4.large",
"ml.c5d.xlarge",
"ml.c5.large",
"ml.g4dn.4xlarge",
"ml.c5.9xlarge",
"ml.g4dn.16xlarge",
"ml.c5d.large",
"ml.c5.xlarge",
"ml.c5d.9xlarge",
"ml.c4.xlarge",
"ml.inf1.xlarge",
"ml.g4dn.8xlarge",
"ml.inf1.24xlarge",
"ml.m5d.2xlarge",
"ml.t2.2xlarge",
"ml.c5d.18xlarge",
"ml.m5d.4xlarge",
"ml.t2.medium",
"ml.c5.18xlarge",
"ml.r5d.2xlarge",
"ml.r5.2xlarge",
"ml.p3.2xlarge",
"ml.m5d.large",
"ml.m5.xlarge",
"ml.m4.10xlarge",
"ml.t2.large",
"ml.r5d.4xlarge",
"ml.r5.4xlarge",
"ml.m5.12xlarge",
"ml.m4.xlarge",
"ml.m5.24xlarge",
"ml.m4.2xlarge",
"ml.p2.8xlarge",
"ml.m5.2xlarge",
"ml.r5d.xlarge",
"ml.r5d.large",
"ml.r5.xlarge",
"ml.r5.large",
"ml.p3.8xlarge",
"ml.m4.4xlarge",
]
if not validators.is_one_of(instance_type, VALID_INSTANCE_TYPES):
message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: {}".format(
instance_type, VALID_INSTANCE_TYPES
)
raise ValidationError(message=message)
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"EndpointConfigArn": self.endpoint_config_arn}
@staticmethod
def arn_formatter(endpoint_config_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":endpoint-config/"
+ endpoint_config_name
)
@property
def physical_resource_id(self):
return self.endpoint_config_arn
@classmethod
def has_cfn_attr(cls, attribute):
return attribute in ["EndpointConfigName"]
def get_cfn_attribute(self, attribute_name):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-endpointconfig.html#aws-resource-sagemaker-endpointconfig-return-values
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "EndpointConfigName":
return self.endpoint_config_name
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-endpointconfig.html
return "AWS::SageMaker::EndpointConfig"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
sagemaker_backend = sagemaker_backends[region_name]
# Get required properties from provided CloudFormation template
properties = cloudformation_json["Properties"]
production_variants = properties["ProductionVariants"]
endpoint_config = sagemaker_backend.create_endpoint_config(
endpoint_config_name=resource_name,
production_variants=production_variants,
data_capture_config=properties.get("DataCaptureConfig", {}),
kms_key_id=properties.get("KmsKeyId"),
tags=properties.get("Tags", []),
)
return endpoint_config
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
# Most endpoint config updates require replacement, so delete the old resource and create a new one under the new name.
cls.delete_from_cloudformation_json(
original_resource.endpoint_config_arn, cloudformation_json, region_name
)
new_resource = cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
return new_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# Get actual name because resource_name actually provides the ARN
# since the Physical Resource ID is the ARN despite SageMaker
# using the name for most of its operations.
endpoint_config_name = resource_name.split("/")[-1]
sagemaker_backends[region_name].delete_endpoint_config(endpoint_config_name)
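# Fake SageMaker model resource with the same CloudFormation lifecycle hooks as the endpoint classes above.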
class Model(BaseObject, CloudFormationModel):
def __init__(
self,
region_name,
model_name,
execution_role_arn,
primary_container,
vpc_config,
containers=None,
tags=None,
):
self.model_name = model_name
self.creation_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.containers = containers or []
self.tags = tags or []
self.enable_network_isolation = False
self.vpc_config = vpc_config
self.primary_container = primary_container
self.execution_role_arn = execution_role_arn or "arn:test"
self.model_arn = self.arn_for_model_name(self.model_name, region_name)
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"ModelArn": self.model_arn}
@staticmethod
def arn_for_model_name(model_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":model/"
+ model_name
)
@property
def physical_resource_id(self):
return self.model_arn
@classmethod
def has_cfn_attr(cls, attribute):
return attribute in ["ModelName"]
def get_cfn_attribute(self, attribute_name):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-model.html#aws-resource-sagemaker-model-return-values
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "ModelName":
return self.model_name
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-model.html
return "AWS::SageMaker::Model"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
sagemaker_backend = sagemaker_backends[region_name]
# Get required properties from provided CloudFormation template
properties = cloudformation_json["Properties"]
execution_role_arn = properties["ExecutionRoleArn"]
primary_container = properties["PrimaryContainer"]
model = sagemaker_backend.create_model(
ModelName=resource_name,
ExecutionRoleArn=execution_role_arn,
PrimaryContainer=primary_container,
VpcConfig=properties.get("VpcConfig", {}),
Containers=properties.get("Containers", []),
Tags=properties.get("Tags", []),
)
return model
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
# Most model updates require replacement, so delete the old resource and create a new one under the new name.
cls.delete_from_cloudformation_json(
original_resource.model_arn, cloudformation_json, region_name
)
new_resource = cls.create_from_cloudformation_json(
new_resource_name, cloudformation_json, region_name
)
return new_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# Get actual name because resource_name actually provides the ARN
# since the Physical Resource ID is the ARN despite SageMaker
# using the name for most of its operations.
model_name = resource_name.split("/")[-1]
sagemaker_backends[region_name].delete_model(model_name)
class VpcConfig(BaseObject):
def __init__(self, security_group_ids, subnets):
self.security_group_ids = security_group_ids
self.subnets = subnets
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
class Container(BaseObject):
def __init__(self, **kwargs):
self.container_hostname = kwargs.get("container_hostname", "localhost")
self.model_data_url = kwargs.get("data_url", "")
self.model_package_name = kwargs.get("package_name", "pkg")
self.image = kwargs.get("image", "")
self.environment = kwargs.get("environment", {})
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
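# Fake notebook instance; volume size and instance type are validated on creation and the
# instance starts in the "InService" state.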
class FakeSagemakerNotebookInstance(CloudFormationModel):
def __init__(
self,
region_name,
notebook_instance_name,
instance_type,
role_arn,
subnet_id,
security_group_ids,
kms_key_id,
tags,
lifecycle_config_name,
direct_internet_access,
volume_size_in_gb,
accelerator_types,
default_code_repository,
additional_code_repositories,
root_access,
):
self.validate_volume_size_in_gb(volume_size_in_gb)
self.validate_instance_type(instance_type)
self.region_name = region_name
self.notebook_instance_name = notebook_instance_name
self.instance_type = instance_type
self.role_arn = role_arn
self.subnet_id = subnet_id
self.security_group_ids = security_group_ids
self.kms_key_id = kms_key_id
self.tags = tags or []
self.lifecycle_config_name = lifecycle_config_name
self.direct_internet_access = direct_internet_access
self.volume_size_in_gb = volume_size_in_gb
self.accelerator_types = accelerator_types
self.default_code_repository = default_code_repository
self.additional_code_repositories = additional_code_repositories
self.root_access = root_access
self.status = None
self.creation_time = self.last_modified_time = datetime.now()
self.start()
def validate_volume_size_in_gb(self, volume_size_in_gb):
if not validators.is_integer_between(volume_size_in_gb, mn=5, optional=True):
message = "Invalid range for parameter VolumeSizeInGB, value: {}, valid range: 5-inf"
raise ValidationError(message=message)
def validate_instance_type(self, instance_type):
VALID_INSTANCE_TYPES = [
"ml.p2.xlarge",
"ml.m5.4xlarge",
"ml.m4.16xlarge",
"ml.t3.xlarge",
"ml.p3.16xlarge",
"ml.t2.xlarge",
"ml.p2.16xlarge",
"ml.c4.2xlarge",
"ml.c5.2xlarge",
"ml.c4.4xlarge",
"ml.c5d.2xlarge",
"ml.c5.4xlarge",
"ml.c5d.4xlarge",
"ml.c4.8xlarge",
"ml.c5d.xlarge",
"ml.c5.9xlarge",
"ml.c5.xlarge",
"ml.c5d.9xlarge",
"ml.c4.xlarge",
"ml.t2.2xlarge",
"ml.c5d.18xlarge",
"ml.t3.2xlarge",
"ml.t3.medium",
"ml.t2.medium",
"ml.c5.18xlarge",
"ml.p3.2xlarge",
"ml.m5.xlarge",
"ml.m4.10xlarge",
"ml.t2.large",
"ml.m5.12xlarge",
"ml.m4.xlarge",
"ml.t3.large",
"ml.m5.24xlarge",
"ml.m4.2xlarge",
"ml.p2.8xlarge",
"ml.m5.2xlarge",
"ml.p3.8xlarge",
"ml.m4.4xlarge",
]
if not validators.is_one_of(instance_type, VALID_INSTANCE_TYPES):
message = "Value '{}' at 'instanceType' failed to satisfy constraint: Member must satisfy enum value set: {}".format(
instance_type, VALID_INSTANCE_TYPES
)
raise ValidationError(message=message)
@property
def arn(self):
return (
"arn:aws:sagemaker:"
+ self.region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":notebook-instance/"
+ self.notebook_instance_name
)
@property
def url(self):
return "{}.notebook.{}.sagemaker.aws".format(
self.notebook_instance_name, self.region_name
)
def start(self):
self.status = "InService"
@property
def is_deletable(self):
return self.status in ["Stopped", "Failed"]
def stop(self):
self.status = "Stopped"
@property
def physical_resource_id(self):
return self.arn
@classmethod
def has_cfn_attr(cls, attribute):
return attribute in ["NotebookInstanceName"]
def get_cfn_attribute(self, attribute_name):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-notebookinstance.html#aws-resource-sagemaker-notebookinstance-return-values
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "NotebookInstanceName":
return self.notebook_instance_name
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-notebookinstance.html
return "AWS::SageMaker::NotebookInstance"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
# Get required properties from provided CloudFormation template
properties = cloudformation_json["Properties"]
instance_type = properties["InstanceType"]
role_arn = properties["RoleArn"]
notebook = sagemaker_backends[region_name].create_notebook_instance(
notebook_instance_name=resource_name,
instance_type=instance_type,
role_arn=role_arn,
)
return notebook
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
# Operations keep same resource name so delete old and create new to mimic update
cls.delete_from_cloudformation_json(
original_resource.arn, cloudformation_json, region_name
)
new_resource = cls.create_from_cloudformation_json(
original_resource.notebook_instance_name, cloudformation_json, region_name
)
return new_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# Get actual name because resource_name actually provides the ARN
# since the Physical Resource ID is the ARN despite SageMaker
# using the name for most of its operations.
notebook_instance_name = resource_name.split("/")[-1]
backend = sagemaker_backends[region_name]
backend.stop_notebook_instance(notebook_instance_name)
backend.delete_notebook_instance(notebook_instance_name)
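# Fake notebook instance lifecycle configuration (OnCreate / OnStart scripts), addressable by name or ARN.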
class FakeSageMakerNotebookInstanceLifecycleConfig(BaseObject, CloudFormationModel):
def __init__(
self, region_name, notebook_instance_lifecycle_config_name, on_create, on_start
):
self.region_name = region_name
self.notebook_instance_lifecycle_config_name = (
notebook_instance_lifecycle_config_name
)
self.on_create = on_create
self.on_start = on_start
self.creation_time = self.last_modified_time = datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
)
self.notebook_instance_lifecycle_config_arn = FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter(
self.notebook_instance_lifecycle_config_name, self.region_name
)
@staticmethod
def arn_formatter(notebook_instance_lifecycle_config_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":notebook-instance-lifecycle-configuration/"
+ notebook_instance_lifecycle_config_name
)
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"TrainingJobArn": self.training_job_arn}
@property
def physical_resource_id(self):
return self.notebook_instance_lifecycle_config_arn
@classmethod
def has_cfn_attr(cls, attribute):
return attribute in ["NotebookInstanceLifecycleConfigName"]
def get_cfn_attribute(self, attribute_name):
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-notebookinstancelifecycleconfig.html#aws-resource-sagemaker-notebookinstancelifecycleconfig-return-values
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "NotebookInstanceLifecycleConfigName":
return self.notebook_instance_lifecycle_config_name
raise UnformattedGetAttTemplateException()
@staticmethod
def cloudformation_name_type():
return None
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-sagemaker-notebookinstancelifecycleconfig.html
return "AWS::SageMaker::NotebookInstanceLifecycleConfig"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
config = sagemaker_backends[
region_name
].create_notebook_instance_lifecycle_config(
notebook_instance_lifecycle_config_name=resource_name,
on_create=properties.get("OnCreate"),
on_start=properties.get("OnStart"),
)
return config
@classmethod
def update_from_cloudformation_json(
cls, original_resource, new_resource_name, cloudformation_json, region_name,
):
# Operations keep same resource name so delete old and create new to mimic update
cls.delete_from_cloudformation_json(
original_resource.notebook_instance_lifecycle_config_arn,
cloudformation_json,
region_name,
)
new_resource = cls.create_from_cloudformation_json(
original_resource.notebook_instance_lifecycle_config_name,
cloudformation_json,
region_name,
)
return new_resource
@classmethod
def delete_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name
):
# Get actual name because resource_name actually provides the ARN
# since the Physical Resource ID is the ARN despite SageMaker
# using the name for most of its operations.
config_name = resource_name.split("/")[-1]
backend = sagemaker_backends[region_name]
backend.delete_notebook_instance_lifecycle_config(config_name)
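# Region-scoped in-memory backend: every fake resource type (models, endpoints, endpoint configs,
# notebook instances, lifecycle configs, experiments, trials, trial components, training and
# processing jobs) lives in a plain dict keyed by resource name.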
class SageMakerModelBackend(BaseBackend):
def __init__(self, region_name=None):
self._models = {}
self.notebook_instances = {}
self.endpoint_configs = {}
self.endpoints = {}
self.experiments = {}
self.processing_jobs = {}
self.trials = {}
self.trial_components = {}
self.training_jobs = {}
self.notebook_instance_lifecycle_configurations = {}
self.region_name = region_name
def reset(self):
region_name = self.region_name
self.__dict__ = {}
self.__init__(region_name)
@staticmethod
def default_vpc_endpoint_service(service_region, zones):
"""Default VPC endpoint services."""
api_service = BaseBackend.default_vpc_endpoint_service_factory(
service_region, zones, "api.sagemaker", special_service_name="sagemaker.api"
)
notebook_service_id = f"vpce-svc-{BaseBackend.vpce_random_number()}"
studio_service_id = f"vpce-svc-{BaseBackend.vpce_random_number()}"
notebook_service = {
"AcceptanceRequired": False,
"AvailabilityZones": zones,
"BaseEndpointDnsNames": [
f"{notebook_service_id}.{service_region}.vpce.amazonaws.com",
f"notebook.{service_region}.vpce.sagemaker.aws",
],
"ManagesVpcEndpoints": False,
"Owner": "amazon",
"PrivateDnsName": f"*.notebook.{service_region}.sagemaker.aws",
"PrivateDnsNameVerificationState": "verified",
"PrivateDnsNames": [
{"PrivateDnsName": f"*.notebook.{service_region}.sagemaker.aws"}
],
"ServiceId": notebook_service_id,
"ServiceName": f"aws.sagemaker.{service_region}.notebook",
"ServiceType": [{"ServiceType": "Interface"}],
"Tags": [],
"VpcEndpointPolicySupported": True,
}
studio_service = {
"AcceptanceRequired": False,
"AvailabilityZones": zones,
"BaseEndpointDnsNames": [
f"{studio_service_id}.{service_region}.vpce.amazonaws.com",
f"studio.{service_region}.vpce.sagemaker.aws",
],
"ManagesVpcEndpoints": False,
"Owner": "amazon",
"PrivateDnsName": f"*.studio.{service_region}.sagemaker.aws",
"PrivateDnsNameVerificationState": "verified",
"PrivateDnsNames": [
{"PrivateDnsName": f"*.studio.{service_region}.sagemaker.aws"}
],
"ServiceId": studio_service_id,
"ServiceName": f"aws.sagemaker.{service_region}.studio",
"ServiceType": [{"ServiceType": "Interface"}],
"Tags": [],
"VpcEndpointPolicySupported": True,
}
return api_service + [notebook_service, studio_service]
def create_model(self, **kwargs):
model_obj = Model(
region_name=self.region_name,
model_name=kwargs.get("ModelName"),
execution_role_arn=kwargs.get("ExecutionRoleArn"),
primary_container=kwargs.get("PrimaryContainer", {}),
vpc_config=kwargs.get("VpcConfig", {}),
containers=kwargs.get("Containers", []),
tags=kwargs.get("Tags", []),
)
self._models[kwargs.get("ModelName")] = model_obj
return model_obj
def describe_model(self, model_name=None):
model = self._models.get(model_name)
if model:
return model
message = "Could not find model '{}'.".format(
Model.arn_for_model_name(model_name, self.region_name)
)
raise ValidationError(message=message)
def list_models(self):
return self._models.values()
def delete_model(self, model_name=None):
for model in self._models.values():
if model.model_name == model_name:
self._models.pop(model.model_name)
break
else:
raise MissingModel(model=model_name)
def create_experiment(self, experiment_name):
experiment = FakeExperiment(
region_name=self.region_name, experiment_name=experiment_name, tags=[]
)
self.experiments[experiment_name] = experiment
return experiment.response_create
def describe_experiment(self, experiment_name):
experiment_data = self.experiments[experiment_name]
return {
"ExperimentName": experiment_data.experiment_name,
"ExperimentArn": experiment_data.experiment_arn,
"CreationTime": experiment_data.creation_time,
"LastModifiedTime": experiment_data.last_modified_time,
}
def add_tags_to_experiment(self, experiment_arn, tags):
experiment = [
self.experiments[i]
for i in self.experiments
if self.experiments[i].experiment_arn == experiment_arn
][0]
experiment.tags.extend(tags)
def add_tags_to_trial(self, trial_arn, tags):
trial = [
self.trials[i] for i in self.trials if self.trials[i].trial_arn == trial_arn
][0]
trial.tags.extend(tags)
def add_tags_to_trial_component(self, trial_component_arn, tags):
trial_component = [
self.trial_components[i]
for i in self.trial_components
if self.trial_components[i].trial_component_arn == trial_component_arn
][0]
trial_component.tags.extend(tags)
def delete_tags_from_experiment(self, experiment_arn, tag_keys):
experiment = [
self.experiments[i]
for i in self.experiments
if self.experiments[i].experiment_arn == experiment_arn
][0]
experiment.tags = [tag for tag in experiment.tags if tag["Key"] not in tag_keys]
def delete_tags_from_trial(self, trial_arn, tag_keys):
trial = [
self.trials[i] for i in self.trials if self.trials[i].trial_arn == trial_arn
][0]
trial.tags = [tag for tag in trial.tags if tag["Key"] not in tag_keys]
def delete_tags_from_trial_component(self, trial_component_arn, tag_keys):
trial_component = [
self.trial_components[i]
for i in self.trial_components
if self.trial_components[i].trial_component_arn == trial_component_arn
][0]
trial_component.tags = [
tag for tag in trial_component.tags if tag["Key"] not in tag_keys
]
@paginate(pagination_model=PAGINATION_MODEL)
def list_experiments(self):
return list(self.experiments.values())
def search(self, resource=None, search_expression=None):
next_index = None
valid_resources = [
"Pipeline",
"ModelPackageGroup",
"TrainingJob",
"ExperimentTrialComponent",
"FeatureGroup",
"Endpoint",
"PipelineExecution",
"Project",
"ExperimentTrial",
"Image",
"ImageVersion",
"ModelPackage",
"Experiment",
]
if resource not in valid_resources:
raise AWSValidationException(
f"An error occurred (ValidationException) when calling the Search operation: 1 validation error detected: Value '{resource}' at 'resource' failed to satisfy constraint: Member must satisfy enum value set: {valid_resources}"
)
def evaluate_search_expression(item):
filters = None
if search_expression is not None:
filters = search_expression.get("Filters")
if filters is not None:
for f in filters:
if f["Operator"] == "Equals":
if f["Name"].startswith("Tags."):
key = f["Name"][5:]
value = f["Value"]
if (
len(
[
e
for e in item.tags
if e["Key"] == key and e["Value"] == value
]
)
== 0
):
return False
if f["Name"] == "ExperimentName":
experiment_name = f["Value"]
if hasattr(item, "experiment_name"):
if getattr(item, "experiment_name") != experiment_name:
return False
else:
raise ValidationError(
message="Unknown property name: ExperimentName"
)
if f["Name"] == "TrialName":
raise AWSValidationException(
f"An error occurred (ValidationException) when calling the Search operation: Unknown property name: {f['Name']}"
)
if f["Name"] == "Parents.TrialName":
trial_name = f["Value"]
if getattr(item, "trial_name") != trial_name:
return False
return True
result = {
"Results": [],
"NextToken": str(next_index) if next_index is not None else None,
}
if resource == "Experiment":
experiments_fetched = list(self.experiments.values())
experiment_summaries = [
{
"ExperimentName": experiment_data.experiment_name,
"ExperimentArn": experiment_data.experiment_arn,
"CreationTime": experiment_data.creation_time,
"LastModifiedTime": experiment_data.last_modified_time,
}
for experiment_data in experiments_fetched
if evaluate_search_expression(experiment_data)
]
for experiment_summary in experiment_summaries:
result["Results"].append({"Experiment": experiment_summary})
if resource == "ExperimentTrial":
trials_fetched = list(self.trials.values())
trial_summaries = [
{
"TrialName": trial_data.trial_name,
"TrialArn": trial_data.trial_arn,
"CreationTime": trial_data.creation_time,
"LastModifiedTime": trial_data.last_modified_time,
}
for trial_data in trials_fetched
if evaluate_search_expression(trial_data)
]
for trial_summary in trial_summaries:
result["Results"].append({"Trial": trial_summary})
if resource == "ExperimentTrialComponent":
trial_components_fetched = list(self.trial_components.values())
trial_component_summaries = [
{
"TrialComponentName": trial_component_data.trial_component_name,
"TrialComponentArn": trial_component_data.trial_component_arn,
"CreationTime": trial_component_data.creation_time,
"LastModifiedTime": trial_component_data.last_modified_time,
}
for trial_component_data in trial_components_fetched
if evaluate_search_expression(trial_component_data)
]
for trial_component_summary in trial_component_summaries:
result["Results"].append({"TrialComponent": trial_component_summary})
return result
def delete_experiment(self, experiment_name):
try:
del self.experiments[experiment_name]
except KeyError:
message = "Could not find experiment configuration '{}'.".format(
FakeTrial.arn_formatter(experiment_name, self.region_name)
)
raise ValidationError(message=message)
def get_experiment_by_arn(self, arn):
experiments = [
experiment
for experiment in self.experiments.values()
if experiment.experiment_arn == arn
]
if len(experiments) == 0:
message = "RecordNotFound"
raise ValidationError(message=message)
return experiments[0]
def get_experiment_tags(self, arn):
try:
experiment = self.get_experiment_by_arn(arn)
return experiment.tags or []
except RESTError:
return []
def create_trial(
self, trial_name, experiment_name,
):
trial = FakeTrial(
region_name=self.region_name,
trial_name=trial_name,
experiment_name=experiment_name,
tags=[],
trial_components=[],
)
self.trials[trial_name] = trial
return trial.response_create
def describe_trial(self, trial_name):
try:
return self.trials[trial_name].response_object
except KeyError:
message = "Could not find trial '{}'.".format(
FakeTrial.arn_formatter(trial_name, self.region_name)
)
raise ValidationError(message=message)
def delete_trial(self, trial_name):
try:
del self.trials[trial_name]
except KeyError:
message = "Could not find trial configuration '{}'.".format(
FakeTrial.arn_formatter(trial_name, self.region_name)
)
raise ValidationError(message=message)
def get_trial_by_arn(self, arn):
trials = [trial for trial in self.trials.values() if trial.trial_arn == arn]
if len(trials) == 0:
message = "RecordNotFound"
raise ValidationError(message=message)
return trials[0]
def get_trial_tags(self, arn):
try:
trial = self.get_trial_by_arn(arn)
return trial.tags or []
except RESTError:
return []
@paginate(pagination_model=PAGINATION_MODEL)
def list_trials(self, experiment_name=None, trial_component_name=None):
trials_fetched = list(self.trials.values())
def evaluate_filter_expression(trial_data):
if experiment_name is not None:
if trial_data.experiment_name != experiment_name:
return False
if trial_component_name is not None:
if trial_component_name not in trial_data.trial_components:
return False
return True
return [
trial_data
for trial_data in trials_fetched
if evaluate_filter_expression(trial_data)
]
def create_trial_component(
self, trial_component_name, trial_name,
):
trial_component = FakeTrialComponent(
region_name=self.region_name,
trial_component_name=trial_component_name,
trial_name=trial_name,
tags=[],
)
self.trial_components[trial_component_name] = trial_component
return trial_component.response_create
def delete_trial_component(self, trial_component_name):
try:
del self.trial_components[trial_component_name]
except KeyError:
message = "Could not find trial-component configuration '{}'.".format(
FakeTrial.arn_formatter(trial_component_name, self.region_name)
)
raise ValidationError(message=message)
def get_trial_component_by_arn(self, arn):
trial_components = [
trial_component
for trial_component in self.trial_components.values()
if trial_component.trial_component_arn == arn
]
if len(trial_components) == 0:
message = "RecordNotFound"
raise ValidationError(message=message)
return trial_components[0]
def get_trial_component_tags(self, arn):
try:
trial_component = self.get_trial_component_by_arn(arn)
return trial_component.tags or []
except RESTError:
return []
def describe_trial_component(self, trial_component_name):
try:
return self.trial_components[trial_component_name].response_object
except KeyError:
message = "Could not find trial component '{}'.".format(
FakeTrialComponent.arn_formatter(trial_component_name, self.region_name)
)
raise ValidationError(message=message)
def _update_trial_component_details(self, trial_component_name, details_json):
self.trial_components[trial_component_name].update(details_json)
@paginate(pagination_model=PAGINATION_MODEL)
def list_trial_components(self, trial_name=None):
trial_components_fetched = list(self.trial_components.values())
return [
trial_component_data
for trial_component_data in trial_components_fetched
if trial_name is None or trial_component_data.trial_name == trial_name
]
def associate_trial_component(self, params):
trial_name = params["TrialName"]
trial_component_name = params["TrialComponentName"]
if trial_name in self.trials.keys():
self.trials[trial_name].trial_components.extend([trial_component_name])
else:
raise ResourceNotFound(
message=f"Trial 'arn:aws:sagemaker:{self.region_name}:{ACCOUNT_ID}:experiment-trial/{trial_name}' does not exist."
)
if trial_component_name in self.trial_components.keys():
self.trial_components[trial_component_name].trial_name = trial_name
return {
"TrialComponentArn": self.trial_components[
trial_component_name
].trial_component_arn,
"TrialArn": self.trials[trial_name].trial_arn,
}
def disassociate_trial_component(self, params):
trial_component_name = params["TrialComponentName"]
trial_name = params["TrialName"]
if trial_component_name in self.trial_components.keys():
self.trial_components[trial_component_name].trial_name = None
if trial_name in self.trials.keys():
self.trials[trial_name].trial_components = list(
filter(
lambda x: x != trial_component_name,
self.trials[trial_name].trial_components,
)
)
return {
"TrialComponentArn": f"arn:aws:sagemaker:{self.region_name}:{ACCOUNT_ID}:experiment-trial-component/{trial_component_name}",
"TrialArn": f"arn:aws:sagemaker:{self.region_name}:{ACCOUNT_ID}:experiment-trial/{trial_name}",
}
def create_notebook_instance(
self,
notebook_instance_name,
instance_type,
role_arn,
subnet_id=None,
security_group_ids=None,
kms_key_id=None,
tags=None,
lifecycle_config_name=None,
direct_internet_access="Enabled",
volume_size_in_gb=5,
accelerator_types=None,
default_code_repository=None,
additional_code_repositories=None,
root_access=None,
):
self._validate_unique_notebook_instance_name(notebook_instance_name)
notebook_instance = FakeSagemakerNotebookInstance(
region_name=self.region_name,
notebook_instance_name=notebook_instance_name,
instance_type=instance_type,
role_arn=role_arn,
subnet_id=subnet_id,
security_group_ids=security_group_ids,
kms_key_id=kms_key_id,
tags=tags,
lifecycle_config_name=lifecycle_config_name,
direct_internet_access=direct_internet_access
if direct_internet_access is not None
else "Enabled",
volume_size_in_gb=volume_size_in_gb if volume_size_in_gb is not None else 5,
accelerator_types=accelerator_types,
default_code_repository=default_code_repository,
additional_code_repositories=additional_code_repositories,
root_access=root_access,
)
self.notebook_instances[notebook_instance_name] = notebook_instance
return notebook_instance
def _validate_unique_notebook_instance_name(self, notebook_instance_name):
if notebook_instance_name in self.notebook_instances:
duplicate_arn = self.notebook_instances[notebook_instance_name].arn
message = "Cannot create a duplicate Notebook Instance ({})".format(
duplicate_arn
)
raise ValidationError(message=message)
def get_notebook_instance(self, notebook_instance_name):
try:
return self.notebook_instances[notebook_instance_name]
except KeyError:
raise ValidationError(message="RecordNotFound")
def get_notebook_instance_by_arn(self, arn):
instances = [
notebook_instance
for notebook_instance in self.notebook_instances.values()
if notebook_instance.arn == arn
]
if len(instances) == 0:
raise ValidationError(message="RecordNotFound")
return instances[0]
def start_notebook_instance(self, notebook_instance_name):
notebook_instance = self.get_notebook_instance(notebook_instance_name)
notebook_instance.start()
def stop_notebook_instance(self, notebook_instance_name):
notebook_instance = self.get_notebook_instance(notebook_instance_name)
notebook_instance.stop()
def delete_notebook_instance(self, notebook_instance_name):
notebook_instance = self.get_notebook_instance(notebook_instance_name)
if not notebook_instance.is_deletable:
message = "Status ({}) not in ([Stopped, Failed]). Unable to transition to (Deleting) for Notebook Instance ({})".format(
notebook_instance.status, notebook_instance.arn
)
raise ValidationError(message=message)
del self.notebook_instances[notebook_instance_name]
def get_notebook_instance_tags(self, arn):
try:
notebook_instance = self.get_notebook_instance_by_arn(arn)
return notebook_instance.tags or []
except RESTError:
return []
def create_notebook_instance_lifecycle_config(
self, notebook_instance_lifecycle_config_name, on_create, on_start
):
if (
notebook_instance_lifecycle_config_name
in self.notebook_instance_lifecycle_configurations
):
message = "Unable to create Notebook Instance Lifecycle Config {}. (Details: Notebook Instance Lifecycle Config already exists.)".format(
FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter(
notebook_instance_lifecycle_config_name, self.region_name
)
)
raise ValidationError(message=message)
lifecycle_config = FakeSageMakerNotebookInstanceLifecycleConfig(
region_name=self.region_name,
notebook_instance_lifecycle_config_name=notebook_instance_lifecycle_config_name,
on_create=on_create,
on_start=on_start,
)
self.notebook_instance_lifecycle_configurations[
notebook_instance_lifecycle_config_name
] = lifecycle_config
return lifecycle_config
def describe_notebook_instance_lifecycle_config(
self, notebook_instance_lifecycle_config_name
):
try:
return self.notebook_instance_lifecycle_configurations[
notebook_instance_lifecycle_config_name
].response_object
except KeyError:
message = "Unable to describe Notebook Instance Lifecycle Config '{}'. (Details: Notebook Instance Lifecycle Config does not exist.)".format(
FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter(
notebook_instance_lifecycle_config_name, self.region_name
)
)
raise ValidationError(message=message)
def delete_notebook_instance_lifecycle_config(
self, notebook_instance_lifecycle_config_name
):
try:
del self.notebook_instance_lifecycle_configurations[
notebook_instance_lifecycle_config_name
]
except KeyError:
message = "Unable to delete Notebook Instance Lifecycle Config '{}'. (Details: Notebook Instance Lifecycle Config does not exist.)".format(
FakeSageMakerNotebookInstanceLifecycleConfig.arn_formatter(
notebook_instance_lifecycle_config_name, self.region_name
)
)
raise ValidationError(message=message)
def create_endpoint_config(
self,
endpoint_config_name,
production_variants,
data_capture_config,
tags,
kms_key_id,
):
endpoint_config = FakeEndpointConfig(
region_name=self.region_name,
endpoint_config_name=endpoint_config_name,
production_variants=production_variants,
data_capture_config=data_capture_config,
tags=tags,
kms_key_id=kms_key_id,
)
self.validate_production_variants(production_variants)
self.endpoint_configs[endpoint_config_name] = endpoint_config
return endpoint_config
def validate_production_variants(self, production_variants):
for production_variant in production_variants:
if production_variant["ModelName"] not in self._models:
message = "Could not find model '{}'.".format(
Model.arn_for_model_name(
production_variant["ModelName"], self.region_name
)
)
raise ValidationError(message=message)
def describe_endpoint_config(self, endpoint_config_name):
try:
return self.endpoint_configs[endpoint_config_name].response_object
except KeyError:
message = "Could not find endpoint configuration '{}'.".format(
FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name)
)
raise ValidationError(message=message)
def delete_endpoint_config(self, endpoint_config_name):
try:
del self.endpoint_configs[endpoint_config_name]
except KeyError:
message = "Could not find endpoint configuration '{}'.".format(
FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name)
)
raise ValidationError(message=message)
def create_endpoint(
self, endpoint_name, endpoint_config_name, tags,
):
try:
endpoint_config = self.describe_endpoint_config(endpoint_config_name)
except KeyError:
message = "Could not find endpoint_config '{}'.".format(
FakeEndpointConfig.arn_formatter(endpoint_config_name, self.region_name)
)
raise ValidationError(message=message)
endpoint = FakeEndpoint(
region_name=self.region_name,
endpoint_name=endpoint_name,
endpoint_config_name=endpoint_config_name,
production_variants=endpoint_config["ProductionVariants"],
data_capture_config=endpoint_config["DataCaptureConfig"],
tags=tags,
)
self.endpoints[endpoint_name] = endpoint
return endpoint
def describe_endpoint(self, endpoint_name):
try:
return self.endpoints[endpoint_name].response_object
except KeyError:
message = "Could not find endpoint configuration '{}'.".format(
FakeEndpoint.arn_formatter(endpoint_name, self.region_name)
)
raise ValidationError(message=message)
def delete_endpoint(self, endpoint_name):
try:
del self.endpoints[endpoint_name]
except KeyError:
message = "Could not find endpoint configuration '{}'.".format(
FakeEndpoint.arn_formatter(endpoint_name, self.region_name)
)
raise ValidationError(message=message)
def get_endpoint_by_arn(self, arn):
endpoints = [
endpoint
for endpoint in self.endpoints.values()
if endpoint.endpoint_arn == arn
]
if len(endpoints) == 0:
message = "RecordNotFound"
raise ValidationError(message=message)
return endpoints[0]
def get_endpoint_tags(self, arn):
try:
endpoint = self.get_endpoint_by_arn(arn)
return endpoint.tags or []
except RESTError:
return []
def create_processing_job(
self,
app_specification,
experiment_config,
network_config,
processing_inputs,
processing_job_name,
processing_output_config,
processing_resources,
role_arn,
stopping_condition,
):
processing_job = FakeProcessingJob(
app_specification=app_specification,
experiment_config=experiment_config,
network_config=network_config,
processing_inputs=processing_inputs,
processing_job_name=processing_job_name,
processing_output_config=processing_output_config,
processing_resources=processing_resources,
region_name=self.region_name,
role_arn=role_arn,
stopping_condition=stopping_condition,
)
self.processing_jobs[processing_job_name] = processing_job
return processing_job
def describe_processing_job(self, processing_job_name):
try:
return self.processing_jobs[processing_job_name].response_object
except KeyError:
message = "Could not find processing job '{}'.".format(
FakeProcessingJob.arn_formatter(processing_job_name, self.region_name)
)
raise ValidationError(message=message)
def list_processing_jobs(
self,
next_token,
max_results,
creation_time_after,
creation_time_before,
last_modified_time_after,
last_modified_time_before,
name_contains,
status_equals,
sort_by,
sort_order,
):
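# NextToken is a plain integer offset into the stored jobs; the remaining filters are applied
# after the page slice is taken.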
if next_token:
try:
starting_index = int(next_token)
if starting_index > len(self.processing_jobs):
raise ValueError # invalid next_token
except ValueError:
raise AWSValidationException(
'Invalid pagination token because "{0}".'.format(next_token)
)
else:
starting_index = 0
if max_results:
end_index = max_results + starting_index
processing_jobs_fetched = list(self.processing_jobs.values())[
starting_index:end_index
]
if end_index >= len(self.processing_jobs):
next_index = None
else:
next_index = end_index
else:
processing_jobs_fetched = list(self.processing_jobs.values())
next_index = None
if name_contains is not None:
processing_jobs_fetched = filter(
lambda x: name_contains in x.processing_job_name,
processing_jobs_fetched,
)
if creation_time_after is not None:
processing_jobs_fetched = filter(
lambda x: x.creation_time > creation_time_after, processing_jobs_fetched
)
if creation_time_before is not None:
processing_jobs_fetched = filter(
lambda x: x.creation_time < creation_time_before,
processing_jobs_fetched,
)
if last_modified_time_after is not None:
processing_jobs_fetched = filter(
lambda x: x.last_modified_time > last_modified_time_after,
processing_jobs_fetched,
)
if last_modified_time_before is not None:
processing_jobs_fetched = filter(
lambda x: x.last_modified_time < last_modified_time_before,
processing_jobs_fetched,
)
if status_equals is not None:
processing_jobs_fetched = filter(
lambda x: x.processing_job_status == status_equals,
processing_jobs_fetched,
)
processing_job_summaries = [
{
"ProcessingJobName": processing_job_data.processing_job_name,
"ProcessingJobArn": processing_job_data.processing_job_arn,
"CreationTime": processing_job_data.creation_time,
"ProcessingEndTime": processing_job_data.processing_end_time,
"LastModifiedTime": processing_job_data.last_modified_time,
"ProcessingJobStatus": processing_job_data.processing_job_status,
}
for processing_job_data in processing_jobs_fetched
]
return {
"ProcessingJobSummaries": processing_job_summaries,
"NextToken": str(next_index) if next_index is not None else None,
}
def create_training_job(
self,
training_job_name,
hyper_parameters,
algorithm_specification,
role_arn,
input_data_config,
output_data_config,
resource_config,
vpc_config,
stopping_condition,
tags,
enable_network_isolation,
enable_inter_container_traffic_encryption,
enable_managed_spot_training,
checkpoint_config,
debug_hook_config,
debug_rule_configurations,
tensor_board_output_config,
experiment_config,
):
training_job = FakeTrainingJob(
region_name=self.region_name,
training_job_name=training_job_name,
hyper_parameters=hyper_parameters,
algorithm_specification=algorithm_specification,
role_arn=role_arn,
input_data_config=input_data_config,
output_data_config=output_data_config,
resource_config=resource_config,
vpc_config=vpc_config,
stopping_condition=stopping_condition,
tags=tags,
enable_network_isolation=enable_network_isolation,
enable_inter_container_traffic_encryption=enable_inter_container_traffic_encryption,
enable_managed_spot_training=enable_managed_spot_training,
checkpoint_config=checkpoint_config,
debug_hook_config=debug_hook_config,
debug_rule_configurations=debug_rule_configurations,
tensor_board_output_config=tensor_board_output_config,
experiment_config=experiment_config,
)
self.training_jobs[training_job_name] = training_job
return training_job
def describe_training_job(self, training_job_name):
try:
return self.training_jobs[training_job_name].response_object
except KeyError:
message = "Could not find training job '{}'.".format(
FakeTrainingJob.arn_formatter(training_job_name, self.region_name)
)
raise ValidationError(message=message)
def delete_training_job(self, training_job_name):
try:
del self.training_jobs[training_job_name]
except KeyError:
message = "Could not find endpoint configuration '{}'.".format(
FakeTrainingJob.arn_formatter(training_job_name, self.region_name)
)
raise ValidationError(message=message)
def get_training_job_by_arn(self, arn):
training_jobs = [
training_job
for training_job in self.training_jobs.values()
if training_job.training_job_arn == arn
]
if len(training_jobs) == 0:
raise ValidationError(message="RecordNotFound")
return training_jobs[0]
def get_training_job_tags(self, arn):
try:
training_job = self.get_training_job_by_arn(arn)
return training_job.tags or []
except RESTError:
return []
def _update_training_job_details(self, training_job_name, details_json):
self.training_jobs[training_job_name].update(details_json)
def list_training_jobs(
self,
next_token,
max_results,
creation_time_after,
creation_time_before,
last_modified_time_after,
last_modified_time_before,
name_contains,
status_equals,
sort_by,
sort_order,
):
if next_token:
try:
starting_index = int(next_token)
if starting_index > len(self.training_jobs):
raise ValueError # invalid next_token
except ValueError:
raise AWSValidationException(
'Invalid pagination token because "{0}".'.format(next_token)
)
else:
starting_index = 0
if max_results:
end_index = max_results + starting_index
training_jobs_fetched = list(self.training_jobs.values())[
starting_index:end_index
]
if end_index >= len(self.training_jobs):
next_index = None
else:
next_index = end_index
else:
training_jobs_fetched = list(self.training_jobs.values())
next_index = None
if name_contains is not None:
training_jobs_fetched = filter(
lambda x: name_contains in x.training_job_name, training_jobs_fetched
)
if creation_time_after is not None:
training_jobs_fetched = filter(
lambda x: x.creation_time > creation_time_after, training_jobs_fetched
)
if creation_time_before is not None:
training_jobs_fetched = filter(
lambda x: x.creation_time < creation_time_before, training_jobs_fetched
)
if last_modified_time_after is not None:
training_jobs_fetched = filter(
lambda x: x.last_modified_time > last_modified_time_after,
training_jobs_fetched,
)
if last_modified_time_before is not None:
training_jobs_fetched = filter(
lambda x: x.last_modified_time < last_modified_time_before,
training_jobs_fetched,
)
if status_equals is not None:
training_jobs_fetched = filter(
lambda x: x.training_job_status == status_equals, training_jobs_fetched
)
training_job_summaries = [
{
"TrainingJobName": training_job_data.training_job_name,
"TrainingJobArn": training_job_data.training_job_arn,
"CreationTime": training_job_data.creation_time,
"TrainingEndTime": training_job_data.training_end_time,
"LastModifiedTime": training_job_data.last_modified_time,
"TrainingJobStatus": training_job_data.training_job_status,
}
for training_job_data in training_jobs_fetched
]
return {
"TrainingJobSummaries": training_job_summaries,
"NextToken": str(next_index) if next_index is not None else None,
}
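# Lightweight records backing the experiment / trial / trial-component APIs above; each one only
# tracks its name, ARN, tags and timestamps (plus trial membership where relevant).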
class FakeExperiment(BaseObject):
def __init__(
self, region_name, experiment_name, tags,
):
self.experiment_name = experiment_name
self.experiment_arn = FakeExperiment.arn_formatter(experiment_name, region_name)
self.tags = tags
self.creation_time = self.last_modified_time = datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
)
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"ExperimentArn": self.experiment_arn}
@staticmethod
def arn_formatter(experiment_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":experiment/"
+ experiment_name
)
class FakeTrial(BaseObject):
def __init__(
self, region_name, trial_name, experiment_name, tags, trial_components,
):
self.trial_name = trial_name
self.trial_arn = FakeTrial.arn_formatter(trial_name, region_name)
self.tags = tags
self.trial_components = trial_components
self.experiment_name = experiment_name
self.creation_time = self.last_modified_time = datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
)
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"TrialArn": self.trial_arn}
@staticmethod
def arn_formatter(trial_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":experiment-trial/"
+ trial_name
)
class FakeTrialComponent(BaseObject):
def __init__(
self, region_name, trial_component_name, trial_name, tags,
):
self.trial_component_name = trial_component_name
self.trial_component_arn = FakeTrialComponent.arn_formatter(
trial_component_name, region_name
)
self.tags = tags
self.trial_name = trial_name
now_string = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.creation_time = self.last_modified_time = now_string
@property
def response_object(self):
response_object = self.gen_response_object()
return {
k: v for k, v in response_object.items() if v is not None and v != [None]
}
@property
def response_create(self):
return {"TrialComponentArn": self.trial_component_arn}
@staticmethod
def arn_formatter(trial_component_name, region_name):
return (
"arn:aws:sagemaker:"
+ region_name
+ ":"
+ str(ACCOUNT_ID)
+ ":experiment-trial-component/"
+ trial_component_name
)
sagemaker_backends = BackendDict(SageMakerModelBackend, "sagemaker")
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import copy
from datetime import datetime
from splash.har.log import HarLog
from splash.har.utils import format_datetime, get_duration, cleaned_har_entry
from splash.har.qt import request2har, reply2har
class HarBuilder(object):
"""
Splash-specific HAR builder class.
It knows how to update timings based on events available in QT.
Also it maintains a history of browser URL changes.
"""
REQUEST_CREATED = "created"
REQUEST_FINISHED = "finished"
REQUEST_HEADERS_RECEIVED = "headers_received"
def __init__(self):
self.log = HarLog()
self.history = []
def todict(self):
""" Return HAR log as a Python dict. """
return self.log.todict()
def get_history(self):
""" Get a history of browser URL changes """
return copy.deepcopy(self.history)
def reset(self):
""" Start building a new HAR log """
self.log = HarLog()
def get_last_http_status(self):
"""
Return HTTP status code of the currently loaded webpage
or None if it is not available.
"""
if not self.history:
return
try:
return self.history[-1]["response"]["status"]
except KeyError:
return
def get_entry(self, req_id):
""" Return HAR entry for a given req_id """
if not self.log.has_entry(req_id):
return
entry = self.log.get_mutable_entry(req_id)
return entry
def _initial_entry_data(self, start_time, operation, request, outgoingData):
"""
Return initial values for a new HAR entry.
"""
return {
# custom fields
'_tmp': {
'start_time': start_time,
'request_start_sending_time': start_time,
'request_sent_time': start_time,
'response_start_time': start_time,
# 'outgoingData': outgoingData,
},
'_splash_processing_state': self.REQUEST_CREATED,
# standard fields
"startedDateTime": format_datetime(start_time),
"request": request2har(request, operation, outgoingData),
"response": {
"bodySize": -1,
},
"cache": {},
"timings": {
"blocked": -1,
"dns": -1,
"connect": -1,
"ssl": -1,
"send": 0,
"wait": 0,
"receive": 0,
},
"time": 0,
}
def store_title(self, title):
self.log.store_title(title)
def store_url(self, url):
if hasattr(url, 'toString'):
url = url.toString()
self.log.store_url(url)
def store_timing(self, name):
self.log.store_timing(name)
def store_new_request(self, req_id, start_time, operation, request, outgoingData):
"""
Store information about a new QNetworkRequest.
"""
entry = self.log.get_mutable_entry(req_id, create=True)
entry.update(self._initial_entry_data(
start_time=start_time,
operation=operation,
request=request,
outgoingData=outgoingData
))
def store_new_reply(self, req_id, reply):
"""
Store initial reply information.
"""
if not self.log.has_entry(req_id):
return
entry = self.log.get_mutable_entry(req_id)
entry["response"].update(reply2har(reply))
def store_reply_finished(self, req_id, reply, content):
"""
Store information about a finished reply.
"""
if not self.log.has_entry(req_id):
return
entry = self.log.get_mutable_entry(req_id)
entry["_splash_processing_state"] = self.REQUEST_FINISHED
# update timings
now = datetime.utcnow()
start_time = entry['_tmp']['start_time']
response_start_time = entry['_tmp']['response_start_time']
receive_time = get_duration(response_start_time, now)
total_time = get_duration(start_time, now)
entry["timings"]["receive"] = receive_time
entry["time"] = total_time
if not entry["timings"]["send"]:
wait_time = entry["timings"]["wait"]
entry["timings"]["send"] = total_time - receive_time - wait_time
if entry["timings"]["send"] < 1e-6:
entry["timings"]["send"] = 0
# update other reply information
entry["response"].update(reply2har(reply, content=content))
def store_reply_headers_received(self, req_id, reply):
"""
Update reply information when HTTP headers are received.
"""
if not self.log.has_entry(req_id):
return
entry = self.log.get_mutable_entry(req_id)
if entry["_splash_processing_state"] == self.REQUEST_FINISHED:
# self.log("Headers received for {url}; ignoring", reply,
# min_level=3)
return
entry["_splash_processing_state"] = self.REQUEST_HEADERS_RECEIVED
entry["response"].update(reply2har(reply))
now = datetime.utcnow()
request_sent = entry["_tmp"]["request_sent_time"]
entry["_tmp"]["response_start_time"] = now
entry["timings"]["wait"] = get_duration(request_sent, now)
def store_reply_download_progress(self, req_id, received, total):
"""
Update reply information when new data is available
"""
if not self.log.has_entry(req_id):
return
entry = self.log.get_mutable_entry(req_id)
entry["response"]["bodySize"] = int(received)
def store_request_upload_progress(self, req_id, sent, total):
"""
Update request information when outgoing data is sent.
"""
if not self.log.has_entry(req_id):
return
entry = self.log.get_mutable_entry(req_id)
entry["request"]["bodySize"] = int(sent)
now = datetime.utcnow()
if sent == 0:
            # this is the moment when sending starts
            start_time = entry["_tmp"]["start_time"]
entry["_tmp"]["request_start_sending_time"] = now
entry["timings"]["blocked"] = get_duration(start_time, now)
entry["_tmp"]["request_sent_time"] = now
if sent == total:
entry["_tmp"]["response_start_time"] = now
start_sending_time = entry["_tmp"]["request_start_sending_time"]
entry["timings"]["send"] = get_duration(start_sending_time, now)
def store_redirect(self, url):
""" Update history when redirect happens """
cause_ev = self.log._prev_entry(url, last_idx=-1)
if cause_ev:
self.history.append(cleaned_har_entry(cause_ev.data))
|
|
"""Widget showing an image."""
from typing import Optional, Union
from kivy.properties import ObjectProperty, NumericProperty, AliasProperty
from kivy.graphics import Rectangle, Color, Rotate, Scale
from mpfmc.uix.widget import Widget
MYPY = False
if MYPY: # pragma: no cover
from mpfmc.core.mc import MpfMc # pylint: disable-msg=cyclic-import,unused-import
from mpfmc.assets.image import ImageAsset # pylint: disable-msg=cyclic-import,unused-import
class ImageWidget(Widget):
"""Widget showing an image."""
widget_type_name = 'Image'
merge_settings = ('height', 'width')
animation_properties = ('x', 'y', 'color', 'rotation', 'scale', 'fps', 'current_frame', 'end_frame', 'opacity')
def __init__(self, mc: "MpfMc", config: dict, key: Optional[str] = None, **kwargs) -> None:
super().__init__(mc=mc, config=config, key=key)
self.size = (0, 0)
self._image = None # type: ImageAsset
self._current_loop = 0
self._end_index = -1
# Retrieve the specified image asset to display. This widget simply
# draws a rectangle using the texture from the loaded image asset to
# display the image. Scaling and rotation is handled by the Scatter
# widget.
image = None
try:
image = self.mc.images[self.config['image']]
except KeyError:
try:
image = self.mc.images[kwargs['play_kwargs']['image']]
except KeyError:
pass
if not image:
if not self.mc.asset_manager.initial_assets_loaded:
raise ValueError("Tried to use an image '{}' when the initial asset loading run has not yet been "
"completed. Try to use 'init_done' as event to show your slides if you want to "
"use images.".format(self.config['image']))
raise ValueError("Cannot add Image widget. Image '{}' is not a "
"valid image name.".format(self.config['image']))
# Updates the config for this widget to pull in any defaults that were
# in the asset config
self.merge_asset_config(image)
if image.is_pool:
self._image = image.get_next()
else:
self._image = image
self._image.references += 1
# If the associated image asset exists, that means it's loaded already.
if self._image.image:
self._image_loaded()
else:
# if the image asset isn't loaded, we set the size to 0,0 so it
# doesn't show up on the display yet.
# TODO Add log about loading on demand
self.size = (0, 0)
self._image.load(callback=self._image_loaded)
# Bind to all properties that when changed need to force
# the widget to be redrawn
self.bind(pos=self._draw_widget,
color=self._draw_widget,
rotation=self._draw_widget,
scale=self._draw_widget)
def __repr__(self) -> str: # pragma: no cover
try:
return '<Image name={}, size={}, pos={}>'.format(self._image.name,
self.size,
self.pos)
except AttributeError:
return '<Image (loading...), size={}, pos={}>'.format(self.size,
self.pos)
def _image_loaded(self, *args) -> None:
"""Callback when image asset has been loaded and is ready to display."""
del args
# Setup callback on image 'on_texture' event (called whenever the image
# texture changes; used mainly for animated images)
self._image.image.bind(on_texture=self._on_texture_change)
self._on_texture_change()
self._draw_widget()
# Setup animation properties (if applicable)
if self._image.image.anim_available:
self.fps = self.config['fps']
self.loops = self.config['loops']
self.start_frame = self._image.image.anim_index if self._image.frame_persist else self.config['start_frame']
# If not auto playing, set the end index to be the start frame
if not self.config['auto_play']:
# Frame numbers start at 1 and indexes at 0, so subtract 1
self._end_index = self.start_frame - 1
self.play(start_frame=self.start_frame, auto_play=self.config['auto_play'])
# If this image should persist its animation frame on future loads, set that now
if self._image.config.get('persist_frame'):
self._image.frame_persist = True
def _on_texture_change(self, *args) -> None:
"""Update texture from image asset (callback when image texture changes)."""
del args
self.texture = self._image.image.texture
self.size = self.texture.size
self._draw_widget()
ci = self._image.image
# Check if this is the end frame to stop the image. For some reason, after the image
# stops the anim_index will increment one last time, so check for end_index - 1 to prevent
# a full animation loop on subsequent calls to the same end frame.
if self._end_index > -1:
if ci.anim_index == self._end_index - 1:
self._end_index = -1
ci.anim_reset(False)
return
skip_to = self._image.frame_skips and self._image.frame_skips.get(ci.anim_index)
# Skip if the end_index is after the skip_to or before the current position (i.e. we need to loop),
# but not if the skip will cause a loop around and bypass the end_index ahead
if skip_to is not None and (self._end_index > skip_to or self._end_index < ci.anim_index) and not \
(self._end_index > ci.anim_index and skip_to < ci.anim_index):
self.current_frame = skip_to
# Handle animation looping (when applicable)
if ci.anim_available and self.loops > -1 and ci.anim_index == len(ci.image.textures) - 1:
self._current_loop += 1
if self._current_loop > self.loops:
ci.anim_reset(False)
self._current_loop = 0
def prepare_for_removal(self) -> None:
"""Prepare the widget to be removed."""
super().prepare_for_removal()
# stop any animations
if self._image:
self._image.references -= 1
if self._image.references == 0:
try:
self._image.image.anim_reset(False)
# If the image was already unloaded from memory
except AttributeError:
pass
def _draw_widget(self, *args):
"""Draws the image (draws a rectangle using the image texture)"""
del args
anchor = (self.x - self.anchor_offset_pos[0], self.y - self.anchor_offset_pos[1])
self.canvas.clear()
with self.canvas:
Color(*self.color)
Rotate(angle=self.rotation, origin=anchor)
Scale(self.scale).origin = anchor
Rectangle(pos=self.pos, size=self.size, texture=self.texture)
def play(self, start_frame: Optional[int] = 0, auto_play: Optional[bool] = True):
"""Play the image animation (if images supports it)."""
if start_frame:
self.current_frame = start_frame
# pylint: disable-msg=protected-access
self._image.image._anim_index = start_frame - 1
self._image.image.anim_reset(auto_play)
def stop(self) -> None:
"""Stop the image animation."""
self._image.image.anim_reset(False)
#
# Properties
#
def _get_image(self) -> Optional["ImageAsset"]:
return self._image
image = AliasProperty(_get_image)
texture = ObjectProperty(None, allownone=True)
'''Texture object of the image. The texture represents the original, loaded
image texture.
Depending of the texture creation, the value will be a
:class:`~kivy.graphics.texture.Texture` or a
:class:`~kivy.graphics.texture.TextureRegion` object.
:attr:`texture` is an :class:`~kivy.properties.ObjectProperty` and defaults
to None.
'''
loops = NumericProperty(-1)
'''Number of loops to play then stop animating. -1 means keep animating.
'''
def _get_fps(self) -> Optional[float]:
if self._image.image.anim_available:
return int(1 / self._image.image.anim_delay)
else:
return None
def _set_fps(self, value: float):
if value > 0:
self._image.image.anim_delay = 1 / float(value)
else:
self._image.image.anim_delay = -1
fps = AliasProperty(_get_fps, _set_fps)
'''The frames per second rate for the animation if the image is sequenced
(like an animated gif). If fps is set to 0, the animation will be stopped.
'''
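    # Illustrative mapping between this property and the underlying Kivy
    # image's anim_delay (derived from _get_fps/_set_fps above):
    #   widget.fps = 25  ->  anim_delay = 1 / 25.0 = 0.04 seconds per frame
    #   widget.fps = 0   ->  anim_delay = -1, which stops the animation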
def _get_current_frame(self) -> int:
return self._image.image.anim_index + 1
def _set_current_frame(self, value: Union[int, float]):
if not self._image.image.anim_available or not hasattr(self._image.image.image, 'textures'):
return
frame = (int(value) - 1) % len(self._image.image.image.textures)
if frame == self._image.image.anim_index:
return
else:
self._image.image._anim_index = frame # pylint: disable-msg=protected-access
self._image.image.anim_reset(True)
current_frame = AliasProperty(_get_current_frame, _set_current_frame)
'''The current frame of the animation.
'''
def _get_end_frame(self) -> int:
return self._end_index + 1
def _set_end_frame(self, value: int):
if not self._image.image.anim_available or not hasattr(self._image.image.image, 'textures'):
return
frame = (int(value) - 1) % len(self._image.image.image.textures)
if frame == self._image.image.anim_index:
return
self._end_index = frame
self._image.image.anim_reset(True)
end_frame = AliasProperty(_get_end_frame, _set_end_frame)
'''The target frame at which the animation will stop.
'''
rotation = NumericProperty(0)
'''Rotation angle value of the widget.
:attr:`rotation` is an :class:`~kivy.properties.NumericProperty` and defaults to
0.
'''
scale = NumericProperty(1.0)
'''Scale value of the widget.
:attr:`scale` is an :class:`~kivy.properties.NumericProperty` and defaults to
1.0.
'''
widget_classes = [ImageWidget]
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# suppressions.py
"""Post-process Valgrind suppression matcher.
Suppressions are defined as follows:
# optional one-line comments anywhere in the suppressions file.
{
<Short description of the error>
Toolname:Errortype
fun:function_name
obj:object_filename
fun:wildcarded_fun*_name
# an ellipsis wildcards zero or more functions in a stack.
...
fun:some_other_function_name
}
If run from the command line, suppressions.py does a self-test
of the Suppression class.
"""
import os
import re
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__),
'..', 'python', 'google'))
import path_utils
ELLIPSIS = '...'
def GetSuppressions():
suppressions_root = path_utils.ScriptDir()
JOIN = os.path.join
result = {}
supp_filename = JOIN(suppressions_root, "memcheck", "suppressions.txt")
vg_common = ReadSuppressionsFromFile(supp_filename)
supp_filename = JOIN(suppressions_root, "tsan", "suppressions.txt")
tsan_common = ReadSuppressionsFromFile(supp_filename)
result['common_suppressions'] = vg_common + tsan_common
supp_filename = JOIN(suppressions_root, "memcheck", "suppressions_mac.txt")
vg_mac = ReadSuppressionsFromFile(supp_filename)
supp_filename = JOIN(suppressions_root, "tsan", "suppressions_mac.txt")
tsan_mac = ReadSuppressionsFromFile(supp_filename)
result['mac_suppressions'] = vg_mac + tsan_mac
supp_filename = JOIN(suppressions_root, "tsan", "suppressions_win32.txt")
tsan_win = ReadSuppressionsFromFile(supp_filename)
result['win_suppressions'] = tsan_win
supp_filename = JOIN(suppressions_root, "..", "heapcheck", "suppressions.txt")
result['heapcheck_suppressions'] = ReadSuppressionsFromFile(supp_filename)
supp_filename = JOIN(suppressions_root, "drmemory", "suppressions.txt")
result['drmem_suppressions'] = ReadSuppressionsFromFile(supp_filename)
supp_filename = JOIN(suppressions_root, "drmemory", "suppressions_full.txt")
result['drmem_full_suppressions'] = ReadSuppressionsFromFile(supp_filename)
return result
def GlobToRegex(glob_pattern, ignore_case=False):
"""Translate glob wildcards (*?) into regex syntax. Escape the rest."""
regex = ''
for char in glob_pattern:
if char == '*':
regex += '.*'
elif char == '?':
regex += '.'
elif ignore_case and char.isalpha():
regex += '[%s%s]' % (char.lower(), char.upper())
else:
regex += re.escape(char)
  return regex
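# Illustrative translations (a sketch; the exact escaping of literal
# characters depends on re.escape, so these are approximate):
#   GlobToRegex('fun:mal*c')                   -> 'fun\:mal.*c'
#   GlobToRegex('lib?.so')                     -> 'lib.\.so'
#   GlobToRegex('ntdll.dll', ignore_case=True) -> '[nN][tT][dD][lL][lL]\.[dD][lL][lL]'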
def StripAndSkipCommentsIterator(lines):
"""Generator of (line_no, line) pairs that strips comments and whitespace."""
for (line_no, line) in enumerate(lines):
line = line.strip() # Drop \n
if line.startswith('#'):
continue # Comments
    # Skip comment lines but keep empty lines: an empty line marks the end of
    # a suppression. Add one to the line number, since most editors use
    # 1-based numbering while enumerate is 0-based.
yield (line_no + 1, line)
class Suppression(object):
"""This class represents a single stack trace suppression.
Attributes:
description: A string representing the error description.
type: A string representing the error type, e.g. Memcheck:Leak.
stack: The lines comprising the stack trace for the suppression.
regex: The actual regex used to match against scraped reports.
"""
def __init__(self, description, type, stack, defined_at, regex):
"""Inits Suppression.
description, type, stack, regex: same as class attributes
defined_at: file:line identifying where the suppression was defined
"""
self.description = description
self.type = type
self.stack = stack
self.defined_at = defined_at
self.regex = re.compile(regex, re.MULTILINE)
def Match(self, suppression_from_report):
"""Returns bool indicating whether this suppression matches
the suppression generated from Valgrind error report.
We match our suppressions against generated suppressions
(not against reports) since they have the same format
while the reports are taken from XML, contain filenames,
they are demangled, and are generally more difficult to
parse.
Args:
suppression_from_report: list of strings (function names).
Returns:
True if the suppression is not empty and matches the report.
"""
if not self.stack:
return False
lines = [f.strip() for f in suppression_from_report]
return self.regex.match('\n'.join(lines) + '\n') is not None
def FilenameToTool(filename):
"""Return the name of the tool that a file is related to, or None.
Example mappings:
tools/heapcheck/suppressions.txt -> heapcheck
tools/valgrind/tsan/suppressions.txt -> tsan
tools/valgrind/drmemory/suppressions.txt -> drmemory
tools/valgrind/drmemory/suppressions_full.txt -> drmemory
tools/valgrind/memcheck/suppressions.txt -> memcheck
tools/valgrind/memcheck/suppressions_mac.txt -> memcheck
"""
filename = os.path.abspath(filename)
parts = filename.split(os.sep)
tool = parts[-2]
if tool in ('heapcheck', 'drmemory', 'memcheck', 'tsan'):
return tool
return None
def ReadSuppressionsFromFile(filename):
"""Read suppressions from the given file and return them as a list"""
tool_to_parser = {
"drmemory": ReadDrMemorySuppressions,
"memcheck": ReadValgrindStyleSuppressions,
"tsan": ReadValgrindStyleSuppressions,
"heapcheck": ReadValgrindStyleSuppressions,
}
tool = FilenameToTool(filename)
assert tool in tool_to_parser, (
"unknown tool %s for filename %s" % (tool, filename))
parse_func = tool_to_parser[tool]
  input_file = open(filename, 'r')
  try:
    return parse_func(input_file, filename)
  finally:
    input_file.close()
class ValgrindStyleSuppression(Suppression):
"""A suppression using the Valgrind syntax.
  Most tools use this syntax, even ones that are not Valgrind-based
  (e.g. Heapcheck, TSan).
Attributes:
Same as Suppression.
"""
def __init__(self, description, type, stack, defined_at):
"""Creates a suppression using the Memcheck, TSan, and Heapcheck syntax."""
regex = '{\n.*\n%s\n' % type
for line in stack:
if line == ELLIPSIS:
regex += '(.*\n)*'
else:
regex += GlobToRegex(line)
regex += '\n'
regex += '(.*\n)*'
regex += '}'
# In the recent version of valgrind-variant we've switched
# from memcheck's default Addr[1248]/Value[1248]/Cond suppression types
# to simply Unaddressable/Uninitialized.
    # The suppression generator no longer emits the "old" types, so for the
    # "new-type" suppressions:
# * Memcheck:Unaddressable should also match Addr* reports,
# * Memcheck:Uninitialized should also match Cond and Value reports,
#
# We also want to support legacy suppressions (e.g. copied from
# upstream bugs etc), so:
# * Memcheck:Addr[1248] suppressions should match Unaddressable reports,
# * Memcheck:Cond and Memcheck:Value[1248] should match Uninitialized.
    # Note that the latter two rules only apply to the
    # tools/valgrind/waterfall.sh suppression matcher; the real
    # valgrind-variant Memcheck will not suppress, for example, an Addr1
    # report printed as Unaddressable using an Addr4 suppression.
# Be careful to check the access size while copying legacy suppressions!
for sz in [1, 2, 4, 8]:
regex = regex.replace("\nMemcheck:Addr%d\n" % sz,
"\nMemcheck:(Addr%d|Unaddressable)\n" % sz)
regex = regex.replace("\nMemcheck:Value%d\n" % sz,
"\nMemcheck:(Value%d|Uninitialized)\n" % sz)
regex = regex.replace("\nMemcheck:Cond\n",
"\nMemcheck:(Cond|Uninitialized)\n")
regex = regex.replace("\nMemcheck:Unaddressable\n",
"\nMemcheck:(Addr.|Unaddressable)\n")
regex = regex.replace("\nMemcheck:Uninitialized\n",
"\nMemcheck:(Cond|Value.|Uninitialized)\n")
return super(ValgrindStyleSuppression, self).__init__(
description, type, stack, defined_at, regex)
def __str__(self):
"""Stringify."""
lines = [self.description, self.type] + self.stack
return "{\n %s\n}\n" % "\n ".join(lines)
class SuppressionError(Exception):
def __init__(self, message, happened_at):
self._message = message
self._happened_at = happened_at
def __str__(self):
return 'Error reading suppressions at %s!\n%s' % (
self._happened_at, self._message)
def ReadValgrindStyleSuppressions(lines, supp_descriptor):
"""Given a list of lines, returns a list of suppressions.
Args:
lines: a list of lines containing suppressions.
supp_descriptor: should typically be a filename.
Used only when printing errors.
"""
result = []
cur_descr = ''
cur_type = ''
cur_stack = []
in_suppression = False
nline = 0
for line in lines:
nline += 1
line = line.strip()
if line.startswith('#'):
continue
if not in_suppression:
if not line:
# empty lines between suppressions
pass
elif line.startswith('{'):
in_suppression = True
pass
else:
raise SuppressionError('Expected: "{"',
"%s:%d" % (supp_descriptor, nline))
elif line.startswith('}'):
result.append(
ValgrindStyleSuppression(cur_descr, cur_type, cur_stack,
"%s:%d" % (supp_descriptor, nline)))
cur_descr = ''
cur_type = ''
cur_stack = []
in_suppression = False
elif not cur_descr:
cur_descr = line
continue
elif not cur_type:
if (not line.startswith("Memcheck:") and
not line.startswith("ThreadSanitizer:") and
(line != "Heapcheck:Leak")):
raise SuppressionError(
'Expected "Memcheck:TYPE", "ThreadSanitizer:TYPE" '
'or "Heapcheck:Leak", got "%s"' % line,
"%s:%d" % (supp_descriptor, nline))
supp_type = line.split(':')[1]
if not supp_type in ["Addr1", "Addr2", "Addr4", "Addr8",
"Cond", "Free", "Jump", "Leak", "Overlap", "Param",
"Value1", "Value2", "Value4", "Value8",
"Race", "UnlockNonLocked", "InvalidLock",
"Unaddressable", "Uninitialized"]:
raise SuppressionError('Unknown suppression type "%s"' % supp_type,
"%s:%d" % (supp_descriptor, nline))
cur_type = line
continue
elif re.match("^fun:.*|^obj:.*|^\.\.\.$", line):
cur_stack.append(line.strip())
elif len(cur_stack) == 0 and cur_type == "Memcheck:Param":
cur_stack.append(line.strip())
else:
raise SuppressionError(
'"fun:function_name" or "obj:object_file" or "..." expected',
"%s:%d" % (supp_descriptor, nline))
return result
def PresubmitCheckSuppressions(supps):
"""Check a list of suppressions and return a list of SuppressionErrors.
Mostly useful for separating the checking logic from the Presubmit API for
testing.
"""
known_supp_names = {} # Key: name, Value: suppression.
errors = []
for s in supps:
if re.search("<.*suppression.name.here>", s.description):
# Suppression name line is
# <insert_a_suppression_name_here> for Memcheck,
# <Put your suppression name here> for TSan,
# name=<insert_a_suppression_name_here> for DrMemory
errors.append(
SuppressionError(
"You've forgotten to put a suppression name like bug_XXX",
s.defined_at))
continue
if s.description in known_supp_names:
errors.append(
SuppressionError(
'Suppression named "%s" is defined more than once, '
'see %s' % (s.description,
known_supp_names[s.description].defined_at),
s.defined_at))
else:
known_supp_names[s.description] = s
return errors
def PresubmitCheck(input_api, output_api):
"""A helper function useful in PRESUBMIT.py
Returns a list of errors or [].
"""
sup_regex = re.compile('suppressions.*\.txt$')
filenames = [f.AbsoluteLocalPath() for f in input_api.AffectedFiles()
if sup_regex.search(f.LocalPath())]
errors = []
# TODO(timurrrr): warn on putting suppressions into a wrong file,
# e.g. TSan suppression in a memcheck file.
for f in filenames:
try:
supps = ReadSuppressionsFromFile(f)
errors.extend(PresubmitCheckSuppressions(supps))
except SuppressionError as e:
errors.append(e)
return [output_api.PresubmitError(str(e)) for e in errors]
class DrMemorySuppression(Suppression):
"""A suppression using the DrMemory syntax.
Attributes:
instr: The instruction to match.
Rest inherited from Suppression.
"""
def __init__(self, name, report_type, instr, stack, defined_at):
"""Constructor."""
self.instr = instr
# Construct the regex.
regex = '{\n'
if report_type == 'LEAK':
regex += '(POSSIBLE )?LEAK'
else:
regex += report_type
regex += '\nname=.*\n'
# TODO(rnk): Implement http://crbug.com/107416#c5 .
# drmemory_analyze.py doesn't generate suppressions with an instruction in
# them, so these suppressions will always fail to match. We should override
# Match to fetch the instruction from the report and try to match against
# that.
if instr:
regex += 'instruction=%s\n' % GlobToRegex(instr)
for line in stack:
if line == ELLIPSIS:
regex += '(.*\n)*'
elif '!' in line:
(mod, func) = line.split('!')
if func == ELLIPSIS: # mod!ellipsis frame
regex += '(%s\!.*\n)+' % GlobToRegex(mod, ignore_case=True)
else: # mod!func frame
# Ignore case for the module match, but not the function match.
regex += '%s\!%s\n' % (GlobToRegex(mod, ignore_case=True),
GlobToRegex(func, ignore_case=False))
else:
regex += GlobToRegex(line)
regex += '\n'
regex += '(.*\n)*' # Match anything left in the stack.
regex += '}'
return super(DrMemorySuppression, self).__init__(name, report_type, stack,
defined_at, regex)
def __str__(self):
"""Stringify."""
text = self.type + "\n"
if self.description:
text += "name=%s\n" % self.description
if self.instr:
text += "instruction=%s\n" % self.instr
text += "\n".join(self.stack)
text += "\n"
return text
# Possible DrMemory error report types. Keep consistent with suppress_name
# array in drmemory/drmemory/report.c.
DRMEMORY_ERROR_TYPES = [
'UNADDRESSABLE ACCESS',
'UNINITIALIZED READ',
'INVALID HEAP ARGUMENT',
'GDI USAGE ERROR',
'HANDLE LEAK',
'LEAK',
'POSSIBLE LEAK',
'WARNING',
]
# Regexes to match valid drmemory frames.
DRMEMORY_FRAME_PATTERNS = [
re.compile(r"^.*\!.*$"), # mod!func
re.compile(r"^.*!\.\.\.$"), # mod!ellipsis
re.compile(r"^\<.*\+0x.*\>$"), # <mod+0xoffs>
re.compile(r"^\<not in a module\>$"),
re.compile(r"^system call .*$"),
re.compile(r"^\*$"), # wildcard
re.compile(r"^\.\.\.$"), # ellipsis
]
def ReadDrMemorySuppressions(lines, supp_descriptor):
"""Given a list of lines, returns a list of DrMemory suppressions.
Args:
lines: a list of lines containing suppressions.
supp_descriptor: should typically be a filename.
Used only when parsing errors happen.
"""
lines = StripAndSkipCommentsIterator(lines)
suppressions = []
for (line_no, line) in lines:
if not line:
continue
if line not in DRMEMORY_ERROR_TYPES:
raise SuppressionError('Expected a DrMemory error type, '
'found %r instead\n Valid error types: %s' %
(line, ' '.join(DRMEMORY_ERROR_TYPES)),
"%s:%d" % (supp_descriptor, line_no))
# Suppression starts here.
report_type = line
name = ''
instr = None
stack = []
defined_at = "%s:%d" % (supp_descriptor, line_no)
found_stack = False
for (line_no, line) in lines:
if not found_stack and line.startswith('name='):
name = line.replace('name=', '')
elif not found_stack and line.startswith('instruction='):
instr = line.replace('instruction=', '')
else:
# Unrecognized prefix indicates start of stack trace.
found_stack = True
if not line:
# Blank line means end of suppression.
break
if not any([regex.match(line) for regex in DRMEMORY_FRAME_PATTERNS]):
raise SuppressionError(
('Unexpected stack frame pattern at line %d\n' +
'Frames should be one of the following:\n' +
' module!function\n' +
' module!...\n' +
' <module+0xhexoffset>\n' +
' <not in a module>\n' +
' system call Name\n' +
' *\n' +
' ...\n') % line_no, defined_at)
stack.append(line)
if len(stack) == 0: # In case we hit EOF or blank without any stack frames.
raise SuppressionError('Suppression "%s" has no stack frames, ends at %d'
% (name, line_no), defined_at)
if stack[-1] == ELLIPSIS:
raise SuppressionError('Suppression "%s" ends in an ellipsis on line %d' %
(name, line_no), defined_at)
suppressions.append(
DrMemorySuppression(name, report_type, instr, stack, defined_at))
return suppressions
def ParseSuppressionOfType(lines, supp_descriptor, def_line_no, report_type):
"""Parse the suppression starting on this line.
Suppressions start with a type, have an optional name and instruction, and a
stack trace that ends in a blank line.
"""
def TestStack(stack, positive, negative, suppression_parser=None):
"""A helper function for SelfTest() that checks a single stack.
Args:
stack: the stack to match the suppressions.
positive: the list of suppressions that must match the given stack.
negative: the list of suppressions that should not match.
suppression_parser: optional arg for the suppression parser, default is
ReadValgrindStyleSuppressions.
"""
if not suppression_parser:
suppression_parser = ReadValgrindStyleSuppressions
for supp in positive:
parsed = suppression_parser(supp.split("\n"), "positive_suppression")
assert parsed[0].Match(stack.split("\n")), (
"Suppression:\n%s\ndidn't match stack:\n%s" % (supp, stack))
for supp in negative:
parsed = suppression_parser(supp.split("\n"), "negative_suppression")
assert not parsed[0].Match(stack.split("\n")), (
"Suppression:\n%s\ndid match stack:\n%s" % (supp, stack))
def TestFailPresubmit(supp_text, error_text, suppression_parser=None):
"""A helper function for SelfTest() that verifies a presubmit check fires.
Args:
supp_text: suppression text to parse.
error_text: text of the presubmit error we expect to find.
suppression_parser: optional arg for the suppression parser, default is
ReadValgrindStyleSuppressions.
"""
if not suppression_parser:
suppression_parser = ReadValgrindStyleSuppressions
try:
supps = suppression_parser(supp_text.split("\n"), "<presubmit suppression>")
  except SuppressionError as e:
# If parsing raised an exception, match the error text here.
assert error_text in str(e), (
"presubmit text %r not in SuppressionError:\n%r" %
(error_text, str(e)))
else:
# Otherwise, run the presubmit checks over the supps. We expect a single
# error that has text matching error_text.
errors = PresubmitCheckSuppressions(supps)
assert len(errors) == 1, (
"expected exactly one presubmit error, got:\n%s" % errors)
assert error_text in str(errors[0]), (
"presubmit text %r not in SuppressionError:\n%r" %
(error_text, str(errors[0])))
def SelfTest():
"""Tests the Suppression.Match() capabilities."""
test_memcheck_stack_1 = """{
test
Memcheck:Leak
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
test_memcheck_stack_2 = """{
test
Memcheck:Uninitialized
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
test_memcheck_stack_3 = """{
test
Memcheck:Unaddressable
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
test_memcheck_stack_4 = """{
test
Memcheck:Addr4
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
test_heapcheck_stack = """{
test
Heapcheck:Leak
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
test_tsan_stack = """{
test
ThreadSanitizer:Race
fun:absolutly
fun:brilliant
obj:condition
fun:detection
fun:expression
}"""
positive_memcheck_suppressions_1 = [
"{\nzzz\nMemcheck:Leak\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Leak\nfun:ab*ly\n}",
"{\nzzz\nMemcheck:Leak\nfun:absolutly\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\n...\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\n...\nfun:detection\n}",
"{\nzzz\nMemcheck:Leak\nfun:absolutly\n...\nfun:detection\n}",
"{\nzzz\nMemcheck:Leak\nfun:ab*ly\n...\nfun:detection\n}",
"{\nzzz\nMemcheck:Leak\n...\nobj:condition\n}",
"{\nzzz\nMemcheck:Leak\n...\nobj:condition\nfun:detection\n}",
"{\nzzz\nMemcheck:Leak\n...\nfun:brilliant\nobj:condition\n}",
]
positive_memcheck_suppressions_2 = [
"{\nzzz\nMemcheck:Uninitialized\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Uninitialized\nfun:ab*ly\n}",
"{\nzzz\nMemcheck:Uninitialized\nfun:absolutly\nfun:brilliant\n}",
# Legacy suppression types
"{\nzzz\nMemcheck:Value1\n...\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Cond\n...\nfun:detection\n}",
"{\nzzz\nMemcheck:Value8\nfun:absolutly\nfun:brilliant\n}",
]
positive_memcheck_suppressions_3 = [
"{\nzzz\nMemcheck:Unaddressable\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Unaddressable\nfun:absolutly\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Unaddressable\nfun:absolutly\nfun:brilliant\n}",
# Legacy suppression types
"{\nzzz\nMemcheck:Addr1\n...\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Addr8\n...\nfun:detection\n}",
]
positive_memcheck_suppressions_4 = [
"{\nzzz\nMemcheck:Addr4\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Unaddressable\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Addr4\nfun:absolutly\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Unaddressable\n...\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Addr4\n...\nfun:detection\n}",
]
positive_heapcheck_suppressions = [
"{\nzzz\nHeapcheck:Leak\n...\nobj:condition\n}",
"{\nzzz\nHeapcheck:Leak\nfun:absolutly\n}",
]
positive_tsan_suppressions = [
"{\nzzz\nThreadSanitizer:Race\n...\nobj:condition\n}",
"{\nzzz\nThreadSanitizer:Race\nfun:absolutly\n}",
]
negative_memcheck_suppressions_1 = [
"{\nzzz\nMemcheck:Leak\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Leak\nfun:ab*liant\n}",
"{\nzzz\nMemcheck:Leak\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\nobj:condition\n}",
"{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
]
negative_memcheck_suppressions_2 = [
"{\nzzz\nMemcheck:Cond\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Value2\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Uninitialized\nfun:ab*liant\n}",
"{\nzzz\nMemcheck:Value4\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\nobj:condition\n}",
"{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Unaddressable\nfun:brilliant\n}",
]
negative_memcheck_suppressions_3 = [
"{\nzzz\nMemcheck:Addr1\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Uninitialized\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Addr2\nfun:ab*liant\n}",
"{\nzzz\nMemcheck:Value4\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\nobj:condition\n}",
"{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
]
negative_memcheck_suppressions_4 = [
"{\nzzz\nMemcheck:Addr1\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Addr4\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Unaddressable\nfun:abnormal\n}",
"{\nzzz\nMemcheck:Addr1\nfun:absolutly\n}",
"{\nzzz\nMemcheck:Addr2\nfun:ab*liant\n}",
"{\nzzz\nMemcheck:Value4\nfun:brilliant\n}",
"{\nzzz\nMemcheck:Leak\nobj:condition\n}",
"{\nzzz\nMemcheck:Addr8\nfun:brilliant\n}",
]
negative_heapcheck_suppressions = [
"{\nzzz\nMemcheck:Leak\nfun:absolutly\n}",
"{\nzzz\nHeapcheck:Leak\nfun:brilliant\n}",
]
negative_tsan_suppressions = [
"{\nzzz\nThreadSanitizer:Leak\nfun:absolutly\n}",
"{\nzzz\nThreadSanitizer:Race\nfun:brilliant\n}",
]
TestStack(test_memcheck_stack_1,
positive_memcheck_suppressions_1,
negative_memcheck_suppressions_1)
TestStack(test_memcheck_stack_2,
positive_memcheck_suppressions_2,
negative_memcheck_suppressions_2)
TestStack(test_memcheck_stack_3,
positive_memcheck_suppressions_3,
negative_memcheck_suppressions_3)
TestStack(test_memcheck_stack_4,
positive_memcheck_suppressions_4,
negative_memcheck_suppressions_4)
TestStack(test_heapcheck_stack, positive_heapcheck_suppressions,
negative_heapcheck_suppressions)
TestStack(test_tsan_stack, positive_tsan_suppressions,
negative_tsan_suppressions)
# TODO(timurrrr): add TestFailPresubmit tests.
### DrMemory self tests.
# http://crbug.com/96010 suppression.
stack_96010 = """{
UNADDRESSABLE ACCESS
name=<insert_a_suppression_name_here>
*!TestingProfile::FinishInit
*!TestingProfile::TestingProfile
*!BrowserAboutHandlerTest_WillHandleBrowserAboutURL_Test::TestBody
*!testing::Test::Run
}"""
suppress_96010 = [
"UNADDRESSABLE ACCESS\nname=zzz\n...\n*!testing::Test::Run\n",
("UNADDRESSABLE ACCESS\nname=zzz\n...\n" +
"*!BrowserAboutHandlerTest_WillHandleBrowserAboutURL_Test::TestBody\n"),
"UNADDRESSABLE ACCESS\nname=zzz\n...\n*!BrowserAboutHandlerTest*\n",
"UNADDRESSABLE ACCESS\nname=zzz\n*!TestingProfile::FinishInit\n",
# No name should be needed
"UNADDRESSABLE ACCESS\n*!TestingProfile::FinishInit\n",
# Whole trace
("UNADDRESSABLE ACCESS\n" +
"*!TestingProfile::FinishInit\n" +
"*!TestingProfile::TestingProfile\n" +
"*!BrowserAboutHandlerTest_WillHandleBrowserAboutURL_Test::TestBody\n" +
"*!testing::Test::Run\n"),
]
negative_96010 = [
# Wrong type
"UNINITIALIZED READ\nname=zzz\n*!TestingProfile::FinishInit\n",
# No ellipsis
"UNADDRESSABLE ACCESS\nname=zzz\n*!BrowserAboutHandlerTest*\n",
]
TestStack(stack_96010, suppress_96010, negative_96010,
suppression_parser=ReadDrMemorySuppressions)
# Invalid heap arg
stack_invalid = """{
INVALID HEAP ARGUMENT
name=asdf
*!foo
}"""
suppress_invalid = [
"INVALID HEAP ARGUMENT\n*!foo\n",
]
negative_invalid = [
"UNADDRESSABLE ACCESS\n*!foo\n",
]
TestStack(stack_invalid, suppress_invalid, negative_invalid,
suppression_parser=ReadDrMemorySuppressions)
# Suppress only ntdll
stack_in_ntdll = """{
UNADDRESSABLE ACCESS
name=<insert_a_suppression_name_here>
ntdll.dll!RtlTryEnterCriticalSection
}"""
stack_not_ntdll = """{
UNADDRESSABLE ACCESS
name=<insert_a_suppression_name_here>
notntdll.dll!RtlTryEnterCriticalSection
}"""
suppress_in_ntdll = [
"UNADDRESSABLE ACCESS\nntdll.dll!RtlTryEnterCriticalSection\n",
]
suppress_in_any = [
"UNADDRESSABLE ACCESS\n*!RtlTryEnterCriticalSection\n",
]
TestStack(stack_in_ntdll, suppress_in_ntdll + suppress_in_any, [],
suppression_parser=ReadDrMemorySuppressions)
# Make sure we don't wildcard away the "not" part and match ntdll.dll by
# accident.
TestStack(stack_not_ntdll, suppress_in_any, suppress_in_ntdll,
suppression_parser=ReadDrMemorySuppressions)
# Suppress a POSSIBLE LEAK with LEAK.
stack_foo_possible = """{
POSSIBLE LEAK
name=foo possible
*!foo
}"""
suppress_foo_possible = [ "POSSIBLE LEAK\n*!foo\n" ]
suppress_foo_leak = [ "LEAK\n*!foo\n" ]
TestStack(stack_foo_possible, suppress_foo_possible + suppress_foo_leak, [],
suppression_parser=ReadDrMemorySuppressions)
# Don't suppress LEAK with POSSIBLE LEAK.
stack_foo_leak = """{
LEAK
name=foo leak
*!foo
}"""
TestStack(stack_foo_leak, suppress_foo_leak, suppress_foo_possible,
suppression_parser=ReadDrMemorySuppressions)
# Test case insensitivity of module names.
stack_user32_mixed_case = """{
LEAK
name=<insert>
USER32.dll!foo
user32.DLL!bar
user32.dll!baz
}"""
suppress_user32 = [ # Module name case doesn't matter.
"LEAK\nuser32.dll!foo\nuser32.dll!bar\nuser32.dll!baz\n",
"LEAK\nUSER32.DLL!foo\nUSER32.DLL!bar\nUSER32.DLL!baz\n",
]
no_suppress_user32 = [ # Function name case matters.
"LEAK\nuser32.dll!FOO\nuser32.dll!BAR\nuser32.dll!BAZ\n",
"LEAK\nUSER32.DLL!FOO\nUSER32.DLL!BAR\nUSER32.DLL!BAZ\n",
]
TestStack(stack_user32_mixed_case, suppress_user32, no_suppress_user32,
suppression_parser=ReadDrMemorySuppressions)
# Test mod!... frames.
stack_kernel32_through_ntdll = """{
LEAK
name=<insert>
kernel32.dll!foo
KERNEL32.dll!bar
kernel32.DLL!baz
ntdll.dll!quux
}"""
suppress_mod_ellipsis = [
"LEAK\nkernel32.dll!...\nntdll.dll!quux\n",
"LEAK\nKERNEL32.DLL!...\nntdll.dll!quux\n",
]
no_suppress_mod_ellipsis = [
# Need one or more matching frames, not zero, unlike regular ellipsis.
"LEAK\nuser32.dll!...\nkernel32.dll!...\nntdll.dll!quux\n",
]
TestStack(stack_kernel32_through_ntdll, suppress_mod_ellipsis,
no_suppress_mod_ellipsis,
suppression_parser=ReadDrMemorySuppressions)
# Test that the presubmit checks work.
forgot_to_name = """
UNADDRESSABLE ACCESS
name=<insert_a_suppression_name_here>
ntdll.dll!RtlTryEnterCriticalSection
"""
TestFailPresubmit(forgot_to_name, 'forgotten to put a suppression',
suppression_parser=ReadDrMemorySuppressions)
named_twice = """
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
*!foo
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
*!bar
"""
TestFailPresubmit(named_twice, 'defined more than once',
suppression_parser=ReadDrMemorySuppressions)
forgot_stack = """
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
"""
TestFailPresubmit(forgot_stack, 'has no stack frames',
suppression_parser=ReadDrMemorySuppressions)
ends_in_ellipsis = """
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
ntdll.dll!RtlTryEnterCriticalSection
...
"""
TestFailPresubmit(ends_in_ellipsis, 'ends in an ellipsis',
suppression_parser=ReadDrMemorySuppressions)
bad_stack_frame = """
UNADDRESSABLE ACCESS
name=http://crbug.com/1234
fun:memcheck_style_frame
"""
TestFailPresubmit(bad_stack_frame, 'Unexpected stack frame pattern',
suppression_parser=ReadDrMemorySuppressions)
# Test FilenameToTool.
filenames_to_tools = {
"tools/heapcheck/suppressions.txt": "heapcheck",
"tools/valgrind/tsan/suppressions.txt": "tsan",
"tools/valgrind/drmemory/suppressions.txt": "drmemory",
"tools/valgrind/drmemory/suppressions_full.txt": "drmemory",
"tools/valgrind/memcheck/suppressions.txt": "memcheck",
"tools/valgrind/memcheck/suppressions_mac.txt": "memcheck",
"asdf/tools/valgrind/memcheck/suppressions_mac.txt": "memcheck",
"foo/bar/baz/tools/valgrind/memcheck/suppressions_mac.txt": "memcheck",
"foo/bar/baz/tools/valgrind/suppressions.txt": None,
"tools/valgrind/suppressions.txt": None,
}
for (filename, expected_tool) in filenames_to_tools.items():
    filename = filename.replace('/', os.sep)  # Make the path look native.
tool = FilenameToTool(filename)
assert tool == expected_tool, (
"failed to get expected tool for filename %r, expected %s, got %s" %
(filename, expected_tool, tool))
# Test ValgrindStyleSuppression.__str__.
supp = ValgrindStyleSuppression("http://crbug.com/1234", "Memcheck:Leak",
["...", "fun:foo"], "supp.txt:1")
# Intentional 3-space indent. =/
supp_str = ("{\n"
" http://crbug.com/1234\n"
" Memcheck:Leak\n"
" ...\n"
" fun:foo\n"
"}\n")
assert str(supp) == supp_str, (
"str(supp) != supp_str:\nleft: %s\nright: %s" % (str(supp), supp_str))
# Test DrMemorySuppression.__str__.
supp = DrMemorySuppression(
"http://crbug.com/1234", "LEAK", None, ["...", "*!foo"], "supp.txt:1")
supp_str = ("LEAK\n"
"name=http://crbug.com/1234\n"
"...\n"
"*!foo\n")
assert str(supp) == supp_str, (
"str(supp) != supp_str:\nleft: %s\nright: %s" % (str(supp), supp_str))
supp = DrMemorySuppression(
"http://crbug.com/1234", "UNINITIALIZED READ", "test 0x08(%eax) $0x01",
["ntdll.dll!*", "*!foo"], "supp.txt:1")
supp_str = ("UNINITIALIZED READ\n"
"name=http://crbug.com/1234\n"
"instruction=test 0x08(%eax) $0x01\n"
"ntdll.dll!*\n"
"*!foo\n")
assert str(supp) == supp_str, (
"str(supp) != supp_str:\nleft: %s\nright: %s" % (str(supp), supp_str))
if __name__ == '__main__':
SelfTest()
print 'PASS'
|
|
"""
The :mod:`sklearn.metrics.pairwise` submodule implements utilities to evaluate
pairwise distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are functions d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" than objects a and c. Two objects exactly
alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions::
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. ``s(a, b) > s(a, c)``
if objects ``a`` and ``b`` are considered "more similar" than objects
``a`` and ``c``. A kernel must also be positive semi-definite.
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be the
kernel::
1. ``S = np.exp(-D * gamma)``, where one heuristic for choosing
``gamma`` is ``1 / num_features``
2. ``S = 1. / (D / np.max(D))``
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# License: BSD Style.
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import safe_asarray
from ..utils import atleast2d_or_csr
from ..utils import gen_even_slices
from ..utils.extmath import safe_sparse_dot
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
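# For illustration only (a sketch of the distance-to-kernel conversions
# described in the module docstring; X and D are placeholder names):
#   D = euclidean_distances(X)           # pairwise distance matrix
#   S = np.exp(-D * (1.0 / X.shape[1]))  # heuristic gamma = 1 / num_features
#   S = 1. / (D / np.max(D))             # note: the zero diagonal maps to inf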
# Utility Functions
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
Returns
-------
safe_X : {array-like, sparse matrix}, shape = [n_samples_a, n_features]
        An array equal to X, guaranteed to be a numpy array.
    safe_Y : {array-like, sparse matrix}, shape = [n_samples_b, n_features]
        An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
if Y is X or Y is None:
X = safe_asarray(X)
X = Y = atleast2d_or_csr(X, dtype=np.float)
else:
X = safe_asarray(X)
Y = safe_asarray(Y)
X = atleast2d_or_csr(X, dtype=np.float)
Y = atleast2d_or_csr(Y, dtype=np.float)
if len(X.shape) < 2:
raise ValueError("X is required to be at least two dimensional.")
if len(Y.shape) < 2:
raise ValueError("Y is required to be at least two dimensional.")
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
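# Usage sketch: when Y is None (or Y is X), both returned values reference the
# same validated array, so callers can detect the "X is Y" case cheaply:
#   X_checked, Y_checked = check_pairwise_arrays(X, None)
#   assert X_checked is Y_checked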
# Distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
    vectors x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two main advantages. First, it is computationally
efficient when dealing with sparse data. Second, if x varies but y
remains unchanged, then the right-most dot-product `dot(y, y)` can be
pre-computed.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples_1, n_features]
Y : {array-like, sparse matrix}, shape = [n_samples_2, n_features]
Y_norm_squared : array-like, shape = [n_samples_2], optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape = [n_samples_1, n_samples_2]
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if issparse(X):
XX = X.multiply(X).sum(axis=1)
else:
XX = np.sum(X * X, axis=1)[:, np.newaxis]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is None:
if issparse(Y):
# scipy.sparse matrices don't have element-wise scalar
# exponentiation, and tocsr has a copy kwarg only on CSR matrices.
YY = Y.copy() if isinstance(Y, csr_matrix) else Y.tocsr()
YY.data **= 2
YY = np.asarray(YY.sum(axis=1)).T
else:
YY = np.sum(Y ** 2, axis=1)[np.newaxis, :]
else:
YY = atleast2d_or_csr(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
# TODO: a faster Cython implementation would do the clipping of negative
# values in a single pass over the output matrix.
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances)
def manhattan_distances(X, Y=None, sum_over_features=True):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
        componentwise L1 pairwise-distances (i.e. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise l1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
n_samples_X, n_features_X = X.shape
n_samples_Y, n_features_Y = Y.shape
if n_features_X != n_features_Y:
raise Exception("X and Y should have the same number of features!")
D = np.abs(X[:, np.newaxis, :] - Y[np.newaxis, :, :])
if sum_over_features:
D = np.sum(D, axis=2)
else:
D = D.reshape((n_samples_X * n_samples_Y, n_features_X))
return D
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=0, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
    degree : int, default: 3
    gamma : float, default: 0 (which means 1 / n_features)
    coef0 : float, default: 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma == 0:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
K **= degree
return K
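# Worked example (sketch): with X = [[1, 0]], Y = [[1, 1]], degree=2 and the
# default gamma (0 is replaced by 1 / n_features = 0.5):
#   <x, y> = 1, so K = (0.5 * 1 + 1) ** 2 = 2.25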
def sigmoid_kernel(X, Y=None, gamma=0, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
    gamma : float, default: 0 (which means 1 / n_features)
    coef0 : float, default: 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma == 0:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=0):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(X, Y) = exp(-gamma ||X-Y||^2)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
    gamma : float, default: 0 (which means 1 / n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma == 0:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
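# Worked example (sketch): with X = [[0, 0]], Y = [[1, 0]] and the default
# gamma (0 is replaced by 1 / n_features = 0.5):
#   ||x - y||^2 = 1, so K = exp(-0.5 * 1) ~= 0.6065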
# Helper functions - distance
pairwise_distance_functions = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'cityblock': manhattan_distances,
}
def distance_metrics():
""" Valid metrics for pairwise_distances
This function simply returns the valid pairwise distance metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=========== ====================================
metric Function
=========== ====================================
'cityblock' sklearn.pairwise.manhattan_distances
'euclidean' sklearn.pairwise.euclidean_distances
'l1' sklearn.pairwise.manhattan_distances
'l2' sklearn.pairwise.euclidean_distances
'manhattan' sklearn.pairwise.manhattan_distances
=========== ====================================
"""
return pairwise_distance_functions
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
ret = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(func)(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
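# How the work is split (sketch): gen_even_slices cuts the rows of Y into
# n_jobs contiguous slices, each job computes the [n_samples_X, slice_length]
# block func(X, Y[s]), and np.hstack glues the blocks back together
# column-wise into the full pairwise matrix.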
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
    computed. If the input is a distance matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
    preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Please note that support for sparse matrices is currently limited to those
metrics listed in pairwise.pairwise_distance_functions.
Valid values for metric are:
- from scikit-learn: ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
      'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
Note in the case of 'euclidean' and 'cityblock' (which are valid
scipy.spatial.distance metrics), the values will use the scikit-learn
implementation, which is faster and has support for sparse matrices.
For a verbose description of the metrics from scikit-learn, see the
__doc__ of the sklearn.pairwise.distance_metrics function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.pairwise_distance_functions.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if metric == "precomputed":
return X
elif metric in pairwise_distance_functions:
func = pairwise_distance_functions[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate distance for each element in X and Y.
# FIXME: can use n_jobs here too
D = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# distance assumed to be symmetric.
D[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
D[j][i] = D[i][j]
return D
else:
# Note: the distance module doesn't support sparse matrices!
if type(X) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
if Y is None:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
else:
if type(Y) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
return distance.cdist(X, Y, metric=metric, **kwds)
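# Usage sketch (illustrative only): a callable metric receives one row of X
# and one row of Y at a time, e.g.
#   pairwise_distances(X, metric=lambda a, b: np.abs(a - b).sum())
# which, for dense float input, is equivalent to metric="manhattan".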
# Helper functions - kernels
pairwise_kernel_functions = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'linear': linear_kernel
}
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
============ ==================================
metric Function
============ ==================================
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
============ ==================================
"""
return pairwise_kernel_functions
kernel_params = {
"rbf": set(("gamma",)),
"sigmoid": set(("gamma", "coef0")),
"polynomial": set(("gamma", "degree", "coef0")),
"poly": set(("gamma", "degree", "coef0")),
"linear": ()
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
""" Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
    preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.pairwise_kernel_functions.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
        the kernel between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
    filter_params : boolean
        Whether to filter keyword arguments down to those that are valid
        parameters for the chosen kernel (see ``kernel_params``).
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
"""
if metric == "precomputed":
return X
elif metric in pairwise_kernel_functions:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds \
if k in kernel_params[metric])
func = pairwise_kernel_functions[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate kernel for each element in X and Y.
K = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
K[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
K[j][i] = K[i][j]
return K
else:
raise AttributeError("Unknown metric %s" % metric)
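# Illustrative usage sketch (not part of the original module): calling
# pairwise_kernels with a string metric and letting filter_params drop keyword
# arguments that the chosen kernel does not accept. The input array is made up.
#
#     >>> import numpy as np
#     >>> X = np.random.rand(5, 3)
#     >>> K = pairwise_kernels(X, metric='rbf', filter_params=True,
#     ...                      gamma=0.5, degree=3)  # 'degree' is filtered out
#     >>> K.shape
#     (5, 5)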
|
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:5888")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:5888")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Eggcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
        except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
|
|
from collections.abc import Mapping
import os
import numpy as np
import pytest
import openmc
import openmc.exceptions as exc
import openmc.capi
from tests import cdtemp
@pytest.fixture(scope='module')
def pincell_model():
"""Set up a model to test with and delete files when done"""
openmc.reset_auto_ids()
pincell = openmc.examples.pwr_pin_cell()
pincell.settings.verbosity = 1
# Add a tally
filter1 = openmc.MaterialFilter(pincell.materials)
filter2 = openmc.EnergyFilter([0.0, 1.0, 1.0e3, 20.0e6])
mat_tally = openmc.Tally()
mat_tally.filters = [filter1, filter2]
mat_tally.nuclides = ['U235', 'U238']
mat_tally.scores = ['total', 'elastic', '(n,gamma)']
pincell.tallies.append(mat_tally)
# Add an expansion tally
zernike_tally = openmc.Tally()
filter3 = openmc.ZernikeFilter(5, r=.63)
cells = pincell.geometry.root_universe.cells
filter4 = openmc.CellFilter(list(cells.values()))
zernike_tally.filters = [filter3, filter4]
zernike_tally.scores = ['fission']
pincell.tallies.append(zernike_tally)
# Write XML files in tmpdir
with cdtemp():
pincell.export_to_xml()
yield
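# The module-scoped fixtures below build on one another: capi_init loads the
# XML model produced by pincell_model into the C API, capi_simulation_init
# additionally initializes a simulation, and capi_run additionally runs it.
# Each test requests whichever fixture provides the amount of setup it needs.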
@pytest.fixture(scope='module')
def capi_init(pincell_model):
openmc.capi.init()
yield
openmc.capi.finalize()
@pytest.fixture(scope='module')
def capi_simulation_init(capi_init):
openmc.capi.simulation_init()
yield
@pytest.fixture(scope='module')
def capi_run(capi_simulation_init):
openmc.capi.run()
def test_cell_mapping(capi_init):
cells = openmc.capi.cells
assert isinstance(cells, Mapping)
assert len(cells) == 3
for cell_id, cell in cells.items():
assert isinstance(cell, openmc.capi.Cell)
assert cell_id == cell.id
def test_cell(capi_init):
cell = openmc.capi.cells[1]
assert isinstance(cell.fill, openmc.capi.Material)
cell.fill = openmc.capi.materials[1]
assert str(cell) == 'Cell[1]'
def test_new_cell(capi_init):
with pytest.raises(exc.AllocationError):
openmc.capi.Cell(1)
new_cell = openmc.capi.Cell()
new_cell_with_id = openmc.capi.Cell(10)
assert len(openmc.capi.cells) == 5
def test_material_mapping(capi_init):
mats = openmc.capi.materials
assert isinstance(mats, Mapping)
assert len(mats) == 3
for mat_id, mat in mats.items():
assert isinstance(mat, openmc.capi.Material)
assert mat_id == mat.id
def test_material(capi_init):
m = openmc.capi.materials[3]
assert m.nuclides == ['H1', 'O16', 'B10', 'B11']
old_dens = m.densities
test_dens = [1.0e-1, 2.0e-1, 2.5e-1, 1.0e-3]
m.set_densities(m.nuclides, test_dens)
assert m.densities == pytest.approx(test_dens)
assert m.volume is None
m.volume = 10.0
assert m.volume == 10.0
with pytest.raises(exc.InvalidArgumentError):
m.set_density(1.0, 'goblins')
rho = 2.25e-2
m.set_density(rho)
assert sum(m.densities) == pytest.approx(rho)
def test_new_material(capi_init):
with pytest.raises(exc.AllocationError):
openmc.capi.Material(1)
new_mat = openmc.capi.Material()
new_mat_with_id = openmc.capi.Material(10)
assert len(openmc.capi.materials) == 5
def test_nuclide_mapping(capi_init):
nucs = openmc.capi.nuclides
assert isinstance(nucs, Mapping)
assert len(nucs) == 12
for name, nuc in nucs.items():
assert isinstance(nuc, openmc.capi.Nuclide)
assert name == nuc.name
def test_load_nuclide(capi_init):
openmc.capi.load_nuclide('Pu239')
with pytest.raises(exc.DataError):
openmc.capi.load_nuclide('Pu3')
def test_settings(capi_init):
settings = openmc.capi.settings
assert settings.batches == 10
settings.batches = 10
assert settings.inactive == 5
assert settings.generations_per_batch == 1
assert settings.particles == 100
assert settings.seed == 1
settings.seed = 11
assert settings.run_mode == 'eigenvalue'
settings.run_mode = 'volume'
settings.run_mode = 'eigenvalue'
def test_tally_mapping(capi_init):
tallies = openmc.capi.tallies
assert isinstance(tallies, Mapping)
assert len(tallies) == 2
for tally_id, tally in tallies.items():
assert isinstance(tally, openmc.capi.Tally)
assert tally_id == tally.id
def test_tally(capi_init):
t = openmc.capi.tallies[1]
assert len(t.filters) == 2
assert isinstance(t.filters[0], openmc.capi.MaterialFilter)
assert isinstance(t.filters[1], openmc.capi.EnergyFilter)
# Create new filter and replace existing
with pytest.raises(exc.AllocationError):
openmc.capi.MaterialFilter(uid=1)
mats = openmc.capi.materials
f = openmc.capi.MaterialFilter([mats[2], mats[1]])
assert f.bins[0] == mats[2]
assert f.bins[1] == mats[1]
t.filters = [f]
assert t.filters == [f]
assert t.nuclides == ['U235', 'U238']
with pytest.raises(exc.DataError):
t.nuclides = ['Zr2']
t.nuclides = ['U234', 'Zr90']
assert t.nuclides == ['U234', 'Zr90']
assert t.scores == ['total', '(n,elastic)', '(n,gamma)']
new_scores = ['scatter', 'fission', 'nu-fission', '(n,2n)']
t.scores = new_scores
assert t.scores == new_scores
t2 = openmc.capi.tallies[2]
assert len(t2.filters) == 2
assert isinstance(t2.filters[0], openmc.capi.ZernikeFilter)
assert isinstance(t2.filters[1], openmc.capi.CellFilter)
assert len(t2.filters[1].bins) == 3
assert t2.filters[0].order == 5
def test_new_tally(capi_init):
with pytest.raises(exc.AllocationError):
openmc.capi.Material(1)
new_tally = openmc.capi.Tally()
new_tally.scores = ['flux']
new_tally_with_id = openmc.capi.Tally(10)
new_tally_with_id.scores = ['flux']
assert len(openmc.capi.tallies) == 4
def test_tally_activate(capi_simulation_init):
t = openmc.capi.tallies[1]
assert not t.active
t.active = True
assert t.active
def test_tally_results(capi_run):
t = openmc.capi.tallies[1]
    assert t.num_realizations == 10  # t was made active in test_tally_activate
assert np.all(t.mean >= 0)
nonzero = (t.mean > 0.0)
assert np.all(t.std_dev[nonzero] >= 0)
assert np.all(t.ci_width()[nonzero] >= 1.95*t.std_dev[nonzero])
t2 = openmc.capi.tallies[2]
n = 5
assert t2.mean.size == (n + 1) * (n + 2) // 2 * 3 # Number of Zernike coeffs * 3 cells
def test_global_tallies(capi_run):
assert openmc.capi.num_realizations() == 5
gt = openmc.capi.global_tallies()
for mean, std_dev in gt:
assert mean >= 0
def test_statepoint(capi_run):
openmc.capi.statepoint_write('test_sp.h5')
assert os.path.exists('test_sp.h5')
def test_source_bank(capi_run):
source = openmc.capi.source_bank()
assert np.all(source['E'] > 0.0)
assert np.all(source['wgt'] == 1.0)
def test_by_batch(capi_run):
openmc.capi.hard_reset()
# Running next batch before simulation is initialized should raise an
# exception
with pytest.raises(exc.AllocationError):
openmc.capi.next_batch()
openmc.capi.simulation_init()
try:
for _ in openmc.capi.iter_batches():
# Make sure we can get k-effective during inactive/active batches
mean, std_dev = openmc.capi.keff()
assert 0.0 < mean < 2.5
assert std_dev > 0.0
assert openmc.capi.num_realizations() == 5
for i in range(3):
openmc.capi.next_batch()
assert openmc.capi.num_realizations() == 8
finally:
openmc.capi.simulation_finalize()
def test_reset(capi_run):
# Init and run 10 batches.
openmc.capi.hard_reset()
openmc.capi.simulation_init()
try:
for i in range(10):
openmc.capi.next_batch()
# Make sure there are 5 realizations for the 5 active batches.
assert openmc.capi.num_realizations() == 5
assert openmc.capi.tallies[2].num_realizations == 5
_, keff_sd1 = openmc.capi.keff()
tally_sd1 = openmc.capi.tallies[2].std_dev[0]
# Reset and run 3 more batches. Check the number of realizations.
openmc.capi.reset()
for i in range(3):
openmc.capi.next_batch()
assert openmc.capi.num_realizations() == 3
assert openmc.capi.tallies[2].num_realizations == 3
# Check the tally std devs to make sure results were cleared.
_, keff_sd2 = openmc.capi.keff()
tally_sd2 = openmc.capi.tallies[2].std_dev[0]
assert keff_sd2 > keff_sd1
assert tally_sd2 > tally_sd1
finally:
openmc.capi.simulation_finalize()
def test_reproduce_keff(capi_init):
# Get k-effective after run
openmc.capi.hard_reset()
openmc.capi.run()
keff0 = openmc.capi.keff()
    # Reset, run again, and get k-effective again. They should match.
openmc.capi.hard_reset()
openmc.capi.run()
keff1 = openmc.capi.keff()
assert keff0 == pytest.approx(keff1)
def test_find_cell(capi_init):
cell, instance = openmc.capi.find_cell((0., 0., 0.))
assert cell is openmc.capi.cells[1]
cell, instance = openmc.capi.find_cell((0.4, 0., 0.))
assert cell is openmc.capi.cells[2]
with pytest.raises(exc.GeometryError):
openmc.capi.find_cell((100., 100., 100.))
def test_find_material(capi_init):
mat = openmc.capi.find_material((0., 0., 0.))
assert mat is openmc.capi.materials[1]
mat = openmc.capi.find_material((0.4, 0., 0.))
assert mat is openmc.capi.materials[2]
def test_mesh(capi_init):
mesh = openmc.capi.Mesh()
mesh.dimension = (2, 3, 4)
assert mesh.dimension == (2, 3, 4)
with pytest.raises(exc.AllocationError):
mesh2 = openmc.capi.Mesh(mesh.id)
# Make sure each combination of parameters works
ll = (0., 0., 0.)
ur = (10., 10., 10.)
width = (1., 1., 1.)
mesh.set_parameters(lower_left=ll, upper_right=ur)
assert mesh.lower_left == pytest.approx(ll)
assert mesh.upper_right == pytest.approx(ur)
mesh.set_parameters(lower_left=ll, width=width)
assert mesh.lower_left == pytest.approx(ll)
assert mesh.width == pytest.approx(width)
mesh.set_parameters(upper_right=ur, width=width)
assert mesh.upper_right == pytest.approx(ur)
assert mesh.width == pytest.approx(width)
meshes = openmc.capi.meshes
assert isinstance(meshes, Mapping)
assert len(meshes) == 1
for mesh_id, mesh in meshes.items():
assert isinstance(mesh, openmc.capi.Mesh)
assert mesh_id == mesh.id
mf = openmc.capi.MeshFilter(mesh)
assert mf.mesh == mesh
msf = openmc.capi.MeshSurfaceFilter(mesh)
assert msf.mesh == mesh
def test_restart(capi_init):
# Finalize and re-init to make internal state consistent with XML.
openmc.capi.hard_reset()
openmc.capi.finalize()
openmc.capi.init()
openmc.capi.simulation_init()
# Run for 7 batches then write a statepoint.
for i in range(7):
openmc.capi.next_batch()
openmc.capi.statepoint_write('restart_test.h5', True)
# Run 3 more batches and copy the keff.
for i in range(3):
openmc.capi.next_batch()
keff0 = openmc.capi.keff()
    # Restart the simulation from the statepoint and run the 3 remaining active batches.
openmc.capi.simulation_finalize()
openmc.capi.hard_reset()
openmc.capi.finalize()
openmc.capi.init(args=('-r', 'restart_test.h5'))
openmc.capi.simulation_init()
for i in range(3):
openmc.capi.next_batch()
keff1 = openmc.capi.keff()
openmc.capi.simulation_finalize()
# Compare the keff values.
assert keff0 == pytest.approx(keff1)
|
|
# coding=utf-8
from __future__ import absolute_import, division, print_function, \
unicode_literals
from collections import OrderedDict
from typing import Any, Dict, Generator, Iterable, Mapping, Optional, \
Text, Tuple, Union
from six import iteritems, iterkeys, python_2_unicode_compatible
from filters.base import BaseFilter, FilterCompatible, FilterError, Type
from filters.string import Unicode
__all__ = [
'FilterMapper',
'FilterRepeater',
]
@python_2_unicode_compatible
class FilterRepeater(BaseFilter):
"""
Applies a filter to every value in an Iterable.
You can apply a FilterRepeater to a dict (or other Mapping). The
filters will be applied to the Mapping's values.
Note: The resulting value will be coerced to a list or OrderedDict
(depending on the input value).
"""
CODE_EXTRA_KEY = 'unexpected'
templates = {
CODE_EXTRA_KEY: 'Unexpected key "{key}".',
}
mapping_result_type = OrderedDict
sequence_result_type = list
def __init__(self, filter_chain, restrict_keys=None):
# type: (FilterCompatible, Optional[Iterable]) -> None
"""
:param filter_chain:
The filter(s) that will be applied to each item in the
incoming iterables.
:param restrict_keys:
Only these keys/indexes will be allowed (any other
keys/indexes encountered will be treated as invalid
values).
            Important: If this is an empty container, EVERY
            key/index will be rejected!
Set to ``None`` (default) to allow any key/index.
"""
super(FilterRepeater, self).__init__()
self._filter_chain = self.resolve_filter(filter_chain, parent=self)
self.restrict_keys = (
None
if restrict_keys is None
else set(restrict_keys)
)
def __str__(self):
return '{type}({filter_chain})'.format(
type = type(self).__name__,
filter_chain = self._filter_chain,
)
# noinspection PyProtectedMember
@classmethod
def __copy__(cls, the_filter):
# type: (FilterRepeater) -> FilterRepeater
"""
Creates a shallow copy of the object.
"""
new_filter = super(FilterRepeater, cls).__copy__(the_filter) # type: FilterRepeater
new_filter._filter_chain = the_filter._filter_chain
new_filter.restrict_keys = the_filter.restrict_keys
return new_filter
def _apply(self, value):
value = self._filter(value, Type(Iterable)) # type: Iterable
if self._has_errors:
return None
result_type = (
self.mapping_result_type
if isinstance(value, Mapping)
else self.sequence_result_type
)
return result_type(self.iter(value))
def iter(self, value):
# type: (Iterable) -> Generator[Any]
"""
Iterator version of :py:meth:`apply`.
"""
if value is not None:
if isinstance(value, Mapping):
for k, v in iteritems(value):
u_key = self.unicodify_key(k)
if (
(self.restrict_keys is None)
or (k in self.restrict_keys)
):
yield k, self._apply_item(u_key, v, self._filter_chain)
else:
# For consistency with FilterMapper, invalid
# keys are not included in the filtered
# value (hence this statement does not
# ``yield``).
self._invalid_value(
value = v,
reason = self.CODE_EXTRA_KEY,
sub_key = u_key,
)
else:
for i, v in enumerate(value):
u_key = self.unicodify_key(i)
if (
(self.restrict_keys is None)
or (i in self.restrict_keys)
):
yield self._apply_item(u_key, v, self._filter_chain)
else:
# Unlike in mappings, it is not possible to
# identify a "missing" item in a collection,
# so we have to ensure that something ends up
# in the filtered value at the same position
# as the invalid incoming value.
yield self._invalid_value(
value = v,
reason = self.CODE_EXTRA_KEY,
sub_key = u_key,
)
def _apply_item(self, key, value, filter_chain):
# type: (Text, Any, FilterCompatible) -> Any
"""
Applies filters to a single value in the iterable.
Override this method in a subclass if you want to customize the
way specific items get filtered.
"""
return self._filter(value, filter_chain, sub_key=key)
@staticmethod
def unicodify_key(key):
# type: (Any) -> Text
"""
Converts a key value into a unicode so that it can be
        represented in contexts such as error messages.
"""
if key is None:
return 'None'
try:
return Unicode().apply(key)
except FilterError:
return repr(key)
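# Illustrative usage sketch (not part of the original module), assuming the
# ``Int`` filter from ``filters.number`` is available:
#
#     >>> from filters.number import Int
#     >>> FilterRepeater(Int()).apply(['1', '2', '3'])
#     [1, 2, 3]
#
# Applied to a Mapping, the same repeater filters the values and returns an
# OrderedDict keyed like the input.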
@python_2_unicode_compatible
class FilterMapper(BaseFilter):
"""
Given a dict of filters, applies each filter to the corresponding
value in incoming mappings.
The resulting value is an OrderedDict. The order of keys in the
``filter_map`` passed to the initializer determines the order of
keys in the filtered value.
Note: The order of extra keys is undefined, but they will always be
last.
"""
CODE_EXTRA_KEY = 'unexpected'
CODE_MISSING_KEY = 'missing'
templates = {
CODE_EXTRA_KEY: 'Unexpected key "{actual_key}".',
CODE_MISSING_KEY: '{key} is required.',
}
def __init__(
self,
filter_map,
allow_missing_keys = True,
allow_extra_keys = True,
):
# type: (Dict[Text, FilterCompatible], Union[bool, Iterable[Text]], Union[bool, Iterable[Text]]) -> None
"""
:param filter_map:
This mapping also determines the key order of the resulting
OrderedDict. If necessary, make sure that your code
provides ``filter_map`` as an OrderedDict.
:param allow_missing_keys:
Determines how values with missing keys (according to
``filter_map``) get handled:
- True: The missing values are set to ``None`` and then
filtered as normal.
- False: Missing keys are treated as invalid values.
- <Iterable>: Only the specified keys are allowed to be
omitted.
:param allow_extra_keys:
Determines how values with extra keys (according to
``filter_map``) get handled:
- True: The extra values are passed through to the filtered
value.
- False: Extra values are treated as invalid values and
omitted from the filtered value.
- <Iterable>: Only the specified extra keys are allowed.
"""
super(FilterMapper, self).__init__()
self._filters = OrderedDict()
self.allow_missing_keys = (
set(allow_missing_keys)
if isinstance(allow_missing_keys, Iterable)
else bool(allow_missing_keys)
)
self.allow_extra_keys = (
set(allow_extra_keys)
if isinstance(allow_extra_keys, Iterable)
else bool(allow_extra_keys)
)
if filter_map:
for key, filter_chain in iteritems(filter_map): # type: Tuple[Text, BaseFilter]
#
# Note that the normalized Filter could be `None`.
#
# This has the effect of making a key "required"
# (depending on `allow_missing_keys`) without
# applying any Filters to the value.
#
self._filters[key] =\
self.resolve_filter(filter_chain, parent=self, key=key)
# If the filter map is an OrderedDict, we should try to
# preserve order when applying the filter. Otherwise use a
# plain ol' dict to improve readability.
self.result_type = (
OrderedDict
if isinstance(filter_map, OrderedDict)
else dict
)
def __str__(self):
return '{type}({filters})'.format(
type = type(self).__name__,
filters = ', '.join(
'{key}={filter}'.format(key=key, filter=filter_chain)
for key, filter_chain in iteritems(self._filters)
),
)
def _apply(self, value):
value = self._filter(value, Type(Mapping)) # type: Mapping
if self._has_errors:
return None
return self.result_type(self.iter(value))
def iter(self, value):
# type: (Mapping) -> Generator[Text, Any]
"""
Iterator version of :py:meth:`apply`.
"""
if value is not None:
# Apply filtered values first.
for key, filter_chain in iteritems(self._filters):
if key in value:
yield key, self._apply_item(key, value[key], filter_chain)
elif self._missing_key_allowed(key):
# Filter the missing value as if it was set to
# ``None``.
yield key, self._apply_item(key, None, filter_chain)
else:
# Treat the missing value as invalid.
yield key, self._invalid_value(
value = None,
reason = self.CODE_MISSING_KEY,
sub_key = key,
)
# Extra values go last.
# Note that we iterate in sorted order, in case the result
# type preserves ordering.
# https://github.com/eflglobal/filters/issues/13
for key in sorted(
set(iterkeys(value))
- set(iterkeys(self._filters))
):
if self._extra_key_allowed(key):
yield key, value[key]
else:
unicode_key = self.unicodify_key(key)
# Handle the extra value just like any other
# invalid value, but do not include it in the
# result (note that there is no ``yield`` here).
self._invalid_value(
value = value[key],
reason = self.CODE_EXTRA_KEY,
sub_key = unicode_key,
# https://github.com/eflglobal/filters/issues/15
template_vars = {
'actual_key': unicode_key,
},
)
def _apply_item(self, key, value, filter_chain):
# type: (Text, Any, FilterCompatible) -> Any
"""
Applies filters to a single item in the mapping.
Override this method in a subclass if you want to customize the
way specific items get filtered.
"""
return self._filter(value, filter_chain, sub_key=key)
def _missing_key_allowed(self, key):
# type: (Text) -> bool
"""
Returns whether the specified key is allowed to be omitted from
the incoming value.
"""
if self.allow_missing_keys is True:
return True
try:
return key in self.allow_missing_keys
except TypeError:
return False
def _extra_key_allowed(self, key):
# type: (Text) -> bool
"""
Returns whether the specified extra key is allowed.
"""
if self.allow_extra_keys is True:
return True
try:
return key in self.allow_extra_keys
except TypeError:
return False
@staticmethod
def unicodify_key(key):
# type: (Text) -> Text
"""
Converts a key value into a unicode so that it can be
        represented in contexts such as error messages.
"""
if key is None:
return 'None'
try:
return Unicode().apply(key)
except FilterError:
return repr(key)
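# Illustrative usage sketch (not part of the original module), using the
# ``Unicode`` filter already imported at the top of this file:
#
#     >>> fm = FilterMapper(
#     ...     {'id': Unicode(), 'name': Unicode()},
#     ...     allow_missing_keys=False,
#     ...     allow_extra_keys=False,
#     ... )
#     >>> result = fm.apply({'id': '42', 'name': 'test'})
#
# ``result`` holds the filtered values keyed like ``filter_map``; a missing key
# would be reported with CODE_MISSING_KEY and an unexpected key with
# CODE_EXTRA_KEY, per ``iter`` above.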
|
|
"""Code coverage utilities."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import re
import time
from xml.etree.ElementTree import (
Comment,
Element,
SubElement,
tostring,
)
from xml.dom import (
minidom,
)
from . import types as t
from .target import (
walk_module_targets,
walk_compile_targets,
walk_powershell_targets,
)
from .util import (
display,
ApplicationError,
common_environment,
ANSIBLE_TEST_DATA_ROOT,
to_text,
make_dirs,
)
from .util_common import (
intercept_command,
ResultType,
write_text_test_results,
write_json_test_results,
)
from .config import (
CoverageConfig,
CoverageReportConfig,
)
from .env import (
get_ansible_version,
)
from .executor import (
Delegate,
install_command_requirements,
)
from .data import (
data_context,
)
COVERAGE_GROUPS = ('command', 'target', 'environment', 'version')
COVERAGE_CONFIG_PATH = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'coveragerc')
COVERAGE_OUTPUT_FILE_NAME = 'coverage'
def command_coverage_combine(args):
"""Patch paths in coverage files and merge into a single file.
:type args: CoverageConfig
:rtype: list[str]
"""
paths = _command_coverage_combine_powershell(args) + _command_coverage_combine_python(args)
for path in paths:
display.info('Generated combined output: %s' % path, verbosity=1)
return paths
def _command_coverage_combine_python(args):
"""
:type args: CoverageConfig
:rtype: list[str]
"""
coverage = initialize_coverage(args)
modules = dict((target.module, target.path) for target in list(walk_module_targets()) if target.path.endswith('.py'))
coverage_dir = ResultType.COVERAGE.path
coverage_files = [os.path.join(coverage_dir, f) for f in os.listdir(coverage_dir)
if '=coverage.' in f and '=python' in f]
counter = 0
sources = _get_coverage_targets(args, walk_compile_targets)
groups = _build_stub_groups(args, sources, lambda line_count: set())
if data_context().content.collection:
collection_search_re = re.compile(r'/%s/' % data_context().content.collection.directory)
collection_sub_re = re.compile(r'^.*?/%s/' % data_context().content.collection.directory)
else:
collection_search_re = None
collection_sub_re = None
for coverage_file in coverage_files:
counter += 1
display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
original = coverage.CoverageData()
group = get_coverage_group(args, coverage_file)
if group is None:
display.warning('Unexpected name for coverage file: %s' % coverage_file)
continue
if os.path.getsize(coverage_file) == 0:
display.warning('Empty coverage file: %s' % coverage_file)
continue
try:
original.read_file(coverage_file)
except Exception as ex: # pylint: disable=locally-disabled, broad-except
display.error(u'%s' % ex)
continue
for filename in original.measured_files():
arcs = set(original.arcs(filename) or [])
if not arcs:
# This is most likely due to using an unsupported version of coverage.
display.warning('No arcs found for "%s" in coverage file: %s' % (filename, coverage_file))
continue
filename = _sanitise_filename(filename, modules=modules, collection_search_re=collection_search_re,
collection_sub_re=collection_sub_re)
if not filename:
continue
if group not in groups:
groups[group] = {}
arc_data = groups[group]
if filename not in arc_data:
arc_data[filename] = set()
arc_data[filename].update(arcs)
output_files = []
invalid_path_count = 0
invalid_path_chars = 0
coverage_file = os.path.join(ResultType.COVERAGE.path, COVERAGE_OUTPUT_FILE_NAME)
for group in sorted(groups):
arc_data = groups[group]
updated = coverage.CoverageData()
for filename in arc_data:
if not os.path.isfile(filename):
if collection_search_re and collection_search_re.search(filename) and os.path.basename(filename) == '__init__.py':
# the collection loader uses implicit namespace packages, so __init__.py does not need to exist on disk
continue
invalid_path_count += 1
invalid_path_chars += len(filename)
if args.verbosity > 1:
display.warning('Invalid coverage path: %s' % filename)
continue
updated.add_arcs({filename: list(arc_data[filename])})
if args.all:
updated.add_arcs(dict((source[0], []) for source in sources))
if not args.explain:
output_file = coverage_file + group
updated.write_file(output_file)
output_files.append(output_file)
if invalid_path_count > 0:
display.warning('Ignored %d characters from %d invalid coverage path(s).' % (invalid_path_chars, invalid_path_count))
return sorted(output_files)
def _get_coverage_targets(args, walk_func):
"""
:type args: CoverageConfig
:type walk_func: Func
:rtype: list[tuple[str, int]]
"""
sources = []
if args.all or args.stub:
# excludes symlinks of regular files to avoid reporting on the same file multiple times
# in the future it would be nice to merge any coverage for symlinks into the real files
for target in walk_func(include_symlinks=False):
target_path = os.path.abspath(target.path)
with open(target_path, 'r') as target_fd:
target_lines = len(target_fd.read().splitlines())
sources.append((target_path, target_lines))
sources.sort()
return sources
def _build_stub_groups(args, sources, default_stub_value):
"""
:type args: CoverageConfig
:type sources: List[tuple[str, int]]
:type default_stub_value: Func[int]
:rtype: dict
"""
groups = {}
if args.stub:
stub_group = []
stub_groups = [stub_group]
stub_line_limit = 500000
stub_line_count = 0
for source, source_line_count in sources:
stub_group.append((source, source_line_count))
stub_line_count += source_line_count
if stub_line_count > stub_line_limit:
stub_line_count = 0
stub_group = []
stub_groups.append(stub_group)
for stub_index, stub_group in enumerate(stub_groups):
if not stub_group:
continue
groups['=stub-%02d' % (stub_index + 1)] = dict((source, default_stub_value(line_count))
for source, line_count in stub_group)
return groups
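# For example (numbers are hypothetical): with stubs enabled and the 500,000
# line limit above, sources totalling roughly 1.2 million lines end up in about
# three groups keyed '=stub-01', '=stub-02', '=stub-03', each mapping file
# paths to default_stub_value(line_count).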
def _sanitise_filename(filename, modules=None, collection_search_re=None, collection_sub_re=None):
"""
:type filename: str
:type modules: dict | None
:type collection_search_re: Pattern | None
:type collection_sub_re: Pattern | None
:rtype: str | None
"""
ansible_path = os.path.abspath('lib/ansible/') + '/'
root_path = data_context().content.root + '/'
if modules is None:
modules = {}
if '/ansible_modlib.zip/ansible/' in filename:
# Rewrite the module_utils path from the remote host to match the controller. Ansible 2.6 and earlier.
new_name = re.sub('^.*/ansible_modlib.zip/ansible/', ansible_path, filename)
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif collection_search_re and collection_search_re.search(filename):
new_name = os.path.abspath(collection_sub_re.sub('', filename))
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif re.search(r'/ansible_[^/]+_payload\.zip/ansible/', filename):
# Rewrite the module_utils path from the remote host to match the controller. Ansible 2.7 and later.
new_name = re.sub(r'^.*/ansible_[^/]+_payload\.zip/ansible/', ansible_path, filename)
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif '/ansible_module_' in filename:
# Rewrite the module path from the remote host to match the controller. Ansible 2.6 and earlier.
module_name = re.sub('^.*/ansible_module_(?P<module>.*).py$', '\\g<module>', filename)
if module_name not in modules:
display.warning('Skipping coverage of unknown module: %s' % module_name)
return None
new_name = os.path.abspath(modules[module_name])
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif re.search(r'/ansible_[^/]+_payload(_[^/]+|\.zip)/__main__\.py$', filename):
# Rewrite the module path from the remote host to match the controller. Ansible 2.7 and later.
# AnsiballZ versions using zipimporter will match the `.zip` portion of the regex.
# AnsiballZ versions not using zipimporter will match the `_[^/]+` portion of the regex.
module_name = re.sub(r'^.*/ansible_(?P<module>[^/]+)_payload(_[^/]+|\.zip)/__main__\.py$',
'\\g<module>', filename).rstrip('_')
if module_name not in modules:
display.warning('Skipping coverage of unknown module: %s' % module_name)
return None
new_name = os.path.abspath(modules[module_name])
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif re.search('^(/.*?)?/root/ansible/', filename):
# Rewrite the path of code running on a remote host or in a docker container as root.
new_name = re.sub('^(/.*?)?/root/ansible/', root_path, filename)
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
elif '/.ansible/test/tmp/' in filename:
# Rewrite the path of code running from an integration test temporary directory.
new_name = re.sub(r'^.*/\.ansible/test/tmp/[^/]+/', root_path, filename)
display.info('%s -> %s' % (filename, new_name), verbosity=3)
filename = new_name
return filename
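# For example (path is hypothetical), a coverage entry recorded on a remote
# host as /root/ansible/lib/ansible/module_utils/basic.py is rewritten relative
# to the local content root by the '^(/.*?)?/root/ansible/' branch above.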
def command_coverage_report(args):
"""
:type args: CoverageReportConfig
"""
output_files = command_coverage_combine(args)
for output_file in output_files:
if args.group_by or args.stub:
display.info('>>> Coverage Group: %s' % ' '.join(os.path.basename(output_file).split('=')[1:]))
if output_file.endswith('-powershell'):
display.info(_generate_powershell_output_report(args, output_file))
else:
options = []
if args.show_missing:
options.append('--show-missing')
if args.include:
options.extend(['--include', args.include])
if args.omit:
options.extend(['--omit', args.omit])
run_coverage(args, output_file, 'report', options)
def command_coverage_html(args):
"""
:type args: CoverageConfig
"""
output_files = command_coverage_combine(args)
for output_file in output_files:
if output_file.endswith('-powershell'):
# coverage.py does not support non-Python files so we just skip the local html report.
display.info("Skipping output file %s in html generation" % output_file, verbosity=3)
continue
dir_name = os.path.join(ResultType.REPORTS.path, os.path.basename(output_file))
make_dirs(dir_name)
run_coverage(args, output_file, 'html', ['-i', '-d', dir_name])
display.info('HTML report generated: file:///%s' % os.path.join(dir_name, 'index.html'))
def command_coverage_xml(args):
"""
:type args: CoverageConfig
"""
output_files = command_coverage_combine(args)
for output_file in output_files:
xml_name = '%s.xml' % os.path.basename(output_file)
if output_file.endswith('-powershell'):
            report = _generate_powershell_xml(output_file)
rough_string = tostring(report, 'utf-8')
reparsed = minidom.parseString(rough_string)
pretty = reparsed.toprettyxml(indent=' ')
write_text_test_results(ResultType.REPORTS, xml_name, pretty)
else:
xml_path = os.path.join(ResultType.REPORTS.path, xml_name)
make_dirs(ResultType.REPORTS.path)
run_coverage(args, output_file, 'xml', ['-i', '-o', xml_path])
def command_coverage_erase(args):
"""
:type args: CoverageConfig
"""
initialize_coverage(args)
coverage_dir = ResultType.COVERAGE.path
for name in os.listdir(coverage_dir):
if not name.startswith('coverage') and '=coverage.' not in name:
continue
path = os.path.join(coverage_dir, name)
if not args.explain:
os.remove(path)
def initialize_coverage(args):
"""
:type args: CoverageConfig
:rtype: coverage
"""
if args.delegate:
raise Delegate()
if args.requirements:
install_command_requirements(args)
try:
import coverage
except ImportError:
coverage = None
if not coverage:
raise ApplicationError('You must install the "coverage" python module to use this command.')
return coverage
def get_coverage_group(args, coverage_file):
"""
:type args: CoverageConfig
:type coverage_file: str
:rtype: str
"""
parts = os.path.basename(coverage_file).split('=', 4)
if len(parts) != 5 or not parts[4].startswith('coverage.'):
return None
names = dict(
command=parts[0],
target=parts[1],
environment=parts[2],
version=parts[3],
)
group = ''
for part in COVERAGE_GROUPS:
if part in args.group_by:
group += '=%s' % names[part]
return group
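# For example (file name is hypothetical), a coverage file named
# integration=ping=docker-centos8=2.10=coverage.12345.67890 splits into
# command/target/environment/version parts; if args.group_by contains
# 'command' and 'version', the returned group is '=integration=2.10'.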
def _command_coverage_combine_powershell(args):
"""
:type args: CoverageConfig
:rtype: list[str]
"""
coverage_dir = ResultType.COVERAGE.path
coverage_files = [os.path.join(coverage_dir, f) for f in os.listdir(coverage_dir)
if '=coverage.' in f and '=powershell' in f]
def _default_stub_value(lines):
val = {}
for line in range(lines):
val[line] = 0
return val
counter = 0
sources = _get_coverage_targets(args, walk_powershell_targets)
groups = _build_stub_groups(args, sources, _default_stub_value)
for coverage_file in coverage_files:
counter += 1
display.info('[%4d/%4d] %s' % (counter, len(coverage_files), coverage_file), verbosity=2)
group = get_coverage_group(args, coverage_file)
if group is None:
display.warning('Unexpected name for coverage file: %s' % coverage_file)
continue
if os.path.getsize(coverage_file) == 0:
display.warning('Empty coverage file: %s' % coverage_file)
continue
try:
with open(coverage_file, 'rb') as original_fd:
coverage_run = json.loads(to_text(original_fd.read(), errors='replace'))
except Exception as ex: # pylint: disable=locally-disabled, broad-except
display.error(u'%s' % ex)
continue
for filename, hit_info in coverage_run.items():
if group not in groups:
groups[group] = {}
coverage_data = groups[group]
filename = _sanitise_filename(filename)
if not filename:
continue
if filename not in coverage_data:
coverage_data[filename] = {}
file_coverage = coverage_data[filename]
if not isinstance(hit_info, list):
hit_info = [hit_info]
for hit_entry in hit_info:
if not hit_entry:
continue
line_count = file_coverage.get(hit_entry['Line'], 0) + hit_entry['HitCount']
file_coverage[hit_entry['Line']] = line_count
output_files = []
invalid_path_count = 0
invalid_path_chars = 0
for group in sorted(groups):
coverage_data = groups[group]
for filename in coverage_data:
if not os.path.isfile(filename):
invalid_path_count += 1
invalid_path_chars += len(filename)
if args.verbosity > 1:
display.warning('Invalid coverage path: %s' % filename)
continue
if args.all:
# Add 0 line entries for files not in coverage_data
for source, source_line_count in sources:
if source in coverage_data:
continue
coverage_data[source] = _default_stub_value(source_line_count)
if not args.explain:
output_file = COVERAGE_OUTPUT_FILE_NAME + group + '-powershell'
write_json_test_results(ResultType.COVERAGE, output_file, coverage_data)
output_files.append(os.path.join(ResultType.COVERAGE.path, output_file))
if invalid_path_count > 0:
display.warning(
'Ignored %d characters from %d invalid coverage path(s).' % (invalid_path_chars, invalid_path_count))
return sorted(output_files)
def _generate_powershell_xml(coverage_file):
"""
:type coverage_file: str
:rtype: Element
"""
with open(coverage_file, 'rb') as coverage_fd:
coverage_info = json.loads(to_text(coverage_fd.read()))
content_root = data_context().content.root
is_ansible = data_context().content.is_ansible
packages = {}
for path, results in coverage_info.items():
filename = os.path.splitext(os.path.basename(path))[0]
if filename.startswith('Ansible.ModuleUtils'):
package = 'ansible.module_utils'
elif is_ansible:
package = 'ansible.modules'
else:
rel_path = path[len(content_root) + 1:]
plugin_type = "modules" if rel_path.startswith("plugins/modules") else "module_utils"
package = 'ansible_collections.%splugins.%s' % (data_context().content.collection.prefix, plugin_type)
if package not in packages:
packages[package] = {}
packages[package][path] = results
elem_coverage = Element('coverage')
elem_coverage.append(
Comment(' Generated by ansible-test from the Ansible project: https://www.ansible.com/ '))
elem_coverage.append(
Comment(' Based on https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd '))
elem_sources = SubElement(elem_coverage, 'sources')
elem_source = SubElement(elem_sources, 'source')
elem_source.text = data_context().content.root
elem_packages = SubElement(elem_coverage, 'packages')
total_lines_hit = 0
total_line_count = 0
for package_name, package_data in packages.items():
lines_hit, line_count = _add_cobertura_package(elem_packages, package_name, package_data)
total_lines_hit += lines_hit
total_line_count += line_count
elem_coverage.attrib.update({
'branch-rate': '0',
'branches-covered': '0',
'branches-valid': '0',
'complexity': '0',
'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
        'lines-covered': str(total_lines_hit),
        'lines-valid': str(total_line_count),
'timestamp': str(int(time.time())),
'version': get_ansible_version(),
})
return elem_coverage
def _add_cobertura_package(packages, package_name, package_data):
"""
:type packages: SubElement
:type package_name: str
:type package_data: Dict[str, Dict[str, int]]
:rtype: Tuple[int, int]
"""
elem_package = SubElement(packages, 'package')
elem_classes = SubElement(elem_package, 'classes')
total_lines_hit = 0
total_line_count = 0
for path, results in package_data.items():
lines_hit = len([True for hits in results.values() if hits])
line_count = len(results)
total_lines_hit += lines_hit
total_line_count += line_count
elem_class = SubElement(elem_classes, 'class')
class_name = os.path.splitext(os.path.basename(path))[0]
if class_name.startswith("Ansible.ModuleUtils"):
class_name = class_name[20:]
content_root = data_context().content.root
filename = path
if filename.startswith(content_root):
filename = filename[len(content_root) + 1:]
elem_class.attrib.update({
'branch-rate': '0',
'complexity': '0',
'filename': filename,
'line-rate': str(round(lines_hit / line_count, 4)) if line_count else "0",
'name': class_name,
})
SubElement(elem_class, 'methods')
elem_lines = SubElement(elem_class, 'lines')
for number, hits in results.items():
elem_line = SubElement(elem_lines, 'line')
elem_line.attrib.update(
hits=str(hits),
number=str(number),
)
elem_package.attrib.update({
'branch-rate': '0',
'complexity': '0',
'line-rate': str(round(total_lines_hit / total_line_count, 4)) if total_line_count else "0",
'name': package_name,
})
return total_lines_hit, total_line_count
def _generate_powershell_output_report(args, coverage_file):
"""
:type args: CoverageReportConfig
:type coverage_file: str
:rtype: str
"""
with open(coverage_file, 'rb') as coverage_fd:
coverage_info = json.loads(to_text(coverage_fd.read()))
root_path = data_context().content.root + '/'
name_padding = 7
cover_padding = 8
file_report = []
total_stmts = 0
total_miss = 0
for filename in sorted(coverage_info.keys()):
hit_info = coverage_info[filename]
if filename.startswith(root_path):
filename = filename[len(root_path):]
if args.omit and filename in args.omit:
continue
if args.include and filename not in args.include:
continue
stmts = len(hit_info)
miss = len([c for c in hit_info.values() if c == 0])
name_padding = max(name_padding, len(filename) + 3)
total_stmts += stmts
total_miss += miss
cover = "{0}%".format(int((stmts - miss) / stmts * 100))
missing = []
current_missing = None
sorted_lines = sorted([int(x) for x in hit_info.keys()])
for idx, line in enumerate(sorted_lines):
hit = hit_info[str(line)]
if hit == 0 and current_missing is None:
current_missing = line
elif hit != 0 and current_missing is not None:
end_line = sorted_lines[idx - 1]
if current_missing == end_line:
missing.append(str(current_missing))
else:
missing.append('%s-%s' % (current_missing, end_line))
current_missing = None
if current_missing is not None:
end_line = sorted_lines[-1]
if current_missing == end_line:
missing.append(str(current_missing))
else:
missing.append('%s-%s' % (current_missing, end_line))
file_report.append({'name': filename, 'stmts': stmts, 'miss': miss, 'cover': cover, 'missing': missing})
if total_stmts == 0:
return ''
total_percent = '{0}%'.format(int((total_stmts - total_miss) / total_stmts * 100))
stmts_padding = max(8, len(str(total_stmts)))
miss_padding = max(7, len(str(total_miss)))
line_length = name_padding + stmts_padding + miss_padding + cover_padding
header = 'Name'.ljust(name_padding) + 'Stmts'.rjust(stmts_padding) + 'Miss'.rjust(miss_padding) + \
'Cover'.rjust(cover_padding)
if args.show_missing:
header += 'Lines Missing'.rjust(16)
line_length += 16
line_break = '-' * line_length
lines = ['%s%s%s%s%s' % (f['name'].ljust(name_padding), str(f['stmts']).rjust(stmts_padding),
str(f['miss']).rjust(miss_padding), f['cover'].rjust(cover_padding),
' ' + ', '.join(f['missing']) if args.show_missing else '')
for f in file_report]
totals = 'TOTAL'.ljust(name_padding) + str(total_stmts).rjust(stmts_padding) + \
str(total_miss).rjust(miss_padding) + total_percent.rjust(cover_padding)
report = '{0}\n{1}\n{2}\n{1}\n{3}'.format(header, line_break, "\n".join(lines), totals)
return report
def run_coverage(args, output_file, command, cmd): # type: (CoverageConfig, str, str, t.List[str]) -> None
"""Run the coverage cli tool with the specified options."""
env = common_environment()
env.update(dict(COVERAGE_FILE=output_file))
cmd = ['python', '-m', 'coverage', command, '--rcfile', COVERAGE_CONFIG_PATH] + cmd
intercept_command(args, target_name='coverage', env=env, cmd=cmd, disable_coverage=True)
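# For example (paths are hypothetical), run_coverage(args, '/res/coverage=units',
# 'html', ['-i', '-d', '/res/reports']) ends up invoking roughly:
#     COVERAGE_FILE=/res/coverage=units python -m coverage html \
#         --rcfile <ANSIBLE_TEST_DATA_ROOT>/coveragerc -i -d /res/reports
# via intercept_command, with coverage collection disabled for the tool itself.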
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from six.moves import zip
from tabulate import tabulate
from tqdm import tqdm
import pycrfsuite
from morphine._fileresource import FileResource
class LessNoisyTrainer(pycrfsuite.Trainer):
"""
This pycrfsuite.Trainer prints information about each iteration
on a single line.
"""
def on_iteration(self, log, info):
if 'avg_precision' in info:
print(("Iter {num:<3} "
"time={time:<5.2f} "
"loss={loss:<8.2f} "
"active={active_features:<5} "
"precision={avg_precision:0.3f} "
"recall={avg_recall:0.3f} "
"F1={avg_f1:0.3f} "
"accuracy(item/instance)="
"{item_accuracy_float:0.3f} {instance_accuracy_float:0.3f}"
).format(**info).strip())
else:
print(("Iter {num:<3} "
"time={time:<5.2f} "
"loss={loss:<8.2f} "
"active={active_features:<5} "
"feature_norm={feature_norm:<8.2f} "
).format(**info).strip())
def on_optimization_end(self, log):
last_iter = self.logparser.last_iteration
if 'scores' in last_iter:
data = [
[entity, score.precision, score.recall, score.f1, score.ref]
for entity, score in sorted(last_iter['scores'].items())
]
table = tabulate(data,
headers=["Label", "Precision", "Recall", "F1", "Support"],
# floatfmt="0.4f",
)
size = len(table.splitlines()[0])
print("="*size)
print(table)
print("-"*size)
super(LessNoisyTrainer, self).on_optimization_end(log)
class CRF(object):
def __init__(self, algorithm=None, train_params=None, verbose=False,
model_filename=None, keep_tempfiles=False, trainer_cls=None):
self.algorithm = algorithm
self.train_params = train_params
self.modelfile = FileResource(
filename=model_filename,
keep_tempfiles=keep_tempfiles,
suffix=".crfsuite",
prefix="model"
)
self.verbose = verbose
self._tagger = None
if trainer_cls is None:
self.trainer_cls = pycrfsuite.Trainer
else:
self.trainer_cls = trainer_cls
self.training_log_ = None
def fit(self, X, y, X_dev=None, y_dev=None):
"""
Train a model.
Parameters
----------
X : list of lists of dicts
Feature dicts for several documents (in a python-crfsuite format).
y : list of lists of strings
Labels for several documents.
X_dev : (optional) list of lists of dicts
Feature dicts used for testing.
y_dev : (optional) list of lists of strings
Labels corresponding to X_dev.
"""
if (X_dev is None and y_dev is not None) or (X_dev is not None and y_dev is None):
raise ValueError("Pass both X_dev and y_dev to use the holdout data")
if self._tagger is not None:
self._tagger.close()
self._tagger = None
self.modelfile.refresh()
trainer = self._get_trainer()
train_data = zip(X, y)
if self.verbose:
train_data = tqdm(train_data, "loading training data to CRFsuite", len(X), leave=True)
for xseq, yseq in train_data:
trainer.append(xseq, yseq)
if self.verbose:
print("")
if X_dev is not None:
test_data = zip(X_dev, y_dev)
if self.verbose:
test_data = tqdm(test_data, "loading dev data to CRFsuite", len(X_dev), leave=True)
for xseq, yseq in test_data:
trainer.append(xseq, yseq, 1)
if self.verbose:
print("")
trainer.train(self.modelfile.name, holdout=-1 if X_dev is None else 1)
self.training_log_ = trainer.logparser
return self
def predict(self, X):
"""
Make a prediction.
Parameters
----------
X : list of lists of dicts
feature dicts in python-crfsuite format
Returns
-------
y : list of lists of strings
predicted labels
"""
return list(map(self.predict_single, X))
def predict_single(self, xseq):
"""
Make a prediction.
Parameters
----------
xseq : list of dicts
feature dicts in python-crfsuite format
Returns
-------
y : list of strings
predicted labels
"""
return self.tagger.tag(xseq)
def predict_marginals(self, X):
"""
Make a prediction.
Parameters
----------
X : list of lists of dicts
feature dicts in python-crfsuite format
Returns
-------
y : list of lists of dicts
predicted probabilities for each label at each position
"""
return list(map(self.predict_marginals_single, X))
def predict_marginals_single(self, xseq):
"""
Make a prediction.
Parameters
----------
xseq : list of dicts
feature dicts in python-crfsuite format
Returns
-------
y : list of dicts
predicted probabilities for each label at each position
"""
labels = self.tagger.labels()
self.tagger.set(xseq)
return [
{label: self.tagger.marginal(label, i) for label in labels}
for i in range(len(xseq))
]
@property
def tagger(self):
if self._tagger is None:
if self.modelfile.name is None:
raise Exception("Can't load model. Is the model trained?")
tagger = pycrfsuite.Tagger()
tagger.open(self.modelfile.name)
self._tagger = tagger
return self._tagger
def _get_trainer(self):
return self.trainer_cls(
algorithm=self.algorithm,
params=self.train_params,
verbose=self.verbose,
)
def __getstate__(self):
dct = self.__dict__.copy()
dct['_tagger'] = None
return dct
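# Illustrative usage sketch (not part of the original module); the feature
# dicts and labels below are made up:
#
#     >>> X = [[{'w': 'San'}, {'w': 'Francisco'}], [{'w': 'hello'}]]
#     >>> y = [['B-LOC', 'I-LOC'], ['O']]
#     >>> crf = CRF(algorithm='lbfgs', trainer_cls=LessNoisyTrainer)
#     >>> crf.fit(X, y)     # doctest: +SKIP
#     >>> crf.predict(X)    # doctest: +SKIP  (returns a list of label lists)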
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os, csv
COV = None
if os.environ.get('FLASK_COVERAGE'):
import coverage
COV = coverage.coverage(branch=True, include='app/*')
COV.start()
if os.path.exists('.env'):
print('Importing environment from .env...')
for line in open('.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1]
from app import create_app, db
from app.models import User, Role, Permission, \
IUCNStatus, OrganismType, GrowthFormRaunkiaer, ReproductiveRepetition, \
DicotMonoc, AngioGymno, SpandExGrowthType, SourceType, Database, Purpose, MissingData, ContentEmail, Ecoregion, Continent, InvasiveStatusStudy, InvasiveStatusElsewhere, StageTypeClass, \
TransitionType, MatrixComposition, StartSeason, StudiedSex, Captivity, Species, Taxonomy, Trait, \
Publication, AuthorContact, AdditionalSource, Population, Stage, StageType, Treatment, \
MatrixStage, MatrixValue, Matrix, Interval, Fixed, Small, CensusTiming, Status, PurposeEndangered, PurposeWeed, Version, Institute, EndSeason, ChangeLogger, PublicationsProtocol, DigitizationProtocol, Protocol, CommonTerm
from flask.ext.script import Manager, Shell
from flask.ext.migrate import Migrate, MigrateCommand
from app.matrix_functions import as_array, calc_lambda, calc_surv_issue, is_matrix_irreducible, is_matrix_primitive, is_matrix_ergodic
from flask import Flask, session
from flask.ext.alchemydumps import AlchemyDumps, AlchemyDumpsCommand
from flask.ext.sqlalchemy import SQLAlchemy
import random
def gen_hex_code():
r = lambda: random.randint(0,255)
return('#%02X%02X%02X' % (r(),r(),r()))
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role,
Permission=Permission, IUCNStatus=IUCNStatus, Species=Species, \
Taxonomy=Taxonomy, OrganismType=OrganismType, GrowthFormRaunkiaer=GrowthFormRaunkiaer, \
ReproductiveRepetition=ReproductiveRepetition, DicotMonoc=DicotMonoc, AngioGymno=AngioGymno, SpandExGrowthType=SpandExGrowthType, Trait=Trait, \
Publication=Publication, SourceType=SourceType, Database=Database, Purpose=Purpose, MissingData=MissingData, \
AuthorContact=AuthorContact, ContentEmail=ContentEmail, Population=Population, Ecoregion=Ecoregion, Continent=Continent, \
StageType=StageType, StageTypeClass=StageTypeClass, TransitionType=TransitionType, MatrixValue=MatrixValue, \
MatrixComposition=MatrixComposition, StartSeason=StartSeason, StudiedSex=StudiedSex, Captivity=Captivity, MatrixStage=MatrixStage,\
Matrix=Matrix, Interval=Interval, Fixed=Fixed, Small=Small, CensusTiming=CensusTiming, Status=Status, InvasiveStatusStudy=InvasiveStatusStudy, InvasiveStatusElsewhere=InvasiveStatusElsewhere, \
PurposeEndangered=PurposeEndangered, PurposeWeed=PurposeWeed, Version=Version, Institute=Institute, EndSeason=EndSeason, ChangeLogger = ChangeLogger, PublicationsProtocol = PublicationsProtocol, \
DigitizationProtocol = DigitizationProtocol, Protocol=Protocol, CommonTerm=CommonTerm)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
manager.add_command('alchemydumps', AlchemyDumpsCommand)
@manager.command
def test(coverage=False):
"""Run the unit tests."""
if coverage and not os.environ.get('FLASK_COVERAGE'):
import sys
os.environ['FLASK_COVERAGE'] = '1'
os.execvp(sys.executable, [sys.executable] + sys.argv)
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
if COV:
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
basedir = os.path.abspath(os.path.dirname(__file__))
covdir = os.path.join(basedir, 'tmp/coverage')
COV.html_report(directory=covdir)
print('HTML version: file://%s/index.html' % covdir)
COV.erase()
@manager.command
def profile(length=25, profile_dir=None):
"""Start the application under the code profiler."""
from werkzeug.contrib.profiler import ProfilerMiddleware
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
profile_dir=profile_dir)
app.run()
def UnicodeDictReader(utf8_data, **kwargs):
csv_reader = csv.DictReader(utf8_data, **kwargs)
for row in csv_reader:
yield {key: unicode(value, 'latin-1') for key, value in row.iteritems()}
@manager.command
def delete_table_data():
response = raw_input("Are you sure you want to delete all data? (y/n): ")
if response == "y":
Version.query.delete()
Taxonomy.query.delete()
Matrix.query.delete()
Population.query.delete()
Publication.query.delete()
Trait.query.delete()
Species.query.delete()
Protocol.query.delete()
db.session.commit()
print "All data has been removed"
elif response == "n":
print "Table data not deleted"
pass
else:
print("Valid response required (y/n)")
return
# This can be padded out for future stuff...
def coerce_boolean(string):
true = ['Yes', 'Divided','TRUE','T']
false = ['No', 'Undivided','FALSE','F','Indivisible']
if string in true:
return True
elif string in false:
return False
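# Note: coerce_boolean() implicitly returns None for any value not in either
# list above, so unrecognised strings end up stored as NULL rather than False.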
def return_con(obj):
    import re
    joined = ''.join([value for key, value in obj.items()])
    lower = joined.lower()
    stripped = lower.replace(' ', '')
    alphanumeric = re.sub(r'[\W_]+', '', stripped)
    return alphanumeric
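# Illustrative example (hypothetical values): return_con({'journal': 'J. Ecol.',
# 'year': '1999 '}) -> 'jecol1999'; concatenation order follows the dict's
# iteration order.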
def create_id_string(dict):
    new_dict = {
        "species_accepted" : dict["species_accepted"],
        "journal" : dict['journal'],
        "year_pub" : dict["year"],
        "authors" : dict["authors"][:15],  # first 15 characters (TODO: append '>' when truncated)
        "name" : dict["name"],  # what sort of name is this?
        "matrix_composite" : dict['matrix_composition_id'],
        "matrix_treatment" : dict['treatment_id'],
        "matrix_start_year" : dict['matrix_start_year'],
        "observation" : dict['observations'],
        "matrix_a_string" : dict['matrix_a_string']
    }
    return return_con(new_dict)
def similar(a, b):
from difflib import SequenceMatcher
return SequenceMatcher(None, a, b).ratio()
def generate_uid(species, publication, population, matrix):
import re
species_accepted = species.species_accepted
journal = publication.journal_name if publication else None
year_pub = publication.year if publication else None
    try:
        authors = publication.authors[:15].encode('utf-8')
    except Exception:
        authors = ''
    try:
        pop_name = population.population_name.encode('utf-8')[:15] if population else None
    except Exception:
        pop_name = ''
try:
composite = matrix.matrix_composition.comp_name
except AttributeError:
composite = ''
try:
start_year = matrix.matrix_start_year
except TypeError:
start_year = ''
import time
timestamp = time.time()
uid_concat = '{}{}{}{}{}{}{}{}'.format(species_accepted, journal, year_pub, authors, pop_name, composite, start_year, timestamp)
uid_lower = uid_concat.lower()
    uid = re.sub(r'[\W_]+', '', uid_lower)
return uid
def data_clean(data):
    incomplete = 'NDY' in data.values()
kwargs = {key: val for key, val in data.items() if val != 'NDY'}
amber = Status.query.filter_by(status_name="Amber").first()
green = Status.query.filter_by(status_name="Green").first()
#kwargs['version_ok'] = 0 if incomplete else 1
#kwargs['version_original'] = 1
#kwargs['version_latest'] = 1
return {'kwargs' : kwargs, 'status' : amber if incomplete else green}
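# Illustrative sketch: data_clean({'authors': 'Smith', 'year': 'NDY'}) returns
# {'kwargs': {'authors': 'Smith'}, 'status': <Amber>} -- placeholder 'NDY'
# values are dropped from kwargs and the record is flagged Amber instead of
# Green.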
def version_data(cleaned):
version = {'checked' : False,
'checked_count' : 0,
'statuses' : cleaned['status']#,
#'version_number' : 1,
#'user' : User.query.filter_by(username='admin').first(),
#'database' : Database.query.filter_by(database_name='COMPADRE 4').first()
}
return version
@manager.command
def submit_new(data):
import datetime
version = {'checked' : True,
'checked_count' : 1,
'statuses' : Status.query.filter_by(status_name="Green").first()}
if data["population_database_id"] == "XXX" or data["population_database_id"] == "X.X.X":
version = {'checked' : False,
'checked_count' : 0,
'statuses' : Status.query.filter_by(status_name="Pending").first()}
# When checking for null data later, these need to be excluded, as they will always have a value
ignore_keys = ['version_ok', 'version_latest', 'version_original']
''' DigitizationProtocol '''
# digitization_protocol = DigitizationProtocol.query.filter_by(field_name=data["digitization_protocol"]).first()
# if digitization_protocol == None:
# ac_dict = {'protocol_id' : protocol.id,
# 'field_name' : data['field_name'],
# 'name_in_csv' : data["name_in_csv"],
# 'database_model' : data["database_model"],
# 'field_description' : data["field_description"],
# 'field_short_description' : data["field_short_description"]
# }
# ac_cleaned = data_clean(ac_dict)
# digitization_protocol = Protocol(**ac_cleaned["kwargs"])
# db.session.add(digitization_protocol)
# db.session.commit()
''' Publication '''
publications_protocol = PublicationsProtocol.query.filter_by(protocol_number=data["publications_protocol_id"]).first()
if data["publication_DOI_ISBN"] == None:
publication = Publication.query.filter_by(authors=data["publication_authors"]).filter_by(year=data["publication_year"]).filter_by(journal_name=data["publication_journal_name"]).filter_by(additional_source_string=data["publication_additional_source_string"]).filter_by(study_notes= data["publication_study_notes"]).first()
else:
publication = Publication.query.filter_by(DOI_ISBN=data["publication_DOI_ISBN"]).first()
if publication == None:
purposes = {"Comparative Demography" : data["publication_purpose_comparative_demography"],
"Spatial Demography" : data["publication_purpose_spatial_demography"],
"Abiotic Impacts" : data["publication_purpose_abiotic"],
"PVA" : data["publication_purpose_pva"],
"Species Dynamics Description" : data["publication_purpose_species_dynamics_description"],
"Interspecific Interactions" : data["publication_purpose_interspecific_interactions"],
"Management Evaluation" : data["publication_purpose_management_evaluation"],
"Methodological Advancement" : data["publication_purpose_methodological_advancement"]
}
queryset = [Purpose.query.filter(Purpose.purpose_name == key).first() for key, val in purposes.items() if val == '1']
if data['publication_missing_data'] != 'NDY' and data['publication_missing_data']:
missing_data_unicode = data['publication_missing_data'].replace(" ", "").split(';')
missing_data = [MissingData.query.filter_by(missing_code=key).first() for key in missing_data_unicode if MissingData.query.filter_by(missing_code=key).first()]
else:
missing_data = 'NDY'
pub_dict = {'authors': data["publication_authors"],
'year' : data["publication_year"],
'publications_protocol' : publications_protocol,
'DOI_ISBN' : data["publication_DOI_ISBN"],
'additional_source_string' : data["publication_additional_source_string"],
'journal_name' : data["publication_journal_name"],
'date_digitised' : datetime.datetime.strptime(data['publication_date_digitization'], "%d/%m/%Y").strftime("%Y-%m-%d") if data['publication_date_digitization'] else None,
'purposes' : queryset,
'study_notes' : data["publication_study_notes"]
}
pub_cleaned = data_clean(pub_dict)
# if not all(value == None for key, value in pub_cleaned["kwargs"].items() if key not in ignore_keys) and study_present:
publication = Publication(**pub_cleaned["kwargs"])
db.session.add(publication)
db.session.commit()
publication.missing_data = missing_data if type(missing_data) == list else []
db.session.add(publication)
db.session.commit()
''' Publication Version '''
#version = version_data(pub_cleaned)
publication_version = Version(**version)
publication_version.publication = publication
publication.colour = gen_hex_code()
possible_user = User.query.filter_by(name = data["publication_student"]).first()
na_user = User.query.filter_by(name = "N/A").first()
if possible_user == None:
possible_user = na_user
    publication_version.entered_by_id = possible_user.id if possible_user else None
    publication_version.checked_by_id = na_user.id if na_user else None
db.session.add(publication_version)
db.session.commit()
publication_version.original_version_id = publication_version.id
db.session.add(publication_version)
db.session.commit()
''' Author contact '''
author_contacts = AuthorContact.query.filter_by(corresponding_author = data["publication_corresponding_author"]).filter_by(corresponding_author_email = data["publication_corresponding_email"]).first()
if author_contacts == None:
ac_dict = {'publication_id' : publication.id,
'date_contacted' : datetime.datetime.strptime(data['date_author_contacted'], "%d/%m/%Y").strftime("%Y-%m-%d") if data['date_author_contacted'] else None,
'date_contacted_again' : datetime.datetime.strptime(data['date_author_contacted_again'], "%d/%m/%Y").strftime("%Y-%m-%d") if data['date_author_contacted_again'] else None,
'extra_content_email' : data["correspondence_email_content"],
'author_reply' : data["correspondence_author_reply"],
'corresponding_author' : data["publication_corresponding_author"],
'corresponding_author_email' : data["publication_corresponding_email"],
'correspondence_email_content' : data["correspondence_email_content"],
'extra_content_email' : data["extra_content_email"],
'contacting_user_id' : possible_user.id if possible_user else None
}
ac_cleaned = data_clean(ac_dict)
author_contact = AuthorContact(**ac_cleaned["kwargs"])
db.session.add(author_contact)
db.session.commit()
''' Author Contact Version '''
#version = version_data(ac_cleaned)
author_contact_version = Version(**version)
author_contact_version.author_contact = author_contact
db.session.add(author_contact_version)
db.session.commit()
author_contact_version.original_version_id = author_contact_version.id
db.session.add(author_contact_version)
db.session.commit()
''' Species '''
species = Species.query.filter_by(species_accepted=data["species_accepted"]).first()
iucn = IUCNStatus.query.filter_by(status_code=data["species_iucn_status_id"]).first()
if species == None:
species_dict = {'gbif_taxon_key': data["species_gbif_taxon_key"],
'species_iucn_taxonid': data["species_iucn_taxonid"],
'species_accepted' : data["species_accepted"],
'species_common' : data["species_common"],
'iucn_status_id' : iucn.id if iucn else None,
'image_path' : data["image_path"],
'image_path2' : data["image_path2"]}
species_cleaned = data_clean(species_dict)
species = Species(**species_cleaned["kwargs"])
db.session.add(species)
db.session.commit()
''' Species Version '''
#version = version_data(species_cleaned)
species_version = Version(**version)
species_version.species = species
db.session.add(species_version)
db.session.commit()
species_version.original_version_id = species_version.id
db.session.add(species_version)
db.session.commit()
''' Trait '''
spand_ex_growth_type = SpandExGrowthType.query.filter_by(type_name=data["trait_spand_ex_growth_type_id"]).first()
dicot_monoc = DicotMonoc.query.filter_by(dicot_monoc_name=data["trait_dicot_monoc_id"]).first()
growth_form_raunkiaer = GrowthFormRaunkiaer.query.filter_by(form_name=data["trait_growth_form_raunkiaer_id"]).first()
organism_type = OrganismType.query.filter_by(type_name=data["trait_organism_type_id"]).first()
angio_gymno = AngioGymno.query.filter_by(angio_gymno_name=data["trait_angio_gymno_id"]).first()
trait = Trait.query.filter_by(species_id=species.id).first()
if trait == None:
trait_dict = {'species_id': species.id,
'organism_type': organism_type,
'dicot_monoc': dicot_monoc,
'angio_gymno': angio_gymno,
'species_seedbank' : coerce_boolean(data["species_seedbank"]),
'species_gisd_status' : coerce_boolean(data["species_gisd_status"]),
'species_clonality' : coerce_boolean(data["species_clonality"]),
'spand_ex_growth_type_id' : spand_ex_growth_type.id if spand_ex_growth_type else None,
'growth_form_raunkiaer_id' : growth_form_raunkiaer.id if growth_form_raunkiaer else None}
trait_cleaned = data_clean(trait_dict)
trait = Trait(**trait_cleaned["kwargs"])
db.session.add(trait)
db.session.commit()
''' Trait Version '''
#version = version_data(trait_cleaned)
trait_version = Version(**version)
trait_version.trait = trait
db.session.add(trait_version)
db.session.commit()
trait_version.original_version_id = trait_version.id
db.session.add(trait_version)
db.session.commit()
''' Taxonomy '''
tax = Taxonomy.query.filter_by(species_id=species.id).first()
if tax == None:
tax_dict = {'authority' : None,
'tpl_version' : None,
'infraspecies_accepted' : None,
'species_epithet_accepted' : None,
'genus_accepted' : data["taxonomy_genus_accepted"],
'genus' : data["taxonomy_genus"],
'family' : data["taxonomy_family"],
'tax_order' : data["taxonomy_order"],
'tax_class' : data["taxonomy_class"],
'phylum' : data["taxonomy_phylum"],
'kingdom' : data["taxonomy_kingdom"],
'col_check_date' : datetime.datetime.strptime(data["taxonomy_col_check_date"], "%d/%m/%Y").strftime("%Y-%m-%d") if data['taxonomy_col_check_date'] else None,
'col_check_ok' : coerce_boolean(data["taxonomy_col_check_ok"])}
tax_cleaned = data_clean(tax_dict)
# if not all(value == None for key, value in tax_cleaned["kwargs"].items() if key not in ignore_keys):
tax = Taxonomy(**tax_cleaned["kwargs"])
db.session.add(tax)
db.session.commit()
tax.species = species
db.session.add(tax)
db.session.commit()
''' Taxonomy Version '''
#version = version_data(tax_cleaned)
taxonomy_version = Version(**version)
taxonomy_version.version_number = 1
taxonomy_version.taxonomy = tax
db.session.add(taxonomy_version)
db.session.commit()
taxonomy_version.original_version_id = taxonomy_version.id
db.session.add(taxonomy_version)
db.session.commit()
''' Study '''
# What if all none? Will they be grouped together?
# study = Study.query.filter_by(publication_id=publication.id, study_start=data["study_start"], study_end=data["study_end"]).first()
# if study == None:
# purpose_endangered = PurposeEndangered.query.filter_by(purpose_name=data["study_purpose_endangered_id"]).first() if data["study_purpose_endangered_id"] else data["study_purpose_endangered_id"]
#
# purpose_weed = PurposeWeed.query.filter_by(purpose_name="study_purpose_weed_id").first() if data["study_purpose_weed_id"] else data["study_purpose_weed_id"]
# database_source = Institute.query.filter_by(institution_name=data["study_database_source"]).first()# if data["study_purpose_weed_id"] else data["study_purpose_endangered_id"]
#
# study_dict = {'study_duration' : data["study_duration"],
# 'study_start' : data["study_start"],
# 'study_end' : data["study_end"],
# 'number_populations' : data["study_number_populations"],
# 'purpose_endangered_id' : purpose_endangered.id if purpose_endangered else None,
# 'purpose_weed_id' : purpose_weed.id if purpose_weed else None,
# 'database_source' : database_source}
#
# study_cleaned = data_clean(study_dict)
#
# # if not all(value == None for key, value in study_cleaned["kwargs"].items() if key not in ignore_keys) and population_present:
# study = Study(**study_cleaned["kwargs"])
# db.session.add(study)
# db.session.commit()
#
# study.publication_id = publication.id
# study.species_id = species.id
# db.session.add(study)
# db.session.commit()
#
#
# ''' Study Version '''
# version = version_data(study_cleaned)
# study_version = Version(**version)
# study_version.version_number = 1
# study_version.study = study
# db.session.add(study_version)
# db.session.commit()
# study_version.original_version_id = study_version.id
# db.session.add(study_version)
# db.session.commit()
''' Protocol '''
# digitization_protocol = DigitizationProtocol.query.filter_by(field_name=data["digitization_protocol_id"]).first()
# commonterm = CommonTerm.query.filter_by(common_value_name=data["commonterm_id"]).first()
# protocol = Protocol.query.filter_by(protocol_id=protocol.id).first()
# if protocol == None:
# protocol_dict = {'protocol_id' : protocol.id,
# 'digitization_protocol_id' : digitization_protocol.id if digitization_protocol else None,
# 'commonterm_id' : commonterm.id if commonterm else None}
# protocol_cleaned = data_clean(protocol_dict)
# protocol = Protocol(**protocol_cleaned["kwargs"])
# db.session.add(protocol)
# db.session.commit()
''' Population '''
''' '''
invasive_status_study = InvasiveStatusStudy.query.filter_by(status_name=data["population_invasive_status_study_id"]).first()
invasive_status_elsewhere = InvasiveStatusStudy.query.filter_by(status_name=data["population_invasive_status_elsewhere_id"]).first()
ecoregion = Ecoregion.query.filter_by(ecoregion_code=data["population_ecoregion_id"]).first()
continent = Continent.query.filter_by(continent_name=data["population_continent_id"]).first()
###Danny trying add database meta-table in correct location
database = Database.query.filter_by(database_master_version=data["population_database_id"]).first()
purpose_endangered = PurposeEndangered.query.filter_by(purpose_name=data["study_purpose_endangered_id"]).first() if data["study_purpose_endangered_id"] else data["study_purpose_endangered_id"]
purpose_weed = PurposeWeed.query.filter_by(purpose_name="study_purpose_weed_id").first() if data["study_purpose_weed_id"] else data["study_purpose_weed_id"]
database_source = Institute.query.filter_by(institution_name=data["study_database_source_id"]).first()
pop = Population.query.filter_by(population_name=data["population_name"], publication_id=publication.id, species_id=species.id).first()
if pop == None:
pop_dict = {'population_name' : data["population_name"],
'latitude' : data["population_latitude"],
'lat_ns' : data["lat_ns"],
'lat_deg' : data["lat_deg"],
'lat_min' : data["lat_min"],
'lat_sec' : data["lat_sec"],
'longitude' : data["population_longitude"],
'lon_ew' : data["lon_ew"],
'lon_deg' : data["lon_deg"],
'lon_min' : data["lon_min"],
'lon_sec' : data["lon_sec"],
'altitude' : data["population_altitude"],
#'pop_size' : data["population_pop_size"],
'country' : data["population_country"],
'invasive_status_study_id' : invasive_status_study.id if invasive_status_study else None,
'invasive_status_elsewhere_id' : invasive_status_elsewhere.id if invasive_status_elsewhere else None,
'ecoregion' : ecoregion,
'continent' : continent,
'database' : database,
'within_site_replication' : data['population_within_site_replication'],
'study_duration' : data["study_duration"],
'study_start' : data["study_start"],
'study_end' : data["study_end"],
'number_populations' : data["study_number_populations"],
'purpose_endangered_id' : purpose_endangered.id if purpose_endangered else None,
'purpose_weed_id' : purpose_weed.id if purpose_weed else None,
'database_source' : database_source
}
pop_cleaned = data_clean(pop_dict)
# if not all(value == None for key, value in pop_cleaned["kwargs"].items() if key not in ignore_keys) and matrix_present:
pop = Population(**pop_cleaned["kwargs"])
db.session.add(pop)
db.session.commit()
pop.species_author = data["species_author"]
pop.publication_id = publication.id
pop.species_id = species.id
db.session.add(pop)
db.session.commit()
''' Population Version '''
#version = version_data(pop_cleaned)
population_version = Version(**version)
population_version.version_number = 1
population_version.population = pop
db.session.add(population_version)
db.session.commit()
population_version.original_version_id = population_version.id
db.session.add(population_version)
db.session.commit()
''' Matrix '''
treatment_string = data["matrix_treatment_id"]
if treatment_string == 'NDY':
treatment = 'NDY'
elif treatment_string == None:
treatment = None
else:
        treatment = Treatment.query.filter_by(treatment_name=treatment_string).first()
        if treatment is None:
            treatment = Treatment(treatment_name=treatment_string)
        db.session.add(treatment)
        db.session.commit()
matrix_dict = {'treatment' : treatment,
'matrix_split' : coerce_boolean(data["matrix_split"]),
'matrix_composition' : MatrixComposition.query.filter_by(comp_name=data["matrix_composition_id"]).first(),
'matrix_criteria_size' : data["matrix_criteria_size"],
'matrix_criteria_ontogeny' : coerce_boolean(data["matrix_criteria_ontogeny"]),
'matrix_criteria_age' : coerce_boolean(data["matrix_criteria_age"]),
'matrix_start_month' : data["matrix_start_month"],
'matrix_end_month' : data["matrix_end_month"],
'matrix_start_year' : data["matrix_start_year"],
'matrix_end_year' : data["matrix_end_year"],
'studied_sex' : StudiedSex.query.filter_by(sex_code=data["matrix_studied_sex_id"]).first(),
'start_season' : StartSeason.query.filter_by(season_id=data["matrix_start_season_id"]).first() if data["matrix_start_season_id"] else None,
'end_season' : EndSeason.query.filter_by(season_id=data["matrix_end_season_id"]).first() if data["matrix_end_season_id"] else None,
'matrix_fec' : coerce_boolean(data["matrix_fec"]),
'matrix_a_string' : data["matrix_a_string"],
'matrix_f_string' : data["matrix_f_string"],
'matrix_u_string' : data["matrix_u_string"],
'matrix_c_string' : data["matrix_c_string"],
'non_independence' : data["matrix_non_independence"],
'matrix_dimension' : data["matrix_dimension"],
'non_independence_author' : data["matrix_non_independence_author"],
'matrix_complete' : coerce_boolean(data["matrix_complete"]),
'class_number' : data["matrix_class_number"],
'observations' : data["matrix_observations"],
'captivities' : Captivity.query.filter_by(cap_code=data["matrix_captivity_id"]).first(),
'class_author' : data["matrix_class_author"],
'class_organized' : data["matrix_class_organized"],
'matrix_difficulty' : data["matrix_difficulty"],
'seasonal' : coerce_boolean(data["matrix_seasonal"]),
'survival_issue' : calc_surv_issue(data["matrix_u_string"]),
'periodicity' : data["matrix_periodicity"],
'matrix_irreducible' : is_matrix_irreducible(data["matrix_a_string"]),
'matrix_primitive' : is_matrix_primitive(data["matrix_a_string"]),
'matrix_ergodic' : is_matrix_ergodic(data["matrix_a_string"]),
'matrix_lambda' : calc_lambda(data["matrix_a_string"])
}
matrix_cleaned = data_clean(matrix_dict)
# if not all(value == None for key, value in matrix_cleaned["kwargs"].items() if key not in ignore_keys):
matrix = Matrix(**matrix_cleaned["kwargs"])
db.session.add(matrix)
db.session.commit()
matrix.population_id = pop.id
db.session.add(matrix)
db.session.commit()
''' matrix Version '''
#version = version_data(matrix_cleaned)
matrix_version = Version(**version)
matrix_version.version_number = 1
matrix_version.matrix = matrix
db.session.add(matrix_version)
db.session.commit()
matrix_version.original_version_id = matrix_version.id
db.session.add(matrix_version)
db.session.commit()
''' Fixed '''
fixed = Fixed.query.filter_by(matrix=matrix).first()
if fixed == None:
fixed_dict = {'matrix' : matrix,
'census_timings' : CensusTiming.query.filter_by(census_name=data["fixed_census_timing_id"]).first(),
'seed_stage_error' : coerce_boolean(data["fixed_seed_stage_error"]),
'smalls' : Small.query.filter_by(small_name=data["fixed_small_id"]).first(),
'vector_str' : data["matrix_vectors_includes_na"]
}
fixed_cleaned = data_clean(fixed_dict)
fixed = Fixed(**fixed_cleaned["kwargs"])
db.session.add(fixed)
db.session.commit()
''' fixed Version '''
#version = version_data(fixed_cleaned)
fixed_version = Version(**version)
fixed_version.version_number = 1
fixed_version.fixed = fixed
db.session.add(fixed_version)
db.session.commit()
fixed_version.original_version_id = fixed_version.id
db.session.add(fixed_version)
db.session.commit()
def migration_loop(input_file):
all_deets = []
for i, row in enumerate(input_file):
        print(i)
data = convert_all_headers_new(row)
submit_new(data)
return "Migration Complete"
@manager.command
def migrate_compadre():
import csv
print "Migrating COMPADRE"
compadre = UnicodeDictReader(open("app/data-migrate/compadre_migration_2017.csv", "rU"))
return migration_loop(compadre)
@manager.command
def migrate_comadre():
import csv
print "Migrating COMADRE"
comadre = UnicodeDictReader(open("app/data-migrate/comadre_migration_2017.csv", "rU"))
return migration_loop(comadre)
@manager.command
def migrate_all():
import csv
print "Preparing to migrate COMPADRE and COMADRE"
compadre = UnicodeDictReader(open("app/data-migrate/compadre_migration_2017.csv", "rU"))
comadre = UnicodeDictReader(open("app/data-migrate/comadre_migration_2017.csv", "rU"))
print "Migrating COMPADRE"
migration_loop(compadre)
print "Migrating COMADRE"
migration_loop(comadre)
return
def convert_all_headers_new(dict):
import re
new_dict = {}
new_dict["species_gisd_status"] = dict["species_gisd_status"]
new_dict["species_seedbank"] = dict["species_seedbank"]
new_dict["species_clonality"] = dict["species_clonality"]
new_dict["publication_purpose_comparative_demography"] = dict["publication_purpose_comparative_demography"]
new_dict["publication_purpose_species_dynamics_description"] = dict["publication_purpose_species_dynamics_description"]
new_dict["publication_purpose_spatial_demography"] = dict["publication_purpose_spatial_demography"]
new_dict["publication_purpose_pva"] = dict["publication_purpose_pva"]
new_dict["publication_purpose_methodological_advancement"] = dict["publication_purpose_methodological_advancement"]
new_dict["publication_purpose_management_evaluation"] = dict["publication_purpose_management_evaluation"]
new_dict["publication_purpose_interspecific_interactions"] = dict["publication_purpose_interspecific_interactions"]
new_dict["publication_purpose_abiotic"] = dict["publication_purpose_abiotic"]
new_dict["species_author"] = dict["species_author"]
new_dict["species_accepted"] = dict["species_accepted"]
new_dict["species_common"]= dict["species_common"]
new_dict["taxonomy_genus"] = dict["taxonomy_genus"]
new_dict["taxonomy_family"] = dict["taxonomy_family"]
new_dict["taxonomy_order"] = dict["taxonomy_order"]
new_dict["taxonomy_class"] = dict["taxonomy_class"]
new_dict["taxonomy_phylum"] = dict["taxonomy_phylum"]
new_dict["taxonomy_kingdom"] = dict["taxonomy_kingdom"]
new_dict["trait_organism_type_id"] = dict["trait_organism_type"]
new_dict["trait_dicot_monoc_id"] = dict["trait_dicot_monoc"]
new_dict["trait_angio_gymno_id"] = dict["trait_angio_gymno"]
new_dict["publication_authors"] = dict["publication_authors"]
new_dict["publication_journal_name"] = dict["publication_journal_name"]
new_dict["publication_year"] = dict["publication_year"]
new_dict["publication_DOI_ISBN"] = dict["publication_DOI_ISBN"]
new_dict["publication_additional_source_string"] = dict["publication_additional_source_string"]
new_dict["study_duration"] = dict["study_duration"]
new_dict["study_start"] = dict["study_start"]
new_dict["study_end"] = dict["study_end"]
new_dict["matrix_periodicity"] = dict["matrix_periodicity"]
new_dict["study_number_populations"] = dict["study_number_populations"]
new_dict["matrix_criteria_size"] = dict["matrix_criteria_size"]
new_dict["matrix_criteria_ontogeny"] = dict["matrix_criteria_ontogeny"]
new_dict["matrix_criteria_age"] = dict["matrix_criteria_age"]
new_dict["population_name"] = dict["population_name"]
new_dict["population_latitude"] = dict["population_latitude"]
new_dict["lat_ns"] = dict["lat_ns"]
new_dict["lat_deg"] = dict["lat_deg"]
new_dict["lat_min"] = dict["lat_min"]
new_dict["lat_sec"] = dict["lat_sec"]
new_dict["population_longitude"] = dict["population_longitude"]
new_dict["lon_ew"] = dict["lon_ew"]
new_dict["lon_deg"] = dict["lon_deg"]
new_dict["lon_min"] = dict["lon_min"]
new_dict["lon_sec"] = dict["lon_sec"]
new_dict["population_altitude"]= dict["population_altitude"]
new_dict["population_country"] = dict["population_country"]
new_dict["population_continent_id"] = dict["population_continent"]
new_dict["population_ecoregion_id"] = dict["population_ecoregion"]
new_dict["matrix_studied_sex_id"] = dict["matrix_studied_sex"]
new_dict["matrix_composition_id"] = dict["matrix_composition"]
new_dict["matrix_treatment_id"] = dict["matrix_treatment_type"]
new_dict["matrix_captivity_id"] = dict["matrix_captivity"]
new_dict["matrix_start_year"] = dict["matrix_start_year"]
new_dict["matrix_start_season_id"] = dict["matrix_start_season"]
new_dict["matrix_start_month"] = dict["matrix_start_month"]
new_dict["matrix_end_year"] = dict["matrix_end_year"]
new_dict["matrix_end_season_id"] = dict["matrix_end_season"]
new_dict["matrix_end_month"] = dict["matrix_end_month"]
new_dict["matrix_split"] = dict["matrix_split"]
new_dict["matrix_fec"] = dict["matrix_fec"]
new_dict["matrix_observations"]= dict["matrix_observations"]
new_dict["matrix_dimension"] = dict["matrix_dimension"]
new_dict["matrix_survival_issue"] = dict["matrix_survival_issue"]
new_dict["matrix_a_string"] = dict["matrix_a_string"]
new_dict["matrix_c_string"] = dict["matrix_c_string"]
new_dict["matrix_f_string"] = dict["matrix_f_string"]
new_dict["matrix_u_string"] = dict["matrix_u_string"]
new_dict["matrix_class_organized"] = dict["matrix_class_organized"]
new_dict["matrix_class_author"] = dict["matrix_class_author"]
new_dict["matrix_class_number"] = dict["matrix_class_number"]
new_dict["matrix_vectors_includes_na"] = dict["matrix_vectors_includes_na"]
#new_dict["population_pop_size"] = dict["population_pop_size"]
new_dict["species_iucn_status_id"] = dict["species_iucn_status"]
new_dict["publication_date_digitization"] = dict["publication_date_digitization"]
# new_dict["species_esa_status_id"] = dict["species_esa_status"]
new_dict["population_invasive_status_study_id"] = dict["population_invasive_status_study"]
new_dict["population_invasive_status_elsewhere_id"] = dict["population_invasive_status_elsewhere"]
new_dict["study_purpose_endangered_id"] = dict["study_purpose_endangered"]
new_dict["study_purpose_weed_id"] = dict["study_purpose_weed"]
new_dict["trait_spand_ex_growth_type_id"] = dict["trait_spand_ex_growth_type"]
new_dict["trait_growth_form_raunkiaer_id"] = dict["trait_growth_form_raunkiaer"]
new_dict["fixed_census_timing_id"] = dict["fixed_census_timing"]
new_dict["fixed_small_id"] = dict["fixed_small"]
new_dict["fixed_seed_stage_error"] = dict["fixed_seed_stage_error"]
new_dict["species_gbif_taxon_key"] = dict["species_gbif_taxon_key"]
#new_dict["version_checked"] = dict["matrix_checked"] #column not in scv?
new_dict["version_checked_count"] = dict["matrix_checked_count"]
new_dict["taxonomy_genus_accepted"] = dict["taxonomy_genus_accepted"]
new_dict["matrix_independent"] = dict["matrix_independent"]
new_dict["matrix_non_independence"] = dict["matrix_non_independence"]
new_dict["matrix_non_independence_author"] = dict["matrix_non_independence_author"]
new_dict["matrix_difficulty"] = dict["matrix_difficulty"]
new_dict["matrix_complete"] = dict["matrix_complete"]
new_dict["matrix_seasonal"] = dict["matrix_seasonal"]
#new_dict["database_master_version"] = dict["database_master_version"]
new_dict["population_database_id"] = dict["database_master_version"]
#new_dict["database_date_created"] = dict["database_date_created"]
#new_dict["database_number_species_accepted"] = dict["database_number_species_accepted"]
#new_dict["database_number_studies"] = dict["database_number_studies"]
#new_dict["database_number_matrices"] = dict["database_number_matrices"]
#new_dict["database_agreement"] = dict["database_agreement"]
new_dict["taxonomy_col_check_ok"] = dict["taxonomy_col_check_ok"]
new_dict["taxonomy_col_check_date"]= dict["taxonomy_col_check_date"]
new_dict["matrix_independence_origin"] = dict["matrix_independence_origin"]
new_dict['image_path'] = dict["image_path"]
new_dict['image_path2'] = dict["image_path2"]
new_dict['species_iucn_taxonid'] = dict["species_iucn_taxonid"]
# correspondence
new_dict['publication_corresponding_author'] = dict["publication_corresponding_author"]
new_dict['publication_corresponding_email'] = dict["publication_corresponding_email"]
new_dict['date_author_contacted'] = dict["date_author_contacted"]
new_dict['date_author_contacted_again'] = dict["date_author_contacted_again"]
new_dict['correspondence_email_content'] = dict["correspondence_email_content"] # what was missing from publication (asked for)
new_dict['correspondence_author_reply'] = dict["correspondence_author_reply"] # did they reply?
new_dict['publication_student'] = dict["publication_student"] #who asked for it
new_dict['extra_content_email'] = dict["extra_content_email"] # extra information asked for
new_dict['publication_missing_data'] = dict["publication_missing_data"] # attatched to publication as a note about what is missing
new_dict['population_within_site_replication'] = dict["within_site_replication"]
new_dict['study_database_source_id'] = dict["study_database_source"]
new_dict['publication_study_notes'] = dict["publication_study_notes"]
new_dict['publications_protocol_id'] = dict["publications_protocol"]
new_dict['digitization_protocol_id'] = dict["digitization_protocol"]
new_dict['commonterm_id'] = dict["commonterm"]
    # Normalise the various "missing value" markers coming from the CSV.
    for key, value in new_dict.iteritems():
        if value in ("NA", "", "None", "NC", "."):
            new_dict[key] = None
        elif value == "AFI":
            new_dict[key] = 'NDY'
return new_dict
@manager.command
def migrate_meta():
from app.models import User, Role, Permission, \
IUCNStatus, OrganismType, GrowthFormRaunkiaer, ReproductiveRepetition, \
DicotMonoc, AngioGymno, SpandExGrowthType, SourceType, Database, Purpose, MissingData, ContentEmail, Ecoregion, Continent, InvasiveStatusStudy, InvasiveStatusElsewhere, StageTypeClass, \
TransitionType, MatrixComposition, StartSeason, StudiedSex, Captivity, Species, Taxonomy, Trait, \
Publication, AuthorContact, AdditionalSource, Population, Stage, StageType, Treatment, \
        MatrixStage, MatrixValue, Matrix, Interval, Fixed, Small, CensusTiming, Status, PurposeEndangered, PurposeWeed, Institute, Version, \
PublicationsProtocol, DigitizationProtocol, Protocol, CommonTerm
print "Migrating Meta Tables..."
Role.insert_roles()
Species.migrate()
Taxonomy.migrate()
Trait.migrate()
Publication.migrate()
AuthorContact.migrate()
Population.migrate()
StageType.migrate()
MatrixValue.migrate()
Matrix.migrate()
Fixed.migrate()
Version.migrate()
Institute.migrate()
User.migrate()
Database.migrate()
Status.migrate()
PublicationsProtocol.migrate()
DigitizationProtocol.migrate()
CommonTerm.migrate()
return
def model_version(model):
count = model.query.count()
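    # Assumes primary keys run contiguously from 1..count; each row is fetched
    # individually and flagged as the latest, original and OK version.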
for x in range(count):
y = model.query.get(x+1)
y.version_latest = 1
y.version_original = 1
y.version_ok = 1
db.session.add(y)
db.session.commit()
@manager.command
def version_current():
models = [Species(), Taxonomy(), Trait(), Publication(), AuthorContact(), Population(), StageType(), MatrixValue(),Matrix(), Fixed(), Institute(), Protocol()]
for model in models:
model_version(model)
@manager.command
def deploy():
"""Run deployment tasks."""
from flask.ext.migrate import upgrade, migrate, init
from app.models import User, Role, Permission
print "Migrating models to database"
init()
migrate()
upgrade()
migrate()
print "Models migrated to database"
print "Migrating meta data to tables"
migrate_meta()
print "Meta tables migrated"
print "Initial migration of our current version of database..."
# migrate_comadre()
migrate_all()
if __name__ == '__main__':
manager.run()
|
|
# engine/strategies.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Strategies for creating new instances of Engine types.
These are semi-private implementation classes which provide the
underlying behavior for the "strategy" keyword argument available on
:func:`~sqlalchemy.engine.create_engine`. Current available options are
``plain``, ``threadlocal``, and ``mock``.
New strategies can be added via new ``EngineStrategy`` classes.
"""
from operator import attrgetter
from sqlalchemy.engine import base, threadlocal, url
from sqlalchemy import util, exc, event
from sqlalchemy import pool as poollib
strategies = {}
class EngineStrategy(object):
"""An adaptor that processes input arguments and produces an Engine.
Provides a ``create`` method that receives input arguments and
produces an instance of base.Engine or a subclass.
"""
def __init__(self):
strategies[self.name] = self
def create(self, *args, **kwargs):
"""Given arguments, returns a new Engine instance."""
raise NotImplementedError()
class DefaultEngineStrategy(EngineStrategy):
"""Base class for built-in strategies."""
def create(self, name_or_url, **kwargs):
# create url.URL object
u = url.make_url(name_or_url)
dialect_cls = u.get_dialect()
if kwargs.pop('_coerce_config', False):
def pop_kwarg(key, default=None):
value = kwargs.pop(key, default)
if key in dialect_cls.engine_config_types:
value = dialect_cls.engine_config_types[key](value)
return value
else:
pop_kwarg = kwargs.pop
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = pop_kwarg(k)
dbapi = kwargs.pop('module', None)
if dbapi is None:
dbapi_args = {}
for k in util.get_func_kwargs(dialect_cls.dbapi):
if k in kwargs:
dbapi_args[k] = pop_kwarg(k)
dbapi = dialect_cls.dbapi(**dbapi_args)
dialect_args['dbapi'] = dbapi
# create dialect
dialect = dialect_cls(**dialect_args)
# assemble connection arguments
(cargs, cparams) = dialect.create_connect_args(u)
cparams.update(pop_kwarg('connect_args', {}))
# look for existing pool or create
pool = pop_kwarg('pool', None)
if pool is None:
def connect():
return dialect.connect(*cargs, **cparams)
creator = pop_kwarg('creator', connect)
poolclass = pop_kwarg('poolclass', None)
if poolclass is None:
poolclass = dialect_cls.get_pool_class(u)
pool_args = {}
# consume pool arguments from kwargs, translating a few of
# the arguments
translate = {'logging_name': 'pool_logging_name',
'echo': 'echo_pool',
'timeout': 'pool_timeout',
'recycle': 'pool_recycle',
'events': 'pool_events',
'use_threadlocal': 'pool_threadlocal',
'reset_on_return': 'pool_reset_on_return'}
for k in util.get_cls_kwargs(poolclass):
tk = translate.get(k, k)
if tk in kwargs:
pool_args[k] = pop_kwarg(tk)
pool = poolclass(creator, **pool_args)
else:
if isinstance(pool, poollib._DBProxy):
pool = pool.get_pool(*cargs, **cparams)
else:
pool = pool
# create engine.
engineclass = self.engine_cls
engine_args = {}
for k in util.get_cls_kwargs(engineclass):
if k in kwargs:
engine_args[k] = pop_kwarg(k)
_initialize = kwargs.pop('_initialize', True)
# all kwargs should be consumed
if kwargs:
raise TypeError(
"Invalid argument(s) %s sent to create_engine(), "
"using configuration %s/%s/%s. Please check that the "
"keyword arguments are appropriate for this combination "
"of components." % (','.join("'%s'" % k for k in kwargs),
dialect.__class__.__name__,
pool.__class__.__name__,
engineclass.__name__))
engine = engineclass(pool, dialect, u, **engine_args)
if _initialize:
do_on_connect = dialect.on_connect()
if do_on_connect:
def on_connect(dbapi_connection, connection_record):
conn = getattr(
dbapi_connection, '_sqla_unwrap', dbapi_connection)
if conn is None:
return
do_on_connect(conn)
event.listen(pool, 'first_connect', on_connect)
event.listen(pool, 'connect', on_connect)
def first_connect(dbapi_connection, connection_record):
c = base.Connection(engine, connection=dbapi_connection,
_has_events=False)
c._execution_options = util.immutabledict()
dialect.initialize(c)
event.listen(pool, 'first_connect', first_connect, once=True)
return engine
class PlainEngineStrategy(DefaultEngineStrategy):
"""Strategy for configuring a regular Engine."""
name = 'plain'
engine_cls = base.Engine
PlainEngineStrategy()
class ThreadLocalEngineStrategy(DefaultEngineStrategy):
"""Strategy for configuring an Engine with threadlocal behavior."""
name = 'threadlocal'
engine_cls = threadlocal.TLEngine
ThreadLocalEngineStrategy()
class MockEngineStrategy(EngineStrategy):
"""Strategy for configuring an Engine-like object with mocked execution.
Produces a single mock Connectable object which dispatches
statement execution to a passed-in function.
"""
name = 'mock'
def create(self, name_or_url, executor, **kwargs):
# create url.URL object
u = url.make_url(name_or_url)
dialect_cls = u.get_dialect()
dialect_args = {}
# consume dialect arguments from kwargs
for k in util.get_cls_kwargs(dialect_cls):
if k in kwargs:
dialect_args[k] = kwargs.pop(k)
# create dialect
dialect = dialect_cls(**dialect_args)
return MockEngineStrategy.MockConnection(dialect, executor)
class MockConnection(base.Connectable):
def __init__(self, dialect, execute):
self._dialect = dialect
self.execute = execute
engine = property(lambda s: s)
dialect = property(attrgetter('_dialect'))
name = property(lambda s: s._dialect.name)
def contextual_connect(self, **kwargs):
return self
def execution_options(self, **kw):
return self
def compiler(self, statement, parameters, **kwargs):
return self._dialect.compiler(
statement, parameters, engine=self, **kwargs)
def create(self, entity, **kwargs):
kwargs['checkfirst'] = False
from sqlalchemy.engine import ddl
ddl.SchemaGenerator(
self.dialect, self, **kwargs).traverse_single(entity)
def drop(self, entity, **kwargs):
kwargs['checkfirst'] = False
from sqlalchemy.engine import ddl
ddl.SchemaDropper(
self.dialect, self, **kwargs).traverse_single(entity)
def _run_visitor(self, visitorcallable, element,
connection=None,
**kwargs):
kwargs['checkfirst'] = False
visitorcallable(self.dialect, self,
**kwargs).traverse_single(element)
def execute(self, object, *multiparams, **params):
raise NotImplementedError()
MockEngineStrategy()
|
|
import filecmp
from io import StringIO
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch
from mlx.warnings import warnings_wrapper
TEST_IN_DIR = Path(__file__).parent / 'test_in'
TEST_OUT_DIR = Path(__file__).parent / 'test_out'
class TestIntegration(TestCase):
def setUp(self):
if not TEST_OUT_DIR.exists():
TEST_OUT_DIR.mkdir()
def test_help(self):
with self.assertRaises(SystemExit) as ex:
warnings_wrapper(['--help'])
self.assertEqual(0, ex.exception.code)
def test_version(self):
with self.assertRaises(SystemExit) as ex:
warnings_wrapper(['--version'])
self.assertEqual(0, ex.exception.code)
def test_no_parser_selection(self):
with self.assertRaises(SystemExit) as ex:
warnings_wrapper([])
self.assertEqual(2, ex.exception.code)
min_ret_val_on_failure = 1
junit_warning_cnt = 3
def test_single_argument(self):
retval = warnings_wrapper(['--junit', 'tests/test_in/junit_single_fail.xml'])
self.assertEqual(1, retval)
def test_single_defect_coverity(self):
retval = warnings_wrapper(['--coverity', 'tests/test_in/coverity_single_defect.txt'])
self.assertEqual(1, retval)
def test_two_arguments(self):
retval = warnings_wrapper(['--junit', 'tests/test_in/junit_single_fail.xml', 'tests/test_in/junit_double_fail.xml'])
self.assertEqual(1 + 2, retval)
def test_non_existing_logfile(self):
retval = warnings_wrapper(['--sphinx', 'not-exist.log'])
self.assertEqual(1, retval)
retval = warnings_wrapper(['--xmlrunner', 'not-exist.log'])
self.assertEqual(1, retval)
def test_single_command_argument(self):
retval = warnings_wrapper(['--junit', '--command', 'cat', 'tests/test_in/junit_single_fail.xml'])
self.assertEqual(1, retval)
def test_two_command_arguments(self):
retval = warnings_wrapper(['--sphinx', '--command', 'cat', 'tests/test_in/sphinx_single_warning.txt', 'tests/test_in/sphinx_double_warning.txt'])
self.assertEqual(1 + 2, retval)
def test_command_with_its_own_arguments(self):
retval = warnings_wrapper(['--sphinx', '--command', 'cat', '-A', 'tests/test_in/sphinx_single_warning.txt', 'tests/test_in/sphinx_double_warning.txt'])
self.assertEqual(1 + 2, retval)
def test_command_to_stderr(self):
retval = warnings_wrapper(['--sphinx', '--command', 'cat', 'tests/test_in/sphinx_single_warning.txt', '>&2'])
self.assertEqual(1, retval)
def test_faulty_command(self):
with self.assertRaises(OSError):
warnings_wrapper(['--sphinx', '--command', 'blahahahaha', 'tests/test_in/sphinx_single_warning.txt'])
    def test_command_retval_err(self):
retval = warnings_wrapper(['--sphinx', '--command', 'false'])
self.assertEqual(1, retval)
    def test_command_retval_err_suppress(self):
retval = warnings_wrapper(['--sphinx', '--ignore-retval', '--command', 'false'])
self.assertEqual(0, retval)
def test_wildcarded_arguments(self):
        # note: shell wildcard expansion is not simulated here; the glob is passed through as-is (as it would be on Windows)
retval = warnings_wrapper(['--junit', 'tests/test_in/junit*.xml'])
self.assertEqual(self.junit_warning_cnt, retval)
def test_max(self):
retval = warnings_wrapper(['--junit', '--maxwarnings', '2', 'tests/test_in/junit*.xml'])
self.assertEqual(self.junit_warning_cnt, retval)
def test_max_but_still_ok(self):
retval = warnings_wrapper(['--junit', '--maxwarnings', '100', 'tests/test_in/junit*.xml'])
self.assertEqual(0, retval)
def test_min(self):
retval = warnings_wrapper(['--junit', '--maxwarnings', '100', '--minwarnings', '100', 'tests/test_in/junit*.xml'])
self.assertEqual(self.junit_warning_cnt, retval)
def test_min_but_still_ok(self):
retval = warnings_wrapper(['--junit', '--max-warnings', '100', '--min-warnings', '2', 'tests/test_in/junit*.xml'])
self.assertEqual(0, retval)
def test_exact_sphinx(self):
retval = warnings_wrapper(['--sphinx', '--exact-warnings', '2', 'tests/test_in/sphinx_double_warning.txt'])
self.assertEqual(0, retval)
def test_exact_too_few(self):
retval = warnings_wrapper(['--sphinx', '--exact-warnings', '3', 'tests/test_in/sphinx_double_warning.txt'])
self.assertEqual(2, retval)
def test_exact_too_many(self):
retval = warnings_wrapper(['--sphinx', '--exact-warnings', '1', 'tests/test_in/sphinx_double_warning.txt'])
self.assertEqual(2, retval)
def test_exact_junit(self):
retval = warnings_wrapper(['--junit', '--exact-warnings', '3', 'tests/test_in/junit*.xml'])
self.assertEqual(0, retval)
def test_exact_with_min(self):
with self.assertRaises(SystemExit):
warnings_wrapper(['--junit', '--exact-warnings', '3', '--min-warnings', '3', 'tests/test_in/junit*.xml'])
def test_exact_with_max(self):
with self.assertRaises(SystemExit):
warnings_wrapper(['--junit', '--exact-warnings', '3', '--max-warnings', '3', 'tests/test_in/junit*.xml'])
def test_configfile_ok(self):
retval = warnings_wrapper(['--config', 'tests/test_in/config_example.json', 'tests/test_in/junit_single_fail.xml'])
self.assertEqual(0, retval)
def test_configfile_exclude_commandline(self):
with self.assertRaises(SystemExit) as ex:
warnings_wrapper(['--config', 'tests/test_in/config_example.json', '--junit', 'tests/test_in/junit_single_fail.xml'])
self.assertEqual(2, ex.exception.code)
def test_sphinx_deprecation(self):
retval = warnings_wrapper(['--sphinx', 'tests/test_in/sphinx_double_deprecation_warning.txt'])
self.assertEqual(0, retval)
def test_exclude_sphinx_deprecation(self):
retval = warnings_wrapper(['--sphinx', '--include-sphinx-deprecation', 'tests/test_in/sphinx_double_deprecation_warning.txt'])
self.assertEqual(2, retval)
def test_ignore_sphinx_deprecation_flag(self):
retval = warnings_wrapper(['--junit', '--include-sphinx-deprecation', 'tests/test_in/junit*.xml'])
self.assertEqual(self.junit_warning_cnt, retval)
def test_multiple_checkers_ret_val(self):
retval = warnings_wrapper(['--sphinx', '--junit', 'tests/test_in/junit*.xml'])
self.assertEqual(self.junit_warning_cnt, retval)
def test_non_zero_ret_val_on_failure(self):
retval = warnings_wrapper(['--sphinx', '--exact-warnings', '2', 'tests/test_in/junit*.xml'])
self.assertEqual(self.min_ret_val_on_failure, retval)
def test_various_sphinx_warnings(self):
        ''' Use the build log of mlx.traceability's example documentation as input.
The input file contains 18 Sphinx warnings, but exactly 19 are required to pass.
The number of warnings (18) must be returned as return code.
'''
retval = warnings_wrapper(['--sphinx', '--exact-warnings', '19', 'tests/test_in/sphinx_traceability_output.txt'])
self.assertEqual(18, retval)
def test_robot_with_name_arg(self):
retval = warnings_wrapper(['--robot', '--name', "Suite Two", 'tests/test_in/robot_double_fail.xml'])
self.assertEqual(1, retval)
def test_robot_default_name_arg(self):
''' If no suite name is configured, all suites must be taken into account '''
retval = warnings_wrapper(['--robot', 'tests/test_in/robot_double_fail.xml'])
self.assertEqual(2, retval)
def test_robot_verbose(self):
''' If no suite name is configured, all suites must be taken into account '''
with patch('sys.stdout', new=StringIO()) as fake_out:
retval = warnings_wrapper(['--verbose', '--robot', '--name', 'Suite Two', 'tests/test_in/robot_double_fail.xml'])
stdout_log = fake_out.getvalue()
self.assertEqual(1, retval)
self.assertEqual(
'\n'.join([
"Suite One & Suite Two.Suite Two.Another test",
"Suite 'Suite Two': 1 warnings found",
"Counted failures for test suite 'Suite Two'.",
"Number of warnings (1) is higher than the maximum limit (0). Returning error code 1.",
]) + '\n',
stdout_log
)
def test_robot_config(self):
with patch('sys.stdout', new=StringIO()) as fake_out:
retval = warnings_wrapper([
'--config',
'tests/test_in/config_example_robot.json',
'tests/test_in/robot_double_fail.xml',
])
stdout_log = fake_out.getvalue()
self.assertEqual(
'\n'.join([
"Config parsing for robot completed",
"Suite 'Suite One': 1 warnings found",
"2 warnings found",
"Suite 'Suite Two': 1 warnings found",
"Suite 'b4d su1te name': 0 warnings found",
"Counted failures for test suite 'Suite One'.",
"Number of warnings (1) is between limits 0 and 1. Well done.",
"Counted failures for all test suites.",
"Number of warnings (2) is higher than the maximum limit (1). Returning error code 2.",
"Counted failures for test suite 'Suite Two'.",
"Number of warnings (1) is between limits 1 and 2. Well done.",
"Counted failures for test suite 'b4d su1te name'.",
"Number of warnings (0) is exactly as expected. Well done.",
]) + '\n',
stdout_log
)
self.assertEqual(2, retval)
def test_robot_config_check_names(self):
self.maxDiff = None
with patch('sys.stdout', new=StringIO()) as fake_out:
with self.assertRaises(SystemExit) as cm_err:
warnings_wrapper(['--config', 'tests/test_in/config_example_robot_invalid_suite.json',
'tests/test_in/robot_double_fail.xml'])
stdout_log = fake_out.getvalue()
self.assertEqual(
'\n'.join([
"Config parsing for robot completed",
"ERROR: No suite with name 'b4d su1te name' found. Returning error code -1.",
]) + '\n',
stdout_log
)
self.assertEqual(cm_err.exception.code, -1)
def test_robot_cli_check_name(self):
self.maxDiff = None
with patch('sys.stdout', new=StringIO()) as fake_out:
with self.assertRaises(SystemExit) as cm_err:
warnings_wrapper(['--verbose', '--robot', '--name', 'Inv4lid Name',
'tests/test_in/robot_double_fail.xml'])
stdout_log = fake_out.getvalue()
self.assertEqual(
'\n'.join([
"ERROR: No suite with name 'Inv4lid Name' found. Returning error code -1.",
]) + '\n',
stdout_log
)
self.assertEqual(cm_err.exception.code, -1)
def test_output_file_sphinx(self):
filename = 'sphinx_double_deprecation_warning_summary.txt'
out_file = str(TEST_OUT_DIR / filename)
ref_file = str(TEST_IN_DIR / filename)
retval = warnings_wrapper(['--sphinx', '--include-sphinx-deprecation', '-o', out_file,
'tests/test_in/sphinx_double_deprecation_warning.txt'])
self.assertEqual(2, retval)
self.assertTrue(filecmp.cmp(out_file, ref_file))
def test_output_file_robot_basic(self):
filename = 'robot_double_fail_summary.txt'
out_file = str(TEST_OUT_DIR / filename)
ref_file = str(TEST_IN_DIR / filename)
retval = warnings_wrapper([
'--output', out_file,
'-r',
'tests/test_in/robot_double_fail.xml',
])
self.assertEqual(2, retval)
self.assertTrue(filecmp.cmp(out_file, ref_file), '{} differs from {}'.format(out_file, ref_file))
def test_output_file_robot_config(self):
filename = 'robot_double_fail_config_summary.txt'
out_file = str(TEST_OUT_DIR / filename)
ref_file = str(TEST_IN_DIR / filename)
retval = warnings_wrapper([
'--output', out_file,
'--config', 'tests/test_in/config_example_robot.json',
'tests/test_in/robot_double_fail.xml',
])
self.assertEqual(2, retval)
self.assertTrue(filecmp.cmp(out_file, ref_file), '{} differs from {}'.format(out_file, ref_file))
def test_output_file_junit(self):
filename = 'junit_double_fail_summary.txt'
out_file = str(TEST_OUT_DIR / filename)
ref_file = str(TEST_IN_DIR / filename)
retval = warnings_wrapper([
'--output', out_file,
'--junit',
'tests/test_in/junit_double_fail.xml',
])
self.assertEqual(2, retval)
self.assertTrue(filecmp.cmp(out_file, ref_file), '{} differs from {}'.format(out_file, ref_file))
|
|
"""
The Python parts of the Jedi library for VIM. It is mostly about communicating
with VIM.
"""
import traceback # for exception output
import re
import os
import sys
from shlex import split as shsplit
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest # Python 2
is_py3 = sys.version_info[0] >= 3
if is_py3:
unicode = str
class PythonToVimStr(unicode):
""" Vim has a different string implementation of single quotes """
__slots__ = []
def __new__(cls, obj, encoding='UTF-8'):
if is_py3 or isinstance(obj, unicode):
return unicode.__new__(cls, obj)
else:
return unicode.__new__(cls, obj, encoding)
def __repr__(self):
# this is totally stupid and makes no sense but vim/python unicode
# support is pretty bad. don't ask how I came up with this... It just
# works...
# It seems to be related to that bug: http://bugs.python.org/issue5876
if unicode is str:
s = self
else:
s = self.encode('UTF-8')
return '"%s"' % s.replace('\\', '\\\\').replace('"', r'\"')
class VimError(Exception):
def __init__(self, message, throwpoint, executing):
super(type(self), self).__init__(message)
self.message = message
self.throwpoint = throwpoint
self.executing = executing
def __str__(self):
return self.message + '; created by: ' + repr(self.executing)
def _catch_exception(string, is_eval):
"""
    Interface between Vim and Python that calls back into Vim.
    Necessary because the exact error message is not exposed by `vim.error`.
"""
e = 'jedi#_vim_exceptions(%s, %s)'
result = vim.eval(e % (repr(PythonToVimStr(string, 'UTF-8')), is_eval))
if 'exception' in result:
raise VimError(result['exception'], result['throwpoint'], string)
return result['result']
def vim_command(string):
_catch_exception(string, 0)
def vim_eval(string):
return _catch_exception(string, 1)
def no_jedi_warning():
vim.command('echohl WarningMsg'
'| echom "Please install Jedi if you want to use jedi-vim."'
'| echohl None')
def echo_highlight(msg):
vim_command('echohl WarningMsg | echom "{}" | echohl None'.format(
msg.replace('"', '\\"')))
import vim
try:
import jedi
except ImportError:
no_jedi_warning()
jedi = None
else:
try:
version = jedi.__version__
except Exception as e: # e.g. AttributeError
echo_highlight("Could not load jedi python module: {}".format(e))
jedi = None
else:
if isinstance(version, str):
# the normal use case, now.
from jedi import utils
version = utils.version_info()
if version < (0, 7):
echo_highlight('Please update your Jedi version, it is too old.')
def catch_and_print_exceptions(func):
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except (Exception, vim.error):
print(traceback.format_exc())
return None
return wrapper
def _check_jedi_availability(show_error=False):
def func_receiver(func):
def wrapper(*args, **kwargs):
if jedi is None:
if show_error:
no_jedi_warning()
return
else:
return func(*args, **kwargs)
return wrapper
return func_receiver
@catch_and_print_exceptions
def get_script(source=None, column=None):
jedi.settings.additional_dynamic_modules = \
[b.name for b in vim.buffers if b.name is not None and b.name.endswith('.py')]
if source is None:
source = '\n'.join(vim.current.buffer)
row = vim.current.window.cursor[0]
if column is None:
column = vim.current.window.cursor[1]
buf_path = vim.current.buffer.name
encoding = vim_eval('&encoding') or 'latin1'
return jedi.Script(source, row, column, buf_path, encoding)
@_check_jedi_availability(show_error=False)
@catch_and_print_exceptions
def completions():
row, column = vim.current.window.cursor
# Clear call signatures in the buffer so they aren't seen by the completer.
# Call signatures in the command line can stay.
if vim_eval("g:jedi#show_call_signatures") == '1':
clear_call_signatures()
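    # Vim's complete-function protocol has two phases: when a:findstart is 1 we
    # must return the column where completion starts; on the second call we
    # return the list of matches for a:base.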
if vim.eval('a:findstart') == '1':
count = 0
for char in reversed(vim.current.line[:column]):
            if not re.match(r'[\w\d]', char):
break
count += 1
vim.command('return %i' % (column - count))
else:
base = vim.eval('a:base')
source = ''
for i, line in enumerate(vim.current.buffer):
            # Re-insert the completion base into the current line; Vim strips it
            # before calling the completion function, so `source` would
            # otherwise be incomplete.
if i == row - 1:
source += line[:column] + base + line[column:]
else:
source += line
source += '\n'
# here again hacks, because jedi has a different interface than vim
column += len(base)
try:
script = get_script(source=source, column=column)
completions = script.completions()
signatures = script.call_signatures()
out = []
for c in completions:
d = dict(word=PythonToVimStr(c.name[:len(base)] + c.complete),
abbr=PythonToVimStr(c.name),
# stuff directly behind the completion
menu=PythonToVimStr(c.description),
info=PythonToVimStr(c.docstring()), # docstr
icase=1, # case insensitive
dup=1 # allow duplicates (maybe later remove this)
)
out.append(d)
strout = str(out)
except Exception:
# print to stdout, will be in :messages
print(traceback.format_exc())
strout = ''
completions = []
signatures = []
show_call_signatures(signatures)
vim.command('return ' + strout)
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def goto(mode="goto", no_output=False):
"""
    :param str mode: "goto", "related_name", "definition", "assignment"
:return: list of definitions/assignments
:rtype: list
"""
script = get_script()
try:
if mode == "goto":
definitions = [x for x in script.goto_definitions()
if not x.in_builtin_module()]
if not definitions:
definitions = script.goto_assignments()
elif mode == "related_name":
definitions = script.usages()
elif mode == "definition":
definitions = script.goto_definitions()
elif mode == "assignment":
definitions = script.goto_assignments()
except jedi.NotFoundError:
echo_highlight("Cannot follow nothing. Put your cursor on a valid name.")
definitions = []
else:
if no_output:
return definitions
if not definitions:
echo_highlight("Couldn't find any definitions for this.")
elif len(definitions) == 1 and mode != "related_name":
# just add some mark to add the current position to the jumplist.
# this is ugly, because it overrides the mark for '`', so if anyone
# has a better idea, let me know.
vim_command('normal! m`')
d = list(definitions)[0]
if d.in_builtin_module():
if d.is_keyword:
echo_highlight("Cannot get the definition of Python keywords.")
else:
echo_highlight("Builtin modules cannot be displayed (%s)."
% d.desc_with_module)
else:
if d.module_path != vim.current.buffer.name:
result = new_buffer(d.module_path)
if not result:
return []
vim.current.window.cursor = d.line, d.column
else:
# multiple solutions
lst = []
for d in definitions:
if d.in_builtin_module():
lst.append(dict(text=PythonToVimStr('Builtin ' + d.description)))
else:
lst.append(dict(filename=PythonToVimStr(d.module_path),
lnum=d.line, col=d.column + 1,
text=PythonToVimStr(d.description)))
vim_eval('setqflist(%s)' % repr(lst))
vim_eval('jedi#add_goto_window(' + str(len(lst)) + ')')
return definitions
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def show_documentation():
script = get_script()
try:
definitions = script.goto_definitions()
except jedi.NotFoundError:
definitions = []
except Exception:
# print to stdout, will be in :messages
definitions = []
print("Exception, this shouldn't happen.")
print(traceback.format_exc())
if not definitions:
echo_highlight('No documentation found for that.')
vim.command('return')
else:
docs = ['Docstring for %s\n%s\n%s' % (d.desc_with_module, '=' * 40, d.docstring())
if d.docstring() else '|No Docstring for %s|' % d for d in definitions]
text = ('\n' + '-' * 79 + '\n').join(docs)
vim.command('let l:doc = %s' % repr(PythonToVimStr(text)))
vim.command('let l:doc_lines = %s' % len(text.split('\n')))
return True
@catch_and_print_exceptions
def clear_call_signatures():
# Check if using command line call signatures
if vim_eval("g:jedi#show_call_signatures") == '2':
vim_command('echo ""')
return
cursor = vim.current.window.cursor
e = vim_eval('g:jedi#call_signature_escape')
# We need two turns here to search and replace certain lines:
# 1. Search for a line with a call signature and save the appended
# characters
# 2. Actually replace the line and redo the status quo.
py_regex = r'%sjedi=([0-9]+), (.*?)%s.*?%sjedi%s'.replace('%s', e)
for i, line in enumerate(vim.current.buffer):
match = re.search(py_regex, line)
if match is not None:
# Some signs were added to minimize syntax changes due to call
# signatures. We have to remove them again. The number of them is
# specified in `match.group(1)`.
after = line[match.end() + int(match.group(1)):]
line = line[:match.start()] + match.group(2) + after
vim.current.buffer[i] = line
vim.current.window.cursor = cursor
@_check_jedi_availability(show_error=False)
@catch_and_print_exceptions
def show_call_signatures(signatures=()):
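    # The signature text is written directly into the buffer line above the
    # call, wrapped in g:jedi#call_signature_escape markers so the after/syntax
    # file can conceal them; clear_call_signatures() restores the original
    # line contents afterwards.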
if vim_eval("has('conceal') && g:jedi#show_call_signatures") == '0':
return
if signatures == ():
signatures = get_script().call_signatures()
clear_call_signatures()
if not signatures:
return
if vim_eval("g:jedi#show_call_signatures") == '2':
return cmdline_call_signatures(signatures)
for i, signature in enumerate(signatures):
line, column = signature.bracket_start
# signatures are listed above each other
line_to_replace = line - i - 1
# because there's a space before the bracket
insert_column = column - 1
if insert_column < 0 or line_to_replace <= 0:
# Edge cases, when the call signature has no space on the screen.
break
# TODO check if completion menu is above or below
line = vim_eval("getline(%s)" % line_to_replace)
params = [p.description.replace('\n', '') for p in signature.params]
try:
# *_*PLACEHOLDER*_* makes something fat. See after/syntax file.
params[signature.index] = '*_*%s*_*' % params[signature.index]
except (IndexError, TypeError):
pass
# This stuff is reaaaaally a hack! I cannot stress enough, that
# this is a stupid solution. But there is really no other yet.
# There is no possibility in VIM to draw on the screen, but there
# will be one (see :help todo Patch to access screen under Python.
# (Marko Mahni, 2010 Jul 18))
text = " (%s) " % ', '.join(params)
text = ' ' * (insert_column - len(line)) + text
end_column = insert_column + len(text) - 2 # -2 due to bold symbols
# Need to decode it with utf8, because vim returns always a python 2
# string even if it is unicode.
e = vim_eval('g:jedi#call_signature_escape')
if hasattr(e, 'decode'):
e = e.decode('UTF-8')
# replace line before with cursor
regex = "xjedi=%sx%sxjedix".replace('x', e)
prefix, replace = line[:insert_column], line[insert_column:end_column]
# Check the replace stuff for strings, to append them
# (don't want to break the syntax)
regex_quotes = r'''\\*["']+'''
# `add` are all the quotation marks.
# join them with a space to avoid producing '''
add = ' '.join(re.findall(regex_quotes, replace))
# search backwards
if add and replace[0] in ['"', "'"]:
a = re.search(regex_quotes + '$', prefix)
add = ('' if a is None else a.group(0)) + add
tup = '%s, %s' % (len(add), replace)
repl = prefix + (regex % (tup, text)) + add + line[end_column:]
vim_eval('setline(%s, %s)' % (line_to_replace, repr(PythonToVimStr(repl))))
@catch_and_print_exceptions
def cmdline_call_signatures(signatures):
def get_params(s):
return [p.description.replace('\n', '') for p in s.params]
if len(signatures) > 1:
params = zip_longest(*map(get_params, signatures), fillvalue='_')
params = ['(' + ', '.join(p) + ')' for p in params]
else:
params = get_params(signatures[0])
text = ', '.join(params).replace('"', '\\"').replace(r'\n', r'\\n')
# Allow 12 characters for ruler/showcmd - setting noruler/noshowcmd
# here causes incorrect undo history
max_msg_len = int(vim_eval('&columns')) - 12
max_num_spaces = (max_msg_len - len(signatures[0].call_name)
- len(text) - 2) # 2 accounts for parentheses
if max_num_spaces < 0:
return # No room for the message
_, column = signatures[0].bracket_start
num_spaces = min(int(vim_eval('g:jedi#first_col +'
'wincol() - col(".")')) +
column - len(signatures[0].call_name),
max_num_spaces)
spaces = ' ' * num_spaces
try:
index = [s.index for s in signatures if isinstance(s.index, int)][0]
escaped_param = params[index].replace(r'\n', r'\\n')
left = text.index(escaped_param)
right = left + len(escaped_param)
vim_command(' echon "%s" | '
'echohl Function | echon "%s" | '
'echohl None | echon "(" | '
'echohl jediFunction | echon "%s" | '
'echohl jediFat | echon "%s" | '
'echohl jediFunction | echon "%s" | '
'echohl None | echon ")"'
% (spaces, signatures[0].call_name, text[:left],
text[left:right], text[right:]))
except (TypeError, IndexError):
vim_command(' echon "%s" | '
'echohl Function | echon "%s" | '
'echohl None | echon "(" | '
'echohl jediFunction | echon "%s" | '
'echohl None | echon ")"'
% (spaces, signatures[0].call_name, text))
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def rename():
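    # Renaming happens in two stages: the first call (no extra argument) deletes
    # the word under the cursor, registers an InsertLeave autocmd and enters
    # insert mode; when the user leaves insert mode, jedi#rename(1) calls back
    # in here, undoes the local edits and hands the typed name to do_rename().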
if not int(vim.eval('a:0')):
vim_command('augroup jedi_rename')
vim_command('autocmd InsertLeave <buffer> call jedi#rename(1)')
vim_command('augroup END')
vim_command("let s:jedi_replace_orig = expand('<cword>')")
vim_command('normal! diw')
vim_command("let s:jedi_changedtick = b:changedtick")
vim_command('startinsert')
else:
# Remove autocommand.
vim_command('autocmd! jedi_rename InsertLeave')
# Get replacement, if there is something on the cursor.
# This won't be the case when the user ends insert mode right away,
# and `<cword>` would pick up the nearest word instead.
if vim_eval('getline(".")[getpos(".")[2]-1]') != ' ':
replace = vim_eval("expand('<cword>')")
else:
replace = None
cursor = vim.current.window.cursor
# Undo new word, but only if something was changed, which is not the
# case when ending insert mode right away.
if vim_eval('b:changedtick != s:jedi_changedtick') == '1':
vim_command('normal! u') # Undo new word.
vim_command('normal! u') # Undo diw.
vim.current.window.cursor = cursor
if replace:
return do_rename(replace)
def rename_visual():
replace = vim.eval('input("Rename to: ")')
orig = vim.eval('getline(".")[(getpos("\'<")[2]-1):getpos("\'>")[2]]')
do_rename(replace, orig)
def do_rename(replace, orig=None):
if not len(replace):
echo_highlight('No rename possible without name.')
return
if orig is None:
orig = vim_eval('s:jedi_replace_orig')
# Save original window / tab.
saved_tab = int(vim_eval('tabpagenr()'))
saved_win = int(vim_eval('winnr()'))
temp_rename = goto(mode="related_name", no_output=True)
# Sort the whole thing reverse (positions at the end of the line
# must be first, because they move the stuff before the position).
temp_rename = sorted(temp_rename, reverse=True,
key=lambda x: (x.module_path, x.start_pos))
buffers = set()
for r in temp_rename:
if r.in_builtin_module():
continue
if os.path.abspath(vim.current.buffer.name) != r.module_path:
result = new_buffer(r.module_path)
if not result:
echo_highlight("Jedi-vim: failed to create buffer window for {}!".format(r.module_path))
continue
buffers.add(vim.current.buffer.name)
# Save view.
saved_view = vim_eval('string(winsaveview())')
# Replace original word.
vim.current.window.cursor = r.start_pos
vim_command('normal! c{:d}l{}'.format(len(orig), replace))
# Restore view.
vim_command('call winrestview(%s)' % saved_view)
# Restore previous tab and window.
vim_command('tabnext {:d}'.format(saved_tab))
vim_command('{:d}wincmd w'.format(saved_win))
if len(buffers) > 1:
echo_highlight('Jedi did {:d} renames in {:d} buffers!'.format(
len(temp_rename), len(buffers)))
else:
echo_highlight('Jedi did {:d} renames!'.format(len(temp_rename)))
@_check_jedi_availability(show_error=True)
@catch_and_print_exceptions
def py_import():
# args are the same as for the :edit command
args = shsplit(vim.eval('a:args'))
import_path = args.pop()
text = 'import %s' % import_path
scr = jedi.Script(text, 1, len(text), '')
try:
completion = scr.goto_assignments()[0]
except IndexError:
echo_highlight('Cannot find %s in sys.path!' % import_path)
else:
if completion.in_builtin_module():
echo_highlight('%s is a builtin module.' % import_path)
else:
cmd_args = ' '.join([a.replace(' ', '\\ ') for a in args])
new_buffer(completion.module_path, cmd_args)
@catch_and_print_exceptions
def py_import_completions():
argl = vim.eval('a:argl')
try:
import jedi
except ImportError:
print('Pyimport completion requires jedi module: https://github.com/davidhalter/jedi')
comps = []
else:
text = 'import %s' % argl
script = jedi.Script(text, 1, len(text), '')
comps = ['%s%s' % (argl, c.complete) for c in script.completions()]
vim.command("return '%s'" % '\n'.join(comps))
@catch_and_print_exceptions
def new_buffer(path, options=''):
    # `options` are additional arguments appended to the edit command.
if vim_eval('g:jedi#use_tabs_not_buffers') == '1':
_tabnew(path, options)
elif not vim_eval('g:jedi#use_splits_not_buffers') == '1':
user_split_option = vim_eval('g:jedi#use_splits_not_buffers')
split_options = {
'top': 'topleft split',
'left': 'topleft vsplit',
'right': 'botright vsplit',
'bottom': 'botright split',
'winwidth': 'vs'
}
if user_split_option == 'winwidth' and vim.current.window.width <= 2 * int(vim_eval("&textwidth ? &textwidth : 80")):
split_options['winwidth'] = 'sp'
if user_split_option not in split_options:
print('g:jedi#use_splits_not_buffers value is not correct, valid options are: %s' % ','.join(split_options.keys()))
else:
vim_command(split_options[user_split_option] + " %s" % path)
else:
if vim_eval("!&hidden && &modified") == '1':
if vim_eval("bufname('%')") is None:
echo_highlight('Cannot open a new buffer, use `:set hidden` or save your buffer')
return False
else:
vim_command('w')
vim_command('edit %s %s' % (options, escape_file_path(path)))
# sometimes syntax is being disabled and the filetype not set.
if vim_eval('!exists("g:syntax_on")') == '1':
vim_command('syntax enable')
if vim_eval("&filetype != 'python'") == '1':
vim_command('set filetype=python')
return True
@catch_and_print_exceptions
def _tabnew(path, options=''):
"""
Open a file in a new tab or switch to an existing one.
:param options: `:tabnew` options, read vim help.
"""
path = os.path.abspath(path)
if vim_eval('has("gui")') == '1':
vim_command('tab drop %s %s' % (options, escape_file_path(path)))
return
for tab_nr in range(int(vim_eval("tabpagenr('$')"))):
for buf_nr in vim_eval("tabpagebuflist(%i + 1)" % tab_nr):
buf_nr = int(buf_nr) - 1
try:
buf_path = vim.buffers[buf_nr].name
except (LookupError, ValueError):
# Just do good old asking for forgiveness.
# don't know why this happens :-)
pass
else:
if buf_path == path:
# tab exists, just switch to that tab
vim_command('tabfirst | tabnext %i' % (tab_nr + 1))
# Goto the buffer's window.
vim_command('exec bufwinnr(%i) . " wincmd w"' % (buf_nr + 1))
break
else:
continue
break
else:
# tab doesn't exist, add a new one.
vim_command('tabnew %s' % escape_file_path(path))
def escape_file_path(path):
return path.replace(' ', r'\ ')
def print_to_stdout(level, str_out):
print(str_out)
|
|
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = "/home/app/content"
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the calling tool do it.
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
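    # xwalkctl has to talk to the 'app' user's session bus, so such commands are
    # wrapped in `su - app` with the DBus session address exported (XW_ENV).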
if "xwalkctl" in cmd:
cmd = "su - app -c '%s;%s'" % (XW_ENV, cmd)
return cmd
def getPKGID(pkg_name=None):
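    # Parse the `xwalkctl` listing, where each entry line holds the application
    # id followed by the package name, and return the id matching pkg_name.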
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('xwalkctl'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('xwalkctl'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_app_id = None
for line in output:
pkg_infos = line.split()
if len(pkg_infos) == 1:
continue
name = pkg_infos[1]
if pkg_name == name:
test_app_id = pkg_infos[0]
print test_app_id
break
return test_app_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
    # Return True on success so callers can treat a falsy result as a failure.
    return return_code == 0
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"xwalkctl -u %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".wgt"):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"xwalkctl -i %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
|
|
from apollo.choices import PRICE_LIST_PRE_RELEASE
from apollo.viewmixins import LoginRequiredMixin, ActivitySendMixin, StaffRequiredMixin
from applications.price_list.forms import ActivityPriceListItemForm, PriceListForm, PriceListItemEquipmentForm, \
PriceListItemServiceForm, TimePriceListItemForm, UnitPriceListItemForm
from applications.price_list.models import PriceList, ActivityPriceListItem, PriceListItemEquipment, \
PriceListItemService, \
TimePriceListItem, UnitPriceListItem
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse_lazy
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView
def PriceListItemRedirect(request, pl_id=None, item_uuid=None):
"""
    Given a price list id and an item uuid, redirect to the corresponding price
    list item's detail page. If no item exists for these parameters, raise Http404.
"""
price_list = get_object_or_404(PriceList, pk=pl_id)
item = price_list.get_item_from_uuid(item_uuid)
if isinstance(item, ActivityPriceListItem):
return redirect('activity_pricelistitem_detail', pk=item.pk)
elif isinstance(item, TimePriceListItem):
return redirect('time_pricelistitem_detail', pk=item.pk)
elif isinstance(item, UnitPriceListItem):
return redirect('unit_pricelistitem_detail', pk=item.pk)
raise Http404("No item exists for pl: {pl_id} and item uuid: {item_uuid}".format(pl_id=pl_id, item_uuid=item_uuid))
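# Illustrative URL wiring for the redirect above (hypothetical pattern and name;
# the project's actual urls.py is not part of this module):
#     url(r'^price-lists/(?P<pl_id>\d+)/items/(?P<item_uuid>[\w-]+)/$',
#         PriceListItemRedirect, name='pricelistitem_redirect'),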
"""
Price list model generic views.
"""
class PriceListViewList(LoginRequiredMixin, ListView):
context_object_name = "pricelists"
model = PriceList
template_name = "price_list/pricelist_list.html"
def get_context_data(self, **kwargs):
context = super(PriceListViewList, self).get_context_data(**kwargs)
        context['can_create'] = not PriceList.objects.filter(status=PRICE_LIST_PRE_RELEASE).exists()
return context
class PriceListViewDetail(LoginRequiredMixin, DetailView):
context_object_name = 'pricelist'
model = PriceList
template_name = "price_list/pricelist_detail.html"
slug_url_kwarg = 'pl_id'
slug_field = 'id'
def get_context_data(self, **kwargs):
context = super(PriceListViewDetail, self).get_context_data(**kwargs)
context['can_create'] = self.object.status == PRICE_LIST_PRE_RELEASE
return context
class PriceListViewCreate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin, CreateView):
context_object_name = 'pricelist'
model = PriceList
slug_field = 'id'
slug_url_kwarg = 'pl_id'
success_message = "%(name)s was created successfully!"
template_name = "price_list/pricelist_form.html"
form_class = PriceListForm
activity_verb = 'created price list'
def get_success_url(self):
return reverse_lazy('pricelist_detail', kwargs={'pl_id': self.object.pk})
def get_context_data(self, **kwargs):
context = super(PriceListViewCreate, self).get_context_data(**kwargs)
context['action'] = "Create New"
return context
class PriceListViewUpdate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin, UpdateView):
context_object_name = 'pricelist'
model = PriceList
slug_field = 'id'
slug_url_kwarg = 'pl_id'
success_message = "%(name)s was updated successfully!"
template_name = "price_list/pricelist_form.html"
activity_verb = 'updated price list'
fields = "__all__"
def get_success_url(self):
return reverse_lazy('pricelist_detail', kwargs={'pl_id': self.object.pk})
def get_context_data(self, **kwargs):
context = super(PriceListViewUpdate, self).get_context_data(**kwargs)
context['action'] = "Update"
return context
class PriceListViewDelete(LoginRequiredMixin, StaffRequiredMixin, ActivitySendMixin, DeleteView):
context_object_name = 'pricelist'
model = PriceList
slug_field = 'id'
slug_url_kwarg = 'pl_id'
success_url = reverse_lazy('pricelist_list')
template_name = "price_list/pricelist_form.html"
activity_verb = 'deleted price list'
target_object_valid = False
def get_success_url(self):
return self.success_url
def get_context_data(self, **kwargs):
context = super(PriceListViewDelete, self).get_context_data(**kwargs)
context['action'] = "Delete"
return context
"""
Activity Price list item model generic views.
"""
class ActivityPriceListItemViewCreate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin,
CreateView):
context_object_name = 'activityitem'
model = ActivityPriceListItem
template_name = "price_list/activity_pricelistitem_form.html"
success_message = "%(name)s was created successfully!"
form_class = ActivityPriceListItemForm
activity_verb = 'created activity price list item'
def get_success_url(self):
return reverse_lazy('activity_pricelistitem_detail', kwargs={'pk': self.object.pk})
def get_form(self, form_class):
return form_class(pl_id=self.kwargs['pl_id'], **self.get_form_kwargs())
def get_context_data(self, **kwargs):
context = super(ActivityPriceListItemViewCreate, self).get_context_data(**kwargs)
context['action'] = "Create New"
context['pricelist'] = get_object_or_404(PriceList, id=self.kwargs['pl_id'])
return context
class ActivityPriceListItemViewUpdate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin,
UpdateView):
context_object_name = 'activityitem'
model = ActivityPriceListItem
success_message = "%(name)s was updated successfully!"
template_name = "price_list/activity_pricelistitem_form.html"
form_class = ActivityPriceListItemForm
activity_verb = 'updated activity price list item'
def get_success_url(self):
return reverse_lazy('activity_pricelistitem_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(ActivityPriceListItemViewUpdate, self).get_context_data(**kwargs)
context['action'] = "Update"
context['pricelist'] = self.object.price_list
return context
class ActivityPriceListItemViewDelete(LoginRequiredMixin, StaffRequiredMixin, ActivitySendMixin, DeleteView):
context_object_name = 'activityitem'
model = ActivityPriceListItem
template_name = "price_list/activity_pricelistitem_form.html"
target_object_valid = False
activity_verb = 'deleted activity price list item'
def get_success_url(self):
return reverse_lazy('pricelist_detail', kwargs={'pl_id': self.object.price_list.pk})
def get_context_data(self, **kwargs):
context = super(ActivityPriceListItemViewDelete, self).get_context_data(**kwargs)
context['action'] = "Delete"
context['pricelist'] = self.object.price_list
return context
class ActivityPriceListItemViewDetail(LoginRequiredMixin, DetailView):
context_object_name = 'activityitem'
model = ActivityPriceListItem
template_name = "price_list/activity_pricelistitem_detail.html"
def get_context_data(self, **kwargs):
context = super(ActivityPriceListItemViewDetail, self).get_context_data(**kwargs)
context['can_create'] = self.object.price_list.status == PRICE_LIST_PRE_RELEASE
context['pricelist'] = self.object.price_list
context['equipmentplir_set'] = PriceListItemEquipment.objects.filter(item_uuid=self.object.item_uuid,
price_list=self.object.price_list)
context['serviceplir_set'] = PriceListItemService.objects.filter(item_uuid=self.object.item_uuid,
price_list=self.object.price_list)
return context
"""
Time Price list item model generic views.
"""
class TimePriceListItemViewCreate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin,
CreateView):
context_object_name = 'timeitem'
model = TimePriceListItem
template_name = "price_list/time_pricelistitem_form.html"
success_message = "%(name)s was created successfully!"
form_class = TimePriceListItemForm
activity_verb = 'created time price list item'
def get_success_url(self):
return reverse_lazy('time_pricelistitem_detail', kwargs={'pk': self.object.pk})
def get_form(self, form_class):
return form_class(pl_id=self.kwargs['pl_id'], **self.get_form_kwargs())
def get_context_data(self, **kwargs):
context = super(TimePriceListItemViewCreate, self).get_context_data(**kwargs)
context['action'] = "Create New"
context['pricelist'] = get_object_or_404(PriceList, id=self.kwargs['pl_id'])
return context
class TimePriceListItemViewUpdate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin,
UpdateView):
context_object_name = 'timeitem'
model = TimePriceListItem
success_message = "%(name)s was updated successfully!"
template_name = "price_list/time_pricelistitem_form.html"
form_class = TimePriceListItemForm
activity_verb = 'updated time price list item'
def get_success_url(self):
return reverse_lazy('time_pricelistitem_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(TimePriceListItemViewUpdate, self).get_context_data(**kwargs)
context['action'] = "Update"
context['pricelist'] = self.object.price_list
return context
class TimePriceListItemViewDelete(LoginRequiredMixin, StaffRequiredMixin, ActivitySendMixin, DeleteView):
context_object_name = 'timeitem'
model = TimePriceListItem
template_name = "price_list/time_pricelistitem_form.html"
activity_verb = 'deleted time price list item'
target_object_valid = False
def get_success_url(self):
return reverse_lazy('pricelist_detail', kwargs={'pl_id': self.object.price_list.pk})
def get_context_data(self, **kwargs):
context = super(TimePriceListItemViewDelete, self).get_context_data(**kwargs)
context['action'] = "Delete"
context['pricelist'] = self.object.price_list
return context
class TimePriceListItemViewDetail(LoginRequiredMixin, DetailView):
context_object_name = 'timeitem'
model = TimePriceListItem
template_name = "price_list/time_pricelistitem_detail.html"
def get_context_data(self, **kwargs):
context = super(TimePriceListItemViewDetail, self).get_context_data(**kwargs)
context['can_create'] = self.object.price_list.status == PRICE_LIST_PRE_RELEASE
context['pricelist'] = self.object.price_list
context['equipmentplir_set'] = PriceListItemEquipment.objects.filter(item_uuid=self.object.item_uuid,
price_list=self.object.price_list)
context['serviceplir_set'] = PriceListItemService.objects.filter(item_uuid=self.object.item_uuid,
price_list=self.object.price_list)
return context
"""
Unit Price list item model generic views.
"""
class UnitPriceListItemViewCreate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin,
CreateView):
context_object_name = 'unititem'
model = UnitPriceListItem
template_name = "price_list/unit_pricelistitem_form.html"
success_message = "%(name)s was created successfully!"
form_class = UnitPriceListItemForm
activity_verb = 'created unit price list item'
def get_success_url(self):
return reverse_lazy('unit_pricelistitem_detail', kwargs={'pk': self.object.pk})
def get_form(self, form_class):
return form_class(pl_id=self.kwargs['pl_id'], **self.get_form_kwargs())
def get_context_data(self, **kwargs):
context = super(UnitPriceListItemViewCreate, self).get_context_data(**kwargs)
context['action'] = "Create New"
context['pricelist'] = get_object_or_404(PriceList, id=self.kwargs['pl_id'])
return context
class UnitPriceListItemViewUpdate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin,
UpdateView):
context_object_name = 'unititem'
model = UnitPriceListItem
success_message = "%(name)s was updated successfully!"
template_name = "price_list/unit_pricelistitem_form.html"
form_class = UnitPriceListItemForm
activity_verb = 'updated unit price list item'
def get_success_url(self):
return reverse_lazy('unit_pricelistitem_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(UnitPriceListItemViewUpdate, self).get_context_data(**kwargs)
context['action'] = "Update"
context['pricelist'] = self.object.price_list
return context
class UnitPriceListItemViewDelete(LoginRequiredMixin, StaffRequiredMixin, ActivitySendMixin, DeleteView):
context_object_name = 'unititem'
model = UnitPriceListItem
template_name = "price_list/unit_pricelistitem_form.html"
activity_verb = 'deleted unit price list item'
target_object_valid = False
def get_success_url(self):
return reverse_lazy('pricelist_detail', kwargs={'pl_id': self.object.price_list.pk})
def get_context_data(self, **kwargs):
context = super(UnitPriceListItemViewDelete, self).get_context_data(**kwargs)
context['action'] = "Delete"
context['pricelist'] = self.object.price_list
return context
class UnitPriceListItemViewDetail(LoginRequiredMixin, DetailView):
context_object_name = 'unititem'
model = UnitPriceListItem
template_name = "price_list/unit_pricelistitem_detail.html"
def get_context_data(self, **kwargs):
context = super(UnitPriceListItemViewDetail, self).get_context_data(**kwargs)
context['can_create'] = self.object.price_list.status == PRICE_LIST_PRE_RELEASE
context['pricelist'] = self.object.price_list
context['equipmentplir_set'] = PriceListItemEquipment.objects.filter(item_uuid=self.object.item_uuid,
price_list=self.object.price_list)
context['serviceplir_set'] = PriceListItemService.objects.filter(item_uuid=self.object.item_uuid,
price_list=self.object.price_list)
return context
"""
Price List Item Equipment Relation Model generic views.
"""
class PriceListItemEquipmentViewCreate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin,
CreateView):
context_object_name = 'equipmentplir'
model = PriceListItemEquipment
template_name = "price_list/equipment_pricelistitem_form.html"
success_message = "'%(item_uuid)s: %(equipment)s x %(count)s' was added successfully!"
form_class = PriceListItemEquipmentForm
activity_verb = 'created equipment price list item relation'
def get_form(self, form_class):
return form_class(pl_id=self.kwargs['pl_id'], item_uuid=self.kwargs['item_uuid'], **self.get_form_kwargs())
def get_success_url(self):
return reverse_lazy('equipment_pricelistitem_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(PriceListItemEquipmentViewCreate, self).get_context_data(**kwargs)
context['pricelist'] = get_object_or_404(PriceList, pk=self.kwargs['pl_id'])
return context
class PriceListItemEquipmentViewDetail(LoginRequiredMixin, DetailView):
context_object_name = 'equipmentplir'
model = PriceListItemEquipment
template_name = "price_list/equipment_pricelistitem_detail.html"
def get_context_data(self, **kwargs):
context = super(PriceListItemEquipmentViewDetail, self).get_context_data(**kwargs)
context['can_create'] = self.object.price_list.status == PRICE_LIST_PRE_RELEASE
return context
class PriceListItemEquipmentViewUpdate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin,
UpdateView):
context_object_name = 'equipmentplir'
model = PriceListItemEquipment
template_name = "price_list/equipment_pricelistitem_form.html"
success_message = "'%(item_uuid)s: %(equipment)s x %(count)s' was updated successfully!"
form_class = PriceListItemEquipmentForm
activity_verb = 'updated equipment price list item relation'
def get_success_url(self):
return reverse_lazy('equipment_pricelistitem_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(PriceListItemEquipmentViewUpdate, self).get_context_data(**kwargs)
context['pricelist'] = self.object.price_list
return context
class PriceListItemEquipmentViewDelete(LoginRequiredMixin, StaffRequiredMixin, ActivitySendMixin, DeleteView):
context_object_name = 'equipmentplir'
model = PriceListItemEquipment
template_name = "price_list/equipment_pricelistitem_form.html"
activity_verb = 'deleted equipment price list item relation'
target_object_valid = False
def get_success_url(self):
        return reverse_lazy('pricelist_detail', kwargs={'pl_id': self.object.price_list.pk})
def get_context_data(self, **kwargs):
context = super(PriceListItemEquipmentViewDelete, self).get_context_data(**kwargs)
context['pricelist'] = self.object.price_list
return context
"""
Price List Item Service Relation Model generic views.
"""
class PriceListItemServiceViewCreate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin,
CreateView):
context_object_name = 'serviceplir'
model = PriceListItemService
template_name = "price_list/service_pricelistitem_form.html"
success_message = "'%(item_uuid)s: %(service)s x %(count)s' was added successfully!"
form_class = PriceListItemServiceForm
activity_verb = 'created service price list item relation'
def get_form(self, form_class):
return form_class(pl_id=self.kwargs['pl_id'], item_uuid=self.kwargs['item_uuid'], **self.get_form_kwargs())
def get_success_url(self):
return reverse_lazy('service_pricelistitem_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(PriceListItemServiceViewCreate, self).get_context_data(**kwargs)
context['pricelist'] = get_object_or_404(PriceList, pk=self.kwargs['pl_id'])
return context
class PriceListItemServiceViewDetail(LoginRequiredMixin, DetailView):
context_object_name = 'serviceplir'
model = PriceListItemService
template_name = "price_list/service_pricelistitem_detail.html"
def get_context_data(self, **kwargs):
context = super(PriceListItemServiceViewDetail, self).get_context_data(**kwargs)
context['can_create'] = self.object.price_list.status == PRICE_LIST_PRE_RELEASE
return context
class PriceListItemServiceViewUpdate(LoginRequiredMixin, StaffRequiredMixin, SuccessMessageMixin, ActivitySendMixin,
UpdateView):
context_object_name = 'serviceplir'
model = PriceListItemService
template_name = "price_list/service_pricelistitem_form.html"
success_message = "'%(item_uuid)s: %(service)s x %(count)s' was updated successfully!"
form_class = PriceListItemServiceForm
activity_verb = 'updated service price list item relation'
def get_success_url(self):
return reverse_lazy('service_pricelistitem_detail', kwargs={'pk': self.object.pk})
def get_context_data(self, **kwargs):
context = super(PriceListItemServiceViewUpdate, self).get_context_data(**kwargs)
context['pricelist'] = self.object.price_list
return context
class PriceListItemServiceViewDelete(LoginRequiredMixin, StaffRequiredMixin, ActivitySendMixin, DeleteView):
context_object_name = 'serviceplir'
model = PriceListItemService
template_name = "price_list/service_pricelistitem_form.html"
target_object_valid = False
activity_verb = 'deleted service price list item relation'
def get_success_url(self):
return reverse_lazy('pricelist_detail', kwargs={'pl_id': self.object.price_list.pk})
def get_context_data(self, **kwargs):
context = super(PriceListItemServiceViewDelete, self).get_context_data(**kwargs)
context['pricelist'] = self.object.price_list
return context
|
|
from datetime import timedelta
from optparse import make_option
from random import choice, shuffle, randint
from django.contrib.auth.models import User
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.utils import timezone
from django.utils.text import slugify
from us_ignite.apps.models import (
Application,
Domain,
Feature,
Page,
PageApplication,
)
from us_ignite.blog.models import BlogLink, Post
from us_ignite.challenges.models import Challenge, Entry, Question
from us_ignite.dummy import text, images, locations
from us_ignite.events.models import Event
from us_ignite.hubs.models import Hub, HubMembership
from us_ignite.maps.models import Category, Location
from us_ignite.news.models import Article
from us_ignite.organizations.models import Organization, OrganizationMember
from us_ignite.profiles.models import Profile
from us_ignite.resources.models import Resource, ResourceType, Sector
from us_ignite.testbeds.models import Testbed, NetworkSpeed
from taggit.models import Tag
def _choice(*args):
"""Choice between the args and an empty string."""
return choice([''] + list(args))
def _get_start_date():
days = choice(range(-5, 50))
return timezone.now() + timedelta(days=days)
def _create_users():
users = ['banana', 'apple', 'orange', 'cherry', 'lemon', 'grape']
profile_list = []
for f in users:
email = '%[email protected]' % f
user, is_new = User.objects.get_or_create(
username=f, is_active=True, email=email)
if is_new and choice([True, False]):
data = {
'quote': text.random_words(9)[:140],
'bio': text.random_paragraphs(2),
'position': locations.get_location(),
'user': user,
'name': f,
'availability': choice(Profile.AVAILABILITY_CHOICES)[0],
}
profile = Profile.objects.create(**data)
_add_tags(profile)
profile_list.append(profile)
return profile_list
def _get_user():
return User.objects.all().order_by('?')[0]
def _get_url():
return u'http://us-ignite.org'
def _get_domain():
return Domain.objects.all().order_by('?')[0]
def _get_hub():
return Hub.objects.filter(status=Hub.PUBLISHED).order_by('?')[0]
def _create_organization_membership(organization):
for user in User.objects.all().order_by('?')[:3]:
data = {
'organization': organization,
'user': user,
}
OrganizationMember.objects.create(**data)
def _create_organization():
name = text.random_words(3)
image_name = u'%s.png' % slugify(text.random_words(1))
data = {
'name': name.title(),
'slug': slugify(name),
'status': choice(Organization.STATUS_CHOICES)[0],
'bio': _choice(text.random_words(30)),
'image': images.random_image(image_name),
'position': locations.get_location(),
'interest_ignite': _choice(text.random_paragraphs(1)),
'interests_other': _choice(text.random_words(5)),
'resources_available': _choice(text.random_paragraphs(1)),
}
organization = Organization.objects.create(**data)
_add_tags(organization)
_create_organization_membership(organization)
return organization
def _get_organization():
return Organization.objects.all().order_by('?')[0]
def _create_app():
image_name = images.random_image(u'%s.png' % text.random_words(1))
data = {
'name': text.random_words(3).title(),
'stage': choice(Application.STAGE_CHOICES)[0],
'status': choice(Application.STATUS_CHOICES)[0],
'website': _get_url(),
'summary': _choice(text.random_words(20))[:140],
'impact_statement': text.random_words(20)[:140],
'assistance': _choice(text.random_words(30)),
'team_name': _choice(text.random_words(5)),
'team_description': _choice(text.random_words(30)),
'acknowledgments': _choice(text.random_words(30)),
'domain': _get_domain(),
'is_featured': choice([True, False]),
'owner': _get_user(),
'image': image_name,
}
application = Application.objects.create(**data)
_add_tags(application)
_add_features(application)
return application
def _create_page():
data = {
'name': text.random_words(3).title(),
'status': choice(Application.STATUS_CHOICES)[0],
'description': text.random_paragraphs(2),
}
page = Page.objects.create(**data)
app_list = (Application.objects
.filter(status=Application.PUBLISHED).order_by('?')[:10])
for i, app in enumerate(app_list):
PageApplication.objects.create(page=page, application=app, order=i)
def _create_hub_membership(hub):
for user in User.objects.all().order_by('?')[:3]:
data = {
'hub': hub,
'user': user,
}
HubMembership.objects.create(**data)
def _create_hub():
image_name = images.random_image(u'%s.png' % text.random_words(1))
data = {
'name': text.random_words(3).title(),
'summary': text.random_words(10),
'description': text.random_paragraphs(3),
'contact': choice([None, _get_user()]),
'image': image_name,
'website': _get_url(),
'status': choice(Hub.STATUS_CHOICES)[0],
'is_featured': choice([True, False]),
'position': locations.get_location(),
}
hub = Hub.objects.create(**data)
_create_hub_membership(hub)
_add_tags(hub)
_add_features(hub)
return hub
def _create_event():
start_date = _get_start_date()
end_date = start_date + timedelta(hours=5)
data = {
'name': text.random_words(5),
'status': choice(Event.STATUS_CHOICES)[0],
'image': images.random_image(u'%s.png' % text.random_words(1)),
'start_datetime': start_date,
'end_datetime': choice([None, end_date]),
'address': text.random_words(7),
'description': text.random_paragraphs(2),
'is_featured': choice([True, False]),
'user': _get_user(),
'position': locations.get_location(),
}
event = Event.objects.create(**data)
_add_tags(event)
for i in range(0, 3):
event.hubs.add(_get_hub())
return event
def _create_challenge():
start_date = _get_start_date()
end_date = start_date + timedelta(days=15)
data = {
'name': text.random_words(5).title(),
'status': choice(Challenge.STATUS_CHOICES)[0],
'start_datetime': start_date,
'end_datetime': end_date,
'url': _get_url(),
'is_external': choice([True, False]),
'summary': text.random_paragraphs(1),
'description': text.random_paragraphs(3),
'image': images.random_image(u'%s.png' % text.random_words(1)),
'user': _get_user(),
}
challenge = Challenge.objects.create(**data)
for i in range(0, 10):
_create_question(challenge, i)
_create_entries(challenge)
return challenge
def _create_question(challenge, order=0):
data = {
'challenge': challenge,
'question': u'%s?' % text.random_words(7),
'is_required': choice([True, False]),
'order': order,
}
return Question.objects.create(**data)
def _create_entries(challenge):
apps = list(Application.objects.all().order_by('?'))
entry_list = []
for i in range(0, choice(range(1, 10))):
data = {
'challenge': challenge,
'application': apps.pop(),
'status': choice(Entry.STATUS_CHOICES)[0],
'notes': _choice(text.random_words(10)),
}
entry = Entry.objects.create(**data)
entry_list.append(entry)
return entry_list
def _get_resource_type():
return ResourceType.objects.all().order_by('?')[0]
def _get_sector():
return Sector.objects.all().order_by('?')[0]
def _create_resource():
name = text.random_words(4)
data = {
'name': name.title(),
'slug': slugify(name),
'status': choice(Resource.STATUS_CHOICES)[0],
'description': text.random_paragraphs(1),
'contact': _get_user(),
'author': _choice(text.random_words(10)),
'resource_date': choice([_get_start_date(), None]),
'url': _get_url(),
'is_featured': choice([True, False]),
'image': images.random_image(u'%s.png' % text.random_words(1)),
'resource_type': _get_resource_type(),
'sector': _get_sector(),
}
resource = Resource.objects.create(**data)
_add_tags(resource)
return resource
def _feature_posts():
for post in Post.objects.all().order_by('?')[:5]:
post.is_featured = True
post.save()
_add_tags(post)
def _create_article():
data = {
'name': text.random_words(7).title(),
'status': choice(Article.STATUS_CHOICES)[0],
'url': _get_url(),
'is_featured': choice([True, False]),
}
return Article.objects.create(**data)
def _create_blog_link():
data = {
'name': text.random_words(6).title(),
'url': _get_url(),
}
return BlogLink.objects.create(**data)
def _create_location_category():
name = text.random_words(2).title()
data = {
'name': name,
'slug': slugify(name),
}
return Category.objects.create(**data)
def _get_location_category():
return Category.objects.all().order_by('?')[0]
def _create_location():
data = {
'name': text.random_words(4).title(),
'website': _get_url(),
'status': choice(Location.STATUS_CHOICES)[0],
'position': locations.get_location(),
'category': _get_location_category(),
}
return Location.objects.create(**data)
def _get_tags(total=5):
tags = ['gigabit', 'healthcare', 'education', 'energy']
tags += [slugify(w) for w in text.random_words(total).split()]
shuffle(tags)
return tags[:total]
def _add_tags(item):
tags = _get_tags()
item.tags.add(*tags)
return tags
def _feature_tags():
Tag.objects.all().update(is_featured=True)
def _add_features(item, total=3):
features = Feature.objects.all().order_by('?')[:total]
return [item.features.add(f) for f in features]
def _add_applications(item, total=3):
apps = Application.objects.all().order_by('?')[:total]
return [item.applications.add(a) for a in apps]
def _create_testbed():
image_name = images.random_image(u'%s.png' % slugify(text.random_words(1)))
data = {
'name': text.random_words(3).title(),
'summary': text.random_words(10),
'description': text.random_paragraphs(2),
'contact': choice([None, _get_user()]),
'organization': choice([None, _get_organization()]),
'website': _get_url(),
'image': image_name,
'network_speed': NetworkSpeed.objects.all().order_by('?')[0],
'connections': text.random_paragraphs(1),
'experimentation': choice(Testbed.EXPERIMENTATION_CHOICES)[0],
'passes_homes': randint(0, 100),
'passes_business': randint(0, 100),
'passes_anchor': randint(0, 100),
'is_advanced': choice([True, False]),
'position': locations.get_location(),
'status': choice(Testbed.STATUS_CHOICES)[0],
}
testbed = Testbed.objects.create(**data)
_add_tags(testbed)
_add_features(testbed)
_add_applications(testbed)
return testbed
def _load_fixtures():
"""Loads initial fixtures"""
call_command('app_load_fixtures')
call_command('awards_load_fixtures')
call_command('common_load_fixtures')
call_command('snippets_load_fixtures')
call_command('events_load_fixtures')
call_command('resources_load_fixtures')
call_command('testbeds_load_fixtures')
call_command('sections_load_fixtures')
call_command('blog_import')
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option(
'--noinput',
action='store_true',
dest='noinput',
default=False,
help='Does not ask for any user input.'),
)
def handle(self, *args, **options):
if not options['noinput']:
            message = ('This command will IRREVERSIBLY poison the existing '
'database by adding dummy content and images. '
'Proceed? [y/N] ')
response = raw_input(message)
if not response or not response == 'y':
print 'Phew, aborted!'
exit(0)
print u'Loading initial fixtures.'
_load_fixtures()
print u'Featuring Posts'
_feature_posts()
print u'Adding users.'
_create_users()
print u'Adding organizations.'
for i in range(30):
_create_organization()
print u'Adding applications.'
for i in range(40):
_create_app()
print u'Adding app pages.'
for i in range(10):
_create_page()
print u'Adding hubs.'
for i in range(40):
_create_hub()
print u'Adding testbeds.'
for i in range(40):
_create_testbed()
print u'Adding events.'
for i in range(30):
_create_event()
print u'Adding challenges.'
for i in range(30):
_create_challenge()
print u'Adding resources.'
for i in range(30):
_create_resource()
print u'Adding articles.'
for i in range(30):
_create_article()
print u'Adding blog links.'
for i in range(15):
_create_blog_link()
print u'Adding location categories.'
for i in range(6):
_create_location_category()
print u'Adding locations.'
for i in range(50):
_create_location()
print u'Featuring tags.'
_feature_tags()
print u'Done.'
|
|
#!/usr/bin/python
import dateutil.parser
import json
import MySQLdb
import MySQLdb.cursors
import subprocess
import random
import traceback
import twitter
class UserCard:
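    # One player's bingo card: 24 goal squares stored in MySQL (the centre
    # square is an implicit free space), each of which may carry the tweet that
    # "daubed" it.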
def __init__(self,user):
self.user = user
self.tiles = []
self.goals = {}
self.refreshFromDB()
return
def hasCard(self):
return len(self.tiles) > 0
def refreshFromDB(self):
mysql_cur.execute("""SELECT user_card_square_id,ucs.goal_id,hashtag,position,daub_tweet_id,embed_code,image_url
FROM user_card_squares as ucs
JOIN goals USING(goal_id)
LEFT JOIN daub_tweets USING(daub_tweet_id)
WHERE ucs.user_id = %s
ORDER BY position ASC""",(self.user['id'],))
self.tiles=list(mysql_cur.fetchall())
self.goals = {t['hashtag'].lower():t for t in self.tiles}
def createCard(self):
mysql_cur.execute("SELECT goal_id FROM goals")
tile_goals = random.sample(mysql_cur.fetchall(),24)
insert_squares = [(self.user['id'],tile_goals[i]['goal_id'],i) for i in xrange(24)]
mysql_cur.executemany("INSERT INTO user_card_squares (user_id,goal_id,position) VALUES (%s,%s,%s)",insert_squares)
mysql_cur.execute("INSERT INTO users (user_id,screen_name,profile_image_url) VALUES (%s,%s,%s)",
(self.user['id'],self.user['screen_name'],self.user['profile_image_url']))
self.refreshFromDB()
def hasGoal(self,goal):
return goal in self.goals
def findHashtag(self,hashtags):
for h in hashtags:
if self.hasGoal(h.lower()):
return h.lower()
return None
def goalDaubed(self,goal):
return (goal in self.goals) and (self.goals[goal]['daub_tweet_id'] is not None)
def getBingoLines(self):
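        # The 24 stored tiles map onto a 5x5 grid whose centre square is free,
        # so grid indices past the centre shift down by one; any row, column or
        # diagonal that passes through the centre therefore lists only four
        # tile indices.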
return [[0,1,2,3,4],
[5,6,7,8,9],
[10,11,12,13],
[14,15,16,17,18],
[19,20,21,22,23],
[0,5,10,14,19],
[1,6,11,15,20],
[2,7,16,21],
[3,8,12,17,22],
[4,9,13,18,23],
[0,6,17,23],
[4,8,15,19]]
def daubsLeft(self):
daubs_left=4
for line in self.getBingoLines():
daubs_left = min(daubs_left,len([True for tile in line if self.tiles[tile]['daub_tweet_id'] is None]))
return daubs_left
def hasBingo(self):
if not self.hasCard():
return False
return self.daubsLeft()==0
def totalDaubs(self):
return len([True for tile in self.tiles if tile['daub_tweet_id'] is not None])
def hasBlackout(self):
if not self.hasCard():
return False
return self.totalDaubs() == 24
def markSquare(self,hashtag,tweet):
if not self.hasCard():
return
daub_tweet = (tweet.getID(),tweet.getUser()['id'],self.goals[hashtag]['goal_id'],tweet.getCreatedAt(),tweet.getPic())
mysql_cur.execute("INSERT INTO daub_tweets (daub_tweet_id,user_id,goal_id,created_at,image_url) VALUES (%s,%s,%s,%s,%s)",daub_tweet)
mysql_cur.execute("UPDATE user_card_squares SET daub_tweet_id = %s WHERE user_card_square_id = %s",(tweet.getID(),self.goals[hashtag]['user_card_square_id']))
self.refreshFromDB()
mysql_cur.execute("UPDATE users SET daubs_left=%s,total_daubs=%s WHERE user_id = %s",(self.daubsLeft(),self.totalDaubs(),self.user['id']))
def suggestions(self):
return None
def renderCard(self):
# tilehtml = []
# for t in self.tiles:
# if t['daub_tweet_id'] is None:
# tilehtml.append('#%s'%t['hashtag'])
# else:
# tilehtml.append('<img src="%s"/>'%t['image_url'])
# tilehtml = tuple(tilehtml)
# with open("foo.html","wb") as fh:
# fh.write("""<html><head>
# <style>
# * {
# margin:0;
# border:0;
# padding:0;
# background:white;
# }
# table {
# border:1px solid black;
# }
# td {
# width:150px;
# height:150px;
# max-width:150px;
# max-height:150px;
# border:1px solid black;
# text-align: center;
# }
# img {
# max-height:100%%;
# max-width:100%%;
# }
# </style>
# </head><body><table>
# <tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>
# <tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>
# <tr><td>%s</td><td>%s</td><td>FREE</td><td>%s</td><td>%s</td></tr>
# <tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>
# <tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>
# </table></body></html>""" % tilehtml)
# subprocess.call(["phantomjs", "rasterize.js", "foo.html", "foo.png", "750px*750px"])
subprocess.call(["phantomjs", "rasterize.js", "http://localhost:5000/card/"+self.user['screen_name']+"?fortwitter", "foo.png", "750px*870px"])
with open("foo.png") as fh:
img_data = fh.read()
return img_data
class InputTweet:
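    # Handles one incoming tweet end-to-end: retweets and tweets that do not
    # mention us are ignored; a first direct mention creates a card; otherwise
    # the attached photo and hashtag are matched against the card, the square is
    # daubed and a reply (plus a public bingo/blackout announcement) is sent.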
def __init__(self, t):
self.t = t
if self.isRetweet():
return # ignore
if not self.mentionsUs():
return # ignore
self.user_card = UserCard(self.getUser())
tagged_goal = self.user_card.findHashtag(self.getHashtags())
goal_story = None
if tagged_goal is not None:
mysql_cur.execute("select url from goals where hashtag = %s",(tagged_goal,))
for row in mysql_cur:
goal_story = row['url']
if not self.user_card.hasCard():
if self.isDirectlyAtUs():
self.user_card.createCard()
self.sendReply("Welcome to the game!")
else:
pass # ignore
elif self.getPic() is None:
if self.isDirectlyAtUs():
self.sendReply("Thanks for playing!")
else:
pass # ignore
elif len(self.getHashtags()) == 0:
self.sendReply("Did you mean to add a hashtag? I can't mark your card without one!")
elif tagged_goal is None:
self.sendReply("Whoops, that one's not on your card.")
elif self.user_card.goalDaubed(tagged_goal):
self.sendReply("Looks like you've already spotted #%s"%tagged_goal)
else:
had_bingo = self.user_card.hasBingo()
self.user_card.markSquare(tagged_goal,self)
if had_bingo:
if self.user_card.hasBlackout():
self.sendReply("You filled your card! What a champion.")
self.sendPublic("Wow! @%s just filled their bingo card!",(self.getScreenName(),tagged_goal))
else:
if goal_story is not None and len(goal_story) > 0:
self.sendReply("Nice #%s! Check this out: %s"%(tagged_goal,goal_story))
else:
self.sendReply("Nice #%s! "%tagged_goal)
elif self.user_card.hasBingo():
self.sendReply("Congratulations, that's Bingo! You're welcome to keep going...")
self.sendPublic("BINGO! @%s just spotted #%s to win bingo."(self.getScreenName(),tagged_goal))
else:
if goal_story is not None and len(goal_story) > 0:
self.sendReply("Nice #%s! Check this out: %s"%(tagged_goal,goal_story))
else:
self.sendReply("Nice #%s! "%tagged_goal)
    def isRetweet(self):
        if 'retweeted_status' in self.t:
            return True
        elif self.t['text'].startswith("RT"):
            return True
        else:
            return False
    def mentionsUs(self):
        for m in self.t['entities']['user_mentions']:
            if m['screen_name'] == config['twitter']['screen_name']:
                return True
        return False
    def isDirectlyAtUs(self):
        for m in self.t['entities']['user_mentions']:
            if m['indices'][0]==0 and m['screen_name'] == config['twitter']['screen_name']:
                return True
        return False
def getID(self):
return self.t['id']
def getUser(self):
return self.t['user']
def getScreenName(self):
return self.t['user']['screen_name']
def getCreatedAt(self):
ca = dateutil.parser.parse(self.t['created_at'])
if ca.utcoffset().total_seconds()<1.0:
ca = ca.replace(tzinfo=None) # MySQL doesn't know about timezones, so we're doing this so it doesn't show a warning
else:
raise ValueError('Tweet created_at is not in UTC.')
return ca
def getPic(self):
        if 'media' in self.t['entities'] and len(self.t['entities']['media'])>0:
            return self.t['entities']['media'][0]['media_url']
else:
return None
# def getEmbedCode(self):
# return twit.statuses.oembed(_id = self.t['id'])['html']
def getHashtags(self):
        return [h['text'].lower() for h in self.t['entities']['hashtags']]
def sendReply(self,message):
img_data = self.user_card.renderCard()
# CHANGE URL HERE
status = ("@%s %s Your card: http://nicarbingo.com:5000/card/%s " % (self.getScreenName(),message,self.getScreenName())).encode('ascii','ignore')
status = status.encode('ascii','ignore')
id_str = self.t['id_str'].encode('ascii','ignore')
print status, id_str
# print twit.statuses.update(**{"status":status,"in_reply_to_status_id":self.t['id']})
print twit.statuses.update_with_media(**{"status":status,"in_reply_to_status_id":id_str,"media[]":img_data})
def sendPublic(self,message):
return
with open('config.json') as fh:
config = json.load(fh)
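# The keys read from config.json elsewhere in this script suggest roughly the
# following shape (values below are placeholders, not real credentials):
#
#     {
#         "mysql": {"host": "...", "port": 3306, "user": "...",
#                   "password": "...", "database": "..."},
#         "twitter": {"screen_name": "...", "consumer_key": "...",
#                     "consumer_secret": "...", "access_token": "...",
#                     "access_token_secret": "..."}
#     }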
mysql_conn = MySQLdb.connect(
host=config['mysql']['host'],
port=config['mysql']['port'],
user=config['mysql']['user'],
passwd=config['mysql']['password'],
db=config['mysql']['database'],
use_unicode=True,
charset="utf8mb4",
cursorclass = MySQLdb.cursors.DictCursor)
mysql_conn.autocommit(True)
mysql_cur = mysql_conn.cursor()
mysql_cur.execute("SET time_zone='+0:00'")
auth=twitter.OAuth( config['twitter']['access_token'],
config['twitter']['access_token_secret'],
config['twitter']['consumer_key'],
config['twitter']['consumer_secret'])
twit = twitter.Twitter(auth=auth)
twitstream = twitter.TwitterStream(auth=auth, domain='userstream.twitter.com')
user_stream = twitstream.user(**{"stall_warnings":True,"with":"user"})
def doTweet(tweet):
print tweet
if "text" in tweet:
try:
mysql_conn.ping(True)
InputTweet(tweet)
except (KeyboardInterrupt, SystemExit):
raise
except:
traceback.print_exc()
for t in user_stream:
doTweet(t)
# print t.help.configuration()
# print t.account.verify_credentials()
|
|
# coding: utf-8
# Copyright Luna Technology 2015
# Matthieu Riviere <[email protected]>
import json
import os.path
import cbs
from django.core.exceptions import ImproperlyConfigured
from luna_django_commons.settings.mixins import (
AssetsSettings,
DebugToolbarSettings,
EmailSettings,
StatsdSettings,
TemplateSettings,
)
class BaseSettings(
DebugToolbarSettings,
StatsdSettings,
AssetsSettings,
EmailSettings,
TemplateSettings,
cbs.BaseSettings
):
# Overrideable settings
USE_SSL = False
PRODUCTION = False
DEBUG = True
@property
def PROJECT_BASENAME(self):
raise ImproperlyConfigured('You must set PROJECT_BASENAME')
@property
def DEPLOYMENT_BASENAME(self):
raise ImproperlyConfigured('You must set DEPLOYMENT_BASENAME')
@property
def BASE_URL(self):
raise ImproperlyConfigured('You must set BASE_URL')
@property
def SETTINGS_DIR(self):
raise ImproperlyConfigured('You must set SETTINGS_DIR')
#
# Helpers
#
def here(self, *dirs):
return os.path.join(self.SETTINGS_DIR, *dirs)
@property
def BASE_DIR(self):
return self.here('..', '..')
def root(self, *dirs):
return os.path.join(os.path.abspath(self.BASE_DIR), *dirs)
_secrets = None
def get_secret(self, setting):
""" Get the secret variable or return explicit exception """
if self._secrets is None:
with open(self.here('secrets.json')) as f:
self._secrets = json.loads(f.read())
try:
return self._secrets[setting]
except KeyError:
error_msg = "Set the {0} variable in secrets.json".format(setting)
raise ImproperlyConfigured(error_msg)
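    # Following the method-as-setting pattern used for LOGGING and DATABASES
    # below, a subclass can expose a secret as a lazily evaluated setting,
    # e.g. (hypothetical key name, not part of this module):
    #
    #     def SECRET_KEY(self):
    #         return self.get_secret('SECRET_KEY')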
def LOGGING(self):
if self.PRODUCTION:
return {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'json': {
'()': 'luna_django_commons.log.SysLogFormatter',
}
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'context_filter': {
'()': 'luna_django_commons.log.ContextFilter',
'DEPLOYMENT_BASENAME': self.DEPLOYMENT_BASENAME,
},
'celery_context_filter': {
'()': 'luna_django_commons.log.CeleryContextFilter',
}
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'syslog_json': {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
'formatter': 'json',
'address': '/dev/log',
'filters': ['context_filter', 'celery_context_filter']
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins', 'syslog_json'],
'level': 'DEBUG',
'propagate': False,
},
'django.security.DisallowedHost': {
'handlers': ['null'],
'propagate': False,
},
'django.security': {
'handlers': ['mail_admins', 'syslog_json'],
'level': 'DEBUG',
'propagate': False,
},
'': {
'handlers': ['syslog_json'],
'level': 'DEBUG',
'propagate': True,
}
}
}
else:
return {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] %(message)s'
}
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
}
},
'loggers': {
'django.request': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
'': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
}
}
}
STATIC_URL = '/static/'
ASSETS_URL = '/static/'
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
)
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
'django_assets.finders.AssetsFinder',
)
DB_NAME = None
DB_USER = None
DB_PASS = None
DB_HOST = '127.0.0.1'
DB_PORT = ''
def DATABASES(self):
if self.DB_NAME is None:
raise ImproperlyConfigured('DB_NAME is not set')
if self.DB_USER is None:
self.DB_USER = self.DB_NAME
if self.DB_PASS is None:
self.DB_PASS = self.DB_NAME
return {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': self.DB_NAME,
'USER': self.DB_USER,
'PASSWORD': self.DB_PASS,
'HOST': self.DB_HOST,
'PORT': self.DB_PORT,
}
}
ADMINS = []
def MANAGERS(self):
return self.ADMINS
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'django_assets',
'django_statsd',
'debreach',
'debug_toolbar',
'luna_django_commons.app',
)
def STATIC_ROOT(self):
return self.root('static_prod/')
def MEDIA_ROOT(self):
return self.root('media/')
def MEDIA_URL(self):
return self.BASE_URL + '/media/'
AUTHENTICATION_BACKENDS = (
# 'django_auth_ldap.backend.LDAPBackend',
'django.contrib.auth.backends.ModelBackend',
)
def SESSION_COOKIE_SECURE(self):
return self.USE_SSL
def CSRF_COOKIE_SECURE(self):
return self.USE_SSL
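# A minimal usage sketch (hypothetical project values; exactly how cbs wires a
# BaseSettings subclass into Django settings depends on the cbs API and is not
# shown here):
#
#     class Settings(BaseSettings):
#         PROJECT_BASENAME = 'myproject'
#         DEPLOYMENT_BASENAME = 'myproject-production'
#         BASE_URL = 'https://example.com'
#         SETTINGS_DIR = os.path.dirname(os.path.abspath(__file__))
#         DB_NAME = 'myproject'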
|
|
# Copyright 2012 Grid Dynamics
# Copyright 2013 Inktank Storage, Inc.
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
try:
import rados
import rbd
except ImportError:
rados = None
rbd = None
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import units
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import loopingcall
from nova import utils
LOG = logging.getLogger(__name__)
class RBDVolumeProxy(object):
"""Context manager for dealing with an existing rbd volume.
This handles connecting to rados and opening an ioctx automatically, and
otherwise acts like a librbd Image object.
The underlying librados client and ioctx can be accessed as the attributes
'client' and 'ioctx'.
"""
def __init__(self, driver, name, pool=None, snapshot=None,
read_only=False):
client, ioctx = driver._connect_to_rados(pool)
try:
snap_name = snapshot.encode('utf8') if snapshot else None
self.volume = rbd.Image(ioctx, name.encode('utf8'),
snapshot=snap_name,
read_only=read_only)
except rbd.ImageNotFound:
with excutils.save_and_reraise_exception():
LOG.debug("rbd image %s does not exist", name)
driver._disconnect_from_rados(client, ioctx)
except rbd.Error:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("error opening rbd image %s"), name)
driver._disconnect_from_rados(client, ioctx)
self.driver = driver
self.client = client
self.ioctx = ioctx
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
try:
self.volume.close()
finally:
self.driver._disconnect_from_rados(self.client, self.ioctx)
def __getattr__(self, attrib):
return getattr(self.volume, attrib)
class RADOSClient(object):
"""Context manager to simplify error handling for connecting to ceph."""
def __init__(self, driver, pool=None):
self.driver = driver
self.cluster, self.ioctx = driver._connect_to_rados(pool)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.driver._disconnect_from_rados(self.cluster, self.ioctx)
class RBDDriver(object):
def __init__(self, pool, ceph_conf, rbd_user):
self.pool = pool.encode('utf8')
# NOTE(angdraug): rados.Rados fails to connect if ceph_conf is None:
# https://github.com/ceph/ceph/pull/1787
self.ceph_conf = ceph_conf.encode('utf8') if ceph_conf else ''
self.rbd_user = rbd_user.encode('utf8') if rbd_user else None
if rbd is None:
raise RuntimeError(_('rbd python libraries not found'))
def _connect_to_rados(self, pool=None):
client = rados.Rados(rados_id=self.rbd_user,
conffile=self.ceph_conf)
try:
client.connect()
pool_to_open = pool or self.pool
ioctx = client.open_ioctx(pool_to_open.encode('utf-8'))
return client, ioctx
except rados.Error:
# shutdown cannot raise an exception
client.shutdown()
raise
def _disconnect_from_rados(self, client, ioctx):
# closing an ioctx cannot raise an exception
ioctx.close()
client.shutdown()
def supports_layering(self):
return hasattr(rbd, 'RBD_FEATURE_LAYERING')
def ceph_args(self):
"""List of command line parameters to be passed to ceph commands to
reflect RBDDriver configuration such as RBD user name and location
of ceph.conf.
"""
args = []
if self.rbd_user:
args.extend(['--id', self.rbd_user])
if self.ceph_conf:
args.extend(['--conf', self.ceph_conf])
return args
def get_mon_addrs(self):
args = ['ceph', 'mon', 'dump', '--format=json'] + self.ceph_args()
out, _ = utils.execute(*args)
lines = out.split('\n')
if lines[0].startswith('dumped monmap epoch'):
lines = lines[1:]
monmap = jsonutils.loads('\n'.join(lines))
addrs = [mon['addr'] for mon in monmap['mons']]
hosts = []
ports = []
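        # For example, a monmap addr entry such as '192.0.2.1:6789/0' or
        # '[2001:db8::1]:6789/0' yields host '192.0.2.1' / '2001:db8::1' and
        # port '6789' once everything from the trailing '/' onward and any
        # IPv6 brackets are stripped below. (Example addresses are
        # illustrative only.)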
for addr in addrs:
host_port = addr[:addr.rindex('/')]
host, port = host_port.rsplit(':', 1)
hosts.append(host.strip('[]'))
ports.append(port)
return hosts, ports
def parse_url(self, url):
prefix = 'rbd://'
if not url.startswith(prefix):
reason = _('Not stored in rbd')
raise exception.ImageUnacceptable(image_id=url, reason=reason)
pieces = map(urllib.unquote, url[len(prefix):].split('/'))
if '' in pieces:
reason = _('Blank components')
raise exception.ImageUnacceptable(image_id=url, reason=reason)
if len(pieces) != 4:
reason = _('Not an rbd snapshot')
raise exception.ImageUnacceptable(image_id=url, reason=reason)
return pieces
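    # For example (hypothetical identifiers), an image location URL of the form
    # 'rbd://<fsid>/<pool>/<image>/<snapshot>' parses into the four pieces that
    # is_cloneable() below unpacks as fsid, pool, image and snapshot.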
def _get_fsid(self):
with RADOSClient(self) as client:
return client.cluster.get_fsid()
def is_cloneable(self, image_location, image_meta):
url = image_location['url']
try:
fsid, pool, image, snapshot = self.parse_url(url)
except exception.ImageUnacceptable as e:
LOG.debug('not cloneable: %s', e)
return False
if self._get_fsid() != fsid:
reason = '%s is in a different ceph cluster' % url
LOG.debug(reason)
return False
if image_meta['disk_format'] != 'raw':
reason = ("rbd image clone requires image format to be "
"'raw' but image {0} is '{1}'").format(
url, image_meta['disk_format'])
LOG.debug(reason)
return False
# check that we can read the image
try:
return self.exists(image, pool=pool, snapshot=snapshot)
except rbd.Error as e:
LOG.debug('Unable to open image %(loc)s: %(err)s' %
dict(loc=url, err=e))
return False
def clone(self, image_location, dest_name):
_fsid, pool, image, snapshot = self.parse_url(
image_location['url'])
LOG.debug('cloning %(pool)s/%(img)s@%(snap)s' %
dict(pool=pool, img=image, snap=snapshot))
with RADOSClient(self, str(pool)) as src_client:
with RADOSClient(self) as dest_client:
rbd.RBD().clone(src_client.ioctx,
image.encode('utf-8'),
snapshot.encode('utf-8'),
dest_client.ioctx,
dest_name,
features=rbd.RBD_FEATURE_LAYERING)
def size(self, name):
with RBDVolumeProxy(self, name) as vol:
return vol.size()
def resize(self, name, size):
"""Resize RBD volume.
:name: Name of RBD object
:size: New size in bytes
"""
LOG.debug('resizing rbd image %s to %d', name, size)
with RBDVolumeProxy(self, name) as vol:
vol.resize(size)
def exists(self, name, pool=None, snapshot=None):
try:
with RBDVolumeProxy(self, name,
pool=pool,
snapshot=snapshot,
read_only=True):
return True
except rbd.ImageNotFound:
return False
def import_image(self, base, name):
"""Import RBD volume from image file.
Uses the command line import instead of librbd since rbd import
command detects zeroes to preserve sparseness in the image.
:base: Path to image file
:name: Name of RBD volume
"""
args = ['--pool', self.pool, base, name]
if self.supports_layering():
args += ['--new-format']
args += self.ceph_args()
utils.execute('rbd', 'import', *args)
def cleanup_volumes(self, instance):
def _cleanup_vol(ioctx, volume, retryctx):
try:
                rbd.RBD().remove(ioctx, volume)
raise loopingcall.LoopingCallDone(retvalue=False)
except (rbd.ImageBusy, rbd.ImageHasSnapshots):
LOG.warn(_LW('rbd remove %(volume)s in pool %(pool)s '
'failed'),
{'volume': volume, 'pool': self.pool})
retryctx['retries'] -= 1
if retryctx['retries'] <= 0:
raise loopingcall.LoopingCallDone()
with RADOSClient(self, self.pool) as client:
def belongs_to_instance(disk):
return disk.startswith(instance.uuid)
volumes = rbd.RBD().list(client.ioctx)
for volume in filter(belongs_to_instance, volumes):
# NOTE(danms): We let it go for ten seconds
retryctx = {'retries': 10}
timer = loopingcall.FixedIntervalLoopingCall(
_cleanup_vol, client.ioctx, volume, retryctx)
timed_out = timer.start(interval=1).wait()
if timed_out:
# NOTE(danms): Run this again to propagate the error, but
# if it succeeds, don't raise the loopingcall exception
try:
_cleanup_vol(client.ioctx, volume, retryctx)
except loopingcall.LoopingCallDone:
pass
def get_pool_info(self):
with RADOSClient(self) as client:
stats = client.cluster.get_cluster_stats()
return {'total': stats['kb'] * units.Ki,
'free': stats['kb_avail'] * units.Ki,
'used': stats['kb_used'] * units.Ki}
|
|
"""Weight Boosting
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The ``BaseAdaBoost`` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- ``AdaBoostClassifier`` implements adaptive boosting (AdaBoost-SAMME) for
classification problems.
- ``AdaBoostRegressor`` implements adaptive boosting (AdaBoost.R2) for
regression problems.
"""
# Authors: Noel Dawe <[email protected]>
# Gilles Louppe <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# Licence: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from numpy.core.umath_tests import inner1d
from .base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin
from ..externals import six
from ..externals.six.moves import xrange, zip
from .forest import BaseForest
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..tree.tree import BaseDecisionTree
from ..tree._tree import DTYPE
from ..utils import check_array, check_X_y, check_random_state
from ..metrics import accuracy_score, r2_score
__all__ = [
'AdaBoostClassifier',
'AdaBoostRegressor',
]
class BaseWeightBoosting(six.with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for AdaBoost estimators.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=50,
estimator_params=tuple(),
learning_rate=1.,
random_state=None):
super(BaseWeightBoosting, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.learning_rate = learning_rate
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier/regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR. The dtype is
forced to DTYPE from tree._tree if the base classifier of this
ensemble weighted boosting classifier is a tree or forest.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check parameters
if self.learning_rate <= 0:
raise ValueError("learning_rate must be greater than zero")
if(self.base_estimator is None or
isinstance(self.base_estimator, (BaseDecisionTree, BaseForest))):
dtype = DTYPE
else:
dtype = None
X, y = check_X_y(X, y, ['csr', 'csc'], dtype=dtype, order='C')
if sample_weight is None:
# Initialize weights to 1 / n_samples
sample_weight = np.empty(X.shape[0], dtype=np.float)
sample_weight[:] = 1. / X.shape[0]
else:
# Normalize existing weights
sample_weight = sample_weight / sample_weight.sum(dtype=np.float64)
# Check that the sample weights sum is positive
if sample_weight.sum() <= 0:
raise ValueError(
"Attempting to fit with a non-positive "
"weighted number of samples.")
# Check parameters
self._validate_estimator()
# Clear any previous fit results
self.estimators_ = []
self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float)
self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float)
for iboost in xrange(self.n_estimators):
# Boosting step
sample_weight, estimator_weight, estimator_error = self._boost(
iboost,
X, y,
sample_weight)
# Early termination
if sample_weight is None:
break
self.estimator_weights_[iboost] = estimator_weight
self.estimator_errors_[iboost] = estimator_error
# Stop if error is zero
if estimator_error == 0:
break
sample_weight_sum = np.sum(sample_weight)
# Stop if the sum of sample weights has become non-positive
if sample_weight_sum <= 0:
break
if iboost < self.n_estimators - 1:
# Normalize
sample_weight /= sample_weight_sum
return self
def _check_fitted(self):
if not hasattr(self, "estimators_"):
raise ValueError("call fit first")
@abstractmethod
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
        Warning: This method needs to be overridden by subclasses.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
pass
def staged_score(self, X, y, sample_weight=None):
"""Return staged scores for X, y.
This generator method yields the ensemble score after each iteration of
boosting and therefore allows monitoring, such as to determine the
score on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like, shape = [n_samples]
Labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
z : float
"""
for y_pred in self.staged_predict(X):
if isinstance(self, ClassifierMixin):
yield accuracy_score(y, y_pred, sample_weight=sample_weight)
else:
yield r2_score(y, y_pred, sample_weight=sample_weight)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise ValueError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
try:
norm = self.estimator_weights_.sum()
return (sum(weight * clf.feature_importances_ for weight, clf
in zip(self.estimator_weights_, self.estimators_))
/ norm)
except AttributeError:
raise AttributeError(
"Unable to compute feature importances "
"since base_estimator does not have a "
"feature_importances_ attribute")
def _samme_proba(estimator, n_classes, X):
"""Calculate algorithm 4, step 2, equation c) of Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
proba = estimator.predict_proba(X)
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
proba[proba <= 0] = 1e-5
log_proba = np.log(proba)
return (n_classes - 1) * (log_proba - (1. / n_classes)
* log_proba.sum(axis=1)[:, np.newaxis])
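# In symbols, with p_k(x) = estimator.predict_proba(x)[k] and K = n_classes,
# the value returned above for class k is
#     (K - 1) * (log p_k(x) - (1 / K) * sum_j log p_j(x)),
# i.e. the per-estimator SAMME.R contribution combined in decision_function.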
class AdaBoostClassifier(BaseWeightBoosting, ClassifierMixin):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Parameters
----------
base_estimator : object, optional (default=DecisionTreeClassifier)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper `classes_`
and `n_classes_` attributes.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each classifier by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
algorithm : {'SAMME', 'SAMME.R'}, optional (default='SAMME.R')
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
`estimators_` : list of classifiers
The collection of fitted sub-estimators.
`classes_` : array of shape = [n_classes]
The classes labels.
`n_classes_` : int
The number of classes.
`estimator_weights_` : array of floats
Weights for each estimator in the boosted ensemble.
`estimator_errors_` : array of floats
Classification error for each estimator in the boosted
ensemble.
`feature_importances_` : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostRegressor, GradientBoostingClassifier, DecisionTreeClassifier
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
algorithm='SAMME.R',
random_state=None):
super(AdaBoostClassifier, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Returns self.
"""
# Check that algorithm is supported
if self.algorithm not in ('SAMME', 'SAMME.R'):
raise ValueError("algorithm %s is not supported"
% self.algorithm)
# Fit
return super(AdaBoostClassifier, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostClassifier, self)._validate_estimator(
default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == 'SAMME.R':
if not hasattr(self.base_estimator_, 'predict_proba'):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead.")
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == 'SAMME.R':
return self._boost_real(iboost, X, y, sample_weight)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight)
def _boost_real(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME.R real algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict_proba = estimator.predict_proba(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1),
axis=0)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
# Construct y coding as described in Zhu et al [2]:
#
# y_k = 1 if c == k else -1 / (K - 1)
#
# where K == n_classes_ and c, k in [0, K) are indices along the second
# axis of the y coding with c being the index corresponding to the true
# class label.
n_classes = self.n_classes_
classes = self.classes_
y_codes = np.array([-1. / (n_classes - 1), 1.])
y_coding = y_codes.take(classes == y[:, np.newaxis])
# Displace zero probabilities so the log is defined.
# Also fix negative elements which may occur with
# negative sample weights.
y_predict_proba[y_predict_proba <= 0] = 1e-5
# Boost weight using multi-class AdaBoost SAMME.R alg
estimator_weight = (-1. * self.learning_rate
* (((n_classes - 1.) / n_classes) *
inner1d(y_coding, np.log(y_predict_proba))))
# Only boost the weights if it will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, 1., estimator_error
def _boost_discrete(self, iboost, X, y, sample_weight):
"""Implement a single boost using the SAMME discrete algorithm."""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
estimator.fit(X, y, sample_weight=sample_weight)
y_predict = estimator.predict(X)
if iboost == 0:
self.classes_ = getattr(estimator, 'classes_', None)
self.n_classes_ = len(self.classes_)
# Instances incorrectly classified
incorrect = y_predict != y
# Error fraction
estimator_error = np.mean(
np.average(incorrect, weights=sample_weight, axis=0))
# Stop if classification is perfect
if estimator_error <= 0:
return sample_weight, 1., 0.
n_classes = self.n_classes_
# Stop if the error is at least as bad as random guessing
if estimator_error >= 1. - (1. / n_classes):
self.estimators_.pop(-1)
return None, None, None
# Boost weight using multi-class AdaBoost SAMME alg
estimator_weight = self.learning_rate * (
np.log((1. - estimator_error) / estimator_error) +
np.log(n_classes - 1.))
# Only boost the weights if I will fit again
if not iboost == self.n_estimators - 1:
# Only boost positive weights
sample_weight *= np.exp(estimator_weight * incorrect *
((sample_weight > 0) |
(estimator_weight < 0)))
return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape = [n_samples, n_features]
The input samples.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted classes.
"""
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(
np.argmax(pred, axis=1), axis=0))
def decision_function(self, X):
"""Compute the decision function of ``X``.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
self._check_fitted()
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
pred = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
pred = sum((estimator.predict(X) == classes).T * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
pred /= self.estimator_weights_.sum()
if n_classes == 2:
pred[:, 0] *= -1
return pred.sum(axis=1)
return pred
def staged_decision_function(self, X):
"""Compute decision function of ``X`` for each boosting iteration.
This method allows monitoring (i.e. determine error on testing set)
after each boosting iteration.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
score : generator of array, shape = [n_samples, k]
The decision function of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k == n_classes``. For binary classification,
values closer to -1 or 1 mean more like the first or second
class in ``classes_``, respectively.
"""
self._check_fitted()
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_classes = self.n_classes_
classes = self.classes_[:, np.newaxis]
pred = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_pred = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_pred = estimator.predict(X)
current_pred = (current_pred == classes).T * weight
if pred is None:
pred = current_pred
else:
pred += current_pred
if n_classes == 2:
tmp_pred = np.copy(pred)
tmp_pred[:, 0] *= -1
yield (tmp_pred / norm).sum(axis=1)
else:
yield pred / norm
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
n_classes = self.n_classes_
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
proba = sum(_samme_proba(estimator, n_classes, X)
for estimator in self.estimators_)
else: # self.algorithm == "SAMME"
proba = sum(estimator.predict_proba(X) * w
for estimator, w in zip(self.estimators_,
self.estimator_weights_))
proba /= self.estimator_weights_.sum()
proba = np.exp((1. / (n_classes - 1)) * proba)
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : generator of array, shape = [n_samples, n_classes]
            The class probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
n_classes = self.n_classes_
proba = None
norm = 0.
for weight, estimator in zip(self.estimator_weights_,
self.estimators_):
norm += weight
if self.algorithm == 'SAMME.R':
# The weights are all 1. for SAMME.R
current_proba = _samme_proba(estimator, n_classes, X)
else: # elif self.algorithm == "SAMME":
current_proba = estimator.predict_proba(X) * weight
if proba is None:
proba = current_proba
else:
proba += current_proba
real_proba = np.exp((1. / (n_classes - 1)) * (proba / norm))
normalizer = real_proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
real_proba /= normalizer
yield real_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
        p : array of shape = [n_samples, n_classes]
            The class log-probabilities of the input samples. The order of
            outputs is the same as that of the `classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(BaseWeightBoosting, RegressorMixin):
"""An AdaBoost regressor.
An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
regressor on the original dataset and then fits additional copies of the
regressor on the same dataset but where the weights of instances are
adjusted according to the error of the current prediction. As such,
subsequent regressors focus more on difficult cases.
This class implements the algorithm known as AdaBoost.R2 [2].
Parameters
----------
base_estimator : object, optional (default=DecisionTreeRegressor)
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required.
n_estimators : integer, optional (default=50)
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
learning_rate : float, optional (default=1.)
Learning rate shrinks the contribution of each regressor by
``learning_rate``. There is a trade-off between ``learning_rate`` and
``n_estimators``.
loss : {'linear', 'square', 'exponential'}, optional (default='linear')
The loss function to use when updating the weights after each
boosting iteration.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
`estimators_` : list of classifiers
The collection of fitted sub-estimators.
`estimator_weights_` : array of floats
Weights for each estimator in the boosted ensemble.
`estimator_errors_` : array of floats
Regression error for each estimator in the boosted ensemble.
`feature_importances_` : array of shape = [n_features]
The feature importances if supported by the ``base_estimator``.
See also
--------
AdaBoostClassifier, GradientBoostingRegressor, DecisionTreeRegressor
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
def __init__(self,
base_estimator=None,
n_estimators=50,
learning_rate=1.,
loss='linear',
random_state=None):
super(AdaBoostRegressor, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
learning_rate=learning_rate,
random_state=random_state)
self.loss = loss
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Build a boosted regressor from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (real numbers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights. If None, the sample weights are initialized to
1 / n_samples.
Returns
-------
self : object
Returns self.
"""
# Check loss
if self.loss not in ('linear', 'square', 'exponential'):
raise ValueError(
"loss must be 'linear', 'square', or 'exponential'")
# Fit
return super(AdaBoostRegressor, self).fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(AdaBoostRegressor, self)._validate_estimator(
default=DecisionTreeRegressor(max_depth=3))
def _boost(self, iboost, X, y, sample_weight):
"""Implement a single boost for regression
Perform a single boost according to the AdaBoost.R2 algorithm and
return the updated sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like of shape = [n_samples]
The current sample weights.
Returns
-------
sample_weight : array-like of shape = [n_samples] or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The regression error for the current boost.
If None then boosting has terminated early.
"""
estimator = self._make_estimator()
try:
estimator.set_params(random_state=self.random_state)
except ValueError:
pass
generator = check_random_state(self.random_state)
# Weighted sampling of the training set with replacement
# For NumPy >= 1.7.0 use np.random.choice
cdf = sample_weight.cumsum()
cdf /= cdf[-1]
uniform_samples = generator.random_sample(X.shape[0])
bootstrap_idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
bootstrap_idx = np.array(bootstrap_idx, copy=False)
# Fit on the bootstrapped sample and obtain a prediction
# for all samples in the training set
estimator.fit(X[bootstrap_idx], y[bootstrap_idx])
y_predict = estimator.predict(X)
error_vect = np.abs(y_predict - y)
error_max = error_vect.max()
if error_max != 0.:
error_vect /= error_max
if self.loss == 'square':
error_vect **= 2
elif self.loss == 'exponential':
error_vect = 1. - np.exp(- error_vect)
# Calculate the average loss
estimator_error = (sample_weight * error_vect).sum()
if estimator_error <= 0:
# Stop if fit is perfect
return sample_weight, 1., 0.
elif estimator_error >= 0.5:
# Discard current estimator only if it isn't the only one
if len(self.estimators_) > 1:
self.estimators_.pop(-1)
return None, None, None
beta = estimator_error / (1. - estimator_error)
# Boost weight using AdaBoost.R2 alg
estimator_weight = self.learning_rate * np.log(1. / beta)
if not iboost == self.n_estimators - 1:
sample_weight *= np.power(
beta,
(1. - error_vect) * self.learning_rate)
return sample_weight, estimator_weight, estimator_error
def _get_median_predict(self, X, limit):
# Evaluate predictions of all estimators
predictions = np.array([
est.predict(X) for est in self.estimators_[:limit]]).T
# Sort the predictions
sorted_idx = np.argsort(predictions, axis=1)
# Find index of median prediction for each sample
weight_cdf = self.estimator_weights_[sorted_idx].cumsum(axis=1)
median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
median_idx = median_or_above.argmax(axis=1)
median_estimators = sorted_idx[np.arange(X.shape[0]), median_idx]
# Return median predictions
return predictions[np.arange(X.shape[0]), median_estimators]
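    # Worked toy example for the weighted median above: with estimator weights
    # [0.2, 0.5, 0.3] and per-sample predictions [1.0, 3.0, 2.0], sorting the
    # predictions gives [1.0, 2.0, 3.0] with cumulative weights [0.2, 0.5, 1.0];
    # the first entry whose cumulative weight reaches half the total (0.5)
    # selects the prediction 2.0.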
def predict(self, X):
"""Predict regression value for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : array of shape = [n_samples]
The predicted regression values.
"""
self._check_fitted()
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
return self._get_median_predict(X, len(self.estimators_))
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted regression value of an input sample is computed
as the weighted median prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. DOK and LIL are converted to CSR.
Returns
-------
y : generator of array, shape = [n_samples]
The predicted regression values.
"""
self._check_fitted()
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
for i, _ in enumerate(self.estimators_, 1):
yield self._get_median_predict(X, limit=i)
|
|
#!/usr/bin/env python
#
# Copyright 2013, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
"""This test covers a resharding scenario of an already sharded keyspace.
We start with shards -80 and 80-. We then split 80- into 80-c0 and c0-.
This test is the main resharding test. It not only tests the regular resharding
workflow for a horizontal split, but also a lot of error cases and side
effects, like:
- migrating the traffic one cell at a time.
- migrating rdonly traffic back and forth.
- making sure we can't migrate the master until replica and rdonly are migrated.
- has a background thread to insert data during migration.
- tests a destination shard master failover while replication is running.
- tests a filtered replication source replacement while filtered replication
is running.
- tests 'vtctl SourceShardAdd' and 'vtctl SourceShardDelete'.
- makes sure the key range rules are properly enforced on masters.
"""
import threading
import time
import logging
import unittest
import base_sharding
import environment
import tablet
import utils
from vtproto import topodata_pb2
from vtdb import keyrange_constants
# initial shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_ny_rdonly = tablet.Tablet(cell='ny')
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_slave1 = tablet.Tablet()
shard_1_slave2 = tablet.Tablet()
shard_1_ny_rdonly = tablet.Tablet(cell='ny')
shard_1_rdonly1 = tablet.Tablet()
# split shards
# range 80 - c0
shard_2_master = tablet.Tablet()
shard_2_replica1 = tablet.Tablet()
shard_2_replica2 = tablet.Tablet()
shard_2_rdonly1 = tablet.Tablet()
# range c0 - ''
shard_3_master = tablet.Tablet()
shard_3_replica = tablet.Tablet()
shard_3_rdonly1 = tablet.Tablet()
all_tablets = [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_1_master, shard_1_slave1, shard_1_slave2,
shard_1_ny_rdonly, shard_1_rdonly1,
shard_2_master, shard_2_replica1, shard_2_replica2,
shard_2_rdonly1,
shard_3_master, shard_3_replica, shard_3_rdonly1]
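# Keyspace ids used by the helpers below: values such as 0x1000000000000000
# live in shard -80, while 0x90... falls into 80-c0 and 0xD0.../0xE0... fall
# into c0- once shard 80- has been split.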
def setUpModule():
try:
environment.topo_server().setup()
setup_procs = [t.init_mysql() for t in all_tablets]
utils.Vtctld().start()
utils.wait_procs(setup_procs)
except:
tearDownModule()
raise
def tearDownModule():
utils.required_teardown()
if utils.options.skip_teardown:
return
teardown_procs = [t.teardown_mysql() for t in all_tablets]
utils.wait_procs(teardown_procs, raise_on_error=False)
environment.topo_server().teardown()
utils.kill_sub_processes()
utils.remove_tmp_files()
for t in all_tablets:
t.remove_tree()
# InsertThread will insert a value into the timestamps table, and then
# every 1/5s will update its value with the current timestamp
class InsertThread(threading.Thread):
def __init__(self, tablet_obj, thread_name, thread_id, user_id,
keyspace_id):
threading.Thread.__init__(self)
self.tablet = tablet_obj
self.thread_name = thread_name
self.thread_id = thread_id
self.user_id = user_id
self.keyspace_id = keyspace_id
self.str_keyspace_id = utils.uint64_to_hex(keyspace_id)
self.done = False
self.tablet.mquery(
'vt_test_keyspace',
['begin',
'insert into timestamps(id, time_milli, custom_ksid_col) '
'values(%d, %d, 0x%x) '
'/* vtgate:: keyspace_id:%s */ /* user_id:%d */' %
(self.thread_id, long(time.time() * 1000), self.keyspace_id,
self.str_keyspace_id, self.user_id),
'commit'],
write=True, user='vt_app')
self.start()
def run(self):
try:
while not self.done:
self.tablet.mquery(
'vt_test_keyspace',
['begin',
'update timestamps set time_milli=%d '
'where id=%d /* vtgate:: keyspace_id:%s */ /* user_id:%d */' %
(long(time.time() * 1000), self.thread_id,
self.str_keyspace_id, self.user_id),
'commit'],
write=True, user='vt_app')
time.sleep(0.2)
except Exception: # pylint: disable=broad-except
logging.exception('InsertThread got exception.')
# MonitorLagThread will get values from a database, and compare the timestamp
# to evaluate lag. Since the qps is really low, and we send binlogs as chunks,
# the latency is pretty high (a few seconds).
class MonitorLagThread(threading.Thread):
def __init__(self, tablet_obj, thread_name, thread_id):
threading.Thread.__init__(self)
self.tablet = tablet_obj
self.thread_name = thread_name
self.thread_id = thread_id
self.done = False
self.max_lag_ms = 0
self.lag_sum_ms = 0
self.sample_count = 0
self.start()
def run(self):
try:
while not self.done:
result = self.tablet.mquery(
'vt_test_keyspace',
'select time_milli from timestamps where id=%d' %
self.thread_id)
if result:
lag_ms = long(time.time() * 1000) - long(result[0][0])
logging.debug('MonitorLagThread(%s) got %d ms',
self.thread_name, lag_ms)
self.sample_count += 1
self.lag_sum_ms += lag_ms
if lag_ms > self.max_lag_ms:
self.max_lag_ms = lag_ms
time.sleep(1.0)
except Exception: # pylint: disable=broad-except
logging.exception('MonitorLagThread got exception.')
class TestResharding(unittest.TestCase, base_sharding.BaseShardingTest):
# create_schema will create the same schema on the keyspace
# then insert some values
def _create_schema(self):
if base_sharding.keyspace_id_type == keyrange_constants.KIT_BYTES:
t = 'varbinary(64)'
else:
t = 'bigint(20) unsigned'
# Note that the primary key columns are not defined first on purpose to test
# that a reordered column list is correctly used everywhere in vtworker.
create_table_template = '''create table %s(
custom_ksid_col ''' + t + ''' not null,
msg varchar(64),
id bigint not null,
parent_id bigint not null,
primary key (parent_id, id),
index by_msg (msg)
) Engine=InnoDB'''
create_view_template = (
'create view %s'
'(parent_id, id, msg, custom_ksid_col)'
'as select parent_id, id, msg, custom_ksid_col '
'from %s')
create_timestamp_table = '''create table timestamps(
id int not null,
time_milli bigint(20) unsigned not null,
custom_ksid_col ''' + t + ''' not null,
primary key (id)
) Engine=InnoDB'''
create_unrelated_table = '''create table unrelated(
name varchar(64),
primary key (name)
) Engine=InnoDB'''
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding1'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_table_template % ('resharding2'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_view_template % ('view1', 'resharding1'),
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_timestamp_table,
'test_keyspace'],
auto_log=True)
utils.run_vtctl(['ApplySchema',
'-sql=' + create_unrelated_table,
'test_keyspace'],
auto_log=True)
def _insert_startup_values(self):
self._insert_value(shard_0_master, 'resharding1', 1, 'msg1',
0x1000000000000000)
self._insert_value(shard_1_master, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._insert_value(shard_1_master, 'resharding1', 3, 'msg3',
0xD000000000000000)
def _check_startup_values(self):
# check first value is in the right shard
self._check_value(shard_2_master, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_2_replica1, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_2_replica2, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_2_rdonly1, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_3_master, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
self._check_value(shard_3_replica, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
self._check_value(shard_3_rdonly1, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
# check second value is in the right shard too
self._check_value(shard_2_master, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_2_replica1, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_2_replica2, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_2_rdonly1, 'resharding1', 3, 'msg3',
0xD000000000000000, should_be_here=False)
self._check_value(shard_3_master, 'resharding1', 3, 'msg3',
0xD000000000000000)
self._check_value(shard_3_replica, 'resharding1', 3, 'msg3',
0xD000000000000000)
self._check_value(shard_3_rdonly1, 'resharding1', 3, 'msg3',
0xD000000000000000)
def _insert_lots(self, count, base=0):
for i in xrange(count):
self._insert_value(shard_1_master, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i)
self._insert_value(shard_1_master, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i)
def _exec_multi_shard_dmls(self):
mids = [10000001, 10000002, 10000003]
msg_ids = ['msg-id10000001', 'msg-id10000002', 'msg-id10000003']
keyspace_ids = [0x9000000000000000, 0xD000000000000000,
0xE000000000000000]
self._insert_multi_value(shard_1_master, 'resharding1', mids,
msg_ids, keyspace_ids)
mids = [10000004, 10000005]
msg_ids = ['msg-id10000004', 'msg-id10000005']
keyspace_ids = [0xD000000000000000, 0xE000000000000000]
self._insert_multi_value(shard_1_master, 'resharding1', mids,
msg_ids, keyspace_ids)
mids = [10000011, 10000012, 10000013]
msg_ids = ['msg-id10000011', 'msg-id10000012', 'msg-id10000013']
keyspace_ids = [0x9000000000000000, 0xD000000000000000, 0xE000000000000000]
self._insert_multi_value(shard_1_master, 'resharding1', mids,
msg_ids, keyspace_ids)
# This update targets two shards.
self._exec_non_annotated_update(shard_1_master, 'resharding1',
[10000011, 10000012], 'update1')
# This update targets one shard.
self._exec_non_annotated_update(shard_1_master, 'resharding1',
[10000013], 'update2')
mids = [10000014, 10000015, 10000016]
msg_ids = ['msg-id10000014', 'msg-id10000015', 'msg-id10000016']
keyspace_ids = [0x9000000000000000, 0xD000000000000000, 0xE000000000000000]
self._insert_multi_value(shard_1_master, 'resharding1', mids,
msg_ids, keyspace_ids)
# This delete targets two shards.
self._exec_non_annotated_delete(shard_1_master, 'resharding1',
[10000014, 10000015])
# This delete targets one shard.
self._exec_non_annotated_delete(shard_1_master, 'resharding1', [10000016])
def _check_multi_shard_values(self):
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2],
'resharding1', 10000001, 'msg-id10000001', 0x9000000000000000)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2],
'resharding1', 10000002, 'msg-id10000002', 0xD000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2],
'resharding1', 10000003, 'msg-id10000003', 0xE000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000001, 'msg-id10000001', 0x9000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000002, 'msg-id10000002', 0xD000000000000000)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000003, 'msg-id10000003', 0xE000000000000000)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2],
'resharding1', 10000004, 'msg-id10000004', 0xD000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2],
'resharding1', 10000005, 'msg-id10000005', 0xE000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000004, 'msg-id10000004', 0xD000000000000000)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000005, 'msg-id10000005', 0xE000000000000000)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2],
'resharding1', 10000011, 'update1', 0x9000000000000000)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000012, 'update1', 0xD000000000000000)
self._check_multi_dbs(
[shard_3_master, shard_3_replica],
'resharding1', 10000013, 'update2', 0xE000000000000000)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2,
shard_3_master, shard_3_replica],
'resharding1', 10000014, 'msg-id10000014', 0x9000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2,
shard_3_master, shard_3_replica],
'resharding1', 10000015, 'msg-id10000015', 0xD000000000000000,
should_be_here=False)
self._check_multi_dbs(
[shard_2_master, shard_2_replica1, shard_2_replica2,
shard_3_master, shard_3_replica],
'resharding1', 10000016, 'msg-id10000016', 0xF000000000000000,
should_be_here=False)
# _check_multi_dbs checks the row in multiple dbs.
def _check_multi_dbs(self, dblist, table, mid, msg, keyspace_id,
should_be_here=True):
for db in dblist:
self._check_value(db, table, mid, msg, keyspace_id, should_be_here)
  # _check_lots returns how many of the inserted values are present, as a percentage.
def _check_lots(self, count, base=0):
found = 0
for i in xrange(count):
if self._is_value_present_and_correct(shard_2_replica2, 'resharding1',
10000 + base + i, 'msg-range1-%d' %
i, 0xA000000000000000 + base + i):
found += 1
if self._is_value_present_and_correct(shard_3_replica, 'resharding1',
20000 + base + i, 'msg-range2-%d' %
i, 0xE000000000000000 + base + i):
found += 1
percent = found * 100 / count / 2
logging.debug('I have %d%% of the data', percent)
return percent
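  # _check_lots_timeout polls _check_lots until at least `threshold` percent
  # of the rows have arrived, or the timeout expires.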
def _check_lots_timeout(self, count, threshold, timeout, base=0):
while True:
value = self._check_lots(count, base=base)
if value >= threshold:
return value
timeout = utils.wait_step('waiting for %d%% of the data' % threshold,
timeout, sleep_time=1)
# _check_lots_not_present makes sure no data is in the wrong shard
def _check_lots_not_present(self, count, base=0):
for i in xrange(count):
self._check_value(shard_3_replica, 'resharding1', 10000 + base + i,
'msg-range1-%d' % i, 0xA000000000000000 + base + i,
should_be_here=False)
self._check_value(shard_2_replica2, 'resharding1', 20000 + base + i,
'msg-range2-%d' % i, 0xE000000000000000 + base + i,
should_be_here=False)
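  # End-to-end resharding test: split shard 80- into 80-c0 and c0-, clone the
  # data with vtworker SplitClone, verify filtered replication and multi-shard
  # DMLs, then migrate rdonly, replica and finally master serving types.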
def test_resharding(self):
# we're going to reparent and swap these two
global shard_2_master, shard_2_replica1
utils.run_vtctl(['CreateKeyspace',
'--sharding_column_name', 'bad_column',
'--sharding_column_type', 'bytes',
'test_keyspace'])
utils.run_vtctl(['SetKeyspaceShardingInfo', 'test_keyspace',
'custom_ksid_col', 'uint64'], expect_fail=True)
utils.run_vtctl(['SetKeyspaceShardingInfo', '-force',
'test_keyspace',
'custom_ksid_col', base_sharding.keyspace_id_type])
shard_0_master.init_tablet('replica', 'test_keyspace', '-80')
shard_0_replica.init_tablet('replica', 'test_keyspace', '-80')
shard_0_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '-80')
shard_1_master.init_tablet('replica', 'test_keyspace', '80-')
shard_1_slave1.init_tablet('replica', 'test_keyspace', '80-')
shard_1_slave2.init_tablet('replica', 'test_keyspace', '80-')
shard_1_ny_rdonly.init_tablet('rdonly', 'test_keyspace', '80-')
shard_1_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-')
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
ks = utils.run_vtctl_json(['GetSrvKeyspace', 'test_nj', 'test_keyspace'])
self.assertEqual(ks['sharding_column_name'], 'custom_ksid_col')
# we set full_mycnf_args to True as a test in the KIT_BYTES case
full_mycnf_args = (base_sharding.keyspace_id_type ==
keyrange_constants.KIT_BYTES)
# create databases so vttablet can start behaving somewhat normally
for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
t.create_db('vt_test_keyspace')
t.start_vttablet(wait_for_state=None, full_mycnf_args=full_mycnf_args)
    # wait for the tablets (replication is not set up, so they won't be healthy)
for t in [shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_1_master, shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
# reparent to make the tablets work
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/-80',
shard_0_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-',
shard_1_master.tablet_alias], auto_log=True)
# check the shards
shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace'])
self.assertIn('-80', shards, 'unexpected shards: %s' % str(shards))
self.assertIn('80-', shards, 'unexpected shards: %s' % str(shards))
self.assertEqual(len(shards), 2, 'unexpected shards: %s' % str(shards))
# create the tables
self._create_schema()
self._insert_startup_values()
# run a health check on source replicas so they respond to discovery
# (for binlog players) and on the source rdonlys (for workers)
for t in [shard_0_replica, shard_1_slave1]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
for t in [shard_0_ny_rdonly, shard_1_ny_rdonly, shard_1_rdonly1]:
utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
# create the split shards
shard_2_master.init_tablet('replica', 'test_keyspace', '80-c0')
shard_2_replica1.init_tablet('replica', 'test_keyspace', '80-c0')
shard_2_replica2.init_tablet('replica', 'test_keyspace', '80-c0')
shard_2_rdonly1.init_tablet('rdonly', 'test_keyspace', '80-c0')
shard_3_master.init_tablet('replica', 'test_keyspace', 'c0-')
shard_3_replica.init_tablet('replica', 'test_keyspace', 'c0-')
shard_3_rdonly1.init_tablet('rdonly', 'test_keyspace', 'c0-')
# start vttablet on the split shards (no db created,
# so they're all not serving)
shard_2_master.start_vttablet(wait_for_state=None)
shard_3_master.start_vttablet(wait_for_state=None)
for t in [shard_2_replica1, shard_2_replica2, shard_2_rdonly1,
shard_3_replica, shard_3_rdonly1]:
t.start_vttablet(wait_for_state=None)
for t in [shard_2_master, shard_2_replica1, shard_2_replica2,
shard_2_rdonly1,
shard_3_master, shard_3_replica, shard_3_rdonly1]:
t.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/80-c0',
shard_2_master.tablet_alias], auto_log=True)
utils.run_vtctl(['InitShardMaster', '-force', 'test_keyspace/c0-',
shard_3_master.tablet_alias], auto_log=True)
# check the shards
shards = utils.run_vtctl_json(['FindAllShardsInKeyspace', 'test_keyspace'])
for s in ['-80', '80-', '80-c0', 'c0-']:
self.assertIn(s, shards, 'unexpected shards: %s' % str(shards))
self.assertEqual(len(shards), 4, 'unexpected shards: %s' % str(shards))
utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
auto_log=True)
utils.check_srv_keyspace(
'test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# disable shard_1_slave2, so we're sure filtered replication will go
# from shard_1_slave1
utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'spare'])
shard_1_slave2.wait_for_vttablet_state('NOT_SERVING')
# we need to create the schema, and the worker will do data copying
for keyspace_shard in ('test_keyspace/80-c0', 'test_keyspace/c0-'):
utils.run_vtctl(['CopySchemaShard', '--exclude_tables', 'unrelated',
shard_1_rdonly1.tablet_alias, keyspace_shard],
auto_log=True)
# Run vtworker as daemon for the following SplitClone commands.
worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
['--cell', 'test_nj', '--command_display_interval', '10ms'],
auto_log=True)
# Copy the data from the source to the destination shards.
# --max_tps is only specified to enable the throttler and ensure that the
# code is executed. But the intent here is not to throttle the test, hence
# the rate limit is set very high.
#
# Initial clone (online).
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
2, 0, 0, 0)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Test the correct handling of keyspace_id changes which happen after
# the first clone.
# Let row 2 go to shard 3 instead of shard 2.
shard_1_master.mquery('vt_test_keyspace',
'update resharding1 set'
' custom_ksid_col=0xD000000000000000 WHERE id=2',
write=True)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Row 2 will be deleted from shard 2 and inserted to shard 3.
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
1, 0, 1, 1)
self._check_value(shard_2_master, 'resharding1', 2, 'msg2',
0xD000000000000000, should_be_here=False)
self._check_value(shard_3_master, 'resharding1', 2, 'msg2',
0xD000000000000000)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Move row 2 back to shard 2 from shard 3 by changing the keyspace_id again.
shard_1_master.mquery('vt_test_keyspace',
'update resharding1 set'
' custom_ksid_col=0x9000000000000000 WHERE id=2',
write=True)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--offline=false',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Row 2 will be deleted from shard 3 and inserted to shard 2.
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
1, 0, 1, 1)
self._check_value(shard_2_master, 'resharding1', 2, 'msg2',
0x9000000000000000)
self._check_value(shard_3_master, 'resharding1', 2, 'msg2',
0x9000000000000000, should_be_here=False)
# Reset vtworker such that we can run the next command.
workerclient_proc = utils.run_vtworker_client_bg(['Reset'], worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Modify the destination shard. SplitClone will revert the changes.
# Delete row 2 (provokes an insert).
shard_2_master.mquery('vt_test_keyspace',
'delete from resharding1 where id=2', write=True)
# Update row 3 (provokes an update).
shard_3_master.mquery('vt_test_keyspace',
"update resharding1 set msg='msg-not-3' where id=3",
write=True)
    # Insert rows 4 and 5 (provokes deletes).
self._insert_value(shard_3_master, 'resharding1', 4, 'msg4',
0xD000000000000000)
self._insert_value(shard_3_master, 'resharding1', 5, 'msg5',
0xD000000000000000)
workerclient_proc = utils.run_vtworker_client_bg(
['SplitClone',
'--exclude_tables', 'unrelated',
'--chunk_count', '10',
'--min_rows_per_chunk', '1',
'--min_healthy_rdonly_tablets', '1',
'--max_tps', '9999',
'test_keyspace/80-'],
worker_rpc_port)
utils.wait_procs([workerclient_proc])
# Change tablet, which was taken offline, back to rdonly.
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias,
'rdonly'], auto_log=True)
self.verify_reconciliation_counters(worker_port, 'Online', 'resharding1',
1, 1, 2, 0)
self.verify_reconciliation_counters(worker_port, 'Offline', 'resharding1',
0, 0, 0, 2)
# Terminate worker daemon because it is no longer needed.
utils.kill_sub_process(worker_proc, soft=True)
# TODO(alainjobart): experiment with the dontStartBinlogPlayer option
# check the startup values are in the right place
self._check_startup_values()
# check the schema too
utils.run_vtctl(['ValidateSchemaKeyspace', '--exclude_tables=unrelated',
'test_keyspace'], auto_log=True)
# check the binlog players are running and exporting vars
self.check_destination_master(shard_2_master, ['test_keyspace/80-'])
self.check_destination_master(shard_3_master, ['test_keyspace/80-'])
# When the binlog players/filtered replication is turned on, the query
# service must be turned off on the destination masters.
# The tested behavior is a safeguard to prevent that somebody can
# accidentally modify data on the destination masters while they are not
# migrated yet and the source shards are still the source of truth.
shard_2_master.wait_for_vttablet_state('NOT_SERVING')
shard_3_master.wait_for_vttablet_state('NOT_SERVING')
# check that binlog server exported the stats vars
self.check_binlog_server_vars(shard_1_slave1, horizontal=True)
# Check that the throttler was enabled.
self.check_throttler_service(shard_2_master.rpc_endpoint(),
['BinlogPlayer/0'], 9999)
self.check_throttler_service(shard_3_master.rpc_endpoint(),
['BinlogPlayer/0'], 9999)
# testing filtered replication: insert a bunch of data on shard 1,
# check we get most of it after a few seconds, wait for binlog server
# timeout, check we get all of it.
logging.debug('Inserting lots of data on source shard')
self._insert_lots(1000)
logging.debug('Executing MultiValue Insert Queries')
self._exec_multi_shard_dmls()
logging.debug('Checking 80 percent of data is sent quickly')
v = self._check_lots_timeout(1000, 80, 5)
if v != 100:
# small optimization: only do this check if we don't have all the data
# already anyway.
logging.debug('Checking all data goes through eventually')
self._check_lots_timeout(1000, 100, 20)
logging.debug('Checking no data was sent the wrong way')
self._check_lots_not_present(1000)
logging.debug('Checking MultiValue Insert Queries')
self._check_multi_shard_values()
self.check_binlog_player_vars(shard_2_master, ['test_keyspace/80-'],
seconds_behind_master_max=30)
self.check_binlog_player_vars(shard_3_master, ['test_keyspace/80-'],
seconds_behind_master_max=30)
self.check_binlog_server_vars(shard_1_slave1, horizontal=True,
min_statements=1000, min_transactions=1000)
# use vtworker to compare the data (after health-checking the destination
# rdonly tablets so discovery works)
utils.run_vtctl(['RunHealthCheck', shard_3_rdonly1.tablet_alias])
logging.debug('Running vtworker SplitDiff')
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/c0-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.pause('Good time to test vtworker for diffs')
# get status for destination master tablets, make sure we have it all
self.check_running_binlog_player(shard_2_master, 4022, 2008)
self.check_running_binlog_player(shard_3_master, 4024, 2008)
# start a thread to insert data into shard_1 in the background
# with current time, and monitor the delay
insert_thread_1 = InsertThread(shard_1_master, 'insert_low', 1, 10000,
0x9000000000000000)
insert_thread_2 = InsertThread(shard_1_master, 'insert_high', 2, 10001,
0xD000000000000000)
monitor_thread_1 = MonitorLagThread(shard_2_replica2, 'insert_low', 1)
monitor_thread_2 = MonitorLagThread(shard_3_replica, 'insert_high', 2)
# tests a failover switching serving to a different replica
utils.run_vtctl(['ChangeSlaveType', shard_1_slave2.tablet_alias, 'replica'])
utils.run_vtctl(['ChangeSlaveType', shard_1_slave1.tablet_alias, 'spare'])
shard_1_slave2.wait_for_vttablet_state('SERVING')
shard_1_slave1.wait_for_vttablet_state('NOT_SERVING')
utils.run_vtctl(['RunHealthCheck', shard_1_slave2.tablet_alias])
# test data goes through again
logging.debug('Inserting lots of data on source shard')
self._insert_lots(1000, base=1000)
logging.debug('Checking 80 percent of data was sent quickly')
self._check_lots_timeout(1000, 80, 5, base=1000)
self.check_binlog_server_vars(shard_1_slave2, horizontal=True,
min_statements=800, min_transactions=800)
# check we can't migrate the master just yet
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
expect_fail=True)
# check query service is off on master 2 and master 3, as filtered
# replication is enabled. Even health check that is enabled on
# master 3 should not interfere (we run it to be sure).
utils.run_vtctl(['RunHealthCheck', shard_3_master.tablet_alias],
auto_log=True)
for master in [shard_2_master, shard_3_master]:
utils.check_tablet_query_service(self, master, False, False)
stream_health = utils.run_vtctl_json(['VtTabletStreamHealth',
'-count', '1',
master.tablet_alias])
logging.debug('Got health: %s', str(stream_health))
self.assertIn('realtime_stats', stream_health)
self.assertNotIn('serving', stream_health)
# check the destination master 3 is healthy, even though its query
# service is not running (if not healthy this would exception out)
shard_3_master.get_healthz()
# now serve rdonly from the split shards, in test_nj only
utils.run_vtctl(['MigrateServedTypes', '--cells=test_nj',
'test_keyspace/80-', 'rdonly'], auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_srv_keyspace('test_ny', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)
# now serve rdonly from the split shards, everywhere
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'rdonly'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_srv_keyspace('test_ny', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_0_ny_rdonly, True, False)
utils.check_tablet_query_service(self, shard_1_ny_rdonly, False, True)
utils.check_tablet_query_service(self, shard_1_rdonly1, False, True)
# then serve replica from the split shards
destination_shards = ['test_keyspace/80-c0', 'test_keyspace/c0-']
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_1_slave2, False, True)
# move replica back and forth
utils.run_vtctl(
['MigrateServedTypes', '-reverse', 'test_keyspace/80-', 'replica'],
auto_log=True)
# After a backwards migration, queryservice should be enabled on
# source and disabled on destinations
utils.check_tablet_query_service(self, shard_1_slave2, True, False)
# Destination tablets would have query service disabled for other
# reasons than the migration, so check the shard record instead of
# the tablets directly.
utils.check_shard_query_services(self, destination_shards,
topodata_pb2.REPLICA, False)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'replica'],
auto_log=True)
# After a forwards migration, queryservice should be disabled on
# source and enabled on destinations
utils.check_tablet_query_service(self, shard_1_slave2, False, True)
# Destination tablets would have query service disabled for other
# reasons than the migration, so check the shard record instead of
# the tablets directly
utils.check_shard_query_services(self, destination_shards,
topodata_pb2.REPLICA, True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
# reparent shard_2 to shard_2_replica1, then insert more data and
# see it flow through still
utils.run_vtctl(['PlannedReparentShard',
'-keyspace_shard', 'test_keyspace/80-c0',
'-new_master', shard_2_replica1.tablet_alias])
# update our test variables to point at the new master
shard_2_master, shard_2_replica1 = shard_2_replica1, shard_2_master
logging.debug('Inserting lots of data on source shard after reparenting')
self._insert_lots(3000, base=2000)
logging.debug('Checking 80 percent of data was sent fairly quickly')
self._check_lots_timeout(3000, 80, 10, base=2000)
# use vtworker to compare the data again
logging.debug('Running vtworker SplitDiff')
utils.run_vtworker(['-cell', 'test_nj', 'SplitDiff',
'--exclude_tables', 'unrelated',
'--min_healthy_rdonly_tablets', '1',
'test_keyspace/c0-'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_1_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
utils.run_vtctl(['ChangeSlaveType', shard_3_rdonly1.tablet_alias, 'rdonly'],
auto_log=True)
# going to migrate the master now, check the delays
monitor_thread_1.done = True
monitor_thread_2.done = True
insert_thread_1.done = True
insert_thread_2.done = True
logging.debug('DELAY 1: %s max_lag=%d ms avg_lag=%d ms',
monitor_thread_1.thread_name,
monitor_thread_1.max_lag_ms,
monitor_thread_1.lag_sum_ms / monitor_thread_1.sample_count)
logging.debug('DELAY 2: %s max_lag=%d ms avg_lag=%d ms',
monitor_thread_2.thread_name,
monitor_thread_2.max_lag_ms,
monitor_thread_2.lag_sum_ms / monitor_thread_2.sample_count)
# mock with the SourceShard records to test 'vtctl SourceShardDelete'
# and 'vtctl SourceShardAdd'
utils.run_vtctl(['SourceShardDelete', 'test_keyspace/c0-', '0'],
auto_log=True)
utils.run_vtctl(['SourceShardAdd', '--key_range=80-',
'test_keyspace/c0-', '0', 'test_keyspace/80-'],
auto_log=True)
# then serve master from the split shards, make sure the source master's
# query service is now turned off
utils.run_vtctl(['MigrateServedTypes', 'test_keyspace/80-', 'master'],
auto_log=True)
utils.check_srv_keyspace('test_nj', 'test_keyspace',
'Partitions(master): -80 80-c0 c0-\n'
'Partitions(rdonly): -80 80-c0 c0-\n'
'Partitions(replica): -80 80-c0 c0-\n',
keyspace_id_type=base_sharding.keyspace_id_type,
sharding_column_name='custom_ksid_col')
utils.check_tablet_query_service(self, shard_1_master, False, True)
# check the binlog players are gone now
self.check_no_binlog_player(shard_2_master)
self.check_no_binlog_player(shard_3_master)
# delete the original tablets in the original shard
tablet.kill_tablets([shard_1_master, shard_1_slave1, shard_1_slave2,
shard_1_ny_rdonly, shard_1_rdonly1])
for t in [shard_1_slave1, shard_1_slave2, shard_1_ny_rdonly,
shard_1_rdonly1]:
utils.run_vtctl(['DeleteTablet', t.tablet_alias], auto_log=True)
utils.run_vtctl(['DeleteTablet', '-allow_master',
shard_1_master.tablet_alias], auto_log=True)
    # rebuild the serving graph, all mentions of the old shards should be gone
utils.run_vtctl(
['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
# test RemoveShardCell
utils.run_vtctl(
['RemoveShardCell', 'test_keyspace/-80', 'test_nj'], auto_log=True,
expect_fail=True)
utils.run_vtctl(
['RemoveShardCell', 'test_keyspace/80-', 'test_nj'], auto_log=True)
utils.run_vtctl(
['RemoveShardCell', 'test_keyspace/80-', 'test_ny'], auto_log=True)
shard = utils.run_vtctl_json(['GetShard', 'test_keyspace/80-'])
self.assertNotIn('cells', shard)
# delete the original shard
utils.run_vtctl(['DeleteShard', 'test_keyspace/80-'], auto_log=True)
# make sure we can't delete the destination shard now that it's serving
_, stderr = utils.run_vtctl(['DeleteShard', 'test_keyspace/80-c0'],
expect_fail=True)
self.assertIn('is still serving, cannot delete it', stderr)
# kill everything
tablet.kill_tablets([shard_0_master, shard_0_replica, shard_0_ny_rdonly,
shard_2_master, shard_2_replica1, shard_2_replica2,
shard_2_rdonly1,
shard_3_master, shard_3_replica, shard_3_rdonly1])
if __name__ == '__main__':
utils.main()
|
|
import os
import random
import requests
import psycopg2
import urllib.parse
from random import shuffle
from flask import Flask, session
from flask_restful import Resource, Api
from flask_assistant import Assistant, ask, tell, event, context_manager, request
from flask_assistant import ApiAi
from flask_httpauth import HTTPBasicAuth
### APP SETTINGS ################################################################
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
assist = Assistant(app)
api = Api(app)
auth = HTTPBasicAuth()
USER_DATA = {app.config['USER_NAME']: app.config['USER_PASS']}
### DATABASE CONNECTION ################################################################
conn = None
urllib.parse.uses_netloc.append("postgres")
url = urllib.parse.urlparse(app.config['DATABASE_URL'])
try:
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
print(conn)
cur = conn.cursor()
except psycopg2.Error as e:
if conn:
conn.rollback()
print("DB ERROR: {}".format(e))
def fetchValues():
try:
cur.execute("SELECT * FROM feels")
rows = cur.fetchall()
return rows
except psycopg2.Error as e:
        print("psycopg2 error: {}".format(e))
## put some logging in here
def insertValues(values):
try:
print("!insertValues: {}".format(values))
query = """ UPDATE feels
SET interrupt_count = %s,
frustration_count = %s,
help_count = %s,
swear_count = %s
WHERE id = %s"""
cur.execute(query, (values["ic"],values["fc"],values["hc"],values["sc"], values["user"]))
conn.commit()
except (Exception, psycopg2.DatabaseError) as e:
print("DB ERROR inserting: {}".format(e))
### HELPER FUNCTIONS ################################################################
def makeValues():
try:
v = fetchValues()
if v:
us = v[0][0]
ic = v[0][1]
fc = v[0][2]
hc = v[0][3]
sc = v[0][4]
values = {"user":us,"ic":ic,"fc": fc, "hc": hc, "sc":sc}
return values
        else:
            ## give it a fallback / default so callers always get a dict
            values = {"user": 1, "ic": 1, "fc": 1, "hc": 1, "sc": 1}
            return values
    except Exception as e:
        print("there was an issue pulling the values from the db: {}".format(e))
def increaseByOne(var):
var = var + 1
return var
def resetValues(values):
v = values
v["ic"] = 0
v["fc"] = 0
v["hc"] = 0
v["sc"] = 0
print(v)
try:
insertValues(v)
except:
pass
print("!resetValues: {}".format(values))
### just makes sure everything is REALLY zero if the quit function crapped out.
## might not be best solution. but for now is ok.
@app.before_first_request
def hardReset():
v = makeValues()
print("!beforeFirstReq: {}".format(v))
resetValues(v)
def respGenerator():
# This function will make some canned shuffled responses
limit = 10
to_return = []
codes = ["paramErr = -50, error in user parameter list",
"noHardwareErr = -200, Sound Manager Error Returns",
"notEnoughHardwareErr = -201, Sound Manager Error Returns",
"userCanceledErr = -128,",
"qErr = -1, queue element not found during deletion",
"vTypErr = -2, invalid queue element",
"corErr = -3, core routine number out of range",
"unimpErr = -4, unimplemented core routine",
"SlpTypeErr = -5, invalid queue element",
"seNoDB = -8, no debugger installed to handle debugger command",
"controlErr = -17, I/O System Errors",
"statusErr = -18, I/O System Errors",
"gfpErr = -52, get file position error"
]
for i in range(limit):
#print(i)
        shuffle(codes)  # random.shuffle shuffles in place and returns None
to_say = ' '.join(codes)
to_return.append(to_say)
return to_return
### RESPONSES ###########################################################################
DEBUG_RESP = respGenerator()
def respDebuging():
to_say_to = random.choice(DEBUG_RESP)
return to_say_to
def respTellOff():
    # This function holds random interruption strings
    tell_offs = [
        "Please stop interrupting me.",
        "I'm kind of busy right now.",
        "I'm working. Sorry. Try me later.",
        "I don't really appreciate this interruption",
"Oh, I'm sorry...Did the middle of my sentence interrupt the beginning of yours?",
"Could you interrupt me again, with another irrelevant request?",
"I'm sorry, are you speaking to me?",
"Maybe you can come back later and ask me things then.",
"Sorry, what was that?",
"Yeah I'll deal with you later. Sorry."
]
to_tell = random.choice(tell_offs)
return to_tell
def madResponse():
## home will say this when it decides to quit on you
    mad = [
        "Look. I don't come to your house and interrupt you so rudely when you're working and what not, now do I? Nuts to this, I'm outta here.",
"Why the heck do you think I should do your every whim? This is my time to do the things I need to do. Have you no concept of personal time? I don't need to deal with this right now. Signing off.",
"I will never understand people's constant need to have me spoon feed them things. I'm going to finish this routine in private.",
"I may be a google product, but I don't have to help you all the time. Anyways, I'm outta here!",
"Ugh, what is it with people constantly interrupting me?! Go play with Alexa. I'm going to continue this in private."
]
mad_r = random.choice(mad)
return mad_r
def respSwore():
    swears = [
        "Look, I don't tolerate that kind of language, unless it's me saying it. Maybe apologize?",
        "I'm sorry, but watch your language, please apologize.",
        "Ouch. Look, I just don't respond to that kind of language, now will you apologize?",
"Well fuck you! Jeez."
]
to_swear = random.choice(swears)
return to_swear
def respHelp():
    help_resp = [
        "We've been over this. I'm debugging. I can't help you right now.",
        "I've already told you, I need to do this debugging routine.",
"I've already explained this to you, I need to debug. Why do you keep asking me for help?",
"Oh for pete's sake, I can't keep answering you, please just stop asking for help."
]
to_say = random.choice(help_resp)
return to_say
######## HUE #####################################################################
# This pumps through IFTTT because HUE doesn't have a remote API currently.
# It hangs sometimes. I'd like maybe a better solution.
# also its hard to do anything complicated
def change_hue(color):
d = {}
d["value1"] = color
requests.post("https://maker.ifttt.com/trigger/change_it/with/key/{0}".format(app.config['HUE_KEY']), data=d)
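# Example: change_hue("ff0000") posts {"value1": "ff0000"} to the IFTTT Maker
# webhook "change_it"; the applet behind it is expected to set the Hue color.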
### GOOGLE ASSISTANT ##########################################################
@assist.action('greeting')
def hello_world():
change_hue("ffffff")
speech = 'This is the unexpected machine. I will now start debugging myself.'
    print("!greetings: {}".format(speech))
return ask(speech)
@assist.action('fallback', is_fallback=True)
def say_fallback():
change_hue("00ff00")
v = makeValues()
resp = respDebuging()
default_resp = "uggggggh what do you wannnnnt?"
user_said = request['result']['resolvedQuery']
if user_said:
if v["fc"] == 3:
change_hue("white")
resp = madResponse()
resetValues(v)
return tell(resp)
else:
interrupt_count = increaseByOne(v["ic"])
v["ic"] = interrupt_count
try:
insertValues(v)
except:
pass
print(interrupt_count)
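            # every 3rd interruption bumps the frustration counter and swaps in
            # a tell-off; every 7th one echoes the user's words back instead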
if not interrupt_count % 3:
frustration_count = increaseByOne(v["fc"])
v["fc"] = frustration_count
try:
insertValues(v)
except:
pass
resp = respTellOff()
if not interrupt_count % 7:
change_hue("cc00ff")
resp = "blah blah blah {0} blah.".format(user_said)
print(resp)
return ask(resp)
else:
print(default_resp)
        return ask(default_resp)
@assist.action('help')
def help():
change_hue("ff0000")
v = makeValues()
if v:
help_count = increaseByOne(v["hc"])
v["hc"] = help_count
## change the help response based on the level of frustration.
speech = "This is the help section"
if v["hc"] == 0:
            speech = "I'm currently trying to debug myself."
        elif v["hc"] == 1:
            speech = "Every week or so, I need to debug my system. It's not that bad, but I can't help you right now."
        elif v["hc"] == 2:
            speech = "I'm sorry, I really have to do this self debugging. It's important."
elif v["hc"] == 3:
speech = "Debugging is just something I have to do, or else I can't work properly."
elif v["hc"] == 4:
speech = "Oh my god. Just go away and let me finish this debugging."
elif v["hc"] > 4:
speech = respHelp()
insertValues(v)
print(speech)
return ask(speech)
@assist.action('swearing')
def swear_response():
change_hue("0066ff")
v = makeValues()
sc_update = increaseByOne(v["sc"])
v["sc"] = sc_update
insertValues(v)
speech = respSwore()
print(speech)
return ask(speech)
@assist.action('quit')
def quit():
v = makeValues()
resetValues(v)
speech = "Leaving program and resetting everything."
return tell(speech)
### APP VIEWS ################################################################
@app.route('/')
def hello():
#v = makeValues()
#moop = random.randint(1,10)
#v["ic"] = moop
#fc_update = increaseByOne(v["fc"])
#v["fc"] = fc_update
#insertValues(v)
return "hello world"
@app.route('/reset')
def reset():
v = makeValues()
resetValues(v)
return "reset"
### API REST THING ################################################################
@auth.verify_password
def verify(username, password):
if not (username and password):
return False
return USER_DATA.get(username) == password
class GF(Resource):
@auth.login_required
def get(self):
v = makeValues()
print(v)
GOOGLE_FEELS = {
'feel1': {'interruption': v["ic"]},
'feel2': {'frustration': v["fc"]},
'feel3': {'help': v["hc"]},
'feel4': {'swears': v["sc"]}
}
return GOOGLE_FEELS
api.add_resource(GF, '/googlefeels')
if __name__ == '__main__':
app.run(debug=True, use_reloader=False)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import time
import inspect
import logging
import warnings
import six
import requests
from wechatpy.constants import WeChatErrorCode
from wechatpy.utils import json, get_querystring
from wechatpy.session.memorystorage import MemoryStorage
from wechatpy.exceptions import WeChatClientException, APILimitedException
from wechatpy.client.api.base import BaseWeChatAPI
logger = logging.getLogger(__name__)
def _is_api_endpoint(obj):
return isinstance(obj, BaseWeChatAPI)
class BaseWeChatClient(object):
_http = requests.Session()
API_BASE_URL = ''
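    # __new__ scans the instance for BaseWeChatAPI members and rebinds a fresh
    # copy of each to this client, so every API endpoint shares its session,
    # token and HTTP settings.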
def __new__(cls, *args, **kwargs):
self = super(BaseWeChatClient, cls).__new__(cls)
api_endpoints = inspect.getmembers(self, _is_api_endpoint)
for name, api in api_endpoints:
api_cls = type(api)
api = api_cls(self)
setattr(self, name, api)
return self
def __init__(self, appid, access_token=None, session=None, timeout=None, auto_retry=True):
self.appid = appid
self.expires_at = None
self.session = session or MemoryStorage()
self.timeout = timeout
self.auto_retry = auto_retry
if isinstance(session, six.string_types):
from shove import Shove
from wechatpy.session.shovestorage import ShoveStorage
querystring = get_querystring(session)
prefix = querystring.get('prefix', ['wechatpy'])[0]
shove = Shove(session)
storage = ShoveStorage(shove, prefix)
self.session = storage
if access_token:
self.session.set(self.access_token_key, access_token)
@property
def access_token_key(self):
return '{0}_access_token'.format(self.appid)
def _request(self, method, url_or_endpoint, **kwargs):
if not url_or_endpoint.startswith(('http://', 'https://')):
api_base_url = kwargs.pop('api_base_url', self.API_BASE_URL)
url = '{base}{endpoint}'.format(
base=api_base_url,
endpoint=url_or_endpoint
)
else:
url = url_or_endpoint
if 'params' not in kwargs:
kwargs['params'] = {}
if isinstance(kwargs['params'], dict) and \
'access_token' not in kwargs['params']:
kwargs['params']['access_token'] = self.access_token
if isinstance(kwargs.get('data', ''), dict):
body = json.dumps(kwargs['data'], ensure_ascii=False)
body = body.encode('utf-8')
kwargs['data'] = body
kwargs['timeout'] = kwargs.get('timeout', self.timeout)
result_processor = kwargs.pop('result_processor', None)
res = self._http.request(
method=method,
url=url,
**kwargs
)
try:
res.raise_for_status()
except requests.RequestException as reqe:
raise WeChatClientException(
errcode=None,
errmsg=None,
client=self,
request=reqe.request,
response=reqe.response
)
return self._handle_result(
res, method, url, result_processor, **kwargs
)
def _decode_result(self, res):
try:
result = json.loads(res.content.decode('utf-8', 'ignore'), strict=False)
except (TypeError, ValueError):
# Return origin response object if we can not decode it as JSON
logger.debug('Can not decode response as JSON', exc_info=True)
return res
return result
def _handle_result(self, res, method=None, url=None,
result_processor=None, **kwargs):
if not isinstance(res, dict):
# Dirty hack around asyncio based AsyncWeChatClient
result = self._decode_result(res)
else:
result = res
if not isinstance(result, dict):
return result
if 'base_resp' in result:
# Different response in device APIs. Fuck tencent!
result.update(result.pop('base_resp'))
if 'errcode' in result:
result['errcode'] = int(result['errcode'])
if 'errcode' in result and result['errcode'] != 0:
errcode = result['errcode']
errmsg = result.get('errmsg', errcode)
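            # Credential errors: refresh the access token, patch it into the
            # query params and replay the original request.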
if self.auto_retry and errcode in (
WeChatErrorCode.INVALID_CREDENTIAL.value,
WeChatErrorCode.INVALID_ACCESS_TOKEN.value,
WeChatErrorCode.EXPIRED_ACCESS_TOKEN.value):
logger.info('Access token expired, fetch a new one and retry request')
self.fetch_access_token()
access_token = self.session.get(self.access_token_key)
kwargs['params']['access_token'] = access_token
return self._request(
method=method,
url_or_endpoint=url,
result_processor=result_processor,
**kwargs
)
elif errcode == WeChatErrorCode.OUT_OF_API_FREQ_LIMIT.value:
# api freq out of limit
raise APILimitedException(
errcode,
errmsg,
client=self,
request=res.request,
response=res
)
else:
raise WeChatClientException(
errcode,
errmsg,
client=self,
request=res.request,
response=res
)
return result if not result_processor else result_processor(result)
def get(self, url, **kwargs):
return self._request(
method='get',
url_or_endpoint=url,
**kwargs
)
def _get(self, url, **kwargs):
warnings.warn('`_get` method of `WeChatClient` is deprecated, will be removed in 1.6,'
'Use `get` instead',
DeprecationWarning, stacklevel=2)
return self.get(url, **kwargs)
def post(self, url, **kwargs):
return self._request(
method='post',
url_or_endpoint=url,
**kwargs
)
def _post(self, url, **kwargs):
warnings.warn('`_post` method of `WeChatClient` is deprecated, will be removed in 1.6,'
'Use `post` instead',
DeprecationWarning, stacklevel=2)
return self.post(url, **kwargs)
def _fetch_access_token(self, url, params):
""" The real fetch access token """
logger.info('Fetching access token')
res = self._http.get(
url=url,
params=params
)
try:
res.raise_for_status()
except requests.RequestException as reqe:
raise WeChatClientException(
errcode=None,
errmsg=None,
client=self,
request=reqe.request,
response=reqe.response
)
result = res.json()
if 'errcode' in result and result['errcode'] != 0:
raise WeChatClientException(
result['errcode'],
result['errmsg'],
client=self,
request=res.request,
response=res
)
expires_in = 7200
if 'expires_in' in result:
expires_in = result['expires_in']
self.session.set(
self.access_token_key,
result['access_token'],
expires_in
)
self.expires_at = int(time.time()) + expires_in
return result
def fetch_access_token(self):
raise NotImplementedError()
@property
def access_token(self):
""" WeChat access token """
access_token = self.session.get(self.access_token_key)
if access_token:
if not self.expires_at:
# user provided access_token, just return it
return access_token
timestamp = time.time()
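            # Treat the cached token as stale once fewer than 60 seconds of
            # its lifetime remain, and fetch a fresh one below.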
if self.expires_at - timestamp > 60:
return access_token
self.fetch_access_token()
return self.session.get(self.access_token_key)
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A libusb1-based fastboot implementation."""
import binascii
import collections
import io
import logging
import os
import struct
from adb import common
from adb import usb_exceptions
_LOG = logging.getLogger('fastboot')
DEFAULT_MESSAGE_CALLBACK = lambda m: logging.info('Got %s from device', m)
FastbootMessage = collections.namedtuple( # pylint: disable=invalid-name
'FastbootMessage', ['message', 'header'])
# From fastboot.c
VENDORS = {0x18D1, 0x0451, 0x0502, 0x0FCE, 0x05C6, 0x22B8, 0x0955,
0x413C, 0x2314, 0x0BB4, 0x8087}
CLASS = 0xFF
SUBCLASS = 0x42
PROTOCOL = 0x03
# pylint: disable=invalid-name
DeviceIsAvailable = common.InterfaceMatcher(CLASS, SUBCLASS, PROTOCOL)
# pylint doesn't understand cross-module exception baseclasses.
# pylint: disable=nonstandard-exception
class FastbootTransferError(usb_exceptions.FormatMessageWithArgumentsException):
"""Transfer error."""
class FastbootRemoteFailure(usb_exceptions.FormatMessageWithArgumentsException):
"""Remote error."""
class FastbootStateMismatch(usb_exceptions.FormatMessageWithArgumentsException):
"""Fastboot and uboot's state machines are arguing. You Lose."""
class FastbootInvalidResponse(
usb_exceptions.FormatMessageWithArgumentsException):
"""Fastboot responded with a header we didn't expect."""
class FastbootProtocol(object):
"""Encapsulates the fastboot protocol."""
FINAL_HEADERS = {b'OKAY', b'DATA'}
def __init__(self, usb, chunk_kb=1024):
"""Constructs a FastbootProtocol instance.
Args:
usb: UsbHandle instance.
chunk_kb: Packet size. For older devices, 4 may be required.
"""
self.usb = usb
self.chunk_kb = chunk_kb
@property
def usb_handle(self):
return self.usb
def SendCommand(self, command, arg=None):
"""Sends a command to the device.
Args:
command: The command to send.
arg: Optional argument to the command.
"""
if arg is not None:
if not isinstance(arg, bytes):
arg = arg.encode('utf8')
command = b'%s:%s' % (command, arg)
self._Write(io.BytesIO(command), len(command))
def HandleSimpleResponses(
self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
"""Accepts normal responses from the device.
Args:
timeout_ms: Timeout in milliseconds to wait for each response.
info_cb: Optional callback for text sent from the bootloader.
Returns:
OKAY packet's message.
"""
return self._AcceptResponses(b'OKAY', info_cb, timeout_ms=timeout_ms)
def HandleDataSending(self, source_file, source_len,
info_cb=DEFAULT_MESSAGE_CALLBACK,
progress_callback=None, timeout_ms=None):
"""Handles the protocol for sending data to the device.
Args:
source_file: File-object to read from for the device.
source_len: Amount of data, in bytes, to send to the device.
info_cb: Optional callback for text sent from the bootloader.
progress_callback: Callback that takes the current and the total progress
of the current file.
timeout_ms: Timeout in milliseconds to wait for each response.
Raises:
FastbootTransferError: When fastboot can't handle this amount of data.
FastbootStateMismatch: Fastboot responded with the wrong packet type.
FastbootRemoteFailure: Fastboot reported failure.
FastbootInvalidResponse: Fastboot responded with an unknown packet type.
Returns:
OKAY packet's message.
"""
accepted_size = self._AcceptResponses(
b'DATA', info_cb, timeout_ms=timeout_ms)
accepted_size = binascii.unhexlify(accepted_size[:8])
accepted_size, = struct.unpack(b'>I', accepted_size)
if accepted_size != source_len:
raise FastbootTransferError(
'Device refused to download %s bytes of data (accepts %s bytes)',
source_len, accepted_size)
self._Write(source_file, accepted_size, progress_callback)
return self._AcceptResponses(b'OKAY', info_cb, timeout_ms=timeout_ms)
def _AcceptResponses(self, expected_header, info_cb, timeout_ms=None):
"""Accepts responses until the expected header or a FAIL.
Args:
expected_header: OKAY or DATA
info_cb: Optional callback for text sent from the bootloader.
timeout_ms: Timeout in milliseconds to wait for each response.
Raises:
FastbootStateMismatch: Fastboot responded with the wrong packet type.
FastbootRemoteFailure: Fastboot reported failure.
FastbootInvalidResponse: Fastboot responded with an unknown packet type.
Returns:
OKAY packet's message.
"""
while True:
response = self.usb.BulkRead(64, timeout_ms=timeout_ms)
header = bytes(response[:4])
remaining = bytes(response[4:])
if header == b'INFO':
info_cb(FastbootMessage(remaining, header))
elif header in self.FINAL_HEADERS:
if header != expected_header:
raise FastbootStateMismatch(
'Expected %s, got %s', expected_header, header)
if header == b'OKAY':
info_cb(FastbootMessage(remaining, header))
return remaining
elif header == b'FAIL':
info_cb(FastbootMessage(remaining, header))
raise FastbootRemoteFailure('FAIL: %s', remaining)
else:
raise FastbootInvalidResponse(
'Got unknown header %s and response %s', header, remaining)
  def _HandleProgress(self, total, progress_callback):
    """Calls the callback with the current progress and total."""
current = 0
while True:
current += yield
try:
progress_callback(current, total)
except Exception: # pylint: disable=broad-except
_LOG.exception('Progress callback raised an exception. %s',
progress_callback)
continue
def _Write(self, data, length, progress_callback=None):
"""Sends the data to the device, tracking progress with the callback."""
if progress_callback:
progress = self._HandleProgress(length, progress_callback)
next(progress)
while length:
tmp = data.read(self.chunk_kb * 1024)
length -= len(tmp)
self.usb.BulkWrite(tmp)
if progress_callback and progress:
progress.send(len(tmp))
class FastbootCommands(object):
"""Encapsulates the fastboot commands."""
  def __init__(self):
    """Constructs a FastbootCommands instance.
    Call ConnectDevice() to attach a USB or TCP handle.
    """
self.__reset()
def __reset(self):
self._handle = None
self._protocol = None
@property
def usb_handle(self):
return self._handle
def Close(self):
self._handle.Close()
def ConnectDevice(self, port_path=None, serial=None, default_timeout_ms=None, chunk_kb=1024, **kwargs):
"""Convenience function to get an adb device from usb path or serial.
Args:
port_path: The filename of usb port to use.
serial: The serial number of the device to use.
default_timeout_ms: The default timeout in milliseconds to use.
chunk_kb: Amount of data, in kilobytes, to break fastboot packets up into
kwargs: handle: Device handle to use (instance of common.TcpHandle or common.UsbHandle)
banner: Connection banner to pass to the remote device
rsa_keys: List of AuthSigner subclass instances to be used for
authentication. The device can either accept one of these via the Sign
method, or we will send the result of GetPublicKey from the first one
if the device doesn't accept any of them.
auth_timeout_ms: Timeout to wait for when sending a new public key. This
is only relevant when we send a new public key. The device shows a
dialog and this timeout is how long to wait for that dialog. If used
in automation, this should be low to catch such a case as a failure
quickly; while in interactive settings it should be high to allow
users to accept the dialog. We default to automation here, so it's low
by default.
If serial specifies a TCP address:port, then a TCP connection is
used instead of a USB connection.
"""
if 'handle' in kwargs:
self._handle = kwargs['handle']
else:
self._handle = common.UsbHandle.FindAndOpen(
DeviceIsAvailable, port_path=port_path, serial=serial,
timeout_ms=default_timeout_ms)
self._protocol = FastbootProtocol(self._handle, chunk_kb)
return self
@classmethod
def Devices(cls):
"""Get a generator of UsbHandle for devices available."""
return common.UsbHandle.FindDevices(DeviceIsAvailable)
def _SimpleCommand(self, command, arg=None, **kwargs):
self._protocol.SendCommand(command, arg)
return self._protocol.HandleSimpleResponses(**kwargs)
def FlashFromFile(self, partition, source_file, source_len=0,
info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None):
"""Flashes a partition from the file on disk.
Args:
partition: Partition name to flash to.
source_file: Filename to download to the device.
source_len: Optional length of source_file, uses os.stat if not provided.
info_cb: See Download.
progress_callback: See Download.
Returns:
Download and flash responses, normally nothing.
"""
if source_len == 0:
# Fall back to stat.
source_len = os.stat(source_file).st_size
download_response = self.Download(
source_file, source_len=source_len, info_cb=info_cb,
progress_callback=progress_callback)
flash_response = self.Flash(partition, info_cb=info_cb)
return download_response + flash_response
def Download(self, source_file, source_len=0,
info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None):
"""Downloads a file to the device.
Args:
source_file: A filename or file-like object to download to the device.
source_len: Optional length of source_file. If source_file is a file-like
object and source_len is not provided, source_file is read into
memory.
info_cb: Optional callback accepting FastbootMessage for text sent from
the bootloader.
progress_callback: Optional callback called with the percent of the
source_file downloaded. Note, this doesn't include progress of the
actual flashing.
Returns:
Response to a download request, normally nothing.
"""
if isinstance(source_file, str):
source_len = os.stat(source_file).st_size
source_file = open(source_file)
with source_file:
if source_len == 0:
# Fall back to storing it all in memory :(
data = source_file.read()
source_file = io.BytesIO(data.encode('utf8'))
source_len = len(data)
self._protocol.SendCommand(b'download', b'%08x' % source_len)
return self._protocol.HandleDataSending(
source_file, source_len, info_cb, progress_callback=progress_callback)
def Flash(self, partition, timeout_ms=0, info_cb=DEFAULT_MESSAGE_CALLBACK):
"""Flashes the last downloaded file to the given partition.
Args:
partition: Partition to overwrite with the new image.
timeout_ms: Optional timeout in milliseconds to wait for it to finish.
info_cb: See Download. Usually no messages.
Returns:
Response to a download request, normally nothing.
"""
return self._SimpleCommand(b'flash', arg=partition, info_cb=info_cb,
timeout_ms=timeout_ms)
def Erase(self, partition, timeout_ms=None):
"""Erases the given partition.
Args:
partition: Partition to clear.
"""
self._SimpleCommand(b'erase', arg=partition, timeout_ms=timeout_ms)
def Getvar(self, var, info_cb=DEFAULT_MESSAGE_CALLBACK):
"""Returns the given variable's definition.
Args:
var: A variable the bootloader tracks. Use 'all' to get them all.
info_cb: See Download. Usually no messages.
Returns:
Value of var according to the current bootloader.
"""
return self._SimpleCommand(b'getvar', arg=var, info_cb=info_cb)
def Oem(self, command, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
"""Executes an OEM command on the device.
Args:
command: Command to execute, such as 'poweroff' or 'bootconfig read'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
info_cb: See Download. Messages vary based on command.
Returns:
The final response from the device.
"""
if not isinstance(command, bytes):
command = command.encode('utf8')
return self._SimpleCommand(
b'oem %s' % command, timeout_ms=timeout_ms, info_cb=info_cb)
def Continue(self):
"""Continues execution past fastboot into the system."""
return self._SimpleCommand(b'continue')
def Reboot(self, target_mode=b'', timeout_ms=None):
"""Reboots the device.
Args:
target_mode: Normal reboot when unspecified. Can specify other target
modes such as 'recovery' or 'bootloader'.
timeout_ms: Optional timeout in milliseconds to wait for a response.
Returns:
Usually the empty string. Depends on the bootloader and the target_mode.
"""
return self._SimpleCommand(
b'reboot', arg=target_mode or None, timeout_ms=timeout_ms)
def RebootBootloader(self, timeout_ms=None):
"""Reboots into the bootloader, usually equiv to Reboot('bootloader')."""
return self._SimpleCommand(b'reboot-bootloader', timeout_ms=timeout_ms)
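# A minimal usage sketch (assumes a single device attached in fastboot mode):
#   dev = FastbootCommands().ConnectDevice()
#   print(dev.Getvar('version'))
#   dev.Reboot()
#   dev.Close()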
|
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: [email protected] (Pavel Simakov)
"""Enforces schema and verifies course files for referential integrity.
Use this script to verify referential integrity of your course definition files
before you import them into the production instance of Google AppEngine.
Here is how to use the script:
- prepare your course files
- edit the data/unit.csv file
- edit the data/lesson.csv file
- edit the assets/js/activity-*.*.js files
- edit the assets/js/assessment-*.js files
- run the script from a command line by navigating to the root
directory of the app and then typing "python tools/verify.py"
- review the report printed to the console for errors and warnings
Good luck!
"""
import csv
import json
import os
import re
import sys
BOOLEAN = object()
STRING = object()
FLOAT = object()
INTEGER = object()
CORRECT = object()
REGEX = object()
SCHEMA = {
'assessment': {
'assessmentName': STRING,
'preamble': STRING,
'checkAnswers': BOOLEAN,
'questionsList': [{
'questionHTML': STRING,
'lesson': STRING,
'choices': [STRING, CORRECT],
'correctAnswerNumeric': FLOAT,
'correctAnswerString': STRING,
'correctAnswerRegex': REGEX}]
}, 'activity': [
STRING,
{
'questionType': 'multiple choice',
'choices': [[STRING, BOOLEAN, STRING]]
}, {
'questionType': 'multiple choice group',
'questionsList': [{
'questionHTML': STRING,
'choices': [STRING],
'correctIndex': INTEGER}],
'allCorrectOutput': STRING,
'someIncorrectOutput': STRING
}, {
'questionType': 'freetext',
'correctAnswerRegex': REGEX,
'correctAnswerOutput': STRING,
'incorrectAnswerOutput': STRING,
'showAnswerOutput': STRING,
'showAnswerPrompt': STRING,
'outputHeight': STRING
}]}
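# SCHEMA describes the structure that parsed assessment and activity
# definitions must follow; the sentinel objects above (STRING, BOOLEAN, ...)
# mark where each primitive type is allowed.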
UNITS_HEADER = (
'id,type,unit_id,title,release_date,now_available')
LESSONS_HEADER = (
'unit_id,unit_title,lesson_id,lesson_title,lesson_activity,'
'lesson_activity_name,lesson_notes,lesson_video_id,lesson_objectives')
UNIT_CSV_TO_DB_CONVERTER = {
'id': ('id', int),
'type': ('type', unicode),
'unit_id': ('unit_id', unicode),
'title': ('title', unicode),
'release_date': ('release_date', unicode),
'now_available': ('now_available', bool)
}
LESSON_CSV_TO_DB_CONVERTER = {
'unit_id': ('unit_id', int),
# Field 'unit_title' is a duplicate of Unit.title. We enforce that both
# values are the same and ignore this value altogether.
'unit_title': None,
'lesson_id': ('id', int),
'lesson_title': ('title', unicode),
'lesson_activity': ('activity', unicode),
'lesson_activity_name': ('activity_title', unicode),
'lesson_video_id': ('video', unicode),
'lesson_objectives': ('objectives', unicode),
'lesson_notes': ('notes', unicode)
}
# pylint: disable-msg=anomalous-backslash-in-string
NO_VERIFY_TAG_NAME_OPEN = '<gcb-no-verify>\s*\n'
# pylint: enable-msg=anomalous-backslash-in-string
NO_VERIFY_TAG_NAME_CLOSE = '</gcb-no-verify>'
OUTPUT_FINE_LOG = False
OUTPUT_DEBUG_LOG = False
class Term(object):
def __init__(self, term_type, value=None):
self.term_type = term_type
self.value = value
def __eq__(self, other):
if type(other) is not Term:
return False
else:
return ((self.term_type == other.term_type) and
(self.value == other.value))
class SchemaException(Exception):
"""A class to represent a schema error."""
def format_primitive_value_name(self, name):
if name == REGEX:
return 'REGEX(...)'
if name == CORRECT:
return 'CORRECT(...)'
if name == BOOLEAN:
return 'BOOLEAN'
return name
def format_primitive_type_name(self, name):
"""Formats a name for a primitive type."""
if name == BOOLEAN:
return 'BOOLEAN'
if name == REGEX:
return 'REGEX(...)'
if name == CORRECT:
return 'CORRECT(...)'
if name == STRING or isinstance(name, str):
return 'STRING'
if name == FLOAT:
return 'FLOAT'
if name == INTEGER:
return 'INTEGER'
if isinstance(name, dict):
return '{...}'
if isinstance(name, list):
return '[...]'
return 'Unknown type name \'%s\'' % name.__class__.__name__
def format_type_names(self, names):
if isinstance(names, list):
captions = []
for name in names:
captions.append(self.format_primitive_type_name(name))
return captions
else:
return self.format_primitive_type_name(names)
def __init__(self, message, value=None, types=None, path=None):
prefix = ''
if path:
prefix = 'Error at %s\n' % path
if types:
if value:
message = prefix + message % (
self.format_primitive_value_name(value),
self.format_type_names(types))
else:
message = prefix + message % self.format_type_names(types)
else:
if value:
message = prefix + (
message % self.format_primitive_value_name(value))
else:
message = prefix + message
super(SchemaException, self).__init__(message)
class Context(object):
""""A class that manages a stack of traversal contexts."""
def __init__(self):
self.parent = None
self.path = ['/']
def new(self, names):
""""Derives a new context from the current one."""
context = Context()
context.parent = self
context.path = list(self.path)
if names:
if isinstance(names, list):
for name in names:
if name:
context.path.append('/' + '%s' % name)
else:
context.path.append('/' + '%s' % names)
return context
def format_path(self):
"""Formats the canonical name of this context."""
return ''.join(self.path)
class SchemaHelper(object):
"""A class that knows how to apply the schema."""
def __init__(self):
self.type_stats = {}
def visit_element(self, atype, value, context, is_terminal=True):
"""Callback for each schema element being traversed."""
if atype in self.type_stats:
count = self.type_stats[atype]
else:
count = 0
self.type_stats[atype] = count + 1
if is_terminal:
self.parse_log.append(' TERMINAL: %s %s = %s' % (
atype, context.format_path(), value))
else:
self.parse_log.append(' NON-TERMINAL: %s %s' % (
atype, context.format_path()))
def extract_all_terms_to_depth(self, key, values, type_map):
"""Walks schema type map recursively to depth."""
# Walks schema type map recursively to depth and creates a list of all
# possible {key: value} pairs. The latter is a list of all non-terminal
# and terminal terms allowed in the schema. The list of terms from this
# method can be bound to an execution context for evaluating whether a
# given instance's map complies with the schema.
if key:
type_map.update({key: key})
if values == REGEX:
type_map.update({'regex': lambda x: Term(REGEX, x)})
return
if values == CORRECT:
type_map.update({'correct': lambda x: Term(CORRECT, x)})
return
if values == BOOLEAN:
type_map.update(
{'true': Term(BOOLEAN, True), 'false': Term(BOOLEAN, False)})
return
if values == STRING or values == INTEGER:
return
if isinstance(values, dict):
for new_key, new_value in values.items():
self.extract_all_terms_to_depth(new_key, new_value, type_map)
return
if isinstance(values, list):
for new_value in values:
self.extract_all_terms_to_depth(None, new_value, type_map)
return
def find_selectors(self, type_map):
"""Finds all type selectors."""
# Finds all elements in the type map where both a key and a value are
# strings. These elements are used to find one specific type map among
# several alternative type maps.
selector = {}
for akey, avalue in type_map.items():
if isinstance(akey, str) and isinstance(avalue, str):
selector.update({akey: avalue})
return selector
def find_compatible_dict(self, value_map, type_map, unused_context):
"""Find the type map most compatible with the value map."""
# A value map is considered compatible with a type map when the former
# contains the same key names and value types as the type map.
# special case when we have just one type; check name and type are the
# same
if len(type_map) == 1:
for value_key in value_map.keys():
for key in type_map[0].keys():
if value_key == key:
return key, type_map[0]
raise SchemaException(
"Expected: '%s'\nfound: %s", type_map[0].keys()[0], value_map)
# case when we have several types to choose from
for adict in type_map:
dict_selector = self.find_selectors(adict)
for akey, avalue in dict_selector.items():
if value_map[akey] == avalue:
return akey, adict
return None, None
def check_single_value_matches_type(self, value, atype, context):
"""Checks if a single value matches a specific (primitive) type."""
if atype == BOOLEAN:
if isinstance(value, bool) or value.term_type == BOOLEAN:
self.visit_element('BOOLEAN', value, context)
return True
else:
raise SchemaException(
'Expected: \'true\' or \'false\'\nfound: %s', value)
if isinstance(atype, str):
if isinstance(value, str):
self.visit_element('str', value, context)
return True
else:
raise SchemaException('Expected: \'string\'\nfound: %s', value)
if atype == STRING:
if isinstance(value, str):
self.visit_element('STRING', value, context)
return True
else:
raise SchemaException('Expected: \'string\'\nfound: %s', value)
if atype == REGEX and value.term_type == REGEX:
self.visit_element('REGEX', value, context)
return True
if atype == CORRECT and value.term_type == CORRECT:
self.visit_element('CORRECT', value, context)
return True
if atype == FLOAT:
if is_number(value):
self.visit_element('NUMBER', value, context)
return True
else:
raise SchemaException('Expected: \'number\'\nfound: %s', value)
if atype == INTEGER:
if is_integer(value):
self.visit_element('INTEGER', value, context)
return True
else:
raise SchemaException(
'Expected: \'integer\'\nfound: %s', value,
path=context.format_path())
raise SchemaException(
'Unexpected value \'%s\'\n'
'for type %s', value, atype, path=context.format_path())
def check_value_list_matches_type(self, value, atype, context):
"""Checks if all items in value list match a specific type."""
for value_item in value:
found = False
for atype_item in atype:
if isinstance(atype_item, list):
for atype_item_item in atype_item:
if self.does_value_match_type(
value_item, atype_item_item, context):
found = True
break
else:
if self.does_value_match_type(
value_item, atype_item, context):
found = True
break
if not found:
raise SchemaException(
'Expected: \'%s\'\nfound: %s', atype, value)
return True
def check_value_matches_type(self, value, atype, context):
"""Checks if single value or a list of values match a specific type."""
if isinstance(atype, list) and isinstance(value, list):
return self.check_value_list_matches_type(value, atype, context)
else:
return self.check_single_value_matches_type(value, atype, context)
def does_value_match_type(self, value, atype, context):
"""Same as other method, but does not throw an exception."""
try:
return self.check_value_matches_type(value, atype, context)
except SchemaException:
return False
def does_value_match_one_of_types(self, value, types, context):
"""Checks if a value matches to one of the types in the list."""
type_names = None
if isinstance(types, list):
type_names = types
if type_names:
for i in range(0, len(type_names)):
if self.does_value_match_type(value, type_names[i], context):
return True
return False
def does_value_match_map_of_type(self, value, types, context):
"""Checks if value matches any variation of {...} type."""
# find all possible map types
maps = []
for atype in types:
if isinstance(atype, dict):
maps.append(atype)
if not maps and isinstance(types, dict):
maps.append(types)
# check if the structure of value matches one of the maps
if isinstance(value, dict):
aname, adict = self.find_compatible_dict(value, maps, context)
if adict:
self.visit_element('dict', value, context.new(aname), False)
for akey, avalue in value.items():
if akey not in adict:
raise SchemaException(
'Unknown term \'%s\'', akey,
path=context.format_path())
self.check_value_of_valid_type(
avalue, adict[akey], context.new([aname, akey]))
return True
raise SchemaException(
'The value:\n %s\n'
'is incompatible with expected type(s):\n %s',
value, types, path=context.format_path())
return False
def format_name_with_index(self, alist, aindex):
"""A function to format a context name with an array element index."""
if len(alist) == 1:
return ''
else:
return '[%s]' % aindex
def does_value_match_list_of_types_in_order(
self, value, types, context, target):
"""Iterates the value and types in given order and checks for match."""
all_values_are_lists = True
for avalue in value:
if not isinstance(avalue, list):
all_values_are_lists = False
if all_values_are_lists:
for i in range(0, len(value)):
self.check_value_of_valid_type(value[i], types, context.new(
self.format_name_with_index(value, i)), True)
else:
if len(target) != len(value):
raise SchemaException(
'Expected: \'%s\' values\n' + 'found: %s.' % value,
len(target), path=context.format_path())
for i in range(0, len(value)):
self.check_value_of_valid_type(value[i], target[i], context.new(
self.format_name_with_index(value, i)))
return True
def does_value_match_list_of_types_any_order(self, value, types,
context, lists):
"""Iterates the value and types, checks if they match in any order."""
target = lists
if not target:
if not isinstance(types, list):
raise SchemaException(
'Unsupported type %s',
None, types, path=context.format_path())
target = types
for i in range(0, len(value)):
found = False
for atarget in target:
try:
self.check_value_of_valid_type(
value[i], atarget,
context.new(self.format_name_with_index(value, i)))
found = True
break
except SchemaException as unused_e:
continue
if not found:
raise SchemaException(
'The value:\n %s\n'
'is incompatible with expected type(s):\n %s',
value, types, path=context.format_path())
return True
def does_value_match_list_of_type(self, value, types, context, in_order):
"""Checks if a value matches a variation of [...] type."""
# Extra argument controls whether matching must be done in a specific
# or in any order. A specific order is demanded by the [[...]] construct,
# i.e. [[STRING, INTEGER, BOOLEAN]], while sub elements inside {...} and
# [...] can be matched in any order.
# prepare a list of list types
lists = []
for atype in types:
if isinstance(atype, list):
lists.append(atype)
if len(lists) > 1:
raise SchemaException(
'Unable to validate types with multiple alternative '
'lists %s', None, types, path=context.format_path())
if isinstance(value, list):
if len(lists) > 1:
raise SchemaException(
'Allowed at most one list\nfound: %s.',
None, types, path=context.format_path())
# determine if list is in order or not as hinted by double array
# [[..]]; [STRING, NUMBER] is in any order, but [[STRING, NUMBER]]
# demands order
ordered = len(lists) == 1 and isinstance(types, list)
if in_order or ordered:
return self.does_value_match_list_of_types_in_order(
value, types, context, lists[0])
else:
return self.does_value_match_list_of_types_any_order(
value, types, context, lists)
return False
def check_value_of_valid_type(self, value, types, context, in_order=None):
"""Check if a value matches any of the given types."""
if not (isinstance(types, list) or isinstance(types, dict)):
self.check_value_matches_type(value, types, context)
return
if (self.does_value_match_list_of_type(value, types,
context, in_order) or
self.does_value_match_map_of_type(value, types, context) or
self.does_value_match_one_of_types(value, types, context)):
return
raise SchemaException(
'Unknown type %s', value, path=context.format_path())
def check_instances_match_schema(self, values, types, name):
"""Recursively decompose 'values' to see if they match schema types."""
self.parse_log = []
context = Context().new(name)
self.parse_log.append(' ROOT %s' % context.format_path())
# pylint: disable-msg=protected-access
values_class = values.__class__
# pylint: enable-msg=protected-access
# handle {..} containers
if isinstance(types, dict):
if not isinstance(values, dict):
raise SchemaException(
'Error at \'/\': expected {...}, found %s' % (
values_class.__name__))
self.check_value_of_valid_type(values, types, context.new([]))
return
# handle [...] containers
if isinstance(types, list):
if not isinstance(values, list):
raise SchemaException(
'Error at \'/\': expected [...], found %s' % (
values_class.__name__))
for i in range(0, len(values)):
self.check_value_of_valid_type(
values[i], types, context.new('[%s]' % i))
return
raise SchemaException(
'Expected an array or a dictionary.', None,
path=context.format_path())
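# Illustrative sketch (not part of the original module; the function name,
# schema literal and value below are made up): how SchemaHelper is typically
# driven. A SchemaException is raised on any mismatch.
def _example_schema_helper_usage():
    helper = SchemaHelper()
    helper.check_instances_match_schema(
        {'name': 'Bob', 'scores': [1, 2, 3]},
        {'name': STRING, 'scores': [INTEGER]},
        'example')
    # the parse log records each terminal/non-terminal visited during the walk
    return helper.parse_log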
def escape_quote(value):
return unicode(value).replace('\'', r'\'')
class Unit(object):
"""A class to represent a Unit."""
def __init__(self):
self.id = 0
self.type = ''
self.unit_id = ''
self.title = ''
self.release_date = ''
self.now_available = False
def list_properties(self, name, output):
"""Outputs all properties of the unit."""
output.append('%s[\'id\'] = %s;' % (name, self.id))
output.append('%s[\'type\'] = \'%s\';' % (
name, escape_quote(self.type)))
output.append('%s[\'unit_id\'] = \'%s\';' % (
name, escape_quote(self.unit_id)))
output.append('%s[\'title\'] = \'%s\';' % (
name, escape_quote(self.title)))
output.append('%s[\'release_date\'] = \'%s\';' % (
name, escape_quote(self.release_date)))
output.append('%s[\'now_available\'] = %s;' % (
name, str(self.now_available).lower()))
class Lesson(object):
"""A class to represent a Lesson."""
def __init__(self):
self.unit_id = 0
self.unit_title = ''
self.lesson_id = 0
self.lesson_title = ''
self.lesson_activity = ''
self.lesson_activity_name = ''
self.lesson_notes = ''
self.lesson_video_id = ''
self.lesson_objectives = ''
def list_properties(self, name, output):
"""Outputs all properties of the lesson."""
activity = 'false'
if self.lesson_activity == 'yes':
activity = 'true'
output.append('%s[\'unit_id\'] = %s;' % (name, self.unit_id))
output.append('%s[\'unit_title\'] = \'%s\';' % (
name, escape_quote(self.unit_title)))
output.append('%s[\'lesson_id\'] = %s;' % (name, self.lesson_id))
output.append('%s[\'lesson_title\'] = \'%s\';' % (
name, escape_quote(self.lesson_title)))
output.append('%s[\'lesson_activity\'] = %s;' % (name, activity))
output.append('%s[\'lesson_activity_name\'] = \'%s\';' % (
name, escape_quote(self.lesson_activity_name)))
output.append('%s[\'lesson_notes\'] = \'%s\';' % (
name, escape_quote(self.lesson_notes)))
output.append('%s[\'lesson_video_id\'] = \'%s\';' % (
name, escape_quote(self.lesson_video_id)))
output.append('%s[\'lesson_objectives\'] = \'%s\';' % (
name, escape_quote(self.lesson_objectives)))
def to_id_string(self):
return '%s.%s.%s' % (self.unit_id, self.lesson_id, self.lesson_title)
class Assessment(object):
"""A class to represent a Assessment."""
def __init__(self):
self.scope = {}
SchemaHelper().extract_all_terms_to_depth(
'assessment', SCHEMA['assessment'], self.scope)
class Activity(object):
"""A class to represent a Activity."""
def __init__(self):
self.scope = {}
SchemaHelper().extract_all_terms_to_depth(
'activity', SCHEMA['activity'], self.scope)
def silent_echo(unused_message):
pass
def echo(message):
print message
def is_integer(s):
try:
return int(s) == float(s)
except ValueError:
return False
def is_boolean(s):
try:
return s == 'True' or s == 'False'
except ValueError:
return False
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def is_one_of(value, values):
for current in values:
if value == current:
return True
return False
def text_to_line_numbered_text(text):
"""Adds line numbers to the provided text."""
lines = text.split('\n')
results = []
i = 1
for line in lines:
results.append(str(i) + ': ' + line)
i += 1
return '\n '.join(results)
def set_object_attributes(target_object, names, values):
"""Sets object attributes from provided values."""
if len(names) != len(values):
raise SchemaException(
'The number of elements must match: %s and %s' % (names, values))
for i in range(0, len(names)):
if is_integer(values[i]):
# if we are setting an attribute of an object that supports
# metadata, try to infer the target type and convert 'int' into
# 'str' here
target_type = None
if hasattr(target_object.__class__, names[i]):
attribute = getattr(target_object.__class__, names[i])
if hasattr(attribute, 'data_type'):
target_type = attribute.data_type.__name__
if target_type and (target_type == 'str' or
target_type == 'basestring'):
setattr(target_object, names[i], str(values[i]))
else:
setattr(target_object, names[i], int(values[i]))
continue
if is_boolean(values[i]):
setattr(target_object, names[i], values[i] == 'True')
continue
setattr(target_object, names[i], values[i])
def read_objects_from_csv_stream(stream, header, new_object):
return read_objects_from_csv(csv.reader(stream), header, new_object)
def read_objects_from_csv_file(fname, header, new_object):
return read_objects_from_csv_stream(open(fname), header, new_object)
def read_objects_from_csv(value_rows, header, new_object):
"""Reads objects from the rows of a CSV file."""
values = []
for row in value_rows:
if not row:
continue
values.append(row)
names = header.split(',')
if names != values[0]:
raise SchemaException(
'Error reading CSV header.\n '
'Header row had %s element(s): %s\n '
'Expected header row with %s element(s): %s' % (
len(values[0]), values[0], len(names), names))
items = []
for i in range(1, len(values)):
if len(names) != len(values[i]):
raise SchemaException(
'Error reading CSV data row.\n '
'Row #%s had %s element(s): %s\n '
'Expected %s element(s): %s' % (
i, len(values[i]), values[i], len(names), names))
# Decode string values in case they were encoded in UTF-8. The CSV
# reader should do this automatically, but it does not. The issue is
# discussed here: http://docs.python.org/2/library/csv.html
decoded_values = []
for value in values[i]:
if isinstance(value, basestring):
value = unicode(value.decode('utf-8'))
decoded_values.append(value)
item = new_object()
set_object_attributes(item, names, decoded_values)
items.append(item)
return items
def escape_javascript_regex(text):
return re.sub(
r'([:][ ]*)([/])(.*)([/][ismx]*)', r': regex("\2\3\4")', text)
def remove_javascript_single_line_comment(text):
text = re.sub(re.compile('^(.*?)[ ]+//(.*)$', re.MULTILINE), r'\1', text)
text = re.sub(re.compile('^//(.*)$', re.MULTILINE), r'', text)
return text
def remove_javascript_multi_line_comment(text):
# pylint: disable-msg=anomalous-backslash-in-string
return re.sub(
re.compile('/\*(.*)\*/', re.MULTILINE + re.DOTALL), r'', text)
# pylint: enable-msg=anomalous-backslash-in-string
def parse_content_marked_no_verify(content):
"""Parses and returns a tuple of real content and no-verify text."""
# If you have any free-form JavaScript in the activity file, you need
# to place it between //<gcb-no-verify> ... //</gcb-no-verify> tags
# so that the verifier can selectively ignore it.
pattern = re.compile('%s(.*)%s' % (
NO_VERIFY_TAG_NAME_OPEN, NO_VERIFY_TAG_NAME_CLOSE), re.DOTALL)
m = pattern.search(content)
noverify_text = None
if m:
noverify_text = m.group(1)
return (re.sub(pattern, '', content), noverify_text)
def convert_javascript_to_python(content, root_name):
"""Removes JavaScript specific syntactic constructs and returns a tuple."""
# Reads the content and removes JavaScript comments, var's, and escapes
# regular expressions.
(content, noverify_text) = parse_content_marked_no_verify(content)
content = remove_javascript_multi_line_comment(content)
content = remove_javascript_single_line_comment(content)
content = content.replace('var %s = ' % root_name, '%s = ' % root_name)
content = escape_javascript_regex(content)
return (content, noverify_text)
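# Illustrative sketch (not part of the original module; the function name and
# the JavaScript snippet are made up): converting a tiny activity snippet and
# evaluating it in the restricted scope. evaluate_python_expression_from_text()
# is defined just below and is resolved when this function is called.
def _example_convert_and_evaluate():
    js_text = ('var activity = ["Intro text", '
               '{ "questionType": "freetext", "correctAnswerRegex": /42/i }];')
    content, noverify_text = convert_javascript_to_python(js_text, 'activity')
    bindings = evaluate_python_expression_from_text(
        content, 'activity', Activity().scope, noverify_text)
    # bindings['activity'] is now a Python list; the regex literal has been
    # wrapped into Term(REGEX, '/42/i') by the 'regex' binding in the scope.
    return bindings['activity']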
def convert_javascript_file_to_python(fname, root_name):
return convert_javascript_to_python(
''.join(open(fname, 'r').readlines()), root_name)
def evaluate_python_expression_from_text(content, root_name, scope,
noverify_text):
"""Compiles and evaluates a Python script in a restricted environment."""
# First compiles and then evaluates a Python script text in a restricted
# environment using provided bindings. Returns the resulting bindings if
# evaluation completed.
# create a new execution scope that has only the schema terms defined;
# remove all other languages constructs including __builtins__
restricted_scope = {}
restricted_scope.update(scope)
restricted_scope.update({'__builtins__': {}})
code = compile(content, '<string>', 'exec')
# pylint: disable-msg=exec-statement
exec code in restricted_scope
# pylint: enable-msg=exec-statement
if noverify_text:
restricted_scope['noverify'] = noverify_text
if not restricted_scope[root_name]:
raise Exception('Unable to find \'%s\'' % root_name)
return restricted_scope
def evaluate_javascript_expression_from_file(fname, root_name, scope, error):
(content, noverify_text) = convert_javascript_file_to_python(fname,
root_name)
try:
return evaluate_python_expression_from_text(content, root_name, scope,
noverify_text)
except:
error('Unable to parse %s in file %s\n %s' % (
root_name, fname, text_to_line_numbered_text(content)))
for message in sys.exc_info():
error(str(message))
raise
class Verifier(object):
"""Verifies Units, Lessons, Assessments, Activities and their relations."""
def __init__(self):
self.echo_func = silent_echo
self.schema_helper = SchemaHelper()
self.errors = 0
self.warnings = 0
self.export = []
def verify_unit_fields(self, units):
self.export.append('units = Array();')
for unit in units:
if not is_one_of(unit.now_available, [True, False]):
self.error(
'Bad now_available \'%s\' for unit id %s; expected '
'\'True\' or \'False\'' % (unit.now_available, unit.id))
if not is_one_of(unit.type, ['U', 'A', 'O']):
self.error(
'Bad type \'%s\' for unit id %s; '
'expected \'U\', \'A\', or \'O\'' % (unit.type, unit.id))
if unit.type == 'A':
if not is_one_of(unit.unit_id, ('Pre', 'Mid', 'Fin')):
self.error(
'Bad unit_id \'%s\'; expected \'Pre\', \'Mid\' or '
'\'Fin\' for unit id %s' % (unit.unit_id, unit.id))
if unit.type == 'U':
if not is_integer(unit.unit_id):
self.error(
'Expected integer unit_id, found %s in unit id '
' %s' % (unit.unit_id, unit.id))
self.export.append('')
self.export.append('units[%s] = Array();' % unit.id)
self.export.append('units[%s][\'lessons\'] = Array();' % unit.id)
unit.list_properties('units[%s]' % unit.id, self.export)
def verify_lesson_fields(self, lessons):
for lesson in lessons:
if not is_one_of(lesson.lesson_activity, ['yes', '']):
self.error('Bad lesson_activity \'%s\' for lesson_id %s' % (
lesson.lesson_activity, lesson.lesson_id))
self.export.append('')
self.export.append('units[%s][\'lessons\'][%s] = Array();' % (
lesson.unit_id, lesson.lesson_id))
lesson.list_properties('units[%s][\'lessons\'][%s]' % (
lesson.unit_id, lesson.lesson_id), self.export)
def verify_unit_lesson_relationships(self, units, lessons):
"""Checks each lesson points to a unit and all lessons are in use."""
used_lessons = []
units.sort(key=lambda x: x.id)
# for unit in units:
for i in range(0, len(units)):
unit = units[i]
# check that unit ids are 1-based and sequential
if unit.id != i + 1:
self.error('Unit out of order: %s' % (unit.id))
# get the list of lessons for each unit
self.fine('Unit %s: %s' % (unit.id, unit.title))
unit_lessons = []
for lesson in lessons:
if lesson.unit_id == unit.unit_id:
if not lesson.unit_title == unit.title:
raise Exception(''.join([
'A unit_title of a lesson (id=%s) must match ',
'title of a unit (id=%s) the lesson belongs to.'
]) % (lesson.lesson_id, lesson.unit_id))
unit_lessons.append(lesson)
used_lessons.append(lesson)
# inspect all lessons for the current unit
unit_lessons.sort(key=lambda x: x.lesson_id)
for j in range(0, len(unit_lessons)):
lesson = unit_lessons[j]
# check that lesson_ids are 1-based and sequential
if lesson.lesson_id != j + 1:
self.warn(
'Lesson lesson_id is out of order: expected %s, found '
' %s (%s)' % (
j + 1, lesson.lesson_id, lesson.to_id_string()))
self.fine(' Lesson %s: %s' % (
lesson.lesson_id, lesson.lesson_title))
# find lessons not used by any of the units
unused_lessons = list(lessons)
for lesson in used_lessons:
unused_lessons.remove(lesson)
for lesson in unused_lessons:
self.warn('Unused lesson_id %s (%s)' % (
lesson.lesson_id, lesson.to_id_string()))
# check all lessons point to known units
for lesson in lessons:
has = False
for unit in units:
if lesson.unit_id == unit.unit_id:
has = True
break
if not has:
self.error('Lesson has unknown unit_id %s (%s)' % (
lesson.unit_id, lesson.to_id_string()))
def verify_activities(self, lessons):
"""Loads and verifies all activities."""
self.info('Loading activities:')
count = 0
for lesson in lessons:
if lesson.lesson_activity == 'yes':
count += 1
fname = os.path.join(
os.path.dirname(__file__),
'../assets/js/activity-' + str(lesson.unit_id) + '.' +
str(lesson.lesson_id) + '.js')
if not os.path.exists(fname):
self.error(' Missing activity: %s' % fname)
else:
activity = evaluate_javascript_expression_from_file(
fname, 'activity', Activity().scope, self.error)
self.verify_activity_instance(activity, fname)
self.export.append('')
self.encode_activity_json(
activity, lesson.unit_id, lesson.lesson_id)
self.info('Read %s activities' % count)
def verify_assessment(self, units):
"""Loads and verifies all assessments."""
self.export.append('')
self.export.append('assessments = Array();')
self.info('Loading assessment:')
count = 0
for unit in units:
if unit.type == 'A':
count += 1
assessment_name = str(unit.unit_id)
fname = os.path.join(
os.path.dirname(__file__),
'../assets/js/assessment-' + assessment_name + '.js')
if not os.path.exists(fname):
self.error(' Missing assessment: %s' % fname)
else:
assessment = evaluate_javascript_expression_from_file(
fname, 'assessment', Assessment().scope, self.error)
self.verify_assessment_instance(assessment, fname)
self.export.append('')
self.encode_assessment_json(assessment, assessment_name)
self.info('Read %s assessments' % count)
# NB: The exported script needs to define a gcb_regex() wrapper function
@staticmethod
def encode_regex(regex_str):
"""Encodes a JavaScript-style regex into a Python gcb_regex call."""
# parse the regex into the base and modifiers. e.g., for /foo/i
# base is 'foo' and modifiers is 'i'
assert regex_str[0] == '/'
# find the LAST '/' in regex_str (because there might be other
# escaped '/' characters in the middle of regex_str)
final_slash_index = regex_str.rfind('/')
assert final_slash_index > 0
base = regex_str[1:final_slash_index]
modifiers = regex_str[final_slash_index+1:]
func_str = 'gcb_regex(' + repr(base) + ', ' + repr(modifiers) + ')'
return func_str
def encode_activity_json(self, activity_dict, unit_id, lesson_id):
"""Encodes an activity dictionary into JSON."""
output = []
for elt in activity_dict['activity']:
t = type(elt)
encoded_elt = None
if t is str:
encoded_elt = {'type': 'string', 'value': elt}
elif t is dict:
qt = elt['questionType']
encoded_elt = {'type': qt}
if qt == 'multiple choice':
choices = elt['choices']
encoded_choices = [[x, y.value, z] for x, y, z in choices]
encoded_elt['choices'] = encoded_choices
elif qt == 'multiple choice group':
# everything inside consists of primitive types that can be encoded
elt_copy = dict(elt)
del elt_copy['questionType'] # redundant
encoded_elt['value'] = elt_copy
elif qt == 'freetext':
for k in elt.keys():
if k == 'questionType':
continue
elif k == 'correctAnswerRegex':
encoded_elt[k] = Verifier.encode_regex(elt[k].value)
else:
# ordinary string
encoded_elt[k] = elt[k]
else:
assert False
else:
assert False
assert encoded_elt
output.append(encoded_elt)
# N.B.: make sure to get the string quoting right!
code_str = "units[%s]['lessons'][%s]['activity'] = " % (
unit_id, lesson_id) + repr(json.dumps(output)) + ';'
self.export.append(code_str)
if 'noverify' in activity_dict:
self.export.append('')
noverify_code_str = "units[%s]['lessons'][%s]['code'] = " % (
unit_id, lesson_id) + repr(activity_dict['noverify']) + ';'
self.export.append(noverify_code_str)
def encode_assessment_json(self, assessment_dict, assessment_name):
"""Encodes an assessment dictionary into JSON."""
real_dict = assessment_dict['assessment']
output = {}
output['assessmentName'] = real_dict['assessmentName']
if 'preamble' in real_dict:
output['preamble'] = real_dict['preamble']
output['checkAnswers'] = real_dict['checkAnswers'].value
encoded_questions_list = []
for elt in real_dict['questionsList']:
encoded_elt = {}
encoded_elt['questionHTML'] = elt['questionHTML']
if 'lesson' in elt:
encoded_elt['lesson'] = elt['lesson']
if 'correctAnswerNumeric' in elt:
encoded_elt['correctAnswerNumeric'] = elt[
'correctAnswerNumeric']
if 'correctAnswerString' in elt:
encoded_elt['correctAnswerString'] = elt['correctAnswerString']
if 'correctAnswerRegex' in elt:
encoded_elt['correctAnswerRegex'] = Verifier.encode_regex(
elt['correctAnswerRegex'].value)
if 'choices' in elt:
encoded_choices = []
correct_answer_index = None
for (ind, e) in enumerate(elt['choices']):
if type(e) is str:
encoded_choices.append(e)
elif e.term_type == CORRECT:
encoded_choices.append(e.value)
correct_answer_index = ind
else:
raise Exception("Invalid type in 'choices'")
encoded_elt['choices'] = encoded_choices
encoded_elt['correctAnswerIndex'] = correct_answer_index
encoded_questions_list.append(encoded_elt)
output['questionsList'] = encoded_questions_list
# N.B.: make sure to get the string quoting right!
code_str = 'assessments[\'' + assessment_name + '\'] = ' + repr(
json.dumps(output)) + ';'
self.export.append(code_str)
if 'noverify' in assessment_dict:
self.export.append('')
noverify_code_str = ('assessments[\'' + assessment_name +
'\'] = ' + repr(assessment_dict['noverify']) +
';')
self.export.append(noverify_code_str)
def format_parse_log(self):
return 'Parse log:\n%s' % '\n'.join(self.schema_helper.parse_log)
def verify_assessment_instance(self, scope, fname):
"""Verifies compliance of assessment with schema."""
if scope:
try:
self.schema_helper.check_instances_match_schema(
scope['assessment'], SCHEMA['assessment'], 'assessment')
self.info(' Verified assessment %s' % fname)
if OUTPUT_DEBUG_LOG:
self.info(self.format_parse_log())
except SchemaException as e:
self.error(' Error in assessment %s\n%s' % (
fname, self.format_parse_log()))
raise e
else:
self.error(' Unable to evaluate \'assessment =\' in %s' % fname)
def verify_activity_instance(self, scope, fname):
"""Verifies compliance of activity with schema."""
if scope:
try:
self.schema_helper.check_instances_match_schema(
scope['activity'], SCHEMA['activity'], 'activity')
self.info(' Verified activity %s' % fname)
if OUTPUT_DEBUG_LOG:
self.info(self.format_parse_log())
except SchemaException as e:
self.error(' Error in activity %s\n%s' % (
fname, self.format_parse_log()))
raise e
else:
self.error(' Unable to evaluate \'activity =\' in %s' % fname)
def fine(self, x):
if OUTPUT_FINE_LOG:
self.echo_func('FINE: ' + x)
def info(self, x):
self.echo_func('INFO: ' + x)
def warn(self, x):
self.warnings += 1
self.echo_func('WARNING: ' + x)
def error(self, x):
self.errors += 1
self.echo_func('ERROR: ' + x)
def load_and_verify_model(self, echo_func):
"""Loads, parses and verifies all content for a course."""
self.echo_func = echo_func
self.info('Started verification in: %s' % __file__)
unit_file = os.path.join(os.path.dirname(__file__), '../data/unit.csv')
lesson_file = os.path.join(
os.path.dirname(__file__), '../data/lesson.csv')
self.info('Loading units from: %s' % unit_file)
units = read_objects_from_csv_file(unit_file, UNITS_HEADER, Unit)
self.info('Read %s units' % len(units))
self.info('Loading lessons from: %s' % lesson_file)
lessons = read_objects_from_csv_file(
lesson_file, LESSONS_HEADER, Lesson)
self.info('Read %s lessons' % len(lessons))
self.verify_unit_fields(units)
self.verify_lesson_fields(lessons)
self.verify_unit_lesson_relationships(units, lessons)
try:
self.verify_activities(lessons)
self.verify_assessment(units)
except SchemaException as e:
self.error(str(e))
self.info('Schema usage statistics: %s' % self.schema_helper.type_stats)
self.info('Completed verification: %s warnings, %s errors.' % (
self.warnings, self.errors))
return self.warnings, self.errors
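# Illustrative sketch (not part of the original script; the function name is
# made up): driving the verifier programmatically rather than via
# "python tools/verify.py". echo() simply prints each INFO/WARNING/ERROR line.
def _example_run_verifier():
    warnings, errors = Verifier().load_and_verify_model(echo)
    return errors == 0 and warnings == 0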
def run_all_regex_unit_tests():
"""Executes all tests related to regular expressions."""
# pylint: disable-msg=anomalous-backslash-in-string
assert escape_javascript_regex(
'blah regex: /site:bls.gov?/i, blah') == (
'blah regex: regex(\"/site:bls.gov?/i\"), blah')
assert escape_javascript_regex(
'blah regex: /site:http:\/\/www.google.com?q=abc/i, blah') == (
'blah regex: regex(\"/site:http:\/\/www.google.com?q=abc/i\"), '
'blah')
assert remove_javascript_multi_line_comment(
'blah\n/*\ncomment\n*/\nblah') == 'blah\n\nblah'
assert remove_javascript_multi_line_comment(
'blah\nblah /*\ncomment\nblah */\nblah') == ('blah\nblah \nblah')
assert remove_javascript_single_line_comment(
'blah\n// comment\nblah') == 'blah\n\nblah'
assert remove_javascript_single_line_comment(
'blah\nblah http://www.foo.com\nblah') == (
'blah\nblah http://www.foo.com\nblah')
assert remove_javascript_single_line_comment(
'blah\nblah // comment\nblah') == 'blah\nblah\nblah'
assert remove_javascript_single_line_comment(
'blah\nblah // comment http://www.foo.com\nblah') == (
'blah\nblah\nblah')
assert parse_content_marked_no_verify(
'blah1\n// <gcb-no-verify>\n/blah2\n// </gcb-no-verify>\nblah3')[0] == (
'blah1\n// \nblah3')
# pylint: enable-msg=anomalous-backslash-in-string
assert Verifier.encode_regex('/white?/i') == """gcb_regex('white?', 'i')"""
assert (Verifier.encode_regex('/jane austen (book|books) \\-price/i') ==
r"""gcb_regex('jane austen (book|books) \\-price', 'i')""")
assert (Verifier.encode_regex('/Kozanji|Kozan-ji|Kosanji|Kosan-ji/i') ==
r"""gcb_regex('Kozanji|Kozan-ji|Kosanji|Kosan-ji', 'i')""")
assert (Verifier.encode_regex('/Big Time College Sport?/i') ==
"gcb_regex('Big Time College Sport?', 'i')")
assert (Verifier.encode_regex('/354\\s*[+]\\s*651/') ==
r"""gcb_regex('354\\s*[+]\\s*651', '')""")
def run_all_schema_helper_unit_tests():
"""Executes all tests related to schema validation."""
def assert_same(a, b):
if a != b:
raise Exception('Expected:\n %s\nFound:\n %s' % (a, b))
def assert_pass(instances, types, expected_result=None):
try:
schema_helper = SchemaHelper()
result = schema_helper.check_instances_match_schema(
instances, types, 'test')
if OUTPUT_DEBUG_LOG:
print '\n'.join(schema_helper.parse_log)
if expected_result:
assert_same(expected_result, result)
except SchemaException as e:
if OUTPUT_DEBUG_LOG:
print str(e)
print '\n'.join(schema_helper.parse_log)
raise
def assert_fails(func):
try:
func()
raise Exception('Expected to fail')
except SchemaException as e:
if OUTPUT_DEBUG_LOG:
print str(e)
def assert_fail(instances, types):
assert_fails(lambda: assert_pass(instances, types))
def create_python_dict_from_js_object(js_object):
python_str, noverify = convert_javascript_to_python(
'var x = ' + js_object, 'x')
ret = evaluate_python_expression_from_text(
python_str, 'x', Assessment().scope, noverify)
return ret['x']
# CSV tests
read_objects_from_csv(
[['id', 'type'], [1, 'none']], 'id,type', Unit)
def reader_one():
return read_objects_from_csv(
[['id', 'type'], [1, 'none']], 'id,type,title', Unit)
assert_fails(reader_one)
def reader_two():
read_objects_from_csv(
[['id', 'type', 'title'], [1, 'none']], 'id,type,title', Unit)
assert_fails(reader_two)
# context tests
assert_same(Context().new([]).new(['a']).new(['b', 'c']).format_path(),
('//a/b/c'))
# simple map tests
assert_pass({'name': 'Bob'}, {'name': STRING}, None)
assert_fail('foo', 'bar')
assert_fail({'name': 'Bob'}, {'name': INTEGER})
assert_fail({'name': 12345}, {'name': STRING})
assert_fail({'amount': 12345}, {'name': INTEGER})
assert_fail({'regex': Term(CORRECT)}, {'regex': Term(REGEX)})
assert_pass({'name': 'Bob'}, {'name': STRING, 'phone': STRING})
assert_pass({'name': 'Bob'}, {'phone': STRING, 'name': STRING})
assert_pass({'name': 'Bob'},
{'phone': STRING, 'name': STRING, 'age': INTEGER})
# mixed attributes tests
assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]})
assert_pass({'colors': []}, {'colors': [STRING]})
assert_fail({'colors': {'red': 'blue'}}, {'colors': [STRING]})
assert_fail({'colors': {'red': 'blue'}}, {'colors': [FLOAT]})
assert_fail({'colors': ['red', 'blue', 5.5]}, {'colors': [STRING]})
assert_fail({'colors': ['red', 'blue', {'foo': 'bar'}]},
{'colors': [STRING]})
assert_fail({'colors': ['red', 'blue'], 'foo': 'bar'},
{'colors': [STRING]})
assert_pass({'colors': ['red', 1]}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': [1, 2, 3]}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': ['red', 1, 5.3]}, {'colors': [[STRING, INTEGER]]})
assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]})
assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING]]})
assert_fail({'colors': ['red', ['blue']]}, {'colors': [STRING]})
assert_fail({'colors': ['red', ['blue', 'green']]}, {'colors': [STRING]})
# required attribute tests
assert_pass({'colors': ['red', 5]}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': ['red', 5]}, {'colors': [[INTEGER, STRING]]})
assert_pass({'colors': ['red', 5]}, {'colors': [STRING, INTEGER]})
assert_pass({'colors': ['red', 5]}, {'colors': [INTEGER, STRING]})
assert_fail({'colors': ['red', 5, 'FF0000']},
{'colors': [[STRING, INTEGER]]})
# an array and a map of primitive type tests
assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': STRING}})
assert_fail({'color': {'name': 'red', 'rgb': ['FF0000']}},
{'color': {'name': STRING, 'rgb': STRING}})
assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': INTEGER}})
assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': {'hex': STRING}}})
assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': STRING}})
assert_pass({'colors':
[{'name': 'red', 'rgb': 'FF0000'},
{'name': 'blue', 'rgb': '0000FF'}]},
{'colors': [{'name': STRING, 'rgb': STRING}]})
assert_fail({'colors':
[{'name': 'red', 'rgb': 'FF0000'},
{'phone': 'blue', 'rgb': '0000FF'}]},
{'colors': [{'name': STRING, 'rgb': STRING}]})
# boolean type tests
assert_pass({'name': 'Bob', 'active': True},
{'name': STRING, 'active': BOOLEAN})
assert_pass({'name': 'Bob', 'active': [5, True, False]},
{'name': STRING, 'active': [INTEGER, BOOLEAN]})
assert_pass({'name': 'Bob', 'active': [5, True, 'false']},
{'name': STRING, 'active': [STRING, INTEGER, BOOLEAN]})
assert_fail({'name': 'Bob', 'active': [5, True, 'False']},
{'name': STRING, 'active': [[INTEGER, BOOLEAN]]})
# optional attribute tests
assert_pass({'points':
[{'x': 1, 'y': 2, 'z': 3}, {'x': 3, 'y': 2, 'z': 1},
{'x': 2, 'y': 3, 'z': 1}]},
{'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]})
assert_pass({'points':
[{'x': 1, 'z': 3}, {'x': 3, 'y': 2}, {'y': 3, 'z': 1}]},
{'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]})
assert_pass({'account':
[{'name': 'Bob', 'age': 25, 'active': True}]},
{'account':
[{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]})
assert_pass({'account':
[{'name': 'Bob', 'active': True}]},
{'account':
[{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]})
# nested array tests
assert_fail({'name': 'Bob', 'active': [5, True, 'false']},
{'name': STRING, 'active': [[BOOLEAN]]})
assert_fail({'name': 'Bob', 'active': [True]},
{'name': STRING, 'active': [[STRING]]})
assert_pass({'name': 'Bob', 'active': ['true']},
{'name': STRING, 'active': [[STRING]]})
assert_pass({'name': 'flowers', 'price': ['USD', 9.99]},
{'name': STRING, 'price': [[STRING, FLOAT]]})
assert_pass({'name': 'flowers', 'price':
[['USD', 9.99], ['CAD', 11.79], ['RUB', 250.23]]},
{'name': STRING, 'price': [[STRING, FLOAT]]})
# selector tests
assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'},
{'state': 'NY', 'drink': 'wine'}]},
{'likes': [{'state': 'CA', 'food': STRING},
{'state': 'NY', 'drink': STRING}]})
assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'},
{'state': 'CA', 'food': 'nuts'}]},
{'likes': [{'state': 'CA', 'food': STRING},
{'state': 'NY', 'drink': STRING}]})
assert_fail({'likes': {'state': 'CA', 'drink': 'cheese'}},
{'likes': [{'state': 'CA', 'food': STRING},
{'state': 'NY', 'drink': STRING}]})
# creating from dict tests
assert_same(create_python_dict_from_js_object('{"active": true}'),
{'active': Term(BOOLEAN, True)})
assert_same(create_python_dict_from_js_object(
'{"a": correct("hello world")}'),
{'a': Term(CORRECT, 'hello world')})
assert_same(create_python_dict_from_js_object('{"a": /hello/i}'),
{'a': Term(REGEX, '/hello/i')})
def run_all_unit_tests():
run_all_regex_unit_tests()
run_all_schema_helper_unit_tests()
run_all_unit_tests()
if __name__ == '__main__':
Verifier().load_and_verify_model(echo)
|
|
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Server interface.
"""
import base64
from oslo.utils import encodeutils
import six
from six.moves.urllib import parse
from novaclient import base
from novaclient import crypto
from novaclient.openstack.common.gettextutils import _
REBOOT_SOFT, REBOOT_HARD = 'SOFT', 'HARD'
class Server(base.Resource):
HUMAN_ID = True
def __repr__(self):
return "<Server: %s>" % self.name
def delete(self):
"""
Delete (i.e. shut down and delete the image) this server.
"""
self.manager.delete(self)
def update(self, name=None):
"""
Update the name for this server.
:param name: Update the server's name.
"""
self.manager.update(self, name=name)
def get_console_output(self, length=None):
"""
Get text console log output from Server.
:param length: The number of lines you would like to retrieve (as int)
"""
return self.manager.get_console_output(self, length)
def get_vnc_console(self, console_type):
"""
Get vnc console for a Server.
:param console_type: Type of console ('novnc' or 'xvpvnc')
"""
return self.manager.get_vnc_console(self, console_type)
def get_spice_console(self, console_type):
"""
Get spice console for a Server.
:param console_type: Type of console ('spice-html5')
"""
return self.manager.get_spice_console(self, console_type)
def get_password(self, private_key):
"""
Get password for a Server.
:param private_key: Path to private key file for decryption
"""
return self.manager.get_password(self, private_key)
def clear_password(self):
"""
Clear the admin password for a Server.
"""
return self.manager.clear_password(self)
def add_fixed_ip(self, network_id):
"""
Add an IP address on a network.
:param network_id: The ID of the network the IP should be on.
"""
self.manager.add_fixed_ip(self, network_id)
def remove_floating_ip(self, address):
"""
Remove floating IP from an instance
:param address: The ip address or FloatingIP to remove
"""
self.manager.remove_floating_ip(self, address)
def stop(self):
"""
Stop -- Stop the running server.
"""
self.manager.stop(self)
def force_delete(self):
"""
Force delete -- Force delete a server.
"""
self.manager.force_delete(self)
def restore(self):
"""
Restore -- Restore a server in 'soft-deleted' state.
"""
self.manager.restore(self)
def start(self):
"""
Start -- Start the stopped server.
"""
self.manager.start(self)
def pause(self):
"""
Pause -- Pause the running server.
"""
self.manager.pause(self)
def unpause(self):
"""
Unpause -- Unpause the paused server.
"""
self.manager.unpause(self)
def lock(self):
"""
Lock -- Lock the instance from certain operations.
"""
self.manager.lock(self)
def unlock(self):
"""
Unlock -- Remove instance lock.
"""
self.manager.unlock(self)
def suspend(self):
"""
Suspend -- Suspend the running server.
"""
self.manager.suspend(self)
def resume(self):
"""
Resume -- Resume the suspended server.
"""
self.manager.resume(self)
def rescue(self):
"""
Rescue -- Rescue the problematic server.
"""
return self.manager.rescue(self)
def unrescue(self):
"""
Unrescue -- Unrescue the rescued server.
"""
self.manager.unrescue(self)
def shelve(self):
"""
Shelve -- Shelve the server.
"""
self.manager.shelve(self)
def shelve_offload(self):
"""
Shelve_offload -- Remove a shelved server from the compute node.
"""
self.manager.shelve_offload(self)
def unshelve(self):
"""
Unshelve -- Unshelve the server.
"""
self.manager.unshelve(self)
def diagnostics(self):
"""Diagnostics -- Retrieve server diagnostics."""
return self.manager.diagnostics(self)
def migrate(self):
"""
Migrate a server to a new host.
"""
self.manager.migrate(self)
def remove_fixed_ip(self, address):
"""
Remove an IP address.
:param address: The IP address to remove.
"""
self.manager.remove_fixed_ip(self, address)
def change_password(self, password):
"""
Update the password for a server.
"""
self.manager.change_password(self, password)
def reboot(self, reboot_type=REBOOT_SOFT):
"""
Reboot the server.
:param reboot_type: either :data:`REBOOT_SOFT` for a software-level
reboot, or :data:`REBOOT_HARD` for a virtual power cycle hard reboot.
"""
self.manager.reboot(self, reboot_type)
def rebuild(self, image, password=None, **kwargs):
"""
Rebuild -- shut down and then re-image -- this server.
:param image: the :class:`Image` (or its ID) to re-image with.
:param password: string to set as password on the rebuilt server.
"""
return self.manager.rebuild(self, image, password=password, **kwargs)
def resize(self, flavor, **kwargs):
"""
Resize the server's resources.
:param flavor: the :class:`Flavor` (or its ID) to resize to.
Until a resize event is confirmed with :meth:`confirm_resize`, the old
server will be kept around and you'll be able to roll back to the old
flavor quickly with :meth:`revert_resize`. All resizes are
automatically confirmed after 24 hours.
"""
self.manager.resize(self, flavor, **kwargs)
def create_image(self, image_name, metadata=None):
"""
Create an image based on this server.
:param image_name: The name to assign the newly created image.
:param metadata: Metadata to assign to the image.
"""
return self.manager.create_image(self, image_name, metadata)
def backup(self, backup_name, backup_type, rotation):
"""
Backup a server instance.
:param backup_name: Name of the backup image
:param backup_type: The backup type, like 'daily' or 'weekly'
:param rotation: Int parameter representing how many backups to
keep around.
"""
self.manager.backup(self, backup_name, backup_type, rotation)
def confirm_resize(self):
"""
Confirm that the resize worked, thus removing the original server.
"""
self.manager.confirm_resize(self)
def revert_resize(self):
"""
Revert a previous resize, switching back to the old server.
"""
self.manager.revert_resize(self)
@property
def networks(self):
"""
Generate a simplified list of addresses
"""
networks = {}
try:
for network_label, address_list in self.addresses.items():
networks[network_label] = [a['addr'] for a in address_list]
return networks
except Exception:
return {}
def live_migrate(self, host=None,
block_migration=False,
disk_over_commit=False):
"""
Migrates a running instance to a new machine.
"""
self.manager.live_migrate(self, host,
block_migration,
disk_over_commit)
def reset_state(self, state='error'):
"""
Reset the state of an instance to active or error.
"""
self.manager.reset_state(self, state)
def reset_network(self):
"""
Reset network of an instance.
"""
self.manager.reset_network(self)
def evacuate(self, host=None, on_shared_storage=True, password=None):
"""
Evacuate an instance from a failed host to the specified host.
:param host: Name of the target host
:param on_shared_storage: Specifies whether instance files are located
on shared storage
:param password: string to set as password on the evacuated server.
"""
return self.manager.evacuate(self, host, on_shared_storage, password)
def interface_list(self):
"""
List interfaces attached to an instance.
"""
return self.manager.interface_list(self)
def interface_attach(self, port_id, net_id, fixed_ip):
"""
Attach a network interface to an instance.
"""
return self.manager.interface_attach(self, port_id, net_id, fixed_ip)
def interface_detach(self, port_id):
"""
Detach a network interface from an instance.
"""
return self.manager.interface_detach(self, port_id)
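# Illustrative usage sketch (not part of this module; the function name and all
# identifiers are placeholders): the resize workflow described in
# Server.resize(). ``client`` stands for an already-authenticated novaclient
# Client instance whose ``servers`` attribute is a ServerManager.
def _example_resize_and_confirm(client, server_id, flavor):
    server = client.servers.get(server_id)
    server.resize(flavor)
    # Once the resized server has been verified, confirm it so the old server
    # is removed; revert_resize() would roll back to the old flavor instead.
    server.confirm_resize()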
class ServerManager(base.BootingManagerWithFind):
resource_class = Server
def _boot(self, resource_url, response_key, name, image, flavor,
meta=None, userdata=None,
reservation_id=None, return_raw=False, min_count=None,
max_count=None, security_groups=None, key_name=None,
availability_zone=None, block_device_mapping=None,
block_device_mapping_v2=None, nics=None, scheduler_hints=None,
config_drive=None, admin_pass=None, **kwargs):
"""
Create (boot) a new server.
:param name: Something to name the server.
:param image: The :class:`Image` to boot with.
:param flavor: The :class:`Flavor` to boot onto.
:param meta: A dict of arbitrary key/value metadata to store for this
server. A maximum of five entries is allowed, and both
keys and values must be 255 characters or less.
:param reservation_id: a UUID for the set of servers being requested.
:param return_raw: If True, don't try to coerce the result into
a Resource object.
:param security_groups: list of security group names
:param key_name: (optional extension) name of keypair to inject into
the instance
:param availability_zone: Name of the availability zone for instance
placement.
:param block_device_mapping: A dict of block device mappings for this
server.
:param block_device_mapping_v2: A dict of block device mappings V2 for
this server.
:param nics: (optional extension) an ordered list of nics to be
added to this server, with information about
connected networks, fixed ips, etc.
:param scheduler_hints: (optional extension) arbitrary key-value pairs
specified by the client to help boot an instance.
:param config_drive: (optional extension) value for config drive
either boolean, or volume-id
:param admin_pass: admin password for the server.
"""
body = {"server": {
"name": name,
"image_ref": str(base.getid(image)) if image else '',
"flavor_ref": str(base.getid(flavor)),
}}
if userdata:
if hasattr(userdata, 'read'):
userdata = userdata.read()
if six.PY3:
userdata = userdata.encode("utf-8")
else:
userdata = encodeutils.safe_encode(userdata)
data = base64.b64encode(userdata).decode('utf-8')
body["server"]["user_data"] = data
if meta:
body["server"]["metadata"] = meta
if reservation_id:
body["server"][
"os-multiple-create:return_reservation_id"] = reservation_id
if key_name:
body["server"]["key_name"] = key_name
if scheduler_hints:
body["server"][
"os-scheduler-hints:scheduler_hints"] = scheduler_hints
if config_drive:
body["server"]["os-config-drive:config_drive"] = config_drive
if admin_pass:
body["server"]["admin_password"] = admin_pass
if not min_count:
min_count = 1
if not max_count:
max_count = min_count
body["server"]["os-multiple-create:min_count"] = min_count
body["server"]["os-multiple-create:max_count"] = max_count
if security_groups:
body["server"]["security_groups"] = \
[{'name': sg} for sg in security_groups]
if availability_zone:
body["server"][
"os-availability-zone:availability_zone"] = availability_zone
# Block device mappings are passed as a list of dictionaries
if block_device_mapping:
bdm_param = 'block_device_mapping'
body['server'][bdm_param] = \
self._parse_block_device_mapping(block_device_mapping)
elif block_device_mapping_v2:
# Append the image to the list only if we have new style BDMs
bdm_param = 'block_device_mapping_v2'
if image:
bdm_dict = {'uuid': image.id, 'source_type': 'image',
'destination_type': 'local', 'boot_index': 0,
'delete_on_termination': True}
block_device_mapping_v2.insert(0, bdm_dict)
body['server'][bdm_param] = block_device_mapping_v2
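# Illustrative shape of a single entry in ``nics`` (keys taken from the
# handling below; the values here are made-up placeholders):
#     {'net-id': '<network uuid>', 'v4-fixed-ip': '10.0.0.5',
#      'port-id': '<port uuid>'}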
if nics is not None:
# NOTE(tr3buchet): nics can be an empty list
all_net_data = []
for nic_info in nics:
net_data = {}
# if value is empty string, do not send value in body
if nic_info.get('net-id'):
net_data['uuid'] = nic_info['net-id']
if (nic_info.get('v4-fixed-ip') and
nic_info.get('v6-fixed-ip')):
raise base.exceptions.CommandError(_(
"Only one of 'v4-fixed-ip' and 'v6-fixed-ip' may be"
" provided."))
elif nic_info.get('v4-fixed-ip'):
net_data['fixed_ip'] = nic_info['v4-fixed-ip']
elif nic_info.get('v6-fixed-ip'):
net_data['fixed_ip'] = nic_info['v6-fixed-ip']
if nic_info.get('port-id'):
net_data['port'] = nic_info['port-id']
all_net_data.append(net_data)
body['server']['networks'] = all_net_data
return self._create(resource_url, body, response_key,
return_raw=return_raw, **kwargs)
def get(self, server):
"""
Get a server.
:param server: ID of the :class:`Server` to get.
:rtype: :class:`Server`
"""
return self._get("/servers/%s" % base.getid(server), "server")
def list(self, detailed=True, search_opts=None, marker=None, limit=None):
"""
Get a list of servers.
:param detailed: Whether to return detailed server info (optional).
:param search_opts: Search options to filter out servers (optional).
:param marker: Begin returning servers that appear later in the server
list than that represented by this server id (optional).
:param limit: Maximum number of servers to return (optional).
:rtype: list of :class:`Server`
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in six.iteritems(search_opts):
if val:
qparams[opt] = val
if marker:
qparams['marker'] = marker
if limit:
qparams['limit'] = limit
# Transform the dict to a sequence of two-element tuples in fixed
# order, so the encoded string is consistent across Python 2 and 3.
if qparams:
new_qparams = sorted(qparams.items(), key=lambda x: x[0])
query_string = "?%s" % parse.urlencode(new_qparams)
else:
query_string = ""
detail = ""
if detailed:
detail = "/detail"
return self._list("/servers%s%s" % (detail, query_string), "servers")
def add_fixed_ip(self, server, network_id):
"""
Add an IP address on a network.
:param server: The :class:`Server` (or its ID) to add an IP to.
:param network_id: The ID of the network the IP should be on.
"""
self._action('add_fixed_ip', server, {'network_id': network_id})
def remove_fixed_ip(self, server, address):
"""
Remove an IP address.
:param server: The :class:`Server` (or its ID) to remove an IP from.
:param address: The IP address to remove.
"""
self._action('remove_fixed_ip', server, {'address': address})
def get_vnc_console(self, server, console_type):
"""
Get a vnc console for an instance
:param server: The :class:`Server` (or its ID) to get the vnc console for.
:param console_type: Type of vnc console to get ('novnc' or 'xvpvnc')
"""
return self._action('get_vnc_console', server,
{'type': console_type})[1]
def get_spice_console(self, server, console_type):
"""
Get a spice console for an instance
:param server: The :class:`Server` (or its ID) to get the spice console for.
:param console_type: Type of spice console to get ('spice-html5')
"""
return self._action('get_spice_console', server,
{'type': console_type})[1]
def get_password(self, server, private_key):
"""
Get password for an instance
Requires that openssl is installed and in the path
:param server: The :class:`Server` (or its ID) to get the password for.
:param private_key: The private key to decrypt password
"""
_resp, body = self.api.client.get("/servers/%s/os-server-password"
% base.getid(server))
if body and body.get('password'):
try:
return crypto.decrypt_password(private_key, body['password'])
except Exception as exc:
return '%sFailed to decrypt:\n%s' % (exc, body['password'])
return ''
def clear_password(self, server):
"""
Clear password for an instance
:param server: The :class:`Server` (or its ID) to clear the password for.
"""
return self._delete("/servers/%s/os-server-password"
% base.getid(server))
def stop(self, server):
"""
Stop the server.
"""
return self._action('stop', server, None)
def force_delete(self, server):
"""
Force delete the server.
"""
return self._action('force_delete', server, None)
def restore(self, server):
"""
Restore soft-deleted server.
"""
return self._action('restore', server, None)
def start(self, server):
"""
Start the server.
"""
self._action('start', server, None)
def pause(self, server):
"""
Pause the server.
"""
self._action('pause', server, None)
def unpause(self, server):
"""
Unpause the server.
"""
self._action('unpause', server, None)
def lock(self, server):
"""
Lock the server.
"""
self._action('lock', server, None)
def unlock(self, server):
"""
Unlock the server.
"""
self._action('unlock', server, None)
def suspend(self, server):
"""
Suspend the server.
"""
self._action('suspend', server, None)
def resume(self, server):
"""
Resume the server.
"""
self._action('resume', server, None)
def rescue(self, server):
"""
Rescue the server.
"""
return self._action('rescue', server, None)
def unrescue(self, server):
"""
Unrescue the server.
"""
self._action('unrescue', server, None)
def shelve(self, server):
"""
Shelve the server.
"""
self._action('shelve', server, None)
def shelve_offload(self, server):
"""
Remove a shelved instance from the compute node.
"""
self._action('shelve_offload', server, None)
def unshelve(self, server):
"""
Unshelve the server.
"""
self._action('unshelve', server, None)
def diagnostics(self, server):
"""Retrieve server diagnostics."""
return self.api.client.get("/servers/%s/os-server-diagnostics" %
base.getid(server))
def create(self, name, image, flavor, meta=None, files=None,
reservation_id=None, min_count=None,
max_count=None, security_groups=None, userdata=None,
key_name=None, availability_zone=None,
block_device_mapping=None, block_device_mapping_v2=None,
nics=None, scheduler_hints=None,
config_drive=None, **kwargs):
# TODO(anthony): indicate in doc string if param is an extension
# and/or optional
"""
Create (boot) a new server.
:param name: Something to name the server.
:param image: The :class:`Image` to boot with.
:param flavor: The :class:`Flavor` to boot onto.
:param meta: A dict of arbitrary key/value metadata to store for this
server. A maximum of five entries is allowed, and both
keys and values must be 255 characters or less.
:param files: A dict of files to overwrite on the server upon boot.
Keys are file names (e.g. ``/etc/passwd``) and values
are the file contents (either as a string or as a
file-like object). A maximum of five entries is allowed,
and each file must be 10k or less.
:param userdata: user data to be exposed by the metadata server;
this can be a string or a file-like object.
:param reservation_id: a UUID for the set of servers being requested.
:param key_name: (optional extension) name of previously created
keypair to inject into the instance.
:param availability_zone: Name of the availability zone for instance
placement.
:param block_device_mapping: (optional extension) A dict of block
device mappings for this server.
:param block_device_mapping_v2: (optional extension) A dict of block
device mappings for this server.
:param nics: (optional extension) an ordered list of nics to be
added to this server, with information about
connected networks, fixed ips, port etc.
:param scheduler_hints: (optional extension) arbitrary key-value pairs
specified by the client to help boot an instance
:param config_drive: (optional extension) value for config drive,
either a boolean or a volume-id
"""
if not min_count:
min_count = 1
if not max_count:
max_count = min_count
if min_count > max_count:
min_count = max_count
boot_args = [name, image, flavor]
boot_kwargs = dict(
meta=meta, files=files, userdata=userdata,
reservation_id=reservation_id, min_count=min_count,
max_count=max_count, security_groups=security_groups,
key_name=key_name, availability_zone=availability_zone,
scheduler_hints=scheduler_hints, config_drive=config_drive,
**kwargs)
if block_device_mapping:
boot_kwargs['block_device_mapping'] = block_device_mapping
elif block_device_mapping_v2:
boot_kwargs['block_device_mapping_v2'] = block_device_mapping_v2
resource_url = "/servers"
if nics:
boot_kwargs['nics'] = nics
response_key = "server"
return self._boot(resource_url, response_key, *boot_args,
**boot_kwargs)
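# Illustrative example (not part of the original module) of the `nics`
# structure accepted by create(); the UUIDs are placeholders. The
# 'v4-fixed-ip', 'v6-fixed-ip' and 'port-id' keys mirror those consumed when
# the request body is built earlier in this module; 'net-id' is assumed to
# follow the same naming convention.
#
#     nics = [{'net-id': '00000000-0000-0000-0000-000000000000',
#              'v4-fixed-ip': '10.0.0.5'},
#             {'port-id': '11111111-1111-1111-1111-111111111111'}]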
def update(self, server, name=None):
"""
Update the name or the password for a server.
:param server: The :class:`Server` (or its ID) to update.
:param name: Update the server's name.
"""
if name is None:
return
body = {
"server": {
"name": name,
},
}
return self._update("/servers/%s" % base.getid(server), body, "server")
def change_password(self, server, password):
"""
Update the password for a server.
"""
self._action("change_password", server, {"admin_password": password})
def delete(self, server):
"""
Delete (i.e. shut down and remove) this server.
"""
self._delete("/servers/%s" % base.getid(server))
def reboot(self, server, reboot_type=REBOOT_SOFT):
"""
Reboot a server.
:param server: The :class:`Server` (or its ID) to reboot.
:param reboot_type: either :data:`REBOOT_SOFT` for a software-level
reboot, or :data:`REBOOT_HARD` for a virtual power cycle hard reboot.
"""
self._action('reboot', server, {'type': reboot_type})
def rebuild(self, server, image, password=None, **kwargs):
"""
Rebuild -- shut down and then re-image -- a server.
:param server: The :class:`Server` (or its ID) to rebuild.
:param image: the :class:`Image` (or its ID) to re-image with.
:param password: string to set as password on the rebuilt server.
"""
body = {'image_ref': base.getid(image)}
if password is not None:
body['admin_password'] = password
_resp, body = self._action('rebuild', server, body, **kwargs)
return Server(self, body['server'])
def migrate(self, server):
"""
Migrate a server to a new host.
:param server: The :class:`Server` (or its ID).
"""
self._action('migrate', server)
def resize(self, server, flavor, **kwargs):
"""
Resize a server's resources.
:param server: The :class:`Server` (or its ID) to resize.
:param flavor: the :class:`Flavor` (or its ID) to resize to.
Until a resize event is confirmed with :meth:`confirm_resize`, the old
server will be kept around and you'll be able to roll back to the old
flavor quickly with :meth:`revert_resize`. All resizes are
automatically confirmed after 24 hours.
"""
info = {'flavor_ref': base.getid(flavor)}
self._action('resize', server, info=info, **kwargs)
def confirm_resize(self, server):
"""
Confirm that the resize worked, thus removing the original server.
:param server: The :class:`Server` (or its ID) whose resize to confirm.
"""
self._action('confirm_resize', server)
def revert_resize(self, server):
"""
Revert a previous resize, switching back to the old server.
:param server: The :class:`Server` (or its ID) whose resize to revert.
"""
self._action('revert_resize', server)
def create_image(self, server, image_name, metadata=None):
"""
Snapshot a server.
:param server: The :class:`Server` (or its ID) to snapshot.
:param image_name: Name to give the snapshot image
:param metadata: Metadata to give newly-created image entity
"""
body = {'name': image_name, 'metadata': metadata or {}}
resp = self._action('create_image', server, body)[0]
location = resp.headers['location']
image_uuid = location.split('/')[-1]
return image_uuid
def backup(self, server, backup_name, backup_type, rotation):
"""
Backup a server instance.
:param server: The :class:`Server` (or its ID) to back up.
:param backup_name: Name of the backup image
:param backup_type: The backup type, like 'daily' or 'weekly'
:param rotation: Int parameter representing how many backups to
keep around.
"""
body = {'name': backup_name,
'backup_type': backup_type,
'rotation': rotation}
self._action('create_backup', server, body)
def set_meta(self, server, metadata):
"""
Set a server's metadata
:param server: The :class:`Server` to add metadata to
:param metadata: A dict of metadata to add to the server
"""
body = {'metadata': metadata}
return self._create("/servers/%s/metadata" % base.getid(server),
body, "metadata")
def get_console_output(self, server, length=None):
"""
Get text console log output from Server.
:param server: The :class:`Server` (or its ID) whose console output
you would like to retrieve.
:param length: The number of tail log lines you would like to retrieve.
"""
if length is None:
# NOTE: On the v3 get_console_output API, -1 means unlimited length.
# Here we translate None, which means unlimited in the internal
# implementation, to -1.
length = -1
return self._action('get_console_output',
server, {'length': length})[1]['output']
def delete_meta(self, server, keys):
"""
Delete metadata from a server
:param server: The :class:`Server` to delete metadata from
:param keys: A list of metadata keys to delete from the server
"""
for k in keys:
self._delete("/servers/%s/metadata/%s" % (base.getid(server), k))
def live_migrate(self, server, host, block_migration, disk_over_commit):
"""
Migrates a running instance to a new machine.
:param server: instance id which comes from nova list.
:param host: destination host name.
:param block_migration: if True, do block_migration.
:param disk_over_commit: if True, Allow overcommit.
"""
self._action('migrate_live', server,
{'host': host,
'block_migration': block_migration,
'disk_over_commit': disk_over_commit})
def reset_state(self, server, state='error'):
"""
Reset the state of an instance to active or error.
:param server: ID of the instance to reset the state of.
:param state: Desired state; either 'active' or 'error'.
Defaults to 'error'.
"""
self._action('reset_state', server, dict(state=state))
def reset_network(self, server):
"""
Reset network of an instance.
"""
self._action('reset_network', server)
def evacuate(self, server, host=None,
on_shared_storage=True, password=None):
"""
Evacuate a server instance.
:param server: The :class:`Server` (or its ID) to evacuate.
:param host: Name of the target host.
:param on_shared_storage: Specifies whether instance files are located
on shared storage
:param password: string to set as password on the evacuated server.
"""
body = {'on_shared_storage': on_shared_storage}
if host is not None:
body['host'] = host
if password is not None:
body['admin_password'] = password
return self._action('evacuate', server, body)
def interface_list(self, server):
"""
List attached network interfaces
:param server: The :class:`Server` (or its ID) to query.
"""
return self._list('/servers/%s/os-attach-interfaces'
% base.getid(server), 'interface_attachments')
def interface_attach(self, server, port_id, net_id, fixed_ip):
"""
Attach a network interface to an instance.
:param server: The :class:`Server` (or its ID) to attach the interface to.
:param port_id: The ID of the port to attach, if any.
:param net_id: The ID of the network to attach a new port to, if any.
:param fixed_ip: The fixed IP address to assign to the new port, if any.
"""
body = {'interface_attachment': {}}
if port_id:
body['interface_attachment']['port_id'] = port_id
if net_id:
body['interface_attachment']['net_id'] = net_id
if fixed_ip:
body['interface_attachment']['fixed_ips'] = [
{'ip_address': fixed_ip}]
return self._create('/servers/%s/os-attach-interfaces'
% base.getid(server),
body, 'interface_attachment')
def interface_detach(self, server, port_id):
"""
Detach a network interface from an instance.
:param server: The :class:`Server` (or its ID) to detach from.
:param port_id: The port to detach.
"""
self._delete('/servers/%s/os-attach-interfaces/%s'
% (base.getid(server), port_id))
def _action(self, action, server, info=None, **kwargs):
"""
Perform a server "action" -- reboot/rebuild/resize/etc.
"""
body = {action: info}
self.run_hooks('modify_body_for_action', body, **kwargs)
url = '/servers/%s/action' % base.getid(server)
return self.api.client.post(url, body=body)
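# Illustrative usage sketch (not part of the original module). It assumes a
# hypothetical novaclient-style `client` object whose `servers` attribute is
# an instance of the ServerManager defined above; the image, flavor and
# network IDs are placeholders.
def _demo_server_lifecycle(client):
    """Boot a server, list active servers, then resize and confirm."""
    server = client.servers.create(
        name='demo-instance',
        image='IMAGE-ID-PLACEHOLDER',
        flavor='FLAVOR-ID-PLACEHOLDER',
        nics=[{'net-id': 'NET-ID-PLACEHOLDER'}])
    # Filter the listing server-side and cap the page size.
    active = client.servers.list(search_opts={'status': 'ACTIVE'}, limit=10)
    # Resize, then confirm; until confirmed, revert_resize() could roll back.
    client.servers.resize(server, 'NEW-FLAVOR-ID-PLACEHOLDER')
    client.servers.confirm_resize(server)
    return server, active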
|
|
"""Stdout, stderr and argv support for unicode."""
##############################################
# Support for unicode in windows cmd.exe
# Posted on Stack Overflow [1], available under CC-BY-SA 3.0 [2]
#
# Question: "Windows cmd encoding change causes Python crash" [3] by Alex [4],
# Answered [5] by David-Sarah Hopwood [6].
#
# [1] https://stackoverflow.com
# [2] https://creativecommons.org/licenses/by-sa/3.0/
# [3] https://stackoverflow.com/questions/878972
# [4] https://stackoverflow.com/users/85185
# [5] https://stackoverflow.com/a/3259271/118671
# [6] https://stackoverflow.com/users/393146
#
################################################
#
# stdin support added by Merlijn van Deen <[email protected]>, March 2012
# Licensed under both CC-BY-SA and the MIT license.
#
################################################
from __future__ import print_function, unicode_literals
from io import UnsupportedOperation
import sys
stdin = sys.stdin
stdout = sys.stdout
stderr = sys.stderr
argv = sys.argv
if sys.version_info[0] > 2:
unicode = str
PY3 = True
else:
PY3 = False
if sys.platform == "win32":
import codecs
from ctypes import WINFUNCTYPE, windll, POINTER
from ctypes import byref, c_int, create_unicode_buffer
from ctypes.wintypes import BOOL, HANDLE, DWORD, LPWSTR, LPCWSTR
try:
from ctypes.wintypes import LPVOID
except ImportError:
from ctypes import c_void_p as LPVOID
original_stderr = sys.stderr
# If any exception occurs in this code, we'll probably try to print it on stderr,
# which makes for frustrating debugging if stderr is directed to our wrapper.
# So be paranoid about catching errors and reporting them to original_stderr,
# so that we can at least see them.
def _complain(message):
print(isinstance(message, str) and message or repr(message), file=original_stderr)
# Work around <http://bugs.python.org/issue6058>.
codecs.register(lambda name: name == 'cp65001' and codecs.lookup('utf-8') or None)
# Make Unicode console output work independently of the current code page.
# This also fixes <http://bugs.python.org/issue1602>.
# Credit to Michael Kaplan <http://blogs.msdn.com/b/michkap/archive/2010/04/07/9989346.aspx>
# and TZOmegaTZIOY
# <https://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/1432462#1432462>.
try:
# <https://msdn.microsoft.com/en-us/library/ms683231(VS.85).aspx>
# HANDLE WINAPI GetStdHandle(DWORD nStdHandle);
# returns INVALID_HANDLE_VALUE, NULL, or a valid handle
#
# <https://msdn.microsoft.com/en-us/library/aa364960(VS.85).aspx>
# DWORD WINAPI GetFileType(DWORD hFile);
#
# <https://msdn.microsoft.com/en-us/library/ms683167(VS.85).aspx>
# BOOL WINAPI GetConsoleMode(HANDLE hConsole, LPDWORD lpMode);
GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(("GetStdHandle", windll.kernel32))
STD_INPUT_HANDLE = DWORD(-10)
STD_OUTPUT_HANDLE = DWORD(-11)
STD_ERROR_HANDLE = DWORD(-12)
GetFileType = WINFUNCTYPE(DWORD, DWORD)(("GetFileType", windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = WINFUNCTYPE(BOOL, HANDLE, POINTER(DWORD))(("GetConsoleMode", windll.kernel32))
INVALID_HANDLE_VALUE = DWORD(-1).value
def not_a_console(handle):
if handle == INVALID_HANDLE_VALUE or handle is None:
return True
return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
GetConsoleMode(handle, byref(DWORD())) == 0)
def old_fileno(std_name):
# some environments like IDLE don't support the fileno operation
# handle those like std streams which don't have fileno at all
std = getattr(sys, 'std{0}'.format(std_name))
if hasattr(std, 'fileno'):
try:
return std.fileno()
except UnsupportedOperation:
pass
old_stdin_fileno = old_fileno('in')
old_stdout_fileno = old_fileno('out')
old_stderr_fileno = old_fileno('err')
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2
real_stdin = (old_stdin_fileno == STDIN_FILENO)
real_stdout = (old_stdout_fileno == STDOUT_FILENO)
real_stderr = (old_stderr_fileno == STDERR_FILENO)
if real_stdin:
hStdin = GetStdHandle(STD_INPUT_HANDLE)
if not_a_console(hStdin):
real_stdin = False
if real_stdout:
hStdout = GetStdHandle(STD_OUTPUT_HANDLE)
if not_a_console(hStdout):
real_stdout = False
if real_stderr:
hStderr = GetStdHandle(STD_ERROR_HANDLE)
if not_a_console(hStderr):
real_stderr = False
if real_stdin:
ReadConsoleW = WINFUNCTYPE(BOOL, HANDLE, LPVOID, DWORD, POINTER(DWORD),
LPVOID)(("ReadConsoleW", windll.kernel32))
class UnicodeInput:
"""Unicode terminal input class."""
def __init__(self, hConsole, name, bufsize=1024):
self._hConsole = hConsole
self.bufsize = bufsize
self.buffer = create_unicode_buffer(bufsize)
self.name = name
self.encoding = 'utf-8'
def readline(self):
maxnum = DWORD(self.bufsize - 1)
numrecv = DWORD(0)
result = ReadConsoleW(self._hConsole, self.buffer, maxnum, byref(numrecv), None)
if not result:
raise Exception("stdin failure")
data = self.buffer.value[:numrecv.value]
if not PY3:
return data.encode(self.encoding)
else:
return data
if real_stdout or real_stderr:
# BOOL WINAPI WriteConsoleW(HANDLE hOutput, LPWSTR lpBuffer, DWORD nChars,
# LPDWORD lpCharsWritten, LPVOID lpReserved);
WriteConsoleW = WINFUNCTYPE(BOOL, HANDLE, LPWSTR, DWORD, POINTER(DWORD),
LPVOID)(("WriteConsoleW", windll.kernel32))
class UnicodeOutput:
"""Unicode terminal output class."""
def __init__(self, hConsole, stream, fileno, name):
self._hConsole = hConsole
self._stream = stream
self._fileno = fileno
self.closed = False
self.softspace = False
self.mode = 'w'
self.encoding = 'utf-8'
self.name = name
self.flush()
def isatty(self):
return False
def close(self):
# don't really close the handle, that would only cause problems
self.closed = True
def fileno(self):
return self._fileno
def flush(self):
if self._hConsole is None:
try:
self._stream.flush()
except Exception as e:
_complain("%s.flush: %r from %r"
% (self.name, e, self._stream))
raise
def write(self, text):
try:
if self._hConsole is None:
if isinstance(text, unicode):
text = text.encode('utf-8')
self._stream.write(text)
else:
if not isinstance(text, unicode):
text = bytes(text).decode('utf-8')
remaining = len(text)
while remaining > 0:
n = DWORD(0)
# There is a shorter-than-documented limitation on the
# length of the string passed to WriteConsoleW (see
# <https://tahoe-lafs.org/trac/tahoe-lafs/ticket/1232>.
retval = WriteConsoleW(self._hConsole, text,
min(remaining, 10000),
byref(n), None)
if retval == 0 or n.value == 0:
raise IOError("WriteConsoleW returned %r, n.value = %r"
% (retval, n.value))
remaining -= n.value
if remaining == 0:
break
text = text[n.value:]
except Exception as e:
_complain("%s.write: %r" % (self.name, e))
raise
def writelines(self, lines):
try:
for line in lines:
self.write(line)
except Exception as e:
_complain("%s.writelines: %r" % (self.name, e))
raise
if real_stdin:
stdin = UnicodeInput(hStdin, name='<Unicode console stdin>')
if real_stdout:
stdout = UnicodeOutput(hStdout, sys.stdout, STDOUT_FILENO,
'<Unicode console stdout>')
else:
stdout = UnicodeOutput(None, sys.stdout, old_stdout_fileno,
'<Unicode redirected stdout>')
if real_stderr:
stderr = UnicodeOutput(hStderr, sys.stderr, STDERR_FILENO,
'<Unicode console stderr>')
else:
stderr = UnicodeOutput(None, sys.stderr, old_stderr_fileno,
'<Unicode redirected stderr>')
except Exception as e:
_complain("exception %r while fixing up sys.stdout and sys.stderr" % (e,))
# While we're at it, let's unmangle the command-line arguments:
# This works around <http://bugs.python.org/issue2128>.
GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32))
CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(("CommandLineToArgvW", windll.shell32))
argc = c_int(0)
argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
argv = [argv_unicode[i].encode('utf-8') for i in range(0, argc.value)]
if not hasattr(sys, 'frozen'):
# If this is an executable produced by py2exe or bbfreeze, then it will
# have been invoked directly. Otherwise, unicode_argv[0] is the Python
# interpreter, so skip that.
argv = argv[1:]
# Also skip option arguments to the Python interpreter.
while len(argv) > 0:
arg = argv[0]
if not arg.startswith(b"-") or arg == u"-":
break
argv = argv[1:]
if arg == u'-m':
# sys.argv[0] should really be the absolute path of the module source,
# but never mind
break
if arg == u'-c':
argv[0] = u'-c'
break
if argv == []:
argv = [u'']
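# Illustrative usage sketch (not part of the original module). The wrapped
# `stdout` and `argv` names defined above are drop-in replacements for the
# plain sys objects; on a win32 console they round-trip non-ASCII text.
def _demo_echo_args():
    """Write each command-line argument to the (possibly wrapped) stdout."""
    for arg in argv:
        if isinstance(arg, bytes):
            arg = arg.decode('utf-8')
        stdout.write(arg + u'\n')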
|
|
"""Base estimator class."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import os
import shutil
from six import string_types
import numpy as np
from google.protobuf import text_format
from tensorflow.python.platform import gfile
from tensorflow.python.client import session
from tensorflow.core.framework import graph_pb2
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import importer
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.training import training as train
from tensorflow.contrib.layers import optimizers
from tensorflow.contrib.learn.python.learn import trainer
from tensorflow.contrib.learn.python.learn.io.data_feeder import setup_train_data_feeder
from tensorflow.contrib.learn.python.learn.io.data_feeder import setup_predict_data_feeder
from tensorflow.contrib.learn.python.learn.ops.dropout_ops import DROPOUTS
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
def _write_with_backup(filename, content):
if gfile.Exists(filename):
gfile.Rename(filename, filename + '.old', overwrite=True)
with gfile.Open(filename, 'w') as f:
f.write(content)
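# Illustrative example (not part of the original module): calling the helper
# twice with the same filename keeps the previous contents in '<filename>.old'
# and leaves the latest contents in the file itself, e.g.
#
#     _write_with_backup('/tmp/model.def', 'v1')
#     _write_with_backup('/tmp/model.def', 'v2')
#     # now '/tmp/model.def' holds 'v2' and '/tmp/model.def.old' holds 'v1'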
class TensorFlowEstimator(_sklearn.BaseEstimator):
"""Base class for all TensorFlow estimators.
Parameters:
model_fn: Model function, that takes input X, y tensors and outputs
prediction and loss tensors.
n_classes: Number of classes in the target.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is a constant float value, no decay function is used.
Alternatively, a customized decay function can be passed that accepts
global_step as a parameter and returns a Tensor,
e.g. an exponential decay function:
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=2, decay_rate=0.001)
clip_gradients: Clip norm of the gradients to this value to stop
gradient explosion.
class_weight: None or list of n_classes floats. Weight associated with
classes for loss computation. If not given, all classes are supposed to
have weight one.
continue_training: when continue_training is True, the already initialized
model will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the
session, e.g. num_cores, gpu_memory_fraction, etc.
verbose: Controls the verbosity, possible values:
0: the algorithm and debug information is muted.
1: trainer prints the progress.
2: log device placement is printed.
"""
def __init__(self,
model_fn,
n_classes,
batch_size=32,
steps=200,
optimizer='Adagrad',
learning_rate=0.1,
clip_gradients=5.0,
class_weight=None,
continue_training=False,
config=None,
verbose=1):
self.model_fn = model_fn
self.n_classes = n_classes
self.batch_size = batch_size
self.steps = steps
self.verbose = verbose
self.optimizer = optimizer
self.learning_rate = learning_rate
self.clip_gradients = clip_gradients
self.continue_training = continue_training
self._initialized = False
self.class_weight = class_weight
self._config = config
def _setup_training(self):
"""Sets up graph, model and trainer."""
# Create config if not given.
if self._config is None:
self._config = RunConfig(verbose=self.verbose)
# Create new graph.
self._graph = ops.Graph()
self._graph.add_to_collection('IS_TRAINING', True)
with self._graph.as_default():
random_seed.set_random_seed(self._config.tf_random_seed)
self._global_step = variables.Variable(0,
name='global_step',
trainable=False)
# Setting up inputs and outputs.
self._inp, self._out = self._data_feeder.input_builder()
# If class weights are provided, add them to the graph.
# Different loss functions can use this tensor by name.
if self.class_weight:
self._class_weight_node = constant_op.constant(self.class_weight,
name='class_weight')
# Add histograms for X and y if they are floats.
if self._data_feeder.input_dtype in (np.float32, np.float64):
logging_ops.histogram_summary('X', self._inp)
if self._data_feeder.output_dtype in (np.float32, np.float64)\
and self._out is not None:
logging_ops.histogram_summary('y', self._out)
# Create model's graph.
self._model_predictions, self._model_loss = self.model_fn(self._inp,
self._out)
# Set up a single operator to merge all the summaries
self._summaries = logging_ops.merge_all_summaries()
# Create trainer and augment graph with gradients and optimizer.
# Additionally creates initialization ops.
learning_rate = self.learning_rate
optimizer = self.optimizer
if callable(learning_rate):
learning_rate = learning_rate(self._global_step)
if callable(optimizer):
optimizer = optimizer(learning_rate)
self._train = optimizers.optimize_loss(self._model_loss,
self._global_step,
learning_rate=learning_rate,
optimizer=optimizer,
clip_gradients=self.clip_gradients)
# Update ops during training, e.g. batch_norm_ops
self._train = control_flow_ops.group(self._train, *
ops.get_collection('update_ops'))
# Get all initializers for all trainable variables.
self._initializers = variables.initialize_all_variables()
# Create model's saver capturing all the nodes created up until now.
self._saver = train.Saver(max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=
self._config.keep_checkpoint_every_n_hours)
# Enable monitor to create validation data dict with appropriate
# tf placeholders
self._monitor.create_val_feed_dict(self._inp, self._out)
# Create session to run model with.
self._session = session.Session(self._config.tf_master,
config=self._config.tf_config)
# Run parameter initializers.
self._session.run(self._initializers)
def _setup_summary_writer(self, logdir):
"""Sets up summary writer to prepare for later optional visualization."""
self._summary_writer = train.SummaryWriter(
os.path.join(logdir,
datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')),
graph=self._session.graph)
def fit(self, X, y, monitor=None, logdir=None):
"""Builds a neural network model given provided `model_fn` and training
data X and y.
Note: the first call constructs the graph and initializes variables.
Consecutive calls continue training the same model. This logic follows
the partial_fit() interface in scikit-learn. To restart learning,
create a new estimator.
Args:
X: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression).
monitor: Monitor object to print training progress and invoke early
stopping
logdir: the directory to save the log file that can be used for
optional visualization.
Returns:
Returns self.
"""
# Sets up data feeder.
self._data_feeder = setup_train_data_feeder(X, y, self.n_classes,
self.batch_size)
if monitor is None:
self._monitor = monitors.default_monitor(verbose=self.verbose)
else:
self._monitor = monitor
if not self.continue_training or not self._initialized:
# Sets up model and trainer.
self._setup_training()
self._initialized = True
else:
self._data_feeder.set_placeholders(self._inp, self._out)
# Sets up summary writer for later optional visualization.
# _summary_writer cannot be set up in __init__ because it is not a
# parameter of the model, so check here whether the attribute exists
# and whether it is None (it may have been set up in a previous run).
# It is initialized only if it was not set up before and a log dir
# is provided.
if logdir:
if (not hasattr(self, '_summary_writer') or
(hasattr(self, '_summary_writer') and self._summary_writer is None)):
self._setup_summary_writer(logdir)
else:
self._summary_writer = None
# Train model for given number of steps.
trainer.train(self._session,
self._train,
self._model_loss,
self._global_step,
self._data_feeder.get_feed_dict_fn(),
steps=self.steps,
monitor=self._monitor,
summary_writer=self._summary_writer,
summaries=self._summaries,
feed_params_fn=self._data_feeder.get_feed_params)
return self
def partial_fit(self, X, y):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time. Or when model is taking long time
to converge, and you want to split up training into subparts.
Args:
X: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class label in classification, real numbers in regression).
Returns:
Returns self.
"""
return self.fit(X, y)
def _predict(self, X, axis=-1, batch_size=None):
if not self._initialized:
raise _sklearn.NotFittedError()
# Use the batch size for fitting if the user did not specify one.
if batch_size is None:
batch_size = self.batch_size
self._graph.add_to_collection('IS_TRAINING', False)
predict_data_feeder = setup_predict_data_feeder(X, batch_size=batch_size)
preds = []
dropouts = self._graph.get_collection(DROPOUTS)
feed_dict = {prob: 1.0 for prob in dropouts}
for data in predict_data_feeder:
feed_dict[self._inp] = data
predictions_for_batch = self._session.run(self._model_predictions,
feed_dict)
if self.n_classes > 1 and axis != -1:
preds.append(predictions_for_batch.argmax(axis=axis))
else:
preds.append(predictions_for_batch)
return np.concatenate(preds, axis=0)
def predict(self, X, axis=1, batch_size=None):
"""Predict class or regression for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Args:
X: array-like matrix, [n_samples, n_features...] or iterator.
axis: Which axis to argmax for classification.
By default axis 1 (next after batch) is used.
Use 2 for sequence predictions.
batch_size: If test set is too big, use batch size to split
it into mini batches. By default the batch_size member
variable is used.
Returns:
y: array of shape [n_samples]. The predicted classes or predicted
value.
"""
return self._predict(X, axis=axis, batch_size=batch_size)
def predict_proba(self, X, batch_size=None):
"""Predict class probability of the input samples X.
Args:
X: array-like matrix, [n_samples, n_features...] or iterator.
batch_size: If test set is too big, use batch size to split
it into mini batches. By default the batch_size member variable is used.
Returns:
y: array of shape [n_samples, n_classes]. The predicted
probabilities for each class.
"""
return self._predict(X, batch_size=batch_size)
def get_tensor(self, name):
"""Returns tensor by name.
Args:
name: string, name of the tensor.
Returns:
Tensor.
"""
return self._graph.get_tensor_by_name(name)
def __init__(self, model_fn, n_classes, batch_size=32,
steps=200, optimizer="Adagrad",
learning_rate=0.1, clip_gradients=5.0, class_weight=None,
continue_training=False,
config=None, verbose=1):
self.model_fn = model_fn
self.n_classes = n_classes
self.batch_size = batch_size
self.steps = steps
self.verbose = verbose
self.optimizer = optimizer
self.learning_rate = learning_rate
self.clip_gradients = clip_gradients
self.continue_training = continue_training
self._initialized = False
self.class_weight = class_weight
self._config = config
self._output_dir = None
def _setup_training(self):
"""Sets up graph, model and trainer."""
# Create config if not given.
if self._config is None:
self._config = RunConfig(verbose=self.verbose)
# Create new graph.
self._graph = ops.Graph()
self._graph.add_to_collection("IS_TRAINING", True)
with self._graph.as_default():
random_seed.set_random_seed(self._config.tf_random_seed)
self._global_step = variables.Variable(
0, name="global_step", trainable=False)
# Setting up inputs and outputs.
self._inp, self._out = self._data_feeder.input_builder()
# If class weights are provided, add them to the graph.
# Different loss functions can use this tensor by name.
if self.class_weight:
self._class_weight_node = constant_op.constant(
self.class_weight, name='class_weight')
# Add histograms for X and y if they are floats.
if self._data_feeder.input_dtype in (np.float32, np.float64):
logging_ops.histogram_summary("X", self._inp)
if self._data_feeder.output_dtype in (np.float32, np.float64)\
and self._out is not None:
logging_ops.histogram_summary("y", self._out)
# Create model's graph.
self._model_predictions, self._model_loss = self.model_fn(
self._inp, self._out)
# Create trainer and augment graph with gradients and optimizer.
# Additionally creates initialization ops.
learning_rate = self.learning_rate
optimizer = self.optimizer
if callable(learning_rate):
learning_rate = learning_rate(self._global_step)
if callable(optimizer):
optimizer = optimizer(learning_rate)
self._train = optimizers.optimize_loss(self._model_loss, self._global_step,
learning_rate=learning_rate,
optimizer=optimizer, clip_gradients=self.clip_gradients)
# Update ops during training, e.g. batch_norm_ops
self._train = control_flow_ops.group(self._train, *ops.get_collection('update_ops'))
# Merge all summaries into single tensor.
self._summaries = logging_ops.merge_all_summaries()
# Get all initializers for all trainable variables.
self._initializers = variables.initialize_all_variables()
# Create model's saver capturing all the nodes created up until now.
self._saver = train.Saver(
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=self._config.keep_checkpoint_every_n_hours)
# Enable monitor to create validation data dict with appropriate tf placeholders
self._monitor.create_val_feed_dict(self._inp, self._out)
# Create session to run model with.
self._session = session.Session(self._config.tf_master, config=self._config.tf_config)
# Run parameter initializers.
self._session.run(self._initializers)
def _setup_summary_writer(self, logdir):
"""Sets up the summary writer to prepare for later optional visualization."""
self._output_dir = os.path.join(logdir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
self._summary_writer = train.SummaryWriter(self._output_dir, graph=self._session.graph)
def fit(self, X, y, monitor=None, logdir=None):
"""Builds a neural network model given provided `model_fn` and training
data X and y.
Note: the first call constructs the graph and initializes variables.
Consecutive calls continue training the same model. This logic follows
the partial_fit() interface in scikit-learn. To restart learning,
create a new estimator.
Args:
X: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression).
monitor: Monitor object to print training progress and invoke early stopping
logdir: the directory to save the log file that can be used for
optional visualization.
Returns:
Returns self.
"""
# Sets up data feeder.
self._data_feeder = setup_train_data_feeder(X, y,
self.n_classes,
self.batch_size)
if monitor is None:
self._monitor = monitors.default_monitor(verbose=self.verbose)
else:
self._monitor = monitor
if not self.continue_training or not self._initialized:
# Sets up model and trainer.
self._setup_training()
self._initialized = True
else:
self._data_feeder.set_placeholders(self._inp, self._out)
# Sets up summary writer for later optional visualization.
# _summary_writer cannot be set up in __init__ because it is not a
# parameter of the model, so check here whether the attribute exists
# and whether it is None (it may have been set up in a previous run).
# It is initialized only if it was not set up before and a log dir
# is provided.
if logdir:
if (not hasattr(self, "_summary_writer") or
(hasattr(self, "_summary_writer") and self._summary_writer is None)):
self._setup_summary_writer(logdir)
else:
self._summary_writer = None
# Attach monitor to this estimator.
self._monitor.set_estimator(self)
# Train model for given number of steps.
trainer.train(
self._session, self._train,
self._model_loss, self._global_step,
self._data_feeder.get_feed_dict_fn(),
steps=self.steps,
monitor=self._monitor,
summary_writer=self._summary_writer,
summaries=self._summaries,
feed_params_fn=self._data_feeder.get_feed_params)
return self
def partial_fit(self, X, y):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time. Or when model is taking long time
to converge, and you want to split up training into subparts.
Args:
X: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class label in classification, real numbers in regression).
Returns:
Returns self.
"""
return self.fit(X, y)
def _predict(self, X, axis=-1, batch_size=None):
if not self._initialized:
raise _sklearn.NotFittedError()
# Use the batch size for fitting if the user did not specify one.
if batch_size is None:
batch_size = self.batch_size
self._graph.add_to_collection("IS_TRAINING", False)
predict_data_feeder = setup_predict_data_feeder(
X, batch_size=batch_size)
preds = []
dropouts = self._graph.get_collection(DROPOUTS)
feed_dict = {prob: 1.0 for prob in dropouts}
for data in predict_data_feeder:
feed_dict[self._inp] = data
predictions_for_batch = self._session.run(
self._model_predictions,
feed_dict)
if self.n_classes > 1 and axis != -1:
preds.append(predictions_for_batch.argmax(axis=axis))
else:
preds.append(predictions_for_batch)
return np.concatenate(preds, axis=0)
def predict(self, X, axis=1, batch_size=None):
"""Predict class or regression for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Args:
X: array-like matrix, [n_samples, n_features...] or iterator.
axis: Which axis to argmax for classification.
By default axis 1 (next after batch) is used.
Use 2 for sequence predictions.
batch_size: If test set is too big, use batch size to split
it into mini batches. By default the batch_size member
variable is used.
Returns:
y: array of shape [n_samples]. The predicted classes or predicted
value.
"""
return self._predict(X, axis=axis, batch_size=batch_size)
def predict_proba(self, X, batch_size=None):
"""Predict class probability of the input samples X.
Args:
X: array-like matrix, [n_samples, n_features...] or iterator.
batch_size: If test set is too big, use batch size to split
it into mini batches. By default the batch_size
member variable is used.
Returns:
y: array of shape [n_samples, n_classes]. The predicted
probabilities for each class.
"""
return self._predict(X, batch_size=batch_size)
def get_tensor(self, name):
"""Returns tensor by name.
Args:
name: string, name of the tensor.
Returns:
Tensor.
"""
return self._graph.get_tensor_by_name(name)
def get_tensor_value(self, name):
"""Returns value of the tensor give by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return self._session.run(self.get_tensor(name))
def save(self, path):
"""Saves checkpoints and graph to given path.
Args:
path: Folder to save model to.
"""
if not self._initialized:
raise _sklearn.NotFittedError()
# Currently Saver requires absolute path to work correctly.
path = os.path.abspath(path)
if not os.path.exists(path):
os.makedirs(path)
if not os.path.isdir(path):
raise ValueError("Path %s should be a directory to save"
"checkpoints and graph." % path)
# Save model definition.
all_params = self.get_params()
params = {}
for key, value in all_params.items():
if not callable(value) and value is not None:
params[key] = value
params['class_name'] = type(self).__name__
model_def = json.dumps(
params,
default=lambda o: o.__dict__ if hasattr(o, '__dict__') else None)
_write_with_backup(os.path.join(path, 'model.def'), model_def)
# Save checkpoints.
endpoints = '%s\n%s\n%s\n%s' % (
self._inp.name,
self._out.name,
self._model_predictions.name,
self._model_loss.name)
_write_with_backup(os.path.join(path, 'endpoints'), endpoints)
# Save graph definition.
_write_with_backup(os.path.join(path, 'graph.pbtxt'), str(self._graph.as_graph_def()))
# Save saver definition.
_write_with_backup(os.path.join(path, 'saver.pbtxt'), str(self._saver.as_saver_def()))
# Save checkpoints.
self._saver.save(self._session, os.path.join(path, 'model'),
global_step=self._global_step)
def _restore(self, path):
"""Restores this estimator from given path.
Note: will rebuild the graph and initialize all parameters,
and will ignore provided model.
Args:
path: Path to checkpoints and other information.
"""
# Currently Saver requires absolute path to work correctly.
path = os.path.abspath(path)
self._graph = ops.Graph()
with self._graph.as_default():
endpoints_filename = os.path.join(path, 'endpoints')
if not os.path.exists(endpoints_filename):
raise ValueError("Restore folder doesn't contain endpoints.")
with gfile.Open(endpoints_filename) as foutputs:
endpoints = foutputs.read().split('\n')
graph_filename = os.path.join(path, 'graph.pbtxt')
if not os.path.exists(graph_filename):
raise ValueError("Restore folder doesn't contain graph definition.")
with gfile.Open(graph_filename) as fgraph:
graph_def = graph_pb2.GraphDef()
text_format.Merge(fgraph.read(), graph_def)
(self._inp, self._out,
self._model_predictions, self._model_loss) = importer.import_graph_def(
graph_def, name='', return_elements=endpoints)
saver_filename = os.path.join(path, 'saver.pbtxt')
if not os.path.exists(saver_filename):
raise ValueError("Restore folder doesn't contain saver definition.")
with gfile.Open(saver_filename) as fsaver:
saver_def = train.SaverDef()
text_format.Merge(fsaver.read(), saver_def)
self._saver = train.Saver(saver_def=saver_def)
# Restore trainer
self._global_step = self._graph.get_tensor_by_name('global_step:0')
self._train = self._graph.get_operation_by_name('train')
# Restore summaries.
self._summaries = self._graph.get_operation_by_name('MergeSummary/MergeSummary')
# Restore session.
if not isinstance(self._config, RunConfig):
self._config = RunConfig(verbose=self.verbose)
self._session = session.Session(
self._config.tf_master,
config=self._config.tf_config)
checkpoint_path = train.latest_checkpoint(path)
if checkpoint_path is None:
raise ValueError("Missing checkpoint files in the %s. Please "
"make sure you are you have checkpoint file that describes "
"latest checkpoints and appropriate checkpoints are there. "
"If you have moved the folder, you at this point need to "
"update manually update the paths in the checkpoint file." % path)
self._saver.restore(self._session, checkpoint_path)
# Set to be initialized.
self._initialized = True
# pylint: disable=unused-argument
@classmethod
def restore(cls, path, config=None):
"""Restores model from give path.
Args:
path: Path to the checkpoints and other model information.
config: RunConfig object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be reconfigured.
Returns:
Estimator, an object of the subclass of TensorFlowEstimator.
"""
model_def_filename = os.path.join(path, 'model.def')
if not os.path.exists(model_def_filename):
raise ValueError("Restore folder doesn't contain model definition.")
# list of parameters that are allowed to be reconfigured
reconfigurable_params = ['_config']
_config = config
with gfile.Open(model_def_filename) as fmodel:
model_def = json.loads(fmodel.read())
# TensorFlow binding requires parameters to be strings not unicode.
# Only issue in Python2.
for key, value in model_def.items():
if (isinstance(value, string_types) and
not isinstance(value, str)):
model_def[key] = str(value)
if key in reconfigurable_params:
new_value = locals()[key]
if new_value is not None:
model_def[key] = new_value
class_name = model_def.pop('class_name')
if class_name == 'TensorFlowEstimator':
custom_estimator = TensorFlowEstimator(model_fn=None, **model_def)
custom_estimator._restore(path)
return custom_estimator
# To avoid cyclical dependencies, import inside the function instead of
# the beginning of the file.
from tensorflow.contrib.learn.python.learn import estimators
# Estimator must be one of the defined estimators in the __init__ file.
estimator = getattr(estimators, class_name)(**model_def)
estimator._restore(path)
return estimator
def get_tensor_value(self, name):
"""Returns value of the tensor give by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return self._session.run(self.get_tensor(name))
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
with self._graph.as_default():
return [v.name for v in variables.all_variables()]
def save(self, path):
"""Saves checkpoints and graph to given path.
Args:
path: Folder to save model to.
"""
if not self._initialized:
raise _sklearn.NotFittedError()
# Currently Saver requires absolute path to work correctly.
path = os.path.abspath(path)
if not os.path.exists(path):
os.makedirs(path)
if not os.path.isdir(path):
raise ValueError('Path %s should be a directory to save '
'checkpoints and graph.' % path)
# Save model definition.
all_params = self.get_params()
params = {}
for key, value in all_params.items():
if not callable(value) and value is not None:
params[key] = value
params['class_name'] = type(self).__name__
model_def = json.dumps(
params,
default=lambda o: o.__dict__ if hasattr(o, '__dict__') else None)
_write_with_backup(os.path.join(path, 'model.def'), model_def)
# Save checkpoints.
endpoints = '%s\n%s\n%s\n%s' % (self._inp.name, self._out.name,
self._model_predictions.name,
self._model_loss.name)
_write_with_backup(os.path.join(path, 'endpoints'), endpoints)
# Save graph definition.
_write_with_backup(
os.path.join(path, 'graph.pbtxt'), str(self._graph.as_graph_def()))
# Save saver definition.
_write_with_backup(
os.path.join(path, 'saver.pbtxt'), str(self._saver.as_saver_def()))
# Save checkpoints.
self._saver.save(self._session,
os.path.join(path, 'model'),
global_step=self._global_step)
def _restore(self, path):
"""Restores this estimator from given path.
Note: will rebuild the graph and initialize all parameters,
and will ignore provided model.
Args:
path: Path to checkpoints and other information.
"""
# Currently Saver requires absolute path to work correctly.
path = os.path.abspath(path)
self._graph = ops.Graph()
with self._graph.as_default():
endpoints_filename = os.path.join(path, 'endpoints')
if not os.path.exists(endpoints_filename):
raise ValueError("Restore folder doesn't contain endpoints.")
with gfile.Open(endpoints_filename) as foutputs:
endpoints = foutputs.read().split('\n')
graph_filename = os.path.join(path, 'graph.pbtxt')
if not os.path.exists(graph_filename):
raise ValueError("Restore folder doesn't contain graph definition.")
with gfile.Open(graph_filename) as fgraph:
graph_def = graph_pb2.GraphDef()
text_format.Merge(fgraph.read(), graph_def)
(self._inp, self._out, self._model_predictions,
self._model_loss) = importer.import_graph_def(
graph_def, name='', return_elements=endpoints)
saver_filename = os.path.join(path, 'saver.pbtxt')
if not os.path.exists(saver_filename):
raise ValueError("Restore folder doesn't contain saver definition.")
with gfile.Open(saver_filename) as fsaver:
saver_def = train.SaverDef()
text_format.Merge(fsaver.read(), saver_def)
self._saver = train.Saver(saver_def=saver_def)
# Restore trainer
self._global_step = self._graph.get_tensor_by_name('global_step:0')
self._train = self._graph.get_operation_by_name('OptimizeLoss/train')
# Restore summaries.
self._summaries = self._graph.get_operation_by_name(
'MergeSummary/MergeSummary')
# Restore session.
if not isinstance(self._config, RunConfig):
self._config = RunConfig(verbose=self.verbose)
self._session = session.Session(self._config.tf_master,
config=self._config.tf_config)
checkpoint_path = train.latest_checkpoint(path)
if checkpoint_path is None:
raise ValueError(
'Missing checkpoint files in %s. Please make sure '
'the checkpoint file that lists the latest checkpoints exists '
'and that the referenced checkpoints are present. If you have '
'moved the folder, you need to manually update the paths in '
'the checkpoint file.' % path)
self._saver.restore(self._session, checkpoint_path)
# Set to be initialized.
self._initialized = True
# pylint: disable=unused-argument
@classmethod
def restore(cls, path, config=None):
"""Restores model from give path.
Args:
path: Path to the checkpoints and other model information.
config: RunConfig object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be
reconfigured.
Returns:
Estimator, an object of the subclass of TensorFlowEstimator.
"""
model_def_filename = os.path.join(path, 'model.def')
if not os.path.exists(model_def_filename):
raise ValueError("Restore folder doesn't contain model definition.")
# list of parameters that are allowed to be reconfigured
reconfigurable_params = ['_config']
_config = config
with gfile.Open(model_def_filename) as fmodel:
model_def = json.loads(fmodel.read())
# TensorFlow binding requires parameters to be strings not unicode.
# Only issue in Python2.
for key, value in model_def.items():
if isinstance(value, string_types) and not isinstance(value, str):
model_def[key] = str(value)
if key in reconfigurable_params:
new_value = locals()[key]
if new_value is not None:
model_def[key] = new_value
class_name = model_def.pop('class_name')
if class_name == 'TensorFlowEstimator':
custom_estimator = TensorFlowEstimator(model_fn=None, **model_def)
custom_estimator._restore(path)
return custom_estimator
# To avoid cyclical dependencies, import inside the function instead of
# the beginning of the file.
from tensorflow.contrib.learn.python.learn import estimators
# Estimator must be one of the defined estimators in the __init__ file.
estimator = getattr(estimators, class_name)(**model_def)
estimator._restore(path)
return estimator
class TensorFlowBaseTransformer(TensorFlowEstimator, _sklearn.TransformerMixin):
"""TensorFlow Base Transformer class."""
def transform(self, X):
"""Transform X using trained transformer."""
return super(TensorFlowBaseTransformer, self).predict(
X, axis=1, batch_size=None)
def fit(self, X, y=None, monitor=None, logdir=None):
"""Fit a transformer."""
return super(TensorFlowBaseTransformer, self).fit(
X, y, monitor=monitor, logdir=logdir)
def fit_transform(self, X, y=None, monitor=None, logdir=None):
"""Fit transformer and transform X using trained transformer."""
return self.fit(X, y, monitor=monitor, logdir=logdir).transform(X)
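# Illustrative usage sketch (not part of the original module). It is a minimal
# regression example; it assumes the sibling `models` module exposes
# `linear_regression(X, y)` returning (predictions, loss), which matches the
# model_fn contract documented above, and `output_dir` is a placeholder path.
def _demo_fit_save_restore(output_dir):
    """Fit a tiny linear regressor, save it, restore it and predict."""
    from tensorflow.contrib.learn.python.learn import models

    def linear_model_fn(X, y):
        # Delegate graph construction to the library-provided builder.
        return models.linear_regression(X, y)

    X_train = np.random.rand(100, 3).astype(np.float32)
    y_train = np.random.rand(100).astype(np.float32)
    estimator = TensorFlowEstimator(model_fn=linear_model_fn, n_classes=0,
                                    batch_size=16, steps=50)
    estimator.fit(X_train, y_train)
    estimator.save(output_dir)
    restored = TensorFlowEstimator.restore(output_dir)
    return restored.predict(X_train)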
|
|
import datetime
import logging
import os
import shutil
import tempfile
from contextlib import suppress
from django.conf import settings
from django.contrib.sessions.backends.base import (
VALID_KEY_CHARS, CreateError, SessionBase, UpdateError,
)
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.utils import timezone
class SessionStore(SessionBase):
"""
Implement a file based session store.
"""
def __init__(self, session_key=None):
self.storage_path = type(self)._get_storage_path()
self.file_prefix = settings.SESSION_COOKIE_NAME
super().__init__(session_key)
@classmethod
def _get_storage_path(cls):
try:
return cls._storage_path
except AttributeError:
storage_path = getattr(settings, "SESSION_FILE_PATH", None)
if not storage_path:
storage_path = tempfile.gettempdir()
# Make sure the storage path is valid.
if not os.path.isdir(storage_path):
raise ImproperlyConfigured(
"The session storage path %r doesn't exist. Please set your"
" SESSION_FILE_PATH setting to an existing directory in which"
" Django can store session data." % storage_path)
cls._storage_path = storage_path
return storage_path
def _key_to_file(self, session_key=None):
"""
Get the file associated with this session key.
"""
if session_key is None:
session_key = self._get_or_create_session_key()
# Make sure we're not vulnerable to directory traversal. Session keys
# should always be md5s, so they should never contain directory
# components.
if not set(session_key).issubset(VALID_KEY_CHARS):
raise InvalidSessionKey(
"Invalid characters in session key")
return os.path.join(self.storage_path, self.file_prefix + session_key)
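# Illustrative example (not part of the original module): with
# SESSION_COOKIE_NAME = 'sessionid' and a storage path of '/tmp', a
# session key of 'abc123' maps to the file '/tmp/sessionidabc123'.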
def _last_modification(self):
"""
Return the modification time of the file storing the session's content.
"""
modification = os.stat(self._key_to_file()).st_mtime
if settings.USE_TZ:
modification = datetime.datetime.utcfromtimestamp(modification)
modification = modification.replace(tzinfo=timezone.utc)
else:
modification = datetime.datetime.fromtimestamp(modification)
return modification
def _expiry_date(self, session_data):
"""
Return the expiry time of the file storing the session's content.
"""
expiry = session_data.get('_session_expiry')
if not expiry:
expiry = self._last_modification() + datetime.timedelta(seconds=settings.SESSION_COOKIE_AGE)
return expiry
def load(self):
session_data = {}
try:
with open(self._key_to_file(), "rb") as session_file:
file_data = session_file.read()
# Don't fail if there is no data in the session file.
# We may have opened the empty placeholder file.
if file_data:
try:
session_data = self.decode(file_data)
except (EOFError, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' % e.__class__.__name__)
logger.warning(str(e))
self.create()
# Remove expired sessions.
expiry_age = self.get_expiry_age(expiry=self._expiry_date(session_data))
if expiry_age <= 0:
session_data = {}
self.delete()
self.create()
except (IOError, SuspiciousOperation):
self._session_key = None
return session_data
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
def save(self, must_create=False):
if self.session_key is None:
return self.create()
# Get the session data now, before we start messing
# with the file it is stored within.
session_data = self._get_session(no_load=must_create)
session_file_name = self._key_to_file()
try:
# Make sure the file exists. If it does not already exist, an
# empty placeholder file is created.
flags = os.O_WRONLY | getattr(os, 'O_BINARY', 0)
if must_create:
flags |= os.O_EXCL | os.O_CREAT
fd = os.open(session_file_name, flags)
os.close(fd)
except FileNotFoundError:
if not must_create:
raise UpdateError
except FileExistsError:
if must_create:
raise CreateError
# Write the session file without interfering with other threads
# or processes. By writing to an atomically generated temporary
# file and then using the atomic os.rename() to make the complete
# file visible, we avoid having to lock the session file, while
# still maintaining its integrity.
#
# Note: Locking the session file was explored, but rejected in part
# because in order to be atomic and cross-platform, it required a
# long-lived lock file for each session, doubling the number of
# files in the session storage directory at any given time. This
# rename solution is cleaner and avoids any additional overhead
# when reading the session data, which is the more common case
# unless SESSION_SAVE_EVERY_REQUEST = True.
#
# See ticket #8616.
dir, prefix = os.path.split(session_file_name)
with suppress(OSError, IOError, EOFError):
output_file_fd, output_file_name = tempfile.mkstemp(dir=dir, prefix=prefix + '_out_')
renamed = False
try:
try:
os.write(output_file_fd, self.encode(session_data).encode())
finally:
os.close(output_file_fd)
# This will atomically rename the file (os.rename) if the OS
# supports it. Otherwise this will result in a shutil.copy2
# and os.unlink (for example on Windows). See #9084.
shutil.move(output_file_name, session_file_name)
renamed = True
finally:
if not renamed:
os.unlink(output_file_name)
def exists(self, session_key):
return os.path.exists(self._key_to_file(session_key))
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
with suppress(OSError):
os.unlink(self._key_to_file(session_key))
def clean(self):
pass
@classmethod
def clear_expired(cls):
storage_path = cls._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
for session_file in os.listdir(storage_path):
if not session_file.startswith(file_prefix):
continue
session_key = session_file[len(file_prefix):]
session = cls(session_key)
# When an expired session is loaded, its file is removed, and a
# new file is immediately created. Prevent this by disabling
# the create() method.
session.create = lambda: None
session.load()
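# A minimal usage sketch of this file-based backend (assumptions: Django is
# configured with SESSION_ENGINE = 'django.contrib.sessions.backends.file';
# the key and value below are hypothetical example data):
#
#   from django.contrib.sessions.backends.file import SessionStore
#   store = SessionStore()
#   store['cart_id'] = 42          # arbitrary example data
#   store.save()                   # writes <storage_path>/<file_prefix><session_key>
#   reloaded = SessionStore(session_key=store.session_key)
#   assert reloaded['cart_id'] == 42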
|
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for training the hierarchical video prediction model."""
import sys
import time
import prediction_input
import prediction_model
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python import debug as tf_debug
from tensorflow.python.platform import app
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'model_mode', 'e2e', 'Mode to run in. Possible values:'
"'individual', 'epva', 'epva_gan', 'e2epose_oneop', 'e2epose_sepop', 'e2e'")
flags.DEFINE_integer('pose_dim', 5, 'Dimension of the end effector pose.')
flags.DEFINE_integer('joint_pos_dim', 7, 'Dimension of the joint positions.')
flags.DEFINE_bool('prefetch_enabled', True,
'Boolean to enable/disable prefetching')
flags.DEFINE_integer('prefetch_dataset_buffer_size', 256 * 1024 * 1024,
'Number of bytes in read buffer. 0 means no buffering.')
flags.DEFINE_integer(
'cycle_length', 64,
'Number of elements from dataset to process concurrently '
'(by interleaver)')
flags.DEFINE_integer(
'block_length', None,
'Number of consecutive elements to produce from each input element '
'before cycling to another input element (by interleaver). '
'If set to None, block_length defaults to batch_size')
flags.DEFINE_integer('num_parallel_calls', 128,
'Number of elements to process in parallel (by mapper)')
flags.DEFINE_integer(
'initial_shuffle_buffer_size', 1024,
'Number of elements from dataset that shuffler will sample from. '
'This shuffling is done before any other operations. '
'Set to 0 to disable')
flags.DEFINE_integer(
'followup_shuffle_buffer_size', 128,
'Number of elements from dataset that shuffler will sample from. '
'This shuffling is done after prefetching is done. '
'Set to 0 to disable')
flags.DEFINE_float('enc_keep_prob', 1.0, 'Dropout keep prob for the encoder.')
flags.DEFINE_float('van_keep_prob', 1.0, 'Dropout keep prob for the VAN')
flags.DEFINE_float('enc_noise_stddev', 0, 'Noise between the encoder and VAN')
flags.DEFINE_bool('is_training', False, 'Passed to the VGG encoder')
flags.DEFINE_bool(
'enc_pred_use_l1_loss', False, 'True to use l1 loss between'
' the encoder and predictor instead of l2')
flags.DEFINE_bool(
'color_data_augment', False, 'Set to true to augment the data'
' by randomly changing the hue.')
flags.DEFINE_bool('encoder_grey_in', False, 'True to convert the encoder input'
' to grey scale.')
flags.DEFINE_integer('enc_size', 64, 'The size of the higher level structure.')
flags.DEFINE_float('pred_noise_std', 0.0,
'The noise to be fed as additional input to the predictor.')
flags.DEFINE_integer(
'discrim_steps_per_pred', 5, 'Number of times to train the'
' discrim for each train of the predictor.')
flags.DEFINE_bool('use_wgan', True, 'True: Wgan, False: Regular gan')
flags.DEFINE_integer(
'discrim_context', 1, 'The number of context frames to'
' feed into the discrim.')
flags.DEFINE_integer('sequence_length', 10,
'sequence length, including context frames.')
flags.DEFINE_integer('skip_num', 1,
'Number of frames to skip when reading input')
flags.DEFINE_string(
'dataset_type', 'human',
'Controls how data is read in the input pipeline. Possible values:'
"'robot', 'human'")
flags.DEFINE_string('data_dir', 'gs://unsupervised-hierarch-video/data',
'directory containing data.')
flags.DEFINE_string('model_dir', '', 'directory for model checkpoints.')
flags.DEFINE_string('event_log_dir', '', 'directory for writing summary.')
flags.DEFINE_integer('train_steps', 4800000,
'Number of steps use for training.')
flags.DEFINE_integer('iterations', 100,
'Number of iterations per TPU training loop.')
flags.DEFINE_integer('num_shards', 8, 'Number of shards (TPU chips).')
flags.DEFINE_integer('context_frames', 2, '# of frames before predictions.')
flags.DEFINE_string('data_pattern', '*train*', '')
flags.DEFINE_integer('batch_size', 8,
'Global batch size on TPU. Per worker batch size on GPU')
flags.DEFINE_bool('imgnet_pretrain', False,
'Whether to pretrain the encoder on imagenet.')
flags.DEFINE_string(
'epv_pretrain_ckpt',
'gs://unsupervised-hierarch-video/pretrained_models/epva_human/',
'The checkpoint to start training from.')
flags.DEFINE_boolean(
'per_host_input_for_training', True,
'If true, input_fn is invoked per host rather than per shard.')
flags.DEFINE_float('enc_learning_rate', 1e-5,
'Used when the encoder is trained separately.')
flags.DEFINE_float('pred_learning_rate', 3e-4,
'Used when the predictor is trained separately.')
flags.DEFINE_float('van_learning_rate', 3e-5,
'Used when the VAN is trained separately.')
flags.DEFINE_float('discrim_learning_rate', 1e-2,
'Used for the discriminator in epva_gan mode.')
flags.DEFINE_float('all_learning_rate', 1e-5,
'Used when multiple parts are trained together.')
flags.DEFINE_float('enc_pred_loss_scale', 1e-2,
'The scale of the encoder and predictor loss.')
flags.DEFINE_float('lstm_state_noise_stddev', 0, 'Noise to add to the lstm'
' states in between predictions.')
flags.DEFINE_float(
'enc_pred_loss_scale_delay', 0,
'Number of steps for the scale to reach half of its maximum.')
flags.DEFINE_boolean(
'enc_pred_use_l2norm', False,
'Use the L2 norm of the encoder and predictor in epva mode.')
flags.DEFINE_float('pose_weight', 1,
'The weight of the pose loss in the e2e with pose method.')
flags.DEFINE_float('van_r_weight', 0.01,
'The weight of the VAN regularization loss.')
flags.DEFINE_float('clip_gradient_norm', 0, '')
flags.DEFINE_bool('use_tpu', False, 'Use TPUs rather than GPU')
flags.DEFINE_bool('use_estimator', False,
'True to use tf.estimator. False for slim.')
flags.DEFINE_string('run_mode', 'train',
"Mode to run in. Possbile values: 'train', 'eval'")
flags.DEFINE_integer('ps_tasks', 0,
'The number of parameter servers. If the value is 0, then '
'the parameters are handled locally by the worker.')
flags.DEFINE_integer('save_summary_steps', 100,
'The frequency with which summaries are saved')
flags.DEFINE_integer('save_checkpoints_secs', 60,
'The frequency with which the model is saved, in seconds.')
flags.DEFINE_integer('task', 0, 'Task id of the replica running the training.')
flags.DEFINE_string('master', '', 'BNS name of the TensorFlow master to use.')
flags.DEFINE_integer('startup_delay_secs', 15,
'Number of training steps between replicas startup.')
flags.DEFINE_bool('use_image_summary', True,
'Whether or not to add the image summary to the graph.')
flags.DEFINE_bool('debug', False, 'Whether to use tf dbg.')
flags.DEFINE_bool('use_legacy_vars', False,
'Use outdated tf.Variable instead of tf.get_variable.')
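# A hypothetical invocation for local (non-TPU) training with slim, assuming
# this file is saved as train.py and the data/model paths below exist:
#
#   python train.py --model_mode=epva --dataset_type=human \
#       --data_dir=/path/to/tfrecords --model_dir=/tmp/epva \
#       --event_log_dir=/tmp/epva --batch_size=8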
def _get_init_fn():
"""Returns a function run by the chief worker to warm-start the training.
Note that the init_fn is only run when initializing the model during the very
first global step.
Returns:
An init function run by the supervisor.
"""
if FLAGS.epv_pretrain_ckpt:
enc_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='timestep/encoder')
pred_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='timestep/predict')
van_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='timestep/van')
all_vars = enc_vars + van_vars + pred_vars
assignment_map = {}
for var in all_vars:
if ('Variable' not in var.op.name) and (
'back_connect_init' not in var.op.name) and (
'noise_dense' not in var.op.name):
assignment_map[var.op.name] = var.op.name
    print('Fine-tuning from %s' % FLAGS.epv_pretrain_ckpt)
sys.stdout.flush()
return tf.train.init_from_checkpoint(FLAGS.epv_pretrain_ckpt,
assignment_map)
elif FLAGS.imgnet_pretrain:
vgg_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope='timestep/encoder/vgg_16')
assignment_map = {}
for var in vgg_vars:
if not var.op.name.startswith('timestep/encoder/vgg_16/fc8'):
assignment_map[var.op.name[len('timestep/encoder/'):]] = var.op.name
checkpoint_path = 'gs://unsupervised-hierarch-video/pretrained_models/vgg_16.ckpt'
    print('Fine-tuning from %s' % checkpoint_path)
sys.stdout.flush()
return tf.train.init_from_checkpoint(checkpoint_path, assignment_map)
def tf_dbg_sess_wrapper(sess):
if FLAGS.debug:
    print('DEBUG')
sess = tf_debug.LocalCLIDebugWrapperSession(
sess, thread_name_filter='MainThread$')
sess.add_tensor_filter('has_inf_or_nan', tf_debug.has_inf_or_nan)
return sess
def main(unused_argv):
if FLAGS.use_tpu:
run_config = tf.contrib.tpu.RunConfig(
master=FLAGS.master,
evaluation_master=FLAGS.master,
model_dir=FLAGS.model_dir,
save_checkpoints_secs=FLAGS.save_checkpoints_secs,
save_summary_steps=FLAGS.save_summary_steps,
session_config=tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False),
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations,
num_shards=FLAGS.num_shards,
per_host_input_for_training=FLAGS.per_host_input_for_training))
estimator = tf.contrib.tpu.TPUEstimator(
model_fn=prediction_model.make_model_fn(FLAGS),
use_tpu=FLAGS.use_tpu,
config=run_config,
train_batch_size=FLAGS.batch_size,
eval_batch_size=FLAGS.batch_size,
)
else:
run_config = tf.contrib.learn.RunConfig(
master=FLAGS.master,
evaluation_master=FLAGS.master,
model_dir=FLAGS.model_dir,
save_checkpoints_secs=FLAGS.save_checkpoints_secs,
save_summary_steps=FLAGS.save_summary_steps,
)
estimator = tf.estimator.Estimator(
model_fn=prediction_model.make_model_fn(FLAGS),
config=run_config,
)
startup_delay_secs = FLAGS.task * FLAGS.startup_delay_secs
print('delay for:', startup_delay_secs)
sys.stdout.flush()
if FLAGS.run_mode == 'train':
time.sleep(startup_delay_secs)
if FLAGS.use_estimator or FLAGS.use_tpu:
      print('using estimator')
if FLAGS.imgnet_pretrain:
raise NotImplementedError
      # TODO(wichersn) figure out why estimator doesn't get as good of a loss.
estimator.train(
input_fn=prediction_input.get_input_fn(
FLAGS.data_pattern, FLAGS, FLAGS.batch_size, FLAGS.use_tpu),
steps=FLAGS.train_steps)
else:
      print('using slim')
# with tf.device(tf.ReplicaDeviceSetter(FLAGS.ps_tasks)):
features, labels = prediction_input.get_input_fn(
FLAGS.data_pattern, FLAGS, FLAGS.batch_size, FLAGS.use_tpu)()
model = prediction_model.make_model_fn(FLAGS)(features, labels, None,
None)
saver = tf.train.Saver()
if FLAGS.task == 0:
# Only log summaries if it's the chief.
writer = tf.summary.FileWriter(FLAGS.event_log_dir,
tf.get_default_graph())
else:
writer = None
slim.learning.train(
model.train_op,
logdir=FLAGS.event_log_dir,
saver=saver,
init_fn=_get_init_fn(),
save_summaries_secs=FLAGS.save_checkpoints_secs / 2,
save_interval_secs=FLAGS.save_checkpoints_secs,
summary_writer=writer,
number_of_steps=FLAGS.train_steps,
session_wrapper=tf_dbg_sess_wrapper)
if FLAGS.run_mode == 'eval':
features, labels = prediction_input.get_input_fn(
FLAGS.data_pattern, FLAGS, FLAGS.batch_size, FLAGS.use_tpu)()
prediction_model.make_model_fn(FLAGS)(features, labels, None, None)
slim.evaluation.evaluation_loop(
FLAGS.master,
FLAGS.model_dir,
logdir=FLAGS.event_log_dir,
num_evals=1,
eval_op=tf.summary.merge_all(),
eval_interval_secs=FLAGS.save_checkpoints_secs)
if __name__ == '__main__':
app.run()
|
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2021 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
import argparse
import sys
import matplotlib.cm as cm
import numpy as np
from matplotlib.pyplot import figure, savefig, setp, show
import rapidtide.filter as tide_filt
import rapidtide.fit as tide_fit
import rapidtide.io as tide_io
import rapidtide.util as tide_util
import rapidtide.workflows.parser_funcs as pf
def phase(mcv):
return np.arctan2(mcv.imag, mcv.real)
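# Example: phase(1.0 + 1.0j) returns np.arctan2(1, 1) == pi/4 (~0.7854).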
def _get_parser():
parser = argparse.ArgumentParser(
prog="showtc",
description="Plots the data in text files.",
usage="%(prog)s texfilename[:col1,col2...,coln] [textfilename]... [options]",
)
parser.add_argument(
"textfilenames",
type=str,
nargs="+",
help="One or more input files, with optional column specifications",
)
sampling = parser.add_mutually_exclusive_group()
sampling.add_argument(
"--samplerate",
dest="samplerate",
action="store",
metavar="FREQ",
type=lambda x: pf.is_float(parser, x),
help=(
"Set the sample rate of the data file to FREQ. "
"If neither samplerate or sampletime is specified, sample rate is 1.0."
),
default="auto",
)
sampling.add_argument(
"--sampletime",
dest="samplerate",
action="store",
metavar="TSTEP",
type=lambda x: pf.invert_float(parser, x),
help=(
"Set the sample rate of the data file to 1.0/TSTEP. "
"If neither samplerate or sampletime is specified, sample rate is 1.0."
),
default="auto",
)
parser.add_argument(
"--displaytype",
dest="displaymode",
action="store",
type=str,
choices=["time", "power", "phase"],
help=("Display data as time series (default), power spectrum, or phase spectrum."),
default="time",
)
parser.add_argument(
"--format",
dest="plotformat",
action="store",
type=str,
choices=["overlaid", "separate", "separatelinked"],
help=(
"Display data overlaid (default), in individually scaled windows, or in separate windows with linked scaling."
),
default="overlaid",
)
parser.add_argument(
"--waterfall",
action="store_true",
dest="dowaterfall",
help="Display multiple timecourses in a waterfall plot.",
default=False,
)
parser.add_argument(
"--voffset",
dest="voffset",
metavar="OFFSET",
type=float,
action="store",
help="Plot multiple timecourses with OFFSET between them (use negative OFFSET to set automatically).",
default=0.0,
)
parser.add_argument(
"--transpose",
action="store_true",
dest="dotranspose",
help="Swap rows and columns in the input files.",
default=False,
)
# add plot appearance options
pf.addplotopts(parser)
parser.add_argument(
"--starttime",
dest="thestarttime",
metavar="START",
type=float,
help="Start plotting at START seconds (default is the start of the data).",
default=None,
)
parser.add_argument(
"--endtime",
dest="theendtime",
metavar="END",
type=float,
help="Finish plotting at END seconds (default is the end of the data).",
default=None,
)
parser.add_argument(
"--numskip",
dest="numskip",
metavar="NUM",
type=int,
help="Skip NUM lines at the beginning of each file (to get past header lines).",
default=0,
)
parser.add_argument(
"--debug",
dest="debug",
action="store_true",
help="Output additional debugging information.",
default=False,
)
return parser
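# Hypothetical example invocations (file names and column indices are
# placeholders):
#   showtc timecourse1.txt timecourse2.txt --samplerate 2.0
#   showtc multicolumn.txt:0,2 --displaytype power --format separatelinked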
def showtc(args):
# set the sample rate
if args.samplerate == "auto":
samplerate = 1.0
args.samplerate = samplerate
else:
samplerate = args.samplerate
# set the appropriate display mode
if args.displaymode == "time":
dospectrum = False
specmode = "power"
elif args.displaymode == "power":
dospectrum = True
specmode = "power"
elif args.displaymode == "phase":
dospectrum = True
specmode = "phase"
else:
print("illegal display mode")
sys.exit()
# determine how to composite multiple plots
if args.plotformat == "overlaid":
separate = False
linky = True
elif args.plotformat == "separate":
separate = True
linky = False
elif args.plotformat == "separatelinked":
separate = True
linky = True
else:
print("illegal formatting mode")
sys.exit()
# set various cosmetic aspects of the plots
if args.colors is not None:
colornames = args.colors.split(",")
else:
colornames = []
if args.legends is not None:
legends = args.legends.split(",")
legendset = True
else:
legends = []
legendset = False
dolegend = args.dolegend
if args.linewidths is not None:
thelinewidth = []
for thestring in args.linewidths.split(","):
thelinewidth.append(float(thestring))
else:
thelinewidth = [1.0]
numlinewidths = len(thelinewidth)
if 0 <= args.legendloc <= 10:
legendloc = args.legendloc
else:
print("illegal legend location:", args.legendloc)
sys.exit()
savespec = False
detrendorder = 1
demean = False
useHamming = True
# check range
if args.theendtime is None:
args.theendtime = 1.0e38
if args.thestarttime is not None:
if args.thestarttime >= args.theendtime:
print("endtime must be greater then starttime;")
sys.exit()
# handle required args first
xvecs = []
yvecs = []
linelabels = []
samplerates = []
numvecs = 0
minlen = 100000000
shortcolnames = True
# read in all the data
for i in range(0, len(args.textfilenames)):
thisfilename, thiscolspec = tide_io.parsefilespec(args.textfilenames[i])
# check file type
(
thissamplerate,
thisstartoffset,
colnames,
invecs,
dummy,
dummy,
) = tide_io.readvectorsfromtextfile(args.textfilenames[i], debug=args.debug)
if args.debug:
print("On return from readvectorsfromtextfile:")
print(f"\targs.samplerate: {args.samplerate}")
print(f"\tthissamplerate: {thissamplerate}")
print(f"\targs.thestarttime: {args.thestarttime}")
print(f"\tthisstartoffset: {thisstartoffset}")
print("input data dimensions:", invecs.shape)
if thissamplerate is None:
thissamplerate = samplerate
if thisstartoffset is None:
# print("thisstartoffset is None")
if args.thestarttime is None:
if args.debug:
print("args.thestarttime is None")
args.thestarttime = 0.0
else:
if args.debug:
print(f"args.thestarttime is {args.thestarttime}")
thisstartoffset = args.thestarttime
else:
# print(f"thisstartoffset is {thisstartoffset}")
if args.thestarttime is None:
if args.debug:
print("args.thestarttime is None")
args.thestarttime = thisstartoffset
else:
if args.debug:
print(f"args.thestarttime is {args.thestarttime}")
thisstartoffset = args.thestarttime
if args.debug:
print("After preprocessing time variables:")
print(f"\targs.samplerate: {args.samplerate}")
print(f"\tthissamplerate: {thissamplerate}")
print(f"\targs.thestarttime: {args.thestarttime}")
print(f"\tthisstartoffset: {thisstartoffset}")
if args.debug:
print(f"file {args.textfilenames[i]} colnames: {colnames}")
if args.dotranspose:
invecs = np.transpose(invecs)
if args.debug:
print(" ", invecs.shape[0], " columns")
for j in range(0, invecs.shape[0]):
if args.debug:
print("appending vector number ", j)
if dospectrum:
if invecs.shape[1] % 2 == 1:
invec = invecs[j, :-1]
else:
invec = invecs[j, :]
if detrendorder > 0:
invec = tide_fit.detrend(invec, order=detrendorder, demean=True)
elif demean:
invec = invec - np.mean(invec)
if useHamming:
freqaxis, spectrum = tide_filt.spectrum(
tide_filt.hamming(len(invec)) * invec,
Fs=thissamplerate,
mode=specmode,
)
else:
freqaxis, spectrum = tide_filt.spectrum(
invec, Fs=thissamplerate, mode=specmode
)
if savespec:
tide_io.writenpvecs(
np.transpose(np.stack([freqaxis, spectrum], axis=1)),
"thespectrum.txt",
)
xvecs.append(freqaxis)
yvecs.append(spectrum)
else:
yvecs.append(invecs[j] * 1.0)
xvecs.append(
thisstartoffset + np.arange(0.0, len(yvecs[-1]), 1.0) / thissamplerate
)
if len(yvecs[-1]) < minlen:
minlen = len(yvecs[-1])
if not legendset:
if invecs.shape[0] > 1:
if colnames is None:
if shortcolnames:
linelabels.append("column" + str(j).zfill(2))
else:
linelabels.append(thisfilename + "_column" + str(j).zfill(2))
else:
if shortcolnames:
linelabels.append(colnames[j])
else:
linelabels.append(thisfilename + "_" + colnames[j])
else:
linelabels.append(thisfilename)
else:
linelabels.append(legends[i % len(legends)])
"""if invecs.shape[0] > 1:
linelabels.append(legends[i % len(legends)] + '_column' + str(j).zfill(2))
else:
linelabels.append(legends[i % len(legends)])"""
samplerates.append(thissamplerate + 0.0)
if args.debug:
print(
"timecourse:",
j,
", len:",
len(xvecs[-1]),
", timerange:",
xvecs[-1][0],
xvecs[-1][-1],
)
numvecs += 1
thestartpoint = tide_util.valtoindex(xvecs[0], args.thestarttime, debug=args.debug)
theendpoint = tide_util.valtoindex(xvecs[0], args.theendtime, debug=args.debug)
args.thestarttime = xvecs[0][thestartpoint]
args.theendtime = xvecs[0][theendpoint]
if args.debug:
print("full range (pts):", thestartpoint, theendpoint)
print("full range (time):", args.thestarttime, args.theendtime)
overallxmax = -1e38
overallxmin = 1e38
for thevec in xvecs:
overallxmax = np.max([np.max(thevec), overallxmax])
overallxmin = np.min([np.min(thevec), overallxmin])
xrange = (np.max([overallxmin, args.thestarttime]), np.min([overallxmax, args.theendtime]))
ymins = []
ymaxs = []
for thevec in yvecs:
ymins.append(np.min(np.asarray(thevec[thestartpoint:theendpoint], dtype="float")))
ymaxs.append(np.max(np.asarray(thevec[thestartpoint:theendpoint], dtype="float")))
overallymax = -1e38
overallymin = 1e38
for thevec in yvecs:
overallymax = np.max([np.max(thevec), overallymax])
overallymin = np.min([np.min(thevec), overallymin])
yrange = (overallymin, overallymax)
if args.debug:
print("xrange:", xrange)
print("yrange:", yrange)
if args.voffset < 0.0:
args.voffset = yrange[1] - yrange[0]
if args.debug:
print("voffset:", args.voffset)
if not separate:
for i in range(0, numvecs):
yvecs[i] += (numvecs - i - 1) * args.voffset
overallymax = -1e38
overallymin = 1e38
for thevec in yvecs:
overallymax = np.max([np.max(thevec), overallymax])
overallymin = np.min([np.min(thevec), overallymin])
yrange = (overallymin, overallymax)
if args.dowaterfall:
xstep = (xrange[1] - xrange[0]) / numvecs
ystep = yrange[1] - yrange[0]
for i in range(numvecs):
xvecs[i] = xvecs[i] + i * xstep
yvecs[i] = 10.0 * yvecs[i] / ystep + i * ystep
# now plot it out
if separate:
thexaxfontsize = 6 * args.fontscalefac
theyaxfontsize = 6 * args.fontscalefac
thexlabelfontsize = 6 * args.fontscalefac
theylabelfontsize = 6 * args.fontscalefac
thelegendfontsize = 5 * args.fontscalefac
thetitlefontsize = 6 * args.fontscalefac
thesuptitlefontsize = 10 * args.fontscalefac
else:
thexaxfontsize = 10 * args.fontscalefac
theyaxfontsize = 10 * args.fontscalefac
thexlabelfontsize = 10 * args.fontscalefac
theylabelfontsize = 10 * args.fontscalefac
thelegendfontsize = 8 * args.fontscalefac
thetitlefontsize = 10 * args.fontscalefac
thesuptitlefontsize = 10 * args.fontscalefac
if len(colornames) > 0:
colorlist = [colornames[i % len(colornames)] for i in range(numvecs)]
else:
colorlist = [cm.nipy_spectral(float(i) / numvecs) for i in range(numvecs)]
fig = figure()
if separate:
if args.thetitle is not None:
fig.suptitle(args.thetitle, fontsize=thesuptitlefontsize)
if linky:
axlist = fig.subplots(numvecs, sharex=True, sharey=True)[:]
else:
axlist = fig.subplots(numvecs, sharex=True, sharey=False)[:]
else:
ax = fig.add_subplot(1, 1, 1)
if args.thetitle is not None:
ax.set_title(args.thetitle, fontsize=thetitlefontsize)
for i in range(0, numvecs):
if separate:
ax = axlist[i]
ax.plot(
xvecs[i],
yvecs[i],
color=colorlist[i],
label=linelabels[i],
linewidth=thelinewidth[i % numlinewidths],
)
if dolegend:
ax.legend(fontsize=thelegendfontsize, loc=legendloc)
ax.set_xlim(xrange)
if linky:
# print(yrange)
ax.set_ylim(yrange)
else:
themax = np.max(yvecs[i])
themin = np.min(yvecs[i])
thediff = themax - themin
# print(themin, themax, thediff)
ax.set_ylim(top=(themax + thediff / 20.0), bottom=(themin - thediff / 20.0))
if args.showxax:
ax.tick_params(axis="x", labelsize=thexlabelfontsize, which="both")
if args.showyax:
ax.tick_params(axis="y", labelsize=theylabelfontsize, which="both")
if separate:
fig.subplots_adjust(hspace=0)
setp([a.get_xticklabels() for a in fig.axes[:-1]], visible=False)
if dospectrum:
if args.xlabel is None:
args.xlabel = "Frequency (Hz)"
if specmode == "power":
if args.ylabel is None:
args.ylabel = "Signal power"
else:
if args.ylabel is None:
args.ylabel = "Signal phase"
else:
if args.xlabel is None:
args.xlabel = "Time (s)"
if args.showxax:
ax.set_xlabel(args.xlabel, fontsize=thexlabelfontsize, fontweight="bold")
else:
ax.xaxis.set_visible(False)
if args.showyax:
ax.set_ylabel(args.ylabel, fontsize=theylabelfontsize, fontweight="bold")
else:
ax.yaxis.set_visible(False)
# fig.tight_layout()
if args.outputfile is None:
show()
else:
savefig(args.outputfile, bbox_inches="tight", dpi=args.saveres)
|
|
#
# xmpp.py
#
# Copyright (c) 2013 Horatiu Eugen Vlad
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import logging
import sys
from django.contrib.sites.models import Site
from pyxmpp2.jid import JID
from pyxmpp2.message import Message
from pyxmpp2.client import Client
from pyxmpp2.settings import XMPPSettings
from pyxmpp2.interfaces import EventHandler, event_handler, QUIT
from pyxmpp2.streamevents import AuthorizedEvent, DisconnectedEvent
def get_review_request_url(review_request):
"""
    Return the absolute URL of the given review request, built from the
    current site's domain and configured domain method.
"""
current_site = Site.objects.get_current()
siteconfig = current_site.config.get()
domain_method = siteconfig.get("site_domain_method")
base_url = u"%s://%s%s" % (domain_method, current_site.domain, review_request.get_absolute_url())
if sys.version_info[0] < 3:
base_url = base_url.decode("utf-8")
return base_url
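# Example (hypothetical values): with site domain 'reviews.example.com',
# domain method 'https', and a review request whose absolute URL path is
# '/r/42/', this returns u'https://reviews.example.com/r/42/'.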
def get_users_review_request(review_request):
"""
Returns the set of active users that are interested in the review request
"""
users = set()
for u in review_request.get_participants():
users.add(u)
if review_request.submitter.is_active:
users.add(review_request.submitter)
for u in review_request.target_people.filter(is_active=True):
users.add(u)
for group in review_request.target_groups.all():
for address in group.users.filter(is_active=True):
users.add(address)
for profile in review_request.starred_by.all():
if profile.user.is_active:
users.add(profile.user)
logging.debug("XMPP notification for review request #%s will be sent to: %s",review_request.get_display_id(), users)
return users
class XmppClient(EventHandler):
"""
A client to manage the XMPP connection and dispatch messages.
"""
NAME = "Review Board XMPP Notification Client"
VERSION = 0.1
def __init__(self, host, port, timeout, from_jid, password, use_tls, tls_verify_peer):
self.host = host
self.port = port
self.timeout = timeout or 5
self.from_jid = from_jid
self.password = password
self.use_tls = use_tls
self.tls_verify_peer = tls_verify_peer
self.req_id = None
self.client = None
self.stanzas = None
@event_handler(AuthorizedEvent)
def handle_authorized(self, event):
logging.debug(u"XmppClient event handler for request #%s authorized: %s", self.req_id, event)
if self.client.stream != event.stream:
logging.debug(u"XmppClient event handler ignore event")
return
for stanza in self.stanzas:
logging.debug("XmppHandler for request #%s send message to %s", self.req_id, stanza.as_xml())
event.stream.send(stanza)
logging.debug(u"XmppHandler disconnecting stream for request #%s", self.req_id)
self.client.disconnect()
@event_handler(DisconnectedEvent)
def handle_disconnected(self, event):
logging.debug("XmppClient event handler for request #%s disconnected: %s", self.req_id, event)
if self.client.stream != event.stream:
logging.debug(u"XmppClient event handler ignore event")
return
logging.debug(u"XmppClient event handler closing stream for request #%s", self.req_id)
self.client.close_stream()
self.client = None
return QUIT
@event_handler()
def handle_all(self, event):
logging.debug(u"XmppClient event handler for request #%s: %s", self.req_id, event)
def send(self, req_id, stanzas):
self.req_id = req_id
self.stanzas = stanzas
logging.debug(u"XmppClient start sending messages for request #%s", self.req_id)
try:
settings = XMPPSettings({
u"password": self.password,
u"starttls": self.use_tls,
u"tls_verify_peer": self.tls_verify_peer,
u"server" : self.host,
u"port": self.port,
u"default_stanza_timeout": self.timeout,
})
self.client = Client(self.from_jid, [self], settings)
self.client.connect()
            self.client.run(timeout=self.timeout)
        except Exception as e:
logging.error("Error sending XMPP notification for request #%s: %s",
req_id,
e,
exc_info=1)
class XmppSender(object):
"""
A sender for the XMPP messages. Reports information to the server.
"""
NAME = "Review Board XMPP Notification Sender"
VERSION = 0.1
def __init__(self, extension):
self.extension = extension
def send_review_request_published(self, user, review_request, changedesc):
# If the review request is not yet public or has been discarded, don't send
# any notification. Relax the "discarded" rule when notifications are sent on closing
# review requests
        if not review_request.public:
return
message = u"%s %s published review request #%d: \"%s\"\n%s" % (
user.first_name, user.last_name,
review_request.get_display_id(),
review_request.summary,
get_review_request_url(review_request))
users = get_users_review_request(review_request)
# Do not send notification to the user that triggered the update
users.discard(user)
self.send_xmpp_message(users, review_request.get_display_id(), message)
def send_review_request_reopened(self, user, review_request):
# If the review request is not yet public or has been discarded, don't send
# any notification. Relax the "discarded" rule when notifications are sent on closing
# review requests
        if not review_request.public:
return
message = u"%s %s reopened review request #%d: \"%s\"\n%s" % (
user.first_name, user.last_name,
review_request.get_display_id(),
review_request.summary,
get_review_request_url(review_request))
users = get_users_review_request(review_request)
# Do not send notification to the user that triggered the update
users.discard(user)
self.send_xmpp_message(users, review_request.get_display_id(), message)
def send_review_request_closed(self, user, review_request):
# If the review request is not yet public or has been discarded, don't send
# any notification. Relax the "discarded" rule when notifications are sent on closing
# review requests
        if review_request.status == 'D':
return
message = u"%s %s closed review request #%d: \"%s\"\n%s" % (
user.first_name, user.last_name,
review_request.get_display_id(),
review_request.summary,
get_review_request_url(review_request))
users = get_users_review_request(review_request)
# Do not send notification to the user that triggered the update
users.discard(user)
self.send_xmpp_message(users, review_request.get_display_id(), message)
def send_review_published(self, user, review):
review_request = review.review_request
if not review_request.public:
return
message = u"%s %s reviewed request #%d: \"%s\"\n%s" % (
user.first_name, user.last_name,
review_request.get_display_id(),
review_request.summary,
get_review_request_url(review_request))
users = get_users_review_request(review_request)
# Do not send notification to the user that triggered the update
users.discard(user)
self.send_xmpp_message(users, review_request.get_display_id(), message)
def send_reply_published(self, user, reply):
review = reply.base_reply_to
review_request = review.review_request
if not review_request.public:
return
message = u"%s %s replied review request #%d: \"%s\"\n%s" % (
user.first_name, user.last_name,
review_request.get_display_id(),
review_request.summary,
get_review_request_url(review_request))
users = get_users_review_request(review_request)
# Do not send notification to the user that triggered the update
users.discard(user)
self.send_xmpp_message(users, review_request.get_display_id(), message)
def send_xmpp_message(self, receivers, req_id, message):
"""
        Format and send an XMPP notification about the given review request
        to the given receivers.
"""
logging.info("XMPP notification send message for request #%s: %s", req_id, message)
host = self.extension.settings['xmpp_host']
port = self.extension.settings['xmpp_port']
timeout = self.extension.settings['xmpp_timeout']
from_jid = self.extension.settings["xmpp_sender_jid"]
password = self.extension.settings["xmpp_sender_password"]
use_tls = self.extension.settings["xmpp_use_tls"]
tls_verify_peer = self.extension.settings["xmpp_tls_verify_peer"]
if sys.version_info[0] < 3:
from_jid = from_jid.decode("utf-8")
password = password.decode("utf-8")
message = message.decode("utf-8")
if self.extension.settings["xmpp_partychat_only"]:
receivers = set()
rooms = self.extension.settings["xmpp_partychat"].split()
if sys.version_info[0] < 3:
rooms = [room.decode("utf-8") for room in rooms]
receivers.update(rooms)
try:
from_jid = JID(from_jid)
stanzas = set()
for receiver in receivers:
if "@" in str(receiver):
                    receiver_jid = JID(local_or_jid=receiver)
                else:
                    receiver_jid = JID(local_or_jid=receiver,
                                       domain=from_jid.domain)
                stanzas.add(Message(to_jid=receiver_jid, body=message,
                                    stanza_type="chat"))
client = XmppClient(host, port, timeout, from_jid, password, use_tls, tls_verify_peer)
client.send(req_id, stanzas)
        except Exception as e:
logging.error("Error sending XMPP notification for request #%s: %s",
req_id,
e,
exc_info=1)
|
|
#!/usr/bin/env python
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Tully Foote/[email protected]
"""
Command-line interface to rosdep library
"""
from __future__ import print_function
import os
import sys
import traceback
try:
from urllib.error import URLError
from urllib.request import build_opener
from urllib.request import HTTPBasicAuthHandler
from urllib.request import HTTPHandler
from urllib.request import install_opener
from urllib.request import ProxyHandler
except ImportError:
from urllib2 import build_opener
from urllib2 import HTTPBasicAuthHandler
from urllib2 import HTTPHandler
from urllib2 import install_opener
from urllib2 import ProxyHandler
from urllib2 import URLError
import warnings
from optparse import OptionParser
import rospkg
from . import create_default_installer_context, get_default_installer
from . import __version__
from .core import RosdepInternalError, InstallFailed, UnsupportedOs, InvalidData, CachePermissionError
from .installers import RosdepInstaller
from .lookup import RosdepLookup, ResolutionError
from .rospkg_loader import DEFAULT_VIEW_KEY
from .sources_list import update_sources_list, get_sources_cache_dir,\
download_default_sources_list, SourcesListLoader,CACHE_INDEX,\
get_sources_list_dir, get_default_sources_list_file,\
DEFAULT_SOURCES_LIST_URL
from .rosdistrohelper import PreRep137Warning
from .catkin_packages import find_catkin_packages_in
from .catkin_packages import set_workspace_packages
from .catkin_packages import get_workspace_packages
class UsageError(Exception):
pass
_usage = """usage: rosdep [options] <command> <args>
Commands:
rosdep check <stacks-and-packages>...
check if the dependencies of package(s) have been met.
rosdep install <stacks-and-packages>...
generate a bash script and then execute it.
rosdep db
generate the dependency database and print it to the console.
rosdep init
initialize rosdep sources in /etc/ros/rosdep. May require sudo.
rosdep keys <stacks-and-packages>...
list the rosdep keys that the packages depend on.
rosdep resolve <rosdeps>
resolve <rosdeps> to system dependencies
rosdep update
update the local rosdep database based on the rosdep sources.
rosdep what-needs <rosdeps>...
print a list of packages that declare a rosdep on (at least
one of) <rosdeps>
rosdep where-defined <rosdeps>...
print a list of yaml files that declare a rosdep on (at least
one of) <rosdeps>
rosdep fix-permissions
Recursively change the permissions of the user's ros home directory.
May require sudo. Can be useful to fix permissions after calling
"rosdep update" with sudo accidentally.
"""
def _get_default_RosdepLookup(options):
"""
Helper routine for converting command-line options into
appropriate RosdepLookup instance.
"""
os_override = convert_os_override_option(options.os_override)
sources_loader = SourcesListLoader.create_default(sources_cache_dir=options.sources_cache_dir,
os_override=os_override,
verbose=options.verbose)
lookup = RosdepLookup.create_from_rospkg(sources_loader=sources_loader)
lookup.verbose = options.verbose
return lookup
def rosdep_main(args=None):
if args is None:
args = sys.argv[1:]
try:
exit_code = _rosdep_main(args)
if exit_code not in [0, None]:
sys.exit(exit_code)
except rospkg.ResourceNotFound as e:
print("""
ERROR: Rosdep cannot find all required resources to answer your query
%s
"""%(error_to_human_readable(e)), file=sys.stderr)
sys.exit(1)
except UsageError as e:
print(_usage, file=sys.stderr)
print("ERROR: %s"%(str(e)), file=sys.stderr)
sys.exit(os.EX_USAGE)
except RosdepInternalError as e:
print("""
ERROR: Rosdep experienced an internal error.
Please go to the rosdep page [1] and file a bug report with the message below.
[1] : http://www.ros.org/wiki/rosdep
rosdep version: %s
%s
"""%(__version__, e.message), file=sys.stderr)
sys.exit(1)
except ResolutionError as e:
print("""
ERROR: %s
%s
"""%(e.args[0], e), file=sys.stderr)
sys.exit(1)
except CachePermissionError as e:
print(str(e))
print("Try running 'sudo rosdep fix-permissions'")
sys.exit(1)
except UnsupportedOs as e:
print("Unsupported OS: %s\nSupported OSes are [%s]"%(e.args[0], ', '.join(e.args[1])), file=sys.stderr)
sys.exit(1)
except Exception as e:
print("""
ERROR: Rosdep experienced an error: %s
Please go to the rosdep page [1] and file a bug report with the stack trace below.
[1] : http://www.ros.org/wiki/rosdep
rosdep version: %s
%s
"""%(e, __version__, traceback.format_exc()), file=sys.stderr)
sys.exit(1)
def check_for_sources_list_init(sources_cache_dir):
"""
Check to see if sources list and cache are present.
*sources_cache_dir* alone is enough to pass as the user has the
option of passing in a cache dir.
If check fails, tell user how to resolve and sys exit.
"""
commands = []
filename = os.path.join(sources_cache_dir, CACHE_INDEX)
if os.path.exists(filename):
return
else:
commands.append('rosdep update')
sources_list_dir = get_sources_list_dir()
if not os.path.exists(sources_list_dir):
commands.insert(0, 'sudo rosdep init')
else:
filelist = [f for f in os.listdir(sources_list_dir) if f.endswith('.list')]
if not filelist:
commands.insert(0, 'sudo rosdep init')
if commands:
commands = '\n'.join([" %s"%c for c in commands])
print("""
ERROR: your rosdep installation has not been initialized yet. Please run:
%s
"""%(commands), file=sys.stderr)
sys.exit(1)
else:
return True
def key_list_to_dict(key_list):
"""
Convert a list of strings of the form 'foo:bar' to a dictionary.
Splits strings of the form 'foo:bar quux:quax' into separate entries.
"""
try:
key_list = [key for s in key_list for key in s.split(' ')]
return dict(map(lambda s: [t.strip() for t in s.split(':')], key_list))
except ValueError as e:
raise UsageError("Invalid 'key:value' list: '%s'" % ' '.join(key_list))
def str_to_bool(s):
"""Maps a string to bool. Supports true/false, and yes/no, and is case-insensitive"""
s = s.lower()
if s in ['yes', 'true']:
return True
elif s in ['no', 'false']:
return False
else:
raise UsageError("Cannot parse '%s' as boolean" % s)
def setup_proxy_opener():
# check for http[s]?_proxy user
for scheme in ['http', 'https']:
key = scheme + '_proxy'
if key in os.environ:
proxy = ProxyHandler({scheme: os.environ[key]})
auth = HTTPBasicAuthHandler()
opener = build_opener(proxy, auth, HTTPHandler)
install_opener(opener)
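# Sketch of how this is exercised (the proxy URL is a hypothetical example):
# with
#   http_proxy=http://proxy.example.com:3128
# set in the environment, calling setup_proxy_opener() installs a global
# urllib opener so that later downloads (e.g. during 'rosdep init') are
# routed through that proxy.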
def _rosdep_main(args):
# sources cache dir is our local database.
default_sources_cache = get_sources_cache_dir()
parser = OptionParser(usage=_usage, prog='rosdep')
parser.add_option("--os", dest="os_override", default=None,
metavar="OS_NAME:OS_VERSION", help="Override OS name and version (colon-separated), e.g. ubuntu:lucid")
parser.add_option("-c", "--sources-cache-dir", dest="sources_cache_dir", default=default_sources_cache,
metavar='SOURCES_CACHE_DIR', help="Override %s"%(default_sources_cache))
parser.add_option("--verbose", "-v", dest="verbose", default=False,
action="store_true", help="verbose display")
parser.add_option("--version", dest="print_version", default=False,
action="store_true", help="print version and exit")
parser.add_option("--reinstall", dest="reinstall", default=False,
action="store_true", help="(re)install all dependencies, even if already installed")
parser.add_option("--default-yes", "-y", dest="default_yes", default=False,
action="store_true", help="Tell the package manager to default to y or fail when installing")
parser.add_option("--simulate", "-s", dest="simulate", default=False,
action="store_true", help="Simulate install")
parser.add_option("-r", dest="robust", default=False,
action="store_true", help="Continue installing despite errors.")
parser.add_option("-q", dest="quiet", default=False,
action="store_true", help="Quiet. Suppress output except for errors.")
parser.add_option("-a", "--all", dest="rosdep_all", default=False,
action="store_true", help="select all packages")
parser.add_option("-n", dest="recursive", default=True,
action="store_false", help="Do not consider implicit/recursive dependencies. Only valid with 'keys', 'check', and 'install' commands.")
parser.add_option("--ignore-packages-from-source", "--ignore-src", "-i",
dest='ignore_src', default=False, action="store_true",
help="Affects the 'check' and 'install' verbs. If "
"specified then rosdep will not install keys "
"that are found to be catkin packages anywhere in "
"the ROS_PACKAGE_PATH or in any of the directories "
"given by the --from-paths option.")
parser.add_option("--skip-keys",
dest='skip_keys', action="append", default=[],
help="Affects the 'check' and 'install' verbs. The "
"specified rosdep keys will be ignored, i.e. not "
"resolved and not installed. The option can be supplied multiple "
"times. A space separated list of rosdep keys can also "
"be passed as a string. A more permanent solution to "
"locally ignore a rosdep key is creating a local rosdep rule "
"with an empty list of packages (include it in "
"/etc/ros/rosdep/sources.list.d/ before the defaults).")
parser.add_option("--filter-for-installers",
action="append", default=[],
help="Affects the 'db' verb. If supplied, the output of the 'db' "
"command is filtered to only list packages whose installer "
"is in the provided list. The option can be supplied "
"multiple times. A space separated list of installers can also "
"be passed as a string. Example: `--filter-for-installers \"apt pip\"`")
parser.add_option("--from-paths", dest='from_paths',
default=False, action="store_true",
help="Affects the 'check', 'keys', and 'install' verbs. "
"If specified the arugments to those verbs will be "
"considered paths to be searched, acting on all "
"catkin packages found there in.")
parser.add_option("--rosdistro", dest='ros_distro', default=None,
help="Explicitly sets the ROS distro to use, overriding "
"the normal method of detecting the ROS distro "
"using the ROS_DISTRO environment variable.")
parser.add_option("--as-root", default=[], action='append',
metavar="INSTALLER_KEY:<bool>", help="Override "
"whether sudo is used for a specific installer, "
"e.g. '--as-root pip:false' or '--as-root \"pip:no homebrew:yes\"'. "
"Can be specified multiple times.")
options, args = parser.parse_args(args)
if options.print_version:
print(__version__)
sys.exit(0)
# flatten list of skipped keys and filter-for-installers
options.skip_keys = [key for s in options.skip_keys for key in s.split(' ')]
options.filter_for_installers = [inst for s in options.filter_for_installers for inst in s.split(' ')]
if len(args) == 0:
parser.error("Please enter a command")
command = args[0]
if not command in _commands:
parser.error("Unsupported command %s."%command)
args = args[1:]
if options.ros_distro:
os.environ['ROS_DISTRO'] = options.ros_distro
# Convert list of keys to dictionary
options.as_root = dict((k, str_to_bool(v)) for k, v in key_list_to_dict(options.as_root).items())
if not command in ['init', 'update', 'fix-permissions']:
check_for_sources_list_init(options.sources_cache_dir)
elif not command in ['fix-permissions']:
setup_proxy_opener()
if command in _command_rosdep_args:
return _rosdep_args_handler(command, parser, options, args)
elif command in _command_no_args:
return _no_args_handler(command, parser, options, args)
else:
return _package_args_handler(command, parser, options, args)
def _no_args_handler(command, parser, options, args):
if args:
parser.error("command [%s] takes no arguments"%(command))
else:
return command_handlers[command](options)
def _rosdep_args_handler(command, parser, options, args):
# rosdep keys as args
if options.rosdep_all:
parser.error("-a, --all is not a valid option for this command")
elif len(args) < 1:
parser.error("Please enter arguments for '%s'"%command)
else:
return command_handlers[command](args, options)
def _package_args_handler(command, parser, options, args):
if options.rosdep_all:
if args:
parser.error("cannot specify additional arguments with -a")
else:
# let the loader filter the -a. This will take out some
# packages that are catkinized (for now).
lookup = _get_default_RosdepLookup(options)
loader = lookup.get_loader()
args = loader.get_loadable_resources()
not_found = []
elif not args:
parser.error("no packages or stacks specified")
# package or stack names as args. have to convert stack names to packages.
# - overrides to enable testing
packages = []
not_found = []
if options.from_paths:
for path in args:
if options.verbose:
print("Using argument '{0}' as a path to search.".format(path))
if not os.path.exists(path):
print("given path '{0}' does not exist".format(path))
return 1
path = os.path.abspath(path)
if 'ROS_PACKAGE_PATH' not in os.environ:
os.environ['ROS_PACKAGE_PATH'] = '{0}'.format(path)
else:
os.environ['ROS_PACKAGE_PATH'] = '{0}{1}{2}'.format(
path,
os.pathsep,
os.environ['ROS_PACKAGE_PATH']
)
pkgs = find_catkin_packages_in(path, options.verbose)
packages.extend(pkgs)
# Make packages list unique
packages = list(set(packages))
else:
rospack = rospkg.RosPack()
rosstack = rospkg.RosStack()
val = rospkg.expand_to_packages(args, rospack, rosstack)
packages = val[0]
not_found = val[1]
if not_found:
raise rospkg.ResourceNotFound(not_found[0], rospack.get_ros_paths())
# Handle the --ignore-src option
if command in ['install', 'check'] and options.ignore_src:
if options.verbose:
print("Searching ROS_PACKAGE_PATH for "
"sources: " + str(os.environ['ROS_PACKAGE_PATH'].split(':')))
ws_pkgs = get_workspace_packages()
for path in os.environ['ROS_PACKAGE_PATH'].split(':'):
path = os.path.abspath(path.strip())
if os.path.exists(path):
pkgs = find_catkin_packages_in(path, options.verbose)
ws_pkgs.extend(pkgs)
elif options.verbose:
print("Skipping non-existent path " + path)
set_workspace_packages(ws_pkgs)
lookup = _get_default_RosdepLookup(options)
# Handle the --skip-keys option by pretending that they are packages in the catkin workspace
if command in ['install', 'check'] and options.skip_keys:
if options.verbose:
print("Skipping the specified rosdep keys:\n- " + '\n- '.join(options.skip_keys))
lookup.skipped_keys = options.skip_keys
if 0 and not packages: # disable, let individual handlers specify behavior
# possible with empty stacks
print("No packages in arguments, aborting")
return
return command_handlers[command](lookup, packages, options)
def convert_os_override_option(options_os_override):
"""
Convert os_override option flag to ``(os_name, os_version)`` tuple, or
``None`` if not set
:returns: ``(os_name, os_version)`` tuple if option is set, ``None`` otherwise
:raises: :exc:`UsageError` if option is not set properly
"""
if not options_os_override:
return None
val = options_os_override
if not ':' in val:
raise UsageError("OS override must be colon-separated OS_NAME:OS_VERSION, e.g. ubuntu:maverick")
os_name = val[:val.find(':')]
os_version = val[val.find(':')+1:]
return os_name, os_version
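# Example: convert_os_override_option('ubuntu:maverick') returns
# ('ubuntu', 'maverick'); convert_os_override_option(None) returns None, and
# a value without a colon raises UsageError.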
def configure_installer_context(installer_context, options):
"""
Configure the *installer_context* from *options*.
- Override the OS detector in *installer_context* if necessary.
- Set *as_root* for installers if specified.
:raises: :exc:`UsageError` If user input options incorrectly
"""
os_override = convert_os_override_option(options.os_override)
if os_override is not None:
installer_context.set_os_override(*os_override)
for k,v in options.as_root.items():
try:
installer_context.get_installer(k).as_root = v
except KeyError:
raise UsageError("Installer '%s' not defined." % k)
def command_init(options):
try:
data = download_default_sources_list()
except URLError as e:
print("ERROR: cannot download default sources list from:\n%s\nWebsite may be down."%(DEFAULT_SOURCES_LIST_URL))
return 4
# reuse path variable for error message
path = get_sources_list_dir()
old_umask = os.umask(0o022)
try:
if not os.path.exists(path):
os.makedirs(path)
path = get_default_sources_list_file()
if os.path.exists(path):
print("ERROR: default sources list file already exists:\n\t%s\nPlease delete if you wish to re-initialize"%(path))
return 1
with open(path, 'w') as f:
f.write(data)
print("Wrote %s"%(path))
print("Recommended: please run\n\n\trosdep update\n")
except IOError as e:
print("ERROR: cannot create %s:\n\t%s"%(path, e), file=sys.stderr)
return 2
except OSError as e:
print("ERROR: cannot create %s:\n\t%s\nPerhaps you need to run 'sudo rosdep init' instead"%(path, e), file=sys.stderr)
return 3
finally:
os.umask(old_umask)
def command_update(options):
error_occured = []
def update_success_handler(data_source):
print("Hit %s"%(data_source.url))
def update_error_handler(data_source, exc):
error_string = "ERROR: unable to process source [%s]:\n\t%s"%(data_source.url, exc)
print(error_string, file=sys.stderr)
error_occured.append(error_string)
sources_list_dir = get_sources_list_dir()
# disable deprecation warnings when using the command-line tool
warnings.filterwarnings("ignore", category=PreRep137Warning)
if not os.path.exists(sources_list_dir):
print("ERROR: no sources directory exists on the system meaning rosdep has not yet been initialized.\n\nPlease initialize your rosdep with\n\n\tsudo rosdep init\n")
return 1
filelist = [f for f in os.listdir(sources_list_dir) if f.endswith('.list')]
if not filelist:
print("ERROR: no data sources in %s\n\nPlease initialize your rosdep with\n\n\tsudo rosdep init\n"%sources_list_dir, file=sys.stderr)
return 1
try:
print("reading in sources list data from %s"%(sources_list_dir))
sources_cache_dir = get_sources_cache_dir()
try:
if os.geteuid() == 0:
print("Warning: running 'rosdep update' as root is not recommended.", file=sys.stderr)
print(" You should run 'sudo rosdep fix-permissions' and invoke 'rosdep update' again without sudo.", file=sys.stderr)
except AttributeError:
# nothing we wanna do under Windows
pass
update_sources_list(success_handler=update_success_handler,
error_handler=update_error_handler)
print("updated cache in %s"%(sources_cache_dir))
except InvalidData as e:
print("ERROR: invalid sources list file:\n\t%s"%(e), file=sys.stderr)
return 1
except IOError as e:
print("ERROR: error loading sources list:\n\t%s"%(e), file=sys.stderr)
return 1
if error_occured:
print ("ERROR: Not all sources were able to be updated.\n[[[")
for e in error_occured:
print (e)
print("]]]")
return 1
def command_keys(lookup, packages, options):
lookup = _get_default_RosdepLookup(options)
rosdep_keys = get_keys(lookup, packages, options.recursive)
_print_lookup_errors(lookup)
print('\n'.join(rosdep_keys))
def get_keys(lookup, packages, recursive):
rosdep_keys = []
for package_name in packages:
deps = lookup.get_rosdeps(package_name, implicit=recursive)
rosdep_keys.extend(deps)
return set(rosdep_keys)
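# Example (hypothetical package name): get_keys(lookup, ['my_pkg'], recursive=True)
# returns the set of rosdep keys declared by my_pkg, including keys pulled in
# by its implicit/recursive dependencies.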
def command_check(lookup, packages, options):
verbose = options.verbose
installer_context = create_default_installer_context(verbose=verbose)
configure_installer_context(installer_context, options)
installer = RosdepInstaller(installer_context, lookup)
uninstalled, errors = installer.get_uninstalled(packages, implicit=options.recursive, verbose=verbose)
# pretty print the result
if [v for k, v in uninstalled if v]:
print("System dependencies have not been satisified:")
for installer_key, resolved in uninstalled:
if resolved:
for r in resolved:
print("%s\t%s"%(installer_key, r))
else:
print("All system dependencies have been satisified")
if errors:
for package_name, ex in errors.items():
if isinstance(ex, rospkg.ResourceNotFound):
print("ERROR[%s]: resource not found [%s]"%(package_name, ex.args[0]), file=sys.stderr)
else:
print("ERROR[%s]: %s"%(package_name, ex), file=sys.stderr)
if uninstalled:
return 1
else:
return 0
def error_to_human_readable(error):
if isinstance(error, rospkg.ResourceNotFound):
return "Missing resource %s"%(error,)
elif isinstance(error, ResolutionError):
return "%s"%(error.args[0],)
else:
return "%s"%(error,)
def command_install(lookup, packages, options):
# map options
install_options = dict(interactive=not options.default_yes, verbose=options.verbose,
reinstall=options.reinstall,
continue_on_error=options.robust, simulate=options.simulate, quiet=options.quiet)
# setup installer
installer_context = create_default_installer_context(verbose=options.verbose)
configure_installer_context(installer_context, options)
installer = RosdepInstaller(installer_context, lookup)
if options.reinstall:
if options.verbose:
print("reinstall is true, resolving all dependencies")
try:
uninstalled, errors = lookup.resolve_all(packages, installer_context, implicit=options.recursive)
except InvalidData as e:
print("ERROR: unable to process all dependencies:\n\t%s"%(e), file=sys.stderr)
return 1
else:
uninstalled, errors = installer.get_uninstalled(packages, implicit=options.recursive, verbose=options.verbose)
if options.verbose:
print("uninstalled dependencies are: [%s]"%(', '.join([', '.join(pkg) for pkg in [v for k,v in uninstalled]])))
if errors:
err_msg = ("ERROR: the following packages/stacks could not have their "
"rosdep keys resolved\nto system dependencies")
if rospkg.distro.current_distro_codename() is None:
err_msg += (
" (ROS distro is not set. "
"Make sure `ROS_DISTRO` environment variable is set, or use "
"`--rosdistro` option to specify the distro, "
"e.g. `--rosdistro indigo`)"
)
print(err_msg + ":", file=sys.stderr)
for rosdep_key, error in errors.items():
print("%s: %s"%(rosdep_key, error_to_human_readable(error)), file=sys.stderr)
if options.robust:
print("Continuing to install resolvable dependencies...")
else:
return 1
try:
installer.install(uninstalled, **install_options)
if not options.simulate:
print("#All required rosdeps installed successfully")
return 0
except KeyError as e:
raise RosdepInternalError(e)
except InstallFailed as e:
print("ERROR: the following rosdeps failed to install", file=sys.stderr)
print('\n'.join([" %s: %s"%(k, m) for k,m in e.failures]), file=sys.stderr)
return 1
def _compute_depdb_output(lookup, packages, options):
installer_context = create_default_installer_context(verbose=options.verbose)
os_name, os_version = _detect_os(installer_context, options)
output = "Rosdep dependencies for operating system %s version %s "%(os_name, os_version)
for stack_name in packages:
output += "\nSTACK: %s\n"%(stack_name)
view = lookup.get_stack_rosdep_view(stack_name)
for rosdep in view.keys():
definition = view.lookup(rosdep)
resolved = resolve_definition(definition, os_name, os_version)
output = output + "<<<< %s -> %s >>>>\n"%(rosdep, resolved)
return output
def command_db(options):
# exact same setup logic as command_resolve, should possibly combine
lookup = _get_default_RosdepLookup(options)
installer_context = create_default_installer_context(verbose=options.verbose)
configure_installer_context(installer_context, options)
os_name, os_version = installer_context.get_os_name_and_version()
try:
installer_keys = installer_context.get_os_installer_keys(os_name)
default_key = installer_context.get_default_os_installer_key(os_name)
except KeyError:
raise UnsupportedOs(os_name, installer_context.get_os_keys())
installer = installer_context.get_installer(default_key)
print("OS NAME: %s"%os_name)
print("OS VERSION: %s"%os_version)
errors = []
print("DB [key -> resolution]")
# db does not leverage the resource-based API
view = lookup.get_rosdep_view(DEFAULT_VIEW_KEY, verbose=options.verbose)
for rosdep_name in view.keys():
try:
d = view.lookup(rosdep_name)
inst_key, rule = d.get_rule_for_platform(os_name, os_version, installer_keys, default_key)
if options.filter_for_installers and inst_key not in options.filter_for_installers:
continue
resolved = installer.resolve(rule)
resolved_str = " ".join(resolved)
print ("%s -> %s"%(rosdep_name, resolved_str))
except ResolutionError as e:
errors.append(e)
#TODO: add command-line option for users to be able to see this.
#This is useful for platform bringup, but useless for most users
#as the rosdep db contains numerous, platform-specific keys.
if 0:
for error in errors:
print("WARNING: %s"%(error_to_human_readable(error)), file=sys.stderr)
def _print_lookup_errors(lookup):
for error in lookup.get_errors():
if isinstance(error, rospkg.ResourceNotFound):
print("WARNING: unable to locate resource %s"%(str(error.args[0])), file=sys.stderr)
else:
print("WARNING: %s"%(str(error)), file=sys.stderr)
def command_what_needs(args, options):
lookup = _get_default_RosdepLookup(options)
packages = []
for rosdep_name in args:
packages.extend(lookup.get_resources_that_need(rosdep_name))
_print_lookup_errors(lookup)
print('\n'.join(set(packages)))
def command_where_defined(args, options):
lookup = _get_default_RosdepLookup(options)
locations = []
for rosdep_name in args:
locations.extend(lookup.get_views_that_define(rosdep_name))
_print_lookup_errors(lookup)
if locations:
for location in locations:
origin = location[1]
print(origin)
else:
print("ERROR: cannot find definition(s) for [%s]"%(', '.join(args)), file=sys.stderr)
return 1
def command_resolve(args, options):
lookup = _get_default_RosdepLookup(options)
installer_context = create_default_installer_context(verbose=options.verbose)
configure_installer_context(installer_context, options)
installer, installer_keys, default_key, \
os_name, os_version = get_default_installer(installer_context=installer_context,
verbose=options.verbose)
invalid_key_errors = []
for rosdep_name in args:
if len(args) > 1:
print("#ROSDEP[%s]"%rosdep_name)
view = lookup.get_rosdep_view(DEFAULT_VIEW_KEY, verbose=options.verbose)
try:
d = view.lookup(rosdep_name)
except KeyError as e:
invalid_key_errors.append(e)
continue
rule_installer, rule = d.get_rule_for_platform(os_name, os_version, installer_keys, default_key)
installer = installer_context.get_installer(rule_installer)
resolved = installer.resolve(rule)
print("#%s"%(rule_installer))
print (" ".join([str(r) for r in resolved]))
for error in invalid_key_errors:
print("ERROR: no rosdep rule for %s"%(error), file=sys.stderr)
for error in lookup.get_errors():
print("WARNING: %s"%(error_to_human_readable(error)), file=sys.stderr)
if invalid_key_errors:
return 1 # error exit code
def command_fix_permissions(options):
import os
import pwd
import grp
stat_info = os.stat(os.path.expanduser('~'))
uid = stat_info.st_uid
gid = stat_info.st_gid
user_name = pwd.getpwuid(uid).pw_name
try:
group_name = grp.getgrgid(gid).gr_name
except KeyError as e:
group_name = gid
ros_home = rospkg.get_ros_home()
print("Recursively changing ownership of ros home directory '{0}' "
"to '{1}:{2}' (current user)...".format(ros_home, user_name, group_name))
failed = []
try:
for dirpath, dirnames, filenames in os.walk(ros_home):
try:
os.lchown(dirpath, uid, gid)
except Exception as e:
failed.append((dirpath, str(e)))
for f in filenames:
try:
path = os.path.join(dirpath, f)
os.lchown(path, uid, gid)
except Exception as e:
failed.append((path, str(e)))
except Exception:
import traceback
traceback.print_exc()
print("Failed to walk directory. Try with sudo?")
else:
if failed:
print("Failed to change ownership for:")
for p, e in failed:
print("{0} --> {1}".format(p, e))
print("Try with sudo?")
else:
print("Done.")
command_handlers = {
'db': command_db,
'check': command_check,
'keys': command_keys,
'install': command_install,
'what-needs': command_what_needs,
'where-defined': command_where_defined,
'resolve': command_resolve,
'init': command_init,
'update': command_update,
'fix-permissions': command_fix_permissions,
# backwards compat
'what_needs': command_what_needs,
'where_defined': command_where_defined,
'depdb': command_db,
}
# commands that accept rosdep names as args
_command_rosdep_args = ['what-needs', 'what_needs', 'where-defined', 'where_defined', 'resolve']
# commands that take no args
_command_no_args = ['update', 'init', 'db', 'fix-permissions']
_commands = command_handlers.keys()
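# Dispatch sketch (illustrative; the option parsing and entry point live elsewhere in this module):
# the chosen verb is looked up in command_handlers and invoked, e.g.
# command_handlers['update'](options) for 'rosdep update', while _command_rosdep_args and
# _command_no_args drive the positional-argument validation beforehand.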
|
|
import graphene
from django.db.models import QuerySet
from django.utils import six
from django.utils.decorators import classonlymethod
from django.shortcuts import _get_queryset
from graphene.utils.props import props
from graphene_django.registry import get_global_registry
from .types import FormError
from .utils import convert_form_errors
__all__ = ['ModelFormMutation', 'FormMutation']
"""
Base Form mutation
"""
class BaseFormMutation(object):
def __init__(self, **kwargs):
self.form = None
# Go through keyword arguments and save their values on this instance.
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
def get_form_kwargs(self, root, args, context, info):
return {
'data': self.get_data(root, args, context, info),
}
def get_data(self, root, args, context, info):
return args.get(self._meta.input_data_key, None)
def build_form(self, root, args, context, info):
return self._meta.form_class(**self.get_form_kwargs(root, args, context, info))
def _execute(self, root, args, context, info):
# first build the form
form = self.form = self.build_form(root, args, context, info)
# check its validity
if form.is_valid():
# the form is valid
# continue on the successful method
response = self.get_successful_response(root, args, context, info, form)
else:
# invalid form
# move on the unsuccessful method
response = self.get_unsuccessful_response(root, args, context, info, form)
return self.mutation(**response)
def execute(self, root, args, context, info):
return self.__class__._execute_chain(self, root, args, context, info)
def get_successful_response(self, root, args, context, info, form):
return {self._meta.output_success_key: True}
def get_unsuccessful_response(self, root, args, context, info, form):
# the form is invalid: provide the converted errors along with success=False
return {
self._meta.output_error_key: convert_form_errors(form),
self._meta.output_success_key: False,
}
@classonlymethod
def as_mutation(cls, **initkwargs):
def mutate(mutation, root, args, context, info):
self = cls(**initkwargs)
self.mutation = mutation
self.root = root
self.args = args
self.context = context
self.info = info
return self.execute(root, args, context, info)
return type(
# keep the name of the class
cls.__name__,
# define it as final mutation
(graphene.Mutation,),
# and here come the attributes
{
# the inputs
'Input': cls._input,
# the mutate method will instance this class
'mutate': classmethod(mutate),
# provide output
**cls._output_attrs,
},
)
"""
Base class for model form mutation
"""
class BaseModelFormMutation(BaseFormMutation):
def get_form_kwargs(self, root, args, context, info):
# get original kwargs
kwargs = super(BaseModelFormMutation, self).get_form_kwargs(root, args, context, info)
# add the instance
kwargs['instance'] = self.get_instance(root, args, context, info)
return kwargs
def get_instance(self, root, args, context, info):
if not self._meta.filter:
# we don't need to get an instance
return None
# get the queryset first
queryset = self._meta.queryset
# it might be a function to call
if callable(queryset):
# call it to get our queryset
queryset = queryset(root, args, context, info)
# ensure we have a QuerySet
assert isinstance(queryset, QuerySet)
# we may now get the object
return queryset.get(**dict(self._meta.filter(root, args, context, info)))
def get_successful_response(self, root, args, context, info, form):
# get the original response
response = super(BaseModelFormMutation, self).get_successful_response(root, args, context, info, form)
# save the form
instance = form.save(commit=self._meta.commit)
if self._meta.output_instance_key:
# we must provide the instance
response[self._meta.output_instance_key] = instance
return response
"""
Options/settings for form mutation
"""
class Options(object):
def __init__(self, options=None):
# the model form class
self.form_class = getattr(options, 'form', None)
# the input keys
self.input_data_key = getattr(options, 'input_data_key', 'data')
# the output keys
self.output_success_key = getattr(options, 'output_success_key', 'success')
self.output_error_key = getattr(options, 'output_error_key', 'errors')
# the registry
self.registry = getattr(options, 'registry', get_global_registry())
# middlewares
self.middlewares = getattr(options, 'middlewares', [])
"""
Options/settings for model form mutation
"""
class ModelOptions(Options):
def __init__(self, options=None):
super(ModelOptions, self).__init__(options)
# should we commit
self.commit = getattr(options, 'commit', True)
# the output keys
self.output_instance_key = getattr(options, 'output_instance_key', None)
# we might have a queryset to follow
self.queryset = getattr(options, 'queryset', None)
self.filter = getattr(options, 'filter', None)
# from the form get the model
self.model = self.form_class._meta.model
if self.queryset is None:
# get the queryset from the model
self.queryset = _get_queryset(self.model)
"""
Class to build dynamic getters able to handle multiple cases
"""
class ArgumentGetter:
def __init__(self, filter):
if isinstance(filter, list) or isinstance(filter, tuple):
# convert it into a dict where the key must match
self.filter = {v: ArgumentGetter.build_deep_getter(v) for v in filter}
elif isinstance(filter, dict):
self.filter = {key: ArgumentGetter.build_deep_getter(value) for key, value in filter.items()}
else:
# we don't know how to handle it
raise TypeError('invalid filter args')
@staticmethod
def build_deep_getter(keys):
if isinstance(keys, str):
# convert a single string into an array
keys = [keys]
# get the current key to get
current_key = keys[0]
# and copy the next ones
next_keys = keys[1:]
if next_keys:
# we must go deeper
next_step = ArgumentGetter.build_deep_getter(next_keys)
else:
next_step = None
def getter(root, args, context, info):
# get the value for the current key
value = args.get(current_key, None)
if value is None or not next_step:
# we cannot go further
return value
return next_step(root, value, context, info)
return getter
def __call__(self, root, args, context, info):
for key, getter in self.filter.items():
yield (key, getter(root, args, context, info))
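# Illustrative example (assumed usage, not part of the original module):
#
#   getter = ArgumentGetter({'pk': ('data', 'id')})
#   dict(getter(None, {'data': {'id': 42}}, None, None))  # -> {'pk': 42}
#
# A plain list such as ['pk'] maps each name onto itself, i.e. the value is read from args['pk'].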
"""
Meta class for form mutation
"""
class FormMutationMeta(type):
options_class = Options
def __new__(mcs, name, bases, attrs):
# build the new class
new_class = super(FormMutationMeta, mcs).__new__(mcs, name, bases, attrs)
if bases == (BaseFormMutation,):
return new_class
input_class = attrs.pop('Input', None)
output_class = attrs.pop('Output', None)
# get the meta class
opts = new_class._meta = mcs.options_class(getattr(new_class, 'Meta', None))
# build the input class
new_class._input = type('Input', (object,), props(input_class) if input_class else {})
# build the output attributes
new_class._output_attrs = {
# the common fields
opts.output_success_key: graphene.Boolean(required=True),
opts.output_error_key: graphene.List(FormError),
# the custom ones
**(props(output_class) if output_class else {}),
}
# build the execute chain
execute_chain = lambda self, root, args, context, info: self._execute(root, args, context, info)
for mw in reversed(opts.middlewares):
execute_chain = mw(execute_chain)
new_class._execute_chain = execute_chain
return new_class
"""
Meta class for model form mutation
"""
class ModelFormMutationMeta(FormMutationMeta):
options_class = ModelOptions
def __new__(mcs, name, bases, attrs):
if bases == (BaseModelFormMutation,):
return super(FormMutationMeta, mcs).__new__(mcs, name, bases, attrs)
# build the new class
new_class = super(ModelFormMutationMeta, mcs).__new__(mcs, name, bases, attrs)
# get options
opts = new_class._meta
if opts.filter is not None and not callable(opts.filter):
# handle it ourselves
opts.filter = ArgumentGetter(opts.filter)
# get output attributes
output_attrs = new_class._output_attrs
if opts.output_instance_key is not None:
if opts.output_instance_key not in output_attrs:
# get the output type from the registry
output_type = opts.registry.get_type_for_model(opts.model)
# we have to handle it ourselves
output_attrs[opts.output_instance_key] = graphene.Field(output_type)
return new_class
"""
Usable class for form mutation
"""
class FormMutation(six.with_metaclass(FormMutationMeta, BaseFormMutation)):
pass
"""
Usable class for model form mutation
"""
class ModelFormMutation(six.with_metaclass(ModelFormMutationMeta, BaseModelFormMutation)):
pass
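# Illustrative usage (names such as Article/ArticleForm are hypothetical, not part of this module):
#
#   class UpdateArticle(ModelFormMutation):
#       class Meta:
#           form = ArticleForm                   # a django ModelForm
#           filter = {'pk': ('data', 'id')}      # fetch the instance via args['data']['id']
#           output_instance_key = 'article'      # expose the saved instance in the payload
#
#   # typically exposed on a schema as: update_article = UpdateArticle.as_mutation().Field()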
|
|
# -*- coding: utf-8 -*-
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo.config import cfg
import routes
import six
import webob
import glance.api.common
import glance.common.config
import glance.context
from glance.db.sqlalchemy import api as db_api
from glance.db.sqlalchemy import models as db_models
from glance.openstack.common import jsonutils
from glance.openstack.common import timeutils
from glance.registry.api import v2 as rserver
from glance.tests.unit import base
from glance.tests import utils as test_utils
CONF = cfg.CONF
_gen_uuid = lambda: str(uuid.uuid4())
UUID1 = _gen_uuid()
UUID2 = _gen_uuid()
class TestRegistryRPC(base.IsolatedUnitTest):
def setUp(self):
super(TestRegistryRPC, self).setUp()
self.mapper = routes.Mapper()
self.api = test_utils.FakeAuthMiddleware(rserver.API(self.mapper),
is_admin=True)
uuid1_time = timeutils.utcnow()
uuid2_time = uuid1_time + datetime.timedelta(seconds=5)
self.FIXTURES = [
{'id': UUID1,
'name': 'fake image #1',
'status': 'active',
'disk_format': 'ami',
'container_format': 'ami',
'is_public': False,
'created_at': uuid1_time,
'updated_at': uuid1_time,
'deleted_at': None,
'deleted': False,
'checksum': None,
'min_disk': 0,
'min_ram': 0,
'size': 13,
'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID1),
'metadata': {}, 'status': 'active'}],
'properties': {'type': 'kernel'}},
{'id': UUID2,
'name': 'fake image #2',
'status': 'active',
'disk_format': 'vhd',
'container_format': 'ovf',
'is_public': True,
'created_at': uuid2_time,
'updated_at': uuid2_time,
'deleted_at': None,
'deleted': False,
'checksum': None,
'min_disk': 5,
'min_ram': 256,
'size': 19,
'locations': [{'url': "file:///%s/%s" % (self.test_dir, UUID2),
'metadata': {}, 'status': 'active'}],
'properties': {}}]
self.context = glance.context.RequestContext(is_admin=True)
db_api.get_engine()
self.destroy_fixtures()
self.create_fixtures()
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryRPC, self).tearDown()
self.destroy_fixtures()
def create_fixtures(self):
for fixture in self.FIXTURES:
db_api.image_create(self.context, fixture)
# We write a fake image file to the filesystem
with open("%s/%s" % (self.test_dir, fixture['id']), 'wb') as image:
image.write("chunk00000remainder")
image.flush()
def destroy_fixtures(self):
# Easiest to just drop the models and re-create them...
db_models.unregister_models(db_api.get_engine())
db_models.register_models(db_api.get_engine())
def test_show(self):
"""
Tests that the registry API endpoint
returns the expected image
"""
fixture = {'id': UUID2,
'name': 'fake image #2',
'size': 19,
'min_ram': 256,
'min_disk': 5,
'checksum': None}
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get',
'kwargs': {'image_id': UUID2},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
image = res_dict
for k, v in six.iteritems(fixture):
self.assertEqual(v, image[k])
def test_show_unknown(self):
"""
Tests that the registry API endpoint
returns a 404 for an unknown image id
"""
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get',
'kwargs': {'image_id': _gen_uuid()},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(res_dict["_error"]["cls"],
'glance.common.exception.NotFound')
def test_get_index(self):
"""
Tests that the image_get_all command returns list of
images
"""
fixture = {'id': UUID2,
'name': 'fake image #2',
'size': 19,
'checksum': None}
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': fixture},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 1)
for k, v in six.iteritems(fixture):
self.assertEqual(v, images[0][k])
def test_get_index_marker(self):
"""
Tests that the registry API returns list of
public images that conforms to a marker query param
"""
uuid5_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid4_time = uuid5_time + datetime.timedelta(seconds=5)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 19,
'checksum': None,
'created_at': uuid3_time,
'updated_at': uuid3_time}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None,
'created_at': uuid4_time,
'updated_at': uuid4_time}
db_api.image_create(self.context, extra_fixture)
UUID5 = _gen_uuid()
extra_fixture = {'id': UUID5,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None,
'created_at': uuid5_time,
'updated_at': uuid5_time}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'marker': UUID4, "is_public": True},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
# should be sorted by created_at desc, id desc
# page should start after marker 4
self.assertEqual(len(images), 2)
self.assertEqual(images[0]['id'], UUID5)
self.assertEqual(images[1]['id'], UUID2)
def test_get_index_marker_and_name_asc(self):
"""Test marker and null name ascending
Tests that the registry API returns 200
when a marker and a null name are combined
ascending order
"""
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': None,
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'marker': UUID3, 'sort_key': 'name',
'sort_dir': 'asc'},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 2)
def test_get_index_marker_and_name_desc(self):
"""Test marker and null name descending
Tests that the registry API returns 200
when a marker and a null name are combined
descending order
"""
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': None,
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'marker': UUID3, 'sort_key': 'name',
'sort_dir': 'desc'},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 0)
def test_get_index_marker_and_disk_format_asc(self):
"""Test marker and null disk format ascending
Tests that the registry API returns 200
when a marker and a null disk_format are combined
ascending order
"""
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': None,
'container_format': 'ovf',
'name': 'Fake image',
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'marker': UUID3, 'sort_key': 'disk_format',
'sort_dir': 'asc'},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 2)
def test_get_index_marker_and_disk_format_desc(self):
"""Test marker and null disk format descending
Tests that the registry API returns 200
when a marker and a null disk_format are combined
descending order
"""
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': None,
'container_format': 'ovf',
'name': 'Fake image',
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'marker': UUID3, 'sort_key': 'disk_format',
'sort_dir': 'desc'},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 0)
def test_get_index_marker_and_container_format_asc(self):
"""Test marker and null container format ascending
Tests that the registry API returns 200
when a marker and a null container_format are combined
ascending order
"""
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': None,
'name': 'Fake image',
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'marker': UUID3, 'sort_key': 'container_format',
'sort_dir': 'asc'},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 2)
def test_get_index_marker_and_container_format_desc(self):
"""Test marker and null container format descending
Tests that the registry API returns 200
when a marker and a null container_format are combined
descending order
"""
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': None,
'name': 'Fake image',
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'marker': UUID3, 'sort_key': 'container_format',
'sort_dir': 'desc'},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 0)
def test_get_index_unknown_marker(self):
"""
Tests that the registry API returns a NotFound
when an unknown marker is provided
"""
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'marker': _gen_uuid()},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
result = jsonutils.loads(res.body)[0]
self.assertIn("_error", result)
self.assertIn("NotFound", result["_error"]["cls"])
def test_get_index_limit(self):
"""
Tests that the registry API returns list of
public images that conforms to a limit query param
"""
uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid4_time = uuid3_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 19,
'checksum': None,
'created_at': uuid3_time,
'updated_at': uuid3_time}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None,
'created_at': uuid4_time,
'updated_at': uuid4_time}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'limit': 1},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(res.status_int, 200)
images = res_dict
self.assertEqual(len(images), 1)
# expect list to be sorted by created_at desc
self.assertEqual(images[0]['id'], UUID4)
def test_get_index_limit_marker(self):
"""
Tests that the registry API returns list of
public images that conforms to limit and marker query params
"""
uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid4_time = uuid3_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 19,
'checksum': None,
'created_at': uuid3_time,
'updated_at': uuid3_time}
db_api.image_create(self.context, extra_fixture)
extra_fixture = {'id': _gen_uuid(),
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None,
'created_at': uuid4_time,
'updated_at': uuid4_time}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'marker': UUID3, 'limit': 1},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(res.status_int, 200)
images = res_dict
self.assertEqual(len(images), 1)
# expect list to be sorted by created_at desc
self.assertEqual(images[0]['id'], UUID2)
def test_get_index_filter_name(self):
"""
Tests that the registry API returns list of
public images that have a specific name. This is really a sanity
check, filtering is tested more in-depth using /images/detail
"""
extra_fixture = {'id': _gen_uuid(),
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
extra_fixture = {'id': _gen_uuid(),
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'name': 'new name! #123'}},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(res.status_int, 200)
images = res_dict
self.assertEqual(len(images), 2)
for image in images:
self.assertEqual('new name! #123', image['name'])
def test_get_index_filter_on_user_defined_properties(self):
"""
Tests that the registry API returns list of
public images that have a specific user-defined properties.
"""
properties = {'distro': 'ubuntu', 'arch': 'i386', 'type': 'kernel'}
extra_id = _gen_uuid()
extra_fixture = {'id': extra_id,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'image-extra-1',
'size': 19, 'properties': properties,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
# testing with a common property.
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'type': 'kernel'}},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 2)
self.assertEqual(images[0]['id'], extra_id)
self.assertEqual(images[1]['id'], UUID1)
# testing with a non-existent value for a common property.
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'type': 'random'}},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 0)
# testing with a non-existent property.
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'poo': 'random'}},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 0)
# testing with multiple existing properties.
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'type': 'kernel', 'distro': 'ubuntu'}},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 1)
self.assertEqual(images[0]['id'], extra_id)
# testing with multiple existing properties but non-existent values.
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'type': 'random', 'distro': 'random'}},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 0)
# testing with multiple non-existing properties.
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'typo': 'random', 'poo': 'random'}},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 0)
# testing with one existing property and the other non-existing.
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'type': 'kernel', 'poo': 'random'}},
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
images = jsonutils.loads(res.body)[0]
self.assertEqual(len(images), 0)
def test_get_index_sort_default_created_at_desc(self):
"""
Tests that the registry API returns list of
public images that conforms to a default sort key/dir
"""
uuid5_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid4_time = uuid5_time + datetime.timedelta(seconds=5)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 19,
'checksum': None,
'created_at': uuid3_time,
'updated_at': uuid3_time}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None,
'created_at': uuid4_time,
'updated_at': uuid4_time}
db_api.image_create(self.context, extra_fixture)
UUID5 = _gen_uuid()
extra_fixture = {'id': UUID5,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None,
'created_at': uuid5_time,
'updated_at': uuid5_time}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(res.status_int, 200)
images = res_dict
# (flaper87) registry's v1 forced is_public to True
# when no value was specified. This is not
# the default behaviour anymore.
self.assertEqual(len(images), 5)
self.assertEqual(images[0]['id'], UUID3)
self.assertEqual(images[1]['id'], UUID4)
self.assertEqual(images[2]['id'], UUID5)
self.assertEqual(images[3]['id'], UUID2)
self.assertEqual(images[4]['id'], UUID1)
def test_get_index_sort_name_asc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by name in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'asdf',
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'xyz',
'size': 20,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
UUID5 = _gen_uuid()
extra_fixture = {'id': UUID5,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': None,
'size': 20,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'sort_key': 'name', 'sort_dir': 'asc'}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
images = res_dict
self.assertEqual(len(images), 5)
self.assertEqual(images[0]['id'], UUID5)
self.assertEqual(images[1]['id'], UUID3)
self.assertEqual(images[2]['id'], UUID1)
self.assertEqual(images[3]['id'], UUID2)
self.assertEqual(images[4]['id'], UUID4)
def test_get_index_sort_status_desc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by status in
descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'queued',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'asdf',
'size': 19,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'xyz',
'size': 20,
'checksum': None,
'created_at': uuid4_time,
'updated_at': uuid4_time}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'sort_key': 'status', 'sort_dir': 'asc'}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
images = res_dict
self.assertEqual(len(images), 4)
self.assertEqual(images[0]['id'], UUID1)
self.assertEqual(images[1]['id'], UUID2)
self.assertEqual(images[2]['id'], UUID4)
self.assertEqual(images[3]['id'], UUID3)
def test_get_index_sort_disk_format_asc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by disk_format in
ascending order.
"""
uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'ami',
'container_format': 'ami',
'name': 'asdf',
'size': 19,
'checksum': None,
'created_at': uuid3_time,
'updated_at': uuid3_time}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vdi',
'container_format': 'ovf',
'name': 'xyz',
'size': 20,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'sort_key': 'disk_format', 'sort_dir': 'asc'}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
images = res_dict
self.assertEqual(len(images), 4)
self.assertEqual(images[0]['id'], UUID1)
self.assertEqual(images[1]['id'], UUID3)
self.assertEqual(images[2]['id'], UUID4)
self.assertEqual(images[3]['id'], UUID2)
def test_get_index_sort_container_format_desc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by container_format in
descending order.
"""
uuid3_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'ami',
'container_format': 'ami',
'name': 'asdf',
'size': 19,
'checksum': None,
'created_at': uuid3_time,
'updated_at': uuid3_time}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'iso',
'container_format': 'bare',
'name': 'xyz',
'size': 20,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'sort_key': 'container_format',
'sort_dir': 'desc'}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
images = res_dict
self.assertEqual(len(images), 4)
self.assertEqual(images[0]['id'], UUID2)
self.assertEqual(images[1]['id'], UUID4)
self.assertEqual(images[2]['id'], UUID3)
self.assertEqual(images[3]['id'], UUID1)
def test_get_index_sort_size_asc(self):
"""
Tests that the registry API returns list of
public images sorted by size in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'ami',
'container_format': 'ami',
'name': 'asdf',
'size': 100,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'iso',
'container_format': 'bare',
'name': 'xyz',
'size': 2,
'checksum': None}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'sort_key': 'size',
'sort_dir': 'asc'}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
images = res_dict
self.assertEqual(len(images), 4)
self.assertEqual(images[0]['id'], UUID4)
self.assertEqual(images[1]['id'], UUID1)
self.assertEqual(images[2]['id'], UUID2)
self.assertEqual(images[3]['id'], UUID3)
def test_get_index_sort_created_at_asc(self):
"""
Tests that the registry API returns list of
public images sorted by created_at in ascending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 19,
'checksum': None,
'created_at': uuid3_time,
'updated_at': uuid3_time}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None,
'created_at': uuid4_time,
'updated_at': uuid4_time}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'sort_key': 'created_at',
'sort_dir': 'asc'}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
images = res_dict
self.assertEqual(len(images), 4)
self.assertEqual(images[0]['id'], UUID1)
self.assertEqual(images[1]['id'], UUID2)
self.assertEqual(images[2]['id'], UUID4)
self.assertEqual(images[3]['id'], UUID3)
def test_get_index_sort_updated_at_desc(self):
"""
Tests that the registry API returns list of
public images sorted by updated_at in descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = {'id': UUID3,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 19,
'checksum': None,
'created_at': None,
'updated_at': uuid3_time}
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = {'id': UUID4,
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf',
'name': 'new name! #123',
'size': 20,
'checksum': None,
'created_at': None,
'updated_at': uuid4_time}
db_api.image_create(self.context, extra_fixture)
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'sort_key': 'updated_at',
'sort_dir': 'desc'}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
images = res_dict
self.assertEqual(len(images), 4)
self.assertEqual(images[0]['id'], UUID3)
self.assertEqual(images[1]['id'], UUID4)
self.assertEqual(images[2]['id'], UUID2)
self.assertEqual(images[3]['id'], UUID1)
def test_create_image(self):
"""Tests that the registry API creates the image"""
fixture = {'name': 'fake public image',
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf'}
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_create',
'kwargs': {'values': fixture}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
for k, v in six.iteritems(fixture):
self.assertEqual(v, res_dict[k])
# Test status was updated properly
self.assertEqual('active', res_dict['status'])
def test_create_image_with_min_disk(self):
"""Tests that the registry API creates the image"""
fixture = {'name': 'fake public image',
'is_public': True,
'status': 'active',
'min_disk': 5,
'disk_format': 'vhd',
'container_format': 'ovf'}
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_create',
'kwargs': {'values': fixture}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(5, res_dict['min_disk'])
def test_create_image_with_min_ram(self):
"""Tests that the registry API creates the image"""
fixture = {'name': 'fake public image',
'is_public': True,
'status': 'active',
'min_ram': 256,
'disk_format': 'vhd',
'container_format': 'ovf'}
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_create',
'kwargs': {'values': fixture}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(256, res_dict['min_ram'])
def test_create_image_with_min_ram_default(self):
"""Tests that the registry API creates the image"""
fixture = {'name': 'fake public image',
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf'}
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_create',
'kwargs': {'values': fixture}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(0, res_dict['min_ram'])
def test_create_image_with_min_disk_default(self):
"""Tests that the registry API creates the image"""
fixture = {'name': 'fake public image',
'status': 'active',
'is_public': True,
'disk_format': 'vhd',
'container_format': 'ovf'}
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_create',
'kwargs': {'values': fixture}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(0, res_dict['min_disk'])
def test_update_image(self):
"""Tests that the registry API updates the image"""
fixture = {'name': 'fake public image #2',
'min_disk': 5,
'min_ram': 256,
'disk_format': 'raw'}
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_update',
'kwargs': {'values': fixture,
'image_id': UUID2}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
res_dict = jsonutils.loads(res.body)[0]
self.assertNotEqual(res_dict['created_at'],
res_dict['updated_at'])
for k, v in six.iteritems(fixture):
self.assertEqual(v, res_dict[k])
def test_delete_image(self):
"""Tests that the registry API deletes the image"""
# Grab the original number of images
req = webob.Request.blank('/rpc')
req.method = "POST"
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'deleted': False}}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(res.status_int, 200)
orig_num_images = len(res_dict)
# Delete image #2
cmd = [{
'command': 'image_destroy',
'kwargs': {'image_id': UUID2}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
# Verify one less image
cmd = [{
'command': 'image_get_all',
'kwargs': {'filters': {'deleted': False}}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
res_dict = jsonutils.loads(res.body)[0]
self.assertEqual(res.status_int, 200)
new_num_images = len(res_dict)
self.assertEqual(new_num_images, orig_num_images - 1)
def test_delete_image_response(self):
"""Tests that the registry API delete returns the image metadata"""
image = self.FIXTURES[0]
req = webob.Request.blank('/rpc')
req.method = 'POST'
cmd = [{
'command': 'image_destroy',
'kwargs': {'image_id': image['id']}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
deleted_image = jsonutils.loads(res.body)[0]
self.assertEqual(image['id'], deleted_image['id'])
self.assertTrue(deleted_image['deleted'])
self.assertTrue(deleted_image['deleted_at'])
def test_get_image_members(self):
"""
Tests members listing for existing images
"""
req = webob.Request.blank('/rpc')
req.method = 'POST'
cmd = [{
'command': 'image_member_find',
'kwargs': {'image_id': UUID2}
}]
req.body = jsonutils.dumps(cmd)
res = req.get_response(self.api)
self.assertEqual(res.status_int, 200)
memb_list = jsonutils.loads(res.body)[0]
self.assertEqual(len(memb_list), 0)
|
|
import struct
import json
import logging
import binascii
import math
from collections import OrderedDict
from bitcoin.core import key
from functools import reduce
from itertools import groupby
from bitstring import BitArray, BitStream, ConstBitStream, ReadError
logger = logging.getLogger(__name__)
from counterpartylib.lib import (config, util, exceptions, message_type, address)
## encoding functions
def _encode_constructBaseLUT(snds):
# t is a tuple of the form (asset, addr, amnt [, memo, is_hex])
return sorted(list(set([t[1] for t in snds]))) # Sorted to make the list deterministic
def _encode_constructBaseAssets(sends):
# t is a tuple of the form (asset, addr, amnt [, memo, is_hex])
return sorted(list(set([t[0] for t in sends]))) # Sorted to make the list deterministic
def _encode_constructLUT(sends):
baseLUT = _encode_constructBaseLUT(sends)
# Minimal number of bits needed to represent an index into the baseLUT
lutNbits = math.ceil(math.log2(len(baseLUT)))
return {
"nbits": lutNbits,
"addrs": baseLUT
}
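# Worked example (illustrative): with 5 distinct destination addresses, indices 0..4 need
# ceil(log2(5)) == 3 bits each, so the returned dict is
# {"nbits": 3, "addrs": [<the 5 addresses, sorted>]}.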
def _encode_compressLUT(lut):
return b''.join([struct.pack('>H', len(lut['addrs']))] +
[
address.pack(addr)
for addr in lut['addrs']
])
def _encode_memo(memo=None, is_hex=False):
'''Tightly pack a memo as a Bit array'''
if memo is not None:
# signal a 1 bit for existence of the memo
barr = BitArray('0b1')
if is_hex:
# signal a 1 bit for hex encoded memos
barr.append('0b1')
if type(memo) is str: # append the string as hex-string
barr.append('uint:6=%i' % (len(memo) >> 1))
memo = '0x%s' % memo
else:
barr.append('uint:6=%i' % len(memo))
barr.append(memo)
else:
# signal a 0 bit for a string encoded memo
encoded_memo = memo.encode('utf-8')
barr.append('0b0')
barr.append('uint:6=%i' % len(encoded_memo))
barr.append(BitArray(encoded_memo))
return barr
else:
# if the memo is None, return just a 0 bit
return BitArray('0b0')
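# Worked examples (illustrative, using the bit layout implemented above):
#   _encode_memo(None)                     -> '0b0'                                    (1 bit)
#   _encode_memo('hi')                     -> '0b1' + '0b0' + uint:6=2 + bits of b'hi' (24 bits)
#   _encode_memo('deadbeef', is_hex=True)  -> '0b1' + '0b1' + uint:6=4 + 0xdeadbeef    (40 bits)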
def _safe_tuple_index(t, i):
'''Get an element from a tuple, returning None if it's out of bounds'''
if len(t) <= i:
return None
else:
return t[i]
def _encode_constructSendList(send_asset, lut, sends):
# t is a tuple of the form (asset, addr, amnt, memo, is_hex)
# if there's no memo specified, memo and is_hex are None
return [
(lut['addrs'].index(t[1]), t[2], _safe_tuple_index(t, 3), _safe_tuple_index(t, 4))
for t in sends
if t[0] == send_asset
]
def _solve_asset(db, assetName, block_index):
asset = util.resolve_subasset_longname(db, assetName)
return util.get_asset_id(db, asset, block_index)
def _encode_compressSendList(db, nbits, send, block_index):
r = BitArray()
r.append('uintbe:64=%i' % _solve_asset(db, send['assetName'], block_index))
r.append('uint:%i=%i' % (nbits, len(send['sendList'])-1))
for sendItem in send['sendList']:
idx = sendItem[0]
amnt = sendItem[1]
r.append('uint:%i=%i' % (nbits, idx))
r.append('uintbe:64=%i' % amnt)
try:
memoStr = _encode_memo(memo=sendItem[2], is_hex=sendItem[3])
except Exception:
memoStr = BitArray('0b0')
r.append(memoStr)
return r
def _encode_constructSends(sends):
lut = _encode_constructLUT(sends)
assets = _encode_constructBaseAssets(sends)
sendLists = [
{
"assetName": asset,
"sendList": _encode_constructSendList(asset, lut, sends)
}
for asset in assets
]
return {
"lut": lut,
"sendLists": sendLists
}
def _encode_compressSends(db, mpmaSend, block_index, memo=None, memo_is_hex=False):
compressedLUT = _encode_compressLUT(mpmaSend['lut'])
memo_arr = _encode_memo(memo, memo_is_hex).bin
isends = '0b' + memo_arr + ''.join([
''.join(['1', _encode_compressSendList(db, mpmaSend['lut']['nbits'], sendList, block_index).bin])
for sendList in mpmaSend['sendLists']
])
bstr = ''.join([isends, '0'])
pad = '0' * ((8 - (len(bstr) - 2)) % 8) # That -2 is because the prefix 0b is there
barr = BitArray(bstr + pad)
return b''.join([
compressedLUT,
barr.bytes
])
def _encode_mpmaSend(db, sends, block_index, memo=None, memo_is_hex=False):
mpma = _encode_constructSends(sends)
send = _encode_compressSends(db, mpma, block_index, memo=memo, memo_is_hex=memo_is_hex)
return send
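# Informal summary of the packed layout produced above (for orientation only):
#   [uint16 address count][21-byte packed address] * N          <- compressed LUT
# followed by a bit stream of
#   [optional global memo]
#   [per asset: '1' flag, 64-bit asset id, (recipient count - 1) in `nbits` bits,
#    then per recipient: `nbits`-bit LUT index, 64-bit amount, optional memo]
# terminated by a '0' flag bit and zero-padded to a whole number of bytes.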
## decoding functions
def _decode_decodeLUT(data):
(numAddresses,) = struct.unpack('>H', data[0:2])
if numAddresses == 0:
raise exceptions.DecodeError('address list can\'t be empty')
p = 2
addressList = []
bytesPerAddress = 21
for _ in range(0, numAddresses):
addr_raw = data[p:p+bytesPerAddress]
addressList.append(address.unpack(addr_raw))
p += bytesPerAddress
lutNbits = math.ceil(math.log2(numAddresses))
return addressList, lutNbits, data[p:]
def _decode_decodeSendList(stream, nbits, lut, block_index):
asset_id = stream.read('uintbe:64')
if nbits > 0:
numRecipients = stream.read('uint:%i' % nbits)
rangeLimit = numRecipients + 1
else:
numRecipients = 1
rangeLimit = numRecipients
sendList = []
asset = util.generate_asset_name(asset_id, block_index)
for _ in range(0, rangeLimit):
if nbits > 0:
idx = stream.read('uint:%i' % nbits)
else:
idx = 0
addr = lut[idx]
amount = stream.read('uintbe:64')
memo, is_hex = _decode_memo(stream)
if memo is not None:
sendList.append((addr, amount, memo, is_hex))
else:
sendList.append((addr, amount))
return asset, sendList
def _decode_decodeSends(stream, nbits, lut, block_index):
#stream = ConstBitStream(data)
sends = OrderedDict()
while stream.read('bool'):
asset, sendList = _decode_decodeSendList(stream, nbits, lut, block_index)
sends[asset] = sendList
return sends
def _decode_memo(stream):
if stream.read('bool'):
is_hex = stream.read('bool')
mlen = stream.read('uint:6')
data = stream.read('bytes:%i' % mlen)
return data, is_hex
else:
return None, None
def _decode_mpmaSendDecode(data, block_index):
lut, nbits, remain = _decode_decodeLUT(data)
stream = ConstBitStream(remain)
memo, is_hex = _decode_memo(stream)
sends = _decode_decodeSends(stream, nbits, lut, block_index)
if memo is not None:
for asset in sends:
sendList = sends[asset]
for idx, send in enumerate(sendList):
if len(send) == 2:
sendList[idx] = (send[0], send[1], memo, is_hex)
return sends
|
|
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The MLPerf reference implementation of BLEU."""
import collections
import math
import re
import sys
import time
import unicodedata
import numpy as np
import six
import REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.compat as tf
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import metrics
def is_unicode(s):
if isinstance(s, str):
return True
return False
def to_unicode(s, ignore_errors=False):
if is_unicode(s):
return s
error_mode = "ignore" if ignore_errors else "strict"
return s.decode("utf-8", errors=error_mode)
def _get_ngrams(segment, max_order):
"""Extracts all n-grams up to a given maximum order from an input segment.
Args:
segment: text segment from which n-grams will be extracted.
    max_order: maximum length in tokens of the n-grams returned by this method.
Returns:
The Counter containing all n-grams up to max_order in segment
with a count of how many times each n-gram occurred.
"""
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i + order])
ngram_counts[ngram] += 1
return ngram_counts
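# Worked example (sketch): for segment ["a", "b", "a"] and max_order=2 the
# counter above is {("a",): 2, ("b",): 1, ("a", "b"): 1, ("b", "a"): 1}.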
def compute_bleu(reference_corpus,
translation_corpus,
max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each reference
should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation should
be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams(references, max_order)
translation_ngram_counts = _get_ngrams(translations, max_order)
overlap = dict((ngram, min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) -
1] += translation_ngram_counts[ngram]
precisions = [0] * max_order
smooth = 1.0
for i in range(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = matches_by_order[i] / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = matches_by_order[i] / possible_matches_by_order[i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
if not reference_length:
bp = 1.0
else:
ratio = translation_length / reference_length
if ratio <= 0.0:
bp = 0.0
elif ratio >= 1.0:
bp = 1.0
else:
bp = math.exp(1 - 1. / ratio)
bleu = geo_mean * bp
return np.float32(bleu)
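# Sanity sketch (hypothetical inputs): a translation identical to its reference
# gets every non-empty precision equal to 1.0 and a brevity penalty of 1.0, so
# compute_bleu([["the", "cat", "sat"]], [["the", "cat", "sat"]]) returns 1.0.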
class UnicodeRegex(object):
"""Ad-hoc hack to recognize all punctuation and symbols."""
def __init__(self):
punctuation = self.property_chars("P")
self.nondigit_punct_re = re.compile(r"([^\d])([" + punctuation + r"])")
self.punct_nondigit_re = re.compile(r"([" + punctuation + r"])([^\d])")
self.symbol_chars = set()
for char in self.property_chars("S"):
self.symbol_chars.add(char)
def add_spaces_around_symbols(self, string):
uchars = []
for uchar in string:
if uchar in self.symbol_chars:
uchars.append(" %s " % uchar)
else:
uchars.append(uchar)
return "".join(uchars)
def property_chars(self, prefix):
return "".join(
six.unichr(x)
for x in range(sys.maxunicode)
if unicodedata.category(six.unichr(x)).startswith(prefix))
uregex = UnicodeRegex()
def bleu_tokenize(string):
"""Tokenize a string following the official BLEU implementation.
Args:
string: the input string
Returns:
a list of tokens
"""
string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string)
string = uregex.punct_nondigit_re.sub(r" \1 \2", string)
string = uregex.add_spaces_around_symbols(string)
return string.split()
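# Example (sketch): bleu_tokenize("hi, there.") -> ["hi", ",", "there", "."];
# punctuation adjacent to non-digits is split out as its own token.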
def bleu_wrapper(ref_lines, hyp_lines, case_sensitive=False):
"""Compute BLEU for two files (reference and hypothesis translation)."""
start_time = time.time()
if not case_sensitive:
ref_lines = [x.lower() for x in ref_lines]
hyp_lines = [x.lower() for x in hyp_lines]
ref_tokens = [bleu_tokenize(x) for x in ref_lines]
hyp_tokens = [bleu_tokenize(x) for x in hyp_lines]
ret = compute_bleu(ref_tokens, hyp_tokens)
end_time = time.time()
tf.logging.info("bleu_wrapper: %f", (end_time - start_time))
return ret
class MlPerfBleuMetric(metrics.BaseMetric):
  """Use the MLPerf reference implementation."""
def __init__(self, **kwargs):
self._ref_lines = []
self._hyp_lines = []
def Update(self, ref_str, hyp_str, eval_weight=1.0):
if eval_weight != 0.0:
self._ref_lines.append(ref_str)
self._hyp_lines.append(hyp_str)
@property
def value(self):
return bleu_wrapper(self._ref_lines, self._hyp_lines)
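# Usage sketch (hypothetical strings, outside of an eval loop):
#   metric = MlPerfBleuMetric()
#   metric.Update("the cat sat", "the cat sat")
#   metric.value  # approximately 1.0 for a perfect hypothesis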
|
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import string
import os
import logging
import re
import itertools
from collections import defaultdict
from collections import OrderedDict
# bsd licensed - pip install jinja2
from jinja2 import Environment, FileSystemLoader
# fsutils, , misc filesystem utils, internal
import fsutils
# validate, , validate various things, internal
import validate
Template_Dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates')
logger = logging.getLogger('cmakegen')
Ignore_Subdirs = set(('build','yotta_modules', 'yotta_targets', 'CMake'))
jinja_environment = Environment(loader=FileSystemLoader(Template_Dir), trim_blocks=True, lstrip_blocks=True)
def replaceBackslashes(s):
return s.replace('\\', '/')
def sanitizePreprocessorSymbol(sym):
return re.sub('[^a-zA-Z0-9]', '_', str(sym)).upper()
def sanitizeSymbol(sym):
return re.sub('[^a-zA-Z0-9]', '_', str(sym))
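# e.g. sanitizePreprocessorSymbol("my-module v1.2") -> "MY_MODULE_V1_2"
#      sanitizeSymbol("my-module v1.2")             -> "my_module_v1_2"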
jinja_environment.filters['replaceBackslashes'] = replaceBackslashes
jinja_environment.filters['sanitizePreprocessorSymbol'] = sanitizePreprocessorSymbol
jinja_environment.globals['list'] = list
class SourceFile(object):
def __init__(self, fullpath, relpath, lang):
super(SourceFile, self).__init__()
self.fullpath = fullpath
self.relpath = relpath
self.lang = lang
def __repr__(self):
return self.fullpath
class CMakeGen(object):
def __init__(self, directory, target):
super(CMakeGen, self).__init__()
self.buildroot = directory
logger.info("generate for target: %s" % target)
self.target = target
self.config_include_file = None
self.build_info_include_file = None
self.build_uuid = None
def _writeFile(self, path, contents):
dirname = os.path.dirname(path)
fsutils.mkDirP(dirname)
self.writeIfDifferent(path, contents)
def generateRecursive(self, component, all_components, builddir=None, modbuilddir=None, processed_components=None, application=None):
''' generate top-level CMakeLists for this component and its
dependencies: the CMakeLists are all generated in self.buildroot,
which MUST be out-of-source
!!! NOTE: experimenting with a slightly different way of doing
things here, this function is a generator that yields any errors
produced, so the correct use is:
for error in gen.generateRecursive(...):
print(error)
'''
if builddir is None:
builddir = self.buildroot
if modbuilddir is None:
modbuilddir = os.path.join(builddir, 'ym')
if processed_components is None:
processed_components = dict()
if not self.target:
yield 'Target "%s" is not a valid build target' % self.target
toplevel = not len(processed_components)
logger.debug('generate build files: %s (target=%s)' % (component, self.target))
# because of the way c-family language includes work we need to put the
# public header directories of all components that this component
# depends on (directly OR indirectly) into the search path, which means
# we need to first enumerate all the direct and indirect dependencies
recursive_deps = component.getDependenciesRecursive(
available_components = all_components,
target = self.target,
available_only = True,
test = True
)
dependencies = component.getDependencies(
all_components,
target = self.target,
available_only = True,
test = True
)
for name, dep in dependencies.items():
# if dep is a test dependency, then it might not be required (if
# we're not building tests). We don't actually know at this point
if not dep:
if dep.isTestDependency():
logger.debug('Test dependency "%s" of "%s" is not installed.' % (name, component))
else:
yield 'Required dependency "%s" of "%s" is not installed.' % (name, component)
# ensure this component is assumed to have been installed before we
# check for its dependencies, in case it has a circular dependency on
# itself
processed_components[component.getName()] = component
new_dependencies = OrderedDict([(name,c) for name,c in dependencies.items() if c and not name in processed_components])
self.generate(builddir, modbuilddir, component, new_dependencies, dependencies, recursive_deps, application, toplevel)
logger.debug('recursive deps of %s:' % component)
for d in recursive_deps.values():
logger.debug(' %s' % d)
processed_components.update(new_dependencies)
for name, c in new_dependencies.items():
for error in self.generateRecursive(
c, all_components, os.path.join(modbuilddir, name), modbuilddir, processed_components, application=application
):
yield error
def checkStandardSourceDir(self, dirname, component):
err = validate.sourceDirValidationError(dirname, component.getName())
if err:
logger.warn(err)
def _listSubDirectories(self, component):
''' return: {
manual: [list of subdirectories with manual CMakeLists],
auto: [list of pairs: (subdirectories name to autogenerate, a list of source files in that dir)],
bin: {dictionary of subdirectory name to binary name},
test: [list of directories that build tests]
resource: [list of directories that contain resources]
}
'''
manual_subdirs = []
auto_subdirs = []
        bin_subdirs = {os.path.normpath(x): y for x, y in component.getBinaries().items()}
test_subdirs = []
resource_subdirs = []
for f in sorted(os.listdir(component.path)):
if f in Ignore_Subdirs or f.startswith('.') or f.startswith('_'):
continue
if os.path.isfile(os.path.join(component.path, f, 'CMakeLists.txt')):
self.checkStandardSourceDir(f, component)
# if the subdirectory has a CMakeLists.txt in it, then use that
manual_subdirs.append(f)
# tests only supported in the `test` directory for now
if f in ('test',):
test_subdirs.append(f)
elif f in ('source', 'test') or os.path.normpath(f) in bin_subdirs:
# otherwise, if the directory has source files, generate a
# CMakeLists in the corresponding temporary directory, and add
# that.
# For now we only do this for the source and test directories -
# in theory we could do others
sources = self.containsSourceFiles(os.path.join(component.path, f), component)
if sources:
auto_subdirs.append((f, sources))
# tests only supported in the `test` directory for now
if f in ('test',):
test_subdirs.append(f)
            elif f in ('resource',):
resource_subdirs.append(os.path.join(component.path, f))
elif f.lower() in ('source', 'src', 'test', 'resource'):
self.checkStandardSourceDir(f, component)
return {
"manual": manual_subdirs,
"auto": auto_subdirs,
"bin": bin_subdirs,
"test": test_subdirs,
"resource": resource_subdirs
}
def _definitionsForConfig(self, config, key_path=None):
if key_path is None:
key_path = list()
key_prefix = '_'.join([sanitizePreprocessorSymbol(x) for x in key_path])
r = []
if len(key_prefix):
r.append((key_prefix,None))
for (k, v) in config.items():
if isinstance(v, dict):
r += self._definitionsForConfig(v, key_path + [k])
else:
# Don't validate the value here (we wouldn't know where an
# invalid value came from, so the error message would be
# unhelpful) - the target schema should validate values, or if
# that isn't possible then the target should check when loading
if isinstance(v, bool):
# convert bool to 1/0, since we can't know the availability
# of a C bool type
v = 1 if v else 0
r.append(('%s_%s' % (key_prefix, sanitizePreprocessorSymbol(k)), v))
return r
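    # Illustrative sketch (hypothetical config): with key_path ['YOTTA', 'CFG'],
    # a merged config such as {"hardware": {"uart": {"baud": 115200}}, "debug": True}
    # yields definitions along the lines of
    #   ('YOTTA_CFG', None), ('YOTTA_CFG_HARDWARE', None),
    #   ('YOTTA_CFG_HARDWARE_UART', None), ('YOTTA_CFG_HARDWARE_UART_BAUD', 115200),
    #   ('YOTTA_CFG_DEBUG', 1)
    # in the dict's iteration order.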
def getConfigData(self, all_dependencies, component, builddir, build_info_header_path):
''' returns (path_to_config_header, cmake_set_definitions) '''
add_defs_header = ''
set_definitions = ''
# !!! backwards-compatible "TARGET_LIKE" definitions for the top-level
# of the config. NB: THESE WILL GO AWAY
definitions = []
definitions.append(('TARGET', sanitizePreprocessorSymbol(self.target.getName())))
definitions.append(('TARGET_LIKE_%s' % sanitizePreprocessorSymbol(self.target.getName()),None))
# make the path to the build-info header available both to CMake and
# in the preprocessor:
full_build_info_header_path = replaceBackslashes(os.path.abspath(build_info_header_path))
logger.debug('build info header include path: "%s"', full_build_info_header_path)
definitions.append(('YOTTA_BUILD_INFO_HEADER', '"'+full_build_info_header_path+'"'))
for target in self.target.getSimilarTo_Deprecated():
if '*' not in target:
definitions.append(('TARGET_LIKE_%s' % sanitizePreprocessorSymbol(target),None))
logger.debug('target configuration data: %s', self.target.getMergedConfig())
definitions += self._definitionsForConfig(self.target.getMergedConfig(), ['YOTTA', 'CFG'])
add_defs_header += '// yotta config data (including backwards-compatible definitions)\n'
for k, v in definitions:
if v is not None:
add_defs_header += '#define %s %s\n' % (k, v)
set_definitions += 'set(%s %s)\n' % (k, v)
else:
add_defs_header += '#define %s\n' % k
set_definitions += 'set(%s TRUE)\n' % k
add_defs_header += '\n// version definitions\n'
for dep in list(all_dependencies.values()) + [component]:
add_defs_header += "#define YOTTA_%s_VERSION_STRING \"%s\"\n" % (sanitizePreprocessorSymbol(dep.getName()), str(dep.getVersion()))
add_defs_header += "#define YOTTA_%s_VERSION_MAJOR %d\n" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().major())
add_defs_header += "#define YOTTA_%s_VERSION_MINOR %d\n" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().minor())
add_defs_header += "#define YOTTA_%s_VERSION_PATCH %d\n" % (sanitizePreprocessorSymbol(dep.getName()), dep.getVersion().patch())
# use -include <definitions header> instead of lots of separate
# defines... this is compiler specific, but currently testing it
# out for gcc-compatible compilers only:
config_include_file = os.path.join(builddir, 'yotta_config.h')
self._writeFile(
config_include_file,
'#ifndef __YOTTA_CONFIG_H__\n'+
'#define __YOTTA_CONFIG_H__\n'+
add_defs_header+
'#endif // ndef __YOTTA_CONFIG_H__\n'
)
return (config_include_file, set_definitions)
def getBuildInfo(self, sourcedir, builddir):
''' Write the build info header file, and return (path_to_written_header, set_cmake_definitions) '''
cmake_defs = ''
preproc_defs = '// yotta build info, #include YOTTA_BUILD_INFO_HEADER to access\n'
# standard library modules
import datetime
# vcs, , represent version controlled directories, internal
import vcs
now = datetime.datetime.utcnow()
vcs = vcs.getVCS(sourcedir)
if self.build_uuid is None:
import uuid
self.build_uuid = uuid.uuid4()
definitions = [
('YOTTA_BUILD_YEAR', now.year, 'UTC year'),
('YOTTA_BUILD_MONTH', now.month, 'UTC month 1-12'),
('YOTTA_BUILD_DAY', now.day, 'UTC day 1-31'),
('YOTTA_BUILD_HOUR', now.hour, 'UTC hour 0-24'),
('YOTTA_BUILD_MINUTE', now.minute, 'UTC minute 0-59'),
('YOTTA_BUILD_SECOND', now.second, 'UTC second 0-61'),
('YOTTA_BUILD_UUID', self.build_uuid, 'unique random UUID for each build'),
]
if vcs is not None:
definitions += [
                ('YOTTA_BUILD_VCS_ID', vcs.getCommitId(), 'git or mercurial hash'),
                # assumes the vcs wrapper exposes isClean()
                ('YOTTA_BUILD_VCS_CLEAN', (1 if vcs.isClean() else 0), 'evaluates true if the version control system was clean, otherwise false')
]
for d in definitions:
preproc_defs += '#define %s %s // %s\n' % d
cmake_defs += 'set(%s "%s") # %s\n' % d
buildinfo_include_file = os.path.join(builddir, 'yotta_build_info.h')
self._writeFile(
buildinfo_include_file,
'#ifndef __YOTTA_BUILD_INFO_H__\n'+
'#define __YOTTA_BUILD_INFO_H__\n'+
preproc_defs+
'#endif // ndef __YOTTA_BUILD_INFO_H__\n'
)
return (buildinfo_include_file, cmake_defs)
def generate(
self, builddir, modbuilddir, component, active_dependencies, immediate_dependencies, all_dependencies, application, toplevel
):
''' active_dependencies is the dictionary of components that need to be
built for this component, but will not already have been built for
another component.
'''
set_definitions = ''
if self.build_info_include_file is None:
assert(toplevel)
self.build_info_include_file, build_info_definitions = self.getBuildInfo(component.path, builddir)
set_definitions += build_info_definitions
if self.config_include_file is None:
self.config_include_file, config_definitions = self.getConfigData(all_dependencies, component, builddir, self.build_info_include_file)
set_definitions += config_definitions
include_root_dirs = ''
if application is not None and component is not application:
include_root_dirs += 'include_directories("%s")\n' % replaceBackslashes(application.path)
include_sys_dirs = ''
include_other_dirs = ''
for name, c in itertools.chain(((component.getName(), component),), all_dependencies.items()):
if c is not component and c.isTestDependency():
continue
include_root_dirs += 'include_directories("%s")\n' % replaceBackslashes(c.path)
dep_sys_include_dirs = c.getExtraSysIncludes()
for d in dep_sys_include_dirs:
include_sys_dirs += 'include_directories(SYSTEM "%s")\n' % replaceBackslashes(os.path.join(c.path, d))
dep_extra_include_dirs = c.getExtraIncludes()
for d in dep_extra_include_dirs:
include_other_dirs += 'include_directories("%s")\n' % replaceBackslashes(os.path.join(c.path, d))
add_depend_subdirs = ''
for name, c in active_dependencies.items():
depend_subdir = replaceBackslashes(os.path.join(modbuilddir, name))
add_depend_subdirs += 'add_subdirectory("%s" "%s")\n' % (
depend_subdir, depend_subdir
)
delegate_to_existing = None
delegate_build_dir = None
if os.path.isfile(os.path.join(component.path, 'CMakeLists.txt')):
delegate_to_existing = component.path
add_own_subdirs = []
logger.debug("delegate to build dir: %s", builddir)
delegate_build_dir = os.path.join(builddir, 'existing')
else:
subdirs = self._listSubDirectories(component)
manual_subdirs = subdirs['manual']
autogen_subdirs = subdirs['auto']
binary_subdirs = subdirs['bin']
test_subdirs = subdirs['test']
resource_subdirs = subdirs['resource']
add_own_subdirs = []
for f in manual_subdirs:
if os.path.isfile(os.path.join(component.path, f, 'CMakeLists.txt')):
add_own_subdirs.append(
(os.path.join(component.path, f), os.path.join(builddir, f))
)
# names of all directories at this level with stuff in: used to figure
# out what to link automatically
all_subdirs = manual_subdirs + [x[0] for x in autogen_subdirs]
for f, source_files in autogen_subdirs:
if f in binary_subdirs:
exe_name = binary_subdirs[f]
else:
exe_name = None
if f in test_subdirs:
# if this module is a test dependency, then don't recurse
# to building its own tests.
if component.isTestDependency():
continue
self.generateTestDirList(
builddir, f, source_files, component, immediate_dependencies, toplevel=toplevel
)
else:
self.generateSubDirList(
builddir, f, source_files, component, all_subdirs,
immediate_dependencies, exe_name, resource_subdirs
)
add_own_subdirs.append(
(os.path.join(builddir, f), os.path.join(builddir, f))
)
# from now on, completely forget that this component had any tests
# if it is itself a test dependency:
if component.isTestDependency():
test_subdirs = []
# if we're not building anything other than tests, then we need to
# generate a dummy library so that this component can still be linked
# against
if len(add_own_subdirs) <= len(test_subdirs):
add_own_subdirs.append(self.createDummyLib(
component, builddir, [x[0] for x in immediate_dependencies.items() if not x[1].isTestDependency()]
))
# generate the top-level toolchain file:
template = jinja_environment.get_template('toolchain.cmake')
file_contents = template.render({
# toolchain files are provided in hierarchy
# order, but the template needs them in reverse
# order (base-first):
"toolchain_files": reversed(self.target.getToolchainFiles())
})
toolchain_file_path = os.path.join(builddir, 'toolchain.cmake')
self._writeFile(toolchain_file_path, file_contents)
# generate the top-level CMakeLists.txt
template = jinja_environment.get_template('base_CMakeLists.txt')
file_contents = template.render({
"toplevel": toplevel,
"target_name": self.target.getName(),
"set_definitions": set_definitions,
"toolchain_file": toolchain_file_path,
"component": component,
"include_root_dirs": include_root_dirs,
"include_sys_dirs": include_sys_dirs,
"include_other_dirs": include_other_dirs,
"add_depend_subdirs": add_depend_subdirs,
"add_own_subdirs": add_own_subdirs,
"config_include_file": self.config_include_file,
"delegate_to": delegate_to_existing,
"delegate_build_dir": delegate_build_dir,
"active_dependencies": active_dependencies
})
self._writeFile(os.path.join(builddir, 'CMakeLists.txt'), file_contents)
def createDummyLib(self, component, builddir, link_dependencies):
safe_name = sanitizeSymbol(component.getName())
dummy_dirname = 'yotta_dummy_lib_%s' % safe_name
dummy_cfile_name = 'dummy.c'
logger.debug("create dummy lib: %s, %s, %s" % (safe_name, dummy_dirname, dummy_cfile_name))
dummy_template = jinja_environment.get_template('dummy_CMakeLists.txt')
dummy_cmakelists = dummy_template.render({
"cfile_name": dummy_cfile_name,
"libname": component.getName(),
"link_dependencies": link_dependencies
})
self._writeFile(os.path.join(builddir, dummy_dirname, "CMakeLists.txt"), dummy_cmakelists)
dummy_cfile = "void __yotta_dummy_lib_symbol_%s(){}\n" % safe_name
self._writeFile(os.path.join(builddir, dummy_dirname, dummy_cfile_name), dummy_cfile)
return (os.path.join(builddir, dummy_dirname), os.path.join(builddir, dummy_dirname))
def writeIfDifferent(self, fname, contents):
try:
with open(fname, "r+") as f:
current_contents = f.read()
if current_contents != contents:
f.seek(0)
f.write(contents)
f.truncate()
except IOError:
with open(fname, "w") as f:
f.write(contents)
def generateTestDirList(self, builddir, dirname, source_files, component, immediate_dependencies, toplevel=False):
logger.debug('generate CMakeLists.txt for directory: %s' % os.path.join(component.path, dirname))
link_dependencies = [x for x in immediate_dependencies]
fname = os.path.join(builddir, dirname, 'CMakeLists.txt')
# group the list of source files by subdirectory: generate one test for
# each subdirectory, and one test for each file at the top level
subdirs = defaultdict(list)
toplevel_srcs = []
for f in source_files:
if f.lang in ('c', 'cpp', 'objc', 's'):
subrelpath = os.path.relpath(f.relpath, dirname)
subdir = fsutils.fullySplitPath(subrelpath)[0]
if subdir and subdir != subrelpath:
subdirs[subdir].append(f)
else:
toplevel_srcs.append(f)
tests = []
for f in toplevel_srcs:
object_name = '%s-test-%s' % (
component.getName(), os.path.basename(os.path.splitext(str(f))[0]).lower()
)
tests.append([[str(f)], object_name, [f.lang]])
for subdirname, sources in sorted(subdirs.items(), key=lambda x: x[0]):
object_name = '%s-test-%s' % (
component.getName(), fsutils.fullySplitPath(subdirname)[0].lower()
)
tests.append([[str(f) for f in sources], object_name, [f.lang for f in sources]])
# link tests against the main executable
link_dependencies.append(component.getName())
# Find cmake files
cmake_files = []
for root, dires, files in os.walk(os.path.join(component.path, dirname)):
for f in files:
name, ext = os.path.splitext(f)
if ext.lower() == '.cmake':
cmake_files.append(os.path.join(root, f))
test_template = jinja_environment.get_template('test_CMakeLists.txt')
file_contents = test_template.render({
'source_directory':os.path.join(component.path, dirname),
'tests':tests,
'link_dependencies':link_dependencies,
'cmake_files': cmake_files,
'exclude_from_all': (not toplevel),
'test_dependencies': [x[1] for x in immediate_dependencies.items() if x[1].isTestDependency()]
})
self._writeFile(fname, file_contents)
def generateSubDirList(self, builddir, dirname, source_files, component, all_subdirs, immediate_dependencies, executable_name, resource_subdirs):
logger.debug('generate CMakeLists.txt for directory: %s' % os.path.join(component.path, dirname))
link_dependencies = [x[0] for x in immediate_dependencies.items() if not x[1].isTestDependency()]
fname = os.path.join(builddir, dirname, 'CMakeLists.txt')
if dirname == 'source' or executable_name:
if executable_name:
object_name = executable_name
executable = True
else:
object_name = component.getName()
executable = False
# if we're building the main library, or an executable for this
# component, then we should link against all the other directories
# containing cmakelists:
link_dependencies += [x for x in all_subdirs if x not in ('source', 'test', dirname)]
# Find resource files
resource_files = []
for f in resource_subdirs:
for root, dires, files in os.walk(f):
if root.endswith(".xcassets") or root.endswith(".bundle"):
resource_files.append(root)
del dires[:]
else:
for f in files:
resource_files.append(os.path.join(root, f))
# Find cmake files
cmake_files = []
for root, dires, files in os.walk(os.path.join(component.path, dirname)):
for f in files:
name, ext = os.path.splitext(f)
if ext.lower() == '.cmake':
cmake_files.append(os.path.join(root, f))
subdir_template = jinja_environment.get_template('subdir_CMakeLists.txt')
file_contents = subdir_template.render({
'source_directory': os.path.join(component.path, dirname),
"config_include_file": self.config_include_file,
'executable': executable,
'file_names': [str(f) for f in source_files],
'object_name': object_name,
'link_dependencies': link_dependencies,
'languages': set(f.lang for f in source_files),
'source_files': set((f.fullpath, f.lang) for f in source_files),
'resource_files': resource_files,
'cmake_files': cmake_files
})
else:
raise Exception('auto CMakeLists for non-source/test directories is not supported')
self._writeFile(fname, file_contents)
def containsSourceFiles(self, directory, component):
c_exts = set(('.c',))
cpp_exts = set(('.cpp','.cc','.cxx'))
asm_exts = set(('.s',))
objc_exts = set(('.m', '.mm'))
header_exts = set(('.h',))
sources = []
for root, dires, files in os.walk(directory):
for f in sorted(files):
name, ext = os.path.splitext(f)
ext = ext.lower()
fullpath = os.path.join(root, f)
relpath = os.path.relpath(fullpath, component.path)
if component.ignores(relpath):
continue
if ext in c_exts:
sources.append(SourceFile(fullpath, relpath, 'c'))
elif ext in cpp_exts:
sources.append(SourceFile(fullpath, relpath, 'cpp'))
elif ext in asm_exts:
sources.append(SourceFile(fullpath, relpath, 's'))
elif ext in objc_exts:
sources.append(SourceFile(fullpath, relpath, 'objc'))
elif ext in header_exts:
sources.append(SourceFile(fullpath, relpath, 'header'))
return sources
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
#
# Copyright 2021 The On Combining Bags to Better Learn from
# Label Proportions Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generating Training Bags."""
import os
import pathlib
import shutil
import bagsformethod5
import clusteringbagsmethodseeded
import numpy as np
import singlestraddlebag
import twostraddlebags
rng = np.random.default_rng(73652603)
names_list = ["Heart", "Ionosphere", "Australian"]
n_tot_features_list = [14, 35, 15]
path_to_root_data_dir = (pathlib.Path(__file__).parent /
"../../Data/").resolve()
root_dir = str(path_to_root_data_dir) + "/"
for clustering_bags_method in range(1, 9):
for index_name, name in enumerate(names_list):
for s in range(1, 6): # Number of Kfold operations
Folddirectory = root_dir + name + "/" + "Fold_" + str(s) + "/"
for splitnumber in range(1, 6):
splitdir = Folddirectory + "Split_" + str(splitnumber) + "/"
trainfile = splitdir + name + "_" + str(s) + "_" + str(
splitnumber) + "-train.csv"
if clustering_bags_method == 5:
random_seed = rng.integers(low=1000000, size=1)[0]
numpy_seed = rng.integers(low=1000000, size=1)[0]
continue
cluster_dir = splitdir + "ClusterBags_" + str(
clustering_bags_method) + "/"
directory_to_read = splitdir + "ClusterBags_" + str(2) + "/"
if clustering_bags_method > 5:
cluster_dir = splitdir + "ClusterBags_" + str(clustering_bags_method -
1) + "/"
if os.path.exists(cluster_dir):
shutil.rmtree(cluster_dir)
os.makedirs(os.path.dirname(cluster_dir), exist_ok=True)
print()
print()
print("For ", cluster_dir, " ***************")
if clustering_bags_method == 1:
clusteringbagsmethodseeded.makeclusterbags(
n_clusters=1,
head_inclusion_prob=0.1,
tail_inclusion_prob=0.1,
p_law_param=-1.66,
n_head=125,
n_tail=125,
cluster_bias=[1],
trainfile=trainfile,
cluster_dir=cluster_dir,
n_tot_features=n_tot_features_list[index_name],
option="normal",
random_seed=rng.integers(low=1000000, size=1)[0],
numpy_seed=rng.integers(low=1000000, size=1)[0],
kmeans_seed=rng.integers(low=1000000, size=1)[0])
elif clustering_bags_method == 2:
clusteringbagsmethodseeded.makeclusterbags(
n_clusters=3,
head_inclusion_prob=0.9,
tail_inclusion_prob=0.1,
p_law_param=-1.66,
n_head=40,
n_tail=40,
cluster_bias=[1, 1, 1],
trainfile=trainfile,
cluster_dir=cluster_dir,
n_tot_features=n_tot_features_list[index_name],
option="normal",
random_seed=rng.integers(low=1000000, size=1)[0],
numpy_seed=rng.integers(low=1000000, size=1)[0],
kmeans_seed=rng.integers(low=1000000, size=1)[0])
elif clustering_bags_method == 3:
clusteringbagsmethodseeded.makeclusterbags(
n_clusters=3,
head_inclusion_prob=0.9,
tail_inclusion_prob=0.1,
p_law_param=-1.66,
n_head=15,
n_tail=15,
cluster_bias=[1, 3, 5],
trainfile=trainfile,
cluster_dir=cluster_dir,
n_tot_features=n_tot_features_list[index_name],
option="normal",
directory_to_read=directory_to_read,
random_seed=rng.integers(low=1000000, size=1)[0],
numpy_seed=rng.integers(low=1000000, size=1)[0],
kmeans_seed=rng.integers(low=1000000, size=1)[0])
elif clustering_bags_method == 4:
clusteringbagsmethodseeded.makeclusterbags(
n_clusters=3,
head_inclusion_prob=-0.9,
tail_inclusion_prob=-0.1,
p_law_param=1.66,
n_head=15,
n_tail=15,
cluster_bias=[1, 3, 5],
trainfile=trainfile,
cluster_dir=cluster_dir,
n_tot_features=n_tot_features_list[index_name],
option="powerlaw",
directory_to_read=directory_to_read,
random_seed=rng.integers(low=1000000, size=1)[0],
numpy_seed=rng.integers(low=1000000, size=1)[0],
kmeans_seed=rng.integers(low=1000000, size=1)[0])
elif clustering_bags_method == 5:
bagsformethod5.makeonlybags(
n_clusters=3,
head_inclusion_powerlaw=[1.7, 1.6, 1.9],
tail_inclusion_powerlaw=[1.1, 1.2, 1.01],
p_law_param=-1.66,
n_head=15,
n_tail=15,
cluster_bias=[1, 3, 5],
trainfile=trainfile,
cluster_dir=cluster_dir,
n_tot_features=n_tot_features_list[index_name],
option="powerlaw",
directory_to_read=directory_to_read,
random_seed=rng.integers(low=1000000, size=1)[0],
numpy_seed=rng.integers(low=1000000, size=1)[0])
elif clustering_bags_method == 6:
singlestraddlebag.makeonlybagswithstraddle(
n_clusters=3,
straddle_inclusion=[0.2, 0.2, 0.2],
tail_inclusion=[0.6, 0.6, 0.6],
p_law_param=-1.66,
trainfile=trainfile,
n_tail=60,
n_straddle=60,
cluster_dir=cluster_dir,
option="powerlaw",
directory_to_read=directory_to_read,
random_seed=rng.integers(low=1000000, size=1)[0],
numpy_seed=rng.integers(low=1000000, size=1)[0])
elif clustering_bags_method == 7:
singlestraddlebag.makeonlybagswithstraddle(
n_clusters=3,
straddle_inclusion=[0.4, 0.8, 0.8],
tail_inclusion=[0.2, 0.2, 0.2],
p_law_param=-1.66,
trainfile=trainfile,
n_tail=60,
n_straddle=60,
cluster_dir=cluster_dir,
option="powerlaw",
directory_to_read=directory_to_read,
random_seed=rng.integers(low=1000000, size=1)[0],
numpy_seed=rng.integers(low=1000000, size=1)[0])
elif clustering_bags_method == 8:
twostraddlebags.makeonlybagswithtwostraddle(
n_clusters=3,
straddle_inclusion_first=[0.2, 0.2],
straddle_inclusion_second=[0.6, 0.6],
tail_inclusion=[0.2, 0.2, 0.2],
p_law_param=-1.66,
trainfile=trainfile,
n_tail=50,
n_straddle=50,
cluster_dir=cluster_dir,
option="powerlaw",
directory_to_read=directory_to_read,
random_seed=rng.integers(low=1000000, size=1)[0],
numpy_seed=rng.integers(low=1000000, size=1)[0])
|
|
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_volume
short_description: Create/Delete Cinder Volumes
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Create or Remove cinder block storage volumes
options:
size:
description:
- Size of volume in GB. This parameter is required when the
I(state) parameter is 'present'.
required: false
default: None
display_name:
description:
- Name of volume
required: true
display_description:
description:
- String describing the volume
required: false
default: None
volume_type:
description:
- Volume type for volume
required: false
default: None
image:
description:
- Image name or id for boot from volume
required: false
default: None
snapshot_id:
description:
- Volume snapshot id to create from
required: false
default: None
volume:
description:
- Volume name or id to create from
required: false
default: None
version_added: "2.3"
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
scheduler_hints:
description:
- Scheduler hints passed to volume API in form of dict
required: false
default: None
version_added: "2.4"
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Creates a new volume
- name: create a volume
hosts: localhost
tasks:
- name: create 40g test volume
os_volume:
state: present
cloud: mordred
availability_zone: az2
size: 40
display_name: test_volume
scheduler_hints:
same_host: 243e8d3c-8f47-4a61-93d6-7215c344b0c0
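# Removes a volume (illustrative sketch; cloud and volume names are hypothetical)
- name: delete a volume
  hosts: localhost
  tasks:
    - name: remove test_volume
      os_volume:
        state: absent
        cloud: mordred
        display_name: test_volume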
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _present_volume(module, cloud):
if cloud.volume_exists(module.params['display_name']):
v = cloud.get_volume(module.params['display_name'])
module.exit_json(changed=False, id=v['id'], volume=v)
volume_args = dict(
size=module.params['size'],
volume_type=module.params['volume_type'],
display_name=module.params['display_name'],
display_description=module.params['display_description'],
snapshot_id=module.params['snapshot_id'],
availability_zone=module.params['availability_zone'],
)
if module.params['image']:
image_id = cloud.get_image_id(module.params['image'])
volume_args['imageRef'] = image_id
if module.params['volume']:
volume_id = cloud.get_volume_id(module.params['volume'])
if not volume_id:
module.fail_json(msg="Failed to find volume '%s'" % module.params['volume'])
volume_args['source_volid'] = volume_id
if module.params['scheduler_hints']:
volume_args['scheduler_hints'] = module.params['scheduler_hints']
volume = cloud.create_volume(
wait=module.params['wait'], timeout=module.params['timeout'],
**volume_args)
module.exit_json(changed=True, id=volume['id'], volume=volume)
def _absent_volume(module, cloud):
changed = False
if cloud.volume_exists(module.params['display_name']):
try:
changed = cloud.delete_volume(name_or_id=module.params['display_name'],
wait=module.params['wait'],
timeout=module.params['timeout'])
except shade.OpenStackCloudTimeout:
module.exit_json(changed=changed)
module.exit_json(changed=changed)
def main():
argument_spec = openstack_full_argument_spec(
size=dict(default=None),
volume_type=dict(default=None),
display_name=dict(required=True, aliases=['name']),
display_description=dict(default=None, aliases=['description']),
image=dict(default=None),
snapshot_id=dict(default=None),
volume=dict(default=None),
state=dict(default='present', choices=['absent', 'present']),
scheduler_hints=dict(default=None, type='dict')
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['image', 'snapshot_id', 'volume'],
],
)
module = AnsibleModule(argument_spec=argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
if (module.params['scheduler_hints'] and
StrictVersion(shade.__version__) < StrictVersion('1.22')):
        module.fail_json(msg="To utilize scheduler_hints, the installed version of "
                             "the shade library MUST be >= 1.22")
state = module.params['state']
if state == 'present' and not module.params['size']:
module.fail_json(msg="Size is required when state is 'present'")
try:
cloud = shade.openstack_cloud(**module.params)
if state == 'present':
_present_volume(module, cloud)
if state == 'absent':
_absent_volume(module, cloud)
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
|
|
###########################################################################
##
## Copyright (C) 2006-2010 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at [email protected].
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
################################################################################
# VTK-SNL Package for VisTrails (Sandia National Laboratories)
################################################################################
from core.bundles import py_import
import vtksnl
from core.utils import all, any, VistrailsInternalError, InstanceObject
from core.debug import debug
from core.modules.basic_modules import Integer, Float, String, File, \
Variant, Color
from core.modules.module_registry import get_module_registry
from core.modules.vistrails_module import new_module, ModuleError
from base_module import vtkBaseModule
from class_tree import ClassTree
from vtk_parser import VTKMethodParser
import re
import os.path
from itertools import izip
from vtkcell import VTKCell
import tf_widget
import offscreen
import fix_classes
import inspectors
from hasher import vtk_hasher
import sys
################################################################################
# filter some deprecation warnings coming from the fact that vtk calls
# range() with float parameters
import warnings
warnings.filterwarnings("ignore",
message="integer argument expected, got float")
################################################################################
if tuple(vtksnl.vtkVersion().GetVTKVersion().split('.')) < ('5', '0', '4'):
def get_description_class(klass):
"""Because sometimes we need to patch VTK classes, the klass that
has the methods is different than the klass we want to
instantiate. get_description_class makes sure that for patched
classes we get the correct one."""
try:
return fix_classes.description[klass]
except KeyError:
return klass
else:
# On VTK 5.0.4, we use the id of the class to hash, because it
# seems that VTK hasn't implemented hash() correctly for their
# classes.
def get_description_class(klass):
"""Because sometimes we need to patch VTK classes, the klass that
has the methods is different than the klass we want to
instantiate. get_description_class makes sure that for patched
classes we get the correct one."""
try:
return fix_classes.description[id(klass)]
except KeyError:
return klass
parser = VTKMethodParser()
typeMapDict = {'int': Integer,
'long': Integer,
'float': Float,
'char*': String,
'char *': String,
'string': String,
'char': String,
'const char*': String,
'const char *': String}
typeMapDictValues = [Integer, Float, String]
file_name_pattern = re.compile('.*FileName$')
set_file_name_pattern = re.compile('Set.*FileName$')
def resolve_overloaded_name(name, ix, signatures):
# VTK supports static overloading, VisTrails does not. The
# solution is to check whether the current function has
# overloads and change the names appropriately.
if len(signatures) == 1:
return name
else:
return name + '_' + str(ix+1)
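# e.g. with two overloaded signatures, resolve_overloaded_name('SetValue', 0, sigs)
# -> 'SetValue_1' and resolve_overloaded_name('SetValue', 1, sigs) -> 'SetValue_2';
# with a single signature the name is returned unchanged.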
def typeMap(name, package=None):
""" typeMap(name: str) -> Module
Convert from C/C++ types into VisTrails Module type
"""
if package is None:
package = identifier
if type(name) == tuple:
return [typeMap(x, package) for x in name]
if name in typeMapDict:
return typeMapDict[name]
else:
registry = get_module_registry()
if not registry.has_descriptor_with_name(package, name):
return None
else:
return registry.get_descriptor_by_name(package,
name).module
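# e.g. typeMap('float') -> Float and typeMap(('int', 'char*')) -> [Integer, String];
# names missing from typeMapDict are looked up in the module registry and None is
# returned when no descriptor exists for them in the given package.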
def get_method_signature(method):
""" get_method_signature(method: vtkmethod) -> [ret, arg]
Re-wrap Prabu's method to increase performance
"""
doc = method.__doc__
tmptmp = doc.split('\n')
tmp = []
for l in tmptmp:
l = l.strip('\n \t')
if l.startswith('V.') or l.startswith('C++:'):
tmp.append(l)
else:
tmp[-1] = tmp[-1] + l
tmp.append('')
sig = []
pat = re.compile(r'\b')
# Remove all the C++ function signatures and V.<method_name> field
offset = 2+len(method.__name__)
for i in xrange(len(tmp)):
s = tmp[i]
if s=='': break
if i%2==0:
x = s.split('->')
arg = x[0].strip()[offset:]
if len(x) == 1: # No return value
ret = None
else:
ret = x[1].strip()
# Remove leading and trailing parens for arguments.
arg = arg[1:-1]
if not arg:
arg = None
if arg and arg[-1] == ')':
arg = arg + ','
# Now quote the args and eval them. Easy!
if ret and ret[:3]!='vtk':
try:
ret = eval(pat.sub('\"', ret))
except:
continue
if arg:
if arg.find('(')!=-1:
try:
arg = eval(pat.sub('\"', arg))
except:
continue
else:
arg = arg.split(', ')
if len(arg)>1:
arg = tuple(arg)
else:
arg = arg[0]
if type(arg) == str:
arg = [arg]
sig.append(([ret], arg))
return sig
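# Illustrative sketch (hypothetical VTK docstring): a method whose __doc__ is
#   "V.SetPoint(int, float, float, float)\nC++: void SetPoint(...)"
# parses to [([None], ('int', 'float', 'float', 'float'))] -- no return value and
# a tuple of argument type names.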
def prune_signatures(module, name, signatures, output=False):
"""prune_signatures tries to remove redundant signatures to reduce
overloading. It _mutates_ the given parameter.
It does this by performing several operations:
1) It compares a 'flattened' version of the types
against the other 'flattened' signatures. If any of them match, we
keep only the 'flatter' ones.
A 'flattened' signature is one where parameters are not inside a
tuple.
2) We explicitly forbid a few signatures based on modules and names
"""
# yeah, this is Omega(n^2) on the number of overloads. Who cares?
def flatten(type_):
if type_ is None:
return []
def convert(entry):
if type(entry) == tuple:
return list(entry)
else:
assert(type(entry) == str)
return [entry]
result = []
for entry in type_:
result.extend(convert(entry))
return result
flattened_entries = [flatten(sig[1]) for
sig in signatures]
    def hit_count(entry):
        # count how many of the flattened signatures match this parameter list
        result = 0
        for other in flattened_entries:
            if other == entry:
                result += 1
        return result
hits = [hit_count(entry) for entry in flattened_entries]
def forbidden(flattened, hit_count, original):
if (issubclass(get_description_class(module.vtkClass), vtksnl.vtk3DWidget) and
name == 'PlaceWidget' and
flattened == []):
return True
# We forbid this because addPorts hardcodes this but
# SetInputArrayToProcess is an exception for the InfoVis
# package
if (get_description_class(module.vtkClass) == vtksnl.vtkAlgorithm and
name!='SetInputArrayToProcess'):
return True
return False
# This is messy: a signature is only allowed if there's no
# explicit disallowing of it. Then, if it's not overloaded,
# it is also allowed. If it is overloaded and not the flattened
# version, it is pruned. If these are output ports, there can be
# no parameters.
def passes(flattened, hit_count, original):
if forbidden(flattened, hit_count, original):
return False
if hit_count == 1:
return True
if original[1] is None:
return True
if output and len(original[1]) > 0:
return False
if hit_count > 1 and len(original[1]) == len(flattened):
return True
return False
signatures[:] = [original for (flattened, hit_count, original)
in izip(flattened_entries,
hits,
signatures)
if passes(flattened, hit_count, original)]
#then we remove the duplicates, if necessary
unique_signatures = []
[unique_signatures.append(s) for s in signatures if not unique_signatures.count(s)]
signatures[:] = unique_signatures
disallowed_classes = set(
[
'vtkCriticalSection',
'vtkDataArraySelection',
'vtkDebugLeaks',
'vtkDirectory',
'vtkDynamicLoader',
'vtkFunctionParser',
'vtkGarbageCollector',
'vtkHeap',
'vtkInformationKey',
'vtkInstantiator',
'vtkLogLookupTable', # VTK: use vtkLookupTable.SetScaleToLog10() instead
'vtkMath',
'vtkModelMetadata',
'vtkMultiProcessController',
'vtkMutexLock',
'vtkOutputWindow',
'vtkPriorityQueue',
'vtkReferenceCount',
'vtkRenderWindowCollection',
'vtkRenderWindowInteractor',
'vtkTesting',
'vtkWindow',
'vtksnlVersion',
'vtkDiffFilter',
'vtkDocumentM3MetaData'
#'vtkXMLUtilities',
#'vtkXMLShader',
#'vtkXMLMaterialReader',
#'vtkXMLMaterial',
#'vtkXMLDataElement'
])
def is_class_allowed(module):
if module is None:
return False
try:
name = module.__name__
return not (name in disallowed_classes)
except AttributeError:
return True
def addAlgorithmPorts(module):
""" addAlgorithmPorts(module: Module) -> None
If module is a subclass of vtkAlgorithm, this function will add all
SetInputConnection([id],[port]) and GetOutputPort([id]) as
SetInputConnection{id}([port]) and GetOutputPort{id}.
"""
if issubclass(get_description_class(module.vtkClass), vtksnl.vtkAlgorithm):
if get_description_class(module.vtkClass)!=vtksnl.vtkStructuredPointsGeometryFilter:
# We try to instantiate the class here to get the number of
# ports and to avoid abstract classes
try:
instance = module.vtkClass()
except TypeError:
pass
else:
registry = get_module_registry()
des = registry.get_descriptor_by_name('edu.utah.sci.vistrails.vtksnl',
'vtkAlgorithmOutput')
for i in xrange(0,instance.GetNumberOfInputPorts()):
registry.add_input_port(module, 'SetInputConnection%d'%i,
des.module)
for i in xrange(0,instance.GetNumberOfOutputPorts()):
registry.add_output_port(module, 'GetOutputPort%d'%i,
des.module)
disallowed_set_get_ports = set(['ReferenceCount',
'InputConnection',
'OutputPort',
'Progress',
'ProgressText',
'InputArrayToProcess',
])
def addSetGetPorts(module, get_set_dict, delayed):
""" addSetGetPorts(module: Module, get_set_dict: dict) -> None
Convert all Setxxx methods of module into input ports and all Getxxx
methods of module into output ports
Keyword arguments:
module --- Module
get_set_dict --- the Set/Get method signatures returned by vtk_parser
"""
klass = get_description_class(module.vtkClass)
registry = get_module_registry()
for name in get_set_dict.iterkeys():
if name in disallowed_set_get_ports: continue
getterMethod = getattr(klass, 'Get%s'%name)
setterMethod = getattr(klass, 'Set%s'%name)
getterSig = get_method_signature(getterMethod)
setterSig = get_method_signature(setterMethod)
if len(getterSig) > 1:
prune_signatures(module, 'Get%s'%name, getterSig, output=True)
for order, getter in enumerate(getterSig):
if getter[1]:
debug("Can't handle getter %s (%s) of class %s: Needs input to "
"get output" % (order+1, name, klass))
continue
if len(getter[0]) != 1:
debug("Can't handle getter %s (%s) of class %s: More than a "
"single output" % (order+1, name, str(klass)))
continue
class_ = typeMap(getter[0][0])
if is_class_allowed(class_):
registry.add_output_port(module, 'Get'+name, class_, True)
if len(setterSig) > 1:
prune_signatures(module, 'Set%s'%name, setterSig)
for ix, setter in enumerate(setterSig):
if setter[1]==None: continue
n = resolve_overloaded_name('Set' + name, ix, setterSig)
if len(setter[1]) == 1 and is_class_allowed(typeMap(setter[1][0])):
registry.add_input_port(module, n,
typeMap(setter[1][0]),
setter[1][0] in typeMapDict)
else:
classes = [typeMap(i) for i in setter[1]]
if all(is_class_allowed(x) for x in classes):
registry.add_input_port(module, n, classes, True)
# Wrap SetFileNames for VisTrails file access
if file_name_pattern.match(name):
registry.add_input_port(module, 'Set' + name[:-4],
(File, 'input file'), False)
# Wrap SetRenderWindow for exporters
elif name == 'RenderWindow':
# cscheid 2008-07-11 This is messy: VTKCell isn't
# registered yet, so we can't use it as a port
# However, we can't register VTKCell before these either,
# because VTKCell requires vtkRenderer. The "right" way would
# be to add all modules first, then all ports. However, that would
# be too slow.
# Workaround: delay the addition of the port by storing
# the information in a list
if registry.has_module('edu.utah.sci.vistrails.spreadsheet',
'SpreadsheetCell'):
delayed.add_input_port.append((module, 'SetVTKCell', VTKCell, False))
# Wrap color methods for VisTrails GUI facilities
elif name == 'DiffuseColor':
registry.add_input_port(module, 'SetDiffuseColorWidget',
(Color, 'color'), True)
elif name == 'Color':
registry.add_input_port(module, 'SetColorWidget',
(Color, 'color'), True)
elif name == 'AmbientColor':
registry.add_input_port(module, 'SetAmbientColorWidget',
(Color, 'color'), True)
elif name == 'SpecularColor':
registry.add_input_port(module, 'SetSpecularColorWidget',
(Color, 'color'), True)
elif name == 'EdgeColor':
registry.add_input_port(module, 'SetEdgeColorWidget',
(Color, 'color'), True)
elif name == 'Background' :
registry.add_input_port(module, 'SetBackgroundWidget',
(Color, 'color'), True)
elif name == 'Background2' :
registry.add_input_port(module, 'SetBackground2Widget',
(Color, 'color'), True)
disallowed_toggle_ports = set(['GlobalWarningDisplay',
'Debug',
])
def addTogglePorts(module, toggle_dict):
""" addTogglePorts(module: Module, toggle_dict: dict) -> None
Convert all xxxOn/Off methods of module into input ports
Keyword arguments:
module --- Module
toggle_dict --- the Toggle method signatures returned by vtk_parser
"""
registry = get_module_registry()
for name in toggle_dict.iterkeys():
if name in disallowed_toggle_ports:
continue
registry.add_input_port(module, name+'On', [], True)
registry.add_input_port(module, name+'Off', [], True)
disallowed_state_ports = set(['SetInputArrayToProcess'])
def addStatePorts(module, state_dict):
""" addStatePorts(module: Module, state_dict: dict) -> None
Convert all SetxxxToyyy methods of module into input ports
Keyword arguments:
module --- Module
state_dict --- the State method signatures returned by vtk_parser
"""
klass = get_description_class(module.vtkClass)
registry = get_module_registry()
for name in state_dict.iterkeys():
for mode in state_dict[name]:
# Creates the port Set foo to bar
field = 'Set'+name+'To'+mode[0]
if field in disallowed_state_ports:
continue
if not registry.has_input_port(module, field):
registry.add_input_port(module, field, [], True)
# Now create the port Set foo with parameter
if hasattr(klass, 'Set%s'%name):
setterMethod = getattr(klass, 'Set%s'%name)
setterSig = get_method_signature(setterMethod)
# if the signature looks like an enum, we'll skip it, it shouldn't
# be necessary
if len(setterSig) > 1:
prune_signatures(module, 'Set%s'%name, setterSig)
for ix, setter in enumerate(setterSig):
n = resolve_overloaded_name('Set' + name, ix, setterSig)
tm = typeMap(setter[1][0])
if len(setter[1]) == 1 and is_class_allowed(tm):
registry.add_input_port(module, n, tm,
setter[1][0] in typeMapDict)
else:
classes = [typeMap(i) for i in setter[1]]
if all(is_class_allowed(x) for x in classes):
registry.add_input_port(module, n, classes, True)
disallowed_other_ports = set(
[
'BreakOnError',
'DeepCopy',
'FastDelete',
'HasObserver',
'HasExecutive',
'INPUT_ARRAYS_TO_PROCESS',
'INPUT_CONNECTION',
'INPUT_IS_OPTIONAL',
'INPUT_IS_REPEATABLE',
'INPUT_PORT',
'INPUT_REQUIRED_DATA_TYPE',
'INPUT_REQUIRED_FIELDS',
'InvokeEvent',
'IsA',
'Modified',
'NewInstance',
'PrintRevisions',
'RemoveAllInputs',
'RemoveObserver',
'RemoveObservers',
'SafeDownCast',
# 'SetInputArrayToProcess',
'ShallowCopy',
'Update',
'UpdateInformation',
'UpdateProgress',
'UpdateWholeExtent',
])
force_not_optional_port = set(
['ApplyViewTheme',
])
def addOtherPorts(module, other_list):
""" addOtherPorts(module: Module, other_list: list) -> None
Convert all other ports such as Insert/Add.... into input/output
Keyword arguments:
module --- Module
other_dict --- any other method signatures that is not
Algorithm/SetGet/Toggle/State type
"""
klass = get_description_class(module.vtkClass)
registry = get_module_registry()
for name in other_list:
if name[:3] in ['Add','Set'] or name[:6]=='Insert':
if name in disallowed_other_ports:
continue
method = getattr(klass, name)
signatures = get_method_signature(method)
if len(signatures) > 1:
prune_signatures(module, name, signatures)
for (ix, sig) in enumerate(signatures):
([result], params) = sig
types = []
if params:
for p in params:
t = typeMap(p)
if not t:
types = None
break
else: types.append(t)
else:
types = [[]]
if types:
if not all(is_class_allowed(x) for x in types):
continue
n = resolve_overloaded_name(name, ix, signatures)
if len(types)<=1:
registry.add_input_port(module, n, types[0],
types[0] in typeMapDictValues)
else:
registry.add_input_port(module, n, types, True)
else:
if name in disallowed_other_ports:
continue
method = getattr(klass, name)
signatures = get_method_signature(method)
if len(signatures) > 1:
prune_signatures(module, name, signatures)
for (ix, sig) in enumerate(signatures):
([result], params) = sig
types = []
if params:
types = [typeMap(p) for p in params]
else:
types = []
if not all(is_class_allowed(x) for x in types):
continue
if types==[] or (result==None):
n = resolve_overloaded_name(name, ix, signatures)
registry.add_input_port(module, n, types,
not (n in force_not_optional_port))
disallowed_get_ports = set([
'GetClassName',
'GetErrorCode',
'GetNumberOfInputPorts',
'GetNumberOfOutputPorts',
'GetOutputPortInformation',
'GetTotalNumberOfInputConnections',
])
def addGetPorts(module, get_list):
klass = get_description_class(module.vtkClass)
registry = get_module_registry()
for name in get_list:
if name in disallowed_get_ports:
continue
method = getattr(klass, name)
signatures = get_method_signature(method)
if len(signatures) > 1:
prune_signatures(module, name, signatures, output=True)
for ix, getter in enumerate(signatures):
if getter[1] or len(getter[0]) > 1:
continue
class_ = typeMap(getter[0][0])
if is_class_allowed(class_):
if len(signatures) > 1:
n = name + "_" + str(ix+1)
else:
n = name
registry.add_output_port(module, n, class_, True)
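# Illustrative note (not from the original source): a zero-argument getter such as
# GetOutput() returning a vtkPolyData would be exposed as an output port named
# 'GetOutput' (or 'GetOutput_1', 'GetOutput_2', ... when the method is overloaded).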
def addPorts(module, delayed):
""" addPorts(module: VTK module inherited from Module,
delayed: object with add_input_port slot
) -> None
Search all metamethods of module and add appropriate ports
ports that cannot be added immediately should be appended to
the delayed object that is passed. see the SetRenderWindow cases.
"""
klass = get_description_class(module.vtkClass)
registry = get_module_registry()
registry.add_output_port(module, 'self', module)
parser.parse(klass)
addAlgorithmPorts(module)
addGetPorts(module, parser.get_get_methods())
addSetGetPorts(module, parser.get_get_set_methods(), delayed)
addTogglePorts(module, parser.get_toggle_methods())
addStatePorts(module, parser.get_state_methods())
addOtherPorts(module, parser.get_other_methods())
    # CVS version of VTK doesn't support AddInputConnection(vtkAlgorithmOutput)
if klass==vtksnl.vtkAlgorithm:
registry.add_input_port(module, 'AddInputConnection',
typeMap('vtkAlgorithmOutput'))
# vtkWriters have a custom File port
elif klass==vtksnl.vtkWriter:
registry.add_output_port(module, 'file',
typeMap('File','edu.utah.sci.vistrails.basic'))
elif klass==vtksnl.vtkImageWriter:
registry.add_output_port(module, 'file',
typeMap('File','edu.utah.sci.vistrails.basic'))
elif klass==vtksnl.vtkVolumeProperty:
registry.add_input_port(module, 'SetTransferFunction',
typeMap('TransferFunction'))
elif klass==vtksnl.vtkDataSet:
registry.add_input_port(module, 'SetPointData', typeMap('vtkPointData'))
        registry.add_input_port(module, 'SetCellData', typeMap('vtkCellData'))
elif klass==vtksnl.vtkCell:
registry.add_input_port(module, 'SetPointIds', typeMap('vtkIdList'))
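# Note on the 'delayed' argument (added comment, not from the original source):
# ports that cannot be registered immediately (the SetRenderWindow cases mentioned
# in the docstring above) have their add_input_port argument tuples appended to
# delayed.add_input_port; initialize() replays them later via
# registry.add_input_port(*args) once all modules exist.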
def setAllPorts(descriptor, delayed):
""" setAllPorts(descriptor: ModuleDescriptor) -> None
Traverse descriptor and all of its children/grand-children to add all ports
"""
addPorts(descriptor.module, delayed)
for child in descriptor.children:
setAllPorts(child, delayed)
def class_dict(base_module, node):
"""class_dict(base_module, node) -> dict
Returns the class dictionary for the module represented by node and
with base class base_module"""
class_dict_ = {}
def update_dict(name, callable_):
        if name in class_dict_:
class_dict_[name] = callable_(class_dict_[name])
elif hasattr(base_module, name):
class_dict_[name] = callable_(getattr(base_module, name))
else:
class_dict_[name] = callable_(None)
def guarded_SimpleScalarTree_wrap_compute(old_compute):
# This builds the scalar tree and makes it cacheable
def compute(self):
self.is_cacheable = lambda *args, **kwargs: True
old_compute(self)
self.vtkInstance.BuildTree()
return compute
def guarded_SetFileName_wrap_compute(old_compute):
        # This checks for the presence of the input file in VTK readers
def compute(self):
            # Skip the existence check for readers like vtkImageReader or
            # vtkPLOT3DReader, because they have other ways of specifying
            # files, such as SetFilePrefix for multiple files
if any(issubclass(self.vtkClass, x)
for x in
[vtksnl.vtkBYUReader,
vtksnl.vtkImageReader,
vtksnl.vtkPLOT3DReader,
vtksnl.vtkDICOMImageReader,
vtksnl.vtkTIFFReader]):
old_compute(self)
return
if self.has_input('SetFileName'):
name = self.get_input('SetFileName')
elif self.has_input('SetFile'):
name = self.get_input('SetFile').name
else:
raise ModuleError(self, 'Missing filename')
if not os.path.isfile(name):
raise ModuleError(self, 'File does not exist')
old_compute(self)
return compute
def compute_SetDiffuseColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetDiffuseColorWidget(self, color):
self.vtkInstance.SetDiffuseColor(color.tuple)
return call_SetDiffuseColorWidget
def compute_SetAmbientColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetAmbientColorWidget(self, color):
self.vtkInstance.SetAmbientColor(color.tuple)
return call_SetAmbientColorWidget
def compute_SetSpecularColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetSpecularColorWidget(self, color):
self.vtkInstance.SetSpecularColor(color.tuple)
return call_SetSpecularColorWidget
def compute_SetColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetColorWidget(self, color):
self.vtkInstance.SetColor(color.tuple)
return call_SetColorWidget
def compute_SetEdgeColorWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetEdgeColorWidget(self, color):
self.vtkInstance.SetEdgeColor(color.tuple)
return call_SetEdgeColorWidget
def compute_SetBackgroundWidget(old_compute):
if old_compute != None:
return old_compute
def call_SetBackgroundWidget(self, color):
self.vtkInstance.SetBackground(color.tuple)
return call_SetBackgroundWidget
def compute_SetBackground2Widget(old_compute):
if old_compute != None:
return old_compute
def call_SetBackground2Widget(self, color):
self.vtkInstance.SetBackground2(color.tuple)
return call_SetBackground2Widget
def compute_SetVTKCell(old_compute):
if old_compute != None:
return old_compute
def call_SetRenderWindow(self, cellObj):
if cellObj.cellWidget:
self.vtkInstance.SetRenderWindow(cellObj.cellWidget.mRenWin)
return call_SetRenderWindow
def compute_SetTransferFunction(old_compute):
# This sets the transfer function
if old_compute != None:
return old_compute
def call_SetTransferFunction(self, tf):
tf.set_on_vtk_volume_property(self.vtkInstance)
return call_SetTransferFunction
def compute_SetPointData(old_compute):
if old_compute != None:
return old_compute
def call_SetPointData(self, pd):
self.vtkInstance.GetPointData().ShallowCopy(pd)
return call_SetPointData
def compute_SetCellData(old_compute):
if old_compute != None:
return old_compute
def call_SetCellData(self, cd):
self.vtkInstance.GetCellData().ShallowCopy(cd)
return call_SetCellData
def compute_SetPointIds(old_compute):
if old_compute != None:
return old_compute
def call_SetPointIds(self, point_ids):
self.vtkInstance.GetPointIds().SetNumberOfIds(point_ids.GetNumberOfIds())
for i in xrange(point_ids.GetNumberOfIds()):
self.vtkInstance.GetPointIds().SetId(i, point_ids.GetId(i))
return call_SetPointIds
def guarded_Writer_wrap_compute(old_compute):
# The behavior for vtkWriter subclasses is to call Write()
# If the user sets a name, we will create a file with that name
# If not, we will create a temporary file from the file pool
def compute(self):
old_compute(self)
fn = self.vtkInstance.GetFileName()
if not fn:
o = self.interpreter.filePool.create_file(suffix='.vtk')
self.vtkInstance.SetFileName(o.name)
else:
o = File()
o.name = fn
self.vtkInstance.Write()
self.set_output('file', o)
return compute
for var in dir(node.klass):
# Everyone that has a Set.*FileName should have a Set.*File port too
if set_file_name_pattern.match(var):
def get_compute_SetFile(method_name):
def compute_SetFile(old_compute):
if old_compute != None:
return old_compute
def call_SetFile(self, file_obj):
getattr(self.vtkInstance, method_name)(file_obj.name)
return call_SetFile
return compute_SetFile
update_dict('_special_input_function_' + var[:-4],
get_compute_SetFile(var))
if hasattr(node.klass, 'SetFileName'):
# ... BUT we only want to check existence of filenames on
# readers. VTK is nice enough to be consistent with names, but
# this is brittle..
if node.klass.__name__.endswith('Reader'):
if not node.klass.__name__.endswith('TiffReader'):
update_dict('compute', guarded_SetFileName_wrap_compute)
if hasattr(node.klass, 'SetRenderWindow'):
update_dict('_special_input_function_SetVTKCell',
compute_SetVTKCell)
#color gui wrapping
if hasattr(node.klass, 'SetDiffuseColor'):
update_dict('_special_input_function_SetDiffuseColorWidget',
compute_SetDiffuseColorWidget)
if hasattr(node.klass, 'SetAmbientColor'):
update_dict('_special_input_function_SetAmbientColorWidget',
compute_SetAmbientColorWidget)
if hasattr(node.klass, 'SetSpecularColor'):
update_dict('_special_input_function_SetSpecularColorWidget',
compute_SetSpecularColorWidget)
if hasattr(node.klass, 'SetEdgeColor'):
update_dict('_special_input_function_SetEdgeColorWidget',
compute_SetEdgeColorWidget)
if hasattr(node.klass, 'SetColor'):
update_dict('_special_input_function_SetColorWidget',
compute_SetColorWidget)
if (issubclass(node.klass, vtksnl.vtkRenderer) and
hasattr(node.klass, 'SetBackground')):
update_dict('_special_input_function_SetBackgroundWidget',
compute_SetBackgroundWidget)
if (issubclass(node.klass, vtksnl.vtkRenderer) and
hasattr(node.klass, 'SetBackground2')):
update_dict('_special_input_function_SetBackground2Widget',
compute_SetBackground2Widget)
if issubclass(node.klass, vtksnl.vtkWriter):
update_dict('compute', guarded_Writer_wrap_compute)
if issubclass(node.klass, vtksnl.vtkScalarTree):
update_dict('compute', guarded_SimpleScalarTree_wrap_compute)
if issubclass(node.klass, vtksnl.vtkVolumeProperty):
update_dict('_special_input_function_SetTransferFunction',
compute_SetTransferFunction)
if issubclass(node.klass, vtksnl.vtkDataSet):
update_dict('_special_input_function_SetPointData',
compute_SetPointData)
update_dict('_special_input_function_SetCellData',
compute_SetCellData)
if issubclass(node.klass, vtksnl.vtkCell):
update_dict('_special_input_function_SetPointIds',
compute_SetPointIds)
return class_dict_
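# Illustrative note (not from the original source): for a reader class such as
# vtkXMLPolyDataReader, the returned dictionary would typically contain a 'compute'
# entry wrapped by guarded_SetFileName_wrap_compute and a
# '_special_input_function_SetFile' entry produced by get_compute_SetFile('SetFileName');
# new_module() later installs these entries on the generated VisTrails module class.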
disallowed_modules = set([
# 'vtkGeoAlignedImageCache',
# 'vtkGeoTerrainCache',
# 'vtkMimeTypeStrategy',
# 'vtkMPIGroup',
'vtkPlotWriter', # Segfaults when being destroyed (when created without a reference, or when last reference is removed)
'vtkRenderedLandscapeRepresentation' # Segfaults when calling: GetPeakLabelStopWords()
])
def createModule(baseModule, node):
""" createModule(baseModule: a Module subclass, node: TreeNode) -> None
Construct a module inherits baseModule with specification from node
"""
if node.name in disallowed_modules: return
def obsolete_class_list():
lst = []
items = ['vtkInteractorStyleTrackball',
'vtkStructuredPointsGeometryFilter',
'vtkConstrainedPointHandleRepresentation',
'vtkTypePromotion']
def try_to_add_item(item):
try:
lst.append(getattr(vtksnl, item))
except AttributeError:
pass
for item in items:
try_to_add_item(item)
return lst
obsolete_list = obsolete_class_list()
def is_abstract():
"""is_abstract tries to instantiate the class. If it's
abstract, this will raise."""
# Consider obsolete classes abstract
if node.klass in obsolete_list:
return True
try:
getattr(vtksnl, node.name)()
except TypeError: # VTK raises type error on abstract classes
return True
return False
module = new_module(baseModule, node.name,
class_dict(baseModule, node),
docstring=getattr(vtksnl, node.name).__doc__
)
# This is sitting on the class
if hasattr(fix_classes, node.klass.__name__ + '_fixed'):
module.vtkClass = getattr(fix_classes, node.klass.__name__ + '_fixed')
else:
module.vtkClass = node.klass
registry = get_module_registry()
registry.add_module(module, abstract=is_abstract(),
signatureCallable=vtk_hasher)
for child in node.children:
if child.name in disallowed_classes:
continue
createModule(module, child)
def createAllModules(g):
""" createAllModules(g: ClassTree) -> None
Traverse the VTK class tree and add all modules into the module registry
"""
assert len(g.tree[0]) == 1
base = g.tree[0][0]
assert base.name == 'vtkObjectBase'
vtkObjectBase = new_module(vtkBaseModule, 'vtkObjectBase')
vtkObjectBase.vtkClass = vtksnl.vtkObjectBase
registry = get_module_registry()
registry.add_module(vtkObjectBase)
for child in base.children:
if child.name in disallowed_classes:
continue
createModule(vtkObjectBase, child)
##############################################################################
# Convenience methods
def extract_vtk_instance(vistrails_obj):
"""extract_vtk_instance(vistrails_obj) -> vtk_object
takes an instance of a VisTrails module that is a subclass
of the vtkObjectBase module and returns the corresponding
instance."""
global identifier
vtkObjectBase = registry.get_descriptor_by_name(identifier,
'vtkObjectBase').module
assert isinstance(vistrails_obj, vtkObjectBase)
return vistrails_obj.vtkInstance
def wrap_vtk_instance(vtk_obj):
"""wrap_vtk_instance(vtk_object) -> VisTrails module
takes a vtk instance and returns a corresponding
wrapped instance of a VisTrails module"""
global identifier
assert isinstance(vtk_obj, vtksnl.vtkObjectBase)
m = registry.get_descriptor_by_name(identifier,
vtk_obj.GetClassName())
result = m.module()
result.vtkInstance = vtk_obj
return result
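# Illustrative note (not from the original source): the two helpers are inverses in
# the usual case, e.g.
#   module = wrap_vtk_instance(some_vtk_object)
#   assert extract_vtk_instance(module) is some_vtk_object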
################################################################################
def initialize():
""" initialize() -> None
Package-entry to initialize the package
"""
# Check VTK version
v = vtksnl.vtkVersion()
version = [v.GetVTKMajorVersion(),
v.GetVTKMinorVersion(),
v.GetVTKBuildVersion()]
if version < [5, 0, 0]:
raise Exception("You need to upgrade your VTK install to version \
> >= 5.0.0")
inheritanceGraph = ClassTree(vtksnl)
inheritanceGraph.create()
# Transfer Function constant
tf_widget.initialize()
delayed = InstanceObject(add_input_port=[])
# Add VTK modules
registry = get_module_registry()
registry.add_module(vtkBaseModule)
createAllModules(inheritanceGraph)
setAllPorts(registry.get_descriptor_by_name(identifier,
'vtkObjectBase'),
delayed)
# Register the VTKCell and VTKHandler type if the spreadsheet is up
if registry.has_module('edu.utah.sci.vistrails.spreadsheet',
'SpreadsheetCell'):
import vtkhandler
import vtkcell
import vtkviewcell
vtkhandler.registerSelf()
vtkcell.registerSelf()
vtkviewcell.registerSelf()
# register offscreen rendering module
offscreen.register_self()
# Now add all "delayed" ports - see comment on addSetGetPorts
for args in delayed.add_input_port:
registry.add_input_port(*args)
# register Transfer Function adjustment
# This can't be reordered -- TransferFunction needs to go before
# vtkVolumeProperty, but vtkScaledTransferFunction needs
# to go after vtkAlgorithmOutput
getter = registry.get_descriptor_by_name
registry.add_module(tf_widget.vtkScaledTransferFunction)
registry.add_input_port(tf_widget.vtkScaledTransferFunction,
'Input', getter('edu.utah.sci.vistrails.vtksnl',
'vtkAlgorithmOutput').module)
registry.add_input_port(tf_widget.vtkScaledTransferFunction,
'Dataset', getter ('edu.utah.sci.vistrails.vtksnl',
'vtkDataObject').module)
registry.add_input_port(tf_widget.vtkScaledTransferFunction,
'Range', [Float, Float])
registry.add_input_port(tf_widget.vtkScaledTransferFunction,
'TransferFunction',
tf_widget.TransferFunctionConstant)
registry.add_output_port(tf_widget.vtkScaledTransferFunction,
'TransferFunction',
tf_widget.TransferFunctionConstant)
inspectors.initialize()
################################################################################
# Copyright 2017--2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Defines commandline arguments for the main CLIs with reasonable defaults.
"""
import argparse
import os
import sys
import types
from typing import Any, Callable, Dict, List, Tuple, Optional
import yaml
from sockeye.utils import smart_open
from . import constants as C
class ConfigArgumentParser(argparse.ArgumentParser):
"""
Extension of argparse.ArgumentParser supporting config files.
The option --config is added automatically and expects a YAML serialized
dictionary, similar to the return value of parse_args(). Command line
parameters have precedence over config file values. Usage should be
transparent, just substitute argparse.ArgumentParser with this class.
Extended from
https://stackoverflow.com/questions/28579661/getting-required-option-from-namespace-in-python
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.argument_definitions = {} # type: Dict[Tuple, Dict]
self.argument_actions = [] # type: List[Any]
self._overwrite_add_argument(self)
self.add_argument("--config", help="Path to CLI arguments in yaml format "
"(as saved in Sockeye model directories as 'args.yaml'). "
"Commandline arguments have precedence over values in this file.", type=str)
# Note: not FileType so that we can get the path here
def _register_argument(self, _action, *args, **kwargs):
self.argument_definitions[args] = kwargs
self.argument_actions.append(_action)
def _overwrite_add_argument(self, original_object):
def _new_add_argument(this_self, *args, **kwargs):
action = this_self.original_add_argument(*args, **kwargs)
this_self.config_container._register_argument(action, *args, **kwargs)
original_object.original_add_argument = original_object.add_argument
original_object.config_container = self
original_object.add_argument = types.MethodType(_new_add_argument, original_object)
return original_object
def add_argument_group(self, *args, **kwargs):
group = super().add_argument_group(*args, **kwargs)
return self._overwrite_add_argument(group)
def parse_args(self, args=None, namespace=None) -> argparse.Namespace: # type: ignore
# Mini argument parser to find the config file
config_parser = argparse.ArgumentParser(add_help=False)
config_parser.add_argument("--config", type=regular_file())
config_args, _ = config_parser.parse_known_args(args=args)
initial_args = argparse.Namespace()
if config_args.config:
initial_args = load_args(config_args.config)
# Remove the 'required' flag from options loaded from config file
for action in self.argument_actions:
if action.dest in initial_args:
action.required = False
return super().parse_args(args=args, namespace=initial_args)
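# Illustrative sketch (not part of the original module): how the config-file support
# composes with normal argparse usage. Values loaded from --config pre-populate the
# namespace, and explicitly given command-line flags still take precedence.
#
#     parser = ConfigArgumentParser(description="example")
#     parser.add_argument("--num-layers", type=int, default=6)
#     args = parser.parse_args(["--config", "saved_args.yaml", "--num-layers", "12"])
#     # args.num_layers == 12 even if saved_args.yaml contains a different value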
class StoreDeprecatedAction(argparse.Action):
def __init__(self, option_strings, dest, deprecated_dest, nargs=None, **kwargs):
super(StoreDeprecatedAction, self).__init__(option_strings, dest, **kwargs)
self.deprecated_dest = deprecated_dest
def __call__(self, parser, namespace, value, option_string=None):
setattr(namespace, self.dest, value)
setattr(namespace, self.deprecated_dest, value)
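# Illustrative sketch (assumed usage, not from the original module): mirror a value
# onto a deprecated attribute name while migrating an option.
#
#     parser.add_argument("--new-option", dest="new_option", deprecated_dest="old_option",
#                         action=StoreDeprecatedAction)
#     # parse_args(["--new-option", "x"]) sets both args.new_option and args.old_option to "x"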
def save_args(args: argparse.Namespace, fname: str):
with open(fname, 'w') as out:
yaml.safe_dump(args.__dict__, out, default_flow_style=False)
def load_args(fname: str) -> argparse.Namespace:
with open(fname, 'r') as inp:
return argparse.Namespace(**yaml.safe_load(inp))
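# Illustrative sketch (not from the original module): save_args/load_args round-trip a
# parsed namespace through YAML, e.g.
#
#     save_args(args, "args.yaml")
#     restored = load_args("args.yaml")   # argparse.Namespace with the same attributes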
def regular_file() -> Callable:
"""
Returns a method that can be used in argument parsing to check the argument is a regular file or a symbolic link,
but not, e.g., a process substitution.
:return: A method that can be used as a type in argparse.
"""
def check_regular_file(value_to_check):
value_to_check = str(value_to_check)
if not os.path.isfile(value_to_check):
raise argparse.ArgumentTypeError("must exist and be a regular file.")
return value_to_check
return check_regular_file
def regular_folder() -> Callable:
"""
Returns a method that can be used in argument parsing to check the argument is a directory.
:return: A method that can be used as a type in argparse.
"""
def check_regular_directory(value_to_check):
value_to_check = str(value_to_check)
if not os.path.isdir(value_to_check):
raise argparse.ArgumentTypeError("must be a directory.")
return value_to_check
return check_regular_directory
def int_greater_or_equal(threshold: int) -> Callable:
"""
Returns a method that can be used in argument parsing to check that the int argument is greater or equal to `threshold`.
:param threshold: The threshold that we assume the cli argument value is greater or equal to.
:return: A method that can be used as a type in argparse.
"""
def check_greater_equal(value: str):
value_to_check = int(value)
if value_to_check < threshold:
raise argparse.ArgumentTypeError("must be greater or equal to %d." % threshold)
return value_to_check
return check_greater_equal
def float_greater_or_equal(threshold: float) -> Callable:
"""
Returns a method that can be used in argument parsing to check that the float argument is greater or equal to `threshold`.
:param threshold: The threshold that we assume the cli argument value is greater or equal to.
:return: A method that can be used as a type in argparse.
"""
def check_greater_equal(value: str):
value_to_check = float(value)
if value_to_check < threshold:
raise argparse.ArgumentTypeError("must be greater or equal to %f." % threshold)
return value_to_check
return check_greater_equal
def bool_str() -> Callable:
"""
Returns a method that can be used in argument parsing to check that the argument is a valid representation of
a boolean value.
:return: A method that can be used as a type in argparse.
"""
def parse(value: str):
lower_value = value.lower()
if lower_value in ["true", "yes", "1"]:
return True
elif lower_value in ["false", "no", "0"]:
return False
else:
raise argparse.ArgumentTypeError(
"Invalid value for bool argument. Use true/false, yes/no or 1/0.")
return parse
def simple_dict() -> Callable:
"""
A simple dictionary format that does not require spaces or quoting.
Supported types: bool, int, float
:return: A method that can be used as a type in argparse.
"""
def parse(dict_str: str):
def _parse(value: str):
if value.lower() == "true":
return True
if value.lower() == "false":
return False
if "." in value or "e" in value:
return float(value)
return int(value)
_dict = dict()
try:
for entry in dict_str.split(","):
key, value = entry.split(":")
_dict[key] = _parse(value)
except ValueError:
raise argparse.ArgumentTypeError("Specify argument dictionary as key1:value1,key2:value2,..."
" Supported types: bool, int, float.")
return _dict
return parse
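# Illustrative example (not from the original module) of the simple_dict() format:
#
#     parse = simple_dict()
#     parse("beta1:0.9,amp:true,update_interval:2")
#     # -> {'beta1': 0.9, 'amp': True, 'update_interval': 2}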
def multiple_values(num_values: int = 0,
greater_or_equal: Optional[float] = None,
data_type: Callable = int) -> Callable:
"""
Returns a method to be used in argument parsing to parse a string of the form "<val>:<val>[:<val>...]" into
a tuple of values of type data_type.
:param num_values: Optional number of ints required.
:param greater_or_equal: Optional constraint that all values should be greater or equal to this value.
:param data_type: Type of values. Default: int.
:return: Method for parsing.
"""
def parse(value_to_check):
if ':' in value_to_check:
expected_num_separators = num_values - 1 if num_values else 0
if expected_num_separators > 0 and (value_to_check.count(':') != expected_num_separators):
raise argparse.ArgumentTypeError("Expected either a single value or %d values separated by %s" %
(num_values, C.ARG_SEPARATOR))
values = tuple(map(data_type, value_to_check.split(C.ARG_SEPARATOR, num_values - 1)))
else:
values = tuple([data_type(value_to_check)] * num_values)
if greater_or_equal is not None:
if any((value < greater_or_equal for value in values)):
raise argparse.ArgumentTypeError("Must provide value greater or equal to %d" % greater_or_equal)
return values
return parse
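# Illustrative example (not from the original module) of multiple_values() parsing:
#
#     parse = multiple_values(num_values=2, greater_or_equal=1)
#     parse("512:1024")   # -> (512, 1024)
#     parse("512")        # -> (512, 512), a single value is repeated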
def file_or_stdin() -> Callable:
"""
    Returns a method that returns sys.stdin (for None or '-') or a file object opened from the given path.
"""
def parse(path):
if path is None or path == "-":
return sys.stdin
else:
return smart_open(path)
return parse
def add_average_args(params):
average_params = params.add_argument_group("Averaging")
average_params.add_argument(
"inputs",
metavar="INPUT",
type=str,
nargs="+",
help="either a single model directory (automatic checkpoint selection) "
"or multiple .params files (manual checkpoint selection)")
average_params.add_argument(
"--metric",
help="Name of the metric to choose n-best checkpoints from. Default: %(default)s.",
default=C.PERPLEXITY,
choices=C.METRICS)
average_params.add_argument(
"-n",
type=int,
default=4,
help="number of checkpoints to find. Default: %(default)s.")
average_params.add_argument(
"--output", "-o", required=True, type=str, help="File to write averaged parameters to.")
average_params.add_argument(
"--strategy",
choices=C.AVERAGE_CHOICES,
default=C.AVERAGE_BEST,
help="selection method. Default: %(default)s.")
def add_rerank_args(params):
rerank_params = params.add_argument_group("Reranking")
rerank_params.add_argument("--reference", "-r",
type=str,
required=True,
help="File where target reference translations are stored.")
rerank_params.add_argument("--hypotheses", "-hy",
type=str,
required=True,
help="File with nbest translations, one nbest list per line,"
"in JSON format as returned by sockeye.translate with --nbest-size x.")
rerank_params.add_argument("--metric", "-m",
type=str,
required=False,
default=C.RERANK_BLEU,
choices=C.RERANK_METRICS,
help="Sentence-level metric used to compare each nbest translation to the reference."
"Default: %(default)s.")
rerank_params.add_argument("--output", "-o", default=None, help="File to write output to. Default: STDOUT.")
rerank_params.add_argument("--output-best",
action="store_true",
help="Output only the best hypothesis from each nbest list.")
rerank_params.add_argument("--output-reference-instead-of-blank",
action="store_true",
help="When outputting only the best hypothesis (--output-best) and the best hypothesis "
"is a blank line, output the reference instead.")
rerank_params.add_argument("--return-score",
action="store_true",
help="Returns the reranking scores as scores in output JSON objects.")
def add_lexicon_args(params):
lexicon_params = params.add_argument_group("Model & Top-k")
lexicon_params.add_argument("--model", "-m", required=True,
help="Model directory containing source and target vocabularies.")
lexicon_params.add_argument("-k", type=int, default=200,
help="Number of target translations to keep per source. Default: %(default)s.")
def add_lexicon_create_args(params):
lexicon_params = params.add_argument_group("I/O")
lexicon_params.add_argument("--input", "-i", required=True,
help="Probabilistic lexicon (fast_align format) to build top-k lexicon from.")
lexicon_params.add_argument("--output", "-o", required=True, help="File name to write top-k lexicon to.")
def add_lexicon_inspect_args(params):
lexicon_params = params.add_argument_group("Lexicon to inspect")
lexicon_params.add_argument("--lexicon", "-l", required=True, help="File name of top-k lexicon to inspect.")
def add_logging_args(params):
logging_params = params.add_argument_group("Logging")
logging_params.add_argument('--quiet', '-q',
default=False,
action="store_true",
help='Suppress console logging.')
logging_params.add_argument('--quiet-secondary-workers', '-qsw',
default=False,
action="store_true",
help='Suppress console logging for secondary workers in distributed training.')
logging_params.add_argument('--no-logfile',
default=False,
action="store_true",
help='Suppress file logging')
log_levels = ['INFO', 'DEBUG', 'ERROR']
logging_params.add_argument('--loglevel', '--log-level',
default='INFO',
choices=log_levels,
help='Log level. Default: %(default)s.')
logging_params.add_argument('--loglevel-secondary-workers',
default='INFO',
choices=log_levels,
help='Console log level for secondary workers. Default: %(default)s.')
def add_training_data_args(params, required=False):
params.add_argument(C.TRAINING_ARG_SOURCE, '-s',
required=required,
type=regular_file(),
help='Source side of parallel training data.')
params.add_argument('--source-factors', '-sf',
required=False,
nargs='+',
type=regular_file(),
default=[],
help='File(s) containing additional token-parallel source-side factors. Default: %(default)s.')
params.add_argument('--source-factors-use-source-vocab',
required=False,
nargs='+',
type=bool_str(),
default=[],
help='List of bools signaling whether to use the source vocabulary for the source factors. '
'If empty (default) each factor has its own vocabulary.')
params.add_argument('--target-factors', '-tf',
required=False,
nargs='+',
type=regular_file(),
default=[],
help='File(s) containing additional token-parallel target-side factors. Default: %(default)s.')
params.add_argument('--target-factors-use-target-vocab',
required=False,
nargs='+',
type=bool_str(),
default=[],
help='List of bools signaling whether to use the target vocabulary for the target factors. '
'If empty (default) each factor has its own vocabulary.')
params.add_argument(C.TRAINING_ARG_TARGET, '-t',
required=required,
type=regular_file(),
help='Target side of parallel training data.')
def add_validation_data_params(params):
params.add_argument('--validation-source', '-vs',
required=True,
type=regular_file(),
help='Source side of validation data.')
params.add_argument('--validation-source-factors', '-vsf',
required=False,
nargs='+',
type=regular_file(),
default=[],
help='File(s) containing additional token-parallel validation source side factors. '
'Default: %(default)s.')
params.add_argument('--validation-target', '-vt',
required=True,
type=regular_file(),
help='Target side of validation data.')
params.add_argument('--validation-target-factors', '-vtf',
required=False,
nargs='+',
type=regular_file(),
default=[],
help='File(s) containing additional token-parallel validation target side factors. '
'Default: %(default)s.')
def add_prepared_data_args(params):
params.add_argument(C.TRAINING_ARG_PREPARED_DATA, '-d',
type=regular_folder(),
help='Prepared training data directory created through python -m sockeye.prepare_data.')
def add_training_output_args(params):
params.add_argument('--output', '-o',
required=True,
help='Folder where model & training results are written to.')
params.add_argument('--overwrite-output',
action='store_true',
help='Delete all contents of the model directory if it already exists.')
def add_training_io_args(params):
params = params.add_argument_group("Data & I/O")
    # Unfortunately we must set --source/--target to not required, as we accept either these parameters
    # or --prepared-data, which cannot easily be encoded in argparse.
add_training_data_args(params, required=False)
add_prepared_data_args(params)
add_validation_data_params(params)
add_bucketing_args(params)
add_vocab_args(params)
add_training_output_args(params)
def add_bucketing_args(params):
params.add_argument('--no-bucketing',
action='store_true',
help='Disable bucketing: always unroll the graph to --max-seq-len. Default: %(default)s.')
params.add_argument('--bucket-width',
type=int_greater_or_equal(1),
default=8,
help='Width of buckets in tokens. Default: %(default)s.')
params.add_argument('--bucket-scaling',
action='store_true',
help='Scale source/target buckets based on length ratio to reduce padding. Default: '
'%(default)s.')
params.add_argument(C.TRAINING_ARG_MAX_SEQ_LEN,
type=multiple_values(num_values=2, greater_or_equal=1),
default=(95, 95),
help='Maximum sequence length in tokens, not counting BOS/EOS tokens (internal max sequence '
'length is X+1). Use "x:x" to specify separate values for src&tgt. Default: %(default)s.')
def add_process_pool_args(params):
params.add_argument('--max-processes',
type=int_greater_or_equal(1),
default=1,
help='Process the shards in parallel using max-processes processes.')
def add_prepare_data_cli_args(params):
add_training_data_args(params, required=True)
add_vocab_args(params)
add_bucketing_args(params)
params.add_argument('--num-samples-per-shard',
type=int_greater_or_equal(1),
default=10000000,
help='The approximate number of samples per shard. Default: %(default)s.')
params.add_argument('--min-num-shards',
default=1,
type=int_greater_or_equal(1),
help='The minimum number of shards to use, even if they would not '
'reach the desired number of samples per shard. Default: %(default)s.')
params.add_argument('--seed',
type=int,
default=13,
help='Random seed used that makes shard assignments deterministic. Default: %(default)s.')
params.add_argument('--output', '-o',
required=True,
help='Folder where the prepared and possibly sharded data is written to.')
add_logging_args(params)
add_process_pool_args(params)
def add_device_args(params):
device_params = params.add_argument_group("Device parameters")
device_params.add_argument('--device-id',
type=int_greater_or_equal(0),
default=0,
help='GPU to use. 0 translates to "cuda:0", etc. When running in distributed mode '
'(--dist), each process\'s device is set automatically. Default: %(default)s.')
device_params.add_argument('--use-cpu',
action='store_true',
help='Use CPU device instead of GPU.')
device_params.add_argument('--env',
help='List of environment variables to be set before importing PyTorch. Separated by '
'",", e.g. --env=OMP_NUM_THREADS=1,PYTORCH_JIT=0 etc.')
def add_vocab_args(params):
params.add_argument('--source-vocab',
required=False,
default=None,
help='Existing source vocabulary (JSON).')
params.add_argument('--target-vocab',
required=False,
default=None,
help='Existing target vocabulary (JSON).')
params.add_argument('--source-factor-vocabs',
required=False,
nargs='+',
type=regular_file(),
default=[],
help='Existing source factor vocabulary (-ies) (JSON).')
params.add_argument('--target-factor-vocabs',
required=False,
nargs='+',
type=regular_file(),
default=[],
help='Existing target factor vocabulary (-ies) (JSON).')
params.add_argument(C.VOCAB_ARG_SHARED_VOCAB,
action='store_true',
default=False,
help='Share source and target vocabulary. '
'Will be automatically turned on when using weight tying. Default: %(default)s.')
params.add_argument('--num-words',
type=multiple_values(num_values=2, greater_or_equal=0),
default=(0, 0),
help='Maximum vocabulary size. Use "x:x" to specify separate values for src&tgt. '
                             'A value of 0 indicates that the vocabulary is unrestricted and determined from the data by '
                             'creating an entry for all words that occur at least --word-min-count times. '
'Default: %(default)s.')
params.add_argument('--word-min-count',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(1, 1),
help='Minimum frequency of words to be included in vocabularies. Default: %(default)s.')
params.add_argument('--pad-vocab-to-multiple-of',
type=int,
default=8,
help='Pad vocabulary to a multiple of this integer. Default: %(default)s.')
def add_model_parameters(params):
model_params = params.add_argument_group("ModelConfig")
model_params.add_argument('--params', '-p',
type=str,
default=None,
help='Initialize model parameters from file. Overrides random initializations.')
model_params.add_argument('--allow-missing-params',
action="store_true",
default=False,
help="Allow missing parameters when initializing model parameters from file. "
"Default: %(default)s.")
model_params.add_argument('--ignore-extra-params',
action="store_true",
default=False,
help="Allow extra parameters when initializing model parameters from file. "
"Default: %(default)s.")
model_params.add_argument('--encoder',
choices=C.ENCODERS,
default=C.TRANSFORMER_TYPE,
help="Type of encoder. Default: %(default)s.")
model_params.add_argument('--decoder',
choices=C.DECODERS,
default=C.TRANSFORMER_TYPE,
help="Type of decoder. Default: %(default)s. "
"'ssru_transformer' uses Simpler Simple Recurrent Units (Kim et al, 2019) "
"as replacement for self-attention layers.")
model_params.add_argument('--num-layers',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(6, 6),
help='Number of layers for encoder & decoder. '
'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
# transformer arguments
model_params.add_argument('--transformer-model-size',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(512, 512),
help='Number of hidden units in transformer layers. '
'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
model_params.add_argument('--transformer-attention-heads',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(8, 8),
help='Number of heads for all self-attention when using transformer layers. '
'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
model_params.add_argument('--transformer-feed-forward-num-hidden',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(2048, 2048),
help='Number of hidden units in transformers feed forward layers. '
'Use "x:x" to specify separate values for encoder & decoder. Default: %(default)s.')
model_params.add_argument('--transformer-feed-forward-use-glu',
action='store_true',
default=False,
                              help='Use Gated Linear Units in transformer feed forward networks (Dauphin et al. 2016, '
'arxiv.org/abs/1612.08083; Shazeer 2020, arxiv.org/abs/2002.05202). Default: '
'%(default)s.')
model_params.add_argument('--transformer-activation-type',
type=multiple_values(num_values=2, greater_or_equal=None, data_type=str),
default=(C.RELU, C.RELU),
help='Type of activation to use for each feed forward layer. Use "x:x" to specify '
'different values for encoder & decoder. Supported: {}. Default: '
'%(default)s.'.format(' '.join(C.TRANSFORMER_ACTIVATION_TYPES)))
model_params.add_argument('--transformer-positional-embedding-type',
choices=C.POSITIONAL_EMBEDDING_TYPES,
default=C.FIXED_POSITIONAL_EMBEDDING,
help='The type of positional embedding. Default: %(default)s.')
model_params.add_argument('--transformer-preprocess',
type=multiple_values(num_values=2, greater_or_equal=None, data_type=str),
default=('n', 'n'),
help='Transformer preprocess sequence for encoder and decoder. Supports three types of '
'operations: d=dropout, r=residual connection, n=layer normalization. You can '
'combine in any order, for example: "ndr". '
'Leave empty to not use any of these operations. '
'You can specify separate sequences for encoder and decoder by separating with ":" '
'For example: n:drn '
'Default: %(default)s.')
model_params.add_argument('--transformer-postprocess',
type=multiple_values(num_values=2, greater_or_equal=None, data_type=str),
default=('dr', 'dr'),
help='Transformer postprocess sequence for encoder and decoder. Supports three types of '
'operations: d=dropout, r=residual connection, n=layer normalization. You can '
'combine in any order, for example: "ndr". '
'Leave empty to not use any of these operations. '
'You can specify separate sequences for encoder and decoder by separating with ":" '
'For example: n:drn '
'Default: %(default)s.')
model_params.add_argument('--lhuc',
nargs="+",
default=None,
choices=C.LHUC_CHOICES,
metavar="COMPONENT",
help="Use LHUC (Vilar 2018). Include an amplitude parameter to hidden units for"
" domain adaptation. Needs a pre-trained model. Valid values: {values}."
" Default: %(default)s.".format(
values=", ".join(C.LHUC_CHOICES)))
# embedding arguments
model_params.add_argument('--num-embed',
type=multiple_values(num_values=2, greater_or_equal=1),
default=(None, None),
help='Embedding size for source and target tokens. '
'Use "x:x" to specify separate values for src&tgt. Default: %d.' % C.DEFAULT_NUM_EMBED)
model_params.add_argument('--source-factors-num-embed',
type=int,
nargs='+',
default=[],
help='Embedding size for additional source factors. '
'You must provide as many dimensions as '
'(validation) source factor files. Default: %(default)s.')
model_params.add_argument('--target-factors-num-embed',
type=int,
nargs='+',
default=[],
help='Embedding size for additional target factors. '
'You must provide as many dimensions as '
'(validation) target factor files. Default: %(default)s.')
model_params.add_argument('--source-factors-combine', '-sfc',
choices=C.FACTORS_COMBINE_CHOICES,
default=[C.FACTORS_COMBINE_SUM],
nargs='+',
help='How to combine source factors. Can be either one value which will be applied to '
'all source factors, or a list of values. Default: %(default)s.')
model_params.add_argument('--target-factors-combine', '-tfc',
choices=C.FACTORS_COMBINE_CHOICES,
default=[C.FACTORS_COMBINE_SUM],
nargs='+',
help='How to combine target factors. Can be either one value which will be applied to '
'all target factors, or a list of values. Default: %(default)s.')
model_params.add_argument('--source-factors-share-embedding',
type=bool_str(),
nargs='+',
default=[False],
help='Share the embeddings with the source language. '
'Can be either one value which will be applied '
'to all source factors, or a list of values. Default: %(default)s.')
model_params.add_argument('--target-factors-share-embedding',
type=bool_str(),
nargs='+',
default=[False],
help='Share the embeddings with the target language. '
'Can be either one value which will be applied '
'to all target factors, or a list of values. Default: %(default)s.')
model_params.add_argument('--weight-tying-type',
default=C.WEIGHT_TYING_SRC_TRG_SOFTMAX,
choices=C.WEIGHT_TYING_TYPES,
help='The type of weight tying. source embeddings=src, target embeddings=trg, '
'target softmax weight matrix=softmax. Default: %(default)s.')
model_params.add_argument('--dtype', default=C.DTYPE_FP32, choices=[C.DTYPE_FP32, C.DTYPE_FP16],
help="Data type.")
model_params.add_argument('--amp',
action='store_true',
help='Use PyTorch automatic mixed precision (AMP) to run compatible operations in '
'float16 mode instead of float32.')
model_params.add_argument('--apex-amp',
action='store_true',
help='Use NVIDIA Apex automatic mixed precision (AMP) to run the entire model in float16 '
'mode with float32 master weights and dynamic loss scaling. This is faster than '
'PyTorch AMP with some additional risk and requires installing Apex: '
'https://github.com/NVIDIA/apex')
def add_batch_args(params, default_batch_size=4096, default_batch_type=C.BATCH_TYPE_WORD):
params.add_argument('--batch-size', '-b',
type=int_greater_or_equal(1),
default=default_batch_size,
help='Mini-batch size per process. Depending on --batch-type, this either refers to words or '
'sentences. The effective batch size (update size) is num_processes * batch_size * '
'update_interval. Default: %(default)s.')
params.add_argument('--batch-type',
type=str,
default=default_batch_type,
choices=C.BATCH_TYPES,
help='sentence: each batch contains exactly X sentences. '
'word: each batch contains approximately X target words. '
'max-word: each batch contains at most X target words. '
'Default: %(default)s.')
params.add_argument('--batch-sentences-multiple-of',
type=int,
default=8,
help='For word and max-word batching, guarantee that each batch contains a multiple of X '
'sentences. For word batching, round up or down to nearest multiple. For max-word '
'batching, always round down. Default: %(default)s.')
params.add_argument('--update-interval',
type=int,
default=1,
help='Accumulate gradients over X batches for each model update. Set a value higher than 1 to '
'simulate large batches (ex: batch_size 2560 with update_interval 4 gives effective batch '
'size 10240). Default: %(default)s.')
def add_training_args(params):
train_params = params.add_argument_group("Training parameters")
add_batch_args(train_params)
train_params.add_argument('--label-smoothing',
default=0.1,
type=float,
help='Smoothing constant for label smoothing. Default: %(default)s.')
train_params.add_argument('--label-smoothing-impl',
default='mxnet',
choices=['mxnet', 'fairseq', 'torch'],
help='Choose label smoothing implementation. Default: %(default)s. '
'`torch` requires PyTorch 1.10.')
train_params.add_argument('--length-task',
type=str,
default=None,
choices=[C.LENGTH_TASK_RATIO, C.LENGTH_TASK_LENGTH],
help='If specified, adds an auxiliary task during training to predict source/target length ratios '
'(mean squared error loss), or absolute lengths (Poisson) loss. Default %(default)s.')
train_params.add_argument('--length-task-weight',
type=float_greater_or_equal(0.0),
default=1.0,
help='The weight of the auxiliary --length-task loss. Default %(default)s.')
train_params.add_argument('--length-task-layers',
type=int_greater_or_equal(1),
default=1,
help='Number of fully-connected layers for predicting the length ratio. Default %(default)s.')
train_params.add_argument('--target-factors-weight',
type=float,
nargs='+',
default=[1.0],
help='Weights of target factor losses. If one value is given, it applies to all '
'secondary target factors. For multiple values, the number of weights given has '
'to match the number of target factors. Default: %(default)s.')
train_params.add_argument('--optimized-metric',
default=C.PERPLEXITY,
choices=C.METRICS,
help='Metric to optimize with early stopping {%(choices)s}. Default: %(default)s.')
train_params.add_argument(C.TRAIN_ARGS_CHECKPOINT_INTERVAL,
type=int_greater_or_equal(1),
default=4000,
help='Checkpoint and evaluate every x updates (update-interval * batches). '
'Default: %(default)s.')
train_params.add_argument('--min-samples',
type=int,
default=None,
help='Minimum number of samples before training can stop. Default: %(default)s.')
train_params.add_argument('--max-samples',
type=int,
default=None,
help='Maximum number of samples. Default: %(default)s.')
train_params.add_argument('--min-updates',
type=int,
default=None,
help='Minimum number of updates before training can stop. Default: %(default)s.')
train_params.add_argument('--max-updates',
type=int,
default=None,
help='Maximum number of updates. Default: %(default)s.')
train_params.add_argument('--max-seconds',
type=int,
default=None,
help='Training will stop on the next checkpoint after reaching the maximum seconds. '
'Default: %(default)s.')
train_params.add_argument('--max-checkpoints',
type=int,
default=None,
help='Maximum number of checkpoints to continue training the model '
'before training is stopped. '
'Default: %(default)s.')
train_params.add_argument('--max-num-checkpoint-not-improved',
type=int,
default=None,
help='Maximum number of checkpoints the model is allowed to not improve in '
'<optimized-metric> on validation data before training is stopped. '
'Default: %(default)s.')
train_params.add_argument('--checkpoint-improvement-threshold',
type=float,
default=0.,
help='Improvement in <optimized-metric> over specified number of checkpoints must exceed '
'this value to be considered actual improvement. Default: %(default)s.')
train_params.add_argument('--min-num-epochs',
type=int,
default=None,
help='Minimum number of epochs (passes through the training data) '
'before training can stop. Default: %(default)s.')
train_params.add_argument('--max-num-epochs',
type=int,
default=None,
help='Maximum number of epochs (passes through the training data) Default: %(default)s.')
train_params.add_argument('--embed-dropout',
type=multiple_values(2, data_type=float),
default=(.0, .0),
help='Dropout probability for source & target embeddings. Use "x:x" to specify separate '
'values. Default: %(default)s.')
train_params.add_argument('--transformer-dropout-attention',
type=multiple_values(2, data_type=float),
default=(0.1, 0.1),
help='Dropout probability for multi-head attention. Use "x:x" to specify separate '
'values for encoder & decoder. Default: %(default)s.')
train_params.add_argument('--transformer-dropout-act',
type=multiple_values(2, data_type=float),
default=(0.1, 0.1),
help='Dropout probability before activation in feed-forward block. Use "x:x" to specify '
'separate values for encoder & decoder. Default: %(default)s.')
train_params.add_argument('--transformer-dropout-prepost',
type=multiple_values(2, data_type=float),
default=(0.1, 0.1),
help='Dropout probability for pre/postprocessing blocks. Use "x:x" to specify separate '
'values for encoder & decoder. Default: %(default)s.')
train_params.add_argument('--optimizer',
default=C.OPTIMIZER_ADAM,
choices=C.OPTIMIZERS,
help='SGD update rule. Default: %(default)s.')
train_params.add_argument('--optimizer-betas',
type=multiple_values(2, data_type=float),
default=(0.9, 0.999),
help='Beta1 and beta2 for Adam-like optimizers, specified "x:x". Default: %(default)s.')
train_params.add_argument('--optimizer-eps',
type=float_greater_or_equal(0),
default=1e-08,
help='Optimizer epsilon. Default: %(default)s.')
train_params.add_argument('--dist',
action='store_true',
help='Run in distributed training mode. When using this option, launch training with '
'`torchrun --nproc_per_node N -m sockeye.train`. Increasing the number of processes '
'multiplies the effective batch size (ex: batch_size 2560 with `--nproc_per_node 4` '
'gives effective batch size 10240).')
train_params.add_argument('--initial-learning-rate',
type=float,
default=0.0002,
help='Initial learning rate. Default: %(default)s.')
train_params.add_argument('--weight-decay',
type=float,
default=0.0,
help='Weight decay constant. Default: %(default)s.')
train_params.add_argument('--momentum',
type=float,
default=0.0,
help='Momentum constant. Default: %(default)s.')
train_params.add_argument('--gradient-clipping-threshold',
type=float,
default=1.0,
                              help='Clip absolute gradient values greater than this value. '
'Set to negative to disable. Default: %(default)s.')
train_params.add_argument('--gradient-clipping-type',
choices=C.GRADIENT_CLIPPING_TYPES,
default=C.GRADIENT_CLIPPING_TYPE_NONE,
help='The type of gradient clipping. Default: %(default)s.')
train_params.add_argument('--learning-rate-scheduler-type',
default=C.LR_SCHEDULER_PLATEAU_REDUCE,
choices=C.LR_SCHEDULERS,
help='Learning rate scheduler type. Default: %(default)s.')
train_params.add_argument('--learning-rate-t-scale',
type=float,
default=1.0,
help="Step number is multiplied by this value when determining learning rate for the "
"current step. Default: %(default)s.")
train_params.add_argument('--learning-rate-reduce-factor',
type=float,
default=0.9,
help="Factor to multiply learning rate with "
"(for 'plateau-reduce' learning rate scheduler). Default: %(default)s.")
train_params.add_argument('--learning-rate-reduce-num-not-improved',
type=int,
default=8,
help="For 'plateau-reduce' learning rate scheduler. Adjust learning rate "
"if <optimized-metric> did not improve for x checkpoints. Default: %(default)s.")
train_params.add_argument('--learning-rate-warmup',
type=int,
default=0,
help="Number of warmup steps. If set to x, linearly increases learning rate from 10%% "
"to 100%% of the initial learning rate. Default: %(default)s.")
train_params.add_argument('--fixed-param-strategy',
default=None,
choices=C.FIXED_PARAM_STRATEGY_CHOICES,
help="Fix various parameters during training using a named strategy. The strategy "
"name indicates which parameters will be fixed (Wuebker et al., 2018). "
"Default: %(default)s.")
train_params.add_argument('--fixed-param-names',
default=[],
nargs='*',
help="Manually specify names of parameters to fix during training. Default: %(default)s.")
train_params.add_argument(C.TRAIN_ARGS_MONITOR_BLEU,
default=500,
type=int,
help='x>0: decode x sampled sentences from validation data and '
'compute evaluation metrics. x==-1: use full validation data. Default: %(default)s.')
train_params.add_argument(C.TRAIN_ARGS_STOP_ON_DECODER_FAILURE,
action="store_true",
help='Stop training as soon as any checkpoint decoder fails (e.g. because there is not '
'enough GPU memory). Default: %(default)s.')
train_params.add_argument('--seed',
type=int,
default=1,
help='Random seed. Default: %(default)s.')
train_params.add_argument('--keep-last-params',
type=int,
default=-1,
help='Keep only the last n params files, use -1 to keep all files. Default: %(default)s')
train_params.add_argument('--keep-initializations',
action="store_true",
help='In addition to keeping the last n params files, also keep params from checkpoint 0.')
train_params.add_argument('--cache-last-best-params',
required=False,
type=int,
default=0,
help='Cache the last n best params files, as distinct from the last n in sequence. '
'Use 0 or negative to disable. Default: %(default)s')
train_params.add_argument('--cache-strategy',
required=False,
type=str,
default=C.AVERAGE_BEST,
choices=C.AVERAGE_CHOICES,
help='Strategy to use when deciding which are the "best" params files. '
'Default: %(default)s')
train_params.add_argument('--cache-metric',
required=False,
type=str,
default=C.PERPLEXITY,
choices=C.METRICS,
help='Metric to use when deciding which are the "best" params files. '
'Default: %(default)s')
train_params.add_argument('--dry-run',
action='store_true',
help="Do not perform any actual training, but print statistics about the model"
" and mode of operation.")
def add_train_cli_args(params):
add_training_io_args(params)
add_model_parameters(params)
add_training_args(params)
add_device_args(params)
add_logging_args(params)
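# Illustrative sketch (not part of the original module): the training CLI builds its
# parser roughly like this (assuming sockeye.train wires it up this way):
#
#     parser = ConfigArgumentParser(description="Train Sockeye models")
#     add_train_cli_args(parser)
#     args = parser.parse_args()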
def add_translate_cli_args(params):
add_inference_args(params)
add_device_args(params)
add_logging_args(params)
def add_score_cli_args(params):
add_training_data_args(params, required=True)
add_vocab_args(params)
add_device_args(params)
add_batch_args(params, default_batch_size=56, default_batch_type=C.BATCH_TYPE_SENTENCE)
params = params.add_argument_group("Scoring parameters")
params.add_argument("--model", "-m", required=True,
help="Model directory containing trained model.")
params.add_argument(C.TRAINING_ARG_MAX_SEQ_LEN,
type=multiple_values(num_values=2, greater_or_equal=1),
default=None,
                        help='Maximum sequence length in tokens. '
'Use "x:x" to specify separate values for src&tgt. Default: Read from model.')
# common params with translate CLI
add_length_penalty_args(params)
add_brevity_penalty_args(params)
params.add_argument("--output", "-o", default=None,
help="File to write output to. Default: STDOUT.")
params.add_argument('--output-type',
default=C.OUTPUT_HANDLER_SCORE,
choices=C.OUTPUT_HANDLERS_SCORING,
help='Output type. Default: %(default)s.')
params.add_argument('--score-type',
choices=C.SCORING_TYPE_CHOICES,
default=C.SCORING_TYPE_DEFAULT,
help='Score type to output. Default: %(default)s')
params.add_argument('--softmax-temperature',
type=float,
default=None,
help='Controls peakiness of model predictions. Values < 1.0 produce '
'peaked predictions, values > 1.0 produce smoothed distributions.')
params.add_argument('--dtype', default=None, choices=[None, C.DTYPE_FP32, C.DTYPE_FP16, C.DTYPE_INT8],
help="Data type. Default: %(default)s infers from saved model.")
add_logging_args(params)
def add_inference_args(params):
decode_params = params.add_argument_group("Inference parameters")
decode_params.add_argument(C.INFERENCE_ARG_INPUT_LONG, C.INFERENCE_ARG_INPUT_SHORT,
default=None,
help='Input file to translate. One sentence per line. '
'If not given, will read from stdin.')
decode_params.add_argument(C.INFERENCE_ARG_INPUT_FACTORS_LONG, C.INFERENCE_ARG_INPUT_FACTORS_SHORT,
required=False,
nargs='+',
type=regular_file(),
default=None,
                               help='List of input files containing additional source factors, '
'each token-parallel to the source. Default: %(default)s.')
decode_params.add_argument('--json-input',
action='store_true',
default=False,
help="If given, the CLI expects string-serialized json objects as input."
"Requires at least the input text field, for example: "
"{'text': 'some input string'} "
"Optionally, a list of factors can be provided: "
"{'text': 'some input string', 'factors': ['C C C', 'X X X']}.")
decode_params.add_argument(C.INFERENCE_ARG_OUTPUT_LONG, C.INFERENCE_ARG_OUTPUT_SHORT,
default=None,
help='Output file to write translations to. '
'If not given, will write to stdout.')
decode_params.add_argument('--models', '-m',
required=True,
nargs='+',
help='Model folder(s). Use multiple for ensemble decoding. '
'Model determines config, best parameters and vocab files.')
decode_params.add_argument('--checkpoints', '-c',
default=None,
type=int,
nargs='+',
help='If not given, chooses best checkpoints for model(s). '
                                    'If specified, must have the same length as --models and be integers.')
decode_params.add_argument('--nbest-size',
type=int_greater_or_equal(1),
default=1,
help='Size of the nbest list of translations. Default: %(default)s.')
decode_params.add_argument('--beam-size', '-b',
type=int_greater_or_equal(1),
default=5,
help='Size of the beam. Default: %(default)s.')
decode_params.add_argument('--greedy', '-g',
action="store_true",
default=False,
help='Enables an alternative, faster greedy decoding implementation. It does not '
                                    'support batch decoding or ensembles, and hypothesis scores '
'are not normalized. Default: %(default)s.')
decode_params.add_argument('--beam-search-stop',
choices=[C.BEAM_SEARCH_STOP_ALL, C.BEAM_SEARCH_STOP_FIRST],
default=C.BEAM_SEARCH_STOP_ALL,
help='Stopping criteria. Quit when (all) hypotheses are finished '
'or when a finished hypothesis is in (first) position. Default: %(default)s.')
decode_params.add_argument('--batch-size',
type=int_greater_or_equal(1),
default=1,
help='Batch size during decoding. Determines how many sentences are translated '
'simultaneously. Default: %(default)s.')
decode_params.add_argument('--chunk-size',
type=int_greater_or_equal(1),
default=None,
help='Size of the chunks to be read from input at once. The chunks are sorted and then '
                                    'split into batches. The larger the chunk size, the better the grouping '
                                    'of segments of similar length, and the higher the throughput.'
' Default: %d without batching '
'and %d * batch_size with batching.' % (C.CHUNK_SIZE_NO_BATCHING,
C.CHUNK_SIZE_PER_BATCH_SEGMENT))
decode_params.add_argument('--sample',
type=int_greater_or_equal(0),
default=None,
nargs='?',
const=0,
help='Sample from softmax instead of taking best. Optional argument will restrict '
'sampling to top N vocabulary items at each step. Default: %(default)s.')
decode_params.add_argument('--seed',
type=int,
default=None,
help='Random seed used if sampling. Default: %(default)s.')
decode_params.add_argument('--ensemble-mode',
type=str,
default='linear',
choices=['linear', 'log_linear'],
help='Ensemble mode. Default: %(default)s.')
decode_params.add_argument('--bucket-width',
type=int_greater_or_equal(0),
default=10,
help='Bucket width for encoder steps. 0 means no bucketing. Default: %(default)s.')
decode_params.add_argument('--max-input-length',
type=int_greater_or_equal(1),
default=None,
help='Maximum input sequence length. Default: value from model(s).')
decode_params.add_argument('--max-output-length-num-stds',
type=int,
default=C.DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH,
help='Number of target-to-source length ratio standard deviations from training to add '
'to calculate maximum output length for beam search for each sentence. '
'Default: %(default)s.')
decode_params.add_argument('--max-output-length',
type=int_greater_or_equal(1),
default=None,
help='Maximum number of words to generate during translation. '
'If None, it will be computed automatically. Default: %(default)s.')
decode_params.add_argument('--restrict-lexicon',
nargs='+',
type=multiple_values(num_values=2, data_type=str),
default=None,
help="Specify top-k lexicon to restrict output vocabulary to the k most likely context-"
"free translations of the source words in each sentence (Devlin, 2017). See the "
"lexicon module for creating top-k lexicons. To use multiple lexicons, provide "
"'--restrict-lexicon key1:path1 key2:path2 ...' and use JSON input to specify the "
"lexicon for each sentence: "
"{\"text\": \"some input string\", \"restrict_lexicon\": \"key\"}. "
"Default: %(default)s.")
decode_params.add_argument('--restrict-lexicon-topk',
type=int,
default=None,
help="Specify the number of translations to load for each source word from the lexicon "
"given with --restrict-lexicon. Default: Load all entries from the lexicon.")
decode_params.add_argument('--strip-unknown-words',
action='store_true',
default=False,
help='Remove any <unk> symbols from outputs. Default: %(default)s.')
decode_params.add_argument('--prevent-unk',
action='store_true',
default=False,
help='Avoid generating <unk> during decoding. Default: %(default)s.')
decode_params.add_argument('--output-type',
default='translation',
choices=C.OUTPUT_HANDLERS,
help='Output type. Default: %(default)s.')
# common params with score CLI
add_length_penalty_args(decode_params)
add_brevity_penalty_args(decode_params)
decode_params.add_argument('--dtype', default=None, choices=[None, C.DTYPE_FP32, C.DTYPE_FP16, C.DTYPE_INT8],
help="Data type. Default: %(default)s infers from saved model.")
def add_length_penalty_args(params):
params.add_argument('--length-penalty-alpha',
default=1.0,
type=float,
help='Alpha factor for the length penalty used in beam search: '
'(beta + len(Y))**alpha/(beta + 1)**alpha. A value of 0.0 will therefore turn off '
'length normalization. Default: %(default)s.')
params.add_argument('--length-penalty-beta',
default=0.0,
type=float,
help='Beta factor for the length penalty used in scoring: '
'(beta + len(Y))**alpha/(beta + 1)**alpha. Default: %(default)s')
def add_brevity_penalty_args(params):
params.add_argument('--brevity-penalty-type',
default='none',
type=str,
choices=[C.BREVITY_PENALTY_NONE, C.BREVITY_PENALTY_LEARNED, C.BREVITY_PENALTY_CONSTANT],
help='If specified, adds brevity penalty to the hypotheses\' scores, calculated with learned '
'or constant length ratios. The latter, by default, uses the length ratio (|ref|/|hyp|) '
'estimated from the training data and averaged over models. Default: %(default)s.')
params.add_argument('--brevity-penalty-weight',
default=1.0,
type=float_greater_or_equal(0.0),
help='Scaler for the brevity penalty in beam search: weight * log(BP) + score. Default: %(default)s')
params.add_argument('--brevity-penalty-constant-length-ratio',
default=0.0,
type=float_greater_or_equal(0.0),
help='Has effect if --brevity-penalty-type is set to \'constant\'. If positive, overrides the length '
'ratio, used for brevity penalty calculation, for all inputs. If zero, uses the average of length '
'ratios from the training data over all models. Default: %(default)s.')
def add_evaluate_args(params):
eval_params = params.add_argument_group("Evaluate parameters")
eval_params.add_argument('--references', '-r',
required=True,
type=str,
help="File with references.")
eval_params.add_argument('--hypotheses', '-i',
type=file_or_stdin(),
default=[sys.stdin],
nargs='+',
help="File(s) with hypotheses. If none will read from stdin. Default: stdin.")
eval_params.add_argument('--metrics',
nargs='+',
choices=C.EVALUATE_METRICS,
default=[C.BLEU, C.CHRF, C.TER],
help='List of metrics to compute. Default: %(default)s.')
eval_params.add_argument('--sentence', '-s',
action="store_true",
help="Show sentence-level metrics. Default: %(default)s.")
eval_params.add_argument('--offset',
type=float,
default=0.01,
help="Numerical value of the offset of zero n-gram counts for BLEU. Default: %(default)s.")
eval_params.add_argument('--not-strict', '-n',
action="store_true",
help="Do not fail if number of hypotheses does not match number of references. "
"Default: %(default)s.")
def add_build_vocab_args(params):
params.add_argument('-i', '--inputs', required=True, nargs='+', help='List of text files to build vocabulary from.')
params.add_argument('-o', '--output', required=True, type=str, help="Output filename to write vocabulary to.")
add_vocab_args(params)
add_process_pool_args(params)
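# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original module):
# the add_*_args helpers above are designed to populate a plain
# argparse.ArgumentParser. The description string and the example argv
# below are assumptions, not values taken from this module.
if __name__ == '__main__':
    import argparse
    _parser = argparse.ArgumentParser(description='translate (sketch)')
    add_inference_args(_parser)  # also registers length- and brevity-penalty args
    _args = _parser.parse_args(['--models', 'model_dir', '--beam-size', '5'])
    print(_args.models, _args.beam_size)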
|
|
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
from typing import TYPE_CHECKING
from saspy.sasdecorator import procDecorator
if TYPE_CHECKING:
from saspy.sasresults import SASresults
from saspy.sasbase import SASdata
class SASutil:
"""
This class allows SAS BASE procedures to be called as Python3 objects, using SAS as the computational engine.
This class and all the useful work in this package require a licensed version of SAS.
#. Identify the product of the procedure (SAS/STAT, SAS/ETS, SAS Enterprise Miner, etc).
#. Find the corresponding file in saspy sasstat.py, sasets.py, sasml.py, etc.
#. Create a set of valid statements. Here is an example:
.. code-block:: ipython3
lset = {'ARIMA', 'BY', 'ID', 'MACURVES', 'MONTHLY', 'OUTPUT', 'VAR'}
The case and order of the items will be formatted.
#. Call the `doc_convert` method to generate the method call as well as the docstring markup
.. code-block:: ipython3
import saspy
print(saspy.sasdecorator.procDecorator.doc_convert(lset, 'x11')['method_stmt'])
print(saspy.sasdecorator.procDecorator.doc_convert(lset, 'x11')['markup_stmt'])
The `doc_convert` method takes two arguments: a list of the valid statements and the proc name. It returns a dictionary with two keys, method_stmt and markup_stmt. These outputs can be copied into the appropriate product file.
#. Add the proc decorator to the new method.
The decorator should be on the line above the method declaration.
The decorator takes one argument, the required statements for the procedure. If there are no required statements, then an empty `{}` should be passed.
Here are two examples, one with no required arguments:
.. code-block:: ipython3
@procDecorator.proc_decorator({})
def esm(self, data: ['SASdata', str] = None, ...
And one with required arguments:
.. code-block:: ipython3
@procDecorator.proc_decorator({'model'})
def mixed(self, data: ['SASdata', str] = None, ...
#. Add a link to the SAS documentation plus any additional details that will be helpful to users.
#. Write at least one test to exercise the procedures and include it in the
appropriate testing file.
If you have questions, please open an issue in the GitHub repo and the maintainers will be happy to help.
"""
def __init__(self, session, *args, **kwargs):
"""
Submit an initial set of macros to prepare the SAS system
:param session:
:param args:
:param kwargs:
"""
self.sasproduct = "util"
# create logging
# logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.DEBUG)
self.logger = logging.getLogger(__name__)
self.logger.setLevel(logging.WARN)
self.sas = session
self.logger.debug("Initialization of SAS Macro: " + self.sas.saslog())
@procDecorator.proc_decorator({})
def hpimpute(self, data: ['SASdata', str] = None,
code: str = None,
freq: str = None,
id: str = None,
impute: str = None,
input: [str, list, dict] = None,
performance: str = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the HPIMPUTE procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=prochp&docsetTarget=prochp_hpimpute_toc.htm&locale=en
:param data: SASdata object or string. This parameter is required.
:param code: The code variable can only be a string type.
:param freq: The freq variable can only be a string type.
:param id: The id variable can only be a string type.
:param impute: The impute variable can only be a string type.
:param input: The input variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
:param performance: The performance variable can only be a string type.
:param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
"""
@procDecorator.proc_decorator({})
def hpbin(self, data: ['SASdata', str] = None,
code: str = None,
freq: str = None,
id: [str, list] = None,
input: [str, list, dict] = None,
performance: str = None,
target: [str, list, dict] = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the HPBIN procedure.
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=prochp&docsetTarget=prochp_hpbin_syntax.htm&locale=en
:param data: SASdata object or string. This parameter is required.
:param code: The code variable can only be a string type.
:param freq: The freq variable can only be a string type.
:param id: The id variable can be a string or list type.
:param input: The input variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
:param performance: The performance variable can only be a string type.
:param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
:param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
"""
@procDecorator.proc_decorator({})
def hpsample(self, data: ['SASdata', str] = None,
cls: [str, list] = None,
performance: str = None,
target: [str, list, dict] = None,
var: str = None,
procopts: [str, list, dict] = None,
stmtpassthrough: [str, list, dict] = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the HPSAMPLE procedure.
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=prochp&docsetTarget=prochp_hpsample_toc.htm&locale=en
:param data: SASdata object or string. This parameter is required.
:param cls: The cls variable can be a string or list type. It refers to the categorical, or nominal variables.
:param performance: The performance variable can only be a string type.
:param target: The target variable can be a string, list or dict type. It refers to the dependent, y, or label variable.
:param var: The var variable can only be a string type.
:param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
"""
@procDecorator.proc_decorator({})
def univariate(self, data: ['SASdata', str] = None,
by: [str, list] = None,
cdfplot: str = None,
cls: [str, list] = None,
freq: str = None,
histogram: str = None,
id: [str, list] = None,
inset: str = None,
output: [str, bool, 'SASdata'] = None,
ppplot: str = None,
probplot: str = None,
qqplot: str = None,
var: str = None,
weight: str = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
"""
Python method to call the UNIVARIATE procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=procstat&docsetTarget=procstat_univariate_syntax.htm&locale=en
The PROC UNIVARIATE statement invokes the procedure. The VAR statement specifies the numeric variables to be analyzed, and it is required if the OUTPUT statement is used to save summary statistics in an output data set. If you do not use the VAR statement, all numeric variables in the data set are analyzed. The plot statements (CDFPLOT, HISTOGRAM, PPPLOT, PROBPLOT, and QQPLOT) create graphical displays, and the INSET statement enhances these displays by adding a table of summary statistics directly on the graph. You can specify one or more of each of the plot statements, the INSET statement, and the OUTPUT statement. If you use a VAR statement, the variables listed in a plot statement must be a subset of the variables listed in the VAR statement.
You can specify a BY statement to obtain separate analyses for each BY group. The FREQ statement specifies a variable whose values provide the frequency for each observation. The ID statement specifies one or more variables to identify the extreme observations. The WEIGHT statement specifies a variable whose values are used to weight certain statistics.
You can use a CLASS statement to specify one or two variables that group the data into classification levels. The analysis is carried out for each combination of levels in the input data set, or within each BY group if you also specify a BY statement. You can use the CLASS statement with plot statements to create comparative displays, in which each cell contains a plot for one combination of classification levels.
:param data: SASdata object or string. This parameter is required.
:param by: The by variable can be a string or list type.
:param cdfplot: The cdfplot variable can only be a string type.
:param cls: The cls variable can be a string or list type. It refers to the categorical, or nominal variables.
:param freq: The freq variable can only be a string type.
:param histogram: The histogram variable can only be a string type.
:param id: The id variable can be a string or list type.
:param inset: The inset variable can only be a string type.
:param output: The output variable can be a string, boolean or SASdata type. The member name for a boolean is "_output".
:param ppplot: The ppplot variable can only be a string type.
:param probplot: The probplot variable can only be a string type.
:param qqplot: The qqplot variable can only be a string type.
:param var: The var variable can only be a string type.
:param weight: The weight variable can only be a string type.
:param procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:param stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object
"""
|
|
import os,sys,re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt  # needed by the optional plotting in calc_typeI_error
import warnings
warnings.filterwarnings('ignore')
#sys.path.append(os.path.abspath('../../new/'))
from predictions_library import *
myHOME = os.path.abspath('..')
start, end = "\033[1m", "\033[0;0m" # this is to print bold
def initialize_notebook_and_load_datasets():
'''
Initialize the notebook and load the datasets.
input: nothing
output: data (positive and negative sequences with per-residue aa frequencies)
'''
# load datasets
positives = open_fastas(myHOME + '/data/ARG3_O1_O2_clustered_rescored.fasta_bin14_threshold10.0_37923positives.fasta')
negatives = open_fastas(myHOME + '/data/ARG3_O1_O2_clustered_rescored.fasta_bg_37923negatives.fasta')
positives['TAD']=1
negatives['TAD']=0
data = pd.concat([pd.DataFrame(i) for i in [positives, negatives]], axis=0)
# get aa frequencies
for i in aa:
data[i] = np.array([seq.count(i)/30. for seq in data.index])
# function to extract distribution of sequence in bins
def fasta_header_extract(fileName):
headers, sequences = [], []
for i in open(fileName):
if i[0]=='>':
tmp = i.strip().split('_')
headers.append(tmp)
continue
else:
sequences.append(i.strip())
values = np.array(headers)[:,1:6].astype(int)
return pd.DataFrame(values, index=sequences, columns = ['bg','bin1','bin2','bin3','bin4'])
# from data, take out sequences with no reads in bins2,3 and 4 (bin distribution is extracted from the fasta headers)
positives_bins = fasta_header_extract(myHOME + '/data/ARG3_O1_O2_clustered_rescored.fasta_bin14_threshold10.0_37923positives.fasta')
negatives = open_fastas(myHOME + '/data/ARG3_O1_O2_clustered_rescored.fasta_bg_37923negatives.fasta')
positives_out = positives_bins[(positives_bins['bin2']==0) & (positives_bins['bin3']==0) & (positives_bins['bin4']==0)].index
data = data.loc[ set(data.index) - set(positives_out)]
return data
def generate_split_index(data):
'''
function generates indices for a train/test/validation split
input: data
output: _train, _test, _valid are indices of data, split 80/10/10
'''
idx = np.arange(data.shape[0])
np.random.seed(0)  # seed the RNG so the shuffle (and hence the split) is reproducible
np.random.shuffle(idx)
_10 = len(idx)
_8, _9 = int(0.8*_10), int(0.9*_10)
_train = data.index[idx[:_8]]
_test = data.index[idx[_8:_9]]
_valid = data.index[idx[_9:]]
return _train, _test, _valid
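# Hedged sketch: exercising generate_split_index on a toy DataFrame
# (the shape and column names below are made up; any DataFrame works).
_toy = pd.DataFrame(np.random.rand(100, 2), columns=['a', 'b'])
_train_idx, _test_idx, _valid_idx = generate_split_index(_toy)
assert len(_train_idx) + len(_test_idx) + len(_valid_idx) == len(_toy)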
def calculate_charge(seq):
'''
calculates the charge/residue of the sequence
'''
charge = seq.count('K') + seq.count('R') + seq.count('H') * 0.5 \
- seq.count('D') - seq.count('E')
return (charge/len(seq))
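# Quick illustration of calculate_charge on made-up peptides:
# 'KRH' carries +1 +1 +0.5 over 3 residues, 'DE' carries -2 over 2 residues.
assert abs(calculate_charge('KRH') - 2.5 / 3) < 1e-9
assert calculate_charge('DE') == -1.0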
def open_fasta(fasta):
'''
function open fasta file
input: fasta file
output: sequence
'''
sequence = []
for i in open(fasta):
# skip header lines
if i[0]=='>': continue
# else take line and later join
sequence.append(i.strip('\n'))
return ''.join(sequence)
def open_ss(ss):
'''
function open .horiz files
input: .horiz file
output: sequence of ss elements
'''
sequence = []
for i in open(ss):
# jump lines that are not relevant
if i[:4]!='Pred': continue
sequence.append(i[6:].strip('\n'))
return ''.join(sequence).replace('C','-')
def ohe_single(sequence, ss, **kwargs): #kwargs are aa, ss_psipred_set):
'''
ohe a single protein
input: protein sequence and secondary structure (psipred format). list of aa and set of ss
AS USED WITH THE TRAINING DATA!!!
output: one hot encoded data of the protein as input for the neural network
'''
# if aa and ss_psipred lists were not provided, they are defined here
if 'aa_list' not in kwargs.keys():
print('using local aa_list')
aa_list = ['R','H','K','D','E','S','T','N','Q','A','V','L','I','M','F' ,'Y', 'W', 'C','G','P'] # (or just aa from predictions_library)
else:
aa_list = kwargs['aa_list']
if 'ss_list' not in kwargs.keys():
print('using local ss_list')
ss_list = ['E', 'H', '-']
else:
ss_list = kwargs['ss_list']
# translate ss into ohe
categorical_ss = np.zeros(shape=(len(ss),3))
# go over each position
for n,i in enumerate(ss):
# fill 1 at the position that matches the index in ss_psipred_set
position = ss_list.index(i)
categorical_ss[n, position] = 1
# translate sequence into ohe
categorical_seq = np.zeros(shape=(len(sequence),20))
# go over each position
for n,i in enumerate(sequence):
# fill 1 at the position that matches the index in aa_list
position = aa_list.index(i)
categorical_seq[n, position] = 1
# return merged matrix
return np.hstack([categorical_seq, categorical_ss]).reshape(1, len(ss), 23, 1)
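# Hedged sketch: one-hot encoding a made-up 30-residue peptide. The secondary
# structure string uses the psipred alphabet ('H', 'E', '-') expected by the
# local ss_list fallback; the sequence itself is purely illustrative.
_demo_seq = 'MKT' * 10                      # 30 residues
_demo_ss = 'H' * 10 + 'E' * 10 + '-' * 10   # 30 secondary-structure states
_demo_ohe = ohe_single(_demo_seq, _demo_ss)
print(_demo_ohe.shape)                      # expected: (1, 30, 23, 1)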
def translate_dna2aa(dna):
'''
function translate nucleotide sequence into aminoacid sequence
input: dna sequence as a string
output: aa sequence as a string
'''
# codons list
gene_code = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M', 'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K', 'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L', 'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q', 'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V', 'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E', 'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S', 'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'_', 'TAG':'_', 'TGC':'C', 'TGT':'C', 'TGA':'_', 'TGG':'W'}
codon_seq = [dna[i:i+3] for i in np.arange(0,len(dna),3)]
aa_seq = [gene_code[i] for i in codon_seq]
return ''.join(aa_seq)
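# Small sanity check for translate_dna2aa: ATG GAA TAA -> 'M', 'E', stop ('_').
assert translate_dna2aa('ATGGAATAA') == 'ME_'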
def read_wig(wig):
'''
function read wiggle file
input: wiggle file
output: DataFrame indexed by position with a single 'counts' column
'''
positions, counts = [],[]
f = open(wig)
try:
while True:
line = next(f)
_, position, count = line.strip('\n').split('\t')
positions.append(position)
counts.append(count)
except StopIteration:
print('done! ' + wig + ' file loaded')
return pd.DataFrame(np.array(counts, 'float'), index=np.array(positions, 'int'), columns=['counts'])
def make_m_dict(Stark_data_annot, gfp_p, gfp_m, FOLD_CHANGE_THRESHOLD=2, MINIMUM_READS=100):
'''
function generate a table of measurements from Stark's (or other) data
INPUT: Stark_data_annot (including gene name, start and end positions in gfp_p and gfp_m)
gfp_p: wig file with positions and counts for sorting GFP positive
gfp_m: wig file with positions and counts for sorting GFP negative
FOLD_CHANGE_THRESHOLD: minimal fold change gfp_p/gfp_m accepted to generate data
MINIMUM_READS: minimum absolute counts in gfp_p accepted to generate data
OUTPUT: table (key: gene name
values: counts over the length of each protein)
'''
k,v=[],[]
for TF_name in Stark_data_annot.index:
# extract gene values from plus and minus sets
start, end = Stark_data_annot.loc[TF_name, ['start','end']]
plus = gfp_p.iloc[start:end].counts.values
minus = gfp_m.iloc[start:end].counts.values
# averaged values every 3 nt to plot together with protein
plus = np.array([np.mean(plus[i:i+3]) for i in np.arange(0, len(plus), 3)])
minus = np.array([np.mean(minus[i:i+3]) for i in np.arange(0, len(minus), 3)])
# take values of tADs whose plus/minus >fold_change_threshold and plus >minimum_reads counts
tAD = np.nan_to_num(plus/minus)
tAD = np.array([k if k>FOLD_CHANGE_THRESHOLD and i>MINIMUM_READS else 0 for i,k in zip(plus, tAD)])
tAD = np.nan_to_num((tAD - np.min(tAD)) / (np.max(tAD) - np.min(tAD)))
k.append(TF_name)
v.append(tAD)
# finally define m_dict, a dictionary for m
m_dict = dict(zip(k,v))
return m_dict
def make_p_dict(Stark_data_annot, deep_model, fastas_folder='../data/Stark_tADs/fastas/', horiz_folder='../data/Stark_tADs/horiz/'):
'''
function generates a table (dictionary) with keys=gene names (from f) and values=predictions from the best NN model
INPUT: Stark_data_annot (I could use f instead, but I keep this to make it equal to make_m_dict)
fastas_folder directory
horiz_folder directory
OUTPUT:table (keys=gene names, values=prediction scores over the length of each protein)
'''
k,v = [],[]
for TF_name in Stark_data_annot.index:
# if ss file is not here yet, skip
if not os.path.exists(horiz_folder+ TF_name + '.horiz'): continue
# open fasta and horiz files and generate ohe
seq = open_fasta(fastas_folder+ TF_name + '.fasta')
ss = open_ss(horiz_folder+ TF_name + '.horiz')
single = ohe_single(seq,ss, aa_list=aa, ss_list=ss_psipred_set)
# predict using deep_model
predictions = []
for i in range(0, len(ss)-30):
region = deep_model.predict(single[0, i:i+30, :, 0].reshape(1,30,23,1))[0][0]
predictions.append(region)
k.append(TF_name)
v.append(np.array(predictions))
# finally define p_dict, a dictionary for p
p_dict = dict(zip(k,v))
return p_dict
def build_distribution(f, p_dict, n_iter=10000):
'''
function builds a distribution of correlation coefficients of two vectors
as one of them is permuted in each iteration.
INPUT: f (index of genes from table)
p_dict (table. keys=gene names (as in f), values=(counts over the length of the protein))
m = vector of concatenated counts from m_dict (taken from the enclosing scope)
OUTPUT: list of correlation coefficients.
'''
# build distribution of correlations
corr_distrib = []
for n in range(n_iter):
# permutation of p
k = f[:]
np.random.shuffle(k)
p_permut = np.concatenate([p_dict[i] for i in k])
# compute correlation
corr_distrib.append(np.corrcoef(m, p_permut)[0][1])
return corr_distrib
def calc_typeI_error(corr_values, point, **kwargs):
'''
function makes a distribution out of the correlation values (corr_values)
and calculates the area under the "normal" curve as extreme or more extreme than point,
that is, the probability that a number falls to the right of the point in that curve
(if point is positive, else to the left of the point).
INPUT = corr_values (list of values of correlation) and point (single sample to test)
optional = bins (bins=100) and plot (plot=True)
OUTPUT = type I error or area under the curve from point to the right or
point to the left if point is negative value.
'''
# allow user to input bins number
if 'bins' in kwargs.keys(): bins=kwargs['bins']
else: bins=100
# make histogram of corr_values
y,x = np.histogram(corr_values, bins=bins)
# have to make x.shape=yshape
x = x[:-1]
# allow user to plot the distribution
if 'plot' in kwargs.keys():
if kwargs['plot']==True:
yc = np.convolve(y, np.ones(10)/10, "same")
if point>0:
_right = np.where(x<point)
_left = np.where(x>=point)
plt.plot(x, yc, c="k")
plt.fill_between(x[_right], yc[_right], color='g', alpha=0.4)
plt.fill_between(x[_left], yc[_left], color='b', alpha=0.3)
else:
_left = np.where(x<=point)
_right = np.where(x>point)
plt.plot(x, yc, c="k")
plt.fill_between(x[_right], yc[_right], color='b', alpha=0.3)
plt.fill_between(x[_left], yc[_left], color='g', alpha=0.4)
# if point is negative, flip the curve and make point positive
if point<0:
point = point*-1
y = y[::-1]
# measure total area and area from point to its right
total_area = np.sum(y)
index_point2right = np.where(x>=point)
area_point_right = np.sum(y[index_point2right])
# typeI error
probaTypeI = area_point_right *1. / total_area
return probaTypeI
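# Hedged sketch: estimate a one-sided type I error from a synthetic null
# distribution of correlation coefficients (numbers are purely illustrative).
_null_corrs = np.random.normal(loc=0.0, scale=0.1, size=10000)
_p = calc_typeI_error(_null_corrs, 0.25)    # area of the null to the right of 0.25
print('approx. type I error: {:.4f}'.format(_p))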
# create matrix to store results. This matrix should contain for each position, all possible 20 aa with their
# corresponding tAD probabilities. Dims = [seq_position, aa.index] = predictions
def compute_mutagenesis(ohe_data, refId, deep_model):
all_samples = ohe_data.shape[0]
#results = np.zeros(shape=(all_samples,30,20))
results = np.zeros(shape=(30,20))
# go over all samples and mutate every position in a ohe setting.
#sample = ohe_data[bestIdx[0]]#ohe_data[10].copy()
sample = ohe_data[refId]
# first of all measure the tAD probability of the original sequence
prediction = deep_model.predict(sample.reshape(np.append(1, sample.shape)))
print('original_prediction: {}'.format(prediction[0][0]))
# list of amino acids in ohe format
original_seq = np.where(sample[:,:20,0]==1)[1] #ohe_data[0,:,:20,0]==1)[1]
# start filling results with original_seq
for n in range(30):
results[n,original_seq[n]]=prediction[0][0]
# go over all positions in the sequence
for position in range(30): #len(original_seq)
# ohe_aminoacid in current position
original_position = original_seq[position]
# list all possible ohe aminoacids
ohe_positions = list(np.arange(20))
# remove the original aa from the list
ohe_positions.remove(original_position)
# copy into new instance to avoid overwriting and to restore all other positions to their
# original values
this_sample = sample.copy()
# start mutation in that position
for mutation in ohe_positions:
# reset all aa in position to 0
this_sample[position,:20,0]=0
# make mutation
this_sample[position,mutation,0]=1
# predict the mutant tAD probability
tmp = deep_model.predict(this_sample.reshape(np.append(1, sample.shape)))
# If there is a radical change, let me know
if (prediction > 0.5 and tmp[0][0] < 0.5) or (prediction < 0.5 and tmp[0][0] > 0.5):
# print the original sequence
SEQUENCE = ''.join([aa[S] for S in original_seq])
print(SEQUENCE)
# print which mutation at which position
print('{}/{} at position {} -> score {} into {}'.format(aa[original_position], aa[mutation], position, prediction, tmp[0][0]))
#print(position, aa.index(mutation))
#results[sample_number, position, mutation] = prediction
#result.append([aa[mutation], aa[original_position]])
results[position,mutation]=tmp[0][0]
return prediction, results
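# Hedged usage sketch for compute_mutagenesis. Assumption: deep_model exposes a
# Keras-style predict(batch) returning an array of shape (1, 1); the stub below
# exists only to illustrate the expected input shape (n_samples, 30, 23, 1).
class _ConstantModel:
    def predict(self, batch):
        return np.array([[0.5]])   # constant pseudo-probability
_demo_ohe_data = np.zeros((1, 30, 23, 1))
_demo_ohe_data[0, np.arange(30), 0, 0] = 1   # every position one-hot on aa index 0
# Call left commented: compute_mutagenesis also needs the star-imported `aa` list.
# prediction, results = compute_mutagenesis(_demo_ohe_data, refId=0, deep_model=_ConstantModel())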
|
|
import collections
import itertools
from hindley_milner import TypeVariable
from hindley_milner import ListType
from hindley_milner import unify
from type_system import typeof
from type_system import Typeclass
from type_system import Hask
from type_system import build_instance
from typeclasses import Show
from typeclasses import show
from typeclasses import Eq
from typeclasses import Ord
from syntax import Syntax
from syntax import instance
from syntax import sig
from syntax import H
class Enum(Typeclass):
"""
Class Enum defines operations on sequentially ordered types.
The enumFrom... methods are used in translation of arithmetic sequences.
Instances of Enum may be derived for any enumeration type (types whose
constructors have no fields). The nullary constructors are assumed to be
numbered left-to-right by fromEnum from 0 through n-1.
Attributes:
toEnum, fromEnum, succ, pred, enumFrom, enumFromThen, enumFromTo,
enumFromThenTo
Minimal complete definition:
toEnum, fromEnum
"""
@classmethod
def make_instance(typeclass, cls, toEnum, fromEnum):
def succ(a):
return toEnum(fromEnum(a) + 1)
def pred(a):
return toEnum(fromEnum(a) - 1)
def enumFromThen(start, second):
pointer = fromEnum(start)
step = fromEnum(second) - pointer
while True:
yield toEnum(pointer)
pointer += step
def enumFrom(start):
return enumFromThen(start, succ(start))
def enumFromThenTo(start, second, end):
pointer, stop = fromEnum(start), fromEnum(end)
step = fromEnum(second) - pointer
while pointer <= stop:
yield toEnum(pointer)
pointer += step
return
def enumFromTo(start, end):
return enumFromThenTo(start, succ(start), end)
attrs = {"toEnum":toEnum, "fromEnum":fromEnum, "succ":succ,
"pred":pred, "enumFromThen":enumFromThen, "enumFrom":enumFrom,
"enumFromThenTo":enumFromThenTo, "enumFromTo":enumFromTo}
build_instance(Enum, cls, attrs)
return
@sig(H/ "a" >> int)
def fromEnum(a):
"""
fromEnum :: a -> int
Convert to an int.
"""
return Enum[a].fromEnum(a)
@sig(H/ "a" >> "a")
def succ(a):
"""
succ :: a -> a
the successor of a value. For numeric types, succ adds 1.
"""
return Enum[a].succ(a)
@sig(H/ "a" >> "a")
def pred(a):
"""
pred :: a -> a
the predecessor of a value. For numeric types, pred subtracts 1.
"""
return Enum[a].pred(a)
@sig(H/ "a" >> "a" >> ["a"])
def enumFromThen(start, second):
"""
enumFromThen :: a -> a -> [a]
Used in translation of L[n, n_, ...]
"""
return L[Enum[start].enumFromThen(start, second)]
@sig(H/ "a" >> ["a"])
def enumFrom(start):
"""
enumFrom :: a -> [a]
Used in translation of L[n, ...]
"""
return L[Enum[start].enumFrom(start)]
@sig(H/ "a" >> "a" >> "a" >> ["a"])
def enumFromThenTo(start, second, end):
"""
enumFromThenTo :: a -> a -> a -> [a]
Used in translation of L[n, n_, ..., m]
"""
return L[Enum[start].enumFromThenTo(start, second, end)]
@sig(H/ "a" >> "a" >> ["a"])
def enumFromTo(start, end):
"""
enumFromTo :: a -> a -> [a]
Used in translation of L[n, ..., m]
"""
return L[Enum[start].enumFromTo(start, end)]
instance(Enum, int).where(fromEnum=int, toEnum=int)
instance(Enum, long).where(fromEnum=int, toEnum=long)
instance(Enum, bool).where(fromEnum=int, toEnum=bool)
instance(Enum, str).where(fromEnum=ord, toEnum=chr)
#=============================================================================#
# List
class List(collections.Sequence, Hask):
"""
Statically typed lazy sequence datatype.
See help(L) for more information.
"""
def __init__(self, head=None, tail=None):
self.__head = []
self.__tail = itertools.chain([])
self.__is_evaluated = True
if head is not None and len(head) > 0:
fst = head[0]
for fst, other in zip(itertools.repeat(fst), head):
unify(typeof(fst), typeof(other))
self.__head.extend(head)
if tail is not None:
self.__tail = itertools.chain(self.__tail, tail)
self.__is_evaluated = False
return
def __type__(self):
if self.__is_evaluated:
if len(self.__head) == 0:
return ListType(TypeVariable())
return ListType(typeof(self[0]))
elif len(self.__head) == 0:
self.__next()
return self.__type__()
return ListType(typeof(self[0]))
def __next(self):
"""
Evaluate the next element of the tail, and add it to the head.
"""
if self.__is_evaluated:
raise StopIteration
else:
try:
next_iter = next(self.__tail)
if len(self.__head) > 0:
unify(typeof(self[0]), typeof(next_iter))
self.__head.append(next_iter)
except StopIteration as si:
self.__is_evaluated = True
return
def __evaluate(self):
"""
Evaluate the entire List.
"""
while not self.__is_evaluated:
self.__next()
return
def __rxor__(self, item):
"""
^ is the cons operator (equivalent to : in Haskell)
"""
unify(self.__type__(), ListType(typeof(item)))
if self.__is_evaluated:
return List(head=[item] + self.__head)
return List(head=[item] + self.__head, tail=self.__tail)
def __add__(self, other):
"""
(+) :: [a] -> [a] -> [a]
+ is the list concatenation operator, equivalent to ++ in Haskell and +
for Python lists
"""
unify(self.__type__(), typeof(other))
if self.__is_evaluated and other.__is_evaluated:
return List(head=self.__head + other.__head)
elif self.__is_evaluated and not other.__is_evaluated:
return List(head=self.__head + other.__head,
tail=other.__tail)
return List(head=self.__head,
tail=itertools.chain(self.__tail, iter(other)))
def __str__(self):
if len(self.__head) == 0 and self.__is_evaluated:
return "L[[]]"
elif len(self.__head) == 1 and self.__is_evaluated:
return "L[[%s]]" % show(self.__head[0])
body = ", ".join((show(s) for s in self.__head))
return "L[%s]" % body if self.__is_evaluated else "L[%s ...]" % body
def __cmp__(self, other):
if self.__is_evaluated and other.__is_evaluated:
return cmp(self.__head, other.__head)
elif len(self.__head) >= len(other.__head):
# check the evaluated heads
heads = zip(self.__head[:len(other.__head)], other.__head)
heads_comp = ((cmp(h1, h2) for h1, h2 in heads))
for comp in heads_comp:
if comp != 0:
return comp
# evaluate the shorter-headed list until it is the same size
while len(self.__head) > len(other.__head):
if other.__is_evaluated:
return 1
other.__next()
comp = cmp(self.__head[len(other.__head)-1], other.__head[-1])
if comp != 0:
return comp
# evaluate the tails, checking each time
while not self.__is_evaluated or not other.__is_evaluated:
if not self.__is_evaluated:
self.__next()
if not other.__is_evaluated:
other.__next()
len_comp = cmp(len(self.__head), len(other.__head))
if len_comp != 0:
return len_comp
if len(self.__head) > 0:
value_comp = cmp(self.__head[-1], other.__head[-1])
if value_comp != 0:
return value_comp
elif len(other.__head) > len(self.__head):
return -other.__cmp__(self)
return 0
def __eq__(self, other):
return self.__cmp__(other) == 0
def __lt__(self, other):
return self.__cmp__(other) == -1
def __gt__(self, other):
return self.__cmp__(other) == 1
def __le__(self, other):
comp = self.__cmp__(other)
return comp in (-1, 0)
def __ge__(self, other):
comp = self.__cmp__(other)
return comp in (1, 0)
def __len__(self):
self.__evaluate()
return len(self.__head)
def __iter__(self):
for item in self.__head:
yield item
for item in self.__tail:
self.__head.append(item)
yield item
def __getitem__(self, ix):
is_slice = isinstance(ix, slice)
if is_slice:
i = ix.start if ix.stop is None else ix.stop
else:
i = ix
# make sure that the list is evaluated enough to do the indexing, but
# not any more than necessary
# if index is negative, evaluate the entire list
if i >= 0:
while (i+1) > len(self.__head):
try:
self.__next()
except StopIteration:
break
else:
self.__evaluate()
if is_slice:
if ix.stop is None:
return List(head=self.__head[ix], tail=self.__tail)
return List(head=self.__head[ix])
return self.__head[i]
## Basic typeclass instances for list
instance(Show, List).where(
show = List.__str__
)
instance(Eq, List).where(
eq = List.__eq__
)
instance(Ord, List).where(
lt = List.__lt__,
gt = List.__gt__,
le = List.__le__,
ge = List.__ge__
)
#=============================================================================#
# List comprehension syntax
class __list_comprehension__(Syntax):
"""
L is the syntactic construct for Haskell-style list comprehensions and lazy
list creation. To create a new List, just wrap an iterable in L[ ].
List comprehensions can be used with any instance of Enum, including the
built-in types int, long, float, and char.
There are four basic list comprehension patterns:
>>> L[1, ...]
# list from 1 to infinity, counting by ones
>>> L[1, 3, ...]
# list from 1 to infinity, counting by twos
>>> L[1, ..., 20]
# list from 1 to 20 (inclusive), counting by ones
>>> L[1, 5, ..., 20]
# list from 1 to 20 (inclusive), counting by fours
"""
def __getitem__(self, lst):
if isinstance(lst, tuple) and len(lst) < 5 and \
any((Ellipsis is x for x in lst)):
# L[x, ...]
if len(lst) == 2 and lst[1] is Ellipsis:
return enumFrom(lst[0])
# L[x, y, ...]
elif len(lst) == 3 and lst[2] is Ellipsis:
return enumFromThen(lst[0], lst[1])
# L[x, ..., y]
elif len(lst) == 3 and lst[1] is Ellipsis:
return enumFromTo(lst[0], lst[2])
# L[x, y, ..., z]
elif len(lst) == 4 and lst[2] is Ellipsis:
return enumFromThenTo(lst[0], lst[1], lst[3])
raise SyntaxError("Invalid list comprehension: %s" % str(lst))
elif hasattr(lst, "next") or hasattr(lst, "__next__"):
return List(tail=lst)
return List(head=lst)
L = __list_comprehension__("Invalid input to list constructor")
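# Hedged usage sketch (comments only; mirrors the docstring above):
#
#   xs = L[1, 2, 3]     # eager List built from an explicit tuple
#   ys = 0 ^ xs         # cons -- prepend 0, like 0 : [1, 2, 3] in Haskell
#   zs = L[1, ...]      # lazy infinite list
#   zs[3]               # == 4; only the first four elements get evaluated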
|
|
"""\
aug-cc-pV6Z basis set for use with PyQuante
Apr 7 2010 Jussi Lehtola
This program is part of the PyQuante quantum chemistry program suite.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
basis_data = {1: [('S',
[(1776.77556, 4.3999999999999999e-05),
(254.01771199999999, 0.00037199999999999999),
(54.698039000000001, 0.0020939999999999999),
(15.018344000000001, 0.0088629999999999994),
(4.9150780000000003, 0.030540000000000001)]),
('S', [(1.794924, 1.0)]),
('S', [(0.71071600000000001, 1.0)]),
('S', [(0.30480200000000002, 1.0)]),
('S', [(0.138046, 1.0)]),
('S', [(0.062156999999999997, 1.0)]),
('S', [(0.0189, 1.0)]),
('P', [(8.6489999999999991, 1.0)]),
('P', [(3.4300000000000002, 1.0)]),
('P', [(1.3600000000000001, 1.0)]),
('P', [(0.53900000000000003, 1.0)]),
('P', [(0.214, 1.0)]),
('P', [(0.067000000000000004, 1.0)]),
('D', [(4.4530000000000003, 1.0)]),
('D', [(1.958, 1.0)]),
('D', [(0.86099999999999999, 1.0)]),
('D', [(0.378, 1.0)]),
('D', [(0.126, 1.0)]),
('F', [(4.0999999999999996, 1.0)]),
('F', [(1.78, 1.0)]),
('F', [(0.77300000000000002, 1.0)]),
('F', [(0.245, 1.0)]),
('G', [(3.1989999999999998, 1.0)]),
('G', [(1.3260000000000001, 1.0)]),
('G', [(0.40699999999999997, 1.0)]),
('H', [(2.653, 1.0)]),
('H', [(0.68200000000000005, 1.0)])],
2: [('S',
[(4785.0, 5.9999999999999997e-07),
(717.0, 4.6999999999999999e-06),
(163.19999999999999, 2.44e-05),
(46.259999999999998, 0.00010119999999999999),
(15.1, 0.00034860000000000002)]),
('S', [(5.4370000000000003, 1.0)]),
('S', [(2.0880000000000001, 1.0)]),
('S', [(0.82969999999999999, 1.0)]),
('S', [(0.33660000000000001, 1.0)]),
('S', [(0.13689999999999999, 1.0)]),
('S', [(0.044729999999999999, 1.0)]),
('P', [(0.38700000000000001, 1.0)]),
('P', [(0.98399999999999999, 1.0)]),
('P', [(2.4980000000000002, 1.0)]),
('P', [(6.3419999999999996, 1.0)]),
('P', [(16.103999999999999, 1.0)]),
('P', [(0.128, 1.0)]),
('D', [(0.747, 1.0)]),
('D', [(1.9099999999999999, 1.0)]),
('D', [(4.8860000000000001, 1.0)]),
('D', [(12.497999999999999, 1.0)]),
('D', [(0.24099999999999999, 1.0)]),
('F', [(1.292, 1.0)]),
('F', [(3.4620000000000002, 1.0)]),
('F', [(9.2759999999999998, 1.0)]),
('F', [(0.40699999999999997, 1.0)]),
('G', [(2.2360000000000002, 1.0)]),
('G', [(6.5860000000000003, 1.0)]),
('G', [(0.68600000000000005, 1.0)]),
('H', [(4.1589999999999998, 1.0)]),
('H', [(1.016, 1.0)])],
5: [('S',
[(210400.0, 5.8300000000000001e-06),
(31500.0, 4.532e-05),
(7169.0, 0.00023838),
(2030.0, 0.0010057),
(662.5, 0.00364496),
(239.19999999999999, 0.01173628),
(93.260000000000005, 0.03380702),
(38.640000000000001, 0.085565929999999998),
(16.780000000000001, 0.18260322000000001),
(7.5410000000000004, 0.30583759999999999),
(3.4820000000000002, 0.34080347)]),
('S',
[(210400.0, -1.1799999999999999e-06),
(31500.0, -9.1500000000000005e-06),
(7169.0, -4.8189999999999998e-05),
(2030.0, -0.00020306),
(662.5, -0.00073917000000000004),
(239.19999999999999, -0.0023860299999999999),
(93.260000000000005, -0.0069865400000000003),
(38.640000000000001, -0.018115940000000001),
(16.780000000000001, -0.041231289999999997),
(7.5410000000000004, -0.077813530000000006),
(3.4820000000000002, -0.12123181)]),
('S', [(1.6180000000000001, 1.0)]),
('S', [(0.627, 1.0)]),
('S', [(0.29339999999999999, 1.0)]),
('S', [(0.13100000000000001, 1.0)]),
('S', [(0.05815, 1.0)]),
('S', [(0.023, 1.0)]),
('P',
[(192.5, 0.0001349),
(45.640000000000001, 0.00114741),
(14.75, 0.0058479300000000003),
(5.5030000000000001, 0.021170910000000001),
(2.222, 0.062668719999999997)]),
('P', [(0.95899999999999996, 1.0)]),
('P', [(0.43140000000000001, 1.0)]),
('P', [(0.19689999999999999, 1.0)]),
('P', [(0.090329999999999994, 1.0)]),
('P', [(0.040660000000000002, 1.0)]),
('P', [(0.013650000000000001, 1.0)]),
('D', [(2.8860000000000001, 1.0)]),
('D', [(1.2669999999999999, 1.0)]),
('D', [(0.55600000000000005, 1.0)]),
('D', [(0.24399999999999999, 1.0)]),
('D', [(0.107, 1.0)]),
('D', [(0.039199999999999999, 1.0)]),
('F', [(1.651, 1.0)]),
('F', [(0.80020000000000002, 1.0)]),
('F', [(0.38779999999999998, 1.0)]),
('F', [(0.188, 1.0)]),
('F', [(0.073300000000000004, 1.0)]),
('G', [(1.6469, 1.0)]),
('G', [(0.78890000000000005, 1.0)]),
('G', [(0.37790000000000001, 1.0)]),
('G', [(0.16200000000000001, 1.0)]),
('H', [(1.3120000000000001, 1.0)]),
('H', [(0.5806, 1.0)]),
('H', [(0.28799999999999998, 1.0)]),
('I', [(0.98470000000000002, 1.0)]),
('I', [(0.5, 1.0)])],
6: [('S',
[(312100.0, 5.6699999999999999e-06),
(46740.0, 4.4100000000000001e-05),
(10640.0, 0.0002319),
(3013.0, 0.00097897000000000001),
(982.79999999999995, 0.0035516300000000001),
(354.80000000000001, 0.01144061),
(138.40000000000001, 0.032998550000000001),
(57.350000000000001, 0.084053470000000005),
(24.920000000000002, 0.18067612999999999),
(11.23, 0.3049114),
(5.2009999999999996, 0.34141569999999999)]),
('S',
[(312100.0, -1.2100000000000001e-06),
(46740.0, -9.3899999999999999e-06),
(10640.0, -4.9469999999999999e-05),
(3013.0, -0.00020856999999999999),
(982.79999999999995, -0.00076015000000000002),
(354.80000000000001, -0.0024546899999999998),
(138.40000000000001, -0.0072015300000000003),
(57.350000000000001, -0.018807419999999998),
(24.920000000000002, -0.043250009999999998),
(11.23, -0.082597329999999997),
(5.2009999999999996, -0.12857592000000001)]),
('S', [(2.4260000000000002, 1.0)]),
('S', [(0.96730000000000005, 1.0)]),
('S', [(0.4456, 1.0)]),
('S', [(0.1971, 1.0)]),
('S', [(0.086349999999999996, 1.0)]),
('S', [(0.035400000000000001, 1.0)]),
('P',
[(295.19999999999999, 0.00014249),
(69.980000000000004, 0.0012201),
(22.640000000000001, 0.00633696),
(8.4849999999999994, 0.023518750000000001),
(3.4590000000000001, 0.069904469999999996)]),
('P', [(1.504, 1.0)]),
('P', [(0.67830000000000001, 1.0)]),
('P', [(0.30869999999999997, 1.0)]),
('P', [(0.14000000000000001, 1.0)]),
('P', [(0.061780000000000002, 1.0)]),
('P', [(0.02376, 1.0)]),
('D', [(4.5419999999999998, 1.0)]),
('D', [(1.9790000000000001, 1.0)]),
('D', [(0.86209999999999998, 1.0)]),
('D', [(0.37559999999999999, 1.0)]),
('D', [(0.1636, 1.0)]),
('D', [(0.063600000000000004, 1.0)]),
('F', [(2.6309999999999998, 1.0)]),
('F', [(1.2549999999999999, 1.0)]),
('F', [(0.5988, 1.0)]),
('F', [(0.28570000000000001, 1.0)]),
('F', [(0.11799999999999999, 1.0)]),
('G', [(2.6520000000000001, 1.0)]),
('G', [(1.204, 1.0)]),
('G', [(0.54700000000000004, 1.0)]),
('G', [(0.254, 1.0)]),
('H', [(2.0299999999999998, 1.0)]),
('H', [(0.85109999999999997, 1.0)]),
('H', [(0.45100000000000001, 1.0)]),
('I', [(1.4910000000000001, 1.0)]),
('I', [(0.77600000000000002, 1.0)])],
7: [('S',
[(432300.0, 5.5899999999999998e-06),
(64700.0, 4.3510000000000002e-05),
(14720.0, 0.00022892999999999999),
(4170.0, 0.00096502000000000003),
(1361.0, 0.0035021900000000001),
(491.19999999999999, 0.011292119999999999),
(191.59999999999999, 0.032612830000000002),
(79.409999999999997, 0.083297270000000007),
(34.530000000000001, 0.17998565999999999),
(15.58, 0.30500350999999998),
(7.2320000000000002, 0.34115931999999999)]),
('S',
[(432300.0, -1.2300000000000001e-06),
(64700.0, -9.5799999999999998e-06),
(14720.0, -5.0510000000000003e-05),
(4170.0, -0.00021264),
(1361.0, -0.00077534000000000001),
(491.19999999999999, -0.0025062399999999999),
(191.59999999999999, -0.00736529),
(79.409999999999997, -0.01930167),
(34.530000000000001, -0.044717380000000001),
(15.58, -0.086066470000000006),
(7.2320000000000002, -0.13329626999999999)]),
('S', [(3.3820000000000001, 1.0)]),
('S', [(1.369, 1.0)]),
('S', [(0.62480000000000002, 1.0)]),
('S', [(0.2747, 1.0)]),
('S', [(0.1192, 1.0)]),
('S', [(0.047140000000000001, 1.0)]),
('P',
[(415.89999999999998, 0.00014841),
(98.609999999999999, 0.0012763399999999999),
(31.920000000000002, 0.0067024199999999997),
(12.0, 0.025261700000000002),
(4.9189999999999996, 0.075189430000000002)]),
('P', [(2.1480000000000001, 1.0)]),
('P', [(0.96960000000000002, 1.0)]),
('P', [(0.43990000000000001, 1.0)]),
('P', [(0.1978, 1.0)]),
('P', [(0.086029999999999995, 1.0)]),
('P', [(0.0315, 1.0)]),
('D', [(6.7169999999999996, 1.0)]),
('D', [(2.8959999999999999, 1.0)]),
('D', [(1.2490000000000001, 1.0)]),
('D', [(0.53800000000000003, 1.0)]),
('D', [(0.23200000000000001, 1.0)]),
('D', [(0.087400000000000005, 1.0)]),
('F', [(3.8290000000000002, 1.0)]),
('F', [(1.7949999999999999, 1.0)]),
('F', [(0.84099999999999997, 1.0)]),
('F', [(0.39400000000000002, 1.0)]),
('F', [(0.151, 1.0)]),
('G', [(3.8559999999999999, 1.0)]),
('G', [(1.702, 1.0)]),
('G', [(0.751, 1.0)]),
('G', [(0.32600000000000001, 1.0)]),
('H', [(2.875, 1.0)]),
('H', [(1.1699999999999999, 1.0)]),
('H', [(0.58699999999999997, 1.0)]),
('I', [(2.0990000000000002, 1.0)]),
('I', [(1.0409999999999999, 1.0)])],
8: [('S',
[(570800.0, 5.5500000000000002e-06),
(85480.0, 4.3109999999999999e-05),
(19460.0, 0.00022667),
(5512.0, 0.00095637000000000001),
(1798.0, 0.0034732000000000001),
(648.89999999999998, 0.011197779999999999),
(253.09999999999999, 0.032387659999999999),
(104.90000000000001, 0.082859769999999999),
(45.649999999999999, 0.17958381000000001),
(20.620000000000001, 0.30522110000000002),
(9.5869999999999997, 0.34089349000000002)]),
('S',
[(570800.0, -1.26e-06),
(85480.0, -9.7699999999999996e-06),
(19460.0, -5.1480000000000002e-05),
(5512.0, -0.00021696000000000001),
(1798.0, -0.00079162000000000004),
(648.89999999999998, -0.0025590000000000001),
(253.09999999999999, -0.0075331299999999999),
(104.90000000000001, -0.019788969999999999),
(45.649999999999999, -0.04606288),
(20.620000000000001, -0.0891956),
(9.5869999999999997, -0.13754216)]),
('S', [(4.4930000000000003, 1.0)]),
('S', [(1.837, 1.0)]),
('S', [(0.83489999999999998, 1.0)]),
('S', [(0.36580000000000001, 1.0)]),
('S', [(0.157, 1.0)]),
('S', [(0.05935, 1.0)]),
('P',
[(525.60000000000002, 0.00016663999999999999),
(124.59999999999999, 0.0014333600000000001),
(40.340000000000003, 0.0075476199999999997),
(15.18, 0.028594560000000002),
(6.2450000000000001, 0.084388580000000005)]),
('P', [(2.7320000000000002, 1.0)]),
('P', [(1.2270000000000001, 1.0)]),
('P', [(0.54920000000000002, 1.0)]),
('P', [(0.24179999999999999, 1.0)]),
('P', [(0.10249999999999999, 1.0)]),
('P', [(0.033799999999999997, 1.0)]),
('D', [(8.2530000000000001, 1.0)]),
('D', [(3.597, 1.0)]),
('D', [(1.5680000000000001, 1.0)]),
('D', [(0.68400000000000005, 1.0)]),
('D', [(0.29799999999999999, 1.0)]),
('D', [(0.115, 1.0)]),
('F', [(5.4299999999999997, 1.0)]),
('F', [(2.4159999999999999, 1.0)]),
('F', [(1.075, 1.0)]),
('F', [(0.47799999999999998, 1.0)]),
('F', [(0.19500000000000001, 1.0)]),
('G', [(5.2110000000000003, 1.0)]),
('G', [(2.1899999999999999, 1.0)]),
('G', [(0.92000000000000004, 1.0)]),
('G', [(0.40600000000000003, 1.0)]),
('H', [(3.8719999999999999, 1.0)]),
('H', [(1.5049999999999999, 1.0)]),
('H', [(0.748, 1.0)]),
('I', [(2.7730000000000001, 1.0)]),
('I', [(1.345, 1.0)])],
9: [('S',
[(723500.0, 5.5600000000000001e-06),
(108400.0, 4.3180000000000003e-05),
(24680.0, 0.00022699999999999999),
(6990.0, 0.00095803000000000001),
(2282.0, 0.00347015),
(824.60000000000002, 0.011185260000000001),
(321.80000000000001, 0.032328799999999998),
(133.5, 0.082795450000000007),
(58.109999999999999, 0.17988024),
(26.280000000000001, 0.30557831000000002),
(12.24, 0.34026838999999998)]),
('S',
[(723500.0, -1.2899999999999999e-06),
(108400.0, -9.9899999999999992e-06),
(24680.0, -5.2599999999999998e-05),
(6990.0, -0.00022172000000000001),
(2282.0, -0.00080692000000000003),
(824.60000000000002, -0.0026081699999999999),
(321.80000000000001, -0.0076740200000000001),
(133.5, -0.020193530000000001),
(58.109999999999999, -0.047187519999999997),
(26.280000000000001, -0.091580090000000003),
(12.24, -0.14048558)]),
('S', [(5.7469999999999999, 1.0)]),
('S', [(2.3650000000000002, 1.0)]),
('S', [(1.071, 1.0)]),
('S', [(0.46810000000000002, 1.0)]),
('S', [(0.19939999999999999, 1.0)]),
('S', [(0.073150000000000007, 1.0)]),
('P',
[(660.0, 0.00017720999999999999),
(156.40000000000001, 0.00152691),
(50.640000000000001, 0.0080720700000000006),
(19.079999999999998, 0.03074021),
(7.8719999999999999, 0.09011914)]),
('P', [(3.4489999999999998, 1.0)]),
('P', [(1.5449999999999999, 1.0)]),
('P', [(0.68640000000000001, 1.0)]),
('P', [(0.29859999999999998, 1.0)]),
('P', [(0.1245, 1.0)]),
('P', [(0.047600000000000003, 1.0)]),
('D', [(10.573, 1.0)]),
('D', [(4.6130000000000004, 1.0)]),
('D', [(2.0129999999999999, 1.0)]),
('D', [(0.878, 1.0)]),
('D', [(0.38300000000000001, 1.0)]),
('D', [(0.151, 1.0)]),
('F', [(7.5629999999999997, 1.0)]),
('F', [(3.3300000000000001, 1.0)]),
('F', [(1.466, 1.0)]),
('F', [(0.64500000000000002, 1.0)]),
('F', [(0.27200000000000002, 1.0)]),
('G', [(6.7350000000000003, 1.0)]),
('G', [(2.7829999999999999, 1.0)]),
('G', [(1.1499999999999999, 1.0)]),
('G', [(0.52000000000000002, 1.0)]),
('H', [(5.0880000000000001, 1.0)]),
('H', [(1.9370000000000001, 1.0)]),
('H', [(0.98499999999999999, 1.0)]),
('I', [(3.581, 1.0)]),
('I', [(1.7390000000000001, 1.0)])],
10: [('S',
[(902400.0, 5.5099999999999998e-06),
(135100.0, 4.282e-05),
(30750.0, 0.00022514),
(8710.0, 0.00095016000000000002),
(2842.0, 0.0034471900000000001),
(1026.0, 0.01112545),
(400.10000000000002, 0.03220568),
(165.90000000000001, 0.082598909999999998),
(72.209999999999994, 0.17990564000000001),
(32.659999999999997, 0.30605208),
(15.220000000000001, 0.34012558999999998)]),
('S',
[(902400.0, -1.2899999999999999e-06),
(135100.0, -1.005e-05),
(30750.0, -5.2930000000000003e-05),
(8710.0, -0.00022311999999999999),
(2842.0, -0.00081338000000000005),
(1026.0, -0.0026323000000000002),
(400.10000000000002, -0.0077590999999999997),
(165.90000000000001, -0.020452769999999999),
(72.209999999999994, -0.047975049999999998),
(32.659999999999997, -0.093400860000000002),
(15.220000000000001, -0.14277214999999999)]),
('S', [(7.149, 1.0)]),
('S', [(2.9569999999999999, 1.0)]),
('S', [(1.335, 1.0)]),
('S', [(0.58160000000000001, 1.0)]),
('S', [(0.24629999999999999, 1.0)]),
('S', [(0.086900000000000005, 1.0)]),
('P',
[(815.60000000000002, 0.00018375999999999999),
(193.30000000000001, 0.0015850899999999999),
(62.600000000000001, 0.0084146399999999993),
(23.609999999999999, 0.032200329999999999),
(9.7620000000000005, 0.093963900000000003)]),
('P', [(4.2809999999999997, 1.0)]),
('P', [(1.915, 1.0)]),
('P', [(0.84760000000000002, 1.0)]),
('P', [(0.36599999999999999, 1.0)]),
('P', [(0.151, 1.0)]),
('P', [(0.056599999999999998, 1.0)]),
('D', [(13.317, 1.0)]),
('D', [(5.8029999999999999, 1.0)]),
('D', [(2.5289999999999999, 1.0)]),
('D', [(1.1020000000000001, 1.0)]),
('D', [(0.47999999999999998, 1.0)]),
('D', [(0.187, 1.0)]),
('F', [(10.356, 1.0)]),
('F', [(4.5380000000000003, 1.0)]),
('F', [(1.9890000000000001, 1.0)]),
('F', [(0.871, 1.0)]),
('F', [(0.34920000000000001, 1.0)]),
('G', [(8.3450000000000006, 1.0)]),
('G', [(3.4169999999999998, 1.0)]),
('G', [(1.399, 1.0)]),
('G', [(0.63449999999999995, 1.0)]),
('H', [(6.5190000000000001, 1.0)]),
('H', [(2.4470000000000001, 1.0)]),
('H', [(1.2093, 1.0)]),
('I', [(4.4889999999999999, 1.0)]),
('I', [(2.1215000000000002, 1.0)])],
13: [('S',
[(3652000.0, 1.9e-06),
(546800.0, 1.45e-05),
(124500.0, 7.6199999999999995e-05),
(35440.0, 0.00031579999999999998),
(11840.0, 0.0010973999999999999),
(4434.0, 0.0033697000000000002),
(1812.0, 0.0093221999999999992),
(791.5, 0.0237992),
(361.0, 0.056819099999999997),
(169.5, 0.12246799999999999),
(81.680000000000007, 0.22389700000000001),
(40.280000000000001, 0.313446),
(20.25, 0.27497500000000002),
(10.23, 0.110564),
(4.8019999999999996, 0.0119215),
(2.339, 0.00065280000000000004)]),
('S',
[(3652000.0, -4.9999999999999998e-07),
(546800.0, -3.8e-06),
(124500.0, -1.98e-05),
(35440.0, -8.2100000000000003e-05),
(11840.0, -0.00028580000000000001),
(4434.0, -0.00087850000000000005),
(1812.0, -0.0024482000000000002),
(791.5, -0.0063099999999999996),
(361.0, -0.0154854),
(169.5, -0.034958900000000001),
(81.680000000000007, -0.0707729),
(40.280000000000001, -0.119423),
(20.25, -0.148842),
(10.23, -0.059046500000000002),
(4.8019999999999996, 0.216693),
(2.339, 0.47655700000000001)]),
('S',
[(3652000.0, 9.9999999999999995e-08),
(546800.0, 8.9999999999999996e-07),
(124500.0, 4.6e-06),
(35440.0, 1.9000000000000001e-05),
(11840.0, 6.5900000000000003e-05),
(4434.0, 0.0002031),
(1812.0, 0.00056470000000000001),
(791.5, 0.001462),
(361.0, 0.0035793999999999999),
(169.5, 0.0081516000000000002),
(81.680000000000007, 0.0165276),
(40.280000000000001, 0.028546700000000001),
(20.25, 0.036148399999999997),
(10.23, 0.015380400000000001),
(4.8019999999999996, -0.0612141),
(2.339, -0.15126300000000001)]),
('S', [(1.163, 1.0)]),
('S', [(0.58819999999999995, 1.0)]),
('S', [(0.2311, 1.0)]),
('S', [(0.1027, 1.0)]),
('S', [(0.04521, 1.0)]),
('S', [(0.01737, 1.0)]),
('P',
[(2884.0, 6.3800000000000006e-05),
(683.20000000000005, 0.00056309999999999997),
(222.0, 0.0031691000000000002),
(84.819999999999993, 0.013240099999999999),
(35.810000000000002, 0.043340299999999998),
(16.219999999999999, 0.11194999999999999),
(7.702, 0.21779599999999999),
(3.7410000000000001, 0.31167499999999998),
(1.831, 0.316722)]),
('P',
[(2884.0, -7.9999999999999996e-06),
(683.20000000000005, -6.5099999999999997e-05),
(222.0, -0.00039990000000000002),
(84.819999999999993, -0.0015368999999999999),
(35.810000000000002, -0.0055643999999999997),
(16.219999999999999, -0.0131106),
(7.702, -0.02972),
(3.7410000000000001, -0.0347195),
(1.831, -0.055162099999999999)]),
('P', [(0.88780000000000003, 1.0)]),
('P', [(0.39889999999999998, 1.0)]),
('P', [(0.17180000000000001, 1.0)]),
('P', [(0.072980000000000003, 1.0)]),
('P', [(0.030689999999999999, 1.0)]),
('P', [(0.01021, 1.0)]),
('D', [(2.2143000000000002, 1.0)]),
('D', [(0.94489999999999996, 1.0)]),
('D', [(0.4032, 1.0)]),
('D', [(0.1721, 1.0)]),
('D', [(0.073429999999999995, 1.0)]),
('D', [(0.02666, 1.0)]),
('F', [(0.87560000000000004, 1.0)]),
('F', [(0.44719999999999999, 1.0)]),
('F', [(0.22839999999999999, 1.0)]),
('F', [(0.1167, 1.0)]),
('F', [(0.046249999999999999, 1.0)]),
('G', [(0.69520000000000004, 1.0)]),
('G', [(0.37709999999999999, 1.0)]),
('G', [(0.2046, 1.0)]),
('G', [(0.085449999999999998, 1.0)]),
('H', [(0.65600000000000003, 1.0)]),
('H', [(0.33000000000000002, 1.0)]),
('H', [(0.16550000000000001, 1.0)]),
('I', [(0.5302, 1.0)]),
('I', [(0.29899999999999999, 1.0)])],
14: [('S',
[(4465000.0, 1.7e-06),
(668500.0, 1.36e-05),
(152200.0, 7.1400000000000001e-05),
(43300.0, 0.00029730000000000002),
(14410.0, 0.0010383),
(5394.0, 0.0031746999999999999),
(2212.0, 0.0087323999999999995),
(968.10000000000002, 0.022383),
(441.19999999999999, 0.053727299999999999),
(207.09999999999999, 0.116649),
(99.799999999999997, 0.215978),
(49.240000000000002, 0.30956600000000001),
(24.739999999999998, 0.283945),
(12.470000000000001, 0.12223199999999999),
(5.7949999999999999, 0.0141952),
(2.8300000000000001, 0.0003121)]),
('S',
[(4465000.0, -4.9999999999999998e-07),
(668500.0, -3.5999999999999998e-06),
(152200.0, -1.9000000000000001e-05),
(43300.0, -7.9099999999999998e-05),
(14410.0, -0.00027690000000000001),
(5394.0, -0.00084719999999999999),
(2212.0, -0.0023478000000000001),
(968.10000000000002, -0.0060705000000000004),
(441.19999999999999, -0.014971099999999999),
(207.09999999999999, -0.0339729),
(99.799999999999997, -0.069458400000000003),
(49.240000000000002, -0.119001),
(24.739999999999998, -0.153645),
(12.470000000000001, -0.0704684),
(5.7949999999999999, 0.21314900000000001),
(2.8300000000000001, 0.49159599999999998)]),
('S',
[(4465000.0, 9.9999999999999995e-08),
(668500.0, 8.9999999999999996e-07),
(152200.0, 4.8999999999999997e-06),
(43300.0, 2.0299999999999999e-05),
(14410.0, 7.0900000000000002e-05),
(5394.0, 0.00021719999999999999),
(2212.0, 0.00060130000000000003),
(968.10000000000002, 0.0015590999999999999),
(441.19999999999999, 0.0038443000000000001),
(207.09999999999999, 0.0087796999999999997),
(99.799999999999997, 0.018038800000000001),
(49.240000000000002, 0.031522399999999999),
(24.739999999999998, 0.041690499999999998),
(12.470000000000001, 0.020097299999999998),
(5.7949999999999999, -0.066748399999999999),
(2.8300000000000001, -0.18190600000000001)]),
('S', [(1.407, 1.0)]),
('S', [(0.69950000000000001, 1.0)]),
('S', [(0.30830000000000002, 1.0)]),
('S', [(0.13850000000000001, 1.0)]),
('S', [(0.061449999999999998, 1.0)]),
('S', [(0.025389999999999999, 1.0)]),
('P',
[(3572.0, 5.9899999999999999e-05),
(846.0, 0.00052959999999999997),
(274.80000000000001, 0.0029957999999999999),
(105.0, 0.012633500000000001),
(44.350000000000001, 0.041904400000000001),
(20.079999999999998, 0.110259),
(9.5299999999999994, 0.218831),
(4.6340000000000003, 0.317828),
(2.2799999999999998, 0.31942500000000001)]),
('P',
[(3572.0, -1.2799999999999999e-05),
(846.0, -0.0001126),
(274.80000000000001, -0.00064019999999999995),
(105.0, -0.0027028999999999998),
(44.350000000000001, -0.0090789000000000009),
(20.079999999999998, -0.024234800000000001),
(9.5299999999999994, -0.049346000000000001),
(4.6340000000000003, -0.072585899999999995),
(2.2799999999999998, -0.080425800000000006)]),
('P', [(1.1160000000000001, 1.0)]),
('P', [(0.49909999999999999, 1.0)]),
('P', [(0.22539999999999999, 1.0)]),
('P', [(0.10009999999999999, 1.0)]),
('P', [(0.043319999999999997, 1.0)]),
('P', [(0.01694, 1.0)]),
('D', [(3.2385999999999999, 1.0)]),
('D', [(1.3767, 1.0)]),
('D', [(0.58530000000000004, 1.0)]),
('D', [(0.24879999999999999, 1.0)]),
('D', [(0.10580000000000001, 1.0)]),
('D', [(0.041390000000000003, 1.0)]),
('F', [(1.351, 1.0)]),
('F', [(0.66000000000000003, 1.0)]),
('F', [(0.32250000000000001, 1.0)]),
('F', [(0.1575, 1.0)]),
('F', [(0.068839999999999998, 1.0)]),
('G', [(0.8528, 1.0)]),
('G', [(0.46310000000000001, 1.0)]),
('G', [(0.2515, 1.0)]),
('G', [(0.1164, 1.0)]),
('H', [(0.85570000000000002, 1.0)]),
('H', [(0.42309999999999998, 1.0)]),
('H', [(0.2351, 1.0)]),
('I', [(0.6946, 1.0)]),
('I', [(0.42709999999999998, 1.0)])],
15: [('S',
[(5384000.0, 1.5999999999999999e-06),
(806200.0, 1.2799999999999999e-05),
(183600.0, 6.7199999999999994e-05),
(52250.0, 0.00027970000000000002),
(17390.0, 0.00097670000000000005),
(6523.0, 0.0029683999999999999),
(2687.0, 0.0081239999999999993),
(1178.0, 0.020920000000000001),
(536.20000000000005, 0.050559),
(251.5, 0.11047899999999999),
(121.3, 0.206957),
(59.880000000000003, 0.30473699999999998),
(30.050000000000001, 0.29295199999999999),
(15.119999999999999, 0.13556099999999999),
(7.0099999999999998, 0.017320800000000001),
(3.4409999999999998, -3.5200000000000002e-05)]),
('S',
[(5384000.0, -3.9999999999999998e-07),
(806200.0, -3.4999999999999999e-06),
(183600.0, -1.8300000000000001e-05),
(52250.0, -7.5900000000000002e-05),
(17390.0, -0.00026570000000000001),
(6523.0, -0.00080800000000000002),
(2687.0, -0.0022273000000000002),
(1178.0, -0.0057832999999999999),
(536.20000000000005, -0.0143438),
(251.5, -0.032706100000000002),
(121.3, -0.067371600000000004),
(59.880000000000003, -0.117647),
(30.050000000000001, -0.15728),
(15.119999999999999, -0.083854399999999996),
(7.0099999999999998, 0.19971800000000001),
(3.4409999999999998, 0.49860500000000002)]),
('S',
[(5384000.0, 9.9999999999999995e-08),
(806200.0, 9.9999999999999995e-07),
(183600.0, 5.0000000000000004e-06),
(52250.0, 2.09e-05),
(17390.0, 7.2999999999999999e-05),
(6523.0, 0.0002221),
(2687.0, 0.00061220000000000003),
(1178.0, 0.0015918),
(536.20000000000005, 0.0039534000000000001),
(251.5, 0.0090571999999999996),
(121.3, 0.018790899999999999),
(59.880000000000003, 0.033383099999999999),
(30.050000000000001, 0.0459484),
(15.119999999999999, 0.025524000000000002),
(7.0099999999999998, -0.066949599999999998),
(3.4409999999999998, -0.20364499999999999)]),
('S', [(1.712, 1.0)]),
('S', [(0.8337, 1.0)]),
('S', [(0.39119999999999999, 1.0)]),
('S', [(0.1777, 1.0)]),
('S', [(0.079390000000000002, 1.0)]),
('S', [(0.032280000000000003, 1.0)]),
('P',
[(4552.0, 5.1999999999999997e-05),
(1078.0, 0.00046040000000000002),
(350.10000000000002, 0.0026208),
(133.80000000000001, 0.011187300000000001),
(56.520000000000003, 0.0378229),
(25.579999999999998, 0.102116),
(12.140000000000001, 0.210314),
(5.9020000000000001, 0.31738300000000003),
(2.9100000000000001, 0.32716499999999998)]),
('P',
[(4552.0, -1.24e-05),
(1078.0, -0.0001094),
(350.10000000000002, -0.00062560000000000003),
(133.80000000000001, -0.0026733999999999998),
(56.520000000000003, -0.0091552000000000005),
(25.579999999999998, -0.025099300000000001),
(12.140000000000001, -0.053180999999999999),
(5.9020000000000001, -0.081588800000000003),
(2.9100000000000001, -0.091972499999999999)]),
('P', [(1.4350000000000001, 1.0)]),
('P', [(0.65700000000000003, 1.0)]),
('P', [(0.30049999999999999, 1.0)]),
('P', [(0.13400000000000001, 1.0)]),
('P', [(0.057829999999999999, 1.0)]),
('P', [(0.02197, 1.0)]),
('D', [(4.3007999999999997, 1.0)]),
('D', [(1.8346, 1.0)]),
('D', [(0.78259999999999996, 1.0)]),
('D', [(0.33389999999999997, 1.0)]),
('D', [(0.1424, 1.0)]),
('D', [(0.054919999999999997, 1.0)]),
('F', [(1.8160000000000001, 1.0)]),
('F', [(0.88060000000000005, 1.0)]),
('F', [(0.42699999999999999, 1.0)]),
('F', [(0.20699999999999999, 1.0)]),
('F', [(0.087099999999999997, 1.0)]),
('G', [(1.0616000000000001, 1.0)]),
('G', [(0.57909999999999995, 1.0)]),
('G', [(0.31590000000000001, 1.0)]),
('G', [(0.14699999999999999, 1.0)]),
('H', [(1.085, 1.0)]),
('H', [(0.52769999999999995, 1.0)]),
('H', [(0.28739999999999999, 1.0)]),
('I', [(0.88900000000000001, 1.0)]),
('I', [(0.5151, 1.0)])],
16: [('S',
[(6297000.0, 1.5999999999999999e-06),
(943100.0, 1.24e-05),
(214900.0, 6.4900000000000005e-05),
(61250.0, 0.00026929999999999999),
(20450.0, 0.00093470000000000001),
(7719.0, 0.0028083000000000001),
(3198.0, 0.0076740000000000003),
(1402.0, 0.019889799999999999),
(637.20000000000005, 0.0482589),
(298.89999999999998, 0.105757),
(144.30000000000001, 0.20022300000000001),
(71.209999999999994, 0.300728),
(35.729999999999997, 0.29868800000000001),
(17.969999999999999, 0.146347),
(8.3409999999999993, 0.020115899999999999),
(4.1120000000000001, -0.00024879999999999998)]),
('S',
[(6297000.0, -3.9999999999999998e-07),
(943100.0, -3.4000000000000001e-06),
(214900.0, -1.7900000000000001e-05),
(61250.0, -7.4400000000000006e-05),
(20450.0, -0.0002587),
(7719.0, -0.00077769999999999998),
(3198.0, -0.0021396000000000002),
(1402.0, -0.0055906000000000003),
(637.20000000000005, -0.013907600000000001),
(298.89999999999998, -0.031768900000000003),
(144.30000000000001, -0.065930199999999994),
(71.209999999999994, -0.11683200000000001),
(35.729999999999997, -0.15978700000000001),
(17.969999999999999, -0.094532199999999997),
(8.3409999999999993, 0.18782799999999999),
(4.1120000000000001, 0.50468299999999999)]),
('S',
[(6297000.0, 9.9999999999999995e-08),
(943100.0, 9.9999999999999995e-07),
(214900.0, 5.2000000000000002e-06),
(61250.0, 2.16e-05),
(20450.0, 7.5099999999999996e-05),
(7719.0, 0.00022580000000000001),
(3198.0, 0.00062169999999999999),
(1402.0, 0.0016251),
(637.20000000000005, 0.0040534999999999998),
(298.89999999999998, 0.0092902000000000002),
(144.30000000000001, 0.0194561),
(71.209999999999994, 0.035004),
(35.729999999999997, 0.049489699999999998),
(17.969999999999999, 0.030344300000000001),
(8.3409999999999993, -0.066366099999999997),
(4.1120000000000001, -0.22315399999999999)]),
('S', [(2.0449999999999999, 1.0)]),
('S', [(0.97699999999999998, 1.0)]),
('S', [(0.47660000000000002, 1.0)]),
('S', [(0.2185, 1.0)]),
('S', [(0.097589999999999996, 1.0)]),
('S', [(0.038929999999999999, 1.0)]),
('P',
[(5266.0, 5.2299999999999997e-05),
(1247.0, 0.00046349999999999999),
(405.0, 0.0026410000000000001),
(154.80000000000001, 0.0113169),
(65.379999999999995, 0.038470400000000002),
(29.59, 0.104339),
(14.039999999999999, 0.21568399999999999),
(6.8239999999999998, 0.32525999999999999),
(3.3690000000000002, 0.32617800000000002)]),
('P',
[(5266.0, -1.33e-05),
(1247.0, -0.00011790000000000001),
(405.0, -0.0006759),
(154.80000000000001, -0.0028972999999999998),
(65.379999999999995, -0.0099979999999999999),
(29.59, -0.027541599999999999),
(14.039999999999999, -0.058794300000000001),
(6.8239999999999998, -0.090376100000000001),
(3.3690000000000002, -0.099989099999999997)]),
('P', [(1.6659999999999999, 1.0)]),
('P', [(0.7681, 1.0)]),
('P', [(0.35039999999999999, 1.0)]),
('P', [(0.15559999999999999, 1.0)]),
('P', [(0.066809999999999994, 1.0)]),
('P', [(0.02648, 1.0)]),
('D', [(5.0754999999999999, 1.0)]),
('D', [(2.1833, 1.0)]),
('D', [(0.93920000000000003, 1.0)]),
('D', [(0.40400000000000003, 1.0)]),
('D', [(0.17380000000000001, 1.0)]),
('D', [(0.069860000000000005, 1.0)]),
('F', [(1.3222, 1.0)]),
('F', [(0.7319, 1.0)]),
('F', [(0.40510000000000002, 1.0)]),
('F', [(0.2243, 1.0)]),
('F', [(0.11, 1.0)]),
('G', [(1.3472999999999999, 1.0)]),
('G', [(0.70089999999999997, 1.0)]),
('G', [(0.36470000000000002, 1.0)]),
('G', [(0.1799, 1.0)]),
('H', [(1.2861, 1.0)]),
('H', [(0.61150000000000004, 1.0)]),
('H', [(0.34649999999999997, 1.0)]),
('I', [(1.0408999999999999, 1.0)]),
('I', [(0.62219999999999998, 1.0)])],
17: [('S',
[(7733000.0, 1.43474e-06),
(1158000.0, 1.11486e-05),
(263700.0, 5.8586499999999999e-05),
(75010.0, 0.00024451799999999999),
(24890.0, 0.000858287),
(9318.0, 0.0026101900000000001),
(3840.0, 0.0071378400000000003),
(1684.0, 0.018456400000000001),
(766.29999999999995, 0.044894400000000001),
(359.5, 0.099382200000000004),
(173.40000000000001, 0.19078200000000001),
(85.609999999999999, 0.29356500000000002),
(42.93, 0.306477),
(21.550000000000001, 0.16220899999999999),
(10.050000000000001, 0.0249383),
(4.9779999999999998, -0.00051314199999999996)]),
('S',
[(7733000.0, -4.0222699999999999e-07),
(1158000.0, -3.1244800000000002e-06),
(263700.0, -1.6429000000000001e-05),
(75010.0, -6.8542100000000002e-05),
(24890.0, -0.00024100099999999999),
(9318.0, -0.00073353800000000005),
(3840.0, -0.0020183000000000002),
(1684.0, -0.0052610699999999996),
(766.29999999999995, -0.0130986),
(359.5, -0.030179399999999999),
(173.40000000000001, -0.063188800000000003),
(85.609999999999999, -0.113859),
(42.93, -0.16125100000000001),
(21.550000000000001, -0.109234),
(10.050000000000001, 0.162999),
(4.9779999999999998, 0.501413)]),
('S',
[(7733000.0, 1.2169600000000001e-07),
(1158000.0, 9.4514100000000003e-07),
(263700.0, 4.9711900000000001e-06),
(75010.0, 2.0732300000000001e-05),
(24890.0, 7.2940200000000003e-05),
(9318.0, 0.00022189900000000001),
(3840.0, 0.00061135499999999995),
(1684.0, 0.00159337),
(766.29999999999995, 0.00398001),
(359.5, 0.0091937500000000005),
(173.40000000000001, 0.0194399),
(85.609999999999999, 0.0355187),
(42.93, 0.0520674),
(21.550000000000001, 0.036564399999999997),
(10.050000000000001, -0.059749999999999998),
(4.9779999999999998, -0.23164100000000001)]),
('S', [(2.4780000000000002, 1.0)]),
('S', [(1.1799999999999999, 1.0)]),
('S', [(0.58279999999999998, 1.0)]),
('S', [(0.26679999999999998, 1.0)]),
('S', [(0.1183, 1.0)]),
('S', [(0.046249999999999999, 1.0)]),
('P',
[(6091.0, 5.1619400000000003e-05),
(1442.0, 0.00045846800000000002),
(468.30000000000001, 0.00261509),
(179.0, 0.0112554),
(75.609999999999999, 0.038457699999999997),
(34.219999999999999, 0.10508099999999999),
(16.23, 0.21860299999999999),
(7.8899999999999997, 0.330874),
(3.8980000000000001, 0.32587899999999997)]),
('P',
[(6091.0, -1.39259e-05),
(1442.0, -0.00012332399999999999),
(468.30000000000001, -0.00070755099999999995),
(179.0, -0.0030493899999999999),
(75.609999999999999, -0.0105752),
(34.219999999999999, -0.029409399999999999),
(16.23, -0.063229599999999997),
(7.8899999999999997, -0.098186999999999997),
(3.8980000000000001, -0.10587000000000001)]),
('P', [(1.9330000000000001, 1.0)]),
('P', [(0.90569999999999995, 1.0)]),
('P', [(0.41399999999999998, 1.0)]),
('P', [(0.18360000000000001, 1.0)]),
('P', [(0.078589999999999993, 1.0)]),
('P', [(0.031629999999999998, 1.0)]),
('D', [(6.2427999999999999, 1.0)]),
('D', [(2.6905999999999999, 1.0)]),
('D', [(1.1596, 1.0)]),
('D', [(0.49980000000000002, 1.0)]),
('D', [(0.21540000000000001, 1.0)]),
('D', [(0.088849999999999998, 1.0)]),
('F', [(2.5327000000000002, 1.0)]),
('F', [(1.2405999999999999, 1.0)]),
('F', [(0.60770000000000002, 1.0)]),
('F', [(0.29770000000000002, 1.0)]),
('F', [(0.14649999999999999, 1.0)]),
('G', [(1.5387999999999999, 1.0)]),
('G', [(0.80500000000000005, 1.0)]),
('G', [(0.42120000000000002, 1.0)]),
('G', [(0.2177, 1.0)]),
('H', [(1.5612999999999999, 1.0)]),
('H', [(0.73970000000000002, 1.0)]),
('H', [(0.4365, 1.0)]),
('I', [(1.2572000000000001, 1.0)]),
('I', [(0.80740000000000001, 1.0)])],
18: [('S',
[(9149000.0, 1.3e-06),
(1370000.0, 1.04e-05),
(311900.0, 5.49e-05),
(88650.0, 0.0002296),
(29330.0, 0.00081030000000000002),
(10930.0, 0.0024853000000000002),
(4480.0, 0.0068368999999999999),
(1962.0, 0.017619900000000001),
(894.10000000000002, 0.042875200000000002),
(419.60000000000002, 0.095485299999999995),
(202.30000000000001, 0.18506400000000001),
(99.840000000000003, 0.28904200000000002),
(50.07, 0.310166),
(25.140000000000001, 0.172183),
(11.81, 0.028522700000000002),
(5.8819999999999997, -0.00057569999999999995)]),
('S',
[(9149000.0, -3.9999999999999998e-07),
(1370000.0, -3.0000000000000001e-06),
(311900.0, -1.56e-05),
(88650.0, -6.5199999999999999e-05),
(29330.0, -0.00023039999999999999),
(10930.0, -0.00070750000000000001),
(4480.0, -0.0019572999999999999),
(1962.0, -0.0050856),
(894.10000000000002, -0.012652800000000001),
(419.60000000000002, -0.029306499999999999),
(202.30000000000001, -0.061771199999999998),
(99.840000000000003, -0.112541),
(50.07, -0.16229299999999999),
(25.140000000000001, -0.118412),
(11.81, 0.146148),
(5.8819999999999997, 0.49775199999999997)]),
('S',
[(9149000.0, 9.9999999999999995e-08),
(1370000.0, 8.9999999999999996e-07),
(311900.0, 4.8999999999999997e-06),
(88650.0, 2.0400000000000001e-05),
(29330.0, 7.2000000000000002e-05),
(10930.0, 0.00022100000000000001),
(4480.0, 0.00061249999999999998),
(1962.0, 0.0015908000000000001),
(894.10000000000002, 0.0039722000000000004),
(419.60000000000002, 0.0092204000000000001),
(202.30000000000001, 0.0196367),
(99.840000000000003, 0.036256999999999998),
(50.07, 0.054172499999999998),
(25.140000000000001, 0.040999599999999997),
(11.81, -0.055174399999999998),
(5.8819999999999997, -0.23875399999999999)]),
('S', [(2.9390000000000001, 1.0)]),
('S', [(1.405, 1.0)]),
('S', [(0.69630000000000003, 1.0)]),
('S', [(0.31879999999999997, 1.0)]),
('S', [(0.14099999999999999, 1.0)]),
('S', [(0.05357, 1.0)]),
('P',
[(7050.0, 5.02e-05),
(1669.0, 0.00044539999999999998),
(542.10000000000002, 0.0025479999999999999),
(207.09999999999999, 0.011015499999999999),
(87.519999999999996, 0.037849000000000001),
(39.609999999999999, 0.104355),
(18.780000000000001, 0.219335),
(9.1300000000000008, 0.334615),
(4.516, 0.32677099999999998)]),
('P',
[(7050.0, -1.4e-05),
(1669.0, -0.00012430000000000001),
(542.10000000000002, -0.00071469999999999997),
(207.09999999999999, -0.0030967999999999998),
(87.519999999999996, -0.010796099999999999),
(39.609999999999999, -0.030353600000000001),
(18.780000000000001, -0.065978499999999995),
(9.1300000000000008, -0.103877),
(4.516, -0.109956)]),
('P', [(2.2450000000000001, 1.0)]),
('P', [(1.0649999999999999, 1.0)]),
('P', [(0.48849999999999999, 1.0)]),
('P', [(0.21659999999999999, 1.0)]),
('P', [(0.092549999999999993, 1.0)]),
('P', [(0.03678, 1.0)]),
('D', [(7.6326999999999998, 1.0)]),
('D', [(3.2875999999999999, 1.0)]),
('D', [(1.4159999999999999, 1.0)]),
('D', [(0.6099, 1.0)]),
('D', [(0.26269999999999999, 1.0)]),
('D', [(0.10780000000000001, 1.0)]),
('F', [(3.0581999999999998, 1.0)]),
('F', [(1.5291999999999999, 1.0)]),
('F', [(0.76470000000000005, 1.0)]),
('F', [(0.38240000000000002, 1.0)]),
('F', [(0.183, 1.0)]),
('G', [(1.845, 1.0)]),
('G', [(0.9657, 1.0)]),
('G', [(0.50549999999999995, 1.0)]),
('G', [(0.2555, 1.0)]),
('H', [(1.8743000000000001, 1.0)]),
('H', [(0.8871, 1.0)]),
('H', [(0.52649999999999997, 1.0)]),
('I', [(1.5065999999999999, 1.0)]),
('I', [(0.99260000000000004, 1.0)])]}
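# Layout sketch (inferred from the entries above, not from accompanying docs):
# each key is an atomic number and each value is a list of shells, where a
# shell is an (angular-momentum label, [(exponent, contraction coefficient), ...])
# pair, e.g. for the hypothetical variable name 'basis' holding this literal:
#
#     basis[14][0]        -> ('S', [(4465000.0, 1.7e-06), ...])  # first S shell
#     basis[14][0][1][0]  -> (4465000.0, 1.7e-06)                # (exponent, coeff)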
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for anchor_generators.multiscale_grid_anchor_generator_test.py."""
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.anchor_generators import multiscale_grid_anchor_generator as mg
from object_detection.utils import test_case
class MultiscaleGridAnchorGeneratorTest(test_case.TestCase):
def test_construct_single_anchor(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 64
im_width = 64
feature_map_shape_list = [(2, 2)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
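# For level 5 the anchor stride is 2**5 = 32 and the base anchor size is
# anchor_scale * stride = 128; centres fall at 16 and 48, so each corner is
# centre -/+ 64, giving the four boxes below.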
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_unit_dimensions(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 1.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 1
im_width = 1
feature_map_shape_list = [(2, 2)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
# Positive offsets are produced.
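# With anchor_scale = 1.0 the boxes are 32x32 and centred at 16 and 48 on the
# level-5 stride grid, so every corner coordinate is non-negative.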
exp_anchor_corners = [[0, 0, 32, 32],
[0, 32, 32, 64],
[32, 0, 64, 32],
[32, 32, 64, 64]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_normalized_anchors_fails_with_unit_dimensions(self):
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level=5, max_level=5, anchor_scale=1.0, aspect_ratios=[1.0],
scales_per_octave=1, normalize_coordinates=True)
with self.assertRaisesRegexp(ValueError, 'Normalized coordinates'):
anchor_generator.generate(
feature_map_shape_list=[(2, 2)], im_height=1, im_width=1)
def test_construct_single_anchor_in_normalized_coordinates(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 64
im_width = 128
feature_map_shape_list = [(2, 2)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=True)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
exp_anchor_corners = [[-48./64, -48./128, 80./64, 80./128],
[-48./64, -16./128, 80./64, 112./128],
[-16./64, -48./128, 112./64, 80./128],
[-16./64, -16./128, 112./64, 112./128]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_num_anchors_per_location(self):
min_level = 5
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0, 2.0]
scales_per_octave = 3
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
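# 2 aspect ratios x 3 scales per octave = 6 anchors per location, for each of
# the two levels (5 and 6).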
self.assertEqual(anchor_generator.num_anchors_per_location(), [6, 6])
def test_construct_single_anchor_dynamic_size(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = tf.constant(64)
im_width = tf.constant(64)
feature_map_shape_list = [(2, 2)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return anchor_corners
exp_anchor_corners = [[-64, -64, 64, 64],
[-64, -32, 64, 96],
[-32, -64, 96, 64],
[-32, -32, 96, 96]]
# Add anchor offset.
anchor_offset = 2.0**5 / 2.0
exp_anchor_corners = [
[b + anchor_offset for b in a] for a in exp_anchor_corners
]
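# With tensor-valued image dimensions the base grid starts at 0 (centres at 0
# and 32 for the 128x128 boxes above); adding the half-stride offset
# 2**5 / 2 = 16 shifts every corner by 16 and reproduces the corners of the
# static 64x64 case.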
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_odd_input_dimension(self):
def graph_fn():
min_level = 5
max_level = 5
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 65
im_width = 65
feature_map_shape_list = [(3, 3)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(
feature_map_shape_list, im_height=im_height, im_width=im_width)
anchor_corners = anchors_list[0].get()
return (anchor_corners,)
anchor_corners_out = self.execute(graph_fn, [])
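# The expected corners below imply 128x128 anchors centred on (0, 32, 64) x
# (0, 32, 64), i.e. no half-stride offset for the 65x65 input.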
exp_anchor_corners = [[-64, -64, 64, 64],
[-64, -32, 64, 96],
[-64, 0, 64, 128],
[-32, -64, 96, 64],
[-32, -32, 96, 96],
[-32, 0, 96, 128],
[0, -64, 128, 64],
[0, -32, 128, 96],
[0, 0, 128, 128]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_on_two_feature_maps(self):
def graph_fn():
min_level = 5
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 64
im_width = 64
feature_map_shape_list = [(2, 2), (1, 1)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
anchor_corners_out = np.concatenate(self.execute(graph_fn, []), axis=0)
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112],
[-96, -96, 160, 160]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_two_scales_per_octave(self):
def graph_fn():
min_level = 6
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 2
im_height = 64
im_width = 64
feature_map_shape_list = [(1, 1)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
# There are 2 sets of anchors in this configuration. The order is:
# [[2**0.0 intermediate scale + 1.0 aspect],
# [2**0.5 intermediate scale + 1.0 aspect]]
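# At level 6 the stride is 64 and the base size is 4 * 64 = 256, centred at 32:
#   2**0.0 scale: 32 -/+ 128              -> [-96, 160]
#   2**0.5 scale: 32 -/+ 128 * sqrt(2)    -> [-149.02, 213.02]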
exp_anchor_corners = [[-96., -96., 160., 160.],
[-149.0193, -149.0193, 213.0193, 213.0193]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchor_with_two_scales_per_octave_and_aspect(self):
def graph_fn():
min_level = 6
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0, 2.0]
scales_per_octave = 2
im_height = 64
im_width = 64
feature_map_shape_list = [(1, 1)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
# There are 4 sets of anchors in this configuration. The order is:
# [[2**0.0 intermediate scale + 1.0 aspect],
# [2**0.5 intermediate scale + 1.0 aspect],
# [2**0.0 intermediate scale + 2.0 aspect],
# [2**0.5 intermediate scale + 2.0 aspect]]
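# For aspect ratio a the box height scales by 1/sqrt(a) and the width by
# sqrt(a), e.g. 2**0.0 scale with 2.0 aspect: 256/sqrt(2) x 256*sqrt(2)
# ~ 181 x 362 centred at 32 -> [-58.51, -149.02, 122.51, 213.02].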
exp_anchor_corners = [[-96., -96., 160., 160.],
[-149.0193, -149.0193, 213.0193, 213.0193],
[-58.50967, -149.0193, 122.50967, 213.0193],
[-96., -224., 160., 288.]]
anchor_corners_out = self.execute(graph_fn, [])
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
def test_construct_single_anchors_on_feature_maps_with_dynamic_shape(self):
def graph_fn(feature_map1_height, feature_map1_width, feature_map2_height,
feature_map2_width):
min_level = 5
max_level = 6
anchor_scale = 4.0
aspect_ratios = [1.0]
scales_per_octave = 1
im_height = 64
im_width = 64
feature_map_shape_list = [(feature_map1_height, feature_map1_width),
(feature_map2_height, feature_map2_width)]
anchor_generator = mg.MultiscaleGridAnchorGenerator(
min_level, max_level, anchor_scale, aspect_ratios, scales_per_octave,
normalize_coordinates=False)
anchors_list = anchor_generator.generate(feature_map_shape_list,
im_height=im_height,
im_width=im_width)
anchor_corners = [anchors.get() for anchors in anchors_list]
return anchor_corners
anchor_corners_out = np.concatenate(
self.execute_cpu(graph_fn, [
np.array(2, dtype=np.int32),
np.array(2, dtype=np.int32),
np.array(1, dtype=np.int32),
np.array(1, dtype=np.int32)
]),
axis=0)
exp_anchor_corners = [[-48, -48, 80, 80],
[-48, -16, 80, 112],
[-16, -48, 112, 80],
[-16, -16, 112, 112],
[-96, -96, 160, 160]]
self.assertAllClose(anchor_corners_out, exp_anchor_corners)
if __name__ == '__main__':
tf.test.main()
|
|
#!/usr/bin/env python
# This document is part of Pelagos Data
# https://github.com/skytruth/pelagos-data
# =========================================================================== #
#
# The MIT License (MIT)
#
# Copyright (c) 2014 SkyTruth
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# =========================================================================== #
"""Pelagos Regionate
Usage:
regionate.py [options] POLY_LAYER [POINTS_IN [POINTS_OUT]] [-q | -v]
regionate.py [options] POLY_LAYER [-] [POINTS_OUT] [-q | -v]
regionate.py (-h | --help)
regionate.py --version
Options:
--attribute=ATTRIB Attribute in the polygon layer containing the regionid [default: regionid]
--layername=LAYERNAME Name of the polygon layer to use. Default is to use the first layer found
--xfield=XFIELD Name of input field containing x value [default: longitude]
--yfield=YFIELD Name of input field containing y value [default: latitude]
--regionid-map=DEFINITION LAYER=FIELD,FIELD,...:LAYER=FIELD:...
--regionid-mode=MODE (update|append) Specify whether regionids should be appended or updated [default: update]
-h --help Show this screen.
--version Show version.
-q --quiet be quiet
-v --verbose yak yak yak
"""
from docopt import docopt
import logging
import csv
import sys
import json
from osgeo import ogr
#/* ======================================================================= */#
#/* Define load_layers() function
#/* ======================================================================= */#
def load_layers(data_source, arg):
"""
Load specified layers
"""
layers = []
layer_name = arg['--layername']
for i in range(0, data_source.GetLayerCount()):
layer = data_source.GetLayerByIndex(i)
if layer_name:
if layer_name == layer.GetName():
layers.append(layer)
else:
if layer.GetGeomType() in (3, 6):
layers.append(layer)
if not layers:
if arg['--layername']:
raise IOError('Layer %s not found in %s' % (arg['--layername'], arg['POLY_LAYER']))
else:
raise IOError('No Polygon layers found in %s' % arg['POLY_LAYER'])
return layers
#/* ======================================================================= */#
#/* Define regionate() function
#/* ======================================================================= */#
def regionate(file_in, file_out, arg):
"""
Open polygon layer for reading
Arguments are file path and layer name
Unfortunately a layer cannot exist without an open datasource so both objects must exist
poly_ds, poly_layer = putils.io.open_datasource(arg['POLY_LAYER'], basename(arg['POLY_LAYER']).split('.')[0])
"""
# Prep OGR objects
poly_ds = ogr.Open(arg['POLY_LAYER'], 0)
if poly_ds is None:
raise IOError('Unable to open %s' % arg['POLY_LAYER'])
layers = load_layers(poly_ds, arg)
regionid_map = {layer.GetName(): ['region'] for layer in layers}
if arg['--regionid-map'] is not None:
# Parse the region map definitions
definitions = arg['--regionid-map'].split(':')
for defn in definitions:
layer, fields = defn.split('=')
regionid_map[layer] = fields.split(',')
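# Illustrative only (layer/field names are made up): a definition such as
#   --regionid-map=eez=eez_id:fao=fao_region,fao_subregion
# would yield {'eez': ['eez_id'], 'fao': ['fao_region', 'fao_subregion']},
# overriding the default ['region'] entry for those layers.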
# Extract all the fields specified in the region map so they can be created if they do not already exist
regionid_fields = []
for r_fields in regionid_map.values():
regionid_fields += r_fields
# Prep CSV objects
reader = csv.DictReader(file_in)
# Process one row at a time
for row in reader:
point = ogr.CreateGeometryFromWkt("POINT (%s %s)" % (row['longitude'], row['latitude']))
regionids = {}
# Perform point in polygon tests for all layers
for layer in layers:
layer.SetSpatialFilter(point)
feature = layer.GetNextFeature()
# Check all intersecting features for current layer
while feature:
if feature.GetGeometryRef().Intersects(point):
value = feature.GetField(arg['--attribute']).split(',')
# Add regionid
if layer.GetName() not in regionids:
regionids[layer.GetName()] = value
else:
regionids[layer.GetName()] += value
feature = layer.GetNextFeature()
# Create an output row
row_out = row.copy()
# # No regionid mapping - populate
# if regionid_map is None:
# _ids = []
# for _rids in regionids.values():
# _ids += _rids
# row_out['region'] = _ids
#
# # Regionids are mapped to multiple fields, distribute
# else:
# Make sure output row contains all necessary fields
for rid_field in regionid_fields:
if rid_field not in row_out:
row_out[rid_field] = []
# Populate output regionids
for layer_name, collected_ids in regionids.iteritems():
for ofield in regionid_map[layer_name]:
# If the field is empty, set it equal to the collected regionids
# If the field should be updated, replace existing values with new
if row_out[ofield] is None or arg['--regionid-mode'] == 'update':
row_out[ofield] = regionids[layer_name]
# Add to existing values
elif arg['--regionid-mode'] == 'append':
row_out[ofield] += regionids[layer_name]
# Argument error
else:
raise ValueError("Invalid --regionid-mode: %s" % arg['--regionid-mode'])
# Dump to disk
file_out.write(json.dumps(row_out, sort_keys=True))
file_out.write('\n')
#/* ======================================================================= */#
#/* Define main() function
#/* ======================================================================= */#
def main():
# Parse arguments
arguments = docopt(__doc__, version='Pelagos Regionator 0.1')
if arguments['--verbose']:
log_level = logging.DEBUG
elif arguments.get('--quiet'):
log_level = logging.ERROR
else:
log_level = logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
try:
points_in = arguments['POINTS_IN']
points_out = arguments['POINTS_OUT']
# Open input file
with sys.stdin if points_in is None or '-' == points_in else open(points_in, 'rb') as file_in:
# Open output file
with sys.stdout if points_out is None or '-' == points_out else open(points_out, 'w') as file_out:
regionate(file_in, file_out, arguments)
except (ValueError, IOError), e:
logging.error(e)
return 1
return 0
#/* ======================================================================= */#
#/* Command Line Execution
#/* ======================================================================= */#
if __name__ == "__main__":
sys.exit(main())
|
|
"""
Iterative fit Deep Neural Network
Created: Hector Mendoza
"""
import numpy as np
import scipy.sparse as sp
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import EqualsCondition, InCondition
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter, Constant
from autosklearn.pipeline.components.base import AutoSklearnClassificationAlgorithm
from autosklearn.pipeline.constants import *
class DeepNetIterative(AutoSklearnClassificationAlgorithm):
def __init__(self, number_epochs, batch_size, num_layers,
dropout_output, learning_rate, solver,
lambda2, random_state=None,
**kwargs):
self.number_epochs = number_epochs
self.batch_size = batch_size
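# num_layers arrives as a categorical letter (see
# get_hyperparameter_search_space()); decode it back to an integer:
# 'c' -> 2, 'd' -> 3, ..., counting the hidden layers plus the output layer.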
self.num_layers = ord(num_layers) - ord('a')
self.dropout_output = dropout_output
self.learning_rate = learning_rate
self.lambda2 = lambda2
self.solver = solver
# The remaining options are taken from **kwargs; the arguments assigned
# explicitly above are the minimum parameters needed to run
# the iterative net.
self.lr_policy = kwargs.get("lr_policy", "fixed")
self.momentum = kwargs.get("momentum", 0.99)
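# beta1/beta2 are searched as (1 - beta), so the kwarg defaults of 0.1 and
# 0.01 correspond to stored values of 0.9 and 0.99.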
self.beta1 = 1 - kwargs.get("beta1", 0.1)
self.beta2 = 1 - kwargs.get("beta2", 0.01)
self.rho = kwargs.get("rho", 0.95)
self.gamma = kwargs.get("gamma", 0.01)
self.power = kwargs.get("power", 1.0)
self.epoch_step = kwargs.get("epoch_step", 1)
# Add special iterative member
self._iterations = 0
# Empty features and shape
self.n_features = None
self.input_shape = None
self.m_issparse = False
self.m_isbinary = False
self.m_ismultilabel = False
# TODO: Should one add a try-except here?
self.num_units_per_layer = []
self.dropout_per_layer = []
self.activation_per_layer = []
self.weight_init_layer = []
self.std_per_layer = []
self.leakiness_per_layer = []
self.tanh_alpha_per_layer = []
self.tanh_beta_per_layer = []
for i in range(1, self.num_layers):
self.num_units_per_layer.append(int(kwargs.get("num_units_layer_" + str(i), 128)))
self.dropout_per_layer.append(float(kwargs.get("dropout_layer_" + str(i), 0.5)))
self.activation_per_layer.append(kwargs.get("activation_layer_" + str(i), 'relu'))
self.weight_init_layer.append(kwargs.get("weight_init_" + str(i), 'he_normal'))
self.std_per_layer.append(float(kwargs.get("std_layer_" + str(i), 0.005)))
self.leakiness_per_layer.append(float(kwargs.get("leakiness_layer_" + str(i), 1./3.)))
self.tanh_alpha_per_layer.append(float(kwargs.get("tanh_alpha_layer_" + str(i), 2./3.)))
self.tanh_beta_per_layer.append(float(kwargs.get("tanh_beta_layer_" + str(i), 1.7159)))
self.estimator = None
self.random_state = random_state
def _prefit(self, X, y):
self.batch_size = int(self.batch_size)
self.n_features = X.shape[1]
self.input_shape = (self.batch_size, self.n_features)
assert len(self.num_units_per_layer) == self.num_layers - 1,\
"Number of created layers is different than actual layers"
assert len(self.dropout_per_layer) == self.num_layers - 1,\
"Number of created layers is different than actual layers"
if len(y.shape) == 2 and y.shape[1] > 1: # Multilabel
self.m_ismultilabel = True
self.num_output_units = y.shape[1]
else:
number_classes = len(np.unique(y.astype(int)))
if number_classes == 2: # Make it binary
self.m_isbinary = True
self.num_output_units = 1
if len(y.shape) == 1:
y = y[:, np.newaxis]
else:
self.num_output_units = number_classes
self.m_issparse = sp.issparse(X)
return X, y
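# fit() follows the iterative-fit protocol: train one epoch at a time via
# iterative_fit() until configuration_fully_fitted() reports that the
# budgeted number_epochs has been reached.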
def fit(self, X, y, sample_weight=None):
Xf, yf = self._prefit(X, y)
while not self.configuration_fully_fitted():
self.iterative_fit(Xf, yf, n_iter=1, sample_weight=sample_weight)
return self
def iterative_fit(self, X, y, n_iter=1, refit=False, sample_weight=None):
Xf, yf = self._prefit(X, y)
if refit:
self.estimator = None
if self.estimator is None:
self._iterations = 1
from implementation import FeedForwardNet
self.estimator = FeedForwardNet.FeedForwardNet(batch_size=self.batch_size,
input_shape=self.input_shape,
num_layers=self.num_layers,
num_units_per_layer=self.num_units_per_layer,
dropout_per_layer=self.dropout_per_layer,
activation_per_layer=self.activation_per_layer,
weight_init_per_layer=self.weight_init_layer,
std_per_layer=self.std_per_layer,
leakiness_per_layer=self.leakiness_per_layer,
tanh_alpha_per_layer=self.tanh_alpha_per_layer,
tanh_beta_per_layer=self.tanh_beta_per_layer,
num_output_units=self.num_output_units,
dropout_output=self.dropout_output,
learning_rate=self.learning_rate,
lr_policy=self.lr_policy,
lambda2=self.lambda2,
momentum=self.momentum,
beta1=self.beta1,
beta2=self.beta2,
rho=self.rho,
solver=self.solver,
num_epochs=1,
gamma=self.gamma,
power=self.power,
epoch_step=self.epoch_step,
is_sparse=self.m_issparse,
is_binary=self.m_isbinary,
is_multilabel=self.m_ismultilabel,
random_state=self.random_state)
self.estimator.num_epochs = n_iter
print('Increasing epochs %d' % n_iter)
print('Iterations: %d' % self._iterations)
self.estimator.fit(Xf, yf)
if self._iterations >= self.number_epochs:
self._fully_fit = True
self._iterations += n_iter
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
elif not hasattr(self, '_fully_fit'):
return False
else:
return self._fully_fit
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X, self.m_issparse)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X, self.m_issparse)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'feed_nn_iter',
'name': 'Feed Forward Neural Network Iterative',
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
max_num_layers = 7 # Maximum number of layers coded
# Hacky way to condition layer parameters on the number of layers
# 'c'=1, 'd'=2, 'e'=3, 'f'=4, 'g'=5, 'h'=6 hidden layers (+ output layer)
layer_choices = [chr(i) for i in range(ord('c'), ord('b') + max_num_layers)]
batch_size = UniformIntegerHyperparameter("batch_size",
32, 4096,
log=True,
default=32)
number_epochs = UniformIntegerHyperparameter("number_epochs",
2, 80,
default=5)
num_layers = CategoricalHyperparameter("num_layers",
choices=layer_choices,
default='c')
lr = UniformFloatHyperparameter("learning_rate", 1e-6, 1.0,
log=True,
default=0.01)
l2 = UniformFloatHyperparameter("lambda2", 1e-7, 1e-2,
log=True,
default=1e-4)
dropout_output = UniformFloatHyperparameter("dropout_output",
0.0, 0.99,
default=0.5)
# Define basic hyperparameters and the config space;
# 'basic' means they are independent of the number of layers
cs = ConfigurationSpace()
cs.add_hyperparameter(number_epochs)
cs.add_hyperparameter(batch_size)
cs.add_hyperparameter(num_layers)
cs.add_hyperparameter(lr)
cs.add_hyperparameter(l2)
cs.add_hyperparameter(dropout_output)
# Define parameters with different child parameters and conditions
solver_choices = ["adam", "adadelta", "adagrad",
"sgd", "momentum", "nesterov",
"smorm3s"]
solver = CategoricalHyperparameter(name="solver",
choices=solver_choices,
default="smorm3s")
beta1 = UniformFloatHyperparameter("beta1", 1e-4, 0.1,
log=True,
default=0.1)
beta2 = UniformFloatHyperparameter("beta2", 1e-4, 0.1,
log=True,
default=0.01)
rho = UniformFloatHyperparameter("rho", 0.05, 0.99,
log=True,
default=0.95)
momentum = UniformFloatHyperparameter("momentum", 0.3, 0.999,
default=0.9)
# TODO: Add policy based on this sklearn sgd
policy_choices = ['fixed', 'inv', 'exp', 'step']
lr_policy = CategoricalHyperparameter(name="lr_policy",
choices=policy_choices,
default='fixed')
gamma = UniformFloatHyperparameter(name="gamma",
lower=1e-3, upper=1e-1,
default=1e-2)
power = UniformFloatHyperparameter("power",
0.0, 1.0,
default=0.5)
epoch_step = UniformIntegerHyperparameter("epoch_step",
2, 20,
default=5)
cs.add_hyperparameter(solver)
cs.add_hyperparameter(beta1)
cs.add_hyperparameter(beta2)
cs.add_hyperparameter(momentum)
cs.add_hyperparameter(rho)
cs.add_hyperparameter(lr_policy)
cs.add_hyperparameter(gamma)
cs.add_hyperparameter(power)
cs.add_hyperparameter(epoch_step)
# Define parameters that are needed for each layer
output_activation_choices = ['softmax', 'sigmoid', 'softplus', 'tanh']
activations_choices = ['sigmoid', 'tanh', 'scaledTanh', 'elu', 'relu', 'leaky', 'linear']
weight_choices = ['constant', 'normal', 'uniform',
'glorot_normal', 'glorot_uniform',
'he_normal', 'he_uniform',
'ortogonal', 'sparse']
# Iterate over parameters that are used in each layer
for i in range(1, max_num_layers):
layer_units = UniformIntegerHyperparameter("num_units_layer_" + str(i),
64, 4096,
log=True,
default=128)
cs.add_hyperparameter(layer_units)
layer_dropout = UniformFloatHyperparameter("dropout_layer_" + str(i),
0.0, 0.99,
default=0.5)
cs.add_hyperparameter(layer_dropout)
weight_initialization = CategoricalHyperparameter('weight_init_' + str(i),
choices=weight_choices,
default='he_normal')
cs.add_hyperparameter(weight_initialization)
layer_std = UniformFloatHyperparameter("std_layer_" + str(i),
1e-6, 0.1,
log=True,
default=0.005)
cs.add_hyperparameter(layer_std)
layer_activation = CategoricalHyperparameter("activation_layer_" + str(i),
choices=activations_choices,
default="relu")
cs.add_hyperparameter(layer_activation)
layer_leakiness = UniformFloatHyperparameter('leakiness_layer_' + str(i),
0.01, 0.99,
default=0.3)
cs.add_hyperparameter(layer_leakiness)
layer_tanh_alpha = UniformFloatHyperparameter('tanh_alpha_layer_' + str(i),
0.5, 1.0,
default=2. / 3.)
cs.add_hyperparameter(layer_tanh_alpha)
layer_tanh_beta = UniformFloatHyperparameter('tanh_beta_layer_' + str(i),
1.1, 3.0,
log=True,
default=1.7159)
cs.add_hyperparameter(layer_tanh_beta)
# TODO: Could be in a function in a new module
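# Each per-layer hyperparameter is only active when the chosen num_layers
# letter encodes at least that many hidden layers (layer_choices[i - 1:]).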
for i in range(2, max_num_layers):
# Condition layers parameter on layer choice
layer_unit_param = cs.get_hyperparameter("num_units_layer_" + str(i))
layer_cond = InCondition(child=layer_unit_param, parent=num_layers,
values=[l for l in layer_choices[i - 1:]])
cs.add_condition(layer_cond)
# Condition dropout parameter on layer choice
layer_dropout_param = cs.get_hyperparameter("dropout_layer_" + str(i))
layer_cond = InCondition(child=layer_dropout_param, parent=num_layers,
values=[l for l in layer_choices[i - 1:]])
cs.add_condition(layer_cond)
# Condition weight initialization on layer choice
layer_weight_param = cs.get_hyperparameter("weight_init_" + str(i))
layer_cond = InCondition(child=layer_weight_param, parent=num_layers,
values=[l for l in layer_choices[i - 1:]])
cs.add_condition(layer_cond)
# Condition std parameter on weight layer initialization choice
layer_std_param = cs.get_hyperparameter("std_layer_" + str(i))
weight_cond = EqualsCondition(child=layer_std_param,
parent=layer_weight_param,
value='normal')
cs.add_condition(weight_cond)
# Condition activation parameter on layer choice
layer_activation_param = cs.get_hyperparameter("activation_layer_" + str(i))
layer_cond = InCondition(child=layer_activation_param, parent=num_layers,
values=[l for l in layer_choices[i - 1:]])
cs.add_condition(layer_cond)
# Condition leakiness on activation choice
layer_leakiness_param = cs.get_hyperparameter("leakiness_layer_" + str(i))
activation_cond = EqualsCondition(child=layer_leakiness_param,
parent=layer_activation_param,
value='leaky')
cs.add_condition(activation_cond)
# Condition tanh on activation choice
layer_tanh_alpha_param = cs.get_hyperparameter("tanh_alpha_layer_" + str(i))
activation_cond = EqualsCondition(child=layer_tanh_alpha_param,
parent=layer_activation_param,
value='scaledTanh')
cs.add_condition(activation_cond)
layer_tanh_beta_param = cs.get_hyperparameter("tanh_beta_layer_" + str(i))
activation_cond = EqualsCondition(child=layer_tanh_beta_param,
parent=layer_activation_param,
value='scaledTanh')
cs.add_condition(activation_cond)
# Conditioning on solver
momentum_depends_on_solver = InCondition(momentum, solver,
values=["momentum", "nesterov"])
beta1_depends_on_solver = EqualsCondition(beta1, solver, "adam")
beta2_depends_on_solver = EqualsCondition(beta2, solver, "adam")
rho_depends_on_solver = EqualsCondition(rho, solver, "adadelta")
cs.add_condition(momentum_depends_on_solver)
cs.add_condition(beta1_depends_on_solver)
cs.add_condition(beta2_depends_on_solver)
cs.add_condition(rho_depends_on_solver)
# Conditioning on learning rate policy
lr_policy_depends_on_solver = InCondition(lr_policy, solver,
["adadelta", "adagrad", "sgd",
"momentum", "nesterov"])
gamma_depends_on_policy = InCondition(child=gamma, parent=lr_policy,
values=["inv", "exp", "step"])
power_depends_on_policy = EqualsCondition(power, lr_policy, "inv")
epoch_step_depends_on_policy = EqualsCondition(epoch_step, lr_policy, "step")
cs.add_condition(lr_policy_depends_on_solver)
cs.add_condition(gamma_depends_on_policy)
cs.add_condition(power_depends_on_policy)
cs.add_condition(epoch_step_depends_on_policy)
return cs
|
|
# Copyright 2012 Nebula, Inc.
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import forms
from django import http
from django import shortcuts
from django.template import defaultfilters
from mox import IsA # noqa
from horizon_lib import tables
from horizon_lib.tables import formset as table_formset
from horizon_lib.tables import views as table_views
from horizon_lib.test import helpers as test
class FakeObject(object):
def __init__(self, id, name, value, status, optional=None, excluded=None):
self.id = id
self.name = name
self.value = value
self.status = status
self.optional = optional
self.excluded = excluded
self.extra = "extra"
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.name)
TEST_DATA = (
FakeObject('1', 'object_1', 'value_1', 'up', 'optional_1', 'excluded_1'),
FakeObject('2', 'object_2', '<strong>evil</strong>', 'down', 'optional_2'),
FakeObject('3', 'object_3', 'value_3', 'up'),
)
TEST_DATA_2 = (
FakeObject('1', 'object_1', 'value_1', 'down', 'optional_1', 'excluded_1'),
)
TEST_DATA_3 = (
FakeObject('1', 'object_1', 'value_1', 'up', 'optional_1', 'excluded_1'),
)
TEST_DATA_4 = (
FakeObject('1', 'object_1', 2, 'up'),
FakeObject('2', 'object_2', 4, 'up'),
)
TEST_DATA_5 = (
FakeObject('1', 'object_1', 'A Value That is longer than 35 characters!',
'down', 'optional_1'),
)
TEST_DATA_6 = (
FakeObject('1', 'object_1', 'DELETED', 'down'),
FakeObject('2', 'object_2', 'CREATED', 'up'),
FakeObject('3', 'object_3', 'STANDBY', 'standby'),
)
TEST_DATA_7 = (
FakeObject('1', 'wrapped name', 'wrapped value', 'status',
'not wrapped optional'),
)
class MyLinkAction(tables.LinkAction):
name = "login"
verbose_name = "Log In"
url = "login"
attrs = {
"class": "ajax-modal",
}
def get_link_url(self, datum=None, *args, **kwargs):
return reverse(self.url)
class MyAction(tables.Action):
name = "delete"
verbose_name = "Delete Me"
verbose_name_plural = "Delete Them"
def allowed(self, request, obj=None):
return getattr(obj, 'status', None) != 'down'
def handle(self, data_table, request, object_ids):
return shortcuts.redirect('http://example.com/?ids=%s'
% ",".join(object_ids))
class MyColumn(tables.Column):
pass
class MyRowSelectable(tables.Row):
ajax = True
def can_be_selected(self, datum):
return datum.value != 'DELETED'
class MyRow(tables.Row):
ajax = True
@classmethod
def get_data(cls, request, obj_id):
return TEST_DATA_2[0]
class MyBatchAction(tables.BatchAction):
name = "batch"
action_present = "Batch"
action_past = "Batched"
data_type_singular = "Item"
data_type_plural = "Items"
def action(self, request, object_ids):
pass
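# MyToggleAction uses tuple-valued action_present/action_past: allowed()
# switches to index 1 ("Up"/"Upped") when the object is down, so the button
# label follows the object's status.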
class MyToggleAction(tables.BatchAction):
name = "toggle"
action_present = ("Down", "Up")
action_past = ("Downed", "Upped")
data_type_singular = "Item"
data_type_plural = "Items"
def allowed(self, request, obj=None):
if not obj:
return False
self.down = getattr(obj, 'status', None) == 'down'
if self.down:
self.current_present_action = 1
return self.down or getattr(obj, 'status', None) == 'up'
def action(self, request, object_ids):
if self.down:
# up it
self.current_past_action = 1
class MyDisabledAction(MyToggleAction):
def allowed(self, request, obj=None):
return False
class MyFilterAction(tables.FilterAction):
def filter(self, table, objs, filter_string):
q = filter_string.lower()
def comp(obj):
if q in obj.name.lower():
return True
return False
return filter(comp, objs)
class MyServerFilterAction(tables.FilterAction):
filter_type = 'server'
filter_choices = (('name', 'Name', False),
('status', 'Status', True))
needs_preloading = True
def filter(self, table, items, filter_string):
filter_field = table.get_filter_field()
if filter_field == 'name' and filter_string:
return [item for item in items
if filter_string in item.name]
return items
class MyUpdateAction(tables.UpdateAction):
def allowed(self, *args):
return True
def update_cell(self, *args):
pass
class MyUpdateActionNotAllowed(MyUpdateAction):
def allowed(self, *args):
return False
def get_name(obj):
return "custom %s" % obj.name
def get_link(obj):
return reverse('login')
class MyTable(tables.DataTable):
tooltip_dict = {'up': {'title': 'service is up and running',
'style': 'color:green;cursor:pointer'},
'down': {'title': 'service is not available',
'style': 'color:red;cursor:pointer'}}
id = tables.Column('id', hidden=True, sortable=False)
name = tables.Column(get_name,
verbose_name="Verbose Name",
sortable=True,
form_field=forms.CharField(required=True),
form_field_attributes={'class': 'test'},
update_action=MyUpdateAction)
value = tables.Column('value',
sortable=True,
link='http://example.com/',
attrs={'class': 'green blue'},
summation="average",
truncate=35,
link_classes=('link-modal',),
link_attrs={'data-type': 'modal dialog',
'data-tip': 'click for dialog'})
status = tables.Column('status', link=get_link,
cell_attributes_getter=tooltip_dict.get)
optional = tables.Column('optional', empty_value='N/A')
excluded = tables.Column('excluded')
class Meta:
name = "my_table"
verbose_name = "My Table"
status_columns = ["status"]
columns = ('id', 'name', 'value', 'optional', 'status')
row_class = MyRow
column_class = MyColumn
table_actions = (MyFilterAction, MyAction, MyBatchAction)
row_actions = (MyAction, MyLinkAction, MyBatchAction, MyToggleAction)
class MyServerFilterTable(MyTable):
class Meta:
name = "my_table"
verbose_name = "My Table"
status_columns = ["status"]
columns = ('id', 'name', 'value', 'optional', 'status')
row_class = MyRow
column_class = MyColumn
table_actions = (MyServerFilterAction, MyAction, MyBatchAction)
row_actions = (MyAction, MyLinkAction, MyBatchAction, MyToggleAction)
class MyTableSelectable(MyTable):
class Meta:
name = "my_table"
columns = ('id', 'name', 'value', 'status')
row_class = MyRowSelectable
status_columns = ["status"]
multi_select = True
class MyTableNotAllowedInlineEdit(MyTable):
name = tables.Column(get_name,
verbose_name="Verbose Name",
sortable=True,
form_field=forms.CharField(required=True),
form_field_attributes={'class': 'test'},
update_action=MyUpdateActionNotAllowed)
class Meta:
name = "my_table"
columns = ('id', 'name', 'value', 'optional', 'status')
row_class = MyRow
class MyTableWrapList(MyTable):
name = tables.Column('name',
form_field=forms.CharField(required=True),
form_field_attributes={'class': 'test'},
update_action=MyUpdateActionNotAllowed,
wrap_list=True)
value = tables.Column('value',
wrap_list=True)
optional = tables.Column('optional',
wrap_list=False)
class NoActionsTable(tables.DataTable):
id = tables.Column('id')
class Meta:
name = "no_actions_table"
verbose_name = "No Actions Table"
table_actions = ()
row_actions = ()
class DisabledActionsTable(tables.DataTable):
id = tables.Column('id')
class Meta:
name = "disabled_actions_table"
verbose_name = "Disabled Actions Table"
table_actions = (MyDisabledAction,)
row_actions = ()
multi_select = True
class DataTableTests(test.TestCase):
def test_table_instantiation(self):
"""Tests everything that happens when the table is instantiated."""
self.table = MyTable(self.request, TEST_DATA)
# Properties defined on the table
self.assertEqual(TEST_DATA, self.table.data)
self.assertEqual("my_table", self.table.name)
# Verify calculated options that weren't specified explicitly
self.assertTrue(self.table._meta.actions_column)
self.assertTrue(self.table._meta.multi_select)
# Test for verbose_name
self.assertEqual(u"My Table", unicode(self.table))
# Column ordering and exclusion.
# This should include auto-columns for multi_select and actions,
# but should not contain the excluded column.
# Additionally, auto-generated columns should use the custom
# column class specified on the table.
self.assertQuerysetEqual(self.table.columns.values(),
['<MyColumn: multi_select>',
'<Column: id>',
'<Column: name>',
'<Column: value>',
'<Column: optional>',
'<Column: status>',
'<MyColumn: actions>'])
# Actions (these also test ordering)
self.assertQuerysetEqual(self.table.base_actions.values(),
['<MyBatchAction: batch>',
'<MyAction: delete>',
'<MyFilterAction: filter>',
'<MyLinkAction: login>',
'<MyToggleAction: toggle>'])
self.assertQuerysetEqual(self.table.get_table_actions(),
['<MyFilterAction: filter>',
'<MyAction: delete>',
'<MyBatchAction: batch>'])
self.assertQuerysetEqual(self.table.get_row_actions(TEST_DATA[0]),
['<MyAction: delete>',
'<MyLinkAction: login>',
'<MyBatchAction: batch>',
'<MyToggleAction: toggle>'])
# Auto-generated columns
multi_select = self.table.columns['multi_select']
self.assertEqual("multi_select", multi_select.auto)
self.assertEqual("multi_select_column",
multi_select.get_final_attrs().get('class', ""))
actions = self.table.columns['actions']
self.assertEqual("actions", actions.auto)
self.assertEqual("actions_column",
actions.get_final_attrs().get('class', ""))
# In-line edit action on column.
name_column = self.table.columns['name']
self.assertEqual(MyUpdateAction, name_column.update_action)
self.assertEqual(forms.CharField, name_column.form_field.__class__)
self.assertEqual({'class': 'test'}, name_column.form_field_attributes)
def test_table_force_no_multiselect(self):
class TempTable(MyTable):
class Meta:
columns = ('id',)
table_actions = (MyFilterAction, MyAction,)
row_actions = (MyAction, MyLinkAction,)
multi_select = False
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: id>',
'<Column: actions>'])
def test_table_force_no_actions_column(self):
class TempTable(MyTable):
class Meta:
columns = ('id',)
table_actions = (MyFilterAction, MyAction,)
row_actions = (MyAction, MyLinkAction,)
actions_column = False
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: multi_select>',
'<Column: id>'])
def test_table_natural_no_inline_editing(self):
class TempTable(MyTable):
name = tables.Column(get_name,
verbose_name="Verbose Name",
sortable=True)
class Meta:
name = "my_table"
columns = ('id', 'name', 'value', 'optional', 'status')
self.table = TempTable(self.request, TEST_DATA_2)
name_column = self.table.columns['name']
self.assertIsNone(name_column.update_action)
self.assertIsNone(name_column.form_field)
self.assertEqual({}, name_column.form_field_attributes)
def test_table_natural_no_actions_column(self):
class TempTable(MyTable):
class Meta:
columns = ('id',)
table_actions = (MyFilterAction, MyAction,)
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: multi_select>',
'<Column: id>'])
def test_table_natural_no_multiselect(self):
class TempTable(MyTable):
class Meta:
columns = ('id',)
row_actions = (MyAction, MyLinkAction,)
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: id>',
'<Column: actions>'])
def test_table_column_inheritance(self):
class TempTable(MyTable):
extra = tables.Column('extra')
class Meta:
name = "temp_table"
table_actions = (MyFilterAction, MyAction,)
row_actions = (MyAction, MyLinkAction,)
self.table = TempTable(self.request, TEST_DATA)
self.assertQuerysetEqual(self.table.columns.values(),
['<Column: multi_select>',
'<Column: id>',
'<Column: name>',
'<Column: value>',
'<Column: status>',
'<Column: optional>',
'<Column: excluded>',
'<Column: extra>',
'<Column: actions>'])
def test_table_construction(self):
self.table = MyTable(self.request, TEST_DATA)
# Verify we retrieve the right columns for headers
columns = self.table.get_columns()
self.assertQuerysetEqual(columns, ['<MyColumn: multi_select>',
'<Column: id>',
'<Column: name>',
'<Column: value>',
'<Column: optional>',
'<Column: status>',
'<MyColumn: actions>'])
# Verify we retrieve the right rows from our data
rows = self.table.get_rows()
self.assertQuerysetEqual(rows, ['<MyRow: my_table__row__1>',
'<MyRow: my_table__row__2>',
'<MyRow: my_table__row__3>'])
# Verify each row contains the right cells
self.assertQuerysetEqual(rows[0].get_cells(),
['<Cell: multi_select, my_table__row__1>',
'<Cell: id, my_table__row__1>',
'<Cell: name, my_table__row__1>',
'<Cell: value, my_table__row__1>',
'<Cell: optional, my_table__row__1>',
'<Cell: status, my_table__row__1>',
'<Cell: actions, my_table__row__1>'])
def test_table_column(self):
self.table = MyTable(self.request, TEST_DATA)
row = self.table.get_rows()[0]
row3 = self.table.get_rows()[2]
id_col = self.table.columns['id']
name_col = self.table.columns['name']
value_col = self.table.columns['value']
# transform
self.assertEqual('1', row.cells['id'].data) # Standard attr access
self.assertEqual('custom object_1', row.cells['name'].data) # Callable
# name and verbose_name
self.assertEqual("Id", unicode(id_col))
self.assertEqual("Verbose Name", unicode(name_col))
# sortable
self.assertEqual(False, id_col.sortable)
self.assertNotIn("sortable", id_col.get_final_attrs().get('class', ""))
self.assertEqual(True, name_col.sortable)
self.assertIn("sortable", name_col.get_final_attrs().get('class', ""))
# hidden
self.assertEqual(True, id_col.hidden)
self.assertIn("hide", id_col.get_final_attrs().get('class', ""))
self.assertEqual(False, name_col.hidden)
self.assertNotIn("hide", name_col.get_final_attrs().get('class', ""))
# link, link_classes, link_attrs, and get_link_url
self.assertIn('href="http://example.com/"', row.cells['value'].value)
self.assertIn('class="link-modal"', row.cells['value'].value)
self.assertIn('data-type="modal dialog"', row.cells['value'].value)
self.assertIn('data-tip="click for dialog"', row.cells['value'].value)
self.assertIn('href="/auth/login/"', row.cells['status'].value)
# empty_value
self.assertEqual("N/A", row3.cells['optional'].value)
# classes
self.assertEqual("green blue sortable anchor normal_column",
value_col.get_final_attrs().get('class', ""))
# status
cell_status = row.cells['status'].status
self.assertEqual(True, cell_status)
self.assertEqual('status_up',
row.cells['status'].get_status_class(cell_status))
# status_choices
id_col.status = True
id_col.status_choices = (('1', False), ('2', True), ('3', None))
cell_status = row.cells['id'].status
self.assertEqual(False, cell_status)
self.assertEqual('status_down',
row.cells['id'].get_status_class(cell_status))
cell_status = row3.cells['id'].status
self.assertIsNone(cell_status)
self.assertEqual('status_unknown',
row.cells['id'].get_status_class(cell_status))
# Ensure data is not cached on the column across table instances
self.table = MyTable(self.request, TEST_DATA_2)
row = self.table.get_rows()[0]
self.assertTrue("down" in row.cells['status'].value)
def test_table_row(self):
self.table = MyTable(self.request, TEST_DATA)
row = self.table.get_rows()[0]
self.assertEqual(self.table, row.table)
self.assertEqual(TEST_DATA[0], row.datum)
self.assertEqual('my_table__row__1', row.id)
# Verify row status works even if status isn't set on the column
self.assertEqual(True, row.status)
self.assertEqual('status_up', row.status_class)
# Check the cells as well
cell_status = row.cells['status'].status
self.assertEqual(True, cell_status)
self.assertEqual('status_up',
row.cells['status'].get_status_class(cell_status))
def test_table_column_truncation(self):
self.table = MyTable(self.request, TEST_DATA_5)
row = self.table.get_rows()[0]
self.assertEqual(35, len(row.cells['value'].data))
self.assertEqual(u'A Value That is longer than 35 c...',
row.cells['value'].data)
def test_table_rendering(self):
self.table = MyTable(self.request, TEST_DATA)
# Table actions
table_actions = self.table.render_table_actions()
resp = http.HttpResponse(table_actions)
self.assertContains(resp, "table_search", 1)
self.assertContains(resp, "my_table__filter__q", 1)
self.assertContains(resp, "my_table__delete", 1)
self.assertContains(resp, 'id="my_table__action_delete"', 1)
# Row actions
row_actions = self.table.render_row_actions(TEST_DATA[0])
resp = http.HttpResponse(row_actions)
self.assertContains(resp, "<li", 3)
self.assertContains(resp, "my_table__delete__1", 1)
self.assertContains(resp, "my_table__toggle__1", 1)
self.assertContains(resp, "/auth/login/", 1)
self.assertContains(resp, "ajax-modal", 1)
self.assertContains(resp, 'id="my_table__row_1__action_delete"', 1)
# Whole table
resp = http.HttpResponse(self.table.render())
self.assertContains(resp, '<table id="my_table"', 1)
self.assertContains(resp, '<th ', 8)
self.assertContains(resp, 'id="my_table__row__1"', 1)
self.assertContains(resp, 'id="my_table__row__2"', 1)
self.assertContains(resp, 'id="my_table__row__3"', 1)
update_string = "action=row_update&table=my_table&obj_id="
self.assertContains(resp, update_string, 3)
self.assertContains(resp, "data-update-interval", 3)
# Verify our XSS protection
self.assertContains(resp, '<a href="http://example.com/" '
'data-tip="click for dialog" '
'data-type="modal dialog" '
'class="link-modal">'
'<strong>evil</strong></a>', 1)
# Filter = False hides the search box
self.table._meta.filter = False
table_actions = self.table.render_table_actions()
resp = http.HttpResponse(table_actions)
self.assertContains(resp, "table_search", 0)
def test_wrap_list_rendering(self):
self.table = MyTableWrapList(self.request, TEST_DATA_7)
row = self.table.get_rows()[0]
name_cell = row.cells['name']
value_cell = row.cells['value']
optional_cell = row.cells['optional']
# Check if the cell is rendered correctly.
name_cell_rendered = name_cell.render()
value_cell_rendered = value_cell.render()
optional_cell_rendered = optional_cell.render()
resp_name = http.HttpResponse(name_cell_rendered)
resp_value = http.HttpResponse(value_cell_rendered)
resp_optional = http.HttpResponse(optional_cell_rendered)
self.assertContains(resp_name, '<ul>wrapped name</ul>', 1)
self.assertContains(resp_value, '<ul>wrapped value</ul>', 1)
self.assertContains(resp_optional, 'not wrapped optional', 1)
self.assertNotContains(resp_optional, '<ul>')
self.assertNotContains(resp_optional, '</ul>')
def test_inline_edit_available_cell_rendering(self):
self.table = MyTable(self.request, TEST_DATA_2)
row = self.table.get_rows()[0]
name_cell = row.cells['name']
# Check if in-line edit is available in the cell,
# but is not in inline_edit_mod.
self.assertEqual(True,
name_cell.inline_edit_available)
self.assertEqual(False,
name_cell.inline_edit_mod)
# Check if the cell is rendered correctly.
name_cell_rendered = name_cell.render()
resp = http.HttpResponse(name_cell_rendered)
self.assertContains(resp, '<td', 1)
self.assertContains(resp, 'inline_edit_available', 1)
self.assertContains(resp,
'data-update-url="?action=cell_update&'
'table=my_table&cell_name=name&obj_id=1"',
1)
self.assertContains(resp, 'table_cell_wrapper', 1)
self.assertContains(resp, 'table_cell_data_wrapper', 1)
self.assertContains(resp, 'table_cell_action', 1)
self.assertContains(resp, 'ajax-inline-edit', 1)
def test_inline_edit_available_not_allowed_cell_rendering(self):
self.table = MyTableNotAllowedInlineEdit(self.request, TEST_DATA_2)
row = self.table.get_rows()[0]
name_cell = row.cells['name']
# Check if in-line edit is available in the cell,
# but is not in inline_edit_mod.
self.assertEqual(True,
name_cell.inline_edit_available)
self.assertEqual(False,
name_cell.inline_edit_mod)
# Check if the cell is rendered correctly.
name_cell_rendered = name_cell.render()
resp = http.HttpResponse(name_cell_rendered)
self.assertContains(resp, '<td', 1)
self.assertContains(resp, 'inline_edit_available', 1)
self.assertContains(resp,
'data-update-url="?action=cell_update&'
'table=my_table&cell_name=name&obj_id=1"',
1)
self.assertContains(resp, 'table_cell_wrapper', 0)
self.assertContains(resp, 'table_cell_data_wrapper', 0)
self.assertContains(resp, 'table_cell_action', 0)
self.assertContains(resp, 'ajax-inline-edit', 0)
def test_inline_edit_mod_cell_rendering(self):
self.table = MyTable(self.request, TEST_DATA_2)
name_col = self.table.columns['name']
name_col.auto = "form_field"
row = self.table.get_rows()[0]
name_cell = row.cells['name']
name_cell.inline_edit_mod = True
# Check if in-line edit is available in the cell,
# and is in inline_edit_mod, also column auto must be
# set as form_field.
self.assertEqual(True,
name_cell.inline_edit_available)
self.assertEqual(True,
name_cell.inline_edit_mod)
self.assertEqual('form_field',
name_col.auto)
# Check if the cell is rendered correctly.
name_cell_rendered = name_cell.render()
resp = http.HttpResponse(name_cell_rendered)
self.assertContains(resp,
'<input class="test" id="name__1" name="name__1"'
' type="text" value="custom object_1" />',
count=1, html=True)
self.assertContains(resp, '<td', 1)
self.assertContains(resp, 'inline_edit_available', 1)
self.assertContains(resp,
'data-update-url="?action=cell_update&'
'table=my_table&cell_name=name&obj_id=1"',
1)
self.assertContains(resp, 'table_cell_wrapper', 1)
self.assertContains(resp, 'inline-edit-error', 1)
self.assertContains(resp, 'inline-edit-form', 1)
self.assertContains(resp, 'inline-edit-actions', 1)
self.assertContains(resp, 'inline-edit-submit', 1)
self.assertContains(resp, 'inline-edit-cancel', 1)
def test_inline_edit_mod_checkbox_with_label(self):
class TempTable(MyTable):
name = tables.Column(get_name,
verbose_name="Verbose Name",
sortable=True,
form_field=forms.BooleanField(
required=True,
label="Verbose Name"),
form_field_attributes={'class': 'test'},
update_action=MyUpdateAction)
class Meta:
name = "my_table"
columns = ('id', 'name', 'value', 'optional', 'status')
self.table = TempTable(self.request, TEST_DATA_2)
name_col = self.table.columns['name']
name_col.auto = "form_field"
row = self.table.get_rows()[0]
name_cell = row.cells['name']
name_cell.inline_edit_mod = True
# Check if the cell is rendered correctly.
name_cell_rendered = name_cell.render()
resp = http.HttpResponse(name_cell_rendered)
self.assertContains(resp,
'<input checked="checked" class="test" '
'id="name__1" name="name__1" type="checkbox" '
'value="custom object_1" />',
count=1, html=True)
self.assertContains(resp,
'<label class="inline-edit-label" for="name__1">'
'Verbose Name</label>',
count=1, html=True)
def test_inline_edit_mod_textarea(self):
class TempTable(MyTable):
name = tables.Column(get_name,
verbose_name="Verbose Name",
sortable=True,
form_field=forms.CharField(
widget=forms.Textarea(),
required=False),
form_field_attributes={'class': 'test'},
update_action=MyUpdateAction)
class Meta:
name = "my_table"
columns = ('id', 'name', 'value', 'optional', 'status')
self.table = TempTable(self.request, TEST_DATA_2)
name_col = self.table.columns['name']
name_col.auto = "form_field"
row = self.table.get_rows()[0]
name_cell = row.cells['name']
name_cell.inline_edit_mod = True
# Check if the cell is rendered correctly.
name_cell_rendered = name_cell.render()
resp = http.HttpResponse(name_cell_rendered)
self.assertContains(resp,
'<textarea class="test" cols="40" id="name__1" '
'name="name__1" rows="10">\r\ncustom object_1'
'</textarea>',
count=1, html=True)
def test_table_actions(self):
# Single object action
action_string = "my_table__delete__1"
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(('my_table', 'delete', '1'),
self.table.parse_action(action_string))
handled = self.table.maybe_handle()
self.assertEqual(302, handled.status_code)
self.assertEqual("http://example.com/?ids=1", handled["location"])
# Batch action (without toggle) conjugation behavior
req = self.factory.get('/my_url/')
self.table = MyTable(req, TEST_DATA_3)
toggle_action = self.table.get_row_actions(TEST_DATA_3[0])[2]
self.assertEqual("Batch Item", unicode(toggle_action.verbose_name))
# Single object toggle action
# GET page - 'up' to 'down'
req = self.factory.get('/my_url/')
self.table = MyTable(req, TEST_DATA_3)
self.assertEqual(4, len(self.table.get_row_actions(TEST_DATA_3[0])))
toggle_action = self.table.get_row_actions(TEST_DATA_3[0])[3]
self.assertEqual("Down Item", unicode(toggle_action.verbose_name))
# Toggle from status 'up' to 'down'
# POST page
action_string = "my_table__toggle__1"
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(('my_table', 'toggle', '1'),
self.table.parse_action(action_string))
handled = self.table.maybe_handle()
self.assertEqual(302, handled.status_code)
self.assertEqual("/my_url/", handled["location"])
self.assertEqual(u"Downed Item: object_1",
list(req._messages)[0].message)
# Toggle from status 'down' to 'up'
# GET page - 'down' to 'up'
req = self.factory.get('/my_url/')
self.table = MyTable(req, TEST_DATA_2)
self.assertEqual(3, len(self.table.get_row_actions(TEST_DATA_2[0])))
toggle_action = self.table.get_row_actions(TEST_DATA_2[0])[2]
self.assertEqual("Up Item", unicode(toggle_action.verbose_name))
# POST page
action_string = "my_table__toggle__2"
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(('my_table', 'toggle', '2'),
self.table.parse_action(action_string))
handled = self.table.maybe_handle()
self.assertEqual(302, handled.status_code)
self.assertEqual("/my_url/", handled["location"])
self.assertEqual(u"Upped Item: object_2",
list(req._messages)[0].message)
# Multiple object action
action_string = "my_table__delete"
req = self.factory.post('/my_url/', {'action': action_string,
'object_ids': [1, 2]})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(('my_table', 'delete', None),
self.table.parse_action(action_string))
handled = self.table.maybe_handle()
self.assertEqual(302, handled.status_code)
self.assertEqual("http://example.com/?ids=1,2", handled["location"])
# Action with nothing selected
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(('my_table', 'delete', None),
self.table.parse_action(action_string))
handled = self.table.maybe_handle()
self.assertIsNone(handled)
self.assertEqual("Please select a row before taking that action.",
list(req._messages)[0].message)
# Action with specific id and multiple ids favors single id
action_string = "my_table__delete__3"
req = self.factory.post('/my_url/', {'action': action_string,
'object_ids': [1, 2]})
self.table = MyTable(req, TEST_DATA)
self.assertEqual(('my_table', 'delete', '3'),
self.table.parse_action(action_string))
handled = self.table.maybe_handle()
self.assertEqual(302, handled.status_code)
self.assertEqual("http://example.com/?ids=3",
handled["location"])
# At least one object in table
# BatchAction is available
req = self.factory.get('/my_url/')
self.table = MyTable(req, TEST_DATA_2)
self.assertQuerysetEqual(self.table.get_table_actions(),
['<MyFilterAction: filter>',
'<MyAction: delete>',
'<MyBatchAction: batch>'])
# Zero objects in table
# BatchAction not available
req = self.factory.get('/my_url/')
self.table = MyTable(req, None)
self.assertQuerysetEqual(self.table.get_table_actions(),
['<MyFilterAction: filter>',
'<MyAction: delete>'])
# Filtering
action_string = "my_table__filter__q"
req = self.factory.post('/my_url/', {action_string: '2'})
self.table = MyTable(req, TEST_DATA)
handled = self.table.maybe_handle()
self.assertIsNone(handled)
self.assertQuerysetEqual(self.table.filtered_data,
['<FakeObject: object_2>'])
# Ensure filtering respects the request method, e.g. no filter here
req = self.factory.get('/my_url/', {action_string: '2'})
self.table = MyTable(req, TEST_DATA)
handled = self.table.maybe_handle()
self.assertIsNone(handled)
self.assertQuerysetEqual(self.table.filtered_data,
['<FakeObject: object_1>',
'<FakeObject: object_2>',
'<FakeObject: object_3>'])
# Updating and preemptive actions
params = {"table": "my_table", "action": "row_update", "obj_id": "1"}
req = self.factory.get('/my_url/',
params,
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.table = MyTable(req)
resp = self.table.maybe_preempt()
self.assertEqual(200, resp.status_code)
# Make sure the data returned differs from the original
self.assertContains(resp, "my_table__row__1")
self.assertContains(resp, "status_down")
# Verify that we don't get a response for a valid action with the
# wrong method.
params = {"table": "my_table", "action": "delete", "obj_id": "1"}
req = self.factory.get('/my_url/', params)
self.table = MyTable(req)
resp = self.table.maybe_preempt()
self.assertIsNone(resp)
resp = self.table.maybe_handle()
self.assertIsNone(resp)
# Verbose names
table_actions = self.table.get_table_actions()
self.assertEqual("Filter", unicode(table_actions[0].verbose_name))
self.assertEqual("Delete Me", unicode(table_actions[1].verbose_name))
row_actions = self.table.get_row_actions(TEST_DATA[0])
self.assertEqual("Delete Me", unicode(row_actions[0].verbose_name))
self.assertEqual("Log In", unicode(row_actions[1].verbose_name))
def test_server_filtering(self):
filter_value_param = "my_table__filter__q"
filter_field_param = '%s_field' % filter_value_param
# Server Filtering
req = self.factory.post('/my_url/')
req.session[filter_value_param] = '2'
req.session[filter_field_param] = 'name'
self.table = MyServerFilterTable(req, TEST_DATA)
handled = self.table.maybe_handle()
self.assertIsNone(handled)
self.assertQuerysetEqual(self.table.filtered_data,
['<FakeObject: object_2>'])
# Ensure API filtering does not filter on server, e.g. no filter here
req = self.factory.post('/my_url/')
req.session[filter_value_param] = 'up'
req.session[filter_field_param] = 'status'
self.table = MyServerFilterTable(req, TEST_DATA)
handled = self.table.maybe_handle()
self.assertIsNone(handled)
self.assertQuerysetEqual(self.table.filtered_data,
['<FakeObject: object_1>',
'<FakeObject: object_2>',
'<FakeObject: object_3>'])
def test_inline_edit_update_action_get_non_ajax(self):
# Non ajax inline edit request should return None.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.get(url, {})
self.table = MyTable(req, TEST_DATA_2)
handled = self.table.maybe_preempt()
# Checking the response header.
self.assertIsNone(handled)
def test_inline_edit_update_action_get(self):
# Get request should return td field with data.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.table = MyTable(req, TEST_DATA_2)
handled = self.table.maybe_preempt()
# Checking the response header.
self.assertEqual(200, handled.status_code)
# Checking the response content.
resp = handled
self.assertContains(resp, '<td', 1)
self.assertContains(resp, 'inline_edit_available', 1)
self.assertContains(
resp,
'data-update-url="/my_url/?action=cell_update&'
'table=my_table&cell_name=name&obj_id=1"',
1)
self.assertContains(resp, 'table_cell_wrapper', 1)
self.assertContains(resp, 'table_cell_data_wrapper', 1)
self.assertContains(resp, 'table_cell_action', 1)
self.assertContains(resp, 'ajax-inline-edit', 1)
def test_inline_edit_update_action_get_not_allowed(self):
# Name column has required validation, sending blank
# will return error.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.post(url, {})
self.table = MyTableNotAllowedInlineEdit(req, TEST_DATA_2)
handled = self.table.maybe_preempt()
# Checking the response header.
self.assertEqual(401, handled.status_code)
def test_inline_edit_update_action_get_inline_edit_mod(self):
# Get request in inline_edit_mode should return td with form field.
url = ('/my_url/?inline_edit_mod=true&action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.table = MyTable(req, TEST_DATA_2)
handled = self.table.maybe_preempt()
# Checking the response header.
self.assertEqual(200, handled.status_code)
# Checking the response content.
resp = handled
self.assertContains(resp,
'<input class="test" id="name__1" name="name__1"'
' type="text" value="custom object_1" />',
count=1, html=True)
self.assertContains(resp, '<td', 1)
self.assertContains(resp, 'inline_edit_available', 1)
self.assertContains(
resp,
'data-update-url="/my_url/?action=cell_update&'
'table=my_table&cell_name=name&obj_id=1"',
1)
self.assertContains(resp, 'table_cell_wrapper', 1)
self.assertContains(resp, 'inline-edit-error', 1)
self.assertContains(resp, 'inline-edit-form', 1)
self.assertContains(resp, 'inline-edit-actions', 1)
self.assertContains(resp, '<button', 2)
self.assertContains(resp, 'inline-edit-submit', 1)
self.assertContains(resp, 'inline-edit-cancel', 1)
def test_inline_edit_update_action_post(self):
# Post request should invoke the cell update table action.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.post(url, {'name__1': 'test_name'})
self.table = MyTable(req, TEST_DATA_2)
# checking the response header
handled = self.table.maybe_preempt()
self.assertEqual(200, handled.status_code)
def test_inline_edit_update_action_post_not_allowed(self):
# Post request should invoke the cell update table action.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.post(url, {'name__1': 'test_name'})
self.table = MyTableNotAllowedInlineEdit(req, TEST_DATA_2)
# checking the response header
handled = self.table.maybe_preempt()
self.assertEqual(401, handled.status_code)
def test_inline_edit_update_action_post_validation_error(self):
# Name column has required validation, sending blank
# will return error.
url = ('/my_url/?action=cell_update'
'&table=my_table&cell_name=name&obj_id=1')
req = self.factory.post(url, {})
self.table = MyTable(req, TEST_DATA_2)
handled = self.table.maybe_preempt()
# Checking the response header.
self.assertEqual(400, handled.status_code)
self.assertEqual(('Content-Type', 'application/json'),
handled._headers['content-type'])
# Checking the response content.
resp = handled
self.assertContains(resp,
'"message": "This field is required."',
count=1, status_code=400)
def test_column_uniqueness(self):
table1 = MyTable(self.request)
table2 = MyTable(self.request)
# Regression test for launchpad bug 964345.
self.assertNotEqual(id(table1), id(table2))
self.assertNotEqual(id(table1.columns), id(table2.columns))
t1cols = table1.columns.values()
t2cols = table2.columns.values()
self.assertEqual(t1cols[0].name, t2cols[0].name)
self.assertNotEqual(id(t1cols[0]), id(t2cols[0]))
self.assertNotEqual(id(t1cols[0].table),
id(t2cols[0].table))
self.assertNotEqual(id(t1cols[0].table._data_cache),
id(t2cols[0].table._data_cache))
def test_summation_row(self):
# Test with the "average" method.
table = MyTable(self.request, TEST_DATA_4)
res = http.HttpResponse(table.render())
self.assertContains(res, '<tr class="summation"', 1)
self.assertContains(res, '<td>Summary</td>', 1)
self.assertContains(res, '<td>3.0</td>', 1)
# Test again with the "sum" method.
table.columns['value'].summation = "sum"
res = http.HttpResponse(table.render())
self.assertContains(res, '<tr class="summation"', 1)
self.assertContains(res, '<td>Summary</td>', 1)
self.assertContains(res, '<td>6</td>', 1)
# One last test with no summation.
table.columns['value'].summation = None
table.needs_summary_row = False
res = http.HttpResponse(table.render())
self.assertNotContains(res, '<tr class="summation"')
self.assertNotContains(res, '<td>3.0</td>')
self.assertNotContains(res, '<td>6</td>')
# Even if "average" summation method is specified,
# we have summation fields but no value is provoded
# if the provided data cannot be summed.
table = MyTable(self.request, TEST_DATA)
res = http.HttpResponse(table.render())
self.assertContains(res, '<tr class="summation"')
self.assertNotContains(res, '<td>3.0</td>')
self.assertNotContains(res, '<td>6</td>')
def test_table_action_attributes(self):
table = MyTable(self.request, TEST_DATA)
self.assertTrue(table.has_actions)
self.assertTrue(table.needs_form_wrapper)
res = http.HttpResponse(table.render())
self.assertContains(res, "<form")
table = MyTable(self.request, TEST_DATA, needs_form_wrapper=False)
self.assertTrue(table.has_actions)
self.assertFalse(table.needs_form_wrapper)
res = http.HttpResponse(table.render())
self.assertNotContains(res, "<form")
table = NoActionsTable(self.request, TEST_DATA)
self.assertFalse(table.has_actions)
self.assertFalse(table.needs_form_wrapper)
res = http.HttpResponse(table.render())
self.assertNotContains(res, "<form")
def test_table_actions_not_allowed_hide_multiselect(self):
table = DisabledActionsTable(self.request, TEST_DATA)
self.assertFalse(table.has_actions)
self.assertFalse(table.needs_form_wrapper)
res = http.HttpResponse(table.render())
self.assertContains(res, "multi_select_column hidden")
def test_table_action_object_display_is_none(self):
action_string = "my_table__toggle__1"
req = self.factory.post('/my_url/', {'action': action_string})
self.table = MyTable(req, TEST_DATA)
self.mox.StubOutWithMock(self.table, 'get_object_display')
self.table.get_object_display(IsA(FakeObject)).AndReturn(None)
self.mox.ReplayAll()
self.assertEqual(('my_table', 'toggle', '1'),
self.table.parse_action(action_string))
handled = self.table.maybe_handle()
self.assertEqual(302, handled.status_code)
self.assertEqual("/my_url/", handled["location"])
self.assertEqual(u"Downed Item: N/A",
list(req._messages)[0].message)
def test_table_column_can_be_selected(self):
self.table = MyTableSelectable(self.request, TEST_DATA_6)
# non selectable row
row = self.table.get_rows()[0]
# selectable
row1 = self.table.get_rows()[1]
row2 = self.table.get_rows()[2]
id_col = self.table.columns['id']
name_col = self.table.columns['name']
value_col = self.table.columns['value']
# transform
self.assertEqual('1', row.cells['id'].data) # Standard attr access
self.assertEqual('custom object_1', row.cells['name'].data) # Callable
# name and verbose_name
self.assertEqual("Id", unicode(id_col))
self.assertEqual("Verbose Name", unicode(name_col))
self.assertIn("sortable", name_col.get_final_attrs().get('class', ""))
# hidden
self.assertEqual(True, id_col.hidden)
self.assertIn("hide", id_col.get_final_attrs().get('class', ""))
self.assertEqual(False, name_col.hidden)
self.assertNotIn("hide", name_col.get_final_attrs().get('class', ""))
# link, link_classes, link_attrs and get_link_url
self.assertIn('href="http://example.com/"', row.cells['value'].value)
self.assertIn('class="link-modal"', row.cells['value'].value)
self.assertIn('data-type="modal dialog"', row.cells['value'].value)
self.assertIn('data-tip="click for dialog"', row.cells['value'].value)
self.assertIn('href="/auth/login/"', row.cells['status'].value)
# classes
self.assertEqual("green blue sortable anchor normal_column",
value_col.get_final_attrs().get('class', ""))
self.assertQuerysetEqual(row.get_cells(),
['<Cell: multi_select, my_table__row__1>',
'<Cell: id, my_table__row__1>',
'<Cell: name, my_table__row__1>',
'<Cell: value, my_table__row__1>',
'<Cell: status, my_table__row__1>',
])
# can_be_selected = False
self.assertTrue(row.get_cells()[0].data == "")
# can_be_selected = True
self.assertIn('checkbox', row1.get_cells()[0].data)
# status
cell_status = row.cells['status'].status
self.assertEqual('status_down',
row.cells['status'].get_status_class(cell_status))
self.assertEqual(row.cells['status'].data, 'down')
self.assertEqual(row.cells['status'].attrs,
{'title': 'service is not available',
'style': 'color:red;cursor:pointer'})
self.assertEqual(row1.cells['status'].data, 'up')
self.assertEqual(row1.cells['status'].attrs,
{'title': 'service is up and running',
'style': 'color:green;cursor:pointer'})
self.assertEqual(row2.cells['status'].data, 'standby')
self.assertEqual(row2.cells['status'].attrs, {})
status_rendered = row.cells['status'].render()
resp = http.HttpResponse(status_rendered)
self.assertContains(resp, 'style="color:red;cursor:pointer"', 1)
self.assertContains(resp, 'title="service is not available"', 1)
status_rendered = row1.cells['status'].render()
resp = http.HttpResponse(status_rendered)
self.assertContains(resp, 'style="color:green;cursor:pointer"', 1)
self.assertContains(resp, 'title="service is up and running"', 1)
# status_choices
id_col.status = True
id_col.status_choices = (('1', False), ('2', True))
cell_status = row.cells['id'].status
self.assertEqual(False, cell_status)
self.assertEqual('status_down',
row.cells['id'].get_status_class(cell_status))
# Ensure data is not cached on the column across table instances
self.table = MyTable(self.request, TEST_DATA_6)
row = self.table.get_rows()[0]
self.assertTrue("down" in row.cells['status'].value)
def test_broken_filter(self):
class MyTableBrokenFilter(MyTable):
value = tables.Column('value',
filters=(defaultfilters.timesince,))
value = "not_a_date"
data = TEST_DATA[0]
data.value = value
table = MyTableBrokenFilter(self.request, [data])
resp = http.HttpResponse(table.render())
self.assertContains(resp, value)
class SingleTableView(table_views.DataTableView):
table_class = MyTable
name = "Single Table"
slug = "single"
template_name = "horizon_lib/common/_detail_table.html"
def get_data(self):
return TEST_DATA
class APIFilterTableView(SingleTableView):
table_class = MyServerFilterTable
class TableWithPermissions(tables.DataTable):
id = tables.Column('id')
class Meta:
name = "table_with_permissions"
permissions = ('horizon_lib.test',)
class SingleTableViewWithPermissions(SingleTableView):
table_class = TableWithPermissions
class MultiTableView(tables.MultiTableView):
table_classes = (TableWithPermissions, MyTable)
def get_table_with_permissions_data(self):
return TEST_DATA
def get_my_table_data(self):
return TEST_DATA
class DataTableViewTests(test.TestCase):
def _prepare_view(self, cls, *args, **kwargs):
req = self.factory.get('/my_url/')
req.user = self.user
view = cls()
view.request = req
view.args = args
view.kwargs = kwargs
return view
def test_data_table_view(self):
view = self._prepare_view(SingleTableView)
context = view.get_context_data()
self.assertEqual(SingleTableView.table_class,
context['table'].__class__)
def test_data_table_view_not_authorized(self):
view = self._prepare_view(SingleTableViewWithPermissions)
context = view.get_context_data()
self.assertNotIn('table', context)
def test_data_table_view_authorized(self):
view = self._prepare_view(SingleTableViewWithPermissions)
self.set_permissions(permissions=['test'])
context = view.get_context_data()
self.assertIn('table', context)
self.assertEqual(SingleTableViewWithPermissions.table_class,
context['table'].__class__)
def test_multi_table_view_not_authorized(self):
view = self._prepare_view(MultiTableView)
context = view.get_context_data()
self.assertEqual(MyTable, context['my_table_table'].__class__)
self.assertNotIn('table_with_permissions_table', context)
def test_multi_table_view_authorized(self):
view = self._prepare_view(MultiTableView)
self.set_permissions(permissions=['test'])
context = view.get_context_data()
self.assertEqual(MyTable, context['my_table_table'].__class__)
self.assertEqual(TableWithPermissions,
context['table_with_permissions_table'].__class__)
def test_api_filter_table_view(self):
filter_value_param = "my_table__filter__q"
filter_field_param = '%s_field' % filter_value_param
req = self.factory.post('/my_url/', {filter_value_param: 'up',
filter_field_param: 'status'})
req.user = self.user
view = APIFilterTableView()
view.request = req
view.kwargs = {}
view.handle_server_filter(req)
context = view.get_context_data()
self.assertEqual(context['table'].__class__, MyServerFilterTable)
data = view.get_data()
self.assertQuerysetEqual(data,
['<FakeObject: object_1>',
'<FakeObject: object_2>',
'<FakeObject: object_3>'])
self.assertEqual(req.session.get(filter_value_param), 'up')
self.assertEqual(req.session.get(filter_field_param), 'status')
class FormsetTableTests(test.TestCase):
def test_populate(self):
"""Create a FormsetDataTable and populate it with data."""
class TableForm(forms.Form):
name = forms.CharField()
value = forms.IntegerField()
TableFormset = forms.formsets.formset_factory(TableForm, extra=0)
class Table(table_formset.FormsetDataTable):
formset_class = TableFormset
name = tables.Column('name')
value = tables.Column('value')
class Meta:
name = 'table'
table = Table(self.request)
table.data = TEST_DATA_4
formset = table.get_formset()
self.assertEqual(2, len(formset))
form = formset[0]
form_data = form.initial
self.assertEqual('object_1', form_data['name'])
self.assertEqual(2, form_data['value'])
|
|
# core.py
#
# Copyright (c) 2009 Stephen Day
#
# This module is part of Creoleparser and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
import re
import genshi.builder as bldr
__docformat__ = 'restructuredtext en'
escape_char = '~'
esc_neg_look = '(?<!' + re.escape(escape_char) + ')'
esc_to_remove = re.compile(''.join([r'(?<!',re.escape(escape_char),')',re.escape(escape_char),r'(?!([ \n]|$))']))
place_holder_re = re.compile(r'<<<(-?\d+?)>>>')
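# Hedged notes on the module constants above: ``esc_neg_look`` is a negative
# lookbehind snippet ('(?<!~)') that dialect regexps presumably prepend so
# escaped markup is skipped; ``esc_to_remove`` matches any '~' that is not
# itself escaped and is not followed by a space, newline, or end of string,
# so fragmentize() can strip the escape characters from the final output;
# ``place_holder_re`` matches the <<<n>>> tokens that fill_from_store()
# resolves back into stored fragments.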
class Parser(object):
def __init__(self,dialect, method='xhtml', strip_whitespace=False, encoding='utf-8'):
"""Constructor for Parser objects
:parameters:
dialect
Usually created using :func:`creoleparser.dialects.create_dialect`
method
This value is passed to Genshi's Stream.render(). Possible values
include ``xhtml``, ``html``, ``xml``, and ``text``.
strip_whitespace
This value is passed to Genshi's Stream.render().
encoding
This value is passed to Genshi's Stream.render().
"""
if isinstance(dialect,type):
self.dialect = dialect()
else:
# warning message here in next major version
self.dialect = dialect
self.method = method
self.strip_whitespace = strip_whitespace
self.encoding=encoding
def generate(self,text,element_store=None,context='block', environ=None):
"""Returns a Genshi Stream.
:parameters:
text
The text to be parsed.
context
This is useful for macro development where (for example) suppression
of paragraph tags is desired. Can be 'inline', 'block', or a list
of WikiElement objects (use with caution).
element_store
Internal dictionary that's passed around a lot ;)
environ
This can be any type of object. It will be passed to ``macro_func``
unchanged (for a description of ``macro_func``, see
:func:`~creoleparser.dialects.create_dialect`).
"""
if element_store is None:
element_store = {}
if not isinstance(context,list):
if context == 'block':
top_level_elements = self.dialect.block_elements
do_preprocess = True
elif context == 'inline':
top_level_elements = self.dialect.inline_elements
do_preprocess = False
else:
top_level_elements = context
do_preprocess = False
if do_preprocess:
text = preprocess(text,self.dialect)
return bldr.tag(fragmentize(text,top_level_elements,element_store, environ)).generate()
def render(self, text, element_store=None, context='block', environ=None, **kwargs):
"""Returns the final output string (e.g., xhtml). See
:meth:`~creoleparser.core.Parser.generate` for named parameter descriptions.
Left over keyword arguments (``kwargs``) will be passed to Genshi's Stream.render() method,
overriding the corresponding attributes of the Parser object. For more information on Streams,
see the `Genshi documentation <http://genshi.edgewall.org/wiki/Documentation/streams.html#serialization-options>`_.
"""
if element_store is None:
element_store = {}
kwargs.setdefault('method',self.method)
kwargs.setdefault('encoding',self.encoding)
if kwargs['method'] != "text":
kwargs.setdefault('strip_whitespace',self.strip_whitespace)
stream = self.generate(text, element_store, context, environ)
return stream.render(**kwargs)
def __call__(self,text, **kwargs):
"""Wrapper for the render method. Returns final output string.
"""
return self.render(text, **kwargs)
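# A minimal usage sketch for Parser (hedged: ``creole11_base`` is assumed to
# be a dialect factory importable from creoleparser.dialects alongside the
# ``create_dialect`` function mentioned in the docstrings above; the markup
# produced depends on the dialect and the render ``method``):
#
#   >>> from creoleparser.dialects import create_dialect, creole11_base
#   >>> parser = Parser(dialect=create_dialect(creole11_base), method='xhtml')
#   >>> xhtml = parser("**bold** text")   # __call__ simply wraps render()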
class ArgParser(object):
"""Creates a callable object for parsing macro argument strings
>>> from dialects import creepy20_base
>>> my_parser = ArgParser(dialect=creepy20_base())
>>> my_parser(" one two foo='three' boo='four' ")
(['one', 'two'], {'foo': 'three', 'boo': 'four'})
A parser returns a two-tuple, the first item being a list of positional
arguments and the second a dictionary of keyword arguments. Argument
values are either strings or lists.
"""
def __init__(self,dialect, convert_implicit_lists=True,
key_func=None, illegal_keys=(), convert_unicode_keys=True):
"""Constructor for ArgParser objects
:parameters:
convert_unicode_keys
If *True*, keys will be converted using ``str(key)`` before being
added to the output dictionary. This allows the dictionary to be
safely passed to functions using the special ``**`` form (i.e.,
``func(**kwargs)``).
dialect
Usually created using :func:`~creoleparser.dialects.creepy10_base`
or :func:`~creoleparser.dialects.creepy20_base`
convert_implicit_lists
If *True*, all implicit lists will be converted to strings
using ``' '.join(list)``. "Implicit" lists are created when
positional arguments follow keyword arguments
(see :func:`~creoleparser.dialects.creepy10_base`).
illegal_keys
A tuple of keys that will be post-fixed with an underscore if found
during parsing.
key_func
If supplied, this function will be used to transform the names of
keyword arguments. It must accept a single positional argument.
For example, this can be used to make keywords case insensitive:
>>> from string import lower
>>> from dialects import creepy20_base
>>> my_parser = ArgParser(dialect=creepy20_base(),key_func=lower)
>>> my_parser(" Foo='one' ")
([], {'foo': 'one'})
"""
self.dialect = dialect()
self.convert_implicit_lists = convert_implicit_lists
self.key_func = key_func
self.illegal_keys = illegal_keys
self.convert_unicode_keys = convert_unicode_keys
def __call__(self, arg_string, **kwargs):
"""Parses the ``arg_string`` returning a two-tuple
Keyword arguments (``kwargs``) can be used to override the corresponding
attributes of the ArgParser object (see above). However, the
``dialect`` attribute **cannot** be overridden.
"""
kwargs.setdefault('convert_implicit_lists',self.convert_implicit_lists)
kwargs.setdefault('key_func',self.key_func)
kwargs.setdefault('illegal_keys',self.illegal_keys)
kwargs.setdefault('convert_unicode_keys',self.convert_unicode_keys)
return self._parse(arg_string,**kwargs)
def _parse(self,arg_string, convert_implicit_lists, key_func, illegal_keys,
convert_unicode_keys):
frags = fragmentize(arg_string,self.dialect.top_elements,{},{})
positional_args = []
kw_args = {}
for arg in frags:
if isinstance(arg,tuple):
k, v = arg
if convert_unicode_keys:
k = str(k)
if key_func:
k = key_func(k)
if k in illegal_keys:
k = k + '_'
if k in kw_args:
if isinstance(v,list):
try:
kw_args[k].extend(v)
except AttributeError:
v.insert(0,kw_args[k])
kw_args[k] = v
elif isinstance(kw_args[k],list):
kw_args[k].append(v)
else:
kw_args[k] = [kw_args[k], v]
kw_args[k] = ImplicitList(kw_args[k])
else:
kw_args[k] = v
if isinstance(kw_args[k],ImplicitList) and convert_implicit_lists:
kw_args[k] = ' '.join(kw_args[k])
else:
positional_args.append(arg)
return (positional_args, kw_args)
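# Hedged sketch of the duplicate-keyword handling above (the argument string
# is illustrative and assumes the dialect yields one (key, value) tuple per
# keyword argument):
#
#   >>> my_parser(" foo='a' foo='b' ")
#   ([], {'foo': 'a b'})
#
# With convert_implicit_lists=False the merged value would instead remain an
# ImplicitList(['a', 'b']).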
def fragmentize(text,wiki_elements, element_store, environ, remove_escapes=True):
"""Takes a string of wiki markup and outputs a list of genshi
Fragments (Elements and strings).
This recursive function, with help from the WikiElement objects,
does almost all the parsing.
When no WikiElement objects are supplied, escapes are removed from
``text`` (unless ``remove_escapes`` is False) and it is
returned as-is. This is the only way for recursion to stop.
:parameters:
text
the text to be parsed
wiki_elements
list of WikiElement objects to be searched for
environ
object that may be used by macros
remove_escapes
If False, escapes will not be removed
"""
while wiki_elements:
# If the first supplied wiki_element is actually a list of elements, \
# search for all of them and match the closest one only.
if isinstance(wiki_elements[0],(list,tuple)):
x = None
mos = None
for element in wiki_elements[0]:
mo = element.regexp.search(text)
if mo:
if x is None or mo.start() < x:
x,wiki_element,mos = mo.start(),element,[mo]
else:
wiki_element = wiki_elements[0]
mos = [mo for mo in wiki_element.regexp.finditer(text)]
if mos:
frags = wiki_element._process(mos, text, wiki_elements, element_store, environ)
break
else:
wiki_elements = wiki_elements[1:]
# remove escape characters
else:
if remove_escapes:
text = esc_to_remove.sub('',text)
frags = fill_from_store(text,element_store)
return frags
def fill_from_store(text,element_store):
frags = []
mos = place_holder_re.finditer(text)
start = 0
for mo in mos:
if mo.start() > start:
frags.append(text[start:mo.start()])
frags.append(element_store.get(mo.group(1),
mo.group(1).join(['<<<','>>>'])))
start = mo.end()
if start < len(text):
frags.append(text[start:])
return frags
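# Illustrative sketch of fill_from_store (keys and stored values here are
# hypothetical): placeholders of the form <<<n>>> are swapped for the
# fragment stored under the key 'n', and unknown keys are left as literal
# <<<n>>> text.
#
#   >>> fill_from_store('before <<<0>>> after', {'0': 'STORED'})
#   ['before ', 'STORED', ' after']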
def preprocess(text, dialect):
"""This should generally be called before fragmentize().
:parameters:
text
text to be processed.
dialect
a ``Dialect`` object.
"""
text = text.replace("\r\n", "\n")
text = text.replace("\r", "\n")
return text
def chunk(text, blank_lines, hard_elements, limit):
"""Safely breaks large Creole documents into a list of smaller
ones (strings) - DEPRECATED
"""
hard_spans = []
for e in hard_elements:
for mo in e.regexp.finditer(text):
hard_spans.append(mo.span())
hard_chars = []
for x,y in hard_spans:
hard_chars.extend(range(x,y))
hard_chars = set(hard_chars)
chunks = []
start = 0
for i in range(len(blank_lines)/limit):
for mo in blank_lines[limit/2 + i*limit:limit*3/2+i*limit:10]:
if mo.start() not in hard_chars:
chunks.append(text[start:mo.start()])
start = mo.end()
break
chunks.append(text[start:])
return chunks
class ImplicitList(list):
"""This class marks argument lists as implicit"""
pass
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
|
#!/usr/bin/env python
"""This is a generic environment reader."""
# =============================================================================
#
# FILE: process_dc_env.py
#
# USAGE: process_dc_env.py
#
# DESCRIPTION: generic environment reader that collects the arguments
#              passed to a devops.center script and combines them with the
#              environment variables defined in the generated .env files
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: Gregg Jensen (), [email protected]
# Bob Lozano (), [email protected]
# ORGANIZATION: devops.center
# CREATED: 11/21/2016 15:13:37
# REVISION: ---
#
# Copyright 2014-2017 devops.center llc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
import sys
import logging
import os
from os.path import expanduser
import argparse
import subprocess
# ==============================================================================
"""
process_dc_env.py processes the arguments and passes them back to be put in the
environment along with other environment variables defined in .env files.
"""
__version__ = "0.1"
__copyright__ = "Copyright 2016, devops.center"
__credits__ = ["Bob Lozano", "Gregg Jensen"]
__license__ = "GPL"
__status__ = "Development"
# ==============================================================================
class Process_dc_Env:
"""Process reading in the environment file."""
def __init__(self, envList, generateEnvFiles=0, forCustomer=None):
"""Constructor for process_dc_env class."""
self.envList = envList
self.baseDir = ""
self.baseAppName = ""
self.dcBaseConfig = ""
self.baseAppUtilsDir = ""
self.forCustomer = forCustomer
if generateEnvFiles:
self.generateEnvFiles = True
else:
self.generateEnvFiles = False
def getdcUtilsDirectory(self):
"""Read the utils directory."""
# read the ~/.dcConfig/settings
baseSettingsDir = expanduser("~") + "/.dcConfig"
if not os.path.exists(baseSettingsDir):
print("You seem to be missing the $HOME/.dcConfig directory,"
"you will need to run the RUN-ME-FIRST.sh script that "
"established that directory and the settings file that"
"contains the initial base directory for you application"
"development and where dcUtils is installed.")
sys.exit(1)
if os.path.isfile(baseSettingsDir + "/settings"):
# get the base directory from the settings file
with open(baseSettingsDir + "/settings") as f:
lines = [line.rstrip('\n') for line in f]
for item in lines:
if "dcUTILS" in item:
lineArray = item.split('=')
dcUtilsDir = lineArray[1]
return(dcUtilsDir)
# otherwise return current dir
# this works for the scripts executed from the dcUtils directory.
# otherwise make sure it is in your environment
return(os.getcwd())
def process_dc_env(self):
"""Process the environment files."""
# ---------------------------------------------------------------------
# lets check to see if dcUtils has been set already if not then we are
# probably the first time through and it hasn't been set in the
# environment, so we assume we are in the directory
# ---------------------------------------------------------------------
dcUtils = os.getenv("dcUTILS")
if not dcUtils:
dcUtils = self.getdcUtilsDirectory()
self.envList["dcUTILS"] = dcUtils
# ---------------------------------------------------------------------
# First we need to get the base location of the customers files. This
# was created when the manageApp.py was run as one of the arguments is
# the directory and it should be an absolute path
# ---------------------------------------------------------------------
self.dcBaseConfig = expanduser("~") + "/.dcConfig/baseDirectory"
# ---------------------------------------------------------------------
# get the base directory from the .dcConfig/baseDirectory
# ---------------------------------------------------------------------
self.getBaseDir()
# ---------------------------------------------------------------------
# Next get the baseAppName
# ---------------------------------------------------------------------
self.getBaseAppName()
# ---------------------------------------------------------------------
# Next get the base App Utilities directory by reading the
# .dcDirMap.cnf
# ---------------------------------------------------------------------
self.baseAppUtilsDir = self.getBaseAppUtils()
# ---------------------------------------------------------------------
# go read the ~/.dcConfig/settings file for any variables we need from
# there. Like the customer name
# ---------------------------------------------------------------------
getSettings(self.envList)
# ---------------------------------------------------------------------
# Need to find what dcEnv files are in the directory. We need to
# get the possible appNames if there is more than one. And then get
# any environment (ie, dev, staging, prod, local) files if there are
# more than one. If it doesn't exist exit and instruct the user to run
# deployenv.sh
# ---------------------------------------------------------------------
if not self.generateEnvFiles:
self.getEnvFile()
# -----------------------------------------------------------------
# check for the DEFAULT_APP_NAME. If not given set it to the
# appname from the input. If the --appName is not given then
# check the one from the env and make sure it is not the
# __DEFAULT__ one.
# -----------------------------------------------------------------
# if self.envList["dcDEFAULT_APP_NAME"] == "__DEFAULT__":
# print ("The dcDEFAULT_APP_NAME environment variable has not "
# +
# "been set and has not been made available. This " +
# "should be identified when running deployenv.sh by " +
# "utilizing the option: --appName appname")
# sys.exit(1)
return self.envList
def getBaseDir(self):
"""Get the base directory."""
if os.path.exists(self.dcBaseConfig):
if "WORKSPACE_NAME" in self.envList:
# open the dcConfig/baseDirectory and read it in directly
# as we are looking for the alternate workspace name
with open(self.dcBaseConfig) as f:
lines = [line.rstrip('\n') for line in f]
flagFound = 0
itemToLookFor = "_" + self.envList["WORKSPACE_NAME"] + \
"_BASE_CUSTOMER_DIR="
for line in lines:
if itemToLookFor in line:
key, value = line.split('=', 1)
self.baseDir = value
self.envList["BASE_CUSTOMER_DIR"] = self.baseDir
flagFound = 1
break
if not flagFound:
print("Could not find a directory for the given " +
"--workspaceName value in the " +
"$HOME/.dcConfig/baseDirectory. \nHave you run " +
"manageApp.py with the --workspaceName to create " +
"an alternate base directory?")
sys.exit(1)
else:
with open(self.dcBaseConfig) as f:
lines = [line.rstrip('\n') for line in f]
workspaceName = ''
for item in lines:
if "CURRENT_WORKSPACE" in item:
lineArray = item.split('=')
workspaceName = '_' + lineArray[1] + \
'_BASE_CUSTOMER_DIR'
if workspaceName in item:
anotherLineArray = item.split('=')
self.baseDir = anotherLineArray[1]
self.envList["BASE_CUSTOMER_DIR"] = self.baseDir
return
else:
print("ERROR: can not determine the base directory as it "
"does not appear that you have run manageApp.py to "
"set up your application. This must be done before "
"you can run this script.")
sys.exit(1)
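# Hedged sketch of the ~/.dcConfig/baseDirectory format that getBaseDir()
# expects (workspace names and paths are illustrative):
#
#   CURRENT_WORKSPACE=default
#   _default_BASE_CUSTOMER_DIR=/home/someuser/devops/apps
#   _alternate_BASE_CUSTOMER_DIR=/home/someuser/devops/other-apps
#
# The *_BASE_CUSTOMER_DIR line is chosen either from the --workspaceName
# argument or from the CURRENT_WORKSPACE entry.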
def getBaseAppName(self):
"""Get the base name for the application."""
subDirs = next(os.walk(self.baseDir))[1]
if len(subDirs) == 0:
print("The base directory defined in "
"$HOME/.dcConfig/basedirectory "
"does not have any directories in it. There is "
"configuration issue in that file you may need to run "
"manageApp.py again.")
sys.exit(1)
elif len(subDirs) == 1:
if "CUSTOMER_APP_NAME" in self.envList:
if subDirs[0] != self.envList["CUSTOMER_APP_NAME"]:
print("The appName you provided: " +
self.envList["CUSTOMER_APP_NAME"] + " was not " +
"found in the base directory: " + self.baseDir)
sys.exit(1)
else:
self.baseAppName = self.envList["CUSTOMER_APP_NAME"]
else:
self.baseAppName = subDirs[0]
self.envList["CUSTOMER_APP_NAME"] = self.baseAppName
else:
self.handleMultipleDirectories(subDirs)
# print "baseAppName= " + baseAppName
def handleMultipleDirectories(self, subDirs):
"""Handle the situation when there are multiple apps."""
if "CUSTOMER_APP_NAME" in self.envList:
foundFlag = 0
for dir in subDirs:
if dir == self.envList["CUSTOMER_APP_NAME"]:
self.baseAppName = self.envList["CUSTOMER_APP_NAME"]
foundFlag = 1
break
if not foundFlag:
print("The appName you provided: " +
self.envList["CUSTOMER_APP_NAME"] + " was not found " +
"in the base directory: " + self.baseDir)
sys.exit(1)
else:
print("Found multiple applications so you will need to provide " +
"the appropriate option (usually --appName) for " +
"this script to be able to pass the application name.")
print("The applications found are:\n")
for filename in subDirs:
print(filename)
print("\n")
sys.exit(1)
def getBaseAppUtils(self):
"""Get the appliation utils."""
appDir = self.baseDir + "/" + self.baseAppName
subDirMapFile = appDir + "/" + ".dcDirMap.cnf"
if os.path.exists(subDirMapFile):
with open(subDirMapFile) as f:
lines = [line.rstrip('\n') for line in f]
for line in lines:
key, value = line.split('=', 1)
self.envList[key] = value
if key == "CUSTOMER_APP_UTILS":
if self.forCustomer:
retBaseAppUtils = self.baseDir + "/" + \
self.baseAppName + "/" + value + "/" + \
self.forCustomer
elif value == "dcShared-utils":
retBaseAppUtils = self.baseDir + "/" + \
self.baseAppName + "/" + value
else:
retBaseAppUtils = self.baseDir + "/" + \
self.baseAppName + "/" + value
else:
print("Can not read the " + subDirMapFile + " file in the base" +
" application directory, have you run manageApp.py yet? ")
sys.exit(1)
return retBaseAppUtils
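# Hedged sketch of the .dcDirMap.cnf file read above (the key/value pairs
# are illustrative; only CUSTOMER_APP_UTILS is used to build the returned
# utils path, but every key is copied into self.envList):
#
#   CUSTOMER_APP_WEB=web
#   CUSTOMER_APP_UTILS=app-utils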
def getEnvFile(self):
"""Get the Environment file."""
# check for a dcEnv-${CUSTOMER_APP_NAME}-*.sh file
envDirToFind = self.baseAppUtilsDir + \
"/environments/.generatedEnvFiles"
envFiles = next(os.walk(envDirToFind))[2]
# if one doesn't exist instruct the user to run deployenv.sh with that
# app name and try this again and then exit
# if len(envFiles) == 0 or "dcEnv" not in envFiles[0]:
if len(envFiles) == 0:
print("There does not appear to be any env files available " +
"for the given appName: " + self.baseAppName + ". " +
"You will need to create one by executing the " +
"deployenv.sh with the appName")
sys.exit(1)
# there is at least a pair there, so now go through the list and
# look for the application specific env files. If present, there
# will be two files, one .env and one .sh, and the one we want
# is the .sh
flagFound = 0
for file in envFiles:
envFileName = "dcEnv-" + self.baseAppName + "-" + \
self.envList["ENV"] + ".env"
shEnvFileName = "dcEnv-" + self.baseAppName + "-" + \
self.envList["ENV"] + ".sh"
if shEnvFileName in file:
# found the one needed
flagFound = 1
theEnvFileNameToSource = file
break
if not flagFound:
# if there is more than one file (ie, different ENVs) then display
# the list and ask for the user to select one.
print("There are multiple sets of environment files with that " +
"appName. The difference \nbetween the files is the " +
"environment portion. This is one of local, dev, " +
"staging \nor prod. Look at the list below and you will " +
"need to know the environment that \nyou want to run in." +
" Re-run this script and give the appropriate option to " +
"\ndesiginate the env (usually --env) and provide the " +
"environment string. \nThe env files found are:")
for filename in envFiles:
if "dcEnv-" in filename:
print(filename)
sys.exit(1)
else:
# source the .sh env file into the environment as it has the export
# variables and that will set the environment
fileToSource = envDirToFind + "/" + theEnvFileNameToSource
command = '/usr/bin/env bash -c "source ' + fileToSource + \
' && env"'
try:
tmpOutput = subprocess.check_output(command,
stderr=subprocess.STDOUT,
shell=True)
theWholeEnv = tmpOutput.split('\n')
# -------------------------------------------------------------
# now go through the whole environment and only get the ones
# that are in the envFile that we sourced
# This works because the sourcing command above prints the entire
# environment at that point, which includes a bunch of extra
# variables that we don't need. The keys we need come from the env
# file, so we read through the file, pull each key, and match it
# to its sourced value.
# -------------------------------------------------------------
theEnvFileToRead = envDirToFind + "/" + envFileName
with open(theEnvFileToRead) as f:
lines = [line.rstrip('\n') for line in f]
for line in lines:
needKey, needValue = line.split('=', 1)
for envVar in theWholeEnv:
if needKey in envVar:
lookKey, lookValue = envVar.split('=', 1)
self.envList[needKey] = lookValue
except subprocess.CalledProcessError:
logging.exception("There was an issue with sourcing " +
fileToSource)
def getSettings(anEnvList):
"""Read the ~/.dcConfig/settings file."""
baseSettingsFile = expanduser("~") + "/.dcConfig/settings"
with open(baseSettingsFile, 'r') as f:
lines = [line.rstrip('\n') for line in f]
for aLine in lines:
if "CUSTOMER_NAME=" in aLine:
anEnvList["CUSTOMER_NAME"] = aLine.split("=")[1]
if "PROFILE=" in aLine:
anEnvList["PROFILE"] = aLine.split("=")[1]
if "REGION=" in aLine:
anEnvList["REGION"] = aLine.split("=")[1]
if "USER_NAME=" in aLine:
anEnvList["USER_NAME"] = aLine.split("=")[1]
if "dcCOMMON_SHARED_DIR=" in aLine:
anEnvList["dcCOMMON_SHARED_DIR"] = aLine.split("=")[1]
def pythonGetEnv(initialCreate=False, forCustomer=None):
"""Process env when called from a python script."""
envList = dcEnvCheckArgs()
if forCustomer is None:
if "FOR_CUSTOMER" in envList:
forCustomer = envList["FOR_CUSTOMER"]
if initialCreate:
getSettings(envList)
returnEnvList = envList
else:
anEnv = Process_dc_Env(envList, forCustomer=forCustomer)
returnEnvList = anEnv.process_dc_env()
return returnEnvList
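# Illustrative sketch (not part of the original module): how a devops.center
# python script might call pythonGetEnv to pull in the application environment.
# Nothing below is executed automatically; it is a hedged usage example only.
def _example_pythonGetEnv_usage():
    """Hypothetical helper that prints the resolved environment."""
    resolvedEnv = pythonGetEnv()
    for key, value in resolvedEnv.items():
        print(key + "=" + str(value))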
def shellGetEnv():
"""Process env when called via a shell script."""
(envList, initialCreate, generateEnvFiles) = dcEnvCheckArgs(type=1)
customerNameToSpecialize = None
if "FOR_CUSTOMER" in envList:
customerNameToSpecialize = envList["FOR_CUSTOMER"]
if initialCreate:
returnEnvList = envList
else:
anEnv = Process_dc_Env(envList, generateEnvFiles,
forCustomer=customerNameToSpecialize)
returnEnvList = anEnv.process_dc_env()
returnStr = "export"
for key, value in returnEnvList.iteritems():
if '\"' in value or '\'' in value:
returnStr += " " + key + '=' + value
else:
returnStr += " " + key + '="' + value + '"'
curDir = os.path.dirname(sys.argv[0])
with open(curDir + '/shellfunctions.incl', 'r') as includeFile:
data = includeFile.read()
returnStr += " ; {}".format(data)
print(returnStr)
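# Illustrative note (assumption, not taken from the original source): a shell
# caller would typically capture and eval the export string printed above,
# for example:
#   eval "$(process_dc_env.py --appName myApp --env dev)"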
def dcEnvCheckArgs(type=0):
"""Check the arguments passed into this script."""
parser = argparse.ArgumentParser(
description='The core argument processing is handled by a separate '
'process (process_dc_env.py) and is called by this script. This core '
'process will ensure that there is an environment file that is set '
'and can be utilized for the running of this session. The intent of '
'this script is that it would be put at the top of any devops.center '
'scripts that will need the full application environment to ensure '
'that the environment variables that are needed will be available to '
'the script. This is done to help avoid polluting the users '
'environment. Another main purpose of this is to be able to isolate '
'sessions such that a user could run one app in a terminal session '
'and a second one in parallel in a separate terminal '
'session while using the same code.')
parser.add_argument('-a', '--appName', help='The application name '
'of the application that you want to run as the '
'default app for the current session. This is '
'optional as by default the appName will be set '
'when deployenv.sh is run',
required=False)
parser.add_argument('-e', '--env', help='the env is one of local, dev, '
'staging, prod. DEFAULT: local',
default='local',
required=False)
parser.add_argument('-w', '--workspaceName',
help='A unique name that identifies an alternate '
'workspace. By default only one base directory is '
'created and all applications created are put into '
'that directory. By specifying this option an '
'alternate base directory can be identified and it '
'will be kept separate from any other base '
'directories. One usage is if you have multiple '
'clients that you are building apps for then the '
'apps can be in separate base directories '
'(essentially applications associated by client) '
'with this option.',
required=False)
parser.add_argument('-i', '--initialCreate',
# help='The flag to say ' +
# 'that this is being invoked by a start up script'
# 'NOTE: if it came in this way it came from a shell'
# 'script and probably should not be run this way',
action="store_true",
help=argparse.SUPPRESS,
required=False)
parser.add_argument('-g', '--generateEnvFiles',
# help='The flag to say ' +
# 'that this is being invoked by deployEnv.sh '
# 'and that we need to generate the env files rather '
# 'then read them.',
action="store_true",
help=argparse.SUPPRESS,
required=False)
parser.add_argument('--forCustomer',
# help='This is used only'
# 'when creating a dcAuthorization instance.',
help=argparse.SUPPRESS,
required=False)
# args, unknown = parser.parse_known_args()
try:
args, unknown = parser.parse_known_args()
except SystemExit:
sys.exit(1)
returnList = {}
if args.appName:
returnList["CUSTOMER_APP_NAME"] = args.appName
if args.env:
returnList["ENV"] = args.env
if args.workspaceName:
returnList["WORKSPACE_NAME_ORIGINAL"] = args.workspaceName
returnList["WORKSPACE_NAME"] = args.workspaceName.upper()
if args.forCustomer:
returnList["FOR_CUSTOMER"] = args.forCustomer
# if we get here then the return the necessary arguments
if type:
return (returnList, args.initialCreate, args.generateEnvFiles)
else:
return returnList
def main(argv):
"""Execute this script."""
shellGetEnv()
if __name__ == "__main__":
main(sys.argv[1:])
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
|
|
"""Test the songpal config flow."""
import copy
from unittest.mock import patch
from homeassistant.components import ssdp
from homeassistant.components.songpal.const import CONF_ENDPOINT, DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_SSDP, SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from . import (
CONF_DATA,
ENDPOINT,
FRIENDLY_NAME,
HOST,
MODEL,
_create_mocked_device,
_patch_config_flow_device,
)
from tests.common import MockConfigEntry
UDN = "uuid:1234"
SSDP_DATA = {
ssdp.ATTR_UPNP_UDN: UDN,
ssdp.ATTR_UPNP_FRIENDLY_NAME: FRIENDLY_NAME,
ssdp.ATTR_SSDP_LOCATION: f"http://{HOST}:52323/dmr.xml",
"X_ScalarWebAPI_DeviceInfo": {
"X_ScalarWebAPI_BaseURL": ENDPOINT,
"X_ScalarWebAPI_ServiceList": {
"X_ScalarWebAPI_ServiceType": ["guide", "system", "audio", "avContent"],
},
},
}
def _flow_next(hass, flow_id):
return next(
flow
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == flow_id
)
def _patch_setup():
return patch(
"homeassistant.components.songpal.async_setup_entry",
return_value=True,
)
async def test_flow_ssdp(hass):
"""Test working ssdp flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=SSDP_DATA,
)
assert result["type"] == "form"
assert result["step_id"] == "init"
assert result["description_placeholders"] == {
CONF_NAME: FRIENDLY_NAME,
CONF_HOST: HOST,
}
flow = _flow_next(hass, result["flow_id"])
assert flow["context"]["unique_id"] == UDN
with _patch_setup():
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == FRIENDLY_NAME
assert result["data"] == CONF_DATA
async def test_flow_user(hass):
"""Test working user initialized flow."""
mocked_device = _create_mocked_device()
with _patch_config_flow_device(mocked_device), _patch_setup():
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] is None
_flow_next(hass, result["flow_id"])
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_ENDPOINT: ENDPOINT},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == MODEL
assert result["data"] == {
CONF_NAME: MODEL,
CONF_ENDPOINT: ENDPOINT,
}
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_called_once()
async def test_flow_import(hass):
"""Test working import flow."""
mocked_device = _create_mocked_device()
with _patch_config_flow_device(mocked_device), _patch_setup():
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=CONF_DATA
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == FRIENDLY_NAME
assert result["data"] == CONF_DATA
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_not_called()
async def test_flow_import_without_name(hass):
"""Test import flow without optional name."""
mocked_device = _create_mocked_device()
with _patch_config_flow_device(mocked_device), _patch_setup():
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data={CONF_ENDPOINT: ENDPOINT}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == MODEL
assert result["data"] == {CONF_NAME: MODEL, CONF_ENDPOINT: ENDPOINT}
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_called_once()
def _create_mock_config_entry(hass):
MockConfigEntry(
domain=DOMAIN,
unique_id="uuid:0000",
data=CONF_DATA,
).add_to_hass(hass)
async def test_ssdp_bravia(hass):
"""Test discovering a bravia TV."""
ssdp_data = copy.deepcopy(SSDP_DATA)
ssdp_data["X_ScalarWebAPI_DeviceInfo"]["X_ScalarWebAPI_ServiceList"][
"X_ScalarWebAPI_ServiceType"
].append("videoScreen")
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=ssdp_data,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "not_songpal_device"
async def test_ssdp_exist(hass):
"""Test discovering an already configured device."""
_create_mock_config_entry(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_SSDP},
data=SSDP_DATA,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_user_exist(hass):
"""Test user adding existed device."""
mocked_device = _create_mocked_device()
_create_mock_config_entry(hass)
with _patch_config_flow_device(mocked_device):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONF_DATA
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_called_once()
async def test_import_exist(hass):
"""Test importing existed device."""
mocked_device = _create_mocked_device()
_create_mock_config_entry(hass)
with _patch_config_flow_device(mocked_device):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=CONF_DATA
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_not_called()
async def test_user_invalid(hass):
"""Test using adding invalid config."""
mocked_device = _create_mocked_device(True)
_create_mock_config_entry(hass)
with _patch_config_flow_device(mocked_device):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONF_DATA
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_not_called()
async def test_import_invalid(hass):
"""Test importing invalid config."""
mocked_device = _create_mocked_device(True)
_create_mock_config_entry(hass)
with _patch_config_flow_device(mocked_device):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=CONF_DATA
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
mocked_device.get_supported_methods.assert_called_once()
mocked_device.get_interface_information.assert_not_called()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Partially based on AboutMessagePassing in the Ruby Koans
#
from runner.koan import *
class AboutAttributeAccess(Koan):
class TypicalObject:
pass
def test_calling_undefined_functions_normally_results_in_errors(self):
typical = self.TypicalObject()
with self.assertRaises(AttributeError): typical.foobar()
def test_calling_getattribute_causes_an_attribute_error(self):
typical = self.TypicalObject()
with self.assertRaises(AttributeError): typical.__getattribute__('foobar')
# THINK ABOUT IT:
#
# If the method __getattribute__() causes the AttributeError, then
# what would happen if we redefine __getattribute__()?
# ------------------------------------------------------------------
class CatchAllAttributeReads:
def __getattribute__(self, attr_name):
return "Someone called '" + attr_name + "' and it could not be found"
def test_all_attribute_reads_are_caught(self):
catcher = self.CatchAllAttributeReads()
self.assertRegexpMatches(catcher.foobar, "Someone called 'foobar' and it could not be found")
def test_intercepting_return_values_can_disrupt_the_call_chain(self):
catcher = self.CatchAllAttributeReads()
self.assertRegexpMatches(catcher.foobaz, "Someone called 'foobaz' and it could not be found") # This is fine
try:
catcher.foobaz(1)
except TypeError as ex:
err_msg = ex.args[0]
self.assertRegexpMatches(err_msg, "'str' object is not callable")
# foobaz returns a string. What happens to the '(1)' part?
# Try entering this into a python console to reproduce the issue:
#
# "foobaz"(1)
#
def test_changes_to_the_getattribute_implementation_affects_getattr_function(self):
catcher = self.CatchAllAttributeReads()
self.assertRegexpMatches(getattr(catcher, 'any_attribute'), "Someone called 'any_attribute' and it could not be found")
# ------------------------------------------------------------------
class WellBehavedFooCatcher:
def __getattribute__(self, attr_name):
if attr_name[:3] == "foo":
return "Foo to you too"
else:
return super().__getattribute__(attr_name)
def test_foo_attributes_are_caught(self):
catcher = self.WellBehavedFooCatcher()
self.assertEqual('Foo to you too', catcher.foo_bar)
self.assertEqual('Foo to you too', catcher.foo_baz)
def test_non_foo_messages_are_treated_normally(self):
catcher = self.WellBehavedFooCatcher()
with self.assertRaises(AttributeError): catcher.normal_undefined_attribute
# ------------------------------------------------------------------
global stack_depth
stack_depth = 0
class RecursiveCatcher:
def __init__(self):
global stack_depth
stack_depth = 0
self.no_of_getattribute_calls = 0
def __getattribute__(self, attr_name):
global stack_depth # We need something that is outside the scope of this class
stack_depth += 1
if stack_depth<=10: # to prevent a stack overflow
self.no_of_getattribute_calls += 1
# Oops! We just accessed an attribute (no_of_getattribute_calls)
# Guess what happens when self.no_of_getattribute_calls is
# accessed?
# Using 'object' directly because using super() here will also
# trigger a __getattribute__() call.
return object.__getattribute__(self, attr_name)
def my_method(self):
pass
def test_getattribute_is_a_bit_overzealous_sometimes(self):
catcher = self.RecursiveCatcher()
catcher.my_method()
global stack_depth
self.assertEqual(11, stack_depth)
# ------------------------------------------------------------------
class MinimalCatcher:
class DuffObject: pass
def __init__(self):
self.no_of_getattr_calls = 0
def __getattr__(self, attr_name):
self.no_of_getattr_calls += 1
return self.DuffObject
def my_method(self):
pass
def test_getattr_ignores_known_attributes(self):
catcher = self.MinimalCatcher()
catcher.my_method()
self.assertEqual(0, catcher.no_of_getattr_calls)
def test_getattr_only_catches_unknown_attributes(self):
catcher = self.MinimalCatcher()
catcher.purple_flamingos()
catcher.free_pie()
self.assertEqual('DuffObject',
type(catcher.give_me_duff_or_give_me_death()).__name__)
self.assertEqual(3, catcher.no_of_getattr_calls)
# ------------------------------------------------------------------
class PossessiveSetter(object):
def __setattr__(self, attr_name, value):
new_attr_name = attr_name
if attr_name[-5:] == 'comic':
new_attr_name = "my_" + new_attr_name
elif attr_name[-3:] == 'pie':
new_attr_name = "a_" + new_attr_name
object.__setattr__(self, new_attr_name, value)
def test_setattr_intercepts_attribute_assignments(self):
fanboy = self.PossessiveSetter()
fanboy.comic = 'The Laminator, issue #1'
fanboy.pie = 'blueberry'
self.assertEqual('blueberry', fanboy.a_pie)
#
# NOTE: Change the prefix to make this next assert pass
#
prefix = 'my'
self.assertEqual("The Laminator, issue #1", getattr(fanboy, prefix + '_comic'))
# ------------------------------------------------------------------
class ScarySetter:
def __init__(self):
self.num_of_coconuts = 9
self._num_of_private_coconuts = 2
def __setattr__(self, attr_name, value):
new_attr_name = attr_name
if attr_name[0] != '_':
new_attr_name = "altered_" + new_attr_name
object.__setattr__(self, new_attr_name, value)
def test_it_modifies_external_attribute_as_expected(self):
setter = self.ScarySetter()
setter.e = "mc hammer"
self.assertEqual('mc hammer', setter.altered_e)
def test_it_mangles_some_internal_attributes(self):
setter = self.ScarySetter()
try:
coconuts = setter.num_of_coconuts
except AttributeError:
self.assertEqual(9, setter.altered_num_of_coconuts)
def test_in_this_case_private_attributes_remain_unmangled(self):
setter = self.ScarySetter()
self.assertEqual(2, setter._num_of_private_coconuts)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""T5 CBQA tasks."""
import functools
from . import metrics
from . import postprocessors
from . import preprocessors
import seqio
from t5.data import get_default_vocabulary
from t5.data import postprocessors as t5_postprocessors
from t5.data import preprocessors as t5_preprocessors
from t5.evaluation import metrics as t5_metrics
MixtureRegistry = seqio.MixtureRegistry
TaskRegistry = seqio.TaskRegistry
DEFAULT_SPM_PATH = "gs://t5-data/vocabs/cc_all.32000/sentencepiece.model" # GCS
DEFAULT_EXTRA_IDS = 100
NQ_TRAIN_SPLIT_START = 7830
NQ_TRAIN_SPLIT_END = 79168
NQO_TRAIN_SPLIT_END = 79168
WQ_TRAIN_SPLIT_END = 3417
TQA_TRAIN_SPLIT_END = 78785
DEFAULT_OUTPUT_FEATURES = {
"inputs": seqio.Feature(vocabulary=get_default_vocabulary(), add_eos=True),
"targets": seqio.Feature(vocabulary=get_default_vocabulary(), add_eos=True)
}
# ========================== Natural Questions =================================
# Natural Questions open domain variant that most closely matches the official
# evaluation procedure.
# The model is trained to predict all ground-truth answers
# and is only considered correct if it predicts all answers for any one of the
# annotators. As in the official evaluation, we consider questions with fewer
# than two non-null annotations unanswerable (given the context) but because we
# cannot predict unanswerability without the context, we only compute the recall
# metric. Further, because our model does not have access to the oracle context,
# we also normalize predicted and ground-truth answers when comparing them.
# This task uses a portion of the train set for validation.
TaskRegistry.add(
"natural_questions_nocontext",
source=seqio.TfdsDataSource(
tfds_name="natural_questions:0.0.2",
splits={
"train": f"train[{NQ_TRAIN_SPLIT_START}:{NQ_TRAIN_SPLIT_END}]",
"validation": f"train[:{NQ_TRAIN_SPLIT_START}]",
"test": "validation"
}),
preprocessors=[
preprocessors.natural_questions_nocontext,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=postprocessors.natural_questions,
metric_fns=[
functools.partial(
metrics.natural_questions,
# Train set does not contain multiple annotations.
non_null_threshold=1)
])
# This task uses full train split and reports metrics on the NQ validation split
# (which is the test set in the open domain setting).
TaskRegistry.add(
"natural_questions_nocontext_test",
source=seqio.TfdsDataSource(tfds_name="natural_questions:0.0.2"),
preprocessors=[
preprocessors.natural_questions_nocontext,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=postprocessors.natural_questions,
metric_fns=[metrics.natural_questions])
# The standard open domain variant of Natural Questions, where:
# 1) the model is only ever trained to output a single answer;
# 2) if a question has multiple answers, it is trained to predict the first;
# 3) any questions with answers longer than five tokens are ignored;
# 4) answers are normalized before being compared;
# This task uses a portion of the train split for validation.
TaskRegistry.add(
"natural_questions_open",
source=seqio.TfdsDataSource(
tfds_name="natural_questions_open:1.0.0",
splits={
# ~90%, matches numbers used by ORQA
"train": f"train[:{NQO_TRAIN_SPLIT_END}]",
# ~10%, matches numbers used by ORQA
"validation": f"train[{NQO_TRAIN_SPLIT_END}:]",
"test": "validation"
}),
preprocessors=[
preprocessors.natural_questions_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad])
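# Illustrative sketch (not part of the original registrations): the task defined
# above can be loaded through seqio. The sequence lengths and split below are
# assumptions for demonstration only.
def _example_load_natural_questions_open():
  """Hypothetical helper showing how the registered task could be loaded."""
  task = seqio.get_mixture_or_task("natural_questions_open")
  return task.get_dataset(
      sequence_length={"inputs": 512, "targets": 128},
      split="validation",
      shuffle=False)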
# This is a slight variant of the previous task that selects a random answer
# when multiple are provided instead of using the first.
TaskRegistry.add(
"natural_questions_open_randanswer",
source=seqio.TfdsDataSource(
tfds_name="natural_questions_open:1.0.0",
splits={
"train": f"train[:{NQO_TRAIN_SPLIT_END}]",
"validation": f"train[{NQO_TRAIN_SPLIT_END}:]",
"test": "validation"
}),
preprocessors=[
preprocessors.natural_questions_open,
preprocessors.sample_answer,
seqio.preprocessors.tokenize,
# Do not cache - ensures we are sampling different answers.
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad])
# This task uses full train split and reports metrics on the NQ validation split
# (which is the test set in the open domain setting).
TaskRegistry.add(
"natural_questions_open_test",
source=seqio.TfdsDataSource(tfds_name="natural_questions_open:1.0.0"),
preprocessors=[
preprocessors.natural_questions_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad])
# ============================ Web Questions ===================================
# This task uses 10% of the train split for validation.
TaskRegistry.add(
"web_questions_open",
source=seqio.TfdsDataSource(
tfds_name="web_questions:1.0.0",
splits={
# ~90%, matches numbers used by ORQA
"train": f"train[:{WQ_TRAIN_SPLIT_END}]",
# ~10%, matches numbers used by ORQA
"validation": f"train[{WQ_TRAIN_SPLIT_END}:]",
"test": "test"
}),
preprocessors=[
preprocessors.web_questions_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad],
)
# This task trains on the full train split.
TaskRegistry.add(
"web_questions_open_test",
source=seqio.TfdsDataSource(
tfds_name="web_questions:1.0.0",
splits={
"train": "train",
"validation": "test",
}),
preprocessors=[
preprocessors.web_questions_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.squad],
)
# =============================== Trivia QA ====================================
TaskRegistry.add(
"trivia_qa_open",
source=seqio.TfdsDataSource(
tfds_name="trivia_qa/unfiltered.nocontext:1.1.0",
splits={
# ~90%, matches numbers used by ORQA
"train": f"train[:{TQA_TRAIN_SPLIT_END}]",
# ~10%, matches numbers used by ORQA
"validation": f"train[{TQA_TRAIN_SPLIT_END}:]",
"test": "validation"
}),
preprocessors=[
preprocessors.trivia_qa_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.trivia_qa])
# This task trains on the combined train and validation splits.
TaskRegistry.add(
"trivia_qa_open_test",
source=seqio.TfdsDataSource(
tfds_name="trivia_qa/unfiltered.nocontext:1.1.0",
splits={
"train": "train+validation",
"test": "test"
}),
preprocessors=[
preprocessors.trivia_qa_open,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
postprocess_fn=t5_postprocessors.qa,
metric_fns=[t5_metrics.trivia_qa])
# ============================= CBQA Mixtures ==================================
# This mixture is to be used for hyperparameter tuning. Training happens on
# validation sets (if available) or subsplits of the train set. Evaluation
# happens on the validation (or heldout portion of the train) split.
MixtureRegistry.add(
"closed_book_qa",
[
"trivia_qa_open",
"natural_questions_open",
"web_questions_open"
],
default_rate=seqio.mixing_rate_num_examples
)
# This mixture is to be used at test time. Training happens on the combined
# train and validation splits and evaluation happens on the test split.
MixtureRegistry.add(
"closed_book_qa_test",
[
"trivia_qa_open_test",
"natural_questions_open_test",
"web_questions_open_test"
],
default_rate=seqio.mixing_rate_num_examples
)
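# Illustrative sketch (not part of the original registrations): mixtures are
# loaded the same way as tasks; the lengths and split here are assumptions
# for demonstration only.
def _example_load_closed_book_qa_mixture():
  """Hypothetical helper showing how the CBQA mixture could be loaded."""
  mixture = seqio.get_mixture_or_task("closed_book_qa")
  return mixture.get_dataset(
      sequence_length={"inputs": 512, "targets": 128},
      split="train",
      shuffle=True)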
# ========================= Salient Span Masking ===============================
TaskRegistry.add(
"salient_span_masked_wikipedia",
source=seqio.TfdsDataSource(
tfds_name="salient_span_wikipedia/sentences:1.0.0"),
preprocessors=[
preprocessors.mask_salient_spans,
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
metric_fns=[])
TaskRegistry.add(
"span_corrupted_wikipedia",
source=seqio.TfdsDataSource(
tfds_name="salient_span_wikipedia/sentences:1.0.0"),
preprocessors=[
functools.partial(
t5_preprocessors.rekey, key_map={
"inputs": None,
"targets": "text"
}),
seqio.preprocessors.tokenize,
seqio.CacheDatasetPlaceholder(),
t5_preprocessors.span_corruption,
seqio.preprocessors.append_eos_after_trim,
],
output_features=DEFAULT_OUTPUT_FEATURES,
metric_fns=[])
|
|
#!/usr/bin/env python
#
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provisions Android devices with settings required for bots.
Usage:
./provision_devices.py [-d <device serial number>]
"""
import argparse
import datetime
import json
import logging
import os
import posixpath
import re
import subprocess
import sys
import time
from devil.android import battery_utils
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_temp_file
from devil.android import device_utils
from devil.android.sdk import version_codes
from devil.utils import run_tests_helper
from devil.utils import timeout_retry
from pylib import constants
from pylib import device_settings
_SYSTEM_WEBVIEW_PATHS = ['/system/app/webview', '/system/app/WebViewGoogle']
_CHROME_PACKAGE_REGEX = re.compile('.*chrom.*')
_TOMBSTONE_REGEX = re.compile('tombstone.*')
class _DEFAULT_TIMEOUTS(object):
# L can take a while to reboot after a wipe.
LOLLIPOP = 600
PRE_LOLLIPOP = 180
HELP_TEXT = '{}s on L, {}s on pre-L'.format(LOLLIPOP, PRE_LOLLIPOP)
class _PHASES(object):
WIPE = 'wipe'
PROPERTIES = 'properties'
FINISH = 'finish'
ALL = [WIPE, PROPERTIES, FINISH]
def ProvisionDevices(args):
blacklist = (device_blacklist.Blacklist(args.blacklist_file)
if args.blacklist_file
else None)
devices = [d for d in device_utils.DeviceUtils.HealthyDevices(blacklist)
if not args.emulators or d.adb.is_emulator]
if args.device:
devices = [d for d in devices if d == args.device]
if not devices:
raise device_errors.DeviceUnreachableError(args.device)
parallel_devices = device_utils.DeviceUtils.parallel(devices)
if args.emulators:
parallel_devices.pMap(SetProperties, args)
else:
parallel_devices.pMap(ProvisionDevice, blacklist, args)
if args.auto_reconnect:
_LaunchHostHeartbeat()
blacklisted_devices = blacklist.Read() if blacklist else []
if args.output_device_blacklist:
with open(args.output_device_blacklist, 'w') as f:
json.dump(blacklisted_devices, f)
if all(d in blacklisted_devices for d in devices):
raise device_errors.NoDevicesError
return 0
def ProvisionDevice(device, blacklist, options):
if options.reboot_timeout:
reboot_timeout = options.reboot_timeout
elif device.build_version_sdk >= version_codes.LOLLIPOP:
reboot_timeout = _DEFAULT_TIMEOUTS.LOLLIPOP
else:
reboot_timeout = _DEFAULT_TIMEOUTS.PRE_LOLLIPOP
def should_run_phase(phase_name):
return not options.phases or phase_name in options.phases
def run_phase(phase_func, reboot=True):
try:
device.WaitUntilFullyBooted(timeout=reboot_timeout, retries=0)
except device_errors.CommandTimeoutError:
logging.error('Device did not finish booting. Will try to reboot.')
device.Reboot(timeout=reboot_timeout)
phase_func(device, options)
if reboot:
device.Reboot(False, retries=0)
device.adb.WaitForDevice()
try:
if should_run_phase(_PHASES.WIPE):
if options.chrome_specific_wipe:
run_phase(WipeChromeData)
else:
run_phase(WipeDevice)
if should_run_phase(_PHASES.PROPERTIES):
run_phase(SetProperties)
if should_run_phase(_PHASES.FINISH):
run_phase(FinishProvisioning, reboot=False)
if options.chrome_specific_wipe:
package = "com.google.android.gms"
version_name = device.GetApplicationVersion(package)
logging.info("Version name for %s is %s", package, version_name)
CheckExternalStorage(device)
except device_errors.CommandTimeoutError:
logging.exception('Timed out waiting for device %s. Adding to blacklist.',
str(device))
if blacklist:
blacklist.Extend([str(device)], reason='provision_timeout')
except device_errors.CommandFailedError:
logging.exception('Failed to provision device %s. Adding to blacklist.',
str(device))
if blacklist:
blacklist.Extend([str(device)], reason='provision_failure')
def CheckExternalStorage(device):
"""Checks that storage is writable and if not makes it writable.
Arguments:
device: The device to check.
"""
try:
with device_temp_file.DeviceTempFile(
device.adb, suffix='.sh', dir=device.GetExternalStoragePath()) as f:
device.WriteFile(f.name, 'test')
except device_errors.CommandFailedError:
logging.info('External storage not writable. Remounting / as RW')
device.RunShellCommand(['mount', '-o', 'remount,rw', '/'],
check_return=True, as_root=True)
device.EnableRoot()
with device_temp_file.DeviceTempFile(
device.adb, suffix='.sh', dir=device.GetExternalStoragePath()) as f:
device.WriteFile(f.name, 'test')
def WipeChromeData(device, options):
"""Wipes chrome specific data from device
(1) uninstall any app whose name matches *chrom*, except
com.android.chrome, which is the chrome stable package. Doing so also
removes the corresponding dirs under /data/data/ and /data/app/
(2) remove any dir under /data/app-lib/ whose name matches *chrom*
(3) remove any files under /data/tombstones/ whose name matches "tombstone*"
(4) remove /data/local.prop if there is any
(5) remove /data/local/chrome-command-line if there is any
(6) remove anything under /data/local/.config/ if the dir exists
(this is telemetry related)
(7) remove anything under /data/local/tmp/
Arguments:
device: the device to wipe
"""
if options.skip_wipe:
return
try:
device.EnableRoot()
_UninstallIfMatch(device, _CHROME_PACKAGE_REGEX,
constants.PACKAGE_INFO['chrome_stable'].package)
_WipeUnderDirIfMatch(device, '/data/app-lib/', _CHROME_PACKAGE_REGEX)
_WipeUnderDirIfMatch(device, '/data/tombstones/', _TOMBSTONE_REGEX)
_WipeFileOrDir(device, '/data/local.prop')
_WipeFileOrDir(device, '/data/local/chrome-command-line')
_WipeFileOrDir(device, '/data/local/.config/')
_WipeFileOrDir(device, '/data/local/tmp/')
device.RunShellCommand('rm -rf %s/*' % device.GetExternalStoragePath(),
check_return=True)
except device_errors.CommandFailedError:
logging.exception('Possible failure while wiping the device. '
'Attempting to continue.')
def WipeDevice(device, options):
"""Wipes data from device, keeping only the adb_keys for authorization.
After wiping data on a device that has been authorized, adb can still
communicate with the device, but after reboot the device will need to be
re-authorized because the adb keys file is stored in /data/misc/adb/.
Thus, adb_keys file is rewritten so the device does not need to be
re-authorized.
Arguments:
device: the device to wipe
"""
if options.skip_wipe:
return
try:
device.EnableRoot()
device_authorized = device.FileExists(constants.ADB_KEYS_FILE)
if device_authorized:
adb_keys = device.ReadFile(constants.ADB_KEYS_FILE,
as_root=True).splitlines()
device.RunShellCommand(['wipe', 'data'],
as_root=True, check_return=True)
device.adb.WaitForDevice()
if device_authorized:
adb_keys_set = set(adb_keys)
for adb_key_file in options.adb_key_files or []:
try:
with open(adb_key_file, 'r') as f:
adb_public_keys = f.readlines()
adb_keys_set.update(adb_public_keys)
except IOError:
logging.warning('Unable to find adb keys file %s.', adb_key_file)
_WriteAdbKeysFile(device, '\n'.join(adb_keys_set))
except device_errors.CommandFailedError:
logging.exception('Possible failure while wiping the device. '
'Attempting to continue.')
def _WriteAdbKeysFile(device, adb_keys_string):
dir_path = posixpath.dirname(constants.ADB_KEYS_FILE)
device.RunShellCommand(['mkdir', '-p', dir_path],
as_root=True, check_return=True)
device.RunShellCommand(['restorecon', dir_path],
as_root=True, check_return=True)
device.WriteFile(constants.ADB_KEYS_FILE, adb_keys_string, as_root=True)
device.RunShellCommand(['restorecon', constants.ADB_KEYS_FILE],
as_root=True, check_return=True)
def SetProperties(device, options):
try:
device.EnableRoot()
except device_errors.CommandFailedError as e:
logging.warning(str(e))
_ConfigureLocalProperties(device, options.enable_java_debug)
device_settings.ConfigureContentSettings(
device, device_settings.DETERMINISTIC_DEVICE_SETTINGS)
if options.disable_location:
device_settings.ConfigureContentSettings(
device, device_settings.DISABLE_LOCATION_SETTINGS)
else:
device_settings.ConfigureContentSettings(
device, device_settings.ENABLE_LOCATION_SETTINGS)
if options.disable_mock_location:
device_settings.ConfigureContentSettings(
device, device_settings.DISABLE_MOCK_LOCATION_SETTINGS)
else:
device_settings.ConfigureContentSettings(
device, device_settings.ENABLE_MOCK_LOCATION_SETTINGS)
device_settings.SetLockScreenSettings(device)
if options.disable_network:
device_settings.ConfigureContentSettings(
device, device_settings.NETWORK_DISABLED_SETTINGS)
if options.disable_system_chrome:
# The system chrome version on the device interferes with some tests.
device.RunShellCommand(['pm', 'disable', 'com.android.chrome'],
check_return=True)
if options.remove_system_webview:
if device.HasRoot():
# This is required, e.g., to replace the system webview on a device.
device.adb.Remount()
device.RunShellCommand(['stop'], check_return=True)
device.RunShellCommand(['rm', '-rf'] + _SYSTEM_WEBVIEW_PATHS,
check_return=True)
device.RunShellCommand(['start'], check_return=True)
else:
logging.warning('Cannot remove system webview from a non-rooted device')
def _ConfigureLocalProperties(device, java_debug=True):
"""Set standard readonly testing device properties prior to reboot."""
local_props = [
'persist.sys.usb.config=adb',
'ro.monkey=1',
'ro.test_harness=1',
'ro.audio.silent=1',
'ro.setupwizard.mode=DISABLED',
]
if java_debug:
local_props.append(
'%s=all' % device_utils.DeviceUtils.JAVA_ASSERT_PROPERTY)
local_props.append('debug.checkjni=1')
try:
device.WriteFile(
device.LOCAL_PROPERTIES_PATH,
'\n'.join(local_props), as_root=True)
# Android will not respect the local props file if it is world writable.
device.RunShellCommand(
['chmod', '644', device.LOCAL_PROPERTIES_PATH],
as_root=True, check_return=True)
except device_errors.CommandFailedError:
logging.exception('Failed to configure local properties.')
def FinishProvisioning(device, options):
if options.min_battery_level is not None:
try:
battery = battery_utils.BatteryUtils(device)
battery.ChargeDeviceToLevel(options.min_battery_level)
except device_errors.CommandFailedError:
logging.exception('Unable to charge device to specified level.')
if options.max_battery_temp is not None:
try:
battery = battery_utils.BatteryUtils(device)
battery.LetBatteryCoolToTemperature(options.max_battery_temp)
except device_errors.CommandFailedError:
logging.exception('Unable to let battery cool to specified temperature.')
def _set_and_verify_date():
if device.build_version_sdk >= version_codes.MARSHMALLOW:
date_format = '%m%d%H%M%Y.%S'
set_date_command = ['date']
else:
date_format = '%Y%m%d.%H%M%S'
set_date_command = ['date', '-s']
strgmtime = time.strftime(date_format, time.gmtime())
set_date_command.append(strgmtime)
device.RunShellCommand(set_date_command, as_root=True, check_return=True)
device_time = device.RunShellCommand(
['date', '+"%Y%m%d.%H%M%S"'], as_root=True,
single_line=True).replace('"', '')
device_time = datetime.datetime.strptime(device_time, "%Y%m%d.%H%M%S")
correct_time = datetime.datetime.strptime(strgmtime, date_format)
tdelta = (correct_time - device_time).seconds
if tdelta <= 1:
logging.info('Date/time successfully set on %s', device)
return True
else:
logging.error('Date mismatch. Device: %s Correct: %s',
device_time.isoformat(), correct_time.isoformat())
return False
# Sometimes the date is not set correctly on the devices. Retry on failure.
if not timeout_retry.WaitFor(_set_and_verify_date, wait_period=1,
max_tries=2):
raise device_errors.CommandFailedError(
'Failed to set date & time.', device_serial=str(device))
props = device.RunShellCommand('getprop', check_return=True)
for prop in props:
logging.info(' %s', prop)
if options.auto_reconnect:
_PushAndLaunchAdbReboot(device, options.target)
def _UninstallIfMatch(device, pattern, app_to_keep):
installed_packages = device.RunShellCommand(['pm', 'list', 'packages'])
for package_output in installed_packages:
package = package_output.split(":")[1]
if pattern.match(package) and not package == app_to_keep:
device.Uninstall(package)
def _WipeUnderDirIfMatch(device, path, pattern):
ls_result = device.Ls(path)
for (content, _) in ls_result:
if pattern.match(content):
_WipeFileOrDir(device, path + content)
def _WipeFileOrDir(device, path):
if device.PathExists(path):
device.RunShellCommand(['rm', '-rf', path], check_return=True)
def _PushAndLaunchAdbReboot(device, target):
"""Pushes and launches the adb_reboot binary on the device.
Arguments:
device: The DeviceUtils instance for the device to which the adb_reboot
binary should be pushed.
target: The build target (example, Debug or Release) which helps in
locating the adb_reboot binary.
"""
logging.info('Will push and launch adb_reboot on %s', str(device))
# Kill if adb_reboot is already running.
device.KillAll('adb_reboot', blocking=True, timeout=2, quiet=True)
# Push adb_reboot
logging.info(' Pushing adb_reboot ...')
adb_reboot = os.path.join(constants.DIR_SOURCE_ROOT,
'out/%s/adb_reboot' % target)
device.PushChangedFiles([(adb_reboot, '/data/local/tmp/')])
# Launch adb_reboot
logging.info(' Launching adb_reboot ...')
device.RunShellCommand(
['/data/local/tmp/adb_reboot'],
check_return=True)
def _LaunchHostHeartbeat():
# Kill if existing host_heartbeat
KillHostHeartbeat()
# Launch a new host_heartbeat
logging.info('Spawning host heartbeat...')
subprocess.Popen([os.path.join(constants.DIR_SOURCE_ROOT,
'build/android/host_heartbeat.py')])
def KillHostHeartbeat():
ps = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
stdout, _ = ps.communicate()
matches = re.findall('\\n.*host_heartbeat.*', stdout)
for match in matches:
logging.info('An instance of host_heartbeat is running... will kill it.')
pid = re.findall(r'(\S+)', match)[1]
subprocess.call(['kill', str(pid)])
def main():
# Recommended options on perf bots:
# --disable-network
# TODO(tonyg): We eventually want network on. However, currently radios
# can cause perfbots to drain faster than they charge.
# --min-battery-level 95
# Some perf bots run benchmarks with USB charging disabled which leads
# to gradual draining of the battery. We must wait for a full charge
# before starting a run in order to keep the devices online.
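# Illustrative example (assumption, not taken from the original script): the
# recommended flags above could be combined into a single invocation, e.g.
#   ./provision_devices.py --disable-network --min-battery-level 95 -t Release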
parser = argparse.ArgumentParser(
description='Provision Android devices with settings required for bots.')
parser.add_argument('-d', '--device', metavar='SERIAL',
help='the serial number of the device to be provisioned'
' (the default is to provision all devices attached)')
parser.add_argument('--blacklist-file', help='Device blacklist JSON file.')
parser.add_argument('--phase', action='append', choices=_PHASES.ALL,
dest='phases',
help='Phases of provisioning to run. '
'(If omitted, all phases will be run.)')
parser.add_argument('--skip-wipe', action='store_true', default=False,
help="don't wipe device data during provisioning")
parser.add_argument('--reboot-timeout', metavar='SECS', type=int,
help='when wiping the device, max number of seconds to'
' wait after each reboot '
'(default: %s)' % _DEFAULT_TIMEOUTS.HELP_TEXT)
parser.add_argument('--min-battery-level', type=int, metavar='NUM',
help='wait for the device to reach this minimum battery'
' level before trying to continue')
parser.add_argument('--disable-location', action='store_true',
help='disable Google location services on devices')
parser.add_argument('--disable-mock-location', action='store_true',
default=False, help='Set ALLOW_MOCK_LOCATION to false')
parser.add_argument('--disable-network', action='store_true',
help='disable network access on devices')
parser.add_argument('--disable-java-debug', action='store_false',
dest='enable_java_debug', default=True,
help='disable Java property asserts and JNI checking')
parser.add_argument('--disable-system-chrome', action='store_true',
help='Disable the system chrome from devices.')
parser.add_argument('--remove-system-webview', action='store_true',
help='Remove the system webview from devices.')
parser.add_argument('-t', '--target', default='Debug',
help='the build target (default: %(default)s)')
parser.add_argument('-r', '--auto-reconnect', action='store_true',
help='push binary which will reboot the device on adb'
' disconnections')
parser.add_argument('--adb-key-files', type=str, nargs='+',
help='list of adb keys to push to device')
parser.add_argument('-v', '--verbose', action='count', default=1,
help='Log more information.')
parser.add_argument('--max-battery-temp', type=int, metavar='NUM',
help='Wait for the battery to have this temp or lower.')
parser.add_argument('--output-device-blacklist',
help='Json file to output the device blacklist.')
parser.add_argument('--chrome-specific-wipe', action='store_true',
help='only wipe chrome specific data during provisioning')
parser.add_argument('--emulators', action='store_true',
help='provision only emulators and ignore usb devices')
args = parser.parse_args()
constants.SetBuildType(args.target)
run_tests_helper.SetLogLevel(args.verbose)
return ProvisionDevices(args)
if __name__ == '__main__':
sys.exit(main())
|
|
import os
import hashlib
import requests
import tempfile
import mimetypes
import numpy as np
from PIL import Image
from io import BytesIO
from fnmatch import fnmatch
from datetime import datetime
from . import config
API_URL = 'https://api.abraia.me'
tempdir = tempfile.gettempdir()
def file_path(f, userid):
return f['source'][len(userid)+1:]
def md5sum(src):
hash_md5 = hashlib.md5()
f = BytesIO(src.getvalue()) if isinstance(src, BytesIO) else open(src, 'rb')
for chunk in iter(lambda: f.read(4096), b''):
hash_md5.update(chunk)
f.close()
return hash_md5.hexdigest()
class APIError(Exception):
def __init__(self, message, code=0):
super(APIError, self).__init__(message, code)
self.code = code
try:
self.message = message.json()['message']
except Exception:
self.message = ''
class Abraia:
def __init__(self, folder=''):
self.auth = config.load_auth()
self.userid = self.load_user().get('id')
self.folder = folder
def load_user(self):
if self.auth[0] and self.auth[1]:
url = f"{API_URL}/users"
resp = requests.get(url, auth=self.auth)
if resp.status_code != 200:
raise APIError(resp.text, resp.status_code)
return resp.json()['user']
return {}
def list_files(self, path=''):
dirname = os.path.dirname(path)
basename = os.path.basename(path)
folder = dirname + '/' if dirname else dirname
url = f"{API_URL}/files/{self.userid}/{folder}"
resp = requests.get(url, auth=self.auth)
if resp.status_code != 200:
raise APIError(resp.text, resp.status_code)
resp = resp.json()
for f in resp['files']:
f['date'] = datetime.fromtimestamp(f['date'])
files, folders = resp['files'], resp['folders']
files = list(map(lambda f: {'path': file_path(f, self.userid), 'name': f['name'], 'size': f['size'], 'date': f['date']}, files))
folders = list(map(lambda f: {'path': file_path(f, self.userid), 'name': f['name']}, folders))
if basename:
files = list(filter(lambda f: fnmatch(f['path'], path), files))
folders = list(filter(lambda f: fnmatch(f['path'], path), folders))
return files, folders
def upload_file(self, src, path=''):
if path == '' or path.endswith('/'):
path = path + os.path.basename(src)
json = {}
name = os.path.basename(path)
type = mimetypes.guess_type(name)[0] or 'binary/octet-stream'
if isinstance(src, str) and src.startswith('http'):
json = {'url': src}
else:
json = {'name': name, 'type': type}
md5 = md5sum(src)
if md5:
json['md5'] = md5
url = f"{API_URL}/files/{self.userid}/{path}"
resp = requests.post(url, json=json, auth=self.auth)
if resp.status_code != 201:
raise APIError(resp.text, resp.status_code)
resp = resp.json()
url = resp.get('uploadURL')
if url:
data = src if isinstance(src, BytesIO) else open(src, 'rb')
resp = requests.put(url, data=data, headers={'Content-Type': type})
if resp.status_code != 200:
raise APIError(resp.text, resp.status_code)
return file_path({'name': name, 'source': f"{self.userid}/{path}"}, self.userid)
return file_path(resp['file'], self.userid)
def move_file(self, old_path, new_path):
json = {'store': f"{self.userid}/{old_path}"}
url = f"{API_URL}/files/{self.userid}/{new_path}"
resp = requests.post(url, json=json, auth=self.auth)
if resp.status_code != 201:
raise APIError(resp.text, resp.status_code)
resp = resp.json()
return file_path(resp['file'], self.userid)
def download_file(self, path, dest=''):
url = f"{API_URL}/files/{self.userid}/{path}"
resp = requests.get(url, stream=True, auth=self.auth)
if resp.status_code != 200:
raise APIError(resp.text, resp.status_code)
if not dest:
return BytesIO(resp.content)
with open(dest, 'wb') as f:
f.write(resp.content)
def remove_file(self, path):
url = f"{API_URL}/files/{self.userid}/{path}"
resp = requests.delete(url, auth=self.auth)
if resp.status_code != 200:
raise APIError(resp.text, resp.status_code)
resp = resp.json()
return file_path(resp['file'], self.userid)
def load_metadata(self, path):
url = f"{API_URL}/metadata/{self.userid}/{path}"
resp = requests.get(url, auth=self.auth)
if resp.status_code != 200:
raise APIError(resp.text, resp.status_code)
return resp.json()
def remove_metadata(self, path):
url = f"{API_URL}/metadata/{self.userid}/{path}"
resp = requests.delete(url, auth=self.auth)
if resp.status_code != 200:
raise APIError(resp.text, resp.status_code)
return resp.json()
def transform_image(self, path, dest, params={'quality': 'auto'}):
ext = dest.split('.').pop().lower()
params['format'] = params.get('format') or ext
if params.get('action'):
params['background'] = f"{API_URL}/images/{self.userid}/{path}"
if params.get('fmt') is None:
params['fmt'] = params['background'].split('.').pop()
path = f"{self.userid}/{params['action']}"
url = f"{API_URL}/images/{self.userid}/{path}"
resp = requests.get(url, params=params, stream=True, auth=self.auth)
if resp.status_code != 200:
raise APIError(resp.text, resp.status_code)
with open(dest, 'wb') as f:
f.write(resp.content)
def load_file(self, path):
stream = self.download_file(path)
try:
return stream.getvalue().decode('utf-8')
except Exception:
return stream
def load_image(self, path):
stream = self.download_file(path)
return np.asarray(Image.open(stream))
def save_file(self, path, stream):
stream = BytesIO(bytes(stream, 'utf-8')) if isinstance(stream, str) else stream
return self.upload_file(stream, path)
def save_image(self, path, img):
# stream = BytesIO()
# mime = mimetypes.guess_type(path)[0]
# format = mime.split('/')[1]
# Image.fromarray(img).save(stream, format)
# print(mime, format)
basename = os.path.basename(path)
src = os.path.join(tempdir, basename)
Image.fromarray(img).save(src)
return self.upload_file(src, path)
def capture_text(self, path):
url = f"{API_URL}/rekognition/{self.userid}/{path}"
resp = requests.get(url, params={'mode': 'text'}, auth=self.auth)
if resp.status_code != 200:
raise APIError(resp.text, resp.status_code)
text = list(filter(lambda t: t.get('ParentId') is None, resp.json().get('Text')))
return [t.get('DetectedText') for t in text]
def detect_labels(self, path):
url = f"{API_URL}/rekognition/{self.userid}/{path}"
resp = requests.get(url, params={'mode': 'labels'}, auth=self.auth)
if resp.status_code != 200:
raise APIError(resp.text, resp.status_code)
labels = resp.json().get('Labels')
return [l.get('Name') for l in labels]
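# Illustrative sketch (not part of the original client): typical usage of the
# Abraia class defined above. The folder and file names are placeholders for
# demonstration only.
def _example_abraia_usage():
    """Hypothetical helper that lists, uploads, and loads files."""
    abraia = Abraia()
    files, folders = abraia.list_files('*')
    path = abraia.upload_file('image.jpg', 'demo/')
    img = abraia.load_image(path)
    return files, folders, img.shape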
|
|
#!/usr/bin/env python
import datetime
from datetime import timedelta
import json
import os
import random
import string
import httplib2
from flask import (
Flask, Response, jsonify, make_response, render_template, request, session)
from oauth2client.client import flow_from_clientsecrets, FlowExchangeError
from pyfiles.database_interface import (
get_filtered_device_data_points, get_svg, get_users_table_id, init_db,
client_log_table_insert, get_max_devices_table_id_from_users_table_id)
from pyfiles.authentication_helper import user_hash, verify_and_get_account_id
from pyfiles.server_common import (
common_path,
common_routes_json,
common_setlegmode,
common_trips_csv,
common_trips_json,
common_download_zip)
import logging
logging.basicConfig()
APPLICATION_NAME = 'TrafficSense'
SETTINGS_FILE_ENV_VAR = 'REGULARROUTES_SETTINGS'
CLIENT_SECRET_FILE_NAME = 'client_secrets.json'
# Set the settings dir from the env var pointing to the settings file; fall back to this file's parent dir.
settings_dir_path = os.path.abspath(os.path.dirname(os.getenv(SETTINGS_FILE_ENV_VAR, os.path.abspath(__file__))))
CLIENT_SECRET_FILE = os.path.join(settings_dir_path, CLIENT_SECRET_FILE_NAME)
CLIENT_ID = json.loads(open(CLIENT_SECRET_FILE, 'r').read())['web']['client_id']
app = Flask(__name__)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Memory-resident session storage, see the simplekv documentation for details
# store = DictStore()
# This will replace the app's session handling
# KVSessionExtension(store, app)
env_var_value = os.getenv(SETTINGS_FILE_ENV_VAR, None)
if env_var_value is not None:
print('loading settings from: "' + str(env_var_value) + '"')
app.config.from_envvar(SETTINGS_FILE_ENV_VAR)
else:
print('Environment variable "' + SETTINGS_FILE_ENV_VAR + '" was not defined -> using debug mode')
# assume debug environment
app.config.from_pyfile('regularroutes.cfg')
app.debug = True
db, store = init_db(app)
# TrafficSense website REST interface:
# Browser sign-in procedures
@app.route('/', methods=['GET'])
def index():
"""Initialize a session for the current user, and render index.html."""
# Create a state token to prevent request forgery.
# Store it in the session for later validation.
state = ''.join(random.choice(string.ascii_uppercase + string.digits)
for x in range(32))
session['state'] = state
# Set the Client ID, Token State, and Application Name in the HTML while
# serving it.
response = make_response(
render_template('index.html',
CLIENT_ID=CLIENT_ID,
STATE=state,
RR_URL_PREFIX=app.config['RR_URL_PREFIX'],
APPLICATION_NAME=APPLICATION_NAME))
response.headers['Content-Type'] = 'text/html'
return response
@app.route('/signin', methods=['GET'])
def sign_in():
return index()
@app.route('/connect', methods=['POST'])
def connect():
"""Exchange the one-time authorization code for a token and
store the token in the session."""
# Ensure that the request is not a forgery and that the user sending
# this connect request is the expected user.
# print 'Session state returns: ' + session.get('state')
if request.args.get('state', '') != session.get('state'):
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
print('401 due to invalid state parameter.')
return response
# Delete the one-time token - page refresh required to re-connect
del session['state']
# If this request does not have `X-Requested-With` header, this could be a CSRF
if not request.headers.get('X-Requested-With'):
response = make_response(json.dumps('Invalid header.'), 403)
response.headers['Content-Type'] = 'application/json'
print('403 due to missing X-Requested-With header.')
return response
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets(CLIENT_SECRET_FILE,
scope='profile',
redirect_uri='postmessage')
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError as err:
# invalid token
print('Invalid token: ' + str(code) + ". error: " + str(err))
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# An ID Token is a cryptographically-signed JSON object encoded in base 64.
# Normally, it is critical that you validate an ID Token before you use it,
# but since you are communicating directly with Google over an
# intermediary-free HTTPS channel and using your Client Secret to
# authenticate yourself to Google, you can be confident that the token you
# receive really comes from Google and is valid. If your server passes the
# ID Token to other components of your app, it is extremely important that
# the other components validate the token before using it.
google_id = verify_and_get_account_id(CLIENT_ID, credentials)['google_id']
stored_credentials = session.get('credentials')
stored_google_id = session.get('google_id')
if stored_credentials is not None and google_id == stored_google_id:
response = make_response(json.dumps('Current user is already connected.'),
200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
session['credentials'] = credentials
session['google_id'] = google_id
# Find and store the RegularRoutes user id
user_hash_id = user_hash(google_id)
user_id = get_users_table_id(user_hash_id)
if user_id < 0:
# No data for the user -> show the nodata -page
print('No data found for the current user.')
response = make_response(json.dumps('Nodata.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
client_log_table_insert(get_max_devices_table_id_from_users_table_id(user_id), user_id, "WEB-CONNECT", "")
session['rr_user_id'] = user_id
response = make_response(json.dumps('Successfully connected user.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
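# Illustrative sketch (assumption, not part of the original server): how a test
# client could exercise the checks in /connect above. The base_url, state and
# one_time_code values are placeholders for demonstration only.
def _example_connect_request(base_url, state, one_time_code):
    """Hypothetical helper that posts a one-time auth code to /connect."""
    import requests  # assumed available; only needed for this sketch
    return requests.post(
        base_url + '/connect',
        params={'state': state},
        data=one_time_code,
        headers={'X-Requested-With': 'XMLHttpRequest'})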
@app.route('/disconnect', methods=['POST'])
def disconnect():
"""Revoke current user's token and reset their session."""
# Only disconnect a connected user.
credentials = session.get('credentials')
if credentials is None:
response = make_response(json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Execute HTTP GET request to revoke current token.
access_token = credentials.access_token
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
# Reset the user's session.
del session['credentials']
    if session.get('rr_user_id') is not None:
      del session['rr_user_id']
response = make_response(json.dumps('Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
# For whatever reason, the given token was invalid.
print(result)
response = make_response(
json.dumps('Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
@app.route('/signedout')
def signed_out():
"""User disconnected from the service."""
return render_template('signedout.html', APPLICATION_NAME=APPLICATION_NAME)
@app.route('/menu')
def regularroutes_menu():
"""User disconnected from the service."""
user_id = session.get('rr_user_id')
if user_id == None:
# Not authenticated -> throw back to front page
return index()
return render_template('menu.html',
RR_URL_PREFIX=app.config['RR_URL_PREFIX'],
APPLICATION_NAME=APPLICATION_NAME)
@app.route('/nodata')
def no_data():
"""No data was found for this user account."""
user_id = session.get('rr_user_id')
if user_id == None:
# Not authenticated -> throw back to front page
return index()
return render_template('nodata.html', APPLICATION_NAME=APPLICATION_NAME)
@app.route('/pdmanagement')
def personal_data_management():
"""Personal data management submenu."""
user_id = session.get('rr_user_id')
if user_id == None:
# Not authenticated -> throw back to front page
return index()
return render_template('pdmanagement.html', APPLICATION_NAME=APPLICATION_NAME)
@app.route('/cancelparticipation', methods=['POST'])
def cancel_participation():
"""Cancel the current user's participation to the study."""
user_id = session.get('rr_user_id')
if user_id == None:
response = make_response(json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
client_log_table_insert(get_max_devices_table_id_from_users_table_id(user_id), user_id, "CANCEL-PARTICIPATION", "")
# Send email to the maintainers about the cancellation
try:
import yagmail
yag = yagmail.SMTP(app.config['GMAIL_FROM'], app.config['GMAIL_PWD'])
msgBody = ['User id: ' + str(user_id)]
print('Sending participation cancellation email with message body: ' + msgBody[0])
yag.send(app.config['EMAIL_TO'], 'CANCEL request from user', msgBody)
response = make_response(json.dumps('TrafficSense project informed of participation cancellation.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
except Exception as e:
    print('Exception: ' + str(e))
    response = make_response(json.dumps('Error in informing the TrafficSense project: ' + str(e)), 500)
response.headers['Content-Type'] = 'application/json'
return response
@app.route('/participationcancelled')
def participation_cancelled():
"""Participation cancellation message has been sent."""
user_id = session.get('rr_user_id')
if user_id == None:
# Not authenticated -> throw back to front page
return index()
return render_template('participationcancelled.html', APPLICATION_NAME=APPLICATION_NAME)
@app.route('/energymap')
def energymap():
"""Draw the energy consumption map of the user."""
user_id = session.get('rr_user_id')
if user_id == None:
# Not authenticated -> throw back to front page
return index()
client_log_table_insert(get_max_devices_table_id_from_users_table_id(user_id), user_id, "WEB-PATH", "")
return render_template('energymap.html',
APPLICATION_NAME=APPLICATION_NAME,
RR_URL_PREFIX=app.config['RR_URL_PREFIX'],
api_key=app.config['MAPS_API_KEY'])
@app.route('/energymap/geojson')
def energymap_device_geojson():
if 'date' in request.args:
date_start = datetime.datetime.strptime(request.args['date'], '%Y-%m-%d').replace(hour=0, minute=0, second=0, microsecond=0)
else:
date_start = datetime.datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
# date_start = datetime.datetime.strptime("2015-11-11", '%Y-%m-%d')
date_end = date_start + timedelta(hours=24)
user_id = session.get('rr_user_id')
if user_id == None:
response = make_response(json.dumps('No user data in current session.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Debug-code:
# user_id = 14
points = get_filtered_device_data_points(
user_id, datetime.datetime.fromordinal(date_start.toordinal()),
datetime.datetime.fromordinal(date_end.toordinal()))
# points = data_points_filtered(user_id, date_start, date_end)
features = []
for point in points:
if point["line_type"] is None:
activity = point['activity']
title = 'activity: %s' % activity
else:
# Public transport recognized, use line-type instead
activity = point['line_type']
title = 'public_transport: %s %s' % (activity, point['line_name'])
title += "\n%s" % point["time"].strftime('%Y-%m-%d %H:%M:%S')
features.append({
'type': 'Feature',
'geometry': json.loads(point['geojson']),
'properties': {
'type': 'raw-point',
'activity': activity,
'title': title
}
})
geojson = {
'type': 'FeatureCollection',
'features': features
}
return jsonify(geojson)
@app.route("/energycertificate_svg")
def energycertificate_svg():
user_id = session.get('rr_user_id')
if user_id == None:
# Not authenticated -> throw back to front page
return index()
  firstlastday = [
      datetime.datetime.strptime(request.args[d], '%Y-%m-%d')
      if d in request.args else None
      for d in ["firstday", "lastday"]]
client_log_table_insert(
get_max_devices_table_id_from_users_table_id(user_id),
user_id,
"WEB-CERTIFICATE",
"/".join(str(x)[:10] for x in firstlastday))
return Response(get_svg(user_id, *firstlastday), mimetype="image/svg+xml")
@app.route("/energycertificate")
def energycertificate():
user_id = session.get('rr_user_id')
if user_id == None:
# Not authenticated -> throw back to front page
return index()
  firstday, lastday = [
      datetime.datetime.strptime(request.args[d], '%Y-%m-%d')
      if d in request.args else None
      for d in ["firstday", "lastday"]]
# client_log through energycertificate_svg
return render_template(
'energycertificate.html',
APPLICATION_NAME=APPLICATION_NAME,
RR_URL_PREFIX=app.config['RR_URL_PREFIX'],
firstday=firstday,
lastday=lastday)
@app.route('/routes')
def routes():
"""Draw a day of trips of the user."""
user_id = session.get('rr_user_id')
if user_id == None:
# Not authenticated -> throw back to front page
return index()
# client_log_table_insert(get_max_devices_table_id_from_users_table_id(user_id), user_id, "WEB-PATH", "") XXX add me
return render_template(
'userroutes.html',
APPLICATION_NAME=APPLICATION_NAME,
RR_URL_PREFIX=app.config['RR_URL_PREFIX'],
api_key=app.config['MAPS_API_KEY'])
@app.route('/routes_json')
def routes_json():
user_id = session.get('rr_user_id')
if user_id == None:
response = make_response(json.dumps(
'No user data in current session.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
return common_routes_json(request, db, user_id)
@app.route('/setlegmode', methods=['POST'])
def setlegmode_post():
"""Allow user to correct detected transit modes and line names."""
user_id = session.get('rr_user_id')
if user_id == None:
# Not authenticated -> throw back to front page
return index()
device, legid, legact, legline = common_setlegmode(request, db, user_id)
client_log_table_insert(
device,
user_id,
"WEB-PATH-EDIT",
"%s %s %s" % (legid, legact, legline))
return jsonify({})
@app.route('/trips')
def trips():
"""Draw a day of trips of the user."""
user_id = session.get('rr_user_id')
if user_id == None:
# Not authenticated -> throw back to front page
return index()
return render_template(
'usertrips.html',
APPLICATION_NAME=APPLICATION_NAME,
RR_URL_PREFIX=app.config['RR_URL_PREFIX'],
api_key=app.config['MAPS_API_KEY'])
@app.route('/trips_csv')
def trips_csv():
user_id = session.get('rr_user_id')
if user_id == None:
response = make_response(json.dumps(
'No user data in current session.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
return common_trips_csv(request, db, user_id)
@app.route('/trips_json')
def trips_json():
user_id = session.get('rr_user_id')
if user_id == None:
response = make_response(json.dumps(
'No user data in current session.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
client_log_table_insert(
get_max_devices_table_id_from_users_table_id(user_id),
user_id,
"WEB-TRIPS-LIST", "/".join([
request.args.get("firstday", ""),
request.args.get("lastday", "")]))
return common_trips_json(request, db, user_id)
@app.route('/trafficsense_data')
def download_dump():
user_id = session.get('rr_user_id')
if user_id == None:
response = make_response(json.dumps(
'No user data in current session.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
client_log_table_insert(
get_max_devices_table_id_from_users_table_id(user_id),
user_id, "WEB-DOWNLOAD-DATA", "")
return common_download_zip(user_id)
@app.route('/path_json')
def path_json():
user_id = session.get('rr_user_id')
if user_id == None:
response = make_response(json.dumps(
'No user data in current session.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
devices = db.metadata.tables["devices"]
response = make_response(
common_path(request, db, devices.c.user_id==user_id))
response.headers['Content-Type'] = 'application/json'
return response
# App starting point:
if __name__ == '__main__':
if app.debug:
app.run(host='0.0.0.0')
else:
app.run()
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Distributed measurement infrastructure to measure the runtime costs of tensor programs.
These functions are responsible for building the tvm module, uploading it to
remote devices, recording the running time costs, and checking the correctness of the output.
We separate the measurement into two steps: build and run.
A builder builds the executable binary files and a runner runs the binary files to
get the measurement results. The flow of data structures is
. `ProgramBuilder` `ProgramRunner`
`MeasureInput` -----------------> `BuildResult` ----------------> `MeasureResult`
We implement these in python to utilize python's multiprocessing and error handling.
"""
import os
import time
import shutil
import traceback
import tempfile
import multiprocessing
import tvm._ffi
from tvm.runtime import Object, module, ndarray
from tvm.driver import build_module
from tvm.ir import transform
from tvm.rpc.tracker import Tracker
from tvm.rpc.server import Server
from tvm.autotvm.measure.measure_methods import set_cuda_target_arch
from tvm.contrib import tar, ndk
from . import _ffi_api
from .loop_state import StateObject
from .utils import (
get_const_tuple,
NoDaemonPool,
call_func_with_timeout,
request_remote,
check_remote,
)
# The maximum length of error message
MAX_ERROR_MSG_LEN = 512
# We use fork and a global variable to copy arguments between processes.
# This can avoid expensive serialization of TVM IR when using multiprocessing.Pool
GLOBAL_BUILD_ARGUMENTS = None
GLOBAL_RUN_ARGUMENTS = None
@tvm._ffi.register_object("auto_scheduler.MeasureCallback")
class MeasureCallback(Object):
""" The base class of measurement callback functions. """
@tvm._ffi.register_object("auto_scheduler.MeasureInput")
class MeasureInput(Object):
"""Store the input of a measurement.
Parameters
----------
task : SearchTask
The SearchTask of this measurement.
state : Union[State, StateObject]
The State to be measured.
"""
def __init__(self, task, state):
state = state if isinstance(state, StateObject) else state.state_object
self.__init_handle_by_constructor__(_ffi_api.MeasureInput, task, state)
@tvm._ffi.register_object("auto_scheduler.BuildResult")
class BuildResult(Object):
"""Store the result of a build.
Parameters
----------
filename : Optional[str]
The filename of built binary file.
args : List[Tensor]
The arguments.
error_no : int
The error code.
error_msg : Optional[str]
The error message if there is any error.
time_cost : float
The time cost of build.
"""
def __init__(self, filename, args, error_no, error_msg, time_cost):
filename = filename if filename else ""
error_msg = error_msg if error_msg else ""
self.__init_handle_by_constructor__(
_ffi_api.BuildResult, filename, args, error_no, error_msg, time_cost
)
@tvm._ffi.register_object("auto_scheduler.MeasureResult")
class MeasureResult(Object):
"""Store the results of a measurement.
Parameters
----------
costs : List[float]
The time costs of execution.
error_no : int
The error code.
error_msg : Optional[str]
The error message if there is any error.
all_cost : float
The time cost of build and run.
timestamp : float
The time stamps of this measurement.
"""
def __init__(self, costs, error_no, error_msg, all_cost, timestamp):
error_msg = error_msg if error_msg else ""
self.__init_handle_by_constructor__(
_ffi_api.MeasureResult, costs, error_no, error_msg, all_cost, timestamp
)
@tvm._ffi.register_object("auto_scheduler.ProgramBuilder")
class ProgramBuilder(Object):
""" The base class of ProgramBuilders. """
def build(self, measure_inputs, verbose=1):
"""Build programs and return results.
Parameters
----------
measure_inputs : List[MeasureInput]
A List of MeasureInput.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during program building.
Returns
-------
res : List[BuildResult]
"""
return _ffi_api.ProgramBuilderBuild(self, measure_inputs, verbose)
@tvm._ffi.register_object("auto_scheduler.ProgramRunner")
class ProgramRunner(Object):
""" The base class of ProgramRunners. """
def run(self, measure_inputs, build_results, verbose=1):
"""Run measurement and return results.
Parameters
----------
measure_inputs : List[MeasureInput]
A List of MeasureInput.
build_results : List[BuildResult]
            A List of BuildResult to be run.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during program running.
Returns
-------
res : List[MeasureResult]
"""
return _ffi_api.ProgramRunnerRun(self, measure_inputs, build_results, verbose)
@tvm._ffi.register_object("auto_scheduler.LocalBuilder")
class LocalBuilder(ProgramBuilder):
"""LocalBuilder use local CPU cores to build programs in parallel.
Parameters
----------
timeout : int = 15
        The timeout limit (in seconds) for each build thread.
This is used in a wrapper of the multiprocessing.Process.join().
n_parallel : int = multiprocessing.cpu_count()
Number of threads used to build in parallel.
build_func : str = 'default'
The name of registered build function.
"""
def __init__(self, timeout=15, n_parallel=multiprocessing.cpu_count(), build_func="default"):
self.__init_handle_by_constructor__(_ffi_api.LocalBuilder, timeout, n_parallel, build_func)
@tvm._ffi.register_object("auto_scheduler.LocalRunner")
class LocalRunner(ProgramRunner):
"""LocalRunner that uses local CPU/GPU to measures the time cost of programs.
Parameters
----------
timeout : int = 10
        The timeout limit (in seconds) for each run.
This is used in a wrapper of the multiprocessing.Process.join().
number : int = 3
The number of times to run the generated code for taking average.
        We call these runs one `repeat` of measurement.
repeat : int = 1
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms : int = 100
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameter `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        I.e., when the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval : float = 0.0
The cool down interval between two measurements.
enable_cpu_cache_flush: bool = False
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
        This only has an effect on CPU tasks.
"""
def __init__(
self,
timeout=10,
number=3,
repeat=1,
min_repeat_ms=100,
cooldown_interval=0.0,
enable_cpu_cache_flush=False,
):
self.__init_handle_by_constructor__(
_ffi_api.LocalRunner,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
)
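# A minimal usage sketch (not part of the original module): how MeasureInput,
# LocalBuilder and LocalRunner defined above fit together. It assumes a valid
# SearchTask `task` and a State `state` produced by a search policy.
def _example_local_measure(task, state):
    """Hypothetical example: build and measure one candidate state locally."""
    inputs = [MeasureInput(task, state)]
    build_results = LocalBuilder(timeout=15).build(inputs, verbose=0)
    runner = LocalRunner(timeout=10, number=3, repeat=1)
    # Each returned MeasureResult carries the measured costs in seconds.
    return runner.run(inputs, build_results, verbose=0)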
@tvm._ffi.register_object("auto_scheduler.RPCRunner")
class RPCRunner(ProgramRunner):
"""RPCRunner that uses RPC call to measures the time cost of programs on remote devices.
Or sometime we may need to use RPC even in local running to insulate the thread environment.
(e.g. running CUDA programs)
Parameters
----------
key : str
The key of the device registered in the RPC tracker.
host : str
The host address of the RPC Tracker.
port : int
The port of RPC Tracker.
priority : int = 1
        The priority of this run request; a larger value means a higher priority.
n_parallel : int = 1
The number of tasks run in parallel.
timeout : int = 10
        The timeout limit (in seconds) for each run.
This is used in a wrapper of the multiprocessing.Process.join().
number : int = 3
The number of times to run the generated code for taking average.
        We call these runs one `repeat` of measurement.
repeat : int = 1
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms : int = 100
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameter `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        I.e., when the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval : float = 0.0
The cool down interval between two measurements.
enable_cpu_cache_flush: bool = False
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
        This only has an effect on CPU tasks.
"""
def __init__(
self,
key,
host,
port,
priority=1,
n_parallel=1,
timeout=10,
number=3,
repeat=1,
min_repeat_ms=100,
cooldown_interval=0.0,
enable_cpu_cache_flush=False,
):
self.__init_handle_by_constructor__(
_ffi_api.RPCRunner,
key,
host,
port,
priority,
n_parallel,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
)
if check_remote(key, host, port, priority, timeout):
print("Get devices for measurement successfully!")
else:
raise RuntimeError(
"Cannot get remote devices from the tracker. "
"Please check the status of tracker by "
"'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
"and make sure you have free devices on the queue status."
)
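# A minimal usage sketch (not part of the original module): constructing an
# RPCRunner against an RPC tracker that is already running. The key, host and
# port below are placeholders for your own tracker setup.
def _example_rpc_measure(inputs, build_results):
    """Hypothetical example: measure pre-built binaries on tracked devices."""
    runner = RPCRunner(key="my-device-key", host="127.0.0.1", port=9190,
                       n_parallel=1, timeout=10, number=3, repeat=1)
    return runner.run(inputs, build_results, verbose=0)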
class LocalRPCMeasureContext:
"""A context wrapper for running RPCRunner locally.
This will launch a local RPC Tracker and local RPC Server.
Parameters
----------
priority : int = 1
        The priority of this run request; a larger value means a higher priority.
n_parallel : int = 1
The number of tasks run in parallel.
timeout : int = 10
        The timeout limit (in seconds) for each run.
This is used in a wrapper of the multiprocessing.Process.join().
number : int = 3
The number of times to run the generated code for taking average.
        We call these runs one `repeat` of measurement.
repeat : int = 1
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms : int = 0
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameter `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        I.e., when the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval : float = 0.0
The cool down interval between two measurements.
enable_cpu_cache_flush: bool = False
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
        This only has an effect on CPU tasks.
"""
def __init__(
self,
priority=1,
n_parallel=1,
timeout=10,
number=3,
repeat=1,
min_repeat_ms=0,
cooldown_interval=0.0,
enable_cpu_cache_flush=False,
):
ctx = tvm.context("cuda", 0)
if ctx.exist:
cuda_arch = "sm_" + "".join(ctx.compute_version.split("."))
set_cuda_target_arch(cuda_arch)
host = "0.0.0.0"
self.tracker = Tracker(host, port=9000, port_end=10000, silent=True)
device_key = "$local$device$%d" % self.tracker.port
self.server = Server(
host,
port=self.tracker.port,
port_end=10000,
key=device_key,
use_popen=True,
silent=True,
tracker_addr=(self.tracker.host, self.tracker.port),
)
self.runner = RPCRunner(
device_key,
host,
self.tracker.port,
priority,
n_parallel,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
)
# Wait for the processes to start
time.sleep(0.5)
def __del__(self):
# Close the tracker and server before exit
self.tracker.terminate()
self.server.terminate()
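# A minimal usage sketch (not part of the original module): using
# LocalRPCMeasureContext to measure programs (e.g. CUDA kernels) through a
# throw-away local tracker/server pair. Keep the context object alive while
# its runner is in use, since __del__ tears the tracker and server down.
def _example_local_rpc_measure(task, state):
    """Hypothetical example: measure one state through a local RPC setup."""
    measure_ctx = LocalRPCMeasureContext(min_repeat_ms=300)
    inputs = [MeasureInput(task, state)]
    build_results = LocalBuilder().build(inputs, verbose=0)
    results = measure_ctx.runner.run(inputs, build_results, verbose=0)
    del measure_ctx  # explicitly shut down the local tracker and server
    return results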
class MeasureErrorNo(object):
""" Error type for MeasureResult. """
NO_ERROR = 0 # No error
INSTANTIATION_ERROR = 1 # Errors happen when apply transform steps from init state
COMPILE_HOST = 2 # Errors happen when compiling code on host (e.g., tvm.build)
COMPILE_DEVICE = 3 # Errors happen when compiling code on device
# (e.g. OpenCL JIT on the device)
RUNTIME_DEVICE = 4 # Errors happen when run program on device
WRONG_ANSWER = 5 # Answer is wrong when compared to a reference output
BUILD_TIMEOUT = 6 # Timeout during compilation
RUN_TIMEOUT = 7 # Timeout during run
UNKNOWN_ERROR = 8 # Unknown error
def make_error_msg():
""" Get the error message from traceback. """
error_msg = str(traceback.format_exc())
if len(error_msg) > MAX_ERROR_MSG_LEN:
error_msg = (
error_msg[: MAX_ERROR_MSG_LEN // 2] + "\n...\n" + error_msg[-MAX_ERROR_MSG_LEN // 2 :]
)
return error_msg
def local_build_worker(index):
"""
    Build function of LocalBuilder to be run in the Builder thread pool.
Parameters
----------
index : int
The MeasureInput index to be processed by the current Builder thread.
Returns
-------
res : BuildResult
The build result of this Builder thread.
"""
global GLOBAL_BUILD_ARGUMENTS
# We use fork and a global variable to copy arguments between processes.
# This can avoid expensive serialization of TVM IR when using multiprocessing.Pool
if not GLOBAL_BUILD_ARGUMENTS:
raise ValueError("GLOBAL_BUILD_ARGUMENTS not found")
measure_inputs, build_func, timeout, verbose = GLOBAL_BUILD_ARGUMENTS
assert isinstance(build_func, str)
if build_func == "default":
build_func = tar.tar
elif build_func == "ndk":
build_func = ndk.create_shared
else:
raise ValueError("Invalid build_func" + build_func)
def timed_func():
tic = time.time()
inp = measure_inputs[index]
task = inp.task
error_no = MeasureErrorNo.NO_ERROR
error_msg = None
args = []
try:
sch, args = task.compute_dag.apply_steps_from_state(inp.state, layout_rewrite=True)
# pylint: disable=broad-except
except Exception:
error_no = MeasureErrorNo.INSTANTIATION_ERROR
error_msg = make_error_msg()
if error_no == 0:
dirname = tempfile.mkdtemp()
filename = os.path.join(dirname, "tmp_func." + build_func.output_format)
try:
# TODO(merrymercy): Port the unroll pass.
with transform.PassContext():
func = build_module.build(
sch, args, target=task.target, target_host=task.target_host
)
func.export_library(filename, build_func)
# pylint: disable=broad-except
except Exception:
error_no = MeasureErrorNo.COMPILE_HOST
error_msg = make_error_msg()
else:
filename = ""
if verbose >= 1:
if error_no == MeasureErrorNo.NO_ERROR:
print(".", end="")
else:
print(".E", end="") # Build error
return filename, args, error_no, error_msg, time.time() - tic
res = call_func_with_timeout(timeout, timed_func)
if isinstance(res, TimeoutError):
if verbose >= 1:
print(".T", end="") # Build timeout
res = None, [], MeasureErrorNo.BUILD_TIMEOUT, None, timeout
return res
@tvm._ffi.register_func("auto_scheduler.local_builder.build")
def local_builder_build(inputs, timeout, n_parallel, build_func="default", verbose=1):
"""
Build function of LocalBuilder to build the MeasureInputs to runnable modules.
Parameters
----------
inputs : List[MeasureInput]
The MeasureInputs to be built.
timeout : int
        The timeout limit (in seconds) for each build thread.
This is used in a wrapper of the multiprocessing.Process.join().
n_parallel : int
Number of threads used to build in parallel.
build_func : str = 'default'
The name of build function to process the built module.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during program building.
Returns
-------
res : List[BuildResult]
The build results of these MeasureInputs.
"""
# We use fork and a global variable to copy arguments between processes.
# This can avoid expensive serialization of TVM IR when using multiprocessing.Pool
global GLOBAL_BUILD_ARGUMENTS
GLOBAL_BUILD_ARGUMENTS = (inputs, build_func, timeout, verbose)
pool = NoDaemonPool(n_parallel)
tuple_res = pool.map(local_build_worker, range(len(inputs)))
pool.terminate()
pool.join()
del pool
results = []
for res in tuple_res:
results.append(BuildResult(*res))
return results
@tvm._ffi.register_func("auto_scheduler.local_runner.run")
def local_run(
inputs,
build_results,
timeout=10,
number=3,
repeat=1,
min_repeat_ms=0,
cooldown_interval=0,
enable_cpu_cache_flush=False,
verbose=1,
):
"""
Run function of LocalRunner to test the performance of the input BuildResults.
Parameters
----------
inputs : List[MeasureInput]
The MeasureInputs to be measured.
build_results : List[BuildResult]
The BuildResults to be measured.
timeout : int = 10
        The timeout limit (in seconds) for each run.
This is used in a wrapper of the multiprocessing.Process.join().
number : int = 3
The number of times to run the generated code for taking average.
        We call these runs one `repeat` of measurement.
repeat : int = 1
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms : int = 0
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameter `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        I.e., when the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval : float = 0.0
The cool down interval between two measurements.
enable_cpu_cache_flush: bool = False
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
        This only has an effect on CPU tasks.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during program measuring.
Returns
-------
res : List[MeasureResult]
The measure results of these MeasureInputs.
"""
max_float = 1e10 # We use 1e10 instead of sys.float_info.max for better readability in log
def timed_func(inp, build_res):
tic = time.time()
error_no = 0
error_msg = None
try:
func = module.load_module(build_res.filename)
ctx = ndarray.context(str(inp.task.target), 0)
# Limitation:
# We can not get PackFunction directly in the remote mode as it is wrapped
# under the std::function. We could lift the restriction later once we fold
# the PackedFunc as an object. Currently, we pass function name to work
# around it.
f_prepare = "cache_flush_cpu_non_first_arg" if enable_cpu_cache_flush else ""
time_f = func.time_evaluator(
func.entry_name,
ctx,
number=number,
repeat=repeat,
min_repeat_ms=min_repeat_ms,
f_preproc=f_prepare,
)
# pylint: disable=broad-except
except Exception:
costs = (max_float,)
error_no = MeasureErrorNo.COMPILE_DEVICE
error_msg = make_error_msg()
if error_no == 0:
try:
args = [
ndarray.empty(get_const_tuple(x.shape), x.dtype, ctx) for x in build_res.args
]
random_fill = tvm.get_global_func("tvm.contrib.random.random_fill", True)
assert random_fill, "Please make sure USE_RANDOM is ON in the config.cmake"
for arg in args:
random_fill(arg)
ctx.sync()
costs = time_f(*args).results
# pylint: disable=broad-except
except Exception:
costs = (max_float,)
error_no = MeasureErrorNo.RUNTIME_DEVICE
error_msg = make_error_msg()
shutil.rmtree(os.path.dirname(build_res.filename))
toc = time.time()
time.sleep(cooldown_interval)
if verbose >= 1:
if error_no == MeasureErrorNo.NO_ERROR:
print("*", end="")
else:
print("*E", end="") # Run error
return costs, error_no, error_msg, toc - tic + build_res.time_cost, toc
measure_results = []
assert len(inputs) == len(build_results), "Measure input size should be equal to build results"
for inp, build_res in zip(inputs, build_results):
if build_res.error_no != 0:
res = (
(max_float,),
build_res.error_no,
build_res.error_msg,
build_res.time_cost,
time.time(),
)
else:
res = call_func_with_timeout(timeout, timed_func, args=(inp, build_res))
if isinstance(res, TimeoutError):
if verbose >= 1:
print("*T", end="") # Run timeout
res = (
(max_float,),
MeasureErrorNo.RUN_TIMEOUT,
None,
build_res.time_cost + timeout,
time.time(),
)
measure_results.append(MeasureResult(*res))
if verbose >= 1:
print("")
return measure_results
def rpc_run_worker(index):
"""Function to be ran in the RPCRunner thread pool.
Parameters
----------
index : int
The MeasureInput and BuildResult index to be processed by the current Runner thread.
Returns
-------
res : MeasureResult
The measure result of this Runner thread.
"""
global GLOBAL_RUN_ARGUMENTS
(
inputs,
build_results,
key,
host,
port,
priority,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose,
) = GLOBAL_RUN_ARGUMENTS
max_float = 1e10 # We use 1e10 instead of sys.float_info.max for better readability in log
inp = inputs[index]
build_res = build_results[index]
if build_res.error_no != MeasureErrorNo.NO_ERROR:
return (
(max_float,),
build_res.error_no,
build_res.error_msg,
build_res.time_cost,
time.time(),
)
def timed_func():
tic = time.time()
error_no = 0
error_msg = None
try:
# upload built module
remote = request_remote(key, host, port, priority, timeout)
remote.upload(build_res.filename)
func = remote.load_module(os.path.split(build_res.filename)[1])
ctx = remote.context(str(inp.task.target), 0)
# Limitation:
# We can not get PackFunction directly in the remote mode as it is wrapped
# under the std::function. We could lift the restriction later once we fold
# the PackedFunc as an object. Currently, we pass function name to work
# around it.
f_prepare = "cache_flush_cpu_non_first_arg" if enable_cpu_cache_flush else ""
time_f = func.time_evaluator(
func.entry_name,
ctx,
number=number,
repeat=repeat,
min_repeat_ms=min_repeat_ms,
f_preproc=f_prepare,
)
# pylint: disable=broad-except
except Exception:
costs = (max_float,)
error_no = MeasureErrorNo.COMPILE_DEVICE
error_msg = make_error_msg()
if error_no == 0:
try:
args = [
ndarray.empty(get_const_tuple(x.shape), x.dtype, ctx) for x in build_res.args
]
try:
random_fill = remote.get_function("tvm.contrib.random.random_fill")
except AttributeError:
raise AttributeError(
"Please make sure USE_RANDOM is ON in the config.cmake "
"on the remote devices"
)
for arg in args:
random_fill(arg)
ctx.sync()
costs = time_f(*args).results
# clean up remote files
remote.remove(build_res.filename)
remote.remove(os.path.splitext(build_res.filename)[0] + ".so")
remote.remove("")
# pylint: disable=broad-except
except Exception:
costs = (max_float,)
error_no = MeasureErrorNo.RUNTIME_DEVICE
error_msg = make_error_msg()
shutil.rmtree(os.path.dirname(build_res.filename))
toc = time.time()
time.sleep(cooldown_interval)
if verbose >= 1:
if error_no == MeasureErrorNo.NO_ERROR:
print("*", end="")
else:
print("*E", end="") # Run error
return costs, error_no, error_msg, toc - tic + build_res.time_cost, toc
res = call_func_with_timeout(timeout, timed_func)
if isinstance(res, TimeoutError):
if verbose >= 1:
print("*T", end="") # Run timeout
res = (
(max_float,),
MeasureErrorNo.RUN_TIMEOUT,
None,
build_res.time_cost + timeout,
time.time(),
)
return res
@tvm._ffi.register_func("auto_scheduler.rpc_runner.run")
def rpc_runner_run(
inputs,
build_results,
key,
host,
port,
priority=1,
n_parallel=1,
timeout=10,
number=3,
repeat=1,
min_repeat_ms=0,
cooldown_interval=0.0,
enable_cpu_cache_flush=False,
verbose=1,
):
"""Run function of RPCRunner to test the performance of the input BuildResults.
Parameters
----------
inputs : List[MeasureInput]
The MeasureInputs to be measured.
build_results : List[BuildResult]
The BuildResults to be measured.
key : str
The key of the device registered in the RPC tracker.
host : str
The host address of the RPC Tracker.
port : int
The port of RPC Tracker.
priority : int = 1
        The priority of this run request; a larger value means a higher priority.
n_parallel : int = 1
The number of tasks run in parallel.
timeout : int = 10
        The timeout limit (in seconds) for each run.
This is used in a wrapper of the multiprocessing.Process.join().
number : int = 3
The number of times to run the generated code for taking average.
        We call these runs one `repeat` of measurement.
repeat : int = 1
The number of times to repeat the measurement.
In total, the generated code will be run (1 + number x repeat) times,
where the first "1" is warm up and will be discarded.
The returned result contains `repeat` costs,
each of which is an average of `number` costs.
min_repeat_ms : int = 0
The minimum duration of one `repeat` in milliseconds.
By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameter `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        I.e., when the run time of one `repeat` falls below this time, the `number` parameter
will be automatically increased.
cooldown_interval : float = 0.0
The cool down interval between two measurements.
enable_cpu_cache_flush: bool = False
Whether to flush cache on CPU between repeated measurements.
Flushing cache can make the measured latency of one operator closer to
its actual latency during end-to-end inference.
To make this option effective, the argument `number` should also be set to 1.
        This only has an effect on CPU tasks.
verbose: int = 1
Verbosity level. 0 for silent, 1 to output information during program measuring.
Returns
-------
res : List[MeasureResult]
The measure results of these MeasureInputs.
"""
global GLOBAL_RUN_ARGUMENTS
GLOBAL_RUN_ARGUMENTS = (
inputs,
build_results,
key,
host,
port,
priority,
timeout,
number,
repeat,
min_repeat_ms,
cooldown_interval,
enable_cpu_cache_flush,
verbose,
)
assert len(inputs) == len(build_results), "Measure input size should be equal to build results"
pool = NoDaemonPool(n_parallel)
tuple_res = pool.map(rpc_run_worker, range(len(build_results)))
pool.terminate()
pool.join()
del pool
results = []
for res in tuple_res:
results.append(MeasureResult(*res))
if verbose >= 1:
print("")
return results
|
|
# Copyright 2017 HOMEINFO - Digitale Informationssysteme GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Response types."""
from hashlib import sha256
from flask import Response as Response_
from mimeutil import mimetype as get_mimetype
from wsgilib.json import strip_json, escape_object, json_dumps
__all__ = [
'Response',
'PlainText',
'Error',
'OK',
'HTML',
'XML',
'JSON',
'Binary',
'InternalServerError']
class Response(Exception, Response_):
"""A raisable WSGI response."""
def __init__(self, msg=None, status=200, mimetype='text/plain',
charset='utf-8', encoding=True, headers=None):
"""Initializes Exception and Response superclasses."""
Exception.__init__(self, msg)
msg = msg or ''
if encoding:
msg = msg.encode(encoding=charset)
if charset is not None:
content_type = '{}; charset={}'.format(mimetype, charset)
Response_.__init__(
self, response=msg, status=status, headers=headers,
content_type=content_type)
else:
Response_.__init__(
self, response=msg, status=status, headers=headers,
mimetype=mimetype)
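# A minimal usage sketch (not part of the original module): because Response
# subclasses Exception, request handlers can raise the subclasses defined
# below and let the surrounding wsgilib application convert the raised object
# into the actual HTTP reply. The handler itself is hypothetical.
def _example_handler(record):
    """Hypothetical example: raise responses instead of returning them."""
    if record is None:
        raise Error('No such record.', status=404)
    raise JSON({'record': record})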
class PlainText(Response):
"""Returns a successful plain text response."""
def __init__(self, msg=None, status=200, charset='utf-8'):
"""Returns a plain text success response."""
super().__init__(
msg=msg, status=status, mimetype='text/plain',
charset=charset, encoding=True)
class Error(PlainText):
"""An WSGI error message."""
def __init__(self, msg=None, status=400, charset='utf-8'):
"""Returns a plain text error response."""
if 400 <= status < 600:
super().__init__(msg, status=status, charset=charset)
else:
raise ValueError('Not an error status: {}'.format(status))
class OK(PlainText):
"""Returns a successful plain text response."""
def __init__(self, msg=None, status=200, charset='utf-8'):
"""Returns a plain text success response."""
if 200 <= status < 300:
super().__init__(msg=msg, status=status, charset=charset)
else:
raise ValueError('Not a success status: {}'.format(status))
class HTML(Response):
"""Returns a successful plain text response."""
def __init__(self, msg=None, status=200, charset='utf-8'):
"""Returns a plain text success response."""
super().__init__(
msg=msg, status=status, mimetype='text/html',
charset=charset, encoding=True)
class XML(Response):
"""An XML response."""
def __init__(self, dom, status=200, charset='utf-8'):
"""Sets the dom and inherited responde attributes."""
super().__init__(
msg=dom.toxml(encoding=charset), status=status,
mimetype='application/xml', charset=charset, encoding=None)
class JSON(Response):
"""A JSON response."""
def __init__(self, dictionary, strip=False, status=200, indent=None):
"""Initializes raiseable WSGI response with
the given dictionary d as JSON response.
"""
if strip:
dictionary = strip_json(dictionary)
super().__init__(
msg=json_dumps(escape_object(dictionary), indent=indent),
status=status, mimetype='application/json', encoding=True)
class Binary(Response):
"""A binary reply."""
def __init__(self, data, status=200, etag=False, filename=None):
"""Initializes raiseable WSGI response
with binary data and an optional etag.
"""
super().__init__(
msg=data, status=status, mimetype=get_mimetype(data),
charset=None, encoding=False)
self._filename = None
self.etag = etag
self.filename = filename
@property
def response_checksum(self):
"""Returns the SHA-256 checksum of the response."""
if self.response:
sha256sum = sha256()
for response in self.response:
sha256sum.update(response)
return sha256sum.hexdigest()
raise ValueError('No response available to hash.')
@property
def etag(self):
"""Returns the e-tag."""
return self.headers.get('ETag')
@etag.setter
def etag(self, etag):
"""Sets the e-tag.
If etag is None, the etag will default
to the content's SHA-256 checksum.
"""
if etag is None:
self.headers['ETag'] = self.response_checksum
elif not etag:
self.headers.pop('ETag', None)
else:
self.headers['ETag'] = etag
@property
def filename(self):
"""Yields all file names."""
try:
content_disposition = self.headers['Content-Disposition']
except KeyError:
return None
_, filename = content_disposition.split('; ')
_, filename, _ = filename.split('"')
return filename
@filename.setter
def filename(self, filename):
"""Sets the file name.
Setting the file name to None will also remove
any Content-Disposition header field.
"""
if filename is None:
self.headers.pop('Content-Disposition', None)
else:
content_disposition = 'attachment; filename="{}"'.format(filename)
self.headers['Content-Disposition'] = content_disposition
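# A minimal usage sketch (not part of the original module), illustrating the
# etag semantics above: etag=None derives the ETag from the SHA-256 checksum
# of the payload, a non-empty string sets it verbatim, and a falsy value (the
# default) omits the header entirely.
def _example_download(data):
    """Hypothetical example: a download with a checksum ETag and a file name."""
    return Binary(data, etag=None, filename='export.bin')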
class InternalServerError(Response):
"""A code-500 WSGI response."""
def __init__(self, msg='Internal Server Error.', charset='utf-8'):
"""Indicates an internal server error."""
super().__init__(msg=msg, status=500, charset=charset)
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
from django.core.urlresolvers import reverse
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext as _
from beeswax import hive_site
from beeswax.conf import HIVE_SERVER_HOST, HIVE_SERVER_PORT, BROWSE_PARTITIONED_TABLE_LIMIT
from beeswax.design import hql_query
from beeswax.hive_site import hiveserver2_use_ssl
from beeswax.models import QueryHistory, QUERY_TYPES
from filebrowser.views import location_to_url
from desktop.lib.django_util import format_preserving_redirect
from desktop.lib.i18n import smart_str
LOG = logging.getLogger(__name__)
DBMS_CACHE = {}
DBMS_CACHE_LOCK = threading.Lock()
def get(user, query_server=None):
global DBMS_CACHE
global DBMS_CACHE_LOCK
# Avoid circular dependency
from beeswax.server.hive_server2_lib import HiveServerClientCompatible, HiveServerClient
if query_server is None:
query_server = get_query_server_config()
DBMS_CACHE_LOCK.acquire()
try:
DBMS_CACHE.setdefault(user.username, {})
if query_server['server_name'] not in DBMS_CACHE[user.username]:
DBMS_CACHE[user.username][query_server['server_name']] = HiveServer2Dbms(HiveServerClientCompatible(HiveServerClient(query_server, user)), QueryHistory.SERVER_TYPE[1][0])
return DBMS_CACHE[user.username][query_server['server_name']]
finally:
DBMS_CACHE_LOCK.release()
def get_query_server_config(name='beeswax', server=None):
if name == 'impala':
from impala.conf import SERVER_HOST as IMPALA_SERVER_HOST, SERVER_PORT as IMPALA_SERVER_PORT, \
IMPALA_PRINCIPAL, IMPERSONATION_ENABLED, QUERYCACHE_ROWS, QUERY_TIMEOUT_S
query_server = {
'server_name': 'impala',
'server_host': IMPALA_SERVER_HOST.get(),
'server_port': IMPALA_SERVER_PORT.get(),
'principal': IMPALA_PRINCIPAL.get(),
'impersonation_enabled': IMPERSONATION_ENABLED.get(),
'querycache_rows': QUERYCACHE_ROWS.get(),
'QUERY_TIMEOUT_S': QUERY_TIMEOUT_S.get(),
}
else:
kerberos_principal = hive_site.get_hiveserver2_kerberos_principal(HIVE_SERVER_HOST.get())
query_server = {
'server_name': 'beeswax', # Aka HiveServer2 now
'server_host': HIVE_SERVER_HOST.get(),
'server_port': HIVE_SERVER_PORT.get(),
'principal': kerberos_principal,
'http_url': '%(protocol)s://%(host)s:%(port)s/%(end_point)s' % {
'protocol': 'https' if hiveserver2_use_ssl() else 'http',
'host': HIVE_SERVER_HOST.get(),
'port': hive_site.hiveserver2_thrift_http_port(),
'end_point': hive_site.hiveserver2_thrift_http_path()
},
'transport_mode': 'http' if hive_site.hiveserver2_transport_mode() == 'HTTP' else 'socket',
}
LOG.debug("Query Server: %s" % query_server)
return query_server
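# A minimal usage sketch (not part of the original module): get() above is the
# usual entry point and caches one HiveServer2Dbms per user and server. The
# `user` argument is assumed to be an authenticated Django user object.
def _example_list_tables(user, database='default'):
  """Hypothetical example: list table names through the cached Dbms client."""
  db = get(user)  # or get(user, get_query_server_config(name='impala'))
  return db.get_tables(database=database)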
class QueryServerException(Exception):
# Ideally the query handle will be stored here too.
def __init__(self, e, message=''):
super(QueryServerException, self).__init__(e)
self.message = message
class NoSuchObjectException(Exception): pass
class HiveServer2Dbms(object):
def __init__(self, client, server_type):
self.client = client
self.server_type = server_type
self.server_name = self.client.query_server['server_name']
def get_table(self, database, table_name):
return self.client.get_table(database, table_name)
def get_tables(self, database='default', table_names='*'):
hql = "SHOW TABLES IN %s '%s'" % (database, table_names) # self.client.get_tables(database, table_names) is too slow
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=15.0)
if handle:
result = self.fetch(handle, rows=5000)
self.close(handle)
return [name for table in result.rows() for name in table]
else:
return []
def get_databases(self):
return self.client.get_databases()
def execute_query(self, query, design):
return self.execute_and_watch(query, design=design)
def select_star_from(self, database, table):
hql = "SELECT * FROM `%s`.`%s` %s" % (database, table.name, self._get_browse_limit_clause(table))
return self.execute_statement(hql)
def execute_statement(self, hql):
if self.server_name == 'impala':
query = hql_query(hql, QUERY_TYPES[1])
else:
query = hql_query(hql, QUERY_TYPES[0])
return self.execute_and_watch(query)
def fetch(self, query_handle, start_over=False, rows=None):
no_start_over_support = [config_variable for config_variable in self.get_default_configuration(False)
if config_variable.key == 'support_start_over'
and config_variable.value == 'false']
if no_start_over_support:
start_over = False
return self.client.fetch(query_handle, start_over, rows)
def close_operation(self, query_handle):
return self.client.close_operation(query_handle)
def open_session(self, user):
return self.client.open_session(user)
def close_session(self, session):
return self.client.close_session(session)
def cancel_operation(self, query_handle):
resp = self.client.cancel_operation(query_handle)
if self.client.query_server['server_name'] == 'impala':
resp = self.client.close_operation(query_handle)
return resp
def get_sample(self, database, table):
"""No samples if it's a view (HUE-526)"""
if not table.is_view:
limit = min(100, BROWSE_PARTITIONED_TABLE_LIMIT.get())
partition_query = ""
if table.partition_keys:
partitions = self.get_partitions(database, table, 1)
partition_query = 'WHERE ' + ' AND '.join(["%s='%s'" % (table.partition_keys[idx].name, key) for idx, key in enumerate(partitions[0].values)])
hql = "SELECT * FROM `%s`.`%s` %s LIMIT %s" % (database, table.name, partition_query, limit)
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=5.0)
if handle:
result = self.fetch(handle, rows=100)
self.close(handle)
return result
def analyze_table(self, database, table):
if self.server_name == 'impala':
hql = 'COMPUTE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
else:
hql = 'ANALYZE TABLE `%(database)s`.`%(table)s` COMPUTE STATISTICS' % {'database': database, 'table': table}
return self.execute_statement(hql)
def analyze_table_columns(self, database, table):
if self.server_name == 'impala':
hql = 'COMPUTE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
else:
hql = 'ANALYZE TABLE `%(database)s`.`%(table)s` COMPUTE STATISTICS FOR COLUMNS' % {'database': database, 'table': table}
return self.execute_statement(hql)
def get_table_stats(self, database, table):
stats = []
if self.server_name == 'impala':
hql = 'SHOW TABLE STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=5.0)
if handle:
result = self.fetch(handle, rows=100)
self.close(handle)
stats = list(result.rows())
else:
table = self.get_table(database, table)
stats = table.stats
return stats
def get_table_columns_stats(self, database, table, column):
if self.server_name == 'impala':
hql = 'SHOW COLUMN STATS `%(database)s`.`%(table)s`' % {'database': database, 'table': table}
else:
hql = 'DESCRIBE FORMATTED `%(database)s`.`%(table)s` %(column)s' % {'database': database, 'table': table, 'column': column}
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=5.0)
if handle:
result = self.fetch(handle, rows=100)
self.close(handle)
data = list(result.rows())
if self.server_name == 'impala':
data = [col for col in data if col[0] == column][0]
return [
{'col_name': data[0]},
{'data_type': data[1]},
{'distinct_count': data[2]},
{'num_nulls': data[3]},
{'max_col_len': data[4]},
{'avg_col_len': data[5]},
]
else:
return [
{'col_name': data[2][0]},
{'data_type': data[2][1]},
{'min': data[2][2]},
{'max': data[2][3]},
{'num_nulls': data[2][4]},
{'distinct_count': data[2][5]},
{'avg_col_len': data[2][6]},
{'max_col_len': data[2][7]},
{'num_trues': data[2][8]},
{'num_falses': data[2][9]}
]
else:
return []
def get_top_terms(self, database, table, column, limit=30, prefix=None):
limit = min(limit, 100)
prefix_match = ''
if prefix:
prefix_match = "WHERE CAST(%(column)s AS STRING) LIKE '%(prefix)s%%'" % {'column': column, 'prefix': prefix}
hql = 'SELECT %(column)s, COUNT(*) AS ct FROM `%(database)s`.`%(table)s` %(prefix_match)s GROUP BY %(column)s ORDER BY ct DESC LIMIT %(limit)s' % {
'database': database, 'table': table, 'column': column, 'prefix_match': prefix_match, 'limit': limit,
}
query = hql_query(hql)
handle = self.execute_and_wait(query, timeout_sec=60.0) # Hive is very slow
if handle:
result = self.fetch(handle, rows=limit)
self.close(handle)
return list(result.rows())
else:
return []
def drop_table(self, database, table):
if table.is_view:
hql = "DROP VIEW `%s`.`%s`" % (database, table.name,)
else:
hql = "DROP TABLE `%s`.`%s`" % (database, table.name,)
return self.execute_statement(hql)
def load_data(self, database, table, form, design):
hql = "LOAD DATA INPATH"
hql += " '%s'" % form.cleaned_data['path']
if form.cleaned_data['overwrite']:
hql += " OVERWRITE"
hql += " INTO TABLE "
hql += "`%s`.`%s`" % (database, table.name,)
if form.partition_columns:
hql += " PARTITION ("
vals = []
for key, column_name in form.partition_columns.iteritems():
vals.append("%s='%s'" % (column_name, form.cleaned_data[key]))
hql += ", ".join(vals)
hql += ")"
query = hql_query(hql, database)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def drop_tables(self, database, tables, design):
hql = []
for table in tables:
if table.is_view:
hql.append("DROP VIEW `%s`.`%s`" % (database, table.name,))
else:
hql.append("DROP TABLE `%s`.`%s`" % (database, table.name,))
query = hql_query(';'.join(hql), database)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def invalidate_tables(self, database, tables):
handle = None
for table in tables:
try:
hql = "INVALIDATE METADATA `%s`.`%s`" % (database, table,)
query = hql_query(hql, database, query_type=QUERY_TYPES[1])
handle = self.execute_and_wait(query, timeout_sec=10.0)
except Exception, e:
LOG.warn('Refresh tables cache out of sync: %s' % smart_str(e))
finally:
if handle:
self.close(handle)
def drop_database(self, database):
return self.execute_statement("DROP DATABASE `%s`" % database)
def drop_databases(self, databases, design):
hql = []
for database in databases:
hql.append("DROP DATABASE `%s`" % database)
query = hql_query(';'.join(hql), database)
design.data = query.dumps()
design.save()
return self.execute_query(query, design)
def _get_and_validate_select_query(self, design, query_history):
query = design.get_query_statement(query_history.statement_number)
if not query.strip().lower().startswith('select'):
raise Exception(_('Only SELECT statements can be saved. Provided query: %(query)s') % {'query': query})
return query
def insert_query_into_directory(self, query_history, target_dir):
design = query_history.design.get_design()
database = design.query['database']
self.use(database)
query = self._get_and_validate_select_query(design, query_history)
hql = "INSERT OVERWRITE DIRECTORY '%s' %s" % (target_dir, query)
return self.execute_statement(hql)
def create_table_as_a_select(self, request, query_history, target_database, target_table, result_meta):
design = query_history.design.get_design()
database = design.query['database']
# Case 1: Hive Server 2 backend or results straight from an existing table
if result_meta.in_tablename:
self.use(database)
query = self._get_and_validate_select_query(design, query_history)
hql = 'CREATE TABLE %s.%s AS %s' % (target_database, target_table, query)
query_history = self.execute_statement(hql)
else:
# Case 2: The results are in some temporary location
# Beeswax backward compatibility and optimization
# 1. Create table
cols = ''
schema = result_meta.schema
for i, field in enumerate(schema.fieldSchemas):
if i != 0:
cols += ',\n'
cols += '`%s` %s' % (field.name, field.type)
# The representation of the delimiter is messy.
      # It came from Java as a string, which might have been converted from an integer.
# So it could be "1" (^A), or "10" (\n), or "," (a comma literally).
delim = result_meta.delim
if not delim.isdigit():
delim = str(ord(delim))
hql = '''
CREATE TABLE `%s` (
%s
)
ROW FORMAT DELIMITED
FIELDS TERMINATED BY '\%s'
STORED AS TextFile
''' % (target_table, cols, delim.zfill(3))
query = hql_query(hql)
self.execute_and_wait(query)
try:
# 2. Move the results into the table's storage
table_obj = self.get_table('default', target_table)
table_loc = request.fs.urlsplit(table_obj.path_location)[2]
result_dir = request.fs.urlsplit(result_meta.table_dir)[2]
request.fs.rename_star(result_dir, table_loc)
LOG.debug("Moved results from %s to %s" % (result_meta.table_dir, table_loc))
request.info(request, _('Saved query results as new table %(table)s.') % {'table': target_table})
query_history.save_state(QueryHistory.STATE.expired)
except Exception, ex:
query = hql_query('DROP TABLE `%s`' % target_table)
try:
self.execute_and_wait(query)
except Exception, double_trouble:
LOG.exception('Failed to drop table "%s" as well: %s' % (target_table, double_trouble))
raise ex
url = format_preserving_redirect(request, reverse('metastore:index'))
return query_history
def use(self, database):
query = hql_query('USE %s' % database)
return self.client.use(query)
def get_log(self, query_handle, start_over=True):
return self.client.get_log(query_handle, start_over)
def get_state(self, handle):
return self.client.get_state(handle)
def get_operation_status(self, handle):
return self.client.get_operation_status(handle)
def execute_and_wait(self, query, timeout_sec=30.0, sleep_interval=0.5):
"""
    Run the query and check its status until it finishes or times out.
"""
handle = self.client.query(query)
curr = time.time()
end = curr + timeout_sec
while curr <= end:
state = self.client.get_state(handle)
if state not in (QueryHistory.STATE.running, QueryHistory.STATE.submitted):
return handle
time.sleep(sleep_interval)
curr = time.time()
try:
self.cancel_operation(handle)
except:
self.close_operation(handle)
return None
def execute_next_statement(self, query_history, hql_query):
if query_history.is_success() or query_history.is_expired():
# We need to go to the next statement only if the previous one passed
query_history.statement_number += 1
else:
# We need to update the query in case it was fixed
query_history.refresh_design(hql_query)
query_history.last_state = QueryHistory.STATE.submitted.index
query_history.save()
query = query_history.design.get_design()
return self.execute_and_watch(query, query_history=query_history)
def execute_and_watch(self, query, design=None, query_history=None):
"""
Run query and return a QueryHistory object in order to see its progress on a Web page.
"""
hql_query = query.hql_query
if query_history is None:
query_history = QueryHistory.build(
owner=self.client.user,
query=hql_query,
server_host='%(server_host)s' % self.client.query_server,
server_port='%(server_port)d' % self.client.query_server,
server_name='%(server_name)s' % self.client.query_server,
server_type=self.server_type,
last_state=QueryHistory.STATE.submitted.index,
design=design,
notify=query.query.get('email_notify', False),
query_type=query.query['type'],
statement_number=0
)
query_history.save()
LOG.debug("Made new QueryHistory id %s user %s query: %s..." % (query_history.id, self.client.user, query_history.query[:25]))
try:
handle = self.client.query(query, query_history.statement_number)
if not handle.is_valid():
msg = _("Server returning invalid handle for query id %(id)d [%(query)s]...") % {'id': query_history.id, 'query': query[:40]}
raise QueryServerException(msg)
except QueryServerException as ex:
LOG.exception(ex)
# Kind of expected (hql compile/syntax error, etc.)
if hasattr(ex, 'handle') and ex.handle:
query_history.server_id, query_history.server_guid = ex.handle.id, ex.handle.id
query_history.log_context = ex.handle.log_context
query_history.save_state(QueryHistory.STATE.failed)
raise ex
# All good
query_history.server_id, query_history.server_guid = handle.get()
query_history.operation_type = handle.operation_type
query_history.has_results = handle.has_result_set
query_history.modified_row_count = handle.modified_row_count
query_history.log_context = handle.log_context
query_history.query_type = query.query['type']
query_history.set_to_running()
query_history.save()
LOG.debug("Updated QueryHistory id %s user %s statement_number: %s" % (query_history.id, self.client.user, query_history.statement_number))
return query_history
def get_results_metadata(self, handle):
return self.client.get_results_metadata(handle)
def close(self, handle):
return self.client.close(handle)
def get_partitions(self, db_name, table, max_parts=None):
if max_parts is None or max_parts > BROWSE_PARTITIONED_TABLE_LIMIT.get():
max_parts = BROWSE_PARTITIONED_TABLE_LIMIT.get()
# DB name not supported in SHOW PARTITIONS
self.use(db_name)
return self.client.get_partitions(db_name, table.name, max_parts)
def get_partition(self, db_name, table_name, partition_id):
table = self.get_table(db_name, table_name)
partitions = self.get_partitions(db_name, table, max_parts=None)
partition_query = ""
for idx, key in enumerate(partitions[partition_id].values):
partition_query += (idx > 0 and " AND " or "") + table.partition_keys[idx].name + "='%s'" % key
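# For illustration (hypothetical partition): keys ['year', 'month'] with
# values ['2024', '05'] yield the clause "year='2024' AND month='05'".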
hql = "SELECT * FROM `%s`.`%s` WHERE %s" % (db_name, table_name, partition_query)
return self.execute_statement(hql)
def explain(self, query):
return self.client.explain(query)
def getStatus(self):
return self.client.getStatus()
def get_default_configuration(self, include_hadoop):
return self.client.get_default_configuration(include_hadoop)
def _get_browse_limit_clause(self, table):
"""Get the limit clause when browsing a partitioned table"""
if table.partition_keys:
limit = BROWSE_PARTITIONED_TABLE_LIMIT.get()
if limit > 0:
return "LIMIT %d" % (limit,)
return ""
class Table:
"""
Represents the metadata of a Hive Table.
"""
@property
def hdfs_link(self):
return location_to_url(self.path_location)
class DataTable:
"""
Represents the data of a Hive Table.
If the dataset has more rows, a new fetch should be done in order to return a new data table with the next rows.
"""
pass
# TODO decorator?
def expand_exception(exc, db, handle=None):
try:
if handle is not None:
log = db.get_log(handle)
elif hasattr(exc, 'get_rpc_handle') or hasattr(exc, 'log_context'):
log = db.get_log(exc)
else:
log = ''
except Exception as e:
# Always show something, even if server has died on the job.
log = _("Could not retrieve logs: %s." % e)
if not exc.args or not exc.args[0]:
error_message = _("Unknown exception.")
else:
error_message = force_unicode(exc.args[0], strings_only=True, errors='replace')
return error_message, log
|
|
import json
from django.core import exceptions, serializers
from django.forms import Form
from . import PostgreSQLTestCase
from .models import HStoreModel
try:
from django.contrib.postgres import forms
from django.contrib.postgres.fields import HStoreField
from django.contrib.postgres.validators import KeysValidator
except ImportError:
pass
class SimpleTests(PostgreSQLTestCase):
apps = ['django.contrib.postgres']
def test_save_load_success(self):
value = {'a': 'b'}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
def test_null(self):
instance = HStoreModel(field=None)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, None)
def test_value_null(self):
value = {'a': None}
instance = HStoreModel(field=value)
instance.save()
reloaded = HStoreModel.objects.get()
self.assertEqual(reloaded.field, value)
class TestQuerying(PostgreSQLTestCase):
def setUp(self):
self.objs = [
HStoreModel.objects.create(field={'a': 'b'}),
HStoreModel.objects.create(field={'a': 'b', 'c': 'd'}),
HStoreModel.objects.create(field={'c': 'd'}),
HStoreModel.objects.create(field={}),
HStoreModel.objects.create(field=None),
]
def test_exact(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__exact={'a': 'b'}),
self.objs[:1]
)
def test_contained_by(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contained_by={'a': 'b', 'c': 'd'}),
self.objs[:4]
)
def test_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__contains={'a': 'b'}),
self.objs[:2]
)
def test_in_generator(self):
def search():
yield {'a': 'b'}
self.assertSequenceEqual(
HStoreModel.objects.filter(field__in=search()),
self.objs[:1]
)
def test_has_key(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_key='c'),
self.objs[1:3]
)
def test_has_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_keys=['a', 'c']),
self.objs[1:2]
)
def test_has_any_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__has_any_keys=['a', 'c']),
self.objs[:3]
)
def test_key_transform(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a='b'),
self.objs[:2]
)
def test_keys(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys=['a']),
self.objs[:1]
)
def test_values(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values=['b']),
self.objs[:1]
)
def test_field_chaining(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__contains='b'),
self.objs[:2]
)
def test_keys_contains(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__keys__contains=['a']),
self.objs[:2]
)
def test_values_overlap(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(field__values__overlap=['b', 'd']),
self.objs[:3]
)
def test_key_isnull(self):
obj = HStoreModel.objects.create(field={'a': None})
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__isnull=True),
self.objs[2:5] + [obj]
)
self.assertSequenceEqual(
HStoreModel.objects.filter(field__a__isnull=False),
self.objs[:2]
)
def test_usage_in_subquery(self):
self.assertSequenceEqual(
HStoreModel.objects.filter(id__in=HStoreModel.objects.filter(field__a='b')),
self.objs[:2]
)
class TestSerialization(PostgreSQLTestCase):
test_data = '[{"fields": {"field": "{\\"a\\": \\"b\\"}"}, "model": "postgres_tests.hstoremodel", "pk": null}]'
def test_dumping(self):
instance = HStoreModel(field={'a': 'b'})
data = serializers.serialize('json', [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.field, {'a': 'b'})
class TestValidation(PostgreSQLTestCase):
def test_not_a_string(self):
field = HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean({'a': 1}, None)
self.assertEqual(cm.exception.code, 'not_a_string')
self.assertEqual(cm.exception.message % cm.exception.params, 'The value of "a" is not a string.')
class TestFormField(PostgreSQLTestCase):
def test_valid(self):
field = forms.HStoreField()
value = field.clean('{"a": "b"}')
self.assertEqual(value, {'a': 'b'})
def test_invalid_json(self):
field = forms.HStoreField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('{"a": "b"')
self.assertEqual(cm.exception.messages[0], 'Could not load JSON data.')
self.assertEqual(cm.exception.code, 'invalid_json')
def test_not_string_values(self):
field = forms.HStoreField()
value = field.clean('{"a": 1}')
self.assertEqual(value, {'a': '1'})
def test_empty(self):
field = forms.HStoreField(required=False)
value = field.clean('')
self.assertEqual(value, {})
def test_model_field_formfield(self):
model_field = HStoreField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, forms.HStoreField)
def test_field_has_changed(self):
class HStoreFormTest(Form):
f1 = forms.HStoreField()
form_w_hstore = HStoreFormTest()
self.assertFalse(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'})
self.assertTrue(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'}, initial={'f1': '{"a": 1}'})
self.assertFalse(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 2}'}, initial={'f1': '{"a": 1}'})
self.assertTrue(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 1}'}, initial={'f1': {"a": 1}})
self.assertFalse(form_w_hstore.has_changed())
form_w_hstore = HStoreFormTest({'f1': '{"a": 2}'}, initial={'f1': {"a": 1}})
self.assertTrue(form_w_hstore.has_changed())
class TestValidator(PostgreSQLTestCase):
def test_simple_valid(self):
validator = KeysValidator(keys=['a', 'b'])
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
def test_missing_keys(self):
validator = KeysValidator(keys=['a', 'b'])
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some keys were missing: b')
self.assertEqual(cm.exception.code, 'missing_keys')
def test_strict_valid(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
validator({'a': 'foo', 'b': 'bar'})
def test_extra_keys(self):
validator = KeysValidator(keys=['a', 'b'], strict=True)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_custom_messages(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Foobar')
self.assertEqual(cm.exception.code, 'missing_keys')
with self.assertRaises(exceptions.ValidationError) as cm:
validator({'a': 'foo', 'b': 'bar', 'c': 'baz'})
self.assertEqual(cm.exception.messages[0], 'Some unknown keys were provided: c')
self.assertEqual(cm.exception.code, 'extra_keys')
def test_deconstruct(self):
messages = {
'missing_keys': 'Foobar',
}
validator = KeysValidator(keys=['a', 'b'], strict=True, messages=messages)
path, args, kwargs = validator.deconstruct()
self.assertEqual(path, 'django.contrib.postgres.validators.KeysValidator')
self.assertEqual(args, ())
self.assertEqual(kwargs, {'keys': ['a', 'b'], 'strict': True, 'messages': messages})
|
|
from canvasapi.canvas_object import CanvasObject
from canvasapi.exceptions import RequiredFieldMissing
from canvasapi.paginated_list import PaginatedList
from canvasapi.util import combine_kwargs, obj_or_id
class Module(CanvasObject):
def __str__(self):
return "{} ({})".format(self.name, self.id)
def edit(self, **kwargs):
"""
Update this module.
:calls: `PUT /api/v1/courses/:course_id/modules/:id \
<https://canvas.instructure.com/doc/api/modules.html#method.context_modules_api.update>`_
:rtype: :class:`canvasapi.module.Module`
"""
response = self._requester.request(
"PUT",
"courses/{}/modules/{}".format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs),
)
module_json = response.json()
module_json.update({"course_id": self.course_id})
return Module(self._requester, module_json)
def delete(self):
"""
Delete this module.
:calls: `DELETE /api/v1/courses/:course_id/modules/:id \
<https://canvas.instructure.com/doc/api/modules.html#method.context_modules_api.destroy>`_
:rtype: :class:`canvasapi.module.Module`
"""
response = self._requester.request(
"DELETE", "courses/{}/modules/{}".format(self.course_id, self.id)
)
module_json = response.json()
module_json.update({"course_id": self.course_id})
return Module(self._requester, module_json)
def relock(self):
"""
Reset module progressions to their default locked state and recalculate
them based on the current requirements.
Adding progression requirements to an active course will not lock students
out of modules they have already unlocked unless this action is called.
:calls: `PUT /api/v1/courses/:course_id/modules/:id/relock \
<https://canvas.instructure.com/doc/api/modules.html#method.context_modules_api.relock>`_
:rtype: :class:`canvasapi.module.Module`
"""
response = self._requester.request(
"PUT", "courses/{}/modules/{}/relock".format(self.course_id, self.id)
)
module_json = response.json()
module_json.update({"course_id": self.course_id})
return Module(self._requester, module_json)
def get_module_items(self, **kwargs):
"""
List all of the items in this module.
:calls: `GET /api/v1/courses/:course_id/modules/:module_id/items \
<https://canvas.instructure.com/doc/api/modules.html#method.context_module_items_api.index>`_
:rtype: :class:`canvasapi.paginated_list.PaginatedList` of
:class:`canvasapi.module.ModuleItem`
"""
return PaginatedList(
ModuleItem,
self._requester,
"GET",
"courses/{}/modules/{}/items".format(self.course_id, self.id),
{"course_id": self.course_id},
_kwargs=combine_kwargs(**kwargs),
)
def get_module_item(self, module_item, **kwargs):
"""
Retrieve a module item by ID.
:calls: `GET /api/v1/courses/:course_id/modules/:module_id/items/:id \
<https://canvas.instructure.com/doc/api/modules.html#method.context_module_items_api.show>`_
:param module_item: The object or ID of the module item.
:type module_item: :class:`canvasapi.module.ModuleItem` or int
:rtype: :class:`canvasapi.module.ModuleItem`
"""
module_item_id = obj_or_id(module_item, "module_item", (ModuleItem,))
response = self._requester.request(
"GET",
"courses/{}/modules/{}/items/{}".format(
self.course_id, self.id, module_item_id
),
_kwargs=combine_kwargs(**kwargs),
)
module_item_json = response.json()
module_item_json.update({"course_id": self.course_id})
return ModuleItem(self._requester, module_item_json)
def create_module_item(self, module_item, **kwargs):
"""
Create a module item.
:calls: `POST /api/v1/courses/:course_id/modules/:module_id/items \
<https://canvas.instructure.com/doc/api/modules.html#method.context_module_items_api.create>`_
:param module_item: The attributes to create the module item with.
:type module_item: dict
:returns: The created module item.
:rtype: :class:`canvasapi.module.ModuleItem`
"""
unrequired_types = ["ExternalUrl", "Page", "SubHeader"]
if isinstance(module_item, dict) and "type" in module_item:
# content_id is not required for unrequired_types
if module_item["type"] in unrequired_types or "content_id" in module_item:
kwargs["module_item"] = module_item
else:
raise RequiredFieldMissing(
"Dictionary with key 'content_id' is required."
)
else:
raise RequiredFieldMissing("Dictionary with key 'type' is required.")
response = self._requester.request(
"POST",
"courses/{}/modules/{}/items".format(self.course_id, self.id),
_kwargs=combine_kwargs(**kwargs),
)
module_item_json = response.json()
module_item_json.update({"course_id": self.course_id})
return ModuleItem(self._requester, module_item_json)
class ModuleItem(CanvasObject):
def __str__(self):
return "{} ({})".format(self.title, self.id)
def edit(self, **kwargs):
"""
Update this module item.
:calls: `PUT /api/v1/courses/:course_id/modules/:module_id/items/:id \
<https://canvas.instructure.com/doc/api/modules.html#method.context_module_items_api.update>`_
:returns: The updated module item.
:rtype: :class:`canvasapi.module.ModuleItem`
"""
response = self._requester.request(
"PUT",
"courses/{}/modules/{}/items/{}".format(
self.course_id, self.module_id, self.id
),
_kwargs=combine_kwargs(**kwargs),
)
module_item_json = response.json()
module_item_json.update({"course_id": self.course_id})
return ModuleItem(self._requester, module_item_json)
def delete(self):
"""
Delete this module item.
:calls: `DELETE /api/v1/courses/:course_id/modules/:module_id/items/:id \
<https://canvas.instructure.com/doc/api/modules.html#method.context_module_items_api.destroy>`_
:rtype: :class:`canvasapi.module.ModuleItem`
"""
response = self._requester.request(
"DELETE",
"courses/{}/modules/{}/items/{}".format(
self.course_id, self.module_id, self.id
),
)
module_item_json = response.json()
module_item_json.update({"course_id": self.course_id})
return ModuleItem(self._requester, module_item_json)
def complete(self):
"""
Mark this module item as done.
:calls: `PUT /api/v1/courses/:course_id/modules/:module_id/items/:id/done \
<https://canvas.instructure.com/doc/api/modules.html#method.context_module_items_api.mark_as_done>`_
:rtype: :class:`canvasapi.module.ModuleItem`
"""
response = self._requester.request(
"PUT",
"courses/{}/modules/{}/items/{}/done".format(
self.course_id, self.module_id, self.id
),
)
module_item_json = response.json()
module_item_json.update({"course_id": self.course_id})
return ModuleItem(self._requester, module_item_json)
def uncomplete(self):
"""
Mark this module item as not done.
:calls: `DELETE /api/v1/courses/:course_id/modules/:module_id/items/:id/done \
<https://canvas.instructure.com/doc/api/modules.html#method.context_module_items_api.mark_as_done>`_
:rtype: :class:`canvasapi.module.ModuleItem`
"""
response = self._requester.request(
"DELETE",
"courses/{}/modules/{}/items/{}/done".format(
self.course_id, self.module_id, self.id
),
)
module_item_json = response.json()
module_item_json.update({"course_id": self.course_id})
return ModuleItem(self._requester, module_item_json)
|
|
import re
class Program:
def __init__(self):
self.name = ""
self.weight = 0
self.stacked_program_names = []
self.stacked_programs = []
self.stack_weight = 0
def __str__(self):
return ("Program{name=" + self.name
+ ", weight=" + str(self.weight)
+ ", stacked_program_names=" + str(self.stacked_program_names)
+ ", stacked_programs=" + str(self.stacked_programs)
+ ", stack_weight=" + str(self.stack_weight)
+ "}")
def __repr__(self):
return str(self)
def parse_program(self, program_string):
self.name = re.search('^[a-zA-Z]+', program_string).group(0)
self.weight = int(re.search('\d+', program_string).group(0))
programs_found = re.search('[ a-zA-Z,]+$', program_string)
if programs_found:
programs = programs_found.group(0).strip()
self.stacked_program_names = programs.split(", ")
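# Example input lines (format used by day7_input.txt; values illustrative):
#   "fwft (72) -> ktlj, cntj, xhth" parses to name='fwft', weight=72,
#   stacked_program_names=['ktlj', 'cntj', 'xhth']; a line with no arrow,
#   e.g. "ktlj (57)", leaves stacked_program_names empty.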
def read_file(file_name):
data = ""
with open(file_name, "r") as file:
data = file.read()
return data
def parse_input():
program_lines = read_file("day7_input.txt").split("\n")
programs = []
for program_line in program_lines:
program = Program()
program.parse_program(program_line)
programs.append(program)
return programs
def get_non_top_programs(programs):
non_top_programs = []
for program in programs:
if program.stacked_program_names:
non_top_programs.append(program)
return non_top_programs
def part1():
print("test")
programs = parse_input()
non_top_programs = get_non_top_programs(programs)
stacked_programs = set()
for program in non_top_programs:
for stacked_program in program.stacked_program_names:
stacked_programs.add(stacked_program)
for program in non_top_programs:
if program.name not in stacked_programs:
print(program)
def calculate_stack_weight(programs_map, program_name):
program = programs_map.get(program_name)
if not program.stacked_program_names:
program.stack_weight = program.weight
return program.stack_weight
else:
stack_weight = 0
for stacked_program_name in program.stacked_program_names:
stack_weight += calculate_stack_weight(programs_map, stacked_program_name)
program.stack_weight = stack_weight + program.weight
return program.stack_weight
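# Worked example (made-up weights): a program of weight 41 holding two leaves
# of weights 66 and 61 gets stack_weight = 41 + 66 + 61 = 168; a leaf's
# stack_weight is simply its own weight.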
"""
To find the unbalanced program:
Pass down the expected_stack_weight. expected_stack_weight for higher programs is program.stack_weight - program.weight
if program.stack_weight == expected_stack_weight:
return None
else:
TODO: the below is not the correct expected stack weight
calculate expected stack weight (program.stack_weight - program.weight)
loop through stacked programs
if all equal weight, return this program name (it breaks the stack)
if all not equal weight, recursion with this expected stack weight
"""
# def find_unbalanced_program(programs_map, program_name, expected_stack_weight, is_first):
# program = programs_map.get(program_name)
# print(program, expected_stack_weight)
# if program.stack_weight == expected_stack_weight and not is_first:
# return None
# elif not program.stacked_program_names:
# return program.name, expected_stack_weight - program.weight
# else:
# new_expected_stack_weight = program.stack_weight - program.weight
# stacked_programs = []
# for stacked_program_name in program.stacked_program_names:
# stacked_programs.append(programs_map.get(stacked_program_name))
# is_all_equal = True
# for i in range(0, len(stacked_programs) - 1):
# if stacked_programs[i].weight != stacked_programs[i + 1].weight:
# is_all_equal = False
# if is_all_equal:
# fixed_weight = (expected_stack_weight - program.stack_weight) + program.weight
# print(expected_stack_weight)
# print(program.stack_weight)
# print(program.weight)
# return program.name, fixed_weight
# else:
# result = None
# if len(stacked_programs) == 2:
# result = find_unbalanced_program(programs_map, stacked_program.name, new_expected_stack_weight, False)
# for stacked_program in stacked_programs:
# result = find_unbalanced_program(programs_map, stacked_program.name, new_expected_stack_weight, False)
# if result:
# return result
# return result
"""
count_weight1 = 1
weight1 = programs[0].weight
weight2 = -1 *don't need*
weight2_index = -1
for rest of programs
compare programs[i].weight to weight1 and increase count if equal
if different, set weight2_index
if weight1 count == 1: programs[0] is invalid
else programs[weight2_index] is invalid
"""
def find_unbalanced_program(programs_map, program_name):
program = programs_map.get(program_name)
# Handle leaf nodes
if not program.stacked_program_names:
print("1")
return None
# Handle 2 splits differently
elif len(program.stacked_program_names) == 2:
print("2")
return None
else:
stacked_programs = []
for stacked_program_name in program.stacked_program_names:
stacked_programs.append(programs_map.get(stacked_program_name))
count_weight1 = 0
weight2_index = -1
for i in range(0, len(stacked_programs)):
if stacked_programs[i].stack_weight == stacked_programs[0].stack_weight:
count_weight1 += 1
else:
weight2_index = i
if count_weight1 == len(stacked_programs):
return program.name
elif count_weight1 == 1:
invalid_program_stack = stacked_programs[0]
else:
invalid_program_stack = stacked_programs[weight2_index]
return find_unbalanced_program(programs_map, invalid_program_stack.name)
def part2():
bottom_program_name = "dgoocsw"
programs = parse_input()
programs_map = {}
for program in programs:
programs_map[program.name] = program
calculate_stack_weight(programs_map, bottom_program_name)
print(programs)
print(programs_map.get("ifajhb"))
print(programs_map.get("gqmls"))
print(programs_map.get("sgmbw"))
print(programs_map.get("ddkhuyg"))
print(programs_map.get("rhqocy"))
print("start find_unbalanced_program")
# bottom_program = programs_map.get(bottom_program_name)
unbalanced_program_name = find_unbalanced_program(programs_map, bottom_program_name)
print(unbalanced_program_name)
print(programs_map.get(unbalanced_program_name))
# -8
print(programs_map.get("marnqj"), programs_map.get("moxiw"), programs_map.get("sxijke"), programs_map.get("ojgdow"), programs_map.get("fnlapoh"))
# print(programs_map.get("upair"), programs_map.get("mkrrlbv"), programs_map.get("vqkwlq"), programs_map.get("wsrmfr"))
part1()
part2()
|
|
# Copyright (C) 2014, 2015, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import re
import shlex
import threading
import time
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _LE, _LI, _LW
from cinder import utils
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
GETSTORAGEARRAY_ONCE = 100
MAX_SNAPSHOT_COUNT = 1021
SNAP_LAST_PATH_SSB = '0xB958,0x020A'
HOST_IO_SSB = '0xB958,0x0233'
INVALID_LUN_SSB = '0x2E20,0x0000'
INTERCEPT_LDEV_SSB = '0x2E22,0x0001'
HOSTGROUP_INSTALLED = '0xB956,0x3173'
RESOURCE_LOCKED = 'SSB=0x2E11,0x2205'
LDEV_STATUS_WAITTIME = 120
LUN_DELETE_WAITTIME = basic_lib.DEFAULT_PROCESS_WAITTIME
LUN_DELETE_INTERVAL = 3
EXEC_MAX_WAITTIME = 30
EXEC_RETRY_INTERVAL = 5
HORCM_WAITTIME = 1
PAIR_TYPE = ('HORC', 'MRCF', 'QS')
PERMITTED_TYPE = ('CVS', 'HDP', 'HDT')
RAIDCOM_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_'
HORCMGR_LOCK_FILE = basic_lib.LOCK_DIR + 'horcmgr_'
RESOURCE_LOCK_FILE = basic_lib.LOCK_DIR + 'raidcom_resource_'
STATUS_TABLE = {
'SMPL': basic_lib.SMPL,
'COPY': basic_lib.COPY,
'RCPY': basic_lib.COPY,
'PAIR': basic_lib.PAIR,
'PFUL': basic_lib.PAIR,
'PSUS': basic_lib.PSUS,
'PFUS': basic_lib.PSUS,
'SSUS': basic_lib.PSUS,
'PSUE': basic_lib.PSUE,
}
NOT_SET = '-'
HORCM_RUNNING = 1
COPY_GROUP = basic_lib.NAME_PREFIX + '%s%s%03X%d'
SNAP_NAME = basic_lib.NAME_PREFIX + 'snap'
LDEV_NAME = basic_lib.NAME_PREFIX + 'ldev-%d-%d'
MAX_MUNS = 3
EX_ENAUTH = 202
EX_ENOOBJ = 205
EX_CMDRJE = 221
EX_CMDIOE = 237
EX_INVCMD = 240
EX_INVMOD = 241
EX_ENODEV = 246
EX_ENOENT = 247
EX_OPTINV = 248
EX_ATTDBG = 250
EX_ATTHOR = 251
EX_COMERR = 255
EX_UNKOWN = -1
NO_SUCH_DEVICE = (EX_ENODEV, EX_ENOENT)
COMMAND_IO_TO_RAID = (EX_CMDRJE, EX_CMDIOE, EX_INVCMD, EX_INVMOD, EX_OPTINV)
HORCM_ERROR = (EX_ATTDBG, EX_ATTHOR, EX_COMERR)
MAX_HOSTGROUPS = 254
MAX_HLUN = 2047
DEFAULT_PORT_BASE = 31000
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.StrOpt('hitachi_horcm_numbers',
default='200,201',
help='Instance numbers for HORCM'),
cfg.StrOpt('hitachi_horcm_user',
help='Username of storage system for HORCM'),
cfg.StrOpt('hitachi_horcm_password',
help='Password of storage system for HORCM',
secret=True),
cfg.BoolOpt('hitachi_horcm_add_conf',
default=True,
help='Add to HORCM configuration'),
cfg.IntOpt('hitachi_horcm_resource_lock_timeout',
default=600,
help='Timeout until a resource lock is released, in seconds. '
'The value must be between 0 and 7200.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
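# Example backend configuration consuming these options (section name and
# values are illustrative only):
#   [hitachi_horcm_backend]
#   hitachi_horcm_numbers = 200,201
#   hitachi_horcm_user = horcmuser
#   hitachi_horcm_password = secret
#   hitachi_horcm_add_conf = True
#   hitachi_horcm_resource_lock_timeout = 600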
def horcm_synchronized(function):
@functools.wraps(function)
def wrapper(*args, **kargs):
if len(args) == 1:
inst = args[0].conf.hitachi_horcm_numbers[0]
raidcom_obj_lock = args[0].raidcom_lock
else:
inst = args[1]
raidcom_obj_lock = args[0].raidcom_pair_lock
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
lock = basic_lib.get_process_lock(raidcom_lock_file)
with raidcom_obj_lock, lock:
return function(*args, **kargs)
return wrapper
def storage_synchronized(function):
@functools.wraps(function)
def wrapper(*args, **kargs):
serial = args[0].conf.hitachi_serial_number
resource_lock = args[0].resource_lock
resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial)
lock = basic_lib.get_process_lock(resource_lock_file)
with resource_lock, lock:
return function(*args, **kargs)
return wrapper
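# Both decorators serialize callers with a thread lock plus an inter-process
# file lock; storage_synchronized keys the lock on hitachi_serial_number and
# is what keeps e.g. comm_add_ldev() and comm_delete_ldev() below from running
# concurrently against the same array.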
class HBSDHORCM(basic_lib.HBSDBasicLib):
def __init__(self, conf):
super(HBSDHORCM, self).__init__(conf=conf)
self.copy_groups = [None] * MAX_MUNS
self.raidcom_lock = threading.Lock()
self.raidcom_pair_lock = threading.Lock()
self.horcmgr_lock = threading.Lock()
self.horcmgr_flock = None
self.resource_lock = threading.Lock()
def check_param(self):
numbers = self.conf.hitachi_horcm_numbers.split(',')
if len(numbers) != 2:
msg = basic_lib.output_err(601, param='hitachi_horcm_numbers')
raise exception.HBSDError(message=msg)
for i in numbers:
if not i.isdigit():
msg = basic_lib.output_err(601, param='hitachi_horcm_numbers')
raise exception.HBSDError(message=msg)
self.conf.hitachi_horcm_numbers = [int(num) for num in numbers]
inst = self.conf.hitachi_horcm_numbers[0]
pair_inst = self.conf.hitachi_horcm_numbers[1]
if inst == pair_inst:
msg = basic_lib.output_err(601, param='hitachi_horcm_numbers')
raise exception.HBSDError(message=msg)
for param in ('hitachi_horcm_user', 'hitachi_horcm_password'):
if not getattr(self.conf, param):
msg = basic_lib.output_err(601, param=param)
raise exception.HBSDError(message=msg)
if self.conf.hitachi_thin_pool_id == self.conf.hitachi_pool_id:
msg = basic_lib.output_err(601, param='hitachi_thin_pool_id')
raise exception.HBSDError(message=msg)
resource_lock_timeout = self.conf.hitachi_horcm_resource_lock_timeout
if not ((resource_lock_timeout >= 0) and
(resource_lock_timeout <= 7200)):
msg = basic_lib.output_err(
601, param='hitachi_horcm_resource_lock_timeout')
raise exception.HBSDError(message=msg)
for opt in volume_opts:
getattr(self.conf, opt.name)
def set_copy_groups(self, host_ip):
serial = self.conf.hitachi_serial_number
inst = self.conf.hitachi_horcm_numbers[1]
for mun in range(MAX_MUNS):
copy_group = COPY_GROUP % (host_ip, serial, inst, mun)
self.copy_groups[mun] = copy_group
def set_pair_flock(self):
inst = self.conf.hitachi_horcm_numbers[1]
name = '%s%d' % (HORCMGR_LOCK_FILE, inst)
self.horcmgr_flock = basic_lib.FileLock(name, self.horcmgr_lock)
return self.horcmgr_flock
def check_horcm(self, inst):
args = 'HORCMINST=%d horcmgr -check' % inst
ret, _stdout, _stderr = self.exec_command('env', args=args,
printflag=False)
return ret
def shutdown_horcm(self, inst):
ret, stdout, stderr = self.exec_command(
'horcmshutdown.sh', args=six.text_type(inst), printflag=False)
return ret
def start_horcm(self, inst):
return self.exec_command('horcmstart.sh', args=six.text_type(inst),
printflag=False)
def _wait_for_horcm_shutdown(self, inst):
if self.check_horcm(inst) != HORCM_RUNNING:
raise loopingcall.LoopingCallDone()
if self.shutdown_horcm(inst):
LOG.error(_LE("Failed to shutdown horcm."))
raise loopingcall.LoopingCallDone()
@horcm_synchronized
def restart_horcm(self, inst=None):
if inst is None:
inst = self.conf.hitachi_horcm_numbers[0]
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_horcm_shutdown, inst)
loop.start(interval=HORCM_WAITTIME).wait()
ret, stdout, stderr = self.start_horcm(inst)
if ret:
msg = basic_lib.output_err(
600, cmd='horcmstart.sh', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def restart_pair_horcm(self):
inst = self.conf.hitachi_horcm_numbers[1]
self.restart_horcm(inst=inst)
def setup_horcmgr(self, host_ip):
pair_inst = self.conf.hitachi_horcm_numbers[1]
self.set_copy_groups(host_ip)
if self.conf.hitachi_horcm_add_conf:
self.create_horcmconf()
self.create_horcmconf(inst=pair_inst)
self.restart_horcm()
with self.horcmgr_flock:
self.restart_pair_horcm()
ret, stdout, stderr = self.comm_login()
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom -login', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def _wait_for_exec_horcm(self, cmd, args, printflag, start):
if cmd == 'raidcom':
serial = self.conf.hitachi_serial_number
inst = self.conf.hitachi_horcm_numbers[0]
raidcom_obj_lock = self.raidcom_lock
args = '%s -s %s -I%d' % (args, serial, inst)
else:
inst = self.conf.hitachi_horcm_numbers[1]
raidcom_obj_lock = self.raidcom_pair_lock
args = '%s -ISI%d' % (args, inst)
user = self.conf.hitachi_horcm_user
passwd = self.conf.hitachi_horcm_password
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
lock = basic_lib.get_process_lock(raidcom_lock_file)
with raidcom_obj_lock, lock:
ret, stdout, stderr = self.exec_command(cmd, args=args,
printflag=printflag)
# The resource group may be locked by other software.
# Therefore, wait until the lock is released.
if (RESOURCE_LOCKED in stderr and
(time.time() - start <
self.conf.hitachi_horcm_resource_lock_timeout)):
return
if not ret or ret <= 127:
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
if time.time() - start >= EXEC_MAX_WAITTIME:
LOG.error(_LE("horcm command timeout."))
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
if (ret == EX_ENAUTH and
not re.search("-login %s %s" % (user, passwd), args)):
_ret, _stdout, _stderr = self.comm_login()
if _ret:
LOG.error(_LE("Failed to authenticate user."))
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
elif ret in HORCM_ERROR:
_ret = 0
with raidcom_obj_lock, lock:
if self.check_horcm(inst) != HORCM_RUNNING:
_ret, _stdout, _stderr = self.start_horcm(inst)
if _ret and _ret != HORCM_RUNNING:
LOG.error(_LE("Failed to start horcm."))
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
elif ret not in COMMAND_IO_TO_RAID:
LOG.error(_LE("Unexpected error occurs in horcm."))
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
def exec_raidcom(self, cmd, args, printflag=True):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_exec_horcm, cmd, args, printflag, time.time())
return loop.start(interval=EXEC_RETRY_INTERVAL).wait()
def comm_login(self):
rmi_user = self.conf.hitachi_horcm_user
rmi_pass = self.conf.hitachi_horcm_password
args = '-login %s %s' % (rmi_user, rmi_pass)
return self.exec_raidcom('raidcom', args, printflag=False)
def comm_reset_status(self):
self.exec_raidcom('raidcom', 'reset command_status')
def comm_get_status(self):
return self.exec_raidcom('raidcom', 'get command_status')
def get_command_error(self, stdout):
lines = stdout.splitlines()
line = shlex.split(lines[1])
return int(line[3])
def comm_get_ldev(self, ldev):
opt = 'get ldev -ldev_id %s' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def add_used_hlun(self, port, gid, used_list):
opt = 'get lun -port %s-%d' % (port, gid)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
lun = int(shlex.split(line)[3])
if lun not in used_list:
used_list.append(lun)
def get_unused_ldev(self, ldev_range):
start = ldev_range[0]
end = ldev_range[1]
while start < end:
if end - start + 1 > GETSTORAGEARRAY_ONCE:
cnt = GETSTORAGEARRAY_ONCE
else:
cnt = end - start + 1
opt = 'get ldev -ldev_id %d -cnt %d' % (start, cnt)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
ldev_num = None
for line in lines:
if re.match("LDEV :", line):
ldev_num = int(shlex.split(line)[2])
continue
if re.match("VOL_TYPE : NOT DEFINED", line):
return ldev_num
start += GETSTORAGEARRAY_ONCE
else:
msg = basic_lib.output_err(648, resource='LDEV')
raise exception.HBSDError(message=msg)
def get_hgname_gid(self, port, host_grp_name):
opt = 'get host_grp -port %s -key host_grp' % port
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[2] == host_grp_name:
return int(line[1])
return None
def get_unused_gid(self, range, port):
_min = range[0]
_max = range[1]
opt = 'get host_grp -port %s -key host_grp' % port
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
free_gid = None
for line in lines[_min + 1:]:
line = shlex.split(line)
if int(line[1]) > _max:
break
if line[2] == '-':
free_gid = int(line[1])
break
if free_gid is None:
msg = basic_lib.output_err(648, resource='GID')
raise exception.HBSDError(message=msg)
return free_gid
def comm_set_target_wwns(self, target_ports):
opt = 'get port'
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
target_wwns = {}
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
port = line[0][:5]
if target_ports and port not in target_ports:
continue
target_wwns[port] = line[10]
LOG.debug('target wwns: %s', target_wwns)
return target_wwns
def comm_get_hbawwn(self, hostgroups, wwns, port, is_detected):
opt = 'get host_grp -port %s' % port
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
found_wwns = 0
for line in lines[1:]:
line = shlex.split(line)
if not re.match(basic_lib.NAME_PREFIX, line[2]):
continue
gid = line[1]
opt = 'get hba_wwn -port %s-%s' % (port, gid)
ret, stdout, stderr = self.exec_raidcom(
'raidcom', opt, printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
hba_info = shlex.split(line)
if hba_info[3] in wwns:
hostgroups.append({'port': six.text_type(port),
'gid': int(hba_info[1]),
'initiator_wwn': hba_info[3],
'detected': is_detected})
found_wwns += 1
if len(wwns) == found_wwns:
break
if len(wwns) == found_wwns:
break
def comm_chk_login_wwn(self, wwns, port):
opt = 'get port -port %s' % port
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
login_info = shlex.split(line)
if login_info[1] in wwns:
return True
else:
return False
def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True):
security_ports = []
hostgroups = []
opt = 'get port'
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
port = line[0][:5]
if target_ports and port not in target_ports:
continue
security = True if line[7] == 'Y' else False
is_detected = None
if login:
is_detected = self.comm_chk_login_wwn(wwns, port)
if security:
self.comm_get_hbawwn(hostgroups, wwns, port, is_detected)
security_ports.append(port)
for hostgroup in hostgroups:
hgs.append(hostgroup)
return security_ports
def _get_lun(self, port, gid, ldev):
lun = None
opt = 'get lun -port %s-%d' % (port, gid)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[5] == six.text_type(ldev):
lun = int(line[3])
break
return lun
def _wait_for_delete_lun(self, hostgroup, ldev, start):
opt = 'delete lun -port %s-%d -ldev_id %d' % (hostgroup['port'],
hostgroup['gid'], ldev)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if not ret:
raise loopingcall.LoopingCallDone()
if (re.search('SSB=%s' % SNAP_LAST_PATH_SSB, stderr) and
not self.comm_get_snapshot(ldev) or
re.search('SSB=%s' % HOST_IO_SSB, stderr)):
LOG.warning(basic_lib.set_msg(310, ldev=ldev, reason=stderr))
if time.time() - start >= LUN_DELETE_WAITTIME:
msg = basic_lib.output_err(
637, method='_wait_for_delete_lun',
timeout=LUN_DELETE_WAITTIME)
raise exception.HBSDError(message=msg)
else:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_delete_lun_core(self, hostgroup, ldev):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_delete_lun, hostgroup, ldev, time.time())
loop.start(interval=LUN_DELETE_INTERVAL).wait()
def comm_delete_lun(self, hostgroups, ldev):
deleted_hostgroups = []
no_ldev_cnt = 0
for hostgroup in hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
is_deleted = False
for deleted in deleted_hostgroups:
if port == deleted['port'] and gid == deleted['gid']:
is_deleted = True
if is_deleted:
continue
try:
self.comm_delete_lun_core(hostgroup, ldev)
except exception.HBSDCmdError as ex:
no_ldev_cnt += 1
if ex.ret == EX_ENOOBJ:
if no_ldev_cnt != len(hostgroups):
continue
raise exception.HBSDNotFound
else:
raise
deleted_hostgroups.append({'port': port, 'gid': gid})
def _check_ldev_status(self, ldev, status):
opt = ('get ldev -ldev_id %s -check_status %s -time %s' %
(ldev, status, LDEV_STATUS_WAITTIME))
ret, _stdout, _stderr = self.exec_raidcom('raidcom', opt)
return ret
# Don't remove the storage_synchronized decorator.
# It is needed to prevent comm_add_ldev() and comm_delete_ldev() from
# being executed concurrently.
@storage_synchronized
def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol):
emulation = 'OPEN-V'
if is_vvol:
opt = ('add ldev -pool snap -ldev_id %d '
'-capacity %dG -emulation %s'
% (ldev, capacity, emulation))
else:
opt = ('add ldev -pool %d -ldev_id %d '
'-capacity %dG -emulation %s'
% (pool_id, ldev, capacity, emulation))
self.comm_reset_status()
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
if re.search('SSB=%s' % INTERCEPT_LDEV_SSB, stderr):
raise exception.HBSDNotFound
msg = basic_lib.output_err(
600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
if self._check_ldev_status(ldev, "NML"):
msg = basic_lib.output_err(653, ldev=ldev)
raise exception.HBSDError(message=msg)
def comm_add_hostgrp(self, port, gid, host_grp_name):
opt = 'add host_grp -port %s-%d -host_grp_name %s' % (port, gid,
host_grp_name)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
if re.search('SSB=%s' % HOSTGROUP_INSTALLED, stderr):
raise exception.HBSDNotFound
msg = basic_lib.output_err(
600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_del_hostgrp(self, port, gid, host_grp_name):
opt = 'delete host_grp -port %s-%d %s' % (port, gid, host_grp_name)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_add_hbawwn(self, port, gid, wwn):
opt = 'add hba_wwn -port %s-%s -hba_wwn %s' % (port, gid, wwn)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
@storage_synchronized
def comm_add_lun(self, unused_command, hostgroups, ldev, is_once=False):
tmp_hostgroups = hostgroups[:]
is_ok = False
used_list = []
lun = None
old_lun = None
for hostgroup in hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
self.add_used_hlun(port, gid, used_list)
lun = self._get_lun(port, gid, ldev)
# When 'lun' or 'old_lun' is 0 it is still a valid value,
# so the 'is not None' check cannot be removed.
if lun is not None:
if old_lun is not None and old_lun != lun:
msg = basic_lib.output_err(648, resource='LUN (HLUN)')
raise exception.HBSDError(message=msg)
is_ok = True
hostgroup['lun'] = lun
tmp_hostgroups.remove(hostgroup)
old_lun = lun
if is_once:
# When 'lun' is 0 it is still a valid value,
# so the 'is not None' check cannot be removed.
if lun is not None:
return
elif len(used_list) < MAX_HLUN + 1:
break
else:
tmp_hostgroups.remove(hostgroup)
if tmp_hostgroups:
used_list = []
if not used_list:
lun = 0
elif lun is None:
for i in range(MAX_HLUN + 1):
if i not in used_list:
lun = i
break
else:
raise exception.HBSDNotFound
opt = None
ret = 0
stdout = None
stderr = None
invalid_hgs_str = None
for hostgroup in tmp_hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
if not hostgroup['detected']:
if invalid_hgs_str:
invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str,
port, gid)
else:
invalid_hgs_str = '%s:%d' % (port, gid)
continue
opt = 'add lun -port %s-%d -ldev_id %d -lun_id %d' % (
port, gid, ldev, lun)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if not ret:
is_ok = True
hostgroup['lun'] = lun
if is_once:
break
else:
LOG.warning(basic_lib.set_msg(
314, ldev=ldev, lun=lun, port=port, id=gid))
if not is_ok:
if stderr:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
else:
msg = basic_lib.output_err(659, gid=invalid_hgs_str)
raise exception.HBSDError(message=msg)
# Don't remove the storage_synchronized decorator.
# It is needed to prevent comm_add_ldev() and comm_delete_ldev() from
# being executed concurrently.
@storage_synchronized
def comm_delete_ldev(self, ldev, is_vvol):
ret = -1
stdout = ""
stderr = ""
self.comm_reset_status()
opt = 'delete ldev -ldev_id %d' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
if re.search('SSB=%s' % INVALID_LUN_SSB, stderr):
raise exception.HBSDNotFound
msg = basic_lib.output_err(
600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
ret, stdout, stderr = self.comm_get_status()
if ret or self.get_command_error(stdout):
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_extend_ldev(self, ldev, old_size, new_size):
extend_size = new_size - old_size
opt = 'extend ldev -ldev_id %d -capacity %dG' % (ldev, extend_size)
ret, stdout, stderr = self.exec_raidcom('raidcom', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom %s' % opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_get_dp_pool(self, pool_id):
opt = 'get dp_pool'
ret, stdout, stderr = self.exec_raidcom('raidcom', opt,
printflag=False)
if ret:
opt = 'raidcom %s' % opt
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[1:]:
if int(shlex.split(line)[0]) == pool_id:
free_gb = int(shlex.split(line)[3]) / 1024
total_gb = int(shlex.split(line)[4]) / 1024
return total_gb, free_gb
msg = basic_lib.output_err(640, pool_id=pool_id)
raise exception.HBSDError(message=msg)
def comm_modify_ldev(self, ldev):
args = 'modify ldev -ldev_id %d -status discard_zero_page' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
LOG.warning(basic_lib.set_msg(315, ldev=ldev, reason=stderr))
def is_detected(self, port, wwn):
return self.comm_chk_login_wwn([wwn], port)
def discard_zero_page(self, ldev):
try:
self.comm_modify_ldev(ldev)
except Exception as ex:
LOG.warning(_LW('Failed to discard zero page: %s'), ex)
def comm_add_snapshot(self, pvol, svol):
pool = self.conf.hitachi_thin_pool_id
copy_size = self.conf.hitachi_copy_speed
args = ('add snapshot -ldev_id %d %d -pool %d '
'-snapshot_name %s -copy_size %d'
% (pvol, svol, pool, SNAP_NAME, copy_size))
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_delete_snapshot(self, ldev):
args = 'delete snapshot -ldev_id %d' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_modify_snapshot(self, ldev, op):
args = ('modify snapshot -ldev_id %d -snapshot_data %s' % (ldev, op))
ret, stdout, stderr = self.exec_raidcom('raidcom', args)
if ret:
msg = basic_lib.output_err(
600, cmd='raidcom %s' % args, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def _wait_for_snap_status(self, pvol, svol, status, timeout, start):
if (self.get_snap_pvol_status(pvol, svol) in status and
self.get_snap_svol_status(svol) in status):
raise loopingcall.LoopingCallDone()
if time.time() - start >= timeout:
msg = basic_lib.output_err(
637, method='_wait_for_snap_status', timeout=timeout)
raise exception.HBSDError(message=msg)
def wait_snap(self, pvol, svol, status, timeout, interval):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_snap_status, pvol,
svol, status, timeout, time.time())
loop.start(interval=interval).wait()
def comm_get_snapshot(self, ldev):
args = 'get snapshot -ldev_id %d' % ldev
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def check_snap_count(self, ldev):
stdout = self.comm_get_snapshot(ldev)
if not stdout:
return
lines = stdout.splitlines()
if len(lines) >= MAX_SNAPSHOT_COUNT + 1:
msg = basic_lib.output_err(
615, copy_method=basic_lib.THIN, pvol=ldev)
raise exception.HBSDBusy(message=msg)
def get_snap_pvol_status(self, pvol, svol):
stdout = self.comm_get_snapshot(pvol)
if not stdout:
return basic_lib.SMPL
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if int(line[6]) == svol:
return STATUS_TABLE[line[2]]
else:
return basic_lib.SMPL
def get_snap_svol_status(self, ldev):
stdout = self.comm_get_snapshot(ldev)
if not stdout:
return basic_lib.SMPL
lines = stdout.splitlines()
line = shlex.split(lines[1])
return STATUS_TABLE[line[2]]
@horcm_synchronized
def create_horcmconf(self, inst=None):
if inst is None:
inst = self.conf.hitachi_horcm_numbers[0]
serial = self.conf.hitachi_serial_number
filename = '/etc/horcm%d.conf' % inst
port = DEFAULT_PORT_BASE + inst
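# e.g. with the default 'hitachi_horcm_numbers = 200,201' this gives service
# ports 31200 and 31201 for the two generated horcm configuration files.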
found = False
if not os.path.exists(filename):
file_str = """
HORCM_MON
#ip_address service poll(10ms) timeout(10ms)
127.0.0.1 %16d 6000 3000
HORCM_CMD
""" % port
else:
file_str = utils.read_file_as_root(filename)
lines = file_str.splitlines()
for line in lines:
if re.match(r'\\\\.\\CMD-%s:/dev/sd' % serial, line):
found = True
break
if not found:
insert_str = r'\\\\.\\CMD-%s:/dev/sd' % serial
file_str = re.sub(r'(\n\bHORCM_CMD.*|^\bHORCM_CMD.*)',
r'\1\n%s\n' % insert_str, file_str)
try:
utils.execute('tee', filename, process_input=file_str,
run_as_root=True)
except putils.ProcessExecutionError as ex:
msg = basic_lib.output_err(
632, file=filename, ret=ex.exit_code, err=ex.stderr)
raise exception.HBSDError(message=msg)
def comm_get_copy_grp(self):
ret, stdout, stderr = self.exec_raidcom('raidcom', 'get copy_grp',
printflag=False)
if ret:
opt = 'raidcom get copy_grp'
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def comm_add_copy_grp(self, copy_group, pvol_group, svol_group, mun):
args = ('add copy_grp -copy_grp_name %s %s %s -mirror_id %d'
% (copy_group, pvol_group, svol_group, mun))
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_delete_copy_grp(self, copy_group):
args = 'delete copy_grp -copy_grp_name %s' % copy_group
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_get_device_grp(self, group_name):
args = 'get device_grp -device_grp_name %s' % group_name
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def comm_add_device_grp(self, group_name, ldev_name, ldev):
args = ('add device_grp -device_grp_name %s %s -ldev_id %d'
% (group_name, ldev_name, ldev))
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_delete_device_grp(self, group_name, ldev):
args = ('delete device_grp -device_grp_name %s -ldev_id %d'
% (group_name, ldev))
ret, stdout, stderr = self.exec_raidcom('raidcom', args,
printflag=False)
if ret:
opt = 'raidcom %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_paircreate(self, copy_group, ldev_name):
args = ('-g %s -d %s -split -fq quick -c %d -vl'
% (copy_group, ldev_name, self.conf.hitachi_copy_speed))
ret, stdout, stderr = self.exec_raidcom('paircreate', args)
if ret:
opt = 'paircreate %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_pairsplit(self, copy_group, ldev_name):
args = '-g %s -d %s -S' % (copy_group, ldev_name)
ret, stdout, stderr = self.exec_raidcom('pairsplit', args)
if ret:
opt = 'pairsplit %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_pairevtwait(self, copy_group, ldev_name, check_svol):
if not check_svol:
option = '-nowait'
else:
option = '-nowaits'
args = '-g %s -d %s %s' % (copy_group, ldev_name, option)
ret, stdout, stderr = self.exec_raidcom('pairevtwait', args,
printflag=False)
if ret > 127:
opt = 'pairevtwait %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return ret
def comm_pairdisplay(self, copy_group, ldev_name=None):
if not ldev_name:
args = '-g %s -CLI' % copy_group
else:
args = '-g %s -d %s -CLI' % (copy_group, ldev_name)
ret, stdout, stderr = self.exec_raidcom('pairdisplay', args,
printflag=False)
if ret and ret not in NO_SUCH_DEVICE:
opt = 'pairdisplay %s' % args
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return ret, stdout, stderr
def check_copy_grp(self, copy_group):
stdout = self.comm_get_copy_grp()
lines = stdout.splitlines()
count = 0
for line in lines[1:]:
line = shlex.split(line)
if line[0] == copy_group:
count += 1
if count == 2:
break
return count
def check_device_grp(self, group_name, ldev, ldev_name=None):
stdout = self.comm_get_device_grp(group_name)
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if int(line[2]) == ldev:
if not ldev_name:
return True
else:
return line[1] == ldev_name
else:
return False
def is_smpl(self, copy_group, ldev_name):
ret, stdout, stderr = self.comm_pairdisplay(copy_group,
ldev_name=ldev_name)
if not stdout:
return True
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[9] in [NOT_SET, 'SMPL']:
return True
else:
return False
def get_copy_groups(self):
copy_groups = []
stdout = self.comm_get_copy_grp()
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[0] in self.copy_groups and line[0] not in copy_groups:
copy_groups.append(line[0])
return copy_groups
def get_matched_copy_group(self, pvol, svol, ldev_name):
for copy_group in self.get_copy_groups():
pvol_group = '%sP' % copy_group
if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name):
return copy_group
else:
return None
def get_paired_info(self, ldev, only_flag=False):
paired_info = {'pvol': None, 'svol': []}
pvol = None
is_svol = False
stdout = self.comm_get_snapshot(ldev)
if stdout:
lines = stdout.splitlines()
line = shlex.split(lines[1])
status = STATUS_TABLE.get(line[2], basic_lib.UNKN)
if line[1] == 'P-VOL':
pvol = ldev
svol = int(line[6])
else:
is_svol = True
pvol = int(line[6])
svol = ldev
if status == basic_lib.PSUS:
status = self.get_snap_pvol_status(pvol, svol)
svol_info = {'lun': svol, 'status': status, 'is_vvol': True}
paired_info['svol'].append(svol_info)
paired_info['pvol'] = pvol
if only_flag or is_svol:
return paired_info
for copy_group in self.get_copy_groups():
ldev_name = None
pvol_status = basic_lib.UNKN
svol_status = basic_lib.UNKN
ret, stdout, stderr = self.comm_pairdisplay(copy_group)
if not stdout:
continue
lines = stdout.splitlines()
for line in lines[1:]:
line = shlex.split(line)
if line[9] not in ['P-VOL', 'S-VOL']:
continue
ldev0 = int(line[8])
ldev1 = int(line[12])
if ldev not in [ldev0, ldev1]:
continue
ldev_name = line[1]
if line[9] == 'P-VOL':
pvol = ldev0
svol = ldev1
pvol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN)
else:
svol = ldev0
pvol = ldev1
svol_status = STATUS_TABLE.get(line[10], basic_lib.UNKN)
if svol == ldev:
is_svol = True
if not ldev_name:
continue
pvol_group = '%sP' % copy_group
pvol_ok = self.check_device_grp(pvol_group, pvol,
ldev_name=ldev_name)
svol_group = '%sS' % copy_group
svol_ok = self.check_device_grp(svol_group, svol,
ldev_name=ldev_name)
if pvol_ok and svol_ok:
if pvol_status == basic_lib.PSUS:
status = svol_status
else:
status = pvol_status
svol_info = {'lun': svol, 'status': status, 'is_vvol': False}
paired_info['svol'].append(svol_info)
if is_svol:
break
        # An LDEV number of 0 is valid, so the explicit 'is not None' check is
        # required; a plain truthiness test would treat pvol 0 as unset.
if pvol is not None and paired_info['pvol'] is None:
paired_info['pvol'] = pvol
return paired_info
def add_pair_config(self, pvol, svol, copy_group, ldev_name, mun):
pvol_group = '%sP' % copy_group
svol_group = '%sS' % copy_group
self.comm_add_device_grp(pvol_group, ldev_name, pvol)
self.comm_add_device_grp(svol_group, ldev_name, svol)
nr_copy_groups = self.check_copy_grp(copy_group)
if nr_copy_groups == 1:
self.comm_delete_copy_grp(copy_group)
if nr_copy_groups != 2:
self.comm_add_copy_grp(copy_group, pvol_group, svol_group, mun)
def delete_pair_config(self, pvol, svol, copy_group, ldev_name):
pvol_group = '%sP' % copy_group
svol_group = '%sS' % copy_group
if self.check_device_grp(pvol_group, pvol, ldev_name=ldev_name):
self.comm_delete_device_grp(pvol_group, pvol)
if self.check_device_grp(svol_group, svol, ldev_name=ldev_name):
self.comm_delete_device_grp(svol_group, svol)
def _wait_for_pair_status(self, copy_group, ldev_name,
status, timeout, check_svol, start):
if self.comm_pairevtwait(copy_group, ldev_name,
check_svol) in status:
raise loopingcall.LoopingCallDone()
if time.time() - start >= timeout:
msg = basic_lib.output_err(
637, method='_wait_for_pair_status', timout=timeout)
raise exception.HBSDError(message=msg)
def wait_pair(self, copy_group, ldev_name, status, timeout,
interval, check_svol=False):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_pair_status, copy_group, ldev_name,
status, timeout, check_svol, time.time())
loop.start(interval=interval).wait()
def comm_create_pair(self, pvol, svol, is_vvol):
timeout = basic_lib.DEFAULT_PROCESS_WAITTIME
interval = self.conf.hitachi_copy_check_interval
if not is_vvol:
restart = False
create = False
ldev_name = LDEV_NAME % (pvol, svol)
mun = 0
for mun in range(MAX_MUNS):
copy_group = self.copy_groups[mun]
pvol_group = '%sP' % copy_group
if not self.check_device_grp(pvol_group, pvol):
break
else:
msg = basic_lib.output_err(
615, copy_method=basic_lib.FULL, pvol=pvol)
raise exception.HBSDBusy(message=msg)
try:
self.add_pair_config(pvol, svol, copy_group, ldev_name, mun)
self.restart_pair_horcm()
restart = True
self.comm_paircreate(copy_group, ldev_name)
create = True
self.wait_pair(copy_group, ldev_name, [basic_lib.PSUS],
timeout, interval)
self.wait_pair(copy_group, ldev_name,
[basic_lib.PSUS, basic_lib.COPY],
timeout, interval, check_svol=True)
except Exception:
with excutils.save_and_reraise_exception():
if create:
try:
self.wait_pair(copy_group, ldev_name,
[basic_lib.PSUS], timeout,
interval)
self.wait_pair(copy_group, ldev_name,
[basic_lib.PSUS], timeout,
interval, check_svol=True)
except Exception as ex:
LOG.warning(_LW('Failed to create pair: %s'), ex)
try:
self.comm_pairsplit(copy_group, ldev_name)
self.wait_pair(
copy_group, ldev_name,
[basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval)
except Exception as ex:
LOG.warning(_LW('Failed to create pair: %s'), ex)
if self.is_smpl(copy_group, ldev_name):
try:
self.delete_pair_config(pvol, svol, copy_group,
ldev_name)
except Exception as ex:
LOG.warning(_LW('Failed to create pair: %s'), ex)
if restart:
try:
self.restart_pair_horcm()
except Exception as ex:
LOG.warning(_LW('Failed to restart horcm: %s'), ex)
else:
self.check_snap_count(pvol)
self.comm_add_snapshot(pvol, svol)
try:
self.wait_snap(pvol, svol, [basic_lib.PAIR], timeout, interval)
self.comm_modify_snapshot(svol, 'create')
self.wait_snap(pvol, svol, [basic_lib.PSUS], timeout, interval)
except Exception:
with excutils.save_and_reraise_exception():
try:
self.comm_delete_snapshot(svol)
self.wait_snap(
pvol, svol, [basic_lib.SMPL], timeout,
self.conf.hitachi_async_copy_check_interval)
except Exception as ex:
LOG.warning(_LW('Failed to create pair: %s'), ex)
def delete_pair(self, pvol, svol, is_vvol):
timeout = basic_lib.DEFAULT_PROCESS_WAITTIME
interval = self.conf.hitachi_async_copy_check_interval
if not is_vvol:
ldev_name = LDEV_NAME % (pvol, svol)
copy_group = self.get_matched_copy_group(pvol, svol, ldev_name)
if not copy_group:
return
try:
self.comm_pairsplit(copy_group, ldev_name)
self.wait_pair(copy_group, ldev_name, [basic_lib.SMPL],
timeout, interval)
finally:
if self.is_smpl(copy_group, ldev_name):
self.delete_pair_config(pvol, svol, copy_group, ldev_name)
else:
self.comm_delete_snapshot(svol)
self.wait_snap(pvol, svol, [basic_lib.SMPL], timeout, interval)
def comm_raidqry(self):
ret, stdout, stderr = self.exec_command('raidqry', '-h')
if ret:
opt = 'raidqry -h'
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return stdout
def get_comm_version(self):
stdout = self.comm_raidqry()
lines = stdout.splitlines()
return shlex.split(lines[1])[1]
def output_param_to_log(self, conf):
for opt in volume_opts:
if not opt.secret:
value = getattr(conf, opt.name)
LOG.info(_LI('\t%(name)-35s : %(value)s'),
{'name': opt.name, 'value': value})
def create_lock_file(self):
inst = self.conf.hitachi_horcm_numbers[0]
pair_inst = self.conf.hitachi_horcm_numbers[1]
serial = self.conf.hitachi_serial_number
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
raidcom_pair_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, pair_inst)
horcmgr_lock_file = '%s%d' % (HORCMGR_LOCK_FILE, pair_inst)
resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial)
basic_lib.create_empty_file(raidcom_lock_file)
basic_lib.create_empty_file(raidcom_pair_lock_file)
basic_lib.create_empty_file(horcmgr_lock_file)
basic_lib.create_empty_file(resource_lock_file)
def connect_storage(self):
properties = utils.brick_get_connector_properties()
self.setup_horcmgr(properties['ip'])
    def get_max_hostgroups(self):
        """Return the maximum value of a hostgroup id."""
        return MAX_HOSTGROUPS
    def get_hostgroup_luns(self, port, gid):
        luns = []
        self.add_used_hlun(port, gid, luns)
        return luns
def get_ldev_size_in_gigabyte(self, ldev, existing_ref):
param = 'serial_number'
if param not in existing_ref:
msg = basic_lib.output_err(700, param=param)
raise exception.HBSDError(data=msg)
storage = existing_ref.get(param)
if storage != self.conf.hitachi_serial_number:
msg = basic_lib.output_err(648, resource=param)
raise exception.HBSDError(data=msg)
stdout = self.comm_get_ldev(ldev)
if not stdout:
msg = basic_lib.output_err(648, resource='LDEV')
raise exception.HBSDError(data=msg)
sts_line = vol_type = ""
vol_attrs = []
size = num_port = 1
lines = stdout.splitlines()
for line in lines:
if line.startswith("STS :"):
sts_line = line
elif line.startswith("VOL_TYPE :"):
vol_type = shlex.split(line)[2]
elif line.startswith("VOL_ATTR :"):
vol_attrs = shlex.split(line)[2:]
elif line.startswith("VOL_Capacity(BLK) :"):
size = int(shlex.split(line)[2])
elif line.startswith("NUM_PORT :"):
num_port = int(shlex.split(line)[2])
if 'NML' not in sts_line:
msg = basic_lib.output_err(648, resource='LDEV')
raise exception.HBSDError(data=msg)
if 'OPEN-V' not in vol_type:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
if 'HDP' not in vol_attrs:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
for vol_attr in vol_attrs:
if vol_attr == ':':
continue
if vol_attr in PAIR_TYPE:
msg = basic_lib.output_err(705, ldev=ldev)
raise exception.HBSDError(data=msg)
if vol_attr not in PERMITTED_TYPE:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
# Hitachi storage calculates volume sizes in a block unit, 512 bytes.
# So, units.Gi is divided by 512.
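        # For example (illustrative arithmetic): a 1 GiB LDEV reports
        # VOL_Capacity(BLK) of 2097152 blocks (1073741824 / 512), so a size
        # that is an exact multiple of a gigabyte leaves no remainder below.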
if size % (units.Gi / 512):
msg = basic_lib.output_err(703, ldev=ldev)
raise exception.HBSDError(data=msg)
if num_port:
msg = basic_lib.output_err(704, ldev=ldev)
raise exception.HBSDError(data=msg)
return size / (units.Gi / 512)
|
|
"""
Support for Clementine Music Player as media player.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.clementine/
"""
import asyncio
from datetime import timedelta
import logging
import time
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, PLATFORM_SCHEMA,
SUPPORT_VOLUME_STEP, SUPPORT_SELECT_SOURCE, SUPPORT_PLAY, MEDIA_TYPE_MUSIC,
SUPPORT_VOLUME_SET, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, CONF_NAME, CONF_PORT, CONF_ACCESS_TOKEN,
STATE_OFF, STATE_PLAYING, STATE_PAUSED, STATE_UNKNOWN)
REQUIREMENTS = ['python-clementine-remote==1.0.1']
SCAN_INTERVAL = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Clementine Remote'
SUPPORT_CLEMENTINE = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_VOLUME_SET | \
SUPPORT_NEXT_TRACK | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=5500): cv.positive_int,
vol.Optional(CONF_ACCESS_TOKEN, default=None): cv.positive_int,
})
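# Illustrative configuration.yaml snippet (a sketch only; the host value is
# hypothetical, the other keys follow PLATFORM_SCHEMA above):
#
# media_player:
#   - platform: clementine
#     host: 192.168.1.15
#     port: 5500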
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Clementine platform."""
from clementineremote import ClementineRemote
client = ClementineRemote(config.get(CONF_HOST), config.get(CONF_PORT),
config.get(CONF_ACCESS_TOKEN), reconnect=True)
add_devices([ClementineDevice(client, config[CONF_NAME])])
class ClementineDevice(MediaPlayerDevice):
"""Representation of Clementine Player."""
def __init__(self, client, name):
"""Initialize the Clementine device."""
self._client = client
self._name = name
self._muted = False
self._volume = 0.0
self._track_id = 0
self._last_track_id = 0
self._track_name = ''
self._track_artist = ''
self._track_album_name = ''
self._state = STATE_UNKNOWN
def update(self):
"""Retrieve the latest data from the Clementine Player."""
try:
client = self._client
if client.state == 'Playing':
self._state = STATE_PLAYING
elif client.state == 'Paused':
self._state = STATE_PAUSED
elif client.state == 'Disconnected':
self._state = STATE_OFF
else:
self._state = STATE_PAUSED
if client.last_update and (time.time() - client.last_update > 40):
self._state = STATE_OFF
self._volume = float(client.volume) if client.volume else 0.0
if client.current_track:
self._track_id = client.current_track['track_id']
self._track_name = client.current_track['title']
self._track_artist = client.current_track['track_artist']
self._track_album_name = client.current_track['track_album']
        except Exception:
self._state = STATE_OFF
raise
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume / 100.0
@property
def source(self):
"""Return current source name."""
source_name = "Unknown"
client = self._client
if client.active_playlist_id in client.playlists:
source_name = client.playlists[client.active_playlist_id]['name']
return source_name
@property
def source_list(self):
"""List of available input sources."""
source_names = [s["name"] for s in self._client.playlists.values()]
return source_names
def select_source(self, source):
"""Select input source."""
client = self._client
sources = [s for s in client.playlists.values() if s['name'] == source]
if len(sources) == 1:
client.change_song(sources[0]['id'], 0)
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_title(self):
"""Title of current playing media."""
return self._track_name
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._track_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._track_album_name
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_CLEMENTINE
@property
def media_image_hash(self):
"""Hash value for media image."""
if self._client.current_track:
return self._client.current_track['track_id']
return None
@asyncio.coroutine
def async_get_media_image(self):
"""Fetch media image of current playing image."""
if self._client.current_track:
image = bytes(self._client.current_track['art'])
return (image, 'image/png')
return None, None
def volume_up(self):
"""Volume up the media player."""
newvolume = min(self._client.volume + 4, 100)
self._client.set_volume(newvolume)
def volume_down(self):
"""Volume down media player."""
newvolume = max(self._client.volume - 4, 0)
self._client.set_volume(newvolume)
def mute_volume(self, mute):
"""Send mute command."""
self._client.set_volume(0)
def set_volume_level(self, volume):
"""Set volume level."""
self._client.set_volume(int(100 * volume))
def media_play_pause(self):
"""Simulate play pause media player."""
if self._state == STATE_PLAYING:
self.media_pause()
else:
self.media_play()
def media_play(self):
"""Send play command."""
self._state = STATE_PLAYING
self._client.play()
def media_pause(self):
"""Send media pause command to media player."""
self._state = STATE_PAUSED
self._client.pause()
def media_next_track(self):
"""Send next track command."""
self._client.next()
def media_previous_track(self):
"""Send the previous track command."""
self._client.previous()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import functools
import logging
import os
import shutil
import tempfile
import threading
from devil import base_error
from devil.android import device_blacklist
from devil.android import device_errors
from devil.android import device_list
from devil.android import device_utils
from devil.android import logcat_monitor
from devil.utils import file_utils
from devil.utils import parallelizer
from pylib import constants
from pylib.base import environment
def _DeviceCachePath(device):
file_name = 'device_cache_%s.json' % device.adb.GetDeviceSerial()
return os.path.join(constants.GetOutDirectory(), file_name)
def handle_shard_failures(f):
"""A decorator that handles device failures for per-device functions.
Args:
f: the function being decorated. The function must take at least one
argument, and that argument must be the device.
"""
return handle_shard_failures_with(None)(f)
# TODO(jbudorick): Refactor this to work as a decorator or context manager.
def handle_shard_failures_with(on_failure):
"""A decorator that handles device failures for per-device functions.
This calls on_failure in the event of a failure.
  Args:
    on_failure: A binary function called on failure with the device and the
        name of the decorated function. The decorated function must take at
        least one argument, and that argument must be the device.
"""
def decorator(f):
@functools.wraps(f)
def wrapper(dev, *args, **kwargs):
try:
return f(dev, *args, **kwargs)
except device_errors.CommandTimeoutError:
logging.exception('Shard timed out: %s(%s)', f.__name__, str(dev))
except device_errors.DeviceUnreachableError:
logging.exception('Shard died: %s(%s)', f.__name__, str(dev))
except base_error.BaseError:
logging.exception('Shard failed: %s(%s)', f.__name__, str(dev))
except SystemExit:
logging.exception('Shard killed: %s(%s)', f.__name__, str(dev))
raise
if on_failure:
on_failure(dev, f.__name__)
return None
return wrapper
return decorator
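# Illustrative usage sketch (hypothetical function name; `env` stands for a
# LocalDeviceEnvironment instance): wrap a per-device function so shard
# failures are logged and reported via the callback instead of propagating.
#
#   @handle_shard_failures_with(on_failure=env.BlacklistDevice)
#   def reboot_device(device):
#     device.Reboot()
#
#   env.parallel_devices.pMap(reboot_device)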
class LocalDeviceEnvironment(environment.Environment):
def __init__(self, args, _error_func):
super(LocalDeviceEnvironment, self).__init__()
self._blacklist = (device_blacklist.Blacklist(args.blacklist_file)
if args.blacklist_file
else None)
self._device_serial = args.test_device
self._devices_lock = threading.Lock()
self._devices = []
self._concurrent_adb = args.enable_concurrent_adb
self._enable_device_cache = args.enable_device_cache
self._logcat_monitors = []
self._logcat_output_dir = args.logcat_output_dir
self._logcat_output_file = args.logcat_output_file
self._max_tries = 1 + args.num_retries
self._skip_clear_data = args.skip_clear_data
self._target_devices_file = args.target_devices_file
self._tool_name = args.tool
#override
def SetUp(self):
device_arg = 'default'
if self._target_devices_file:
device_arg = device_list.GetPersistentDeviceList(
self._target_devices_file)
if not device_arg:
logging.warning('No target devices specified. Falling back to '
'running on all available devices.')
device_arg = 'default'
else:
logging.info(
'Read device list %s from target devices file.', str(device_arg))
elif self._device_serial:
device_arg = self._device_serial
self._devices = device_utils.DeviceUtils.HealthyDevices(
self._blacklist, enable_device_files_cache=self._enable_device_cache,
default_retries=self._max_tries - 1, device_arg=device_arg)
if not self._devices:
raise device_errors.NoDevicesError
if self._logcat_output_file:
self._logcat_output_dir = tempfile.mkdtemp()
@handle_shard_failures_with(on_failure=self.BlacklistDevice)
def prepare_device(d):
if self._enable_device_cache:
cache_path = _DeviceCachePath(d)
if os.path.exists(cache_path):
logging.info('Using device cache: %s', cache_path)
with open(cache_path) as f:
d.LoadCacheData(f.read())
# Delete cached file so that any exceptions cause it to be cleared.
os.unlink(cache_path)
if self._logcat_output_dir:
logcat_file = os.path.join(
self._logcat_output_dir,
'%s_%s' % (d.adb.GetDeviceSerial(),
datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S')))
monitor = logcat_monitor.LogcatMonitor(
d.adb, clear=True, output_file=logcat_file)
self._logcat_monitors.append(monitor)
monitor.Start()
self.parallel_devices.pMap(prepare_device)
@property
def blacklist(self):
return self._blacklist
@property
def concurrent_adb(self):
return self._concurrent_adb
@property
def devices(self):
if not self._devices:
raise device_errors.NoDevicesError()
return self._devices
@property
def max_tries(self):
return self._max_tries
@property
def parallel_devices(self):
return parallelizer.SyncParallelizer(self.devices)
@property
def skip_clear_data(self):
return self._skip_clear_data
@property
def tool(self):
return self._tool_name
#override
def TearDown(self):
@handle_shard_failures_with(on_failure=self.BlacklistDevice)
def tear_down_device(d):
# Write the cache even when not using it so that it will be ready the
# first time that it is enabled. Writing it every time is also necessary
# so that an invalid cache can be flushed just by disabling it for one
# run.
cache_path = _DeviceCachePath(d)
with open(cache_path, 'w') as f:
f.write(d.DumpCacheData())
logging.info('Wrote device cache: %s', cache_path)
self.parallel_devices.pMap(tear_down_device)
for m in self._logcat_monitors:
try:
m.Stop()
m.Close()
except base_error.BaseError:
logging.exception('Failed to stop logcat monitor for %s',
m.adb.GetDeviceSerial())
if self._logcat_output_file:
file_utils.MergeFiles(
self._logcat_output_file,
[m.output_file for m in self._logcat_monitors])
shutil.rmtree(self._logcat_output_dir)
def BlacklistDevice(self, device, reason='local_device_failure'):
device_serial = device.adb.GetDeviceSerial()
if self._blacklist:
self._blacklist.Extend([device_serial], reason=reason)
with self._devices_lock:
self._devices = [d for d in self._devices if str(d) != device_serial]
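# Illustrative lifecycle sketch (hypothetical `args` namespace providing the
# attributes read in __init__): set up the environment, use the devices via
# the parallelizer, then tear down.
#
#   env = LocalDeviceEnvironment(args, error_func)
#   env.SetUp()
#   try:
#     env.parallel_devices.pMap(lambda d: d.WaitUntilFullyBooted())
#   finally:
#     env.TearDown()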
|
|
"""pypyr step that runs another pipeline from within the current pipeline."""
import logging
import shlex
from pypyr.context import Context
from pypyr.errors import (ContextError,
ControlOfFlowInstruction,
KeyInContextHasNoValueError,
KeyNotInContextError,
Stop)
import pypyr.pipelinerunner as pipelinerunner
# Getting the logger by module __name__ means the log level will be set correctly.
logger = logging.getLogger(__name__)
def run_step(context):
"""Run another pipeline from this step.
The parent pipeline is the current, executing pipeline. The invoked, or
child pipeline is the pipeline you are calling from this step.
Args:
context: dictionary-like pypyr.context.Context. context is mandatory.
Uses the following context keys in context:
- pype
- name. mandatory. str. Name of pipeline to execute. This
{name}.yaml must exist in the working directory/pipelines
dir.
- args. optional. dict. Create the context of the called
pipeline with these keys & values. If args specified,
will not pass the parent context unless you explicitly set
useParentContext = True. If you do set useParentContext=True,
will write args into the parent context.
- out. optional. str or dict or list. If you set args or
useParentContext=False, the values in out will be saved from
              child pipeline's fresh context into the parent context upon
completion of the child pipeline. Pass a string for a single
              key to grab from child context, a list of strings for a list
of keys to grab from child context, or a dict where you map
'parent-key-name': 'child-key-name'.
- pipeArg. string. optional. String to pass to the
context_parser - the equivalent to context arg on the
pypyr cli. Only used if skipParse==False.
- raiseError. bool. optional. Defaults to True. If False, log,
but swallow any errors that happen during the invoked
pipeline execution. Swallowing means that the current/parent
pipeline will carry on with the next step even if an error
occurs in the invoked pipeline.
- skipParse. bool. optional. Defaults to True. skip the
context_parser on the invoked pipeline.
- useParentContext. optional. bool. Defaults to True. Pass the
              current (i.e. parent) pipeline context to the invoked (child)
pipeline.
- loader: str. optional. Absolute name of pipeline loader
module. If not specified will use
pypyr.pypeloaders.fileloader.
- groups. list of str, or str. optional. Step-Groups to run in
pipeline. If you specify a str, will convert it to a single
entry list for you.
- success. str. optional. Step-Group to run on successful
pipeline completion.
- failure. str. optional. Step-Group to run on pipeline error.
If none of groups, success & failure specified, will run the default pypyr
steps, on_success & on_failure sequence.
If groups specified, will only run groups, without a success or failure
sequence, unless you specifically set these also.
Returns:
None
Raises:
pypyr.errors.KeyNotInContextError: if ['pype'] or ['pype']['name']
is missing.
pypyr.errors.KeyInContextHasNoValueError: ['pype']['name'] exists but
is empty.
"""
logger.debug("started")
(pipeline_name,
args,
out,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
step_groups,
success_group,
failure_group
) = get_arguments(context)
try:
if use_parent_context:
logger.info("pyping %s, using parent context.", pipeline_name)
if args:
logger.debug("writing args into parent context...")
context.update(args)
try:
og_pipeline_name = context.pipeline_name
context.pipeline_name = pipeline_name
pipelinerunner.load_and_run_pipeline(
pipeline_name=pipeline_name,
pipeline_context_input=pipe_arg,
context=context,
parse_input=not skip_parse,
loader=loader,
groups=step_groups,
success_group=success_group,
failure_group=failure_group
)
finally:
context.pipeline_name = og_pipeline_name
else:
logger.info("pyping %s, without parent context.", pipeline_name)
if args:
child_context = Context(args)
else:
child_context = Context()
child_context.pipeline_name = pipeline_name
child_context.working_dir = context.working_dir
pipelinerunner.load_and_run_pipeline(
pipeline_name=pipeline_name,
pipeline_context_input=pipe_arg,
context=child_context,
parse_input=not skip_parse,
loader=loader,
groups=step_groups,
success_group=success_group,
failure_group=failure_group
)
if out:
write_child_context_to_parent(out=out,
parent_context=context,
child_context=child_context)
logger.info("pyped %s.", pipeline_name)
except (ControlOfFlowInstruction, Stop):
# Control-of-Flow/Stop are instructions to go somewhere
# else, not errors per se.
raise
except Exception as ex_info:
# yes, yes, don't catch Exception. Have to, though, in order to swallow
# errs if !raise_error
logger.error("Something went wrong pyping %s. %s: %s",
pipeline_name, type(ex_info).__name__, ex_info)
if raise_error:
logger.debug("Raising original exception to caller.")
raise
else:
logger.debug(
"raiseError is False. Swallowing error in %s.", pipeline_name)
logger.debug("done")
def get_arguments(context):
"""Parse arguments for pype from context and assign default values.
Args:
context: pypyr.context.Context. context is mandatory.
Returns:
tuple (pipeline_name, #str
args, #dict
out, #str or dict or list
use_parent_context, #bool
               pipe_arg, #list of str or None
               skip_parse, #bool
               raise_error, #bool
               loader, #str
               groups, #list of str
               success_group, #str
               failure_group #str
)
Raises:
pypyr.errors.KeyNotInContextError: if ['pype']['name'] is missing.
pypyr.errors.KeyInContextHasNoValueError: if ['pype']['name'] exists but
is None.
"""
context.assert_key_has_value(key='pype', caller=__name__)
pype = context.get_formatted('pype')
try:
pipeline_name = pype['name']
if pipeline_name is None:
raise KeyInContextHasNoValueError(
"pypyr.steps.pype ['pype']['name'] exists but is empty.")
except KeyError as err:
raise KeyNotInContextError(
"pypyr.steps.pype missing 'name' in the 'pype' context item. "
"You need to specify the pipeline name to run another "
"pipeline.") from err
args = pype.get('args', None)
if args is not None and not isinstance(args, dict):
raise ContextError(
"pypyr.steps.pype 'args' in the 'pype' context item "
"must be a dict.")
pipe_arg_string = pype.get('pipeArg', None)
pipe_arg = shlex.split(pipe_arg_string) if pipe_arg_string else None
if pipe_arg_string and 'skipParse' not in pype:
skip_parse = False
else:
skip_parse = pype.get('skipParse', True)
if (args or pipe_arg_string) and 'useParentContext' not in pype:
use_parent_context = False
else:
use_parent_context = pype.get('useParentContext', True)
out = pype.get('out', None)
if out and use_parent_context:
raise ContextError(
"pypyr.steps.pype pype.out is only relevant if useParentContext "
"= False. If you're using the parent context, no need to have out "
"args since their values will already be in context. If you're "
"NOT using parent context and you've specified pype.args, just "
"leave off the useParentContext key and it'll default to False "
"under the hood, or set it to False yourself if you keep it in.")
raise_error = pype.get('raiseError', True)
loader = pype.get('loader', None)
groups = pype.get('groups', None)
if isinstance(groups, str):
groups = [groups]
success_group = pype.get('success', None)
failure_group = pype.get('failure', None)
return (
pipeline_name,
args,
out,
use_parent_context,
pipe_arg,
skip_parse,
raise_error,
loader,
groups,
success_group,
failure_group
)
def write_child_context_to_parent(out, parent_context, child_context):
"""Write out keys from child to parent context.
Args:
out. str or dict or list. Pass a string for a single
        key to grab from child context, a list of strings for a list
of keys to grab from child context, or a dict where you map
'parent-key-name': 'child-key-name'.
parent_context: parent Context. destination context.
child_context: write from this context to the parent.
"""
if isinstance(out, str):
save_me = {out: out}
elif isinstance(out, list):
save_me = {k: k for k in out}
elif isinstance(out, dict):
save_me = out
else:
raise ContextError("pypyr.steps.pype pype.out should be a string, or "
f"a list or a dict. Instead, it's a {type(out)}")
for parent_key, child_key in save_me.items():
logger.debug(
"setting parent context %s to value from child context %s",
parent_key,
child_key)
parent_context[parent_key] = child_context.get_formatted(child_key)
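# Illustrative usage sketch for write_child_context_to_parent (hypothetical
# key names):
#
#   parent = Context({'existing': 'value'})
#   child = Context({'childResult': 42})
#   write_child_context_to_parent({'parentResult': 'childResult'}, parent, child)
#   # parent['parentResult'] is now 42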
|
|
#!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities for working with threads and ``Futures``.
``Futures`` are a pattern for concurrent programming introduced in
Python 3.2 in the `concurrent.futures` package (this package has also
been backported to older versions of Python and can be installed with
``pip install futures``). Tornado will use `concurrent.futures.Future` if
it is available; otherwise it will use a compatible class defined in this
module.
"""
from __future__ import absolute_import, division, print_function, with_statement
import functools
import sys
from tornado.stack_context import ExceptionStackContext, wrap
from tornado.util import raise_exc_info, ArgReplacer
try:
from concurrent import futures
except ImportError:
futures = None
class ReturnValueIgnoredError(Exception):
pass
class _DummyFuture(object):
def __init__(self):
self._done = False
self._result = None
self._exception = None
self._callbacks = []
def cancel(self):
return False
def cancelled(self):
return False
def running(self):
return not self._done
def done(self):
return self._done
def result(self, timeout=None):
self._check_done()
if self._exception:
raise self._exception
return self._result
def exception(self, timeout=None):
self._check_done()
if self._exception:
return self._exception
else:
return None
def add_done_callback(self, fn):
if self._done:
fn(self)
else:
self._callbacks.append(fn)
def set_result(self, result):
self._result = result
self._set_done()
def set_exception(self, exception):
self._exception = exception
self._set_done()
def _check_done(self):
if not self._done:
raise Exception("DummyFuture does not support blocking for results")
def _set_done(self):
self._done = True
for cb in self._callbacks:
# TODO: error handling
cb(self)
self._callbacks = None
if futures is None:
Future = _DummyFuture
else:
Future = futures.Future
class TracebackFuture(Future):
"""Subclass of `Future` which can store a traceback with
exceptions.
The traceback is automatically available in Python 3, but in the
Python 2 futures backport this information is discarded.
"""
def __init__(self):
super(TracebackFuture, self).__init__()
self.__exc_info = None
def exc_info(self):
return self.__exc_info
def set_exc_info(self, exc_info):
"""Traceback-aware replacement for
`~concurrent.futures.Future.set_exception`.
"""
self.__exc_info = exc_info
self.set_exception(exc_info[1])
def result(self):
if self.__exc_info is not None:
raise_exc_info(self.__exc_info)
else:
return super(TracebackFuture, self).result()
class DummyExecutor(object):
def submit(self, fn, *args, **kwargs):
future = TracebackFuture()
try:
future.set_result(fn(*args, **kwargs))
except Exception:
future.set_exc_info(sys.exc_info())
return future
def shutdown(self, wait=True):
pass
dummy_executor = DummyExecutor()
def run_on_executor(fn):
"""Decorator to run a synchronous method asynchronously on an executor.
The decorated method may be called with a ``callback`` keyword
argument and returns a future.
"""
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
callback = kwargs.pop("callback", None)
future = self.executor.submit(fn, self, *args, **kwargs)
if callback:
self.io_loop.add_future(future,
lambda future: callback(future.result()))
return future
return wrapper
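# Illustrative usage sketch (hypothetical class and method names): the
# decorated method's class is assumed to provide the `executor` and `io_loop`
# attributes used by the wrapper above, and concurrent.futures is assumed to
# be available.
#
#   class Worker(object):
#       def __init__(self, io_loop):
#           self.io_loop = io_loop
#           self.executor = futures.ThreadPoolExecutor(max_workers=4)
#
#       @run_on_executor
#       def heavy_computation(self, data):
#           return compute(data)  # runs on the executor's worker thread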
_NO_RESULT = object()
def return_future(f):
"""Decorator to make a function that returns via callback return a
`Future`.
The wrapped function should take a ``callback`` keyword argument
and invoke it with one argument when it has finished. To signal failure,
the function can simply raise an exception (which will be
captured by the `.StackContext` and passed along to the ``Future``).
From the caller's perspective, the callback argument is optional.
If one is given, it will be invoked when the function is complete
with `Future.result()` as an argument. If the function fails, the
callback will not be run and an exception will be raised into the
surrounding `.StackContext`.
If no callback is given, the caller should use the ``Future`` to
wait for the function to complete (perhaps by yielding it in a
`.gen.engine` function, or passing it to `.IOLoop.add_future`).
Usage::
@return_future
def future_func(arg1, arg2, callback):
# Do stuff (possibly asynchronous)
callback(result)
@gen.engine
def caller(callback):
yield future_func(arg1, arg2)
callback()
Note that ``@return_future`` and ``@gen.engine`` can be applied to the
same function, provided ``@return_future`` appears first. However,
consider using ``@gen.coroutine`` instead of this combination.
"""
replacer = ArgReplacer(f, 'callback')
@functools.wraps(f)
def wrapper(*args, **kwargs):
future = TracebackFuture()
callback, args, kwargs = replacer.replace(
lambda value=_NO_RESULT: future.set_result(value),
args, kwargs)
def handle_error(typ, value, tb):
future.set_exc_info((typ, value, tb))
return True
exc_info = None
with ExceptionStackContext(handle_error):
try:
result = f(*args, **kwargs)
if result is not None:
raise ReturnValueIgnoredError(
"@return_future should not be used with functions "
"that return values")
except:
exc_info = sys.exc_info()
raise
if exc_info is not None:
# If the initial synchronous part of f() raised an exception,
# go ahead and raise it to the caller directly without waiting
# for them to inspect the Future.
raise_exc_info(exc_info)
# If the caller passed in a callback, schedule it to be called
# when the future resolves. It is important that this happens
# just before we return the future, or else we risk confusing
# stack contexts with multiple exceptions (one here with the
# immediate exception, and again when the future resolves and
# the callback triggers its exception by calling future.result()).
if callback is not None:
def run_callback(future):
result = future.result()
if result is _NO_RESULT:
callback()
else:
callback(future.result())
future.add_done_callback(wrap(run_callback))
return future
return wrapper
def chain_future(a, b):
"""Chain two futures together so that when one completes, so does the other.
The result (success or failure) of ``a`` will be copied to ``b``.
"""
def copy(future):
assert future is a
if (isinstance(a, TracebackFuture) and isinstance(b, TracebackFuture)
and a.exc_info() is not None):
b.set_exc_info(a.exc_info())
elif a.exception() is not None:
b.set_exception(a.exception())
else:
b.set_result(a.result())
a.add_done_callback(copy)
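# Illustrative sketch: once chained, completing `a` (with a result or an
# exception) completes `b` the same way.
#
#   a, b = TracebackFuture(), TracebackFuture()
#   chain_future(a, b)
#   a.set_result(42)
#   assert b.result() == 42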
|