# Copyright (c) 2018 European Organization for Nuclear Research.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for manipulating NodeGroups via the DB API"""
from oslo_utils import uuidutils
from magnum.common import exception
from magnum.tests.unit.db import base
from magnum.tests.unit.db import utils
class DbNodeGroupTestCase(base.DbTestCase):
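# Exercises the NodeGroup DB API: create, get, list (sorted and filtered), destroy and update.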
def test_create_nodegroup(self):
utils.create_test_nodegroup()
def test_create_nodegroup_already_exists(self):
utils.create_test_nodegroup()
self.assertRaises(exception.NodeGroupAlreadyExists,
utils.create_test_nodegroup)
def test_create_nodegroup_same_name_same_cluster(self):
# NOTE(ttsiouts): Don't allow the same name for nodegroups
# in the same cluster.
nodegroup = utils.create_test_nodegroup()
new = {
'name': nodegroup.name,
'id': nodegroup.id + 8,
'cluster_id': nodegroup.cluster_id
}
self.assertRaises(exception.NodeGroupAlreadyExists,
utils.create_test_nodegroup, **new)
def test_create_nodegroup_same_name_different_cluster(self):
# NOTE(ttsiouts): Verify nodegroups with the same name
# but in different clusters are allowed.
nodegroup = utils.create_test_nodegroup()
new = {
'name': nodegroup.name,
'id': nodegroup.id + 8,
'cluster_id': 'fake-cluster-uuid',
'uuid': 'fake-nodegroup-uuid',
'project_id': nodegroup.project_id,
}
try:
utils.create_test_nodegroup(**new)
except Exception as e:
# Creation should succeed; fail the test case explicitly if it does not.
self.fail("create_test_nodegroup raised %s unexpectedly" % e)
def test_get_nodegroup_by_id(self):
nodegroup = utils.create_test_nodegroup()
res = self.dbapi.get_nodegroup_by_id(self.context,
nodegroup.cluster_id,
nodegroup.id)
self.assertEqual(nodegroup.id, res.id)
self.assertEqual(nodegroup.uuid, res.uuid)
def test_get_nodegroup_by_name(self):
nodegroup = utils.create_test_nodegroup()
res = self.dbapi.get_nodegroup_by_name(self.context,
nodegroup.cluster_id,
nodegroup.name)
self.assertEqual(nodegroup.name, res.name)
self.assertEqual(nodegroup.uuid, res.uuid)
def test_get_nodegroup_by_uuid(self):
nodegroup = utils.create_test_nodegroup()
res = self.dbapi.get_nodegroup_by_uuid(self.context,
nodegroup.cluster_id,
nodegroup.uuid)
self.assertEqual(nodegroup.id, res.id)
self.assertEqual(nodegroup.uuid, res.uuid)
def test_get_nodegroup_that_does_not_exist(self):
# Create a cluster with no nodegroups
cluster = utils.create_test_cluster()
self.assertRaises(exception.NodeGroupNotFound,
self.dbapi.get_nodegroup_by_id,
self.context, cluster.uuid, 100)
self.assertRaises(exception.NodeGroupNotFound,
self.dbapi.get_nodegroup_by_uuid,
self.context, cluster.uuid,
'12345678-9999-0000-aaaa-123456789012')
self.assertRaises(exception.NodeGroupNotFound,
self.dbapi.get_nodegroup_by_name,
self.context, cluster.uuid, 'not_found')
def test_get_nodegroups_in_cluster(self):
uuids_in_cluster = []
uuids_not_in_cluster = []
cluster = utils.create_test_cluster(uuid=uuidutils.generate_uuid())
for i in range(2):
ng = utils.create_test_nodegroup(uuid=uuidutils.generate_uuid(),
name='test%(id)s' % {'id': i},
cluster_id=cluster.uuid)
uuids_in_cluster.append(ng.uuid)
for i in range(2):
ng = utils.create_test_nodegroup(uuid=uuidutils.generate_uuid(),
name='test%(id)s' % {'id': i},
cluster_id='fake_cluster')
uuids_not_in_cluster.append(ng.uuid)
res = self.dbapi.list_cluster_nodegroups(self.context, cluster.uuid)
res_uuids = [r.uuid for r in res]
self.assertEqual(sorted(uuids_in_cluster), sorted(res_uuids))
for uuid in uuids_not_in_cluster:
self.assertNotIn(uuid, res_uuids)
def test_get_nodegroup_list_sorted(self):
uuids = []
cluster = utils.create_test_cluster(uuid=uuidutils.generate_uuid())
for i in range(5):
ng = utils.create_test_nodegroup(uuid=uuidutils.generate_uuid(),
name='test%(id)s' % {'id': i},
cluster_id=cluster.uuid)
uuids.append(ng.uuid)
res = self.dbapi.list_cluster_nodegroups(self.context, cluster.uuid,
sort_key='uuid')
res_uuids = [r.uuid for r in res]
self.assertEqual(sorted(uuids), res_uuids)
self.assertRaises(exception.InvalidParameterValue,
self.dbapi.list_cluster_nodegroups,
self.context,
cluster.uuid,
sort_key='not-there')
def test_get_nodegroup_list_with_filters(self):
cluster_dict = utils.get_test_cluster(
id=1, uuid=uuidutils.generate_uuid())
cluster = self.dbapi.create_cluster(cluster_dict)
group1 = utils.create_test_nodegroup(
name='group-one',
cluster_id=cluster.uuid,
flavor_id=1,
uuid=uuidutils.generate_uuid(),
node_count=1)
group2 = utils.create_test_nodegroup(
name='group-two',
cluster_id=cluster.uuid,
flavor_id=1,
uuid=uuidutils.generate_uuid(),
node_count=1)
group3 = utils.create_test_nodegroup(
name='group-four',
cluster_id=cluster.uuid,
flavor_id=2,
uuid=uuidutils.generate_uuid(),
node_count=3)
filters = {'name': 'group-one'}
res = self.dbapi.list_cluster_nodegroups(
self.context, cluster.uuid, filters=filters)
self.assertEqual([group1.id], [r.id for r in res])
filters = {'node_count': 1}
res = self.dbapi.list_cluster_nodegroups(
self.context, cluster.uuid, filters=filters)
self.assertEqual([group1.id, group2.id], [r.id for r in res])
filters = {'flavor_id': 2, 'node_count': 3}
res = self.dbapi.list_cluster_nodegroups(
self.context, cluster.uuid, filters=filters)
self.assertEqual([group3.id], [r.id for r in res])
filters = {'name': 'group-five'}
res = self.dbapi.list_cluster_nodegroups(
self.context, cluster.uuid, filters=filters)
self.assertEqual([], [r.id for r in res])
def test_destroy_nodegroup(self):
cluster = utils.create_test_cluster()
nodegroup = utils.create_test_nodegroup()
self.assertEqual(nodegroup.uuid, self.dbapi.get_nodegroup_by_uuid(
self.context, cluster.uuid, nodegroup.uuid).uuid)
self.dbapi.destroy_nodegroup(cluster.uuid, nodegroup.uuid)
self.assertRaises(exception.NodeGroupNotFound,
self.dbapi.get_nodegroup_by_uuid,
self.context, cluster.uuid, nodegroup.uuid)
self.assertRaises(exception.NodeGroupNotFound,
self.dbapi.destroy_nodegroup, cluster.uuid,
nodegroup.uuid)
def test_destroy_nodegroup_by_uuid(self):
cluster = utils.create_test_cluster()
nodegroup = utils.create_test_nodegroup()
self.assertIsNotNone(self.dbapi.get_nodegroup_by_uuid(self.context,
cluster.uuid,
nodegroup.uuid))
self.dbapi.destroy_nodegroup(cluster.uuid, nodegroup.uuid)
self.assertRaises(exception.NodeGroupNotFound,
self.dbapi.get_nodegroup_by_uuid, self.context,
cluster.uuid, nodegroup.uuid)
def test_destroy_nodegroup_that_does_not_exist(self):
self.assertRaises(exception.NodeGroupNotFound,
self.dbapi.destroy_nodegroup, 'c_uuid',
'12345678-9999-0000-aaaa-123456789012')
def test_update_nodegroup(self):
nodegroup = utils.create_test_nodegroup()
old_flavor = nodegroup.flavor_id
new_flavor = 5
self.assertNotEqual(old_flavor, new_flavor)
res = self.dbapi.update_nodegroup(nodegroup.cluster_id, nodegroup.id,
{'flavor_id': new_flavor})
self.assertEqual(new_flavor, res.flavor_id)
def test_update_nodegroup_not_found(self):
uuid = uuidutils.generate_uuid()
self.assertRaises(exception.NodeGroupNotFound,
self.dbapi.update_nodegroup, "c_uuid", uuid,
{'node_count': 5})
#!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2003, 2004, 2005, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.txt or https://www.bfgroup.xyz/b2/LICENSE.txt)
# Test usage of searched-libs: ones that are found via the -l
# switch to the linker/compiler.
import BoostBuild
import os
import string
t = BoostBuild.Tester(use_test_config=False)
# To start with, we have to prepare a library to link with.
t.write("lib/jamroot.jam", "")
t.write("lib/jamfile.jam", "lib test_lib : test_lib.cpp ;")
t.write("lib/test_lib.cpp", """\
#ifdef _WIN32
__declspec(dllexport)
#endif
void foo() {}
""");
t.run_build_system(subdir="lib")
t.expect_addition("lib/bin/$toolset/debug*/test_lib.dll")
# Automatic adjusting of suffixes does not work here, since we need to
# change .dll to .lib.
if ( ( os.name == "nt" ) or os.uname()[0].lower().startswith("cygwin") ) and \
( BoostBuild.get_toolset() != "gcc" ):
t.copy("lib/bin/$toolset/debug*/test_lib.implib", "lib/test_lib.implib")
t.copy("lib/bin/$toolset/debug*/test_lib.dll", "lib/test_lib.dll")
else:
t.copy("lib/bin/$toolset/debug*/test_lib.dll", "lib/test_lib.dll")
# Test that the simplest usage of searched library works.
t.write("jamroot.jam", "")
t.write("jamfile.jam", """\
import path ;
import project ;
exe main : main.cpp helper ;
lib helper : helper.cpp test_lib ;
lib test_lib : : <name>test_lib <search>lib ;
""")
t.write("main.cpp", """\
void helper();
int main() { helper(); }
""")
t.write("helper.cpp", """\
void foo();
void
#if defined(_WIN32)
__declspec(dllexport)
#endif
helper() { foo(); }
""")
t.run_build_system(["-d2"])
t.expect_addition("bin/$toolset/debug*/main.exe")
t.rm("bin/$toolset/debug/main.exe")
t.rm("bin/$toolset/debug/*/main.exe")
# Test that 'unit-test' will correctly add runtime paths to searched libraries.
t.write("jamfile.jam", """\
import path ;
import project ;
import testing ;
project : requirements <hardcode-dll-paths>false ;
unit-test main : main.cpp helper ;
lib helper : helper.cpp test_lib ;
lib test_lib : : <name>test_lib <search>lib ;
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug*/main.passed")
t.rm("bin/$toolset/debug/main.exe")
t.rm("bin/$toolset/debug/*/main.exe")
# Now try using searched lib from static lib. Request shared version of searched
# lib, since we do not have a static one handy.
t.write("jamfile.jam", """\
exe main : main.cpp helper ;
lib helper : helper.cpp test_lib/<link>shared : <link>static ;
lib test_lib : : <name>test_lib <search>lib ;
""")
t.run_build_system(stderr=None)
t.expect_addition("bin/$toolset/debug*/main.exe")
t.expect_addition("bin/$toolset/debug/link-static*/helper.lib")
t.rm("bin/$toolset/debug/main.exe")
t.rm("bin/$toolset/debug/*/main.exe")
# A regression test: a <library> property referring to a searched-lib was being
# mishandled. As a result, we were putting the target name on the command line!
# Note that
# g++ ...... <.>z
# works nicely in some cases, sending the compiler's output to file 'z'. This
# problem shows up when searched libs appear in usage requirements.
t.write("jamfile.jam", "exe main : main.cpp d/d2//a ;")
t.write("main.cpp", """\
void foo();
int main() { foo(); }
""")
t.write("d/d2/jamfile.jam", """\
lib test_lib : : <name>test_lib <search>../../lib ;
lib a : a.cpp : : : <library>test_lib ;
""")
t.write("d/d2/a.cpp", """\
#ifdef _WIN32
__declspec(dllexport) int force_library_creation_for_a;
#endif
""")
t.run_build_system()
# A regression test. Searched targets were not associated with any properties.
# For that reason, if the same searched lib is generated with two different
# properties, we had an error saying they are actualized to the same Jam target
# name.
t.write("jamroot.jam", "")
t.write("a.cpp", "")
# The 'l' library will be built in two variants: 'debug' (directly requested)
# and 'release' (requested from 'a').
t.write("jamfile.jam", """\
exe a : a.cpp l/<variant>release ;
lib l : : <name>l_d <variant>debug ;
lib l : : <name>l_r <variant>release ;
""")
t.run_build_system(["-n"])
# A regression test. Two virtual targets with the same properties were created
# for the 'l' target, which caused an error to be reported when actualizing
# targets. The final error is correct, but we should not create two duplicate
# targets. Thanks to Andre Hentz for finding this bug.
t.write("jamroot.jam", "")
t.write("a.cpp", "")
t.write("jamfile.jam", """\
project a : requirements <runtime-link>static ;
static-lib a : a.cpp l ;
lib l : : <name>l_f ;
""")
t.run_build_system(["-n"])
# A regression test. Virtual targets distinguished by their search paths were
# not differentiated when registered, which caused search paths to be selected
# incorrectly for build requests with multiple feature values.
t.write("jamroot.jam", "")
t.write("a.cpp", "")
t.write("jamfile.jam", """\
exe a : a.cpp l ;
lib l : : <name>l <search>lib32 <address-model>32 ;
lib l : : <name>l <search>lib64 <address-model>64 ;
""")
t.run_build_system(["-n","address-model=32,64"])
t.fail_test(t.stdout().find("lib32") == -1)
t.fail_test(t.stdout().find("lib64") == -1)
# Make sure plain "lib foobar ; " works.
t.write("jamfile.jam", """\
exe a : a.cpp foobar ;
lib foobar ;
""")
t.run_build_system(["-n", "-d2"])
t.fail_test(t.stdout().find("foobar") == -1)
# Make sure plain "lib foo bar ; " works.
t.write("jamfile.jam", """\
exe a : a.cpp foo bar ;
lib foo bar ;
""")
t.run_build_system(["-n", "-d2"])
t.fail_test(t.stdout().find("foo") == -1)
t.fail_test(t.stdout().find("bar") == -1)
t.cleanup()
#!/usr/bin/env python3
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2015-2016 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import test_framework.loginit
# pdb, sys, time and Decimal are used below but were not imported explicitly;
# import them here rather than relying on the wildcard import from util.
import pdb
import sys
import time
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class ParallelTest (BitcoinTestFramework):
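# Exercises parallel block validation (PV): competing blocks, forks and reorgs, using -pvtest to slow down input checking.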
def __init__(self):
self.rep = False
BitcoinTestFramework.__init__(self)
def setup_chain(self):
print ("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 6)
def setup_network(self, split=False):
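# Start four nodes with parallel validation and thin blocks disabled and raised block size limits, then fully interconnect them.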
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-parallel=0", "-rpcservertimeout=0", "-use-thinblocks=0", "-excessiveblocksize=6000000", "-blockprioritysize=6000000", "-blockmaxsize=6000000"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-parallel=0", "-rpcservertimeout=0", "-use-thinblocks=0", "-excessiveblocksize=6000000", "-blockprioritysize=6000000", "-blockmaxsize=6000000"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-parallel=0", "-rpcservertimeout=0", "-use-thinblocks=0", "-excessiveblocksize=6000000", "-blockprioritysize=6000000", "-blockmaxsize=6000000"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-parallel=0", "-rpcservertimeout=0", "-use-thinblocks=0", "-excessiveblocksize=6000000", "-blockprioritysize=6000000", "-blockmaxsize=6000000"]))
interconnect_nodes(self.nodes)
self.is_network_split=False
self.sync_all()
def cleanup_and_reset(self):
# Cleanup - start and connect all the nodes so that we have synced chains before proceeding
# to other tests.
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
interconnect_nodes(self.nodes)
sync_blocks(self.nodes)
print ("Mine more blocks on each node...")
self.nodes[0].generate(25)
sync_blocks(self.nodes)
self.nodes[1].generate(25)
sync_blocks(self.nodes)
self.nodes[2].generate(25)
sync_blocks(self.nodes)
self.nodes[3].generate(25)
sync_blocks(self.nodes)
self.nodes[4].generate(25)
sync_blocks(self.nodes)
self.nodes[5].generate(25)
sync_blocks(self.nodes)
stop_nodes(self.nodes)
wait_bitcoinds()
def repetitiveTest(self):
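# Stress loop: each round, every node creates many small transactions and then mines a block, syncing after each step.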
# get some coins
self.nodeLookup = {}
i = 0
for n in self.nodes:
print("Node %d is %s" % (i,n.url))
print ("generating coins for node")
n.generate(200)
self.sync_all()
i += 1
for i in range(0,200):
# Create many utxo's
print ("round %d: Generating txns..." % i)
for n in self.nodes:
send_to = {}
n.keypoolrefill(100)
n.keypoolrefill(100)
for i in range(200):
send_to[n.getnewaddress()] = Decimal("0.01")
n.sendmany("", send_to)
self.sync_all()
print (" generating blocks...")
i = 0
for n in self.nodes:
try:
n.generate(1)
except JSONRPCException as e:
print (e)
print ("Node ", i, " ", n.url)
pdb.set_trace()
i += 1
print (" syncing...")
self.sync_all()
def run_test (self):
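# With --rep only the repetitive stress test runs; otherwise exercise competing blocks, reorgs and (with --extensive) the extended scenarios below.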
if self.rep:
self.repetitiveTest()
return
print ("Mining blocks with PV off...")
# Mine some blocks on node2 which we will need at the end to generate a few transactions from that node
# in order to create the small block with just a few transactions in it.
self.nodes[2].generate(2)
self.sync_blocks()
# Mine the rest on node0 where we will generate the bigger block.
self.nodes[0].generate(100)
self.sync_blocks()
self.nodes[0].generate(1)
self.sync_blocks()
self.nodes[2].generate(100)
self.sync_blocks()
#stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
#restart nodes with -pvtest off and do not yet connect the nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
# Send tx's which do not propagate
addr2 = self.nodes[2].getnewaddress()
for i in range(50):
self.nodes[0].sendtoaddress(addr2, "0.01")
# Send a few transactions from node2 that will get mined so that we will have at least
# a few inputs to check when the two competing blocks enter parallel validation.
addr0 = self.nodes[0].getnewaddress()
for i in range(5):
self.nodes[2].sendtoaddress(addr0, "0.01")
# Have node0 and node2 each mine a block; the two blocks will compete to advance the chaintip
# when the nodes are connected back together.
print ("Mine two competing blocks...")
self.nodes[0].generate(1)
self.nodes[2].generate(1)
#stop nodes and restart right away
stop_nodes(self.nodes)
wait_bitcoinds()
# Restart nodes with pvtest=1
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=1"]))
print ("Connect nodes...")
interconnect_nodes(self.nodes)
sync_blocks(self.nodes[0:3])
# Wait here to make sure a re-org does not happen on node0; give it some time. If the
# mempool on node0 does not change within 5 seconds then we assume a reorg is not occurring,
# because a reorg would place transactions from the old block back into node0's mempool.
old_mempoolbytes = self.nodes[0].getmempoolinfo()["bytes"]
for i in range(5):
mempoolbytes = self.nodes[0].getmempoolinfo()["bytes"]
if old_mempoolbytes != mempoolbytes:
assert("Reorg happened when it should not - Mempoolbytes has changed")
old_mempoolbytes = mempoolbytes
# node0's block is bigger and was sent and began processing first; however, the block from node2
# should have arrived later and still beaten node0's block. Therefore the chaintip blockhash on
# node2 should now match the chaintip blockhash on node1, while node0 and node1 should not match.
print ("check for re-org " + str(i+1))
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())
assert_not_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
time.sleep(1)
#stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
# Restart nodes with pvtest off.
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
print ("Connect nodes...")
interconnect_nodes(self.nodes)
# Mine a block on node3 and then connect it to the others. This tests the case where a third block
# arrives after the tip has already been advanced.
# This block should propagate to the other nodes but not cause a re-org.
print ("Mine another block...")
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
self.nodes[3].generate(1)
connect_nodes(self.nodes[1],3)
sync_blocks(self.nodes)
# Wait here to make sure a re-org does not happen on node0; give it some time. If the
# mempool on node0 does not change within 5 seconds then we assume a reorg is not occurring,
# because a reorg would place transactions from the old block back into node0's mempool.
for i in range(5):
mempoolbytes = self.nodes[0].getmempoolinfo()["bytes"]
if old_mempoolbytes != mempoolbytes:
assert("Reorg happened when it should not - Mempoolbytes has changed")
old_mempoolbytes = mempoolbytes
print ("check for re-org " + str(i+1))
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())
assert_not_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
assert_not_equal(self.nodes[1].getbestblockhash(), self.nodes[3].getbestblockhash())
time.sleep(1)
# Send some transactions and Mine a block on node 2.
# This should cause node0 and node3 to re-org and all chains should now match.
for i in range(5):
self.nodes[2].sendtoaddress(addr2, .01)
print ("Mine another block on node2 which causes a reorg on node0 and node3...")
self.nodes[2].generate(1)
sync_blocks(self.nodes)
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())
assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
counts = [ x.getblockcount() for x in self.nodes ]
assert_equal(counts, [205,205,205,205])
#stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=0"]))
connect_nodes(self.nodes[1],0)
connect_nodes(self.nodes[1],2)
connect_nodes(self.nodes[1],3)
connect_nodes(self.nodes[1],4)
connect_nodes(self.nodes[1],5)
sync_blocks(self.nodes)
# Mine blocks on each node and then mine 100 to age them such that they are spendable.
print ("Mine more blocks on each node...")
self.nodes[1].generate(5)
sync_blocks(self.nodes)
self.nodes[2].generate(5)
sync_blocks(self.nodes)
self.nodes[3].generate(5)
sync_blocks(self.nodes)
self.nodes[4].generate(5)
sync_blocks(self.nodes)
self.nodes[5].generate(5)
sync_blocks(self.nodes)
self.nodes[1].generate(100)
sync_blocks(self.nodes)
#stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=0"]))
print ("Send more transactions...")
num_range = 50
addrs = [ x.getnewaddress() for x in self.nodes]
for i in range(num_range):
self.nodes[0].sendtoaddress(addrs[0], 0.01)
num_range = 10
for i in range(num_range):
self.nodes[2].sendtoaddress(addrs[2], 0.01)
for i in range(num_range):
self.nodes[3].sendtoaddress(addrs[3], 0.01)
for i in range(num_range):
self.nodes[4].sendtoaddress(addrs[4], 0.01)
for i in range(num_range):
self.nodes[5].sendtoaddress(addrs[5], 0.01)
# Mine 5 competing blocks.
print ("Mine 5 competing blocks...")
self.nodes[0].generate(1)
self.nodes[2].generate(1)
self.nodes[3].generate(1)
self.nodes[4].generate(1)
self.nodes[5].generate(1)
counts = [ x.getblockcount() for x in self.nodes ]
assert_equal(counts, [331,330,331,331,331,331])
# Connect nodes so that all blocks are sent at the same time to node1. The largest block, from node0, will have its validation terminated.
print ("Connect nodes...")
connect_nodes(self.nodes[1],0)
connect_nodes(self.nodes[1],2)
connect_nodes(self.nodes[1],3)
connect_nodes(self.nodes[1],4)
connect_nodes(self.nodes[1],5)
sync_blocks(self.nodes)
# Mine a block which will cause a reorg back to node0
print ("Mine another block...")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Mine 5 more competing blocks of the same size. The last block to arrive will have its validation terminated.
print ("Mine 5 more competing blocks...")
self.nodes[0].generate(1)
self.nodes[2].generate(1)
self.nodes[3].generate(1)
self.nodes[4].generate(1)
self.nodes[5].generate(1)
sync_blocks(self.nodes)
# Mine another block which will cause the nodes to sync to one chain
print ("Mine another block...")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
#stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
# Cleanup by mining more blocks if we need to run extended tests
if self.longTest:
self.cleanup_and_reset()
################################################
# Begin extended tests
################################################
if not self.longTest:
return
###########################################################################################
# Test reorgs
###########################################################################################
###########################################################################################
# Basic reorg - see section below on 4 block attack scenarios. At the end there is a
# repeated test that does basic reorgs multiple times.
###########################################################################################
# 1) Start a slow to validate block race then mine another block pulling one chain ahead.
# - threads on the chain that is now not the most proof of work should be stopped and the
# most proof of work block should proceed.
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
print ("Send more transactions...")
num_range = 15
for i in range(num_range):
self.nodes[0].sendtoaddress(addrs[0], 0.01)
for i in range(num_range):
self.nodes[1].sendtoaddress(addrs[1], 0.01)
for i in range(num_range):
self.nodes[2].sendtoaddress(addrs[2], 0.01)
# Mine a block on each node
print ("Mine a block on each node..")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
self.nodes[2].generate(1)
basecount = self.nodes[0].getblockcount()
# Mine another block on node2 so that its chain will be the longest when we connect it
print ("Mine another block on node2..")
self.nodes[2].generate(1)
bestblock = self.nodes[2].getbestblockhash()
stop_nodes(self.nodes)
wait_bitcoinds()
# Restart nodes with pvtest=1
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=1"]))
# Connect node 0 and 1 so that a block validation race begins
print ("Connect nodes0 and 1...")
connect_nodes(self.nodes[1],0)
# Wait for a little while before connecting node 2
time.sleep(3)
print ("Connect node2...")
counts = [ x.getblockcount() for x in self.nodes ]
print (str(counts))
assert_equal(counts, [basecount,basecount,basecount+1])
interconnect_nodes(self.nodes)
# All chains will sync to node2
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbestblockhash(), bestblock)
assert_equal(self.nodes[1].getbestblockhash(), bestblock)
assert_equal(self.nodes[2].getbestblockhash(), bestblock)
stop_nodes(self.nodes)
wait_bitcoinds()
# cleanup and sync chains for next tests
self.cleanup_and_reset()
###########################################################################################
# Mine two forks of equal work and start slow to validate block race on fork1. Then another
# block arrives on fork2
# - the slow to validate blocks will still continue
# Mine another block on fork2, pulling that fork ahead.
# - threads on fork1 should be stopped, allowing fork2 to connect blocks and pull ahead
print ("Mine two forks.")
# fork 1 (both nodes on fork1 should be synced)
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
interconnect_nodes(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# fork 2
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes[2].generate(1)
stop_nodes(self.nodes)
wait_bitcoinds()
# restart nodes but don't connect them yet
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
# Create txns on node0 and 1 to setup for a slow to validate race between those nodes.
print ("Send more transactions...")
num_range = 15
for i in range(num_range):
self.nodes[0].sendtoaddress(addrs[0], 0.01)
for i in range(num_range):
self.nodes[1].sendtoaddress(addrs[1], 0.01)
# Mine a block on each node
print ("Mine a block on each node..")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
self.nodes[2].generate(1)
basecount = self.nodes[0].getblockcount()
# Mine another block on node2 so that its chain will be the longest when we connect it
print ("Mine another block on node2..")
self.nodes[2].generate(1)
bestblock = self.nodes[2].getbestblockhash()
stop_nodes(self.nodes)
wait_bitcoinds()
# Restart nodes with pvtest=1
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=1", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=1", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=1", "-whitelist=127.0.0.1"]))
# Connect node 0 and 1 so that a block validation race begins
print ("Connect nodes0 and 1...")
connect_nodes(self.nodes[1],0)
# Wait for a little while before connecting node 2
time.sleep(3)
print ("Connect node2...")
counts = [ x.getblockcount() for x in self.nodes ]
print (str(counts))
assert_equal(counts, [basecount,basecount,basecount+1])
interconnect_nodes(self.nodes)
# All chains will sync to node2
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbestblockhash(), bestblock)
assert_equal(self.nodes[1].getbestblockhash(), bestblock)
assert_equal(self.nodes[2].getbestblockhash(), bestblock)
stop_nodes(self.nodes)
wait_bitcoinds()
# cleanup and sync chains for next tests
self.cleanup_and_reset()
##############################################################################################
# Mine two forks of equal work and start slow to validate 4 block race on fork1. Then another
# block arrives on fork2
# - the slow to validate blocks will still continue
# Mine another block on fork2, pulling that fork ahead.
# - threads on fork1 should be stopped, allowing fork2 to connect blocks and pull ahead
print ("Mine two forks.")
# fork 1 (both nodes on fork1 should be synced)
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
interconnect_nodes(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# fork 2
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes[4].generate(1)
stop_nodes(self.nodes)
wait_bitcoinds()
# restart nodes but don't connect them yet
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
# Create txns on node0 and 1 to setup for a slow to validate race between those nodes.
print ("Send more transactions...")
num_range = 15
for i in range(num_range):
self.nodes[0].sendtoaddress(addrs[0], 0.01)
for i in range(num_range):
self.nodes[1].sendtoaddress(addrs[1], 0.01)
for i in range(num_range):
self.nodes[3].sendtoaddress(addrs[1], 0.01)
for i in range(num_range):
self.nodes[4].sendtoaddress(addrs[1], 0.01)
# Mine a block on each node
print ("Mine a block on each node..")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
self.nodes[2].generate(1)
self.nodes[3].generate(1)
self.nodes[4].generate(1)
basecount = self.nodes[0].getblockcount()
# Mine another block on node4 so that its chain will be the longest when we connect it
print ("Mine another block on node4..")
self.nodes[4].generate(1)
bestblock = self.nodes[4].getbestblockhash()
stop_nodes(self.nodes)
wait_bitcoinds()
# Restart nodes with pvtest=1
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=1", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=1", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=1", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=1", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=1", "-whitelist=127.0.0.1"]))
# Connect node 0 and 1 so that a block validation race begins
print ("Connect nodes0, 1, 2 and 3...")
connect_nodes(self.nodes[1],0)
# Wait for a little while before connecting node 4
time.sleep(3)
print ("Connect node4...")
counts = [ x.getblockcount() for x in self.nodes ]
print (str(counts))
assert_equal(counts, [basecount,basecount,basecount, basecount, basecount+1])
interconnect_nodes(self.nodes)
# All chains will sync to node4
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbestblockhash(), bestblock)
assert_equal(self.nodes[1].getbestblockhash(), bestblock)
assert_equal(self.nodes[2].getbestblockhash(), bestblock)
assert_equal(self.nodes[3].getbestblockhash(), bestblock)
assert_equal(self.nodes[4].getbestblockhash(), bestblock)
stop_nodes(self.nodes)
wait_bitcoinds()
# cleanup and sync chains for next tests
self.cleanup_and_reset()
###########################################################################################
# 1) Mine two forks of equal work and start a slow to validate block race on fork1. Then another
# block arrives on fork2 pulling that fork ahead.
# - threads on fork1 should be stopped, allowing fork2 to connect blocks and pull ahead
# 2) As fork2 is being validated, fork1 pulls ahead
# - fork2 is now stopped and fork1 begins to validate
# 3) Do step 2 repeatedly, going back and forth between the forks
print ("Mine three forks.")
# fork 1 (both nodes on fork1 should be synced)
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
interconnect_nodes(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# fork 2
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes[2].generate(1)
# fork 3
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes[3].generate(1)
stop_nodes(self.nodes)
wait_bitcoinds()
# restart nodes but don't connect them yet
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0", "-whitelist=127.0.0.1"]))
# Create txns on node0 and 1 to setup for a slow to validate race between those nodes.
print ("Send more transactions...")
num_range = 15
for i in range(num_range):
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.01)
for i in range(num_range):
self.nodes[1].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
# in this test we also generate txns on node 2 so that all nodes will validate slowly.
for i in range(num_range):
self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
# Mine a block on each node
print ("Mine a block on each node..")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
self.nodes[2].generate(1)
self.nodes[3].generate(1)
basecount = self.nodes[0].getblockcount()
# Mine another block on node2 so that its chain will be the longest when we connect it
print ("Mine another block on node2..")
self.nodes[2].generate(1)
# Mine two blocks on node3 so that its chain will be the longest when we connect it
print ("Mine 2 blocks on node3..")
self.nodes[3].generate(1)
self.nodes[3].generate(1)
bestblock = self.nodes[3].getbestblockhash()
stop_nodes(self.nodes)
wait_bitcoinds()
# Restart nodes with pvtest=1
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=1", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=1", "-whitelist=127.0.0.1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=1", "-whitelist=127.0.0.1"]))
# Connect node 0 and 1 so that a block validation race begins
print ("Connect nodes 0 and 1...")
connect_nodes(self.nodes[1],0)
# Wait for a little while before connecting node 2 (fork2)
time.sleep(3)
print ("Connect node2 - fork2...")
counts = [ x.getblockcount() for x in self.nodes ]
print (str(counts))
assert_equal(counts, [basecount,basecount,basecount+1])
interconnect_nodes(self.nodes)
# Wait for a little while before connecting node 3 (fork3)
time.sleep(3)
print ("Connect node3 - fork3...")
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug=","-pvtest=1", "-whitelist=127.0.0.1"]))
counts = [ x.getblockcount() for x in self.nodes ]
interconnect_nodes(self.nodes)
print (str(counts))
assert_equal(counts, [basecount-1,basecount-1,basecount+1, basecount+2])
interconnect_nodes(self.nodes)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbestblockhash(), bestblock)
assert_equal(self.nodes[1].getbestblockhash(), bestblock)
assert_equal(self.nodes[2].getbestblockhash(), bestblock)
assert_equal(self.nodes[3].getbestblockhash(), bestblock)
stop_nodes(self.nodes)
wait_bitcoinds()
# cleanup and sync chains for next tests
self.cleanup_and_reset()
###########################################################################################
# 1) Large reorg - can we do a 144 block reorg?
print ("Starting repeating many competing blocks test")
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug=","-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug=","-pvtest=0"]))
print ("Mine 144 blocks on each chain...")
self.nodes[0].generate(144)
self.nodes[1].generate(144)
print ("Connect nodes for larg reorg...")
connect_nodes(self.nodes[1],0)
sync_blocks(self.nodes)
print ("Mine another block on node5 causing large reorg...")
self.nodes[1].generate(1)
sync_blocks(self.nodes)
# Mine another block which will cause some nodes to reorg and sync to the same chain.
print ("Mine another block on node0...")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
# cleanup and sync chains for next tests
self.cleanup_and_reset()
###########################################################################################
# Test the 4 block attack scenarios - use -pvtest=true to slow down the checking of inputs.
###########################################################################################
####################################################################
# Mine 4 blocks of all different sizes
# - the smallest block should win
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0"]))
print ("Send more transactions...")
num_range = 15
for i in range(num_range):
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.01)
num_range = 14
for i in range(num_range):
self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), 0.01)
num_range = 13
for i in range(num_range):
self.nodes[3].sendtoaddress(self.nodes[3].getnewaddress(), 0.01)
num_range = 2
for i in range(num_range):
self.nodes[4].sendtoaddress(self.nodes[4].getnewaddress(), 0.01)
# Mine 4 competing blocks.
print ("Mine 4 competing blocks...")
self.nodes[0].generate(1)
self.nodes[2].generate(1)
self.nodes[3].generate(1)
self.nodes[4].generate(1)
# stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
# start nodes with -pvtest set to true.
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=1"]))
# Connect nodes so that all blocks are sent at same time to node1.
connect_nodes(self.nodes[1],0)
connect_nodes(self.nodes[1],2)
connect_nodes(self.nodes[1],3)
connect_nodes(self.nodes[1],4)
sync_blocks(self.nodes)
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[4].getbestblockhash())
# stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=0"]))
connect_nodes(self.nodes[1],0)
connect_nodes(self.nodes[1],2)
connect_nodes(self.nodes[1],3)
connect_nodes(self.nodes[1],4)
connect_nodes(self.nodes[1],5)
sync_blocks(self.nodes)
# Mine a block which will cause all nodes to update their chains
print ("Mine another block...")
self.nodes[1].generate(1)
time.sleep(2) #wait for blocks to propagate
sync_blocks(self.nodes)
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[3].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[4].getbestblockhash())
#stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
# cleanup and sync chains for next tests
self.cleanup_and_reset()
########################################################################################################
# Mine 4 blocks all the same size and get them to start validating, and then send a 5th that is smaller
# - the smaller, last-arriving block should win.
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=0"]))
print ("Send more transactions...")
num_range = 15
for i in range(num_range):
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.01)
num_range = 15
for i in range(num_range):
self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), 0.01)
num_range = 15
for i in range(num_range):
self.nodes[3].sendtoaddress(self.nodes[3].getnewaddress(), 0.01)
num_range = 15
for i in range(num_range):
self.nodes[4].sendtoaddress(self.nodes[4].getnewaddress(), 0.01)
num_range = 2
for i in range(num_range):
self.nodes[5].sendtoaddress(self.nodes[5].getnewaddress(), 0.01)
# stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
# start nodes with -pvtest set to true.
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=1"]))
# Connect nodes so that first 4 blocks are sent at same time to node1.
connect_nodes(self.nodes[1],0)
connect_nodes(self.nodes[1],2)
connect_nodes(self.nodes[1],3)
connect_nodes(self.nodes[1],4)
time.sleep(5) #wait for blocks to start processing
# Connect 5th block and this one should win the race
connect_nodes(self.nodes[1],5)
sync_blocks(self.nodes)
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[5].getbestblockhash())
#stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=0"]))
connect_nodes(self.nodes[1],0)
connect_nodes(self.nodes[1],2)
connect_nodes(self.nodes[1],3)
connect_nodes(self.nodes[1],4)
connect_nodes(self.nodes[1],5)
# Mine a block which will cause all nodes to update their chains
print ("Mine another block...")
self.nodes[1].generate(1)
time.sleep(2) #wait for blocks to propagate
sync_blocks(self.nodes)
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[3].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[4].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[5].getbestblockhash())
# stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
# cleanup and sync chains for next tests
self.cleanup_and_reset()
############################################################################################################
# Mine 4 blocks all the same size and get them to start validating and then send a 5th that is the same size
# - the first block arriving should win
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=0"]))
print ("Send more transactions...")
num_range = 10
for i in range(num_range):
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.01)
num_range = 10
for i in range(num_range):
self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), 0.01)
num_range = 10
for i in range(num_range):
self.nodes[3].sendtoaddress(self.nodes[3].getnewaddress(), 0.01)
num_range = 10
for i in range(num_range):
self.nodes[4].sendtoaddress(self.nodes[4].getnewaddress(), 0.01)
num_range = 10
for i in range(num_range):
self.nodes[5].sendtoaddress(self.nodes[5].getnewaddress(), 0.01)
# stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
# start nodes with -pvtest set to true.
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=1"]))
# Connect nodes so that first 4 blocks are sent 1 second apart to node1.
connect_nodes(self.nodes[1],0)
time.sleep(1)
connect_nodes(self.nodes[1],2)
time.sleep(1)
connect_nodes(self.nodes[1],3)
time.sleep(1)
connect_nodes(self.nodes[1],4)
time.sleep(1) #wait for blocks to start processing
# Connect the 5th block; it will be terminated, and the first block to connect, from node0, should win the race
connect_nodes(self.nodes[1],5)
sync_blocks(self.nodes)
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())
#stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=0"]))
connect_nodes(self.nodes[1],0)
connect_nodes(self.nodes[1],2)
connect_nodes(self.nodes[1],3)
connect_nodes(self.nodes[1],4)
connect_nodes(self.nodes[1],5)
# Mine a block which will cause all nodes to update their chains
print ("Mine another block...")
self.nodes[1].generate(1)
time.sleep(2) #wait for blocks to propagate
sync_blocks(self.nodes)
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[3].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[4].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[5].getbestblockhash())
# stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
#########################################################################################################
# Mine 4 blocks all the same size and get them to start validating and then send a 5th that is bigger
# - the first block arriving should win
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=0"]))
print ("Send more transactions...")
num_range = 10
for i in range(num_range):
self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.01)
num_range = 10
for i in range(num_range):
self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), 0.01)
num_range = 10
for i in range(num_range):
self.nodes[3].sendtoaddress(self.nodes[3].getnewaddress(), 0.01)
num_range = 10
for i in range(num_range):
self.nodes[4].sendtoaddress(self.nodes[4].getnewaddress(), 0.01)
num_range = 20
for i in range(num_range):
self.nodes[5].sendtoaddress(self.nodes[5].getnewaddress(), 0.01)
# stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
# start nodes with -pvtest set to true.
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=1"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=1"]))
# Connect nodes so that first 4 blocks are sent 1 second apart to node1.
connect_nodes(self.nodes[1],0)
time.sleep(1)
connect_nodes(self.nodes[1],2)
time.sleep(1)
connect_nodes(self.nodes[1],3)
time.sleep(1)
connect_nodes(self.nodes[1],4)
time.sleep(1) #wait for blocks to start processing
# Connect the 5th block; it will be terminated, and the first block to connect, from node0, should win the race
connect_nodes(self.nodes[1],5)
sync_blocks(self.nodes)
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())
# stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=0"]))
connect_nodes(self.nodes[1],0)
connect_nodes(self.nodes[1],2)
connect_nodes(self.nodes[1],3)
connect_nodes(self.nodes[1],4)
connect_nodes(self.nodes[1],5)
# Mine a block which will cause all nodes to update their chains
print ("Mine another block...")
self.nodes[1].generate(1)
time.sleep(2) #wait for blocks to propagate
sync_blocks(self.nodes)
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[3].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[4].getbestblockhash())
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[5].getbestblockhash())
# stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
# cleanup and sync chains for next tests
self.cleanup_and_reset()
#################################################################################
# Repeated 5 blocks mined with a reorg after
#################################################################################
# Repeatedly mine 5 blocks at a time on each node so that many blocks arrive at the same time
# and race each other to see which can extend the chain the fastest.
# This is intended as a stress test of the 4-block scenario, run while blocks are being
# mined and reorgs are sometimes happening at the same time.
print ("Starting repeating many competing blocks test")
self.nodes.append(start_node(0, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(4, self.options.tmpdir, ["-pvtest=0"]))
self.nodes.append(start_node(5, self.options.tmpdir, ["-pvtest=0"]))
connect_nodes(self.nodes[1],0)
connect_nodes(self.nodes[1],2)
connect_nodes(self.nodes[1],3)
connect_nodes(self.nodes[1],4)
connect_nodes(self.nodes[1],5)
sync_blocks(self.nodes)
for i in range(100):
print ("Mine many more competing blocks...")
self.nodes[0].generate(1)
self.nodes[2].generate(1)
self.nodes[3].generate(1)
self.nodes[4].generate(1)
self.nodes[5].generate(1)
sync_blocks(self.nodes)
# Mine another block which will cause some nodes to reorg and sync to the same chain.
print ("%d: Mine another block..." % i)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# stop nodes
stop_nodes(self.nodes)
wait_bitcoinds()
# cleanup and sync chains for next tests
self.cleanup_and_reset()
def Test():
t = ParallelTest()
t.drop_to_pdb = True
# t.rep = True
t.longTest = False
bitcoinConf = {
"debug": ["net", "blk", "thin", "mempool", "req", "bench", "evict"],
}
flags = standardFlags()
t.main(flags, bitcoinConf, None)
if __name__ == '__main__':
p = ParallelTest()
if "--rep" in sys.argv:
print("Repetitive test")
p.rep = True
sys.argv.remove("--rep")
else:
p.rep = False
if "--extensive" in sys.argv:
p.longTest = True
# we must remove duplicate 'extensive' arg here
while True:
try:
sys.argv.remove('--extensive')
except ValueError:
break
print ("Running extensive tests")
else:
p.longTest = False
p.main ()
"""Test the Network Configuration."""
from ipaddress import IPv4Address
from unittest.mock import MagicMock, Mock, patch
import ifaddr
from homeassistant.components import network
from homeassistant.components.network.const import (
ATTR_ADAPTERS,
ATTR_CONFIGURED_ADAPTERS,
DOMAIN,
MDNS_TARGET_IP,
STORAGE_KEY,
STORAGE_VERSION,
)
from homeassistant.setup import async_setup_component
_NO_LOOPBACK_IPADDR = "192.168.1.5"
_LOOPBACK_IPADDR = "127.0.0.1"
def _mock_socket(sockname):
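# Return a MagicMock socket whose getsockname() reports the supplied address list.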
mock_socket = MagicMock()
mock_socket.getsockname = Mock(return_value=sockname)
return mock_socket
def _mock_socket_exception(exc):
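# Return a MagicMock socket whose getsockname() raises the supplied exception.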
mock_socket = MagicMock()
mock_socket.getsockname = Mock(side_effect=exc)
return mock_socket
def _generate_mock_adapters():
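# Build four mock ifaddr adapters: loopback lo0, IPv6-only eth0, routable eth1 and link-local vtun0.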
mock_lo0 = Mock(spec=ifaddr.Adapter)
mock_lo0.nice_name = "lo0"
mock_lo0.ips = [ifaddr.IP("127.0.0.1", 8, "lo0")]
mock_lo0.index = 0
mock_eth0 = Mock(spec=ifaddr.Adapter)
mock_eth0.nice_name = "eth0"
mock_eth0.ips = [ifaddr.IP(("2001:db8::", 1, 1), 8, "eth0")]
mock_eth0.index = 1
mock_eth1 = Mock(spec=ifaddr.Adapter)
mock_eth1.nice_name = "eth1"
mock_eth1.ips = [ifaddr.IP("192.168.1.5", 23, "eth1")]
mock_eth1.index = 2
mock_vtun0 = Mock(spec=ifaddr.Adapter)
mock_vtun0.nice_name = "vtun0"
mock_vtun0.ips = [ifaddr.IP("169.254.3.2", 16, "vtun0")]
mock_vtun0.index = 3
return [mock_eth0, mock_lo0, mock_eth1, mock_vtun0]
async def test_async_detect_interfaces_setting_non_loopback_route(hass, hass_storage):
"""Test without default interface config and the route returns a non-loopback address."""
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([_NO_LOOPBACK_IPADDR]),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
network_obj = hass.data[DOMAIN]
assert network_obj.configured_adapters == []
assert network_obj.adapters == [
{
"auto": False,
"index": 1,
"default": False,
"enabled": False,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"index": 0,
"auto": False,
"default": False,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"index": 2,
"auto": True,
"default": True,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"index": 3,
"auto": False,
"default": False,
"enabled": False,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_async_detect_interfaces_setting_loopback_route(hass, hass_storage):
"""Test without default interface config and the route returns a loopback address."""
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([_LOOPBACK_IPADDR]),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
network_obj = hass.data[DOMAIN]
assert network_obj.configured_adapters == []
assert network_obj.adapters == [
{
"index": 1,
"auto": True,
"default": False,
"enabled": True,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"index": 0,
"auto": False,
"default": True,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"index": 2,
"auto": True,
"default": False,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"index": 3,
"auto": False,
"default": False,
"enabled": False,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_async_detect_interfaces_setting_empty_route(hass, hass_storage):
"""Test without default interface config and the route returns nothing."""
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([]),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
network_obj = hass.data[DOMAIN]
assert network_obj.configured_adapters == []
assert network_obj.adapters == [
{
"auto": True,
"index": 1,
"default": False,
"enabled": True,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"auto": False,
"index": 0,
"default": False,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"auto": True,
"index": 2,
"default": False,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"auto": False,
"index": 3,
"default": False,
"enabled": False,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_async_detect_interfaces_setting_exception(hass, hass_storage):
"""Test without default interface config and the route throws an exception."""
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket_exception(AttributeError),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
network_obj = hass.data[DOMAIN]
assert network_obj.configured_adapters == []
assert network_obj.adapters == [
{
"auto": True,
"index": 1,
"default": False,
"enabled": True,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"auto": False,
"index": 0,
"default": False,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"auto": True,
"index": 2,
"default": False,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"auto": False,
"index": 3,
"default": False,
"enabled": False,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_interfaces_configured_from_storage(hass, hass_storage):
"""Test settings from storage are preferred over auto configure."""
hass_storage[STORAGE_KEY] = {
"version": STORAGE_VERSION,
"key": STORAGE_KEY,
"data": {ATTR_CONFIGURED_ADAPTERS: ["eth0", "eth1", "vtun0"]},
}
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([_NO_LOOPBACK_IPADDR]),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
network_obj = hass.data[DOMAIN]
assert network_obj.configured_adapters == ["eth0", "eth1", "vtun0"]
assert network_obj.adapters == [
{
"auto": False,
"index": 1,
"default": False,
"enabled": True,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"auto": False,
"index": 0,
"default": False,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"auto": True,
"index": 2,
"default": True,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"auto": False,
"index": 3,
"default": False,
"enabled": True,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_interfaces_configured_from_storage_websocket_update(
hass, hass_ws_client, hass_storage
):
"""Test settings from storage can be updated via websocket api."""
hass_storage[STORAGE_KEY] = {
"version": STORAGE_VERSION,
"key": STORAGE_KEY,
"data": {ATTR_CONFIGURED_ADAPTERS: ["eth0", "eth1", "vtun0"]},
}
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([_NO_LOOPBACK_IPADDR]),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
network_obj = hass.data[DOMAIN]
assert network_obj.configured_adapters == ["eth0", "eth1", "vtun0"]
ws_client = await hass_ws_client(hass)
await ws_client.send_json({"id": 1, "type": "network"})
response = await ws_client.receive_json()
assert response["success"]
assert response["result"][ATTR_CONFIGURED_ADAPTERS] == ["eth0", "eth1", "vtun0"]
assert response["result"][ATTR_ADAPTERS] == [
{
"auto": False,
"index": 1,
"default": False,
"enabled": True,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"auto": False,
"index": 0,
"default": False,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"auto": True,
"index": 2,
"default": True,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"auto": False,
"index": 3,
"default": False,
"enabled": True,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
await ws_client.send_json(
{"id": 2, "type": "network/configure", "config": {ATTR_CONFIGURED_ADAPTERS: []}}
)
response = await ws_client.receive_json()
assert response["result"][ATTR_CONFIGURED_ADAPTERS] == []
await ws_client.send_json({"id": 3, "type": "network"})
response = await ws_client.receive_json()
assert response["result"][ATTR_CONFIGURED_ADAPTERS] == []
assert response["result"][ATTR_ADAPTERS] == [
{
"auto": False,
"index": 1,
"default": False,
"enabled": False,
"ipv4": [],
"ipv6": [
{
"address": "2001:db8::",
"network_prefix": 8,
"flowinfo": 1,
"scope_id": 1,
}
],
"name": "eth0",
},
{
"auto": False,
"index": 0,
"default": False,
"enabled": False,
"ipv4": [{"address": "127.0.0.1", "network_prefix": 8}],
"ipv6": [],
"name": "lo0",
},
{
"auto": True,
"index": 2,
"default": True,
"enabled": True,
"ipv4": [{"address": "192.168.1.5", "network_prefix": 23}],
"ipv6": [],
"name": "eth1",
},
{
"auto": False,
"index": 3,
"default": False,
"enabled": False,
"ipv4": [{"address": "169.254.3.2", "network_prefix": 16}],
"ipv6": [],
"name": "vtun0",
},
]
async def test_async_get_source_ip_matching_interface(hass, hass_storage):
"""Test getting the source ip address with interface matching."""
hass_storage[STORAGE_KEY] = {
"version": STORAGE_VERSION,
"key": STORAGE_KEY,
"data": {ATTR_CONFIGURED_ADAPTERS: ["eth1"]},
}
with patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
), patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket(["192.168.1.5"]),
):
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
assert await network.async_get_source_ip(hass, MDNS_TARGET_IP) == "192.168.1.5"
async def test_async_get_source_ip_interface_not_match(hass, hass_storage):
"""Test getting the source ip address with interface does not match."""
hass_storage[STORAGE_KEY] = {
"version": STORAGE_VERSION,
"key": STORAGE_KEY,
"data": {ATTR_CONFIGURED_ADAPTERS: ["vtun0"]},
}
with patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
), patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket(["192.168.1.5"]),
):
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
assert await network.async_get_source_ip(hass, MDNS_TARGET_IP) == "169.254.3.2"
async def test_async_get_source_ip_cannot_determine_target(hass, hass_storage):
"""Test getting the source ip address when getsockname fails."""
hass_storage[STORAGE_KEY] = {
"version": STORAGE_VERSION,
"key": STORAGE_KEY,
"data": {ATTR_CONFIGURED_ADAPTERS: ["eth1"]},
}
with patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
), patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([None]),
):
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
assert await network.async_get_source_ip(hass, MDNS_TARGET_IP) == "192.168.1.5"
async def test_async_get_ipv4_broadcast_addresses_default(hass, hass_storage):
"""Test getting ipv4 broadcast addresses when only the default address is enabled."""
hass_storage[STORAGE_KEY] = {
"version": STORAGE_VERSION,
"key": STORAGE_KEY,
"data": {ATTR_CONFIGURED_ADAPTERS: ["eth1"]},
}
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket(["192.168.1.5"]),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
assert await network.async_get_ipv4_broadcast_addresses(hass) == {
IPv4Address("255.255.255.255")
}
async def test_async_get_ipv4_broadcast_addresses_multiple(hass, hass_storage):
"""Test getting ipv4 broadcast addresses when multiple adapters are enabled."""
hass_storage[STORAGE_KEY] = {
"version": STORAGE_VERSION,
"key": STORAGE_KEY,
"data": {ATTR_CONFIGURED_ADAPTERS: ["eth1", "vtun0"]},
}
with patch(
"homeassistant.components.network.util.socket.socket",
return_value=_mock_socket([_LOOPBACK_IPADDR]),
), patch(
"homeassistant.components.network.util.ifaddr.get_adapters",
return_value=_generate_mock_adapters(),
):
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
assert await network.async_get_ipv4_broadcast_addresses(hass) == {
IPv4Address("255.255.255.255"),
IPv4Address("192.168.1.255"),
IPv4Address("169.254.255.255"),
}
|
|
# Django settings for unit test project.
from __future__ import unicode_literals
import os
import sys
from django.core.urlresolvers import reverse_lazy
from cmsplugin_cascade.extra_fields.config import PluginExtraFieldsConfig
from cmsplugin_cascade.utils import format_lazy
DEBUG = True
BASE_DIR = os.path.dirname(__file__)
# Root directory for this Django project
PROJECT_ROOT = os.path.abspath(os.path.join(BASE_DIR, os.path.pardir))
# Directory where working files, such as media and databases are kept
WORK_DIR = os.path.join(PROJECT_ROOT, 'workdir')
if not os.path.isdir(WORK_DIR):
os.makedirs(WORK_DIR)
SITE_ID = 1
ROOT_URLCONF = 'bs4demo.urls'
SECRET_KEY = 'secret'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(WORK_DIR, 'db.sqlite3'),
},
}
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
#'reversion',
'djangocms_text_ckeditor',
'django_select2',
'cmsplugin_cascade',
'cmsplugin_cascade.clipboard',
'cmsplugin_cascade.extra_fields',
'cmsplugin_cascade.icon',
'cmsplugin_cascade.sharable',
'cmsplugin_cascade.segmentation',
'cms',
#'cms_bootstrap3',
'cmsplugin_bs4forcascade',
'adminsortable2',
'menus',
'treebeard',
'filer',
'easy_thumbnails',
'sass_processor',
'sekizai',
'bs4demo',
]
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.gzip.GZipMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware',
]
# silence false-positive warning 1_6.W001
# https://docs.djangoproject.com/en/1.8/ref/checks/#backwards-compatibility
#TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(WORK_DIR, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory that holds static files.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(WORK_DIR, 'static')
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'sass_processor.finders.CssFinder',
]
STATICFILES_DIRS = [
('node_modules', os.path.join(PROJECT_ROOT, 'node_modules')),
]
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'context_processors': (
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.template.context_processors.csrf',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'sekizai.context_processors.sekizai',
'cms.context_processors.cms_settings',
'bs4demo.context_processors.cascade',
),
},
}]
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'English'),
)
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'formatters': {
'simple': {
'format': '[%(asctime)s %(module)s] %(levelname)s: %(message)s'
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'simple',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
'propagate': True,
},
},
}
#############################################################
# Application specific settings
if len(sys.argv) > 1 and sys.argv[1] == 'test':
CMS_TEMPLATES = (
('testing.html', "Default Page"),
)
else:
CMS_TEMPLATES = (
('bs4demo/main.html', "Main Content"),
('bs4demo/wrapped.html', "Wrapped Bootstrap Column"),
)
CMS_SEO_FIELDS = True
CMS_CACHE_DURATIONS = {
'content': 3600,
'menus': 3600,
'permissions': 86400,
}
CMSPLUGIN_CASCADE_PLUGINS = (
'cmsplugin_cascade.segmentation',
'cmsplugin_cascade.generic',
'cmsplugin_cascade.leaflet',
'cmsplugin_cascade.link',
#'cmsplugin_cascade.bootstrap3',
'cmsplugin_bs4forcascade.bootstrap4',
'bs4demo',
)
CMSPLUGIN_CASCADE = {
'alien_plugins': ('TextPlugin', 'TextLinkPlugin',),
'plugins_with_sharables': {
'BootstrapImagePlugin': ('image_shapes', 'image_width_responsive', 'image_width_fixed',
'image_height', 'resize_options',),
'BootstrapPicturePlugin': ('image_shapes', 'responsive_heights', 'image_size', 'resize_options',),
'BootstrapButtonPlugin': ('button_type', 'button_size', 'button_options', 'icon_font',),
'TextLinkPlugin': ('link', 'target',),
},
'exclude_hiding_plugin': ('SegmentPlugin', 'Badge'),
'allow_plugin_hiding': True,
'leaflet': {'default_position': {'lat': 50.0, 'lng': 12.0, 'zoom': 6}},
'cache_strides': True,
}
CASCADE_WORKAREA_GLOSSARY = {
    'breakpoints': ['xs', 'sm', 'md', 'lg', 'xl'],
    'container_max_widths': {'xs': 576, 'sm': 767, 'md': 991, 'lg': 1199, 'xl': 1980},
'fluid': False,
'media_queries': {
'xs': ['(max-width: 576px)'],
'sm': ['(min-width: 576px)', '(max-width: 767px)'],
'md': ['(min-width: 768px)', '(max-width: 991px)'],
'lg': ['(min-width: 992px)', '(max-width: 1199px)'],
'xl': ['(min-width: 1200px)'],
},
}
CMS_PLACEHOLDER_CONF = {
# this placeholder is used in templates/main.html, it shows how to
# scaffold a djangoCMS page starting with an empty placeholder
'Main Content': {
'plugins': ['Bootstrap4ContainerPlugin', 'BootstrapJumbotronPlugin'],
'parent_classes': {'Bootstrap4ContainerPlugin': None, 'BootstrapJumbotronPlugin': None},
'glossary': CASCADE_WORKAREA_GLOSSARY,
},
# this placeholder is used in templates/wrapped.html, it shows how to
# add content to an existing Bootstrap column
'Bootstrap Column': {
'plugins': ['BootstrapRowPlugin', 'TextPlugin', ],
'parent_classes': {'BootstrapRowPlugin': None},
'require_parent': False,
'glossary': CASCADE_WORKAREA_GLOSSARY,
},
}
CKEDITOR_SETTINGS = {
'language': '{{ language }}',
'skin': 'moono',
'toolbar': 'CMS',
'stylesSet': format_lazy('default:{}', reverse_lazy('admin:cascade_texticon_wysiwig_config')),
}
SELECT2_CSS = 'node_modules/select2/dist/css/select2.min.css'
SELECT2_JS = 'node_modules/select2/dist/js/select2.min.js'
FILER_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS = True
FILER_DUMP_PAYLOAD = True
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters',
)
THUMBNAIL_HIGH_RESOLUTION = False
THUMBNAIL_PRESERVE_EXTENSIONS = True
THUMBNAIL_OPTIMIZE_COMMAND = {
'png': '/opt/local/bin/optipng {filename}',
'gif': '/opt/local/bin/optipng {filename}',
'jpeg': '/opt/local/bin/jpegoptim {filename}',
}
SASS_PROCESSOR_INCLUDE_DIRS = [
os.path.join(PROJECT_ROOT, 'node_modules'),
]
SASS_PROCESSOR_ROOT = STATIC_ROOT
# to access files such as fonts via staticfiles finders
NODE_MODULES_URL = STATIC_URL + 'node_modules/'
try:
from .private_settings import *
except ImportError:
pass
|
|
import sys
import os.path
import platform
from PyQt5.QtWidgets import QApplication, QDialog, QVBoxLayout, QGridLayout, QLabel, QLineEdit, QCheckBox, QPushButton, QMessageBox, QFileDialog, QColorDialog, QSpinBox, QTableWidget
from PyQt5.QtGui import QIcon, QColor
from PyQt5.QtCore import QRegExp, Qt
class BaseDialog(QDialog):
"""The base class of all our config windows. All common setup should be done in here."""
def __init__(self, parent, config, modal=True, streamer_icon=None, title=None, geometry=None, resizable=False):
super().__init__(parent)
self.parent = parent # The parent object (probably an instance of QMainWindow)
self.config = config # Config database's connection
self.modal = modal # If modal, the focus will be held by this window
self.streamer_icon = streamer_icon # Path to the icon for this window
self.window_geometry = geometry
self.is_resizable = resizable
self.result_data = None # For use if a dialog needs to return something back to the caller
self.setWindowTitle(title)
self.setModal(self.modal)
self.setup_window_icon()
self.setup_layout()
self.setup_geometry()
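        # setup_dialog_layout() is not defined in BaseDialog; each subclass
        # (e.g. AppConfigDialog) builds its own widgets in it.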
self.setup_dialog_layout()
if self.is_resizable:
            self.setSizeGripEnabled(True)
self.rejected.connect(self.closeEvent)
self.made_changes = False
def setup_geometry(self):
if self.window_geometry:
if self.is_resizable:
                # QWidget.setGeometry() also needs a position; resize() is enough here
                self.resize(self.window_geometry[0], self.window_geometry[1])
self.setMinimumSize(self.window_geometry[0], self.window_geometry[1])
else:
self.setFixedSize(self.window_geometry[0], self.window_geometry[1])
center_point = QApplication.desktop().availableGeometry().center()
frame_geometry = self.frameGeometry()
frame_geometry.moveCenter(center_point)
self.move(frame_geometry.topLeft())
def setup_window_icon(self):
# Set the same window icon as the parent
if self.streamer_icon is not None:
self.setWindowIcon(QIcon(self.streamer_icon))
def setup_layout(self):
self.layout = QGridLayout(self)
self.setLayout(self.layout)
def update_colors(self, fg_override=None, bg_override=None):
"""This applies the foreground color to all the widgets in the list. This method is not called in this base class."""
if fg_override is not None:
foreground_color = fg_override
else:
foreground_color = self.config.get_config_value("foreground-color")
if bg_override is not None:
background_color = bg_override
else:
background_color = self.config.get_config_value("background-color")
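        # The doubled braces keep literal {}'s for Qt's stylesheet syntax while
        # str.format() substitutes the two colors.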
self.setStyleSheet("QDialog QLabel {{ color: {0} }} QDialog {{ background: {1} }}".format(foreground_color, background_color))
self.update()
def save_changes(self):
        raise NotImplementedError("Implement me!")
def closeEvent(self, event=None):
if self.made_changes:
            reply = QMessageBox.question(self, "Save changes?", "Save changes before dialog closes?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
self.save_changes()
class AppConfigDialog(BaseDialog):
"""The window with application's global configuration settings."""
cache_max_value = 999999
def __init__(self, parent, config, modal=True, streamer_icon=None, title=None):
super().__init__(parent, config, modal=modal, streamer_icon=streamer_icon, title="Application configuration", geometry=(500, 260))
if self.config.get_config_value("db-version") >= 2:
self.window_geometry = (500, 320)
self.setup_geometry()
self.original_values = {}
self.load_config_values()
self.update_colors()
def setup_dialog_layout(self):
row = 0
label_livestreamer = QLabel("Livestreamer path", self)
self.layout.addWidget(label_livestreamer, row, 0)
self.input_livestreamer = QLineEdit(self)
self.input_livestreamer.setReadOnly(True)
self.layout.addWidget(self.input_livestreamer, row, 1)
button_livestreamer = QPushButton("Browse...", self)
button_livestreamer.clicked.connect(self.on_livestreamer_click)
self.layout.addWidget(button_livestreamer, row, 2)
row += 1
label_player = QLabel("Player path", self)
self.layout.addWidget(label_player, row, 0)
self.input_player = QLineEdit(self)
self.input_player.setReadOnly(True)
self.layout.addWidget(self.input_player, row, 1)
button_player = QPushButton("Browse...", self)
button_player.clicked.connect(self.on_player_click)
self.layout.addWidget(button_player, row, 2)
row += 1
label_fgcolor = QLabel("Foreground color", self)
self.layout.addWidget(label_fgcolor, row, 0)
self.input_fgcolor = QLineEdit(self)
self.input_fgcolor.setReadOnly(True)
self.layout.addWidget(self.input_fgcolor, row, 1)
button_fgcolor = QPushButton("Pick...", self)
button_fgcolor.clicked.connect(self.on_fgcolor_click)
self.layout.addWidget(button_fgcolor, row, 2)
row += 1
label_bgcolor = QLabel("Background color", self)
        self.layout.addWidget(label_bgcolor, row, 0)
        self.input_bgcolor = QLineEdit(self)
        self.input_bgcolor.setReadOnly(True)
        self.layout.addWidget(self.input_bgcolor, row, 1)
        button_bgcolor = QPushButton("Pick...", self)
        button_bgcolor.clicked.connect(self.on_bgcolor_click)
        self.layout.addWidget(button_bgcolor, row, 2)
row += 1
label_check_auto_refresh = QLabel("Stream quality auto-refresh", self)
self.layout.addWidget(label_check_auto_refresh, row, 0)
self.check_auto_refresh = QCheckBox(self)
self.check_auto_refresh.setTristate(False)
self.layout.addWidget(self.check_auto_refresh, row, 1)
row += 1
label_cache_lifetime = QLabel("Stream quality cache\nlifetime (in minutes)", self)
self.layout.addWidget(label_cache_lifetime, row, 0)
self.input_cache_lifetime = QSpinBox(self)
self.input_cache_lifetime.setRange(0, self.cache_max_value)
self.input_cache_lifetime.setSuffix(" minute(s)")
self.layout.addWidget(self.input_cache_lifetime, row, 1)
if self.config.get_config_value("db-version") >= 2:
row += 1
label_enable_systray_icon = QLabel("Enable system tray icon", self)
self.layout.addWidget(label_enable_systray_icon, row, 0)
self.check_enable_systray_icon = QCheckBox(self)
self.check_enable_systray_icon.setTristate(False)
self.layout.addWidget(self.check_enable_systray_icon, row, 1)
row += 1
label_minimize_to_systray = QLabel("Minimize to system tray", self)
self.layout.addWidget(label_minimize_to_systray, row, 0)
self.check_minimize_to_systray = QCheckBox(self)
self.check_minimize_to_systray.setTristate(False)
self.layout.addWidget(self.check_minimize_to_systray, row, 1)
row += 1
label_close_to_systray = QLabel("Close to system tray", self)
self.layout.addWidget(label_close_to_systray, row, 0)
self.check_close_to_systray = QCheckBox(self)
self.check_close_to_systray.setTristate(False)
self.layout.addWidget(self.check_close_to_systray, row, 1)
if self.config.get_config_value("db-version") >= 3:
row += 1
label_remember_position = QLabel("Remember window position", self)
self.layout.addWidget(label_remember_position, row, 0)
self.check_remember_position = QCheckBox(self)
self.check_remember_position.setTristate(False)
self.layout.addWidget(self.check_remember_position, row, 1)
row += 1
button_close = QPushButton("Save && close", self)
button_close.clicked.connect(self.save_changes_and_close)
self.layout.addWidget(button_close, row, 2)
def closeEvent(self, event=None):
# if Esc key was pressed, the event is None (and the method gets called the second time, with populated event)
if event is None:
return
if self.changes_made():
reply = QMessageBox.question(self, "Save changes?", "The dialog will close. Save changes?", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if reply == QMessageBox.Yes:
self.save_changes()
def load_config_values(self, update_widgets=True):
self.original_values = {
"input_livestreamer": self.config.get_config_value("livestreamer-path"),
"input_player": self.config.get_config_value("player-path"),
"input_fgcolor": self.config.get_config_value("foreground-color"),
"input_bgcolor": self.config.get_config_value("background-color"),
"check_auto_refresh": bool(self.config.get_config_value("auto-refresh-quality")),
"input_cache_lifetime": int(self.config.get_config_value("quality-cache-persistance")),
}
if self.config.get_config_value("db-version") >= 2:
self.original_values["check_enable_systray_icon"] = bool(self.config.get_config_value("enable-systray-icon"))
self.original_values["check_minimize_to_systray"] = bool(self.config.get_config_value("minimize-to-systray"))
self.original_values["check_close_to_systray"] = bool(self.config.get_config_value("close-to-systray"))
if self.config.get_config_value("db-version") >= 3:
self.original_values["check_remember_position"] = bool(self.config.get_config_value("remember-window-position"))
if not update_widgets:
return
self.input_livestreamer.setText(self.original_values["input_livestreamer"])
self.input_player.setText(self.original_values["input_player"])
self.input_fgcolor.setText(self.original_values["input_fgcolor"])
self.input_bgcolor.setText(self.original_values["input_bgcolor"])
self.check_auto_refresh.setChecked(self.original_values["check_auto_refresh"])
self.input_cache_lifetime.setValue(self.original_values["input_cache_lifetime"])
if self.config.get_config_value("db-version") >= 2:
self.check_enable_systray_icon.setChecked(self.original_values["check_enable_systray_icon"])
self.check_minimize_to_systray.setChecked(self.original_values["check_minimize_to_systray"])
self.check_close_to_systray.setChecked(self.original_values["check_close_to_systray"])
if self.config.get_config_value("db-version") >= 3:
self.check_remember_position.setChecked(self.original_values["check_remember_position"])
def changes_made(self):
base = self.original_values["input_livestreamer"] != self.input_livestreamer.text() \
or self.original_values["input_player"] != self.input_player.text() \
or self.original_values["input_fgcolor"] != self.input_fgcolor.text() \
or self.original_values["input_bgcolor"] != self.input_bgcolor.text() \
or self.original_values["check_auto_refresh"] != self.check_auto_refresh.isChecked() \
or self.original_values["input_cache_lifetime"] != self.input_cache_lifetime.value()
extended = base
if self.config.get_config_value("db-version") >= 2:
extended = extended \
or self.original_values["check_enable_systray_icon"] != self.check_enable_systray_icon.isChecked() \
or self.original_values["check_minimize_to_systray"] != self.check_minimize_to_systray.isChecked() \
or self.original_values["check_close_to_systray"] != self.check_close_to_systray.isChecked()
if self.config.get_config_value("db-version") >= 3:
extended = extended \
or self.original_values["check_remember_position"] != self.check_remember_position.isChecked()
return extended
def save_changes(self):
self.config.set_config_value("livestreamer-path", self.input_livestreamer.text())
self.config.set_config_value("player-path", self.input_player.text())
self.config.set_config_value("foreground-color", self.input_fgcolor.text())
self.config.set_config_value("background-color", self.input_bgcolor.text())
self.config.set_config_value("auto-refresh-quality", int(self.check_auto_refresh.isChecked()))
self.config.set_config_value("quality-cache-persistance", int(self.input_cache_lifetime.value()))
if self.config.get_config_value("db-version") >= 2:
self.config.set_config_value("enable-systray-icon", int(self.check_enable_systray_icon.isChecked()))
self.config.set_config_value("minimize-to-systray", int(self.check_minimize_to_systray.isChecked()))
self.config.set_config_value("close-to-systray", int(self.check_close_to_systray.isChecked()))
if self.config.get_config_value("db-version") >= 3:
self.config.set_config_value("remember-window-position", int(self.check_remember_position.isChecked()))
self.load_config_values(update_widgets=False)
def save_changes_and_close(self):
if self.changes_made():
self.save_changes()
self.accept()
def get_filename_from_dialog(self, existing_path, dialog_caption):
if os.path.exists(existing_path):
existing_path = os.path.dirname(existing_path) # assume the path always points to a file
else:
existing_path = os.path.dirname(sys.argv[0])
file_filters = []
if platform.system().lower() == "windows":
file_filters.append("Windows executables (*.exe *.bat)")
file_filters.append("All files (*.*)")
path, selected_filter = QFileDialog.getOpenFileName(self, dialog_caption, existing_path, ';;'.join(file_filters))
return path
def on_livestreamer_click(self):
existing_path = self.input_livestreamer.text()
path = self.get_filename_from_dialog(existing_path, "Select livestreamer executable")
if path and os.path.isfile(path):
self.input_livestreamer.setText(path)
def on_player_click(self):
existing_path = self.input_player.text()
path = self.get_filename_from_dialog(existing_path, "Select VLC player executable")
if path and os.path.isfile(path):
self.input_player.setText(path)
def on_fgcolor_click(self):
initial = self.input_fgcolor.text()
color = QColorDialog.getColor(QColor(initial), self, "Choose foreground color")
if color.isValid():
self.input_fgcolor.setText(color.name())
self.update_colors(fg_override=color.name(), bg_override=self.input_bgcolor.text())
def on_bgcolor_click(self):
initial = self.input_bgcolor.text()
color = QColorDialog.getColor(QColor(initial), self, "Choose background color")
if color.isValid():
self.input_bgcolor.setText(color.name())
self.update_colors(fg_override=self.input_fgcolor.text(), bg_override=color.name())
class AddEditChannelsDialog(BaseDialog):
    """The window for adding or editing streamer's channels."""
    entry_max_size = 255
def __init__(self, parent, config, title=None, modal=True, streamer_icon=None, streamer=None, channel_data=None):
if streamer is None:
raise Exception("No streamer defined!")
self.streamer = streamer
self.channel_data = channel_data
super().__init__(parent, config, modal=modal, streamer_icon=streamer_icon, title=title, geometry=(400, 150))
def setup_dialog_layout(self):
row = 0
label_name = QLabel("Channel name", self)
self.layout.addWidget(label_name, row, 0)
self.input_name = QLineEdit(self)
self.input_name.setMaxLength(self.entry_max_size)
self.input_name.setToolTip("Name the channel for your reference, e.g. Kitchen cats")
self.layout.addWidget(self.input_name, row, 1)
row += 1
label_url = QLabel("Relative URL", self)
self.layout.addWidget(label_url, row, 0)
self.input_url = QLineEdit(self)
self.input_url.setMaxLength(self.entry_max_size)
self.input_url.setToolTip("URL path to the channel relative to the streamer, e.g. kitchencatschannel\nfor twitch.tv. When livestreamer is run, the resulting URL is composed\ninto http://www.twitch.tv/kitchencatschannel, where kitchencatschannel\nis the value entered into this edit box.")
self.layout.addWidget(self.input_url, row, 1)
row += 1
label_check = QLabel("Is favorite channel", self)
self.layout.addWidget(label_check, row, 0)
self.check_fav = QCheckBox(self)
self.check_fav.setTristate(False)
self.check_fav.setToolTip("Mark this channel as your most favorite channel")
self.layout.addWidget(self.check_fav, row, 1)
row += 1
self.button_save = QPushButton("Save && close", self)
self.button_save.clicked.connect(self.save_changes)
self.layout.addWidget(self.button_save, row, 0)
# Apply the foreground and background color to the widgets
self.update_colors()
# Load the data, if provided, into the entry widgets
if self.channel_data is not None:
self.input_name.setText(self.channel_data["name"])
self.input_url.setText(self.channel_data["url"])
self.check_fav.setChecked(bool(self.channel_data["favorite"]))
def save_changes(self):
channel_name = self.input_name.text().strip()
channel_url = self.input_url.text().strip()
if channel_name == "":
self.input_name.setFocus(True)
QMessageBox.warning(self, "Input error", "Please name the channel.", QMessageBox.Ok, QMessageBox.Ok)
return
if channel_url == "":
self.input_url.setFocus(True)
QMessageBox.warning(self, "Input error", "Please provide the channel's URL.", QMessageBox.Ok, QMessageBox.Ok)
return
set_result = True
if self.channel_data is None:
# We're adding a new record
channel = self.config.get_streamer_channel(self.streamer["name"], channel_name)
if channel is not None:
self.input_name.setFocus(True)
QMessageBox.warning(self, "Input error", "Channel name already exists!\nName: {}\nURL: {}".format(channel["name"], channel["url"]), QMessageBox.Ok, QMessageBox.Ok)
return
channel = self.config.get_channel_by_url(self.streamer["name"], channel_url)
if channel is not None:
self.input_url.setFocus(True)
QMessageBox.warning(self, "Input error", "Channel URL already exists!\nName: {}\nURL: {}".format(channel["name"], channel["url"]), QMessageBox.Ok, QMessageBox.Ok)
return
self.config.add_new_channel(self.streamer["name"], channel_name, channel_url, self.check_fav.isChecked())
else:
# We're editing an existing record
if channel_name != self.channel_data["name"]:
# User changed the name of the channel
channel = self.config.get_streamer_channel(self.streamer["name"], channel_name)
if channel is not None:
self.input_name.setFocus(True)
QMessageBox.warning(self, "Input error", "Channel name already exists!\nName: {}\nURL: {}".format(channel["name"], channel["url"]), QMessageBox.Ok, QMessageBox.Ok)
return
if channel_url != self.channel_data["url"]:
# User changed the channel's URL
channel = self.config.get_channel_by_url(self.streamer["name"], channel_url)
if channel is not None:
self.input_url.setFocus(True)
QMessageBox.warning(self, "Input error", "Channel URL already exists!\nName: {}\nURL: {}".format(channel["name"], channel["url"]), QMessageBox.Ok, QMessageBox.Ok)
return
if channel_name != self.channel_data["name"] or channel_url != self.channel_data["url"] or bool(self.channel_data["favorite"]) != self.check_fav.isChecked():
self.config.update_existing_channel(self.streamer["name"], channel_name, channel_url, self.check_fav.isChecked(), self.channel_data["name"], self.channel_data["url"])
else:
set_result = False
if set_result:
self.result_data = {
"name": channel_name,
"url": channel_url,
"favorite": self.check_fav.isChecked(),
}
self.done(QDialog.Accepted)
|
|
'''
Implements the targetcli target related UI.
This file is part of LIO(tm).
Copyright (c) 2011-2014 by Datera, Inc
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
from ui_node import UINode, UIRTSLibNode
from ui_backstore import dedup_so_name
from rtslib import RTSLibError, RTSLibBrokenLink, utils
from rtslib import NodeACL, NetworkPortal, MappedLUN
from rtslib import Target, TPG, LUN
class UIFabricModule(UIRTSLibNode):
'''
A fabric module UI.
'''
def __init__(self, fabric_module, parent):
UIRTSLibNode.__init__(self, fabric_module.name, fabric_module, parent)
self.cfs_cwd = fabric_module.path
self.refresh()
if self.rtsnode.has_feature('discovery_auth'):
for param in ['userid', 'password',
'mutual_userid', 'mutual_password',
'enable']:
self.define_config_group_param('discovery_auth',
param, 'string')
self.refresh()
def ui_getgroup_discovery_auth(self, auth_attr):
'''
This is the backend method for getting discovery_auth attributes.
@param auth_attr: The auth attribute to get the value of.
@type auth_attr: str
@return: The auth attribute's value
@rtype: str
'''
value = None
if auth_attr == 'password':
value = self.rtsnode.discovery_password
elif auth_attr == 'userid':
value = self.rtsnode.discovery_userid
elif auth_attr == 'mutual_password':
value = self.rtsnode.discovery_mutual_password
elif auth_attr == 'mutual_userid':
value = self.rtsnode.discovery_mutual_userid
elif auth_attr == 'enable':
value = self.rtsnode.discovery_enable_auth
return value
def ui_setgroup_discovery_auth(self, auth_attr, value):
'''
This is the backend method for setting discovery auth attributes.
@param auth_attr: The auth attribute to set the value of.
@type auth_attr: str
@param value: The auth's value
@type value: str
'''
self.assert_root()
if value is None:
value = ''
if auth_attr == 'password':
self.rtsnode.discovery_password = value
elif auth_attr == 'userid':
self.rtsnode.discovery_userid = value
elif auth_attr == 'mutual_password':
self.rtsnode.discovery_mutual_password = value
elif auth_attr == 'mutual_userid':
self.rtsnode.discovery_mutual_userid = value
elif auth_attr == 'enable':
self.rtsnode.discovery_enable_auth = value
def refresh(self):
self._children = set([])
for target in self.rtsnode.targets:
self.shell.log.debug("Found target %s under fabric module %s."
% (target.wwn, target.fabric_module))
if target.has_feature('tpgts'):
UIMultiTPGTarget(target, self)
else:
UITarget(target, self)
def summary(self):
no_targets = len(self._children)
if no_targets != 1:
msg = "%d Targets" % no_targets
else:
msg = "%d Target" % no_targets
return (msg, None)
def ui_command_create(self, wwn=None):
'''
Creates a new target. The I{wwn} format depends on the transport(s)
        supported by the fabric module. If the I{wwn} is omitted, then a
        target will be created using either a randomly generated WWN of the
        proper type, or the first unused WWN in the list of possible WWNs if
        one is available. If WWNs are constrained to a list (i.e. for hardware
        target addresses) and all WWNs are in use, the target creation will
        fail. Use the B{info} command to get more information about WWN type
        and possible values.
SEE ALSO
========
B{info}
'''
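        # Illustrative only (the WWN below is hypothetical); from the targetcli
        # shell one would run something like:
        #   /iscsi> create iqn.2003-01.org.linux-iscsi.test:target1
        # or simply "create" to let the fabric module pick or generate a WWN.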
self.assert_root()
target = Target(self.rtsnode, wwn, mode='create')
wwn = target.wwn
if target.has_feature('tpgts'):
ui_target = UIMultiTPGTarget(target, self)
self.shell.log.info("Created target %s." % wwn)
return ui_target.ui_command_create()
else:
ui_target = UITarget(target, self)
self.shell.log.info("Created target %s." % wwn)
return self.new_node(ui_target)
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
spec = self.rtsnode.spec
if current_param == 'wwn' and spec['wwn_list'] is not None:
existing_wwns = [child.wwn for child in self.rtsnode.targets]
completions = [wwn for wwn in spec['wwn_list']
if wwn.startswith(text)
if wwn not in existing_wwns]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, wwn):
'''
Recursively deletes the target with the specified I{wwn}, and all
objects hanging under it.
SEE ALSO
========
B{create}
'''
self.assert_root()
target = Target(self.rtsnode, wwn, mode='lookup')
target.delete()
self.shell.log.info("Deleted Target %s." % wwn)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'wwn':
wwns = [child.name for child in self.children]
completions = [wwn for wwn in wwns if wwn.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_info(self):
'''
        Displays information about the fabric module, notably the supported
        transport(s) and accepted B{wwn} format(s), as well as the supported
        features.
'''
spec = self.rtsnode.spec
self.shell.log.info("Fabric module name: %s" % self.name)
self.shell.log.info("ConfigFS path: %s" % self.rtsnode.path)
if spec['wwn_list'] is not None:
self.shell.log.info("Allowed WWNs list (%s type): %s"
% (spec['wwn_type'],
', '.join(spec['wwn_list'])))
else:
self.shell.log.info("Supported WWN type: %s" % spec['wwn_type'])
self.shell.log.info("Fabric module specfile: %s"
% self.rtsnode.spec_file)
self.shell.log.info("Fabric module features: %s"
% ', '.join(spec['features']))
self.shell.log.info("Corresponding kernel module: %s"
% spec['kernel_module'])
def ui_command_version(self):
'''
Displays the target fabric module version.
'''
version = "Target fabric module %s: %s" \
% (self.rtsnode.name, self.rtsnode.version)
self.shell.con.display(version.strip())
class UIMultiTPGTarget(UIRTSLibNode):
'''
A generic target UI that has multiple TPGs.
'''
def __init__(self, target, parent):
UIRTSLibNode.__init__(self, target.wwn, target, parent)
self.cfs_cwd = target.path
self.refresh()
def refresh(self):
self._children = set([])
for tpg in self.rtsnode.tpgs:
UITPG(tpg, self)
def summary(self):
if not self.rtsnode.fabric_module.is_valid_wwn(self.rtsnode.wwn):
description = "INVALID WWN"
is_healthy = False
else:
is_healthy = None
no_tpgs = len(self._children)
if no_tpgs != 1:
description = "%d TPGs" % no_tpgs
else:
description = "%d TPG" % no_tpgs
return (description, is_healthy)
def ui_command_create(self, tag=None):
'''
Creates a new Target Portal Group within the target. The I{tag} must be
a strictly positive integer value. If omitted, the next available
Target Portal Group Tag (TPG) will be used.
SEE ALSO
========
B{delete}
'''
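        # Illustrative usage from the targetcli shell (tag values are examples):
        #   create 1   -> creates TPG 1 explicitly
        #   create     -> picks the next free positive tag automatically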
self.assert_root()
if tag is None:
tags = [tpg.tag for tpg in self.rtsnode.tpgs]
for index in range(1048576):
if index not in tags and index > 0:
tag = index
break
if tag is None:
self.shell.log.error("Cannot find an available TPG Tag.")
return
else:
self.shell.log.info("Selected TPG Tag %d." % tag)
else:
try:
tag = int(tag)
except ValueError:
self.shell.log.error("The TPG Tag must be an integer value.")
return
else:
                if tag < 1:
                    self.shell.log.error("The TPG Tag must be a positive integer value.")
return
tpg = TPG(self.rtsnode, tag, mode='create')
if self.shell.prefs['auto_enable_tpgt']:
tpg.enable = True
self.shell.log.info("Created TPG %s." % tpg.tag)
ui_tpg = UITPG(tpg, self)
return self.new_node(ui_tpg)
def ui_command_delete(self, tag):
'''
Deletes the Target Portal Group with TPG I{tag} from the target. The
I{tag} must be a positive integer matching an existing TPG.
SEE ALSO
========
B{create}
'''
self.assert_root()
if tag.startswith("tpg"):
tag = tag[3:]
tpg = TPG(self.rtsnode, int(tag), mode='lookup')
tpg.delete()
self.shell.log.info("Deleted TPG %s." % tag)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'tag':
tags = [child.name[4:] for child in self.children]
completions = [tag for tag in tags if tag.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UITPG(UIRTSLibNode):
'''
A generic TPG UI.
'''
def __init__(self, tpg, parent):
name = "tpg%d" % tpg.tag
UIRTSLibNode.__init__(self, name, tpg, parent)
self.cfs_cwd = tpg.path
self.refresh()
UILUNs(tpg, self)
if tpg.has_feature('acls'):
UINodeACLs(self.rtsnode, self)
if tpg.has_feature('nps'):
UIPortals(self.rtsnode, self)
def summary(self):
if self.rtsnode.has_feature('nexus'):
description = ("nexus WWN %s" % self.rtsnode.nexus_wwn, True)
elif self.rtsnode.enable:
description = ("enabled", True)
else:
description = ("disabled", False)
return description
def ui_command_enable(self):
'''
Enables the TPG.
SEE ALSO
========
B{disable status}
'''
self.assert_root()
if self.rtsnode.enable:
self.shell.log.info("The TPG is already enabled.")
else:
self.rtsnode.enable = True
self.shell.log.info("The TPG has been enabled.")
def ui_command_disable(self):
'''
Disables the TPG.
SEE ALSO
========
B{enable status}
'''
self.assert_root()
if self.rtsnode.enable:
self.rtsnode.enable = False
self.shell.log.info("The TPG has been disabled.")
else:
self.shell.log.info("The TPG is already disabled.")
class UITarget(UITPG):
'''
A generic target UI merged with its only TPG.
'''
def __init__(self, target, parent):
UITPG.__init__(self, TPG(target, 1), parent)
self._name = target.wwn
self.target = target
self.rtsnode.enable = True
def summary(self):
if not self.target.fabric_module.is_valid_wwn(self.target.wwn):
return ("INVALID WWN", False)
else:
return UITPG.summary(self)
class UINodeACLs(UINode):
'''
A generic UI for node ACLs.
'''
def __init__(self, tpg, parent):
UINode.__init__(self, "acls", parent)
self.tpg = tpg
self.cfs_cwd = "%s/acls" % tpg.path
self.refresh()
def refresh(self):
self._children = set([])
for node_acl in self.tpg.node_acls:
UINodeACL(node_acl, self)
def summary(self):
no_acls = len(self._children)
if no_acls != 1:
msg = "%d ACLs" % no_acls
else:
msg = "%d ACL" % no_acls
return (msg, None)
def ui_command_create(self, wwn, add_mapped_luns=None):
'''
Creates a Node ACL for the initiator node with the specified I{wwn}.
The node's I{wwn} must match the expected WWN Type of the target's
fabric module.
If I{add_mapped_luns} is omitted, the global parameter
B{auto_add_mapped_luns} will be used, else B{true} or B{false} are
accepted. If B{true}, then after creating the ACL, mapped LUNs will be
automatically created for all existing LUNs.
SEE ALSO
========
B{delete}
'''
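        # Illustrative only (the initiator WWN below is hypothetical):
        #   /iscsi/.../tpg1/acls> create iqn.1994-05.com.redhat:client1
        # With auto_add_mapped_luns enabled, every existing LUN gets a mapped LUN.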
self.assert_root()
spec = self.tpg.parent_target.fabric_module.spec
if not utils.is_valid_wwn(spec['wwn_type'], wwn):
self.shell.log.error("'%s' is not a valid %s WWN."
% (wwn, spec['wwn_type']))
return
add_mapped_luns = \
self.ui_eval_param(add_mapped_luns, 'bool',
self.shell.prefs['auto_add_mapped_luns'])
try:
node_acl = NodeACL(self.tpg, wwn, mode="create")
except RTSLibError, msg:
self.shell.log.error(str(msg))
return
else:
self.shell.log.info("Created Node ACL for %s"
% node_acl.node_wwn)
ui_node_acl = UINodeACL(node_acl, self)
if add_mapped_luns:
for lun in self.tpg.luns:
MappedLUN(node_acl, lun.lun, lun.lun, write_protect=False)
self.shell.log.info("Created mapped LUN %d." % lun.lun)
self.refresh()
return self.new_node(ui_node_acl)
def ui_command_delete(self, wwn):
'''
Deletes the Node ACL with the specified I{wwn}.
SEE ALSO
========
B{create}
'''
self.assert_root()
node_acl = NodeACL(self.tpg, wwn, mode='lookup')
node_acl.delete()
self.shell.log.info("Deleted Node ACL %s." % wwn)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'wwn':
wwns = [acl.node_wwn for acl in self.tpg.node_acls]
completions = [wwn for wwn in wwns if wwn.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UINodeACL(UIRTSLibNode):
'''
A generic UI for a node ACL.
'''
def __init__(self, node_acl, parent):
UIRTSLibNode.__init__(self, node_acl.node_wwn, node_acl, parent)
if self.rtsnode.has_feature("acls_tcq_depth"):
self.define_config_group_param(
'attribute', 'tcq_depth', 'string', "Command queue depth.", True)
self.cfs_cwd = node_acl.path
self.refresh()
def ui_getgroup_attribute(self, attribute):
'''
This is the backend method for getting attributes.
@param attribute: The attribute to get the value of.
@type attribute: str
@return: The attribute's value
@rtype: arbitrary
'''
if attribute == 'tcq_depth' and self.rtsnode.has_feature("acls_tcq_depth"):
return self.rtsnode.tcq_depth
else:
return self.rtsnode.get_attribute(attribute)
def ui_setgroup_attribute(self, attribute, value):
'''
This is the backend method for setting attributes.
@param attribute: The attribute to set the value of.
@type attribute: str
@param value: The attribute's value
@type value: arbitrary
'''
self.assert_root()
if attribute == 'tcq_depth' and self.rtsnode.has_feature("acls_tcq_depth"):
self.rtsnode.tcq_depth = value
else:
self.rtsnode.set_attribute(attribute, value)
def refresh(self):
self._children = set([])
for mlun in self.rtsnode.mapped_luns:
UIMappedLUN(mlun, self)
def summary(self):
no_mluns = len(self._children)
if no_mluns != 1:
msg = "%d Mapped LUNs" % no_mluns
else:
msg = "%d Mapped LUN" % no_mluns
return (msg, None)
def ui_command_create(self, mapped_lun, tpg_lun, write_protect=None):
'''
Creates a mapping to one of the TPG LUNs for the initiator referenced
by the ACL. The provided I{tpg_lun} will appear to that initiator as
LUN I{mapped_lun}. If the I{write_protect} flag is set to B{1}, the
initiator will not have write access to the Mapped LUN.
SEE ALSO
========
B{delete}
'''
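        # Illustrative usage (numbers are examples): expose TPG LUN 0 to this
        # initiator as LUN 0, read-only:
        #   create mapped_lun=0 tpg_lun=0 write_protect=1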
self.assert_root()
try:
tpg_lun = int(tpg_lun)
mapped_lun = int(mapped_lun)
except ValueError:
self.shell.log.error("Incorrect LUN value.")
return
if tpg_lun in (ml.tpg_lun.lun for ml in self.rtsnode.mapped_luns):
self.shell.log.warning(
"Warning: TPG LUN %d already mapped to this NodeACL" % tpg_lun)
mlun = MappedLUN(self.rtsnode, mapped_lun, tpg_lun, write_protect)
ui_mlun = UIMappedLUN(mlun, self)
self.shell.log.info("Created Mapped LUN %s." % mlun.mapped_lun)
return self.new_node(ui_mlun)
def ui_command_delete(self, mapped_lun):
'''
Deletes the specified I{mapped_lun}.
SEE ALSO
========
B{create}
'''
self.assert_root()
mlun = MappedLUN(self.rtsnode, mapped_lun)
mlun.delete()
self.shell.log.info("Deleted Mapped LUN %s." % mapped_lun)
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'mapped_lun':
mluns = [str(mlun.mapped_lun) for mlun in self.rtsnode.mapped_luns]
completions = [mlun for mlun in mluns if mlun.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
class UIMappedLUN(UIRTSLibNode):
'''
A generic UI for MappedLUN objects.
'''
def __init__(self, mapped_lun, parent):
name = "mapped_lun%d" % mapped_lun.mapped_lun
UIRTSLibNode.__init__(self, name, mapped_lun, parent)
self.cfs_cwd = mapped_lun.path
self.refresh()
def summary(self):
mapped_lun = self.rtsnode
is_healthy = True
try:
tpg_lun = mapped_lun.tpg_lun
except RTSLibBrokenLink:
description = "BROKEN LUN LINK"
is_healthy = False
else:
if mapped_lun.write_protect:
access_mode = 'ro'
else:
access_mode = 'rw'
description = "lun%d (%s)" % (tpg_lun.lun, access_mode)
return (description, is_healthy)
class UILUNs(UINode):
'''
A generic UI for TPG LUNs.
'''
def __init__(self, tpg, parent):
UINode.__init__(self, "luns", parent)
self.cfs_cwd = "%s/lun" % tpg.path
self.tpg = tpg
self.refresh()
def refresh(self):
self._children = set([])
for lun in self.tpg.luns:
UILUN(lun, self)
def summary(self):
no_luns = len(self._children)
if no_luns != 1:
msg = "%d LUNs" % no_luns
else:
msg = "%d LUN" % no_luns
return (msg, None)
def ui_command_create(self, storage_object, lun=None,
add_mapped_luns=None):
'''
Creates a new LUN in the Target Portal Group, attached to a storage
object. If the I{lun} parameter is omitted, the first available LUN in
the TPG will be used. If present, it must be a number greater than 0.
Alternatively, the syntax I{lunX} where I{X} is a positive number is
also accepted.
The I{storage_object} must be the path of an existing storage object,
        i.e. B{/backstores/pscsi0/mydisk} to reference the B{mydisk} storage
object of the virtual HBA B{pscsi0}.
If I{add_mapped_luns} is omitted, the global parameter
B{auto_add_mapped_luns} will be used, else B{true} or B{false} are
accepted. If B{true}, then after creating the LUN, mapped LUNs will be
automatically created for all existing node ACLs, mapping the new LUN.
SEE ALSO
========
B{delete}
'''
self.assert_root()
if lun is None:
luns = [lun.lun for lun in self.tpg.luns]
for index in range(1048576):
if index not in luns:
lun = index
break
if lun is None:
self.shell.log.error("Cannot find an available LUN.")
return
else:
self.shell.log.info("Selected LUN %d." % lun)
else:
try:
if lun.startswith('lun'):
lun = lun[3:]
lun = int(lun)
except ValueError:
self.shell.log.error("The LUN must be an integer value.")
return
else:
if lun < 0:
self.shell.log.error("The LUN cannot be negative.")
return
add_mapped_luns = \
self.ui_eval_param(add_mapped_luns, 'bool',
self.shell.prefs['auto_add_mapped_luns'])
try:
storage_object = self.get_node(storage_object).rtsnode
except ValueError:
self.shell.log.error("Invalid storage object %s." % storage_object)
return
lun_object = LUN(self.tpg, lun, storage_object)
self.shell.log.info("Created LUN %s." % lun_object.lun)
ui_lun = UILUN(lun_object, self)
if add_mapped_luns:
for acl in self.tpg.node_acls:
mapped_lun = lun
existing_mluns = [mlun.mapped_lun for mlun in acl.mapped_luns]
if mapped_lun in existing_mluns:
tentative_mlun = 0
while mapped_lun == lun:
if tentative_mlun not in existing_mluns:
mapped_lun = tentative_mlun
self.shell.log.warning(
"Mapped LUN %d already " % lun
+ "exists in ACL %s, using %d instead."
% (acl.node_wwn, mapped_lun))
else:
tentative_mlun += 1
mlun = MappedLUN(acl, mapped_lun, lun, write_protect=False)
self.shell.log.info("Created mapped LUN %d in node ACL %s"
% (mapped_lun, acl.node_wwn))
self.parent.refresh()
return self.new_node(ui_lun)
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'storage_object':
storage_objects = []
for backstore in self.get_node('/backstores').children:
for storage_object in backstore.children:
storage_objects.append(storage_object.path)
completions = [so for so in storage_objects if so.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, lun):
'''
Deletes the supplied LUN from the Target Portal Group. The I{lun} must
be a positive number matching an existing LUN.
Alternatively, the syntax I{lunX} where I{X} is a positive number is
also accepted.
SEE ALSO
========
B{create}
'''
self.assert_root()
if lun.lower().startswith("lun"):
lun = lun[3:]
try:
lun = int(lun)
lun_object = LUN(self.tpg, lun)
        except Exception:
raise RTSLibError("Invalid LUN")
lun_object.delete()
self.shell.log.info("Deleted LUN %s." % lun)
# Refresh the TPG as we need to also refresh acls MappedLUNs
self.parent.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'lun':
luns = [str(lun.lun) for lun in self.tpg.luns]
completions = [lun for lun in luns if lun.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
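# Note: the LUN auto-selection in UILUNs.ui_command_create above simply scans
# indexes 0..1048575 and takes the first one that the TPG does not use yet;
# the same idea picks a fallback mapped_lun for each node ACL. A minimal
# standalone sketch of that selection (illustrative only, not part of
# targetcli/rtslib):
def _first_free_index(used, limit=1048576):
    '''Return the lowest index in [0, limit) not present in used, else None.'''
    used = set(used)
    for index in range(limit):
        if index not in used:
            return index
    return None
# e.g. _first_free_index([0, 1, 3]) == 2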
class UILUN(UIRTSLibNode):
'''
A generic UI for LUN objects.
'''
def __init__(self, lun, parent):
name = "lun%d" % lun.lun
UIRTSLibNode.__init__(self, name, lun, parent)
self.cfs_cwd = lun.path
self.refresh()
def summary(self):
lun = self.rtsnode
is_healthy = True
try:
storage_object = lun.storage_object
except RTSLibBrokenLink:
description = "BROKEN STORAGE LINK"
is_healthy = False
else:
backstore = storage_object.backstore
if backstore.plugin.startswith("rd"):
path = "ramdisk"
else:
path = storage_object.udev_path
if self.shell.prefs['legacy_hba_view']:
description = "%s%s/%s (%s)" % (backstore.plugin,
backstore.index,
storage_object.name, path)
else:
description = "%s/%s (%s)" % (backstore.plugin,
dedup_so_name(storage_object),
path)
return (description, is_healthy)
class UIPortals(UINode):
'''
A generic UI for TPG network portals.
'''
def __init__(self, tpg, parent):
UINode.__init__(self, "portals", parent)
self.tpg = tpg
self.cfs_cwd = "%s/np" % tpg.path
self.refresh()
def refresh(self):
self._children = set([])
for portal in self.tpg.network_portals:
UIPortal(portal, self)
def summary(self):
no_portals = len(self._children)
if no_portals != 1:
msg = "%d Portals" % no_portals
else:
msg = "%d Portal" % no_portals
return (msg, None)
def ui_command_create(self, ip_address=None, ip_port=None):
'''
Creates a Network Portal with specified I{ip_address} and I{ip_port}.
If I{ip_port} is omitted, the default port for the target fabric will
be used. If I{ip_address} is omitted, the first IP address found
matching the local hostname will be used.
SEE ALSO
========
B{delete}
'''
self.assert_root()
try:
listen_all = int(ip_address.replace(".", "")) == 0
        except (AttributeError, ValueError):  # ip_address is None or not purely numeric
listen_all = False
if listen_all:
ip_address = "0.0.0.0"
if ip_port is None:
# FIXME: Add a specfile parameter to determine that
ip_port = 3260
self.shell.log.info("Using default IP port %d" % ip_port)
if ip_address is None:
if not ip_address:
ip_address = utils.get_main_ip()
if ip_address:
self.shell.log.info("Automatically selected IP address %s."
% ip_address)
else:
self.shell.log.error("Cannot find a usable IP address to "
+ "create the Network Portal.")
return
elif ip_address not in utils.list_eth_ips() and not listen_all:
self.shell.log.error("IP address does not exist: %s" % ip_address)
return
try:
ip_port = int(ip_port)
except ValueError:
self.shell.log.error("The ip_port must be an integer value.")
return
portal = NetworkPortal(self.tpg, ip_address, ip_port, mode='create')
self.shell.log.info("Created network portal %s:%d."
% (ip_address, ip_port))
ui_portal = UIPortal(portal, self)
return self.new_node(ui_portal)
def ui_complete_create(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command create.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
if current_param == 'ip_address':
completions = [addr for addr in utils.list_eth_ips()
if addr.startswith(text)]
else:
completions = []
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
def ui_command_delete(self, ip_address, ip_port):
'''
Deletes the Network Portal with specified I{ip_address} and I{ip_port}.
SEE ALSO
========
B{create}
'''
self.assert_root()
portal = NetworkPortal(self.tpg, ip_address, ip_port, mode='lookup')
portal.delete()
self.shell.log.info("Deleted network portal %s:%s"
% (ip_address, ip_port))
self.refresh()
def ui_complete_delete(self, parameters, text, current_param):
'''
Parameter auto-completion method for user command delete.
@param parameters: Parameters on the command line.
@type parameters: dict
@param text: Current text of parameter being typed by the user.
@type text: str
@param current_param: Name of parameter to complete.
@type current_param: str
@return: Possible completions
@rtype: list of str
'''
completions = []
        # TODO: Check if a dict comprehension is acceptable here with the
        # supported python versions.
portals = {}
all_ports = set([])
for portal in self.tpg.network_portals:
all_ports.add(str(portal.port))
            if portal.ip_address not in portals:
portals[portal.ip_address] = []
portals[portal.ip_address].append(str(portal.port))
if current_param == 'ip_address':
if 'ip_port' in parameters:
port = parameters['ip_port']
completions = [addr for addr in portals
if port in portals[addr]
if addr.startswith(text)]
else:
completions = [addr for addr in portals
if addr.startswith(text)]
elif current_param == 'ip_port':
if 'ip_address' in parameters:
addr = parameters['ip_address']
if addr in portals:
completions = [port for port in portals[addr]
if port.startswith(text)]
else:
completions = [port for port in all_ports
if port.startswith(text)]
if len(completions) == 1:
return [completions[0] + ' ']
else:
return completions
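# The delete completion above first builds an {ip_address: [port, ...]} map of
# the TPG's portals, then narrows the candidates by whichever of the two
# parameters is already present on the command line. Compact illustration with
# a hypothetical portal map (not part of targetcli/rtslib):
def _complete_portal_param(portals, parameters, current_param, text=''):
    '''portals maps ip address strings to lists of port strings.'''
    if current_param == 'ip_address':
        port = parameters.get('ip_port')
        candidates = [addr for addr in portals
                      if port is None or port in portals[addr]]
    elif current_param == 'ip_port':
        addr = parameters.get('ip_address')
        if addr in portals:
            candidates = portals[addr]
        else:
            candidates = sorted(set(p for ports in portals.values()
                                    for p in ports))
    else:
        candidates = []
    return [c for c in candidates if c.startswith(text)]
# e.g. _complete_portal_param({'10.0.0.1': ['3260']}, {'ip_port': '3260'},
#                             'ip_address') == ['10.0.0.1']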
class UIPortal(UIRTSLibNode):
'''
A generic UI for a network portal.
'''
def __init__(self, portal, parent):
name = "%s:%s" % (portal.ip_address, portal.port)
UIRTSLibNode.__init__(self, name, portal, parent)
self.cfs_cwd = portal.path
self.portal = portal
self.refresh()
def summary(self):
if self.portal._get_iser_attr():
return ('OK, iser enabled', True)
else:
return ('OK, iser disabled', True)
def ui_command_iser_enable(self):
'''
        Enables iser operation on a network portal.
'''
if self.portal._get_iser_attr() == True:
self.shell.log.info("iser operation has already been enabled")
else:
self.portal._set_iser_attr(True)
self.shell.log.info("iser operation has been enabled")
def ui_command_iser_disable(self):
'''
        Disables iser operation on a network portal.
'''
if self.portal._get_iser_attr() == False:
self.shell.log.info("iser operation has already been disabled")
else:
self.portal._set_iser_attr(False)
self.shell.log.info("iser operation has been disabled")
|
|
from datetime import datetime, timedelta
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.admin import FieldListFilter
from django.contrib.admin.exceptions import (
DisallowedModelAdminLookup, DisallowedModelAdminToField,
)
from django.contrib.admin.options import (
IS_POPUP_VAR, TO_FIELD_VAR, IncorrectLookupParameters,
)
from django.contrib.admin.utils import (
get_fields_from_path, lookup_needs_distinct, prepare_lookup_value, quote,
)
from django.core.exceptions import (
FieldDoesNotExist, ImproperlyConfigured, SuspiciousOperation,
)
from django.core.paginator import InvalidPage
from django.db.models import F, Field, ManyToOneRel, OrderBy
from django.db.models.expressions import Combinable
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.timezone import make_aware
from django.utils.translation import gettext
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (
ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR)
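# For reference, a changelist request encodes these settings in the query
# string; e.g. ?q=smith&o=2.-1&p=3 means: search for "smith", order by the
# list_display column at index 2 ascending and then index 1 descending, and
# show page 3. (Illustrative example, not taken from this module.)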
class ChangeListSearchForm(forms.Form):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Populate "fields" dynamically because SEARCH_VAR is a variable:
self.fields = {
SEARCH_VAR: forms.CharField(required=False, strip=False),
}
class ChangeList:
search_form_class = ChangeListSearchForm
def __init__(self, request, model, list_display, list_display_links,
list_filter, date_hierarchy, search_fields, list_select_related,
list_per_page, list_max_show_all, list_editable, model_admin, sortable_by):
self.model = model
self.opts = model._meta
self.lookup_opts = self.opts
self.root_queryset = model_admin.get_queryset(request)
self.list_display = list_display
self.list_display_links = list_display_links
self.list_filter = list_filter
self.has_filters = None
self.has_active_filters = None
self.clear_all_filters_qs = None
self.date_hierarchy = date_hierarchy
self.search_fields = search_fields
self.list_select_related = list_select_related
self.list_per_page = list_per_page
self.list_max_show_all = list_max_show_all
self.model_admin = model_admin
self.preserved_filters = model_admin.get_preserved_filters(request)
self.sortable_by = sortable_by
# Get search parameters from the query string.
_search_form = self.search_form_class(request.GET)
if not _search_form.is_valid():
for error in _search_form.errors.values():
messages.error(request, ', '.join(error))
self.query = _search_form.cleaned_data.get(SEARCH_VAR) or ''
try:
self.page_num = int(request.GET.get(PAGE_VAR, 1))
except ValueError:
self.page_num = 1
self.show_all = ALL_VAR in request.GET
self.is_popup = IS_POPUP_VAR in request.GET
to_field = request.GET.get(TO_FIELD_VAR)
if to_field and not model_admin.to_field_allowed(request, to_field):
raise DisallowedModelAdminToField("The field %s cannot be referenced." % to_field)
self.to_field = to_field
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
if self.is_popup:
self.list_editable = ()
else:
self.list_editable = list_editable
self.queryset = self.get_queryset(request)
self.get_results(request)
if self.is_popup:
title = gettext('Select %s')
elif self.model_admin.has_change_permission(request):
title = gettext('Select %s to change')
else:
title = gettext('Select %s to view')
self.title = title % self.opts.verbose_name
self.pk_attname = self.lookup_opts.pk.attname
def get_filters_params(self, params=None):
"""
Return all params except IGNORED_PARAMS.
"""
params = params or self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
def get_filters(self, request):
lookup_params = self.get_filters_params()
use_distinct = False
has_active_filters = False
for key, value in lookup_params.items():
if not self.model_admin.lookup_allowed(key, value):
raise DisallowedModelAdminLookup("Filtering by %s not allowed" % key)
filter_specs = []
for list_filter in self.list_filter:
lookup_params_count = len(lookup_params)
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(request, lookup_params, self.model, self.model_admin)
else:
field_path = None
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for the
# type of the given field.
field, field_list_filter_class = list_filter, FieldListFilter.create
if not isinstance(field, Field):
field_path = field
field = get_fields_from_path(self.model, field_path)[-1]
spec = field_list_filter_class(
field, request, lookup_params,
self.model, self.model_admin, field_path=field_path,
)
# field_list_filter_class removes any lookup_params it
# processes. If that happened, check if distinct() is needed to
# remove duplicate results.
if lookup_params_count > len(lookup_params):
use_distinct = use_distinct or lookup_needs_distinct(self.lookup_opts, field_path)
if spec and spec.has_output():
filter_specs.append(spec)
if lookup_params_count > len(lookup_params):
has_active_filters = True
if self.date_hierarchy:
# Create bounded lookup parameters so that the query is more
# efficient.
year = lookup_params.pop('%s__year' % self.date_hierarchy, None)
if year is not None:
month = lookup_params.pop('%s__month' % self.date_hierarchy, None)
day = lookup_params.pop('%s__day' % self.date_hierarchy, None)
try:
from_date = datetime(
int(year),
int(month if month is not None else 1),
int(day if day is not None else 1),
)
except ValueError as e:
raise IncorrectLookupParameters(e) from e
if day:
to_date = from_date + timedelta(days=1)
elif month:
# In this branch, from_date will always be the first of a
# month, so advancing 32 days gives the next month.
to_date = (from_date + timedelta(days=32)).replace(day=1)
else:
to_date = from_date.replace(year=from_date.year + 1)
if settings.USE_TZ:
from_date = make_aware(from_date)
to_date = make_aware(to_date)
lookup_params.update({
'%s__gte' % self.date_hierarchy: from_date,
'%s__lt' % self.date_hierarchy: to_date,
})
# At this point, all the parameters used by the various ListFilters
# have been removed from lookup_params, which now only contains other
# parameters passed via the query string. We now loop through the
# remaining parameters both to ensure that all the parameters are valid
# fields and to determine if at least one of them needs distinct(). If
# the lookup parameters aren't real fields, then bail out.
try:
for key, value in lookup_params.items():
lookup_params[key] = prepare_lookup_value(key, value)
use_distinct = use_distinct or lookup_needs_distinct(self.lookup_opts, key)
return (
filter_specs, bool(filter_specs), lookup_params, use_distinct,
has_active_filters,
)
except FieldDoesNotExist as e:
raise IncorrectLookupParameters(e) from e
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
def get_results(self, request):
paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
# Get the total number of objects, with no admin filters applied.
if self.model_admin.show_full_result_count:
full_result_count = self.root_queryset.count()
else:
full_result_count = None
can_show_all = result_count <= self.list_max_show_all
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = self.queryset._clone()
else:
try:
result_list = paginator.page(self.page_num).object_list
except InvalidPage:
raise IncorrectLookupParameters
self.result_count = result_count
self.show_full_result_count = self.model_admin.show_full_result_count
# Admin actions are shown if there is at least one entry
# or if entries are not counted because show_full_result_count is disabled
self.show_admin_actions = not self.show_full_result_count or bool(full_result_count)
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.lookup_opts.ordering:
ordering = self.lookup_opts.ordering
return ordering
def get_ordering_field(self, field_name):
"""
Return the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Return None if no
proper model field name can be matched.
"""
try:
field = self.lookup_opts.get_field(field_name)
return field.name
except FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
if isinstance(attr, property) and hasattr(attr, 'fget'):
attr = attr.fget
return getattr(attr, 'admin_order_field', None)
def get_ordering(self, request, queryset):
"""
Return the list of ordering fields for the change list.
First check the get_ordering() method in model admin, then check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by calling _get_deterministic_ordering() with the
constructed ordering.
"""
params = self.params
ordering = list(self.model_admin.get_ordering(request) or self._get_default_ordering())
if ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[ORDER_VAR].split('.')
for p in order_params:
try:
none, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
if isinstance(order_field, OrderBy):
if pfx == '-':
order_field = order_field.copy()
order_field.reverse_ordering()
ordering.append(order_field)
elif hasattr(order_field, 'resolve_expression'):
# order_field is an expression.
ordering.append(order_field.desc() if pfx == '-' else order_field.asc())
                    # reverse order if order_field already has "-" as prefix
elif order_field.startswith('-') and pfx == '-':
ordering.append(order_field[1:])
else:
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
return self._get_deterministic_ordering(ordering)
def _get_deterministic_ordering(self, ordering):
"""
Ensure a deterministic order across all database backends. Search for a
single field or unique together set of fields providing a total
        ordering. If these are missing, augment the ordering with a descending
primary key.
"""
ordering = list(ordering)
ordering_fields = set()
total_ordering_fields = {'pk'} | {
field.attname for field in self.lookup_opts.fields
if field.unique and not field.null
}
for part in ordering:
# Search for single field providing a total ordering.
field_name = None
if isinstance(part, str):
field_name = part.lstrip('-')
elif isinstance(part, F):
field_name = part.name
elif isinstance(part, OrderBy) and isinstance(part.expression, F):
field_name = part.expression.name
if field_name:
# Normalize attname references by using get_field().
try:
field = self.lookup_opts.get_field(field_name)
except FieldDoesNotExist:
# Could be "?" for random ordering or a related field
# lookup. Skip this part of introspection for now.
continue
# Ordering by a related field name orders by the referenced
# model's ordering. Skip this part of introspection for now.
if field.remote_field and field_name == field.name:
continue
if field.attname in total_ordering_fields:
break
ordering_fields.add(field.attname)
else:
# No single total ordering field, try unique_together and total
# unique constraints.
constraint_field_names = (
*self.lookup_opts.unique_together,
*(
constraint.fields
for constraint in self.lookup_opts.total_unique_constraints
),
)
for field_names in constraint_field_names:
# Normalize attname references by using get_field().
fields = [self.lookup_opts.get_field(field_name) for field_name in field_names]
# Composite unique constraints containing a nullable column
# cannot ensure total ordering.
if any(field.null for field in fields):
continue
if ordering_fields.issuperset(field.attname for field in fields):
break
else:
# If no set of unique fields is present in the ordering, rely
# on the primary key to provide total ordering.
ordering.append('-pk')
return ordering
def get_ordering_field_columns(self):
"""
Return a dictionary of ordering field column numbers and asc/desc.
"""
# We must cope with more than one column having the same underlying sort
# field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = {}
if ORDER_VAR not in self.params:
# for ordering specified on ModelAdmin or model Meta, we don't know
# the right column numbers absolutely, because there might be more
# than one column associated with that ordering, so we guess.
for field in ordering:
if isinstance(field, (Combinable, OrderBy)):
if not isinstance(field, OrderBy):
field = field.asc()
if isinstance(field.expression, F):
order_type = 'desc' if field.descending else 'asc'
field = field.expression.name
else:
continue
elif field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_queryset(self, request):
# First, we collect all the declared list filters.
(
self.filter_specs,
self.has_filters,
remaining_lookup_params,
filters_use_distinct,
self.has_active_filters,
) = self.get_filters(request)
# Then, we let every list filter modify the queryset to its liking.
qs = self.root_queryset
for filter_spec in self.filter_specs:
new_qs = filter_spec.queryset(request, qs)
if new_qs is not None:
qs = new_qs
try:
# Finally, we apply the remaining lookup parameters from the query
# string (i.e. those that haven't already been processed by the
# filters).
qs = qs.filter(**remaining_lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
# Allow certain types of errors to be re-raised as-is so that the
# caller can treat them in a special way.
raise
except Exception as e:
# Every other error is caught with a naked except, because we don't
# have any other way of validating lookup parameters. They might be
# invalid if the keyword arguments are incorrect, or if the values
            # are not of the correct type, so we might get FieldError,
            # ValueError, ValidationError, or some other unexpected error.
raise IncorrectLookupParameters(e)
if not qs.query.select_related:
qs = self.apply_select_related(qs)
# Set ordering.
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
# Apply search results
qs, search_use_distinct = self.model_admin.get_search_results(request, qs, self.query)
# Set query string for clearing all filters.
self.clear_all_filters_qs = self.get_query_string(
new_params=remaining_lookup_params,
remove=self.get_filters_params(),
)
# Remove duplicates from results, if necessary
if filters_use_distinct | search_use_distinct:
return qs.distinct()
else:
return qs
def apply_select_related(self, qs):
if self.list_select_related is True:
return qs.select_related()
if self.list_select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if self.list_select_related:
return qs.select_related(*self.list_select_related)
return qs
def has_related_field_in_list_display(self):
for field_name in self.list_display:
try:
field = self.lookup_opts.get_field(field_name)
except FieldDoesNotExist:
pass
else:
if isinstance(field.remote_field, ManyToOneRel):
# <FK>_id field names don't require a join.
if field_name != field.get_attname():
return True
return False
def url_for_result(self, result):
pk = getattr(result, self.pk_attname)
return reverse('admin:%s_%s_change' % (self.opts.app_label,
self.opts.model_name),
args=(quote(pk),),
current_app=self.model_admin.admin_site.name)
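# The ORDER_VAR handling in ChangeList.get_ordering() and
# get_ordering_field_columns() encodes the requested ordering as dot-separated
# list_display indexes, each optionally prefixed with '-' for descending.
# Minimal sketch of that decoding step (illustrative only, not part of Django):
def _decode_order_var(order_var):
    """Return a list of (column_index, direction) pairs, skipping bad parts."""
    result = []
    for part in order_var.split('.'):
        _, prefix, index = part.rpartition('-')
        try:
            result.append((int(index), 'desc' if prefix == '-' else 'asc'))
        except ValueError:
            continue
    return result
# e.g. _decode_order_var('2.-1') == [(2, 'asc'), (1, 'desc')]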
|
|
# Copyright (c) 2015 Michael Strosaker
# MIT License
# http://opensource.org/licenses/MIT
import math
from utils import dataset, fold_change
from services import mine_protein
from ontology import go
class _feature_row:
def __init__(self, feature, description, gene, protein_id):
if isinstance(feature, basestring) and feature != '':
self.feature = feature
else:
self.feature = None
if isinstance(description, basestring) and description != '':
self.description = description
else:
self.description = None
if isinstance(gene, basestring) and gene != '':
self.gene = gene
else:
self.gene = None
if isinstance(protein_id, basestring) and protein_id != '':
self.protein_id = protein_id
else:
self.protein_id = None
self.expression_labels = []
self.expression = {}
self.foldchange_labels = []
self.foldchanges = {}
self.annotation_labels = []
self.annotation = {}
def retrieve_accession(self, organism, lookup_db):
self.acc = mine_protein(self.protein_id, lookup_db, self.gene, organism)
if self.gene is None:
if self.acc is not None and len(self.acc.gene) > 0:
self.gene = self.acc.gene
else:
self.gene = self.feature
self.go = []
self.go_slims = None
self.ec = []
self.pfam = []
self.tigrfam = []
self.smart = []
self.interpro = []
if self.acc is not None:
self.go = self.acc.go
self.ec = self.acc.ec
self.pfam = self.acc.pfam
self.tigrfam = self.acc.tigrfam
self.smart = self.acc.smart
self.interpro = self.acc.interpro
def add_expression(self, label, value):
self.expression_labels.append(label)
self.expression[label] = value
def calc_foldchange(self, from_label, to_label):
label = '%s:%s' % (from_label, to_label)
if from_label in self.expression_labels and \
to_label in self.expression_labels:
self.foldchange_labels.append(label)
self.foldchanges[label] = fold_change(self.expression[from_label],
self.expression[to_label])
def add_annotation(self, label, value):
self.annotation_labels.append(label)
self.annotation[label] = value
def csv(self, all_exprs, all_foldchanges, all_annotations):
ret = []
if self.gene:
ret.append(self.gene)
ret.append(self.feature)
else:
ret.append(self.feature)
ret.append('')
ret.append('"' + self.description + '"')
ret.append(';'.join(self.go))
if not self.go_slims:
self.go_slims = []
for term in self.go:
self.go_slims.extend(go(term).slims)
self.go_slims = list(set(self.go_slims))
ret.append(';'.join(self.go_slims))
ret.append(';'.join(self.ec))
ret.append(';'.join(self.pfam))
ret.append(';'.join(self.tigrfam))
ret.append(';'.join(self.smart))
ret.append(';'.join(self.interpro))
for label in all_exprs:
if label in self.expression_labels:
ret.append(str(self.expression[label]))
else:
ret.append('')
for label in all_foldchanges:
if label in self.foldchange_labels:
ret.append(str(self.foldchanges[label]))
else:
ret.append('')
for label in all_annotations:
if label in self.annotation_labels:
ret.append('"%s"' % str(self.annotation[label]))
else:
ret.append('')
return ','.join(ret)
class ontology_table:
def __init__(self, organism=None, feature_table=None, locus_col=None,
gene_col=None, lookup_db=None, label_col=None,
accession_col=None, description_col=None,
locus_tag_prefix=None, progress=True, filename = None):
self.expression_labels = []
self.foldchanges = []
self.annotation_labels = []
if filename is not None:
# build the table from an existing file
self.feat_rows = []
self.feat_tab = dataset(filename, 'csv')
firstrow = True
for row in self.feat_tab.rows:
feat = _feature_row(row['locus'], row['product'],
row['feature'], '')
if isinstance(row['go-term'], basestring):
feat.go = row['go-term'].split(';')
else:
feat.go = []
if isinstance(row['go-slim'], basestring):
feat.go_slims = row['go-slim'].split(';')
else:
feat.go_slims = []
if isinstance(row['ec'], basestring):
feat.ec = row['ec'].split(';')
else:
feat.ec = []
if isinstance(row['pfam'], basestring):
feat.pfam = row['pfam'].split(';')
else:
feat.pfam = []
if isinstance(row['tigrfam'], basestring):
feat.tigrfam = row['tigrfam'].split(';')
else:
feat.tigrfam = []
if isinstance(row['smart'], basestring):
feat.smart = row['smart'].split(';')
else:
feat.smart = []
if isinstance(row['interpro'], basestring):
feat.interpro = row['interpro'].split(';')
else:
feat.interpro = []
for col in self.feat_tab.colnames:
if col.startswith('expr:'):
label = ':'.join(col.split(':')[1:])
if firstrow:
self.expression_labels.append(label)
if isinstance(row[col], float) and \
not math.isnan(row[col]):
feat.expression_labels.append(label)
feat.expression[label] = row[col]
elif col.startswith('foldchange:'):
label = ':'.join(col.split(':')[1:])
if firstrow:
self.foldchanges.append(label)
if isinstance(row[col], float) and \
not math.isnan(row[col]):
feat.foldchange_labels.append(label)
feat.foldchanges[label] = row[col]
elif col.startswith('annotation:'):
label = ':'.join(col.split(':')[1:])
if firstrow:
self.annotation_labels.append(label)
if isinstance(row[col], basestring) and row[col] != '':
feat.annotation_labels.append(label)
feat.annotation[label] = row[col]
self.feat_rows.append(feat)
firstrow = False
self.build_index()
return
if not organism or not feature_table or not locus_col or \
not gene_col or not lookup_db or not label_col or \
not accession_col or not description_col or \
not locus_tag_prefix:
raise Exception('missing required parameter(s)')
self.feat_tab = dataset(feature_table, 'tab-delimited')
#index = self.feat_tab.index([locus_col, gene_col], accession_col)
self.feat_rows = []
finished = 0
for row in self.feat_tab.rows:
if isinstance(row[accession_col], basestring):
feature = _feature_row(row[label_col], row[description_col],
row[gene_col], row[accession_col])
feature.retrieve_accession(organism, lookup_db)
self.feat_rows.append(feature)
finished += 1
if progress and (finished % 10) == 0:
print 'finished %4d records' % finished
self.build_index()
def build_index(self):
self.index = {}
for feat in self.feat_rows:
self.index[feat.feature] = feat
if feat.gene:
self.index[feat.gene] = feat
def dump(self, filename):
outfile = open(filename, 'w')
cols = ['feature', 'locus', 'product', 'go-term', 'go-slim', 'ec',
'pfam', 'tigrfam', 'smart', 'interpro']
cols.extend([('"expr:%s"' % x) for x in self.expression_labels])
cols.extend([('"foldchange:%s"' % x) for x in self.foldchanges])
cols.extend([('"annotation:%s"' % x) for x in self.annotation_labels])
outfile.write('%s\n' % ','.join(cols))
for f in self.feat_rows:
outfile.write('%s\n' % f.csv(self.expression_labels,
self.foldchanges, self.annotation_labels))
outfile.close()
def add_RNAseq_values(self, filename, label, locus_col, value_col):
if label in self.expression_labels:
raise Exception('Expression data label %s already in table' % label)
expr_data = dataset(filename, 'csv')
self.expression_labels.append(label)
for row in expr_data.rows:
if isinstance(row[locus_col], basestring):
if row[locus_col] in self.index:
self.index[row[locus_col]].add_expression(label,
row[value_col])
def calc_foldchange(self, from_label, to_label):
label = '%s:%s' % (from_label, to_label)
if label in self.foldchanges:
raise Exception('Fold change label %s already in table' % label)
self.foldchanges.append(label)
for row in self.feat_rows:
row.calc_foldchange(from_label, to_label)
def add_annotation(self, filename, label, value_col, locus_col=None,
product_col=None):
if label in self.annotation_labels:
raise Exception('Annotation label %s already in table' % label)
annot_data = dataset(filename, 'csv')
self.annotation_labels.append(label)
for row in annot_data.rows:
if locus_col and isinstance(row[locus_col], basestring):
if row[locus_col] in self.index:
self.index[row[locus_col]].add_annotation(label,
row[value_col])
elif product_col and isinstance(row[product_col], basestring):
for tab_row in self.feat_rows:
if tab_row.description and \
row[product_col] in tab_row.description:
                        tab_row.add_annotation(label, row[value_col])
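# Typical workflow sketch for ontology_table; the file names and column labels
# below are hypothetical and a real run needs the corresponding input files:
if __name__ == '__main__':
    # Reload a previously dumped table instead of re-mining every accession.
    table = ontology_table(filename='features.csv')
    # Attach per-condition expression values, derive a fold change column and
    # write the enriched table back out as CSV.
    table.add_RNAseq_values('rnaseq_control.csv', 'control', 'locus', 'rpkm')
    table.add_RNAseq_values('rnaseq_treated.csv', 'treated', 'locus', 'rpkm')
    table.calc_foldchange('control', 'treated')
    table.dump('features_annotated.csv')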
|
|
import mimetypes
import os
import random
import time
from email import Charset, Encoders
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.Header import Header
from email.Utils import formatdate, getaddresses, formataddr
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import smart_str, force_unicode
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
Charset.add_charset('utf-8', Charset.SHORTEST, Charset.QP, 'utf-8')
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
pass
# Copied from Python standard library, with the following modifications:
# * Used cached hostname for performance.
# * Added try/except to support lack of getpid() in Jython (#5496).
def make_msgid(idstring=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
    <utcdate.pid.randint[.idstring]@dns_name>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
try:
pid = os.getpid()
except AttributeError:
# No getpid() in Jython, for example.
pid = 1
randint = random.randrange(100000)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
idhost = DNS_NAME
msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, idhost)
return msgid
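# For instance, make_msgid('django') yields something shaped like
# '<20090514143501.12345.82757.django@mail.example.com>'; the exact value
# changes with the current time, the process id, a random number and the
# cached DNS name of this host (values above are made up).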
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = force_unicode(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val = val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ('to', 'from', 'cc'):
result = []
for nm, addr in getaddresses((val,)):
nm = str(Header(nm.encode(encoding), encoding))
result.append(formataddr((nm, str(addr))))
val = ', '.join(result)
else:
val = Header(val.encode(encoding), encoding)
else:
if name.lower() == 'subject':
val = Header(val)
return name, val
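# Header-injection protection in practice: plain ASCII values pass through
# unchanged, Subject values get wrapped in an email Header, and any value
# containing a newline or carriage return raises BadHeaderError. Doctest-style
# sketch (hypothetical values, not part of Django):
#
#   >>> forbid_multi_line_headers('To', 'bob', 'utf-8')
#   ('To', 'bob')
#   >>> forbid_multi_line_headers('Subject', 'hi\nX-Injected: 1', 'utf-8')
#   Traceback (most recent call last):
#       ...
#   BadHeaderError: Header values can't contain newlines ...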
class SafeMIMEText(MIMEText):
def __init__(self, text, subtype, charset):
self.encoding = charset
MIMEText.__init__(self, text, subtype, charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
assert not isinstance(to, basestring), '"to" argument must be a list or tuple'
self.to = list(to)
else:
self.to = []
if bcc:
assert not isinstance(bcc, basestring), '"bcc" argument must be a list or tuple'
self.bcc = list(bcc)
else:
self.bcc = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(smart_str(self.body, encoding),
self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = ', '.join(self.to)
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
msg['Date'] = formatdate()
if 'message-id' not in header_names:
msg['Message-ID'] = make_msgid()
for name, value in self.extra_headers.items():
if name.lower() == 'from': # From is already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Bcc entries).
"""
return self.to + self.bcc
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
            assert content is None and mimetype is None
self.attachments.append(filename)
else:
assert content is not None
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
content = open(path, 'rb').read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(smart_str(content, encoding), subtype, encoding)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
attachment = self._create_mime_attachment(content, mimetype)
if filename:
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super(EmailMultiAlternatives, self).__init__(subject, body, from_email, to, bcc, connection, attachments, headers)
        self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
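# Typical multipart/alternative usage sketch; the addresses are placeholders
# and a configured e-mail backend is needed before actually sending:
if __name__ == '__main__':
    msg = EmailMultiAlternatives(
        subject='Weekly report',
        body='Plain-text version of the report.',
        from_email='reports@example.com',
        to=['team@example.com'],
    )
    msg.attach_alternative('<h1>Weekly report</h1>', 'text/html')
    msg.attach('report.csv', 'a,b\n1,2\n', 'text/csv')
    # msg.send() would deliver it through the connection returned by
    # django.core.mail.get_connection().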
|
|
#!/usr/bin/env python
#Copyright 2012-2013 SAP Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This is part of the COCOMA framework
#
# COCOMA is a framework for COntrolled COntentious and MAlicious patterns
#
import logging
from logging import handlers
import pika, time, datetime, sys, os
import sqlite3 as sqlite
import json
import Library
#from EmulationManager import *
global HOMEPATH
#HOMEPATH = Library.getHomepath()
try:
    HOMEPATH = os.environ['COCOMA']
except KeyError:
    # Keep HOMEPATH defined so later "if HOMEPATH:" checks cannot raise NameError.
    HOMEPATH = None
    print "no $COCOMA environment variable set"
class Producer():
def __init__(self):
self.init()
def init(self):
try:
mqconfig = MQconfigLoad()
self.enabled = mqconfig[0][0]
self.vhost = mqconfig[0][1]
self.exchange = mqconfig[0][2]
self.user = mqconfig[0][3]
self.password = mqconfig[0][4]
self.host = mqconfig[0][5]
self.topic = mqconfig[0][6]
        except Exception:
self.enabled = "no"
self.vhost = ""
self.exchange = ""
self.user = ""
self.password = ""
self.host = ""
self.topic = ""
pass
#print "In Producer 1: "+ vhost + " " + exchange + " " + user + " " + password + " " + host + " " + topic
def sendmsg(self,loggingName,message):
if self.enabled == "yes":
frtmsg = self.formatmsg(self.topic,loggingName,message)
#print "In Producer 3, frtmsg: "+frtmsg
credentials = pika.PlainCredentials(self.user, self.password)
#print "In Producer 2 "+ self.vhost + " " + self.exchange + " " + self.user + " " + self.password + " " + self.host + " " + self.topic
#self.vhost = "bonfire"
#self.exchange = "experiments"
#self.user = "eventreader"
#self.password = "reader1348"
#self.host = "mq.bonfire-project.eu"
#self.topic = "f47aa8ce21c2273d077b"
connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.host, virtual_host=self.vhost, credentials=credentials))
#print "In Producer 4"
channel = connection.channel()
channel.basic_publish(exchange=self.exchange, routing_key=self.topic,body=json.dumps(frtmsg))
#print " [x] Sent %s:%s" % (self.topic, frtmsg)
print " [x] Sent: "+ json.dumps(frtmsg)
connection.close()
def formatmsg(self, topic, name, message):
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S.%f')
#newmsg = st+';'+name+';'+message
#print "scheduler.getUniqueID: "+self.scheduler.getUniqueID()
data = {"Timestamp":ts, "From":name, "Message":message}
#data2 = unicode(data, 'latin-1')
return data
class BroadcastLogHandler(logging.Handler):
def __init__(self,loggingName,producer):
self.broadcaster = producer
#print "In BroadcastLogHandler 1"
self.level = 0
self.filters = []
self.lock = 0
self.loggingname = loggingName
#self.machine = os.uname()[1]
def emit(self,record):
# Send the log message to the broadcast queue
#print "Who calls Producer.emit: "+sys._getframe(1).f_code.co_name
message = "%s" % (record.msg)
self.broadcaster.sendmsg(self.loggingname,message)
#print "In BroadcastLogHandler 2"
def StreamAndBroadcastHandler(loggingName,producer,level=logging.DEBUG):
"Factory function for a logging.StreamHandler instance connected to your namespace."
logger = logging.getLogger(loggingName)
logger.setLevel(level)
handler_messages = BroadcastLogHandler(loggingName,producer)
logger.addHandler(handler_messages)
#print "In StreamAndBroadcastHandler"
def MQconfig(arguments):
try:
if len(arguments)==7:
#print "got 7 arguments"
if (arguments["emulationMQenable"]=="yes") or (arguments["emulationMQenable"]=="no") :
if HOMEPATH:
conn = sqlite.connect(HOMEPATH+'/data/cocoma.sqlite')
c = conn.cursor()
#print "In producer MQconfig: "+str(arguments.values())
c.execute('DELETE from MQconfig')
c.execute('INSERT INTO MQconfig (enabled,vhost,exchange,user,password,host,topic) VALUES (?, ?, ?, ?, ?, ?, ?)',[arguments["emulationMQenable"],arguments["emulationMQvhost"],arguments["emulationMQexchange"],arguments["emulationMQuser"],arguments["emulationMQpassword"],arguments["emulationMQhost"],arguments["emulationMQtopic"]])
conn.commit()
c.close()
#print "In producer MQconfig 2: "+str(arguments.values())
else:
print "Enabled parameter accepts either 'yes' or 'no'"
else:
print "MQproducer Not all arguments supplied, check help: "+ str(len(arguments))
except sqlite.Error, e:
print "MQconfig(arguments) SQL Error %s:" % e.args[0]
print e
return "<error>str(e)</error>"
sys.exit(1)
def MQconfigDelete():
try:
if HOMEPATH:
conn = sqlite.connect(HOMEPATH+'/data/cocoma.sqlite')
c = conn.cursor()
#print "In producer MQconfig: "+enabled+" "+vhost+" "+exchange+" "+user+" "+password+" "+host+" "+topic
c.execute('DELETE from MQconfig')
conn.commit()
c.close()
except sqlite.Error, e:
print "MQconfigDelete() SQL Error %s:" % e.args[0]
print e
return "<error>str(e)</error>"
sys.exit(1)
def MQconfigEnable(enabled):
try:
if HOMEPATH:
conn = sqlite.connect(HOMEPATH+'/data/cocoma.sqlite')
c = conn.cursor()
#print "In producer MQconfig: "+enabled+" "+vhost+" "+exchange+" "+user+" "+password+" "+host+" "+topic
c.execute('UPDATE MQconfig SET enabled = (?)',[enabled])
conn.commit()
c.close()
except sqlite.Error, e:
print "MQconfigEnable() SQL Error %s:" % e.args[0]
print e
return "<error>str(e)</error>"
sys.exit(1)
def MQconfigLoad():
try:
if HOMEPATH:
conn = sqlite.connect(HOMEPATH+'/data/cocoma.sqlite')
c = conn.cursor()
#print "In producer MQconfig: "+enabled+" "+vhost+" "+exchange+" "+user+" "+password+" "+host+" "+topic
c.execute('SELECT * from MQconfig')
mqconfig = c.fetchall()
#for par in mqconfig:
#for i in range(0,6):
# print "enabled: "+str(par[0])+", vhost: "+par[1]+", exchange: "+par[2]+", user: "+par[3]+", password: "+par[4]+", host: "+par[5]+", topic: "+par[6]
            conn.commit()
            c.close()
            return mqconfig
except sqlite.Error, e:
print "MQconfigLoad() SQL Error %s:" % e.args[0]
print e
return "<error>str(e)</error>"
sys.exit(1)
def MQconfigShow():
mqconfig=MQconfigLoad()
for par in mqconfig:
#for i in range(0,6):
print "enabled: "+str(par[0])+", vhost: "+par[1]+", exchange: "+par[2]+", user: "+par[3]+", password: "+par[4]+", host: "+par[5]+", topic: "+par[6]
|
|
"""The IPython kernel implementation"""
import asyncio
from contextlib import contextmanager
from functools import partial
import getpass
import signal
import sys
from IPython.core import release
from ipython_genutils.py3compat import builtin_mod, PY3, unicode_type, safe_unicode
from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
from tornado import gen
from traitlets import Instance, Type, Any, List, Bool
from .comm import CommManager
from .kernelbase import Kernel as KernelBase
from .zmqshell import ZMQInteractiveShell
from .eventloops import _use_appnope
try:
from IPython.core.interactiveshell import _asyncio_runner
except ImportError:
_asyncio_runner = None
try:
from IPython.core.completer import rectify_completions as _rectify_completions, provisionalcompleter as _provisionalcompleter
_use_experimental_60_completion = True
except ImportError:
_use_experimental_60_completion = False
_EXPERIMENTAL_KEY_NAME = '_jupyter_types_experimental'
class IPythonKernel(KernelBase):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
shell_class = Type(ZMQInteractiveShell)
use_experimental_completions = Bool(True,
help="Set this flag to False to deactivate the use of experimental IPython completion APIs.",
).tag(config=True)
user_module = Any()
def _user_module_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_module = new
user_ns = Instance(dict, args=None, allow_none=True)
def _user_ns_changed(self, name, old, new):
if self.shell is not None:
self.shell.user_ns = new
self.shell.init_user_ns()
# A reference to the Python builtin 'raw_input' function.
# (i.e., __builtin__.raw_input for Python 2.7, builtins.input for Python 3)
_sys_raw_input = Any()
_sys_eval_input = Any()
def __init__(self, **kwargs):
super(IPythonKernel, self).__init__(**kwargs)
# Initialize the InteractiveShell subclass
self.shell = self.shell_class.instance(parent=self,
profile_dir = self.profile_dir,
user_module = self.user_module,
user_ns = self.user_ns,
kernel = self,
)
self.shell.displayhook.session = self.session
self.shell.displayhook.pub_socket = self.iopub_socket
self.shell.displayhook.topic = self._topic('execute_result')
self.shell.display_pub.session = self.session
self.shell.display_pub.pub_socket = self.iopub_socket
self.comm_manager = CommManager(parent=self, kernel=self)
self.shell.configurables.append(self.comm_manager)
comm_msg_types = [ 'comm_open', 'comm_msg', 'comm_close' ]
for msg_type in comm_msg_types:
self.shell_handlers[msg_type] = getattr(self.comm_manager, msg_type)
if _use_appnope() and self._darwin_app_nap:
# Disable app-nap as the kernel is not a gui but can have guis
import appnope
appnope.nope()
help_links = List([
{
'text': "Python Reference",
'url': "https://docs.python.org/%i.%i" % sys.version_info[:2],
},
{
'text': "IPython Reference",
'url': "https://ipython.org/documentation.html",
},
{
'text': "NumPy Reference",
'url': "https://docs.scipy.org/doc/numpy/reference/",
},
{
'text': "SciPy Reference",
'url': "https://docs.scipy.org/doc/scipy/reference/",
},
{
'text': "Matplotlib Reference",
'url': "https://matplotlib.org/contents.html",
},
{
'text': "SymPy Reference",
'url': "http://docs.sympy.org/latest/index.html",
},
{
'text': "pandas Reference",
'url': "https://pandas.pydata.org/pandas-docs/stable/",
},
]).tag(config=True)
# Kernel info fields
implementation = 'ipython'
implementation_version = release.version
language_info = {
'name': 'python',
'version': sys.version.split()[0],
'mimetype': 'text/x-python',
'codemirror_mode': {
'name': 'ipython',
'version': sys.version_info[0]
},
'pygments_lexer': 'ipython%d' % (3 if PY3 else 2),
'nbconvert_exporter': 'python',
'file_extension': '.py'
}
@property
def banner(self):
return self.shell.banner
def start(self):
self.shell.exit_now = False
super(IPythonKernel, self).start()
def set_parent(self, ident, parent):
"""Overridden from parent to tell the display hook and output streams
about the parent message.
"""
super(IPythonKernel, self).set_parent(ident, parent)
self.shell.set_parent(parent)
def init_metadata(self, parent):
"""Initialize metadata.
Run at the beginning of each execution request.
"""
md = super(IPythonKernel, self).init_metadata(parent)
# FIXME: remove deprecated ipyparallel-specific code
# This is required for ipyparallel < 5.0
md.update({
'dependencies_met' : True,
'engine' : self.ident,
})
return md
def finish_metadata(self, parent, metadata, reply_content):
"""Finish populating metadata.
Run after completing an execution request.
"""
# FIXME: remove deprecated ipyparallel-specific code
# This is required by ipyparallel < 5.0
metadata['status'] = reply_content['status']
if reply_content['status'] == 'error' and reply_content['ename'] == 'UnmetDependency':
metadata['dependencies_met'] = False
return metadata
def _forward_input(self, allow_stdin=False):
"""Forward raw_input and getpass to the current frontend.
via input_request
"""
self._allow_stdin = allow_stdin
if PY3:
self._sys_raw_input = builtin_mod.input
builtin_mod.input = self.raw_input
else:
self._sys_raw_input = builtin_mod.raw_input
self._sys_eval_input = builtin_mod.input
builtin_mod.raw_input = self.raw_input
builtin_mod.input = lambda prompt='': eval(self.raw_input(prompt))
self._save_getpass = getpass.getpass
getpass.getpass = self.getpass
def _restore_input(self):
"""Restore raw_input, getpass"""
if PY3:
builtin_mod.input = self._sys_raw_input
else:
builtin_mod.raw_input = self._sys_raw_input
builtin_mod.input = self._sys_eval_input
getpass.getpass = self._save_getpass
@property
def execution_count(self):
return self.shell.execution_count
@execution_count.setter
def execution_count(self, value):
# Ignore the incrementing done by KernelBase, in favour of our shell's
# execution counter.
pass
@contextmanager
def _cancel_on_sigint(self, future):
"""ContextManager for capturing SIGINT and cancelling a future
SIGINT raises in the event loop when running async code,
but we want it to halt a coroutine.
Ideally, it would raise KeyboardInterrupt,
but this turns it into a CancelledError.
At least it gets a decent traceback to the user.
"""
sigint_future = asyncio.Future()
# whichever future finishes first,
# cancel the other one
def cancel_unless_done(f, _ignored):
if f.cancelled() or f.done():
return
f.cancel()
# when sigint finishes,
# abort the coroutine with CancelledError
sigint_future.add_done_callback(
partial(cancel_unless_done, future)
)
# when the main future finishes,
# stop watching for SIGINT events
future.add_done_callback(
partial(cancel_unless_done, sigint_future)
)
def handle_sigint(*args):
def set_sigint_result():
if sigint_future.cancelled() or sigint_future.done():
return
sigint_future.set_result(1)
# use add_callback for thread safety
self.io_loop.add_callback(set_sigint_result)
        # set the custom sigint handler during this context
save_sigint = signal.signal(signal.SIGINT, handle_sigint)
try:
yield
finally:
# restore the previous sigint handler
signal.signal(signal.SIGINT, save_sigint)
@gen.coroutine
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
shell = self.shell # we'll need this a lot here
self._forward_input(allow_stdin)
reply_content = {}
if hasattr(shell, 'run_cell_async') and hasattr(shell, 'should_run_async'):
run_cell = shell.run_cell_async
should_run_async = shell.should_run_async
else:
should_run_async = lambda cell: False
# older IPython,
# use blocking run_cell and wrap it in coroutine
@gen.coroutine
def run_cell(*args, **kwargs):
return shell.run_cell(*args, **kwargs)
try:
# default case: runner is asyncio and asyncio is already running
# TODO: this should check every case for "are we inside the runner",
# not just asyncio
if (
_asyncio_runner
and should_run_async(code)
and shell.loop_runner is _asyncio_runner
and asyncio.get_event_loop().is_running()
):
coro = run_cell(code, store_history=store_history, silent=silent)
coro_future = asyncio.ensure_future(coro)
with self._cancel_on_sigint(coro_future):
res = None
try:
res = yield coro_future
finally:
shell.events.trigger('post_execute')
if not silent:
shell.events.trigger('post_run_cell', res)
else:
# runner isn't already running,
# make synchronous call,
# letting shell dispatch to loop runners
res = shell.run_cell(code, store_history=store_history, silent=silent)
finally:
self._restore_input()
if res.error_before_exec is not None:
err = res.error_before_exec
else:
err = res.error_in_exec
if res.success:
reply_content[u'status'] = u'ok'
else:
reply_content[u'status'] = u'error'
reply_content.update({
u'traceback': shell._last_traceback or [],
u'ename': unicode_type(type(err).__name__),
u'evalue': safe_unicode(err),
})
# FIXME: deprecated piece for ipyparallel (remove in 5.0):
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id,
method='execute')
reply_content['engine_info'] = e_info
# Return the execution counter so clients can display prompts
reply_content['execution_count'] = shell.execution_count - 1
if 'traceback' in reply_content:
self.log.info("Exception in execute request:\n%s", '\n'.join(reply_content['traceback']))
# At this point, we can tell whether the main code execution succeeded
# or not. If it did, we proceed to evaluate user_expressions
if reply_content['status'] == 'ok':
reply_content[u'user_expressions'] = \
shell.user_expressions(user_expressions or {})
else:
# If there was an error, don't even try to compute expressions
reply_content[u'user_expressions'] = {}
# Payloads should be retrieved regardless of outcome, so we can both
# recover partial output (that could have been generated early in a
# block, before an error) and always clear the payload system.
reply_content[u'payload'] = shell.payload_manager.read_payload()
# Be aggressive about clearing the payload because we don't want
# it to sit in memory until the next execute_request comes in.
shell.payload_manager.clear_payload()
return reply_content
def do_complete(self, code, cursor_pos):
if _use_experimental_60_completion and self.use_experimental_completions:
return self._experimental_do_complete(code, cursor_pos)
# FIXME: IPython completers currently assume single line,
# but completion messages give multi-line context
# For now, extract line from cell, based on cursor_pos:
if cursor_pos is None:
cursor_pos = len(code)
line, offset = line_at_cursor(code, cursor_pos)
line_cursor = cursor_pos - offset
txt, matches = self.shell.complete('', line, line_cursor)
return {'matches' : matches,
'cursor_end' : cursor_pos,
'cursor_start' : cursor_pos - len(txt),
'metadata' : {},
'status' : 'ok'}
def _experimental_do_complete(self, code, cursor_pos):
"""
Experimental completions from IPython, using Jedi.
"""
if cursor_pos is None:
cursor_pos = len(code)
with _provisionalcompleter():
raw_completions = self.shell.Completer.completions(code, cursor_pos)
completions = list(_rectify_completions(code, raw_completions))
comps = []
for comp in completions:
comps.append(dict(
start=comp.start,
end=comp.end,
text=comp.text,
type=comp.type,
))
if completions:
s = completions[0].start
e = completions[0].end
matches = [c.text for c in completions]
else:
s = cursor_pos
e = cursor_pos
matches = []
return {'matches': matches,
'cursor_end': e,
'cursor_start': s,
'metadata': {_EXPERIMENTAL_KEY_NAME: comps},
'status': 'ok'}
def do_inspect(self, code, cursor_pos, detail_level=0):
name = token_at_cursor(code, cursor_pos)
reply_content = {'status' : 'ok'}
reply_content['data'] = {}
reply_content['metadata'] = {}
try:
reply_content['data'].update(
self.shell.object_inspect_mime(
name,
detail_level=detail_level
)
)
if not self.shell.enable_html_pager:
reply_content['data'].pop('text/html')
reply_content['found'] = True
except KeyError:
reply_content['found'] = False
return reply_content
def do_history(self, hist_access_type, output, raw, session=0, start=0,
stop=None, n=None, pattern=None, unique=False):
if hist_access_type == 'tail':
hist = self.shell.history_manager.get_tail(n, raw=raw, output=output,
include_latest=True)
elif hist_access_type == 'range':
hist = self.shell.history_manager.get_range(session, start, stop,
raw=raw, output=output)
elif hist_access_type == 'search':
hist = self.shell.history_manager.search(
pattern, raw=raw, output=output, n=n, unique=unique)
else:
hist = []
return {
'status': 'ok',
'history' : list(hist),
}
def do_shutdown(self, restart):
self.shell.exit_now = True
return dict(status='ok', restart=restart)
def do_is_complete(self, code):
transformer_manager = getattr(self.shell, 'input_transformer_manager', None)
if transformer_manager is None:
# input_splitter attribute is deprecated
transformer_manager = self.shell.input_splitter
status, indent_spaces = transformer_manager.check_complete(code)
r = {'status': status}
if status == 'incomplete':
r['indent'] = ' ' * indent_spaces
return r
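    # Example replies (illustrative): for code such as "for i in range(3):" the
    # transformer manager typically reports ('incomplete', 4), giving
    # {'status': 'incomplete', 'indent': '    '}; for "1 + 1" the reply is
    # simply {'status': 'complete'}.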
def do_apply(self, content, bufs, msg_id, reply_metadata):
from .serialize import serialize_object, unpack_apply_message
shell = self.shell
try:
working = shell.user_ns
prefix = "_"+str(msg_id).replace("-","")+"_"
f,args,kwargs = unpack_apply_message(bufs, working, copy=False)
fname = getattr(f, '__name__', 'f')
fname = prefix+"f"
argname = prefix+"args"
kwargname = prefix+"kwargs"
resultname = prefix+"result"
ns = { fname : f, argname : args, kwargname : kwargs , resultname : None }
# print ns
working.update(ns)
code = "%s = %s(*%s,**%s)" % (resultname, fname, argname, kwargname)
try:
exec(code, shell.user_global_ns, shell.user_ns)
result = working.get(resultname)
finally:
for key in ns:
working.pop(key)
result_buf = serialize_object(result,
buffer_threshold=self.session.buffer_threshold,
item_threshold=self.session.item_threshold,
)
except BaseException as e:
# invoke IPython traceback formatting
shell.showtraceback()
reply_content = {
u'traceback': shell._last_traceback or [],
u'ename': unicode_type(type(e).__name__),
u'evalue': safe_unicode(e),
}
# FIXME: deprecated piece for ipyparallel (remove in 5.0):
e_info = dict(engine_uuid=self.ident, engine_id=self.int_id, method='apply')
reply_content['engine_info'] = e_info
self.send_response(self.iopub_socket, u'error', reply_content,
ident=self._topic('error'))
self.log.info("Exception in apply request:\n%s", '\n'.join(reply_content['traceback']))
result_buf = []
reply_content['status'] = 'error'
else:
reply_content = {'status' : 'ok'}
return reply_content, result_buf
def do_clear(self):
self.shell.reset(False)
return dict(status='ok')
# This exists only for backwards compatibility - use IPythonKernel instead
class Kernel(IPythonKernel):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn('Kernel is a deprecated alias of ipykernel.ipkernel.IPythonKernel',
DeprecationWarning)
super(Kernel, self).__init__(*args, **kwargs)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core Fast Attention Module for Flax.
Implementation of the approximate fast softmax and generalized
attention mechanism leveraging structured random feature maps [RFM] techniques
and low rank decomposition of the attention matrix.
"""
# pylint: disable=invalid-name, missing-function-docstring, line-too-long
import abc
from collections.abc import Iterable # pylint: disable=g-importing-member
import functools
from absl import logging
import gin
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
import numpy as onp
# Nonlinear mappings encoding different attention kernels.
gin.external_configurable(jnp.cos, 'jcos')
gin.external_configurable(jnp.sin, 'jsin')
gin.external_configurable(jnp.tanh, 'jtanh')
gin.external_configurable(jax.nn.sigmoid, 'jsigmoid')
gin.external_configurable(
lambda x: jax.nn.gelu(x, approximate=False), 'jgelu'
) # Needs to be exact, although might be slower. See https://github.com/google/jax/issues/4428.
gin.external_configurable(lambda x: x * x * (x > 0.0), 'jrequ')
gin.external_configurable(jnp.exp, 'jexp')
gin.external_configurable(lambda x: x, 'jidentity')
gin.external_configurable(
lambda x: (jnp.exp(x)) * (x <= 0.0) + (x + 1.0) * (x > 0.0), 'jshiftedelu'
) # Nonlinearity used in "Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention" (https://arxiv.org/abs/2006.16236).
def nonnegative_softmax_kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=True,
eps=0.0001):
"""Constructs nonnegative kernel features for fast softmax attention.
Args:
    data: input for which features are computed
projection_matrix: random matrix used to compute features
attention_dims_t: tuple of attention dimensions
batch_dims_t: tuple of batch dimensions
precision: precision parameter
is_query: predicate indicating whether input data corresponds to queries or
keys
    normalize_data: predicate indicating whether data should be normalized.
eps: numerical stabilizer.
Returns:
Random features for fast softmax attention.
"""
if normalize_data:
# We have e^{qk^T/sqrt{d}} = e^{q_norm k_norm^T}, where
# w_norm = w * data_normalizer for w in {q,k}.
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0])
data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
(batch_dims_t, batch_dims_t)),
precision=precision)
diag_data = jnp.square(data)
diag_data = jnp.sum(diag_data, axis=data.ndim - 1)
diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1)
last_dims_t = (len(data_dash.shape) - 1,)
if is_query:
data_dash = ratio * (
jnp.exp(data_dash - diag_data -
jnp.max(data_dash, axis=last_dims_t, keepdims=True)) + eps)
else:
data_dash = ratio * (
jnp.exp(data_dash - diag_data - jnp.max(
data_dash, axis=last_dims_t + attention_dims_t, keepdims=True)) +
eps)
return data_dash
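# Shape sketch (illustrative, not part of the original module): for queries or
# keys of shape [batch, num_heads, length, qk_dim] and a projection matrix of
# shape [nb_features, qk_dim], the features returned above have shape
# [batch, num_heads, length, nb_features]; softmax attention is then
# approximated, up to renormalization, by (Q'(K')^T)V built from these features.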
def sincos_softmax_kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
normalize_data=True):
"""Constructs kernel sin-cos features for fast softmax attention.
Args:
    data: input for which features are computed
projection_matrix: random matrix used to compute features
attention_dims_t: tuple of attention dimensions
batch_dims_t: tuple of batch dimensions
precision: precision parameter
normalize_data: predicate indicating whether data should be normalized.
Returns:
Random features for fast softmax attention.
"""
if normalize_data:
# We have: exp(qk^T/sqrt{d}) = exp(|q|^2/2sqrt{d}) * exp(|k|^2/2sqrt{d}) *
# exp(-(|q*c-k*c|^2)/2), where c = 1.0 / sqrt{sqrt{d}}.
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0])
data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
(batch_dims_t, batch_dims_t)),
precision=precision)
data_dash_cos = ratio * jnp.cos(data_dash)
data_dash_sin = ratio * jnp.sin(data_dash)
data_dash = jnp.concatenate((data_dash_cos, data_dash_sin), axis=-1)
# Constructing D_data and data^{'}
diag_data = jnp.square(data)
diag_data = jnp.sum(diag_data, axis=data.ndim - 1)
diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer
diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1)
# Additional renormalization for numerical stability
data_renormalizer = jnp.max(diag_data, attention_dims_t, keepdims=True)
diag_data -= data_renormalizer
diag_data = jnp.exp(diag_data)
data_prime = data_dash * diag_data
return data_prime
def generalized_kernel_feature_creator(data, projection_matrix, batch_dims_t,
precision, kernel_fn, kernel_epsilon,
normalize_data):
"""Constructs kernel features for fast generalized attention.
Args:
    data: input for which features are computed
projection_matrix: matrix used to compute features
batch_dims_t: tuple of batch dimensions
precision: precision parameter
kernel_fn: kernel function used
kernel_epsilon: additive positive term added to every feature for numerical
stability
normalize_data: predicate indicating whether data should be normalized.
Returns:
Random features for fast generalized attention.
"""
if normalize_data:
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1])))
else:
data_normalizer = 1.0
if projection_matrix is None:
return kernel_fn(data_normalizer * data) + kernel_epsilon
else:
data_mod_shape = data.shape[0:len(batch_dims_t)] + projection_matrix.shape
data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix
data_dash = lax.dot_general(
data_normalizer * data,
data_thick_random_matrix,
(((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)),
(batch_dims_t, batch_dims_t)),
precision=precision)
data_prime = kernel_fn(data_dash) + kernel_epsilon
return data_prime
@gin.configurable
def make_fast_softmax_attention(qkv_dim,
renormalize_attention=True,
numerical_stabilizer=0.000001,
nb_features=256,
ortho_features=True,
ortho_scaling=0.0,
redraw_features=True,
unidirectional=False,
nonnegative_features=True,
lax_scan_unroll=1):
"""Construct a fast softmax attention method."""
logging.info(
'Fast softmax attention: %s features and orthogonal=%s, renormalize=%s',
nb_features, ortho_features, renormalize_attention)
if ortho_features:
matrix_creator = functools.partial(
GaussianOrthogonalRandomMatrix,
nb_features,
qkv_dim,
scaling=ortho_scaling)
else:
matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix,
nb_features, qkv_dim)
if nonnegative_features:
def kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=True):
return nonnegative_softmax_kernel_feature_creator(
data, projection_matrix, attention_dims_t, batch_dims_t, precision,
is_query, normalize_data, numerical_stabilizer)
else:
def kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=True):
del is_query
return sincos_softmax_kernel_feature_creator(data, projection_matrix,
attention_dims_t,
batch_dims_t, precision,
normalize_data)
attention_fn = FastAttentionviaLowRankDecomposition(
matrix_creator,
kernel_feature_creator,
renormalize_attention=renormalize_attention,
numerical_stabilizer=numerical_stabilizer,
redraw_features=redraw_features,
unidirectional=unidirectional,
lax_scan_unroll=lax_scan_unroll).dot_product_attention
return attention_fn
@gin.configurable
def make_fast_generalized_attention(qkv_dim,
renormalize_attention=True,
numerical_stabilizer=0.0,
nb_features=256,
features_type='deterministic',
kernel_fn=jax.nn.relu,
kernel_epsilon=0.001,
redraw_features=False,
unidirectional=False,
lax_scan_unroll=1):
"""Construct a fast generalized attention menthod."""
  logging.info('Fast generalized attention: %s features and renormalize=%s',
nb_features, renormalize_attention)
if features_type == 'ortho':
matrix_creator = functools.partial(
GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=False)
elif features_type == 'iid':
matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix,
nb_features, qkv_dim)
elif features_type == 'deterministic':
matrix_creator = None
else:
raise ValueError('Unknown feature value type')
def kernel_feature_creator(data,
projection_matrix,
attention_dims_t,
batch_dims_t,
precision,
is_query,
normalize_data=False):
del attention_dims_t
del is_query
return generalized_kernel_feature_creator(data, projection_matrix,
batch_dims_t, precision,
kernel_fn, kernel_epsilon,
normalize_data)
attention_fn = FastAttentionviaLowRankDecomposition(
matrix_creator,
kernel_feature_creator,
renormalize_attention=renormalize_attention,
numerical_stabilizer=numerical_stabilizer,
redraw_features=redraw_features,
unidirectional=unidirectional,
lax_scan_unroll=lax_scan_unroll).dot_product_attention
return attention_fn
class RandomMatrix(object):
r"""Abstract class providing a method for constructing 2D random arrays.
Class is responsible for constructing 2D random arrays.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_2d_array(self):
raise NotImplementedError('Abstract method')
class GaussianUnstructuredRandomMatrix(RandomMatrix):
def __init__(self, nb_rows, nb_columns, key):
self.nb_rows = nb_rows
self.nb_columns = nb_columns
self.key = key
def get_2d_array(self):
return random.normal(self.key, (self.nb_rows, self.nb_columns))
class GaussianOrthogonalRandomMatrix(RandomMatrix):
r"""Class providing a method to create Gaussian orthogonal matrix.
Class is responsible for constructing 2D Gaussian orthogonal arrays.
"""
def __init__(self, nb_rows, nb_columns, key, scaling=0):
self.nb_rows = nb_rows
self.nb_columns = nb_columns
self.key = key
self.scaling = scaling
def get_2d_array(self):
nb_full_blocks = int(self.nb_rows / self.nb_columns)
block_list = []
rng = self.key
for _ in range(nb_full_blocks):
rng, rng_input = jax.random.split(rng)
unstructured_block = random.normal(rng_input,
(self.nb_columns, self.nb_columns))
q, _ = jnp.linalg.qr(unstructured_block)
q = jnp.transpose(q)
block_list.append(q)
remaining_rows = self.nb_rows - nb_full_blocks * self.nb_columns
if remaining_rows > 0:
rng, rng_input = jax.random.split(rng)
unstructured_block = random.normal(rng_input,
(self.nb_columns, self.nb_columns))
q, _ = jnp.linalg.qr(unstructured_block)
q = jnp.transpose(q)
block_list.append(q[0:remaining_rows])
final_matrix = jnp.vstack(block_list)
if self.scaling == 0:
multiplier = jnp.linalg.norm(
random.normal(self.key, (self.nb_rows, self.nb_columns)), axis=1)
elif self.scaling == 1:
multiplier = jnp.sqrt(float(self.nb_columns)) * jnp.ones((self.nb_rows))
else:
      raise ValueError('Scaling must be one of {0, 1}. Was %s' % self.scaling)
return jnp.matmul(jnp.diag(multiplier), final_matrix)
class FastAttention(object):
r"""Abstract class providing a method for fast attention.
Class is responsible for providing a method <dot_product_attention> for fast
approximate attention.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def dot_product_attention(self,
query,
key,
value,
dtype=jnp.float32,
bias=None,
mask=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None):
"""Computes dot-product attention given query, key, and value.
This is the core function for applying fast approximate dot-product
attention. It calculates the attention weights given query and key and
combines the values using the attention weights. This function supports
multi-dimensional inputs.
Args:
query: queries for calculating attention with shape of [batch_size, dim1,
dim2, ..., dimN, num_heads, mem_channels].
key: keys for calculating attention with shape of [batch_size, dim1, dim2,
..., dimN, num_heads, mem_channels].
value: values to be used in attention with shape of [batch_size, dim1,
dim2,..., dimN, num_heads, value_channels].
dtype: the dtype of the computation (default: float32)
bias: bias for the attention weights. This can be used for incorporating
autoregressive mask, padding mask, proximity bias.
mask: mask for the attention weights. This can be used for incorporating
autoregressive masks.
axis: axises over which the attention is applied.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout.
dropout_rate: dropout rate.
deterministic: bool, deterministic or not (to apply dropout).
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
Returns:
      Output of shape [bs, dim1, dim2, ..., dimN, num_heads, value_channels].
"""
raise NotImplementedError('Abstract method')
def _numerator(z_slice_shape, precision, unroll=1):
def fwd(qs, ks, vs):
def body(p, qkv):
(q, k, v) = qkv
p += jnp.einsum('...m,...d->...md', k, v, precision=precision)
X_slice = jnp.einsum('...m,...md->...d', q, p, precision=precision)
return p, X_slice
init_value = jnp.zeros(z_slice_shape)
p, W = lax.scan(body, init_value, (qs, ks, vs), unroll=unroll)
return W, (p, qs, ks, vs)
def bwd(pqkv, W_ct):
def body(carry, qkv_xct):
p, p_ct = carry
q, k, v, x_ct = qkv_xct
q_ct = jnp.einsum('...d,...md->...m', x_ct, p, precision=precision)
p_ct += jnp.einsum('...d,...m->...md', x_ct, q, precision=precision)
k_ct = jnp.einsum('...md,...d->...m', p_ct, v, precision=precision)
v_ct = jnp.einsum('...md,...m->...d', p_ct, k, precision=precision)
p -= jnp.einsum('...m,...d->...md', k, v, precision=precision)
return (p, p_ct), (q_ct, k_ct, v_ct)
p, qs, ks, vs = pqkv
_, (qs_ct, ks_ct, vs_ct) = lax.scan(
body, (p, jnp.zeros_like(p)), (qs, ks, vs, W_ct),
reverse=True,
unroll=unroll)
return qs_ct, ks_ct, vs_ct
@jax.custom_vjp
def _numerator_impl(qs, ks, vs):
W, _ = fwd(qs, ks, vs)
return W
_numerator_impl.defvjp(fwd, bwd)
return _numerator_impl
def _denominator(t_slice_shape, precision, unroll=1):
def fwd(qs, ks):
def body(p, qk):
q, k = qk
p += k
x = jnp.einsum('...m,...m->...', q, p, precision=precision)
return p, x
p = jnp.zeros(t_slice_shape)
p, R = lax.scan(body, p, (qs, ks), unroll=unroll)
return R, (qs, ks, p)
def bwd(qkp, R_ct):
def body(carry, qkx):
p, p_ct = carry
q, k, x_ct = qkx
q_ct = jnp.einsum('...,...m->...m', x_ct, p, precision=precision)
p_ct += jnp.einsum('...,...m->...m', x_ct, q, precision=precision)
k_ct = p_ct
p -= k
return (p, p_ct), (q_ct, k_ct)
qs, ks, p = qkp
_, (qs_ct, ks_ct) = lax.scan(
body, (p, jnp.zeros_like(p)), (qs, ks, R_ct),
reverse=True,
unroll=unroll)
return (qs_ct, ks_ct)
@jax.custom_vjp
def _denominator_impl(qs, ks):
R, _ = fwd(qs, ks)
return R
_denominator_impl.defvjp(fwd, bwd)
return _denominator_impl
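# Note (illustrative): _numerator and _denominator above implement the causal
# (unidirectional) variant as prefix sums over the sequence axis via lax.scan,
# with custom VJPs so the backward pass re-traverses the scan in reverse
# instead of materializing the full L x L attention matrix.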
class FastAttentionviaLowRankDecomposition(FastAttention):
r"""Class providing a method for fast attention via low rank decomposition.
Class is responsible for providing a method <dot_product_attention> for fast
dot-product attention with the use of low rank decomposition (e.g. with
random feature maps).
"""
def __init__(self,
matrix_creator,
kernel_feature_creator,
renormalize_attention,
numerical_stabilizer,
redraw_features,
unidirectional,
lax_scan_unroll=1): # For optimal GPU performance, set to 16.
rng = random.PRNGKey(0)
self.matrix_creator = matrix_creator
self.projection_matrix = self.draw_weights(rng)
self.kernel_feature_creator = kernel_feature_creator
self.renormalize_attention = renormalize_attention
self.numerical_stabilizer = numerical_stabilizer
self.redraw_features = redraw_features
self.unidirectional = unidirectional
self.lax_scan_unroll = lax_scan_unroll
def draw_weights(self, key):
if self.matrix_creator is None:
return None
matrixrng, _ = random.split(key)
projection_matrix = self.matrix_creator(key=matrixrng).get_2d_array()
return projection_matrix
def dot_product_attention(self,
query,
key,
value,
dtype=jnp.float32,
bias=None,
mask=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None):
assert key.shape[:-1] == value.shape[:-1]
assert (query.shape[0:1] == key.shape[0:1] and
query.shape[-1] == key.shape[-1])
if axis is None:
axis = tuple(range(1, key.ndim - 2))
if not isinstance(axis, Iterable):
axis = (axis,)
assert key.ndim == query.ndim
assert key.ndim == value.ndim
for ax in axis:
if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2):
raise ValueError('Attention axis must be between the batch '
'axis and the last-two axes.')
n = key.ndim
# Constructing projection tensor.
if self.redraw_features:
# TODO(kchoro): Get rid of the constant below.
query_seed = lax.convert_element_type(
jnp.ceil(jnp.sum(query) * 10000000.0), jnp.int32)
rng = random.PRNGKey(query_seed)
self.projection_matrix = self.draw_weights(rng)
# batch_dims is <bs, <non-attention dims>, num_heads>
batch_dims = tuple(onp.delete(range(n), axis + (n - 1,)))
# q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
qk_perm = batch_dims + axis + (n - 1,)
k_extra_perm = axis + batch_dims + (n - 1,)
key_extra = key.transpose(k_extra_perm)
key = key.transpose(qk_perm)
query = query.transpose(qk_perm)
# v -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
v_perm = batch_dims + axis + (n - 1,)
value = value.transpose(v_perm)
batch_dims_t = tuple(range(len(batch_dims)))
attention_dims_t = tuple(
range(len(batch_dims),
len(batch_dims) + len(axis)))
# Constructing tensors Q^{'} and K^{'}.
query_prime = self.kernel_feature_creator(query, self.projection_matrix,
attention_dims_t, batch_dims_t,
precision, True)
key_prime = self.kernel_feature_creator(key, self.projection_matrix,
attention_dims_t, batch_dims_t,
precision, False)
if self.unidirectional:
index = attention_dims_t[0]
z_slice_shape = key_prime.shape[0:len(batch_dims_t)] + (
key_prime.shape[-1],) + (value.shape[-1],)
numerator_fn = _numerator(z_slice_shape, precision, self.lax_scan_unroll)
W = numerator_fn(
jnp.moveaxis(query_prime, index, 0),
jnp.moveaxis(key_prime, index, 0), jnp.moveaxis(value, index, 0))
# Constructing W = (Q^{'}(K^{'})^{T})_{masked}V
W = jnp.moveaxis(W, 0, index)
if not self.renormalize_attention:
# Unidirectional, not-normalized attention.
perm_inv = _invert_perm(qk_perm)
result = W.transpose(perm_inv)
return result
else:
# Unidirectional, normalized attention.
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(
key_extra.shape[0:len(axis)])
index = attention_dims_t[0]
t_slice_shape = key_prime.shape[0:len(batch_dims_t)] + (
key_prime.shape[-1],)
denominator_fn = _denominator(t_slice_shape, precision,
self.lax_scan_unroll)
R = denominator_fn(
jnp.moveaxis(query_prime, index, 0),
jnp.moveaxis(key_prime, index, 0))
R = jnp.moveaxis(R, 0, index)
else:
contract_query = tuple(
range(len(batch_dims) + len(axis),
len(batch_dims) + len(axis) + 1))
contract_z = tuple(range(len(batch_dims), len(batch_dims) + 1))
# Constructing Z = (K^{'})^{T}V
# Z (bs, <non-attention dims>, num_heads, channels_m, channels_v)
Z = lax.dot_general(
key_prime,
value,
((attention_dims_t, attention_dims_t), (batch_dims_t, batch_dims_t)),
precision=precision)
# Constructing W = Q^{'}Z = Q^{'}(K^{'})^{T}V
# q (bs, <non-attention dims>, num_heads, <attention dims>, channels_m)
# Z (bs, <non-attention dims>, num_heads, channels_m, channels_v)
# W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v)
W = lax.dot_general(
query_prime,
Z, ((contract_query, contract_z), (batch_dims_t, batch_dims_t)),
precision=precision)
if not self.renormalize_attention:
# Bidirectional, not-normalized attention.
perm_inv = _invert_perm(qk_perm)
result = W.transpose(perm_inv)
return result
else:
# Bidirectional, normalized attention.
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(
key_extra.shape[0:len(axis)])
contract_key = tuple(
range(len(batch_dims),
len(batch_dims) + len(axis)))
contract_thick_all_ones = tuple(
range(thick_all_ones.ndim - len(axis), thick_all_ones.ndim))
# Construct T = (K^{'})^{T} 1_L
# k (bs, <non-attention dims>, num_heads, <attention dims>, channels)
T = lax.dot_general(
key_prime,
thick_all_ones, ((contract_key, contract_thick_all_ones),
(batch_dims_t, batch_dims_t)),
precision=precision)
# Construct partition function: R = Q^{'} T = Q^{'}(K^{'})^{T} 1_L
# q_p (bs, <non-attention dims>, num_heads, <attention dims>, channs_m)
# T (bs, <non-attention dims>, num_heads, channels_m)
R = lax.dot_general(
query_prime,
T, (((query_prime.ndim - 1,), (T.ndim - 1,)),
(batch_dims_t, range(0,
len(T.shape) - 1))),
precision=precision)
R = R + 2 * self.numerical_stabilizer * (
jnp.abs(R) <= self.numerical_stabilizer)
R = jnp.reciprocal(R)
R = jnp.expand_dims(R, len(R.shape))
# W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v)
# R (bs, <non-attention dims>, num_heads, <attention dims>, extra_channel)
result = W * R
# back to (bs, dim1, dim2, ..., dimN, num_heads, channels)
perm_inv = _invert_perm(qk_perm)
result = result.transpose(perm_inv)
return result
def _invert_perm(perm):
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
perm_inv[j] = i
return tuple(perm_inv)
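def _example_fast_softmax_attention():
  """Minimal usage sketch (illustrative addition, not part of the original
  module): builds the fast softmax attention callable defined above and
  applies it to toy query/key/value tensors shaped [batch, length, heads, dim].
  """
  attention_fn = make_fast_softmax_attention(qkv_dim=8, nb_features=16)
  q = jnp.ones((1, 4, 2, 8))
  k = jnp.ones((1, 4, 2, 8))
  v = jnp.ones((1, 4, 2, 8))
  # The output keeps the input layout, i.e. shape (1, 4, 2, 8).
  return attention_fn(q, k, v)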
|
|
# -*- coding: utf-8 -*-
"""Pure python implementation of the binary Tokyo Tyrant 1.1.17 protocol
Tokyo Cabinet <http://tokyocabinet.sourceforge.net/> is a "super hyper ultra
database manager" written and maintained by Mikio Hirabayashi and released
under the LGPL.
Tokyo Tyrant is the de facto database server for Tokyo Cabinet written and
maintained by the same author. It supports a REST HTTP protocol, memcached,
and its own simple binary protocol. This library implements the full binary
protocol for the Tokyo Tyrant 1.1.17 in pure Python as defined here::
http://tokyocabinet.sourceforge.net/tyrantdoc/
Typical usage is with the PyTyrant class which provides a dict-like wrapper
for the raw Tyrant protocol::
>>> import pytyrant
>>> t = pytyrant.PyTyrant.open('127.0.0.1', 1978)
>>> t['__test_key__'] = 'foo'
>>> t.concat('__test_key__', 'bar')
>>> print t['__test_key__']
foobar
>>> del t['__test_key__']
Or even:
>>> from pytyrant import open_tyrant
>>> t = open_tyrant() # using default settings; you can specify other
>>> t.__class__
<class 'pytyrant.PyTableTyrant'>
>>> t.search.filter(name__in=['John','Mary'])
['john_doe', 'mary_doh']
(In the latter case the server reported that its database type is TDB, so
the extended version of PyTyrant was automatically chosen.)
"""
import itertools
import math
import socket
import struct
import UserDict
__all__ = [
'Tyrant', 'TyrantError', 'PyTyrant',
'RDBMONOULOG', 'RDBXOLCKREC', 'RDBXOLCKGLB',
]
class TyrantError(Exception):
pass
DEFAULT_PORT = 1978
MAGIC = 0xc8
RDBMONOULOG = 1 << 0
RDBXOLCKREC = 1 << 0
RDBXOLCKGLB = 1 << 1
RDBQOSTRASC, RDBQOSTRDESC, RDBQONUMASC, RDBQONUMDESC = range(4)
# Enumeration for index types (from tcrdb.h, tctdb.h)
RDBITLEXICAL = TDBITLEXICAL = 0 # Lexical string
RDBITDECIMAL = TDBITDECIMAL = 1 # Decimal string
RDBITOPT = TDBITOPT = 9998  # Optimize
RDBITVOID = TDBITVOID = 9999  # Void
RDBITKEEP = TDBITKEEP = 1 << 24 # Keep existing index
class C(object):
"""
Tyrant Protocol constants
"""
put = 0x10
putkeep = 0x11
putcat = 0x12
putshl = 0x13
putnr = 0x18
out = 0x20
get = 0x30
mget = 0x31
vsiz = 0x38
iterinit = 0x50
iternext = 0x51
fwmkeys = 0x58
addint = 0x60
adddouble = 0x61
ext = 0x68
sync = 0x70
vanish = 0x72
copy = 0x73
restore = 0x74
setmst = 0x78
rnum = 0x80
size = 0x81
stat = 0x88
misc = 0x90
QUERY_OPERATIONS = {
# operations on strings
'str': {
None: '0', # streq - string is equal to
'is': '0', # streq - string is equal to
'contains': '1', # strinc - string is included in
'startswith': '2', # strbw - string begins with
'endswith': '3', # strew - string ends with
'regex': '7', # strrx - string matches regular expression of
'iregex': '7', # strrx - string matches regular expression of (case-insensitive) XXX must prepend value with an asterisk
},
# operations on numbers
'num': {
None: '8', # numeq - number is equal to
'is': '8', # numeq - number is equal to
'gt': '9', # numgt - number is greater than
'gte': '10', # numge - number is greater than or equal to
'lt': '11', # numlt - number is less than
'lte': '12', # numle - number is less than or equal to
},
# operations on lists of numbers
'list_num': {
'in': '14', # numoreq - number is equal to at least one token in
'between': '13', # numbt - number is between two tokens of
},
# operations on lists of strings (or mixed)
'list_str': {
None: '4', # strand - string includes all tokens in
'is': '4', # strand - string includes all tokens in
'any': '5', # stror - string includes at least one token in
'in': '6', # stroreq - string is equal to at least one token in
}
}
def _t0(code):
return [chr(MAGIC) + chr(code)]
def _t1(code, key):
return [
struct.pack('>BBI', MAGIC, code, len(key)),
key,
]
def _t1FN(code, func, opts, args):
outlst = [
struct.pack('>BBIII', MAGIC, code, len(func), opts, len(args)),
func,
]
for k in args:
outlst.extend([struct.pack('>I', len(k)), k])
return outlst
def _t1R(code, key, msec):
return [
struct.pack('>BBIQ', MAGIC, code, len(key), msec),
key,
]
def _t1M(code, key, count):
return [
struct.pack('>BBII', MAGIC, code, len(key), count),
key,
]
def _tN(code, klst):
outlst = [struct.pack('>BBI', MAGIC, code, len(klst))]
for k in klst:
outlst.extend([struct.pack('>I', len(k)), k])
return outlst
def _t2(code, key, value):
return [
struct.pack('>BBII', MAGIC, code, len(key), len(value)),
key,
value,
]
def _t2W(code, key, value, width):
return [
struct.pack('>BBIII', MAGIC, code, len(key), len(value), width),
key,
value,
]
def _t3F(code, func, opts, key, value):
return [
struct.pack('>BBIIII', MAGIC, code, len(func), opts, len(key), len(value)),
func,
key,
value,
]
def _tDouble(code, key, integ, fract):
return [
struct.pack('>BBIQQ', MAGIC, code, len(key), integ, fract),
key,
]
def socksend(sock, lst):
sock.sendall(''.join(lst))
def sockrecv(sock, bytes):
d = ''
while len(d) < bytes:
d += sock.recv(min(8192, bytes - len(d)))
return d
def socksuccess(sock):
fail_code = ord(sockrecv(sock, 1))
if fail_code:
raise TyrantError(fail_code)
def socklen(sock):
return struct.unpack('>I', sockrecv(sock, 4))[0]
def socklong(sock):
return struct.unpack('>Q', sockrecv(sock, 8))[0]
def sockstr(sock):
return sockrecv(sock, socklen(sock))
def sockdouble(sock):
intpart, fracpart = struct.unpack('>QQ', sockrecv(sock, 16))
return intpart + (fracpart * 1e-12)
def sockstrpair(sock):
klen = socklen(sock)
vlen = socklen(sock)
k = sockrecv(sock, klen)
v = sockrecv(sock, vlen)
return k, v
def dict_to_list(dct):
return list(itertools.chain(*dct.iteritems()))
def list_to_dict(lst):
if not isinstance(lst, (list, tuple)):
lst = list(lst)
return dict((lst[i], lst[i + 1]) for i in xrange(0, len(lst), 2))
def get_tyrant_stats(tyrant):
return dict(l.split('\t', 1) for l in tyrant.stat().splitlines() if l)
def open_tyrant(*args, **kw):
"Opens connection and returns an appropriate PyTyrant class."
t = Tyrant.open(*args, **kw)
if get_tyrant_stats(t).get('type') == 'table':
return PyTableTyrant(t)
else:
return PyTyrant(t)
class PyTyrant(object, UserDict.DictMixin):
"""
Dict-like proxy for a Tyrant instance
"""
@classmethod
def open(cls, *args, **kw):
return cls(Tyrant.open(*args, **kw))
def __init__(self, t):
self.t = t
def __repr__(self):
# The __repr__ for UserDict.DictMixin isn't desirable
# for a large KV store :)
return object.__repr__(self)
def has_key(self, key):
return key in self
def __contains__(self, key):
try:
self.t.vsiz(key)
except TyrantError:
return False
else:
return True
def setdefault(self, key, value):
try:
self.t.putkeep(key, value)
except TyrantError:
return self[key]
return value
def __setitem__(self, key, value):
self.t.put(key, value)
def __getitem__(self, key):
try:
return self.t.get(key)
except TyrantError:
raise KeyError(key)
def __delitem__(self, key):
try:
self.t.out(key)
except TyrantError:
raise KeyError(key)
def __iter__(self):
return self.iterkeys()
def iterkeys(self):
self.t.iterinit()
try:
while True:
yield self.t.iternext()
except TyrantError:
pass
def keys(self):
return list(self.iterkeys())
def __len__(self):
return self.t.rnum()
def clear(self):
"""
>>> t = PyTyrant.open('127.0.0.1', 1978)
>>> t['delete_me'] = 'not to be seen'
>>> t['delete_me_2'] = 'not to be seen'
>>> 'delete_me' in t and 'delete_me_2' in t
True
>>> t.clear()
>>> 'delete_me' in t or 'delete_me_2' in t
False
"""
self.t.vanish()
def update(self, other=None, **kwargs):
# Make progressively weaker assumptions about "other"
if other is None:
pass
elif hasattr(other, 'iteritems'):
self.multi_set(other.iteritems())
elif hasattr(other, 'keys'):
self.multi_set([(k, other[k]) for k in other.keys()])
else:
self.multi_set(other)
if kwargs:
self.update(kwargs)
def multi_del(self, keys, no_update_log=False):
opts = (no_update_log and RDBMONOULOG or 0)
if not isinstance(keys, (list, tuple)):
keys = list(keys)
self.t.misc("outlist", opts, keys)
def multi_get(self, keys, no_update_log=False):
opts = (no_update_log and RDBMONOULOG or 0)
if not isinstance(keys, (list, tuple)):
keys = list(keys)
rval = self.t.misc("getlist", opts, keys)
if len(rval) <= len(keys):
# 1.1.10 protocol, may return invalid results
if len(rval) < len(keys):
raise KeyError("Missing a result, unusable response in 1.1.10")
return rval
# 1.1.11 protocol returns interleaved key, value list
d = list_to_dict(rval)
return map(d.get, keys)
def multi_set(self, items, no_update_log=False):
opts = (no_update_log and RDBMONOULOG or 0)
lst = []
for k, v in items:
lst.extend((k, v))
self.t.misc("putlist", opts, lst)
def call_func(self, func, key, value, record_locking=False, global_locking=False):
opts = (
(record_locking and RDBXOLCKREC or 0) |
(global_locking and RDBXOLCKGLB or 0))
return self.t.ext(func, opts, key, value)
def get_size(self, key):
try:
return self.t.vsiz(key)
except TyrantError:
raise KeyError(key)
def get_stats(self):
return get_tyrant_stats(self.t)
def prefix_keys(self, prefix, maxkeys=None):
if maxkeys is None:
maxkeys = len(self)
return self.t.fwmkeys(prefix, maxkeys)
def concat(self, key, value, width=None):
if width is None:
self.t.putcat(key, value)
else:
self.t.putshl(key, value, width)
def sync(self):
self.t.sync()
def close(self):
self.t.close()
class Query(object):
def __init__(self, ptt):
self.ptt = ptt
self.conditions = []
self._result_cache = None
def __iter__(self):
return iter(self._get_results())
def __len__(self):
return len(self._get_results())
def __repr__(self):
return repr(list(self))
def __getitem__(self, k):
if not isinstance(k, (slice, int, long)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache:
try:
return self._result_cache[k]
except IndexError:
# Same behavior as querying the DB if the key doesn't exist
if not isinstance(k, slice):
return None
raise
if isinstance(k, slice):
if k.stop is not None:
limit = k.stop - (k.start or 0)
else:
limit = -1
condition = '\x00'.join(('setlimit', str(limit), str(k.start or 0)))
resp = self.ptt.t.misc('search', 0, self.conditions + [condition])
return k.step and list(resp)[::k.step] or resp
condition = '\x00'.join(('setlimit', str(1), str(k)))
resp = self.ptt.t.misc('search', 0, self.conditions + [condition])
if not resp:
return None
else:
return resp[0]
def _determine_operation(self, lookup, value):
""" Returns operation code or raises KeyError.
Determines appropriate operation by lookup and value.
"""
# number
if isinstance(value, (int, float)):
return QUERY_OPERATIONS['num'][lookup]
# string
if isinstance(value, basestring):
return QUERY_OPERATIONS['str'][lookup]
# list...
if hasattr(value, '__iter__'):
# ...of numbers
if lookup in QUERY_OPERATIONS['list_num']:
if len(value) and isinstance(list(value)[0], (int, float)):
return QUERY_OPERATIONS['list_num'][lookup]
# ...of strings
return QUERY_OPERATIONS['list_str'][lookup]
raise KeyError
def filter(self, **query):
q = self._clone()
for key, value in query.iteritems():
# resolve operation
if '__' in key:
try:
field, lookup = key.split('__')
except ValueError:
raise ValueError("Filter arguments should be of the form "
"`field__operation`")
else:
field, lookup = key, None
try:
opcode = self._determine_operation(lookup, value)
except KeyError:
raise ValueError('"%s" is not a valid lookup for value %s'
% (lookup, value))
# prepare value
if isinstance(value, (int,float)):
# coerce number to string
value = str(value)
if lookup == 'iregex':
# add asterisk for case-insensitive regular expression
value = '*%s' % value
if not isinstance(value, basestring) and hasattr(value, '__iter__'):
# Value is a list. Make it a comma separated string.
value = ','.join(str(x) for x in value)
condition = '\x00'.join(["addcond", field, opcode, value])
q.conditions.append(condition)
return q
def items(self):
return self.ptt.multi_get(list(self))
def order_by_num(self, field):
q = self._clone()
if field.startswith('-'):
direction = RDBQONUMDESC
field = field[1:]
else:
direction = RDBQONUMASC
condition = '\x00'.join(["setorder", field, str(direction)])
q.conditions.append(condition)
return q
def order_by_str(self, field):
q = self._clone()
if field.startswith('-'):
direction = RDBQOSTRDESC
field = field[1:]
else:
direction = RDBQOSTRASC
condition = '\x00'.join(["setorder", field, str(direction)])
q.conditions.append(condition)
return q
def _clone(self, klass=None, **kwargs):
if klass is None:
klass = self.__class__
q = klass(self.ptt)
q.conditions = self.conditions[:]
q.__dict__.update(kwargs)
return q
def _get_results(self):
if self._result_cache is None:
self._result_cache = self.ptt.t.misc('search', 0, self.conditions)
return self._result_cache
class PyTableTyrant(PyTyrant):
"""
Dict-like proxy for a Table-based Tyrant instance
"""
def setdefault(self, key, value, no_update_log=False):
opts = (no_update_log and RDBMONOULOG or 0)
try:
self.t.misc('putkeep', opts, [key] + dict_to_list(value))
except TyrantError:
return self[key]
return value
def __setitem__(self, key, value):
self.t.misc('put', 0, [key] + dict_to_list(value))
def __getitem__(self, key):
try:
return list_to_dict(self.t.misc('get', 0, (key,)))
except TyrantError:
raise KeyError(key)
def multi_get(self, keys, no_update_log=False):
opts = (no_update_log and RDBMONOULOG or 0)
if not isinstance(keys, (list, tuple)):
keys = list(keys)
rval = self.t.misc("getlist", opts, keys)
if len(rval) <= len(keys):
# 1.1.10 protocol, may return invalid results
if len(rval) < len(keys):
raise KeyError("Missing a result, unusable response in 1.1.10")
return list_to_dict(rval.split('\x00'))
# 1.1.11 protocol returns interleaved key, value list
d = dict((rval[i], rval[i + 1]) for i in xrange(0, len(rval), 2))
return [list_to_dict(d.get(i).split('\x00')) for i in keys]
def multi_set(self, items, no_update_log=False):
opts = (no_update_log and RDBMONOULOG or 0)
lst = []
for k, v in items:
lst.extend((k, '\x00'.join(dict_to_list(v))))
self.t.misc("putlist", opts, lst)
def concat(self, key, value, width=None, no_update_log=False):
opts = (no_update_log and RDBMONOULOG or 0)
if width is None:
self.t.misc('putcat', opts, ([key] + dict_to_list(value)))
else:
raise ValueError('Cannot concat with a width on a table database')
def _search(self):
return Query(self)
search = property(_search)
def setindex(self, column, index_type=RDBITLEXICAL, no_update_log=False):
"""Create or modify secondary column index."""
opts = (no_update_log and RDBMONOULOG or 0)
self.t.misc("setindex", opts, (column, str(index_type)))
class Tyrant(object):
@classmethod
def open(cls, host='127.0.0.1', port=DEFAULT_PORT):
sock = socket.socket()
sock.connect((host, port))
sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return cls(sock)
def __init__(self, sock):
self.sock = sock
def close(self):
self.sock.close()
def put(self, key, value):
"""Unconditionally set key to value
"""
socksend(self.sock, _t2(C.put, key, value))
socksuccess(self.sock)
def putkeep(self, key, value):
"""Set key to value if key does not already exist
"""
socksend(self.sock, _t2(C.putkeep, key, value))
socksuccess(self.sock)
def putcat(self, key, value):
"""Append value to the existing value for key, or set key to
value if it does not already exist
"""
socksend(self.sock, _t2(C.putcat, key, value))
socksuccess(self.sock)
def putshl(self, key, value, width):
"""Equivalent to::
self.putcat(key, value)
self.put(key, self.get(key)[-width:])
"""
socksend(self.sock, _t2W(C.putshl, key, value, width))
socksuccess(self.sock)
def putnr(self, key, value):
"""Set key to value without waiting for a server response
"""
socksend(self.sock, _t2(C.putnr, key, value))
def out(self, key):
"""Remove key from server
"""
socksend(self.sock, _t1(C.out, key))
socksuccess(self.sock)
def get(self, key):
"""Get the value of a key from the server
"""
socksend(self.sock, _t1(C.get, key))
socksuccess(self.sock)
return sockstr(self.sock)
def _mget(self, klst):
socksend(self.sock, _tN(C.mget, klst))
socksuccess(self.sock)
numrecs = socklen(self.sock)
for i in xrange(numrecs):
k, v = sockstrpair(self.sock)
yield k, v
def mget(self, klst):
"""Get key,value pairs from the server for the given list of keys
"""
return list(self._mget(klst))
def vsiz(self, key):
"""Get the size of a value for key
"""
socksend(self.sock, _t1(C.vsiz, key))
socksuccess(self.sock)
return socklen(self.sock)
def iterinit(self):
"""Begin iteration over all keys of the database
"""
socksend(self.sock, _t0(C.iterinit))
socksuccess(self.sock)
def iternext(self):
"""Get the next key after iterinit
"""
socksend(self.sock, _t0(C.iternext))
socksuccess(self.sock)
return sockstr(self.sock)
def _fwmkeys(self, prefix, maxkeys):
socksend(self.sock, _t1M(C.fwmkeys, prefix, maxkeys))
socksuccess(self.sock)
numkeys = socklen(self.sock)
for i in xrange(numkeys):
yield sockstr(self.sock)
def fwmkeys(self, prefix, maxkeys):
"""Get up to the first maxkeys starting with prefix
"""
return list(self._fwmkeys(prefix, maxkeys))
def addint(self, key, num):
socksend(self.sock, _t1M(C.addint, key, num))
socksuccess(self.sock)
return socklen(self.sock)
def adddouble(self, key, num):
fracpart, intpart = math.modf(num)
fracpart, intpart = int(fracpart * 1e12), int(intpart)
        socksend(self.sock, _tDouble(C.adddouble, key, intpart, fracpart))
socksuccess(self.sock)
return sockdouble(self.sock)
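    # Note (illustrative): the double travels as two unsigned 64-bit integers,
    # e.g. 3.25 -> integer part 3 and fraction 250000000000 (fraction * 1e12);
    # sockdouble() reassembles the reply as intpart + fracpart * 1e-12.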
def ext(self, func, opts, key, value):
# tcrdbext opts are RDBXOLCKREC, RDBXOLCKGLB
"""Call func(key, value) with opts
opts is a bitflag that can be RDBXOLCKREC for record locking
and/or RDBXOLCKGLB for global locking"""
socksend(self.sock, _t3F(C.ext, func, opts, key, value))
socksuccess(self.sock)
return sockstr(self.sock)
def sync(self):
"""Synchronize the database
"""
socksend(self.sock, _t0(C.sync))
socksuccess(self.sock)
def vanish(self):
"""Remove all records
"""
socksend(self.sock, _t0(C.vanish))
socksuccess(self.sock)
def copy(self, path):
"""Hot-copy the database to path
"""
socksend(self.sock, _t1(C.copy, path))
socksuccess(self.sock)
def restore(self, path, msec):
"""Restore the database from path at timestamp (in msec)
"""
        socksend(self.sock, _t1R(C.restore, path, msec))
socksuccess(self.sock)
def setmst(self, host, port):
"""Set master to host:port
"""
socksend(self.sock, _t1M(C.setmst, host, port))
socksuccess(self.sock)
def rnum(self):
"""Get the number of records in the database
"""
socksend(self.sock, _t0(C.rnum))
socksuccess(self.sock)
return socklong(self.sock)
def size(self):
"""Get the size of the database
"""
socksend(self.sock, _t0(C.size))
socksuccess(self.sock)
return socklong(self.sock)
def stat(self):
"""Get some statistics about the database
"""
socksend(self.sock, _t0(C.stat))
socksuccess(self.sock)
return sockstr(self.sock)
def _misc(self, func, opts, args):
# tcrdbmisc opts are RDBMONOULOG
socksend(self.sock, _t1FN(C.misc, func, opts, args))
try:
socksuccess(self.sock)
finally:
numrecs = socklen(self.sock)
for i in xrange(numrecs):
yield sockstr(self.sock)
def misc(self, func, opts, args):
"""All databases support "putlist", "outlist", and "getlist".
"putlist" is to store records. It receives keys and values one after the other, and returns an empty list.
"outlist" is to remove records. It receives keys, and returns an empty list.
"getlist" is to retrieve records. It receives keys, and returns values.
Table database supports "setindex", "search", "genuid".
opts is a bitflag that can be RDBMONOULOG to prevent writing to the update log
"""
return list(self._misc(func, opts, args))
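    # Example of the misc() argument layout (illustrative only):
    #   t = Tyrant.open('127.0.0.1', 1978)
    #   t.misc('putlist', 0, ['k1', 'v1', 'k2', 'v2'])  # store two records
    #   t.misc('getlist', 0, ['k1', 'k2'])  # -> ['k1', 'v1', 'k2', 'v2'] (1.1.11+)
    #   t.misc('outlist', 0, ['k1', 'k2'])  # remove both records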
def main():
import doctest
doctest.testmod()
if __name__ == '__main__':
main()
|
|
"""Provides helper methods to handle the time in HA."""
import datetime as dt
import re
import pytz
DATE_STR_FORMAT = "%Y-%m-%d"
UTC = DEFAULT_TIME_ZONE = pytz.utc
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
DATETIME_RE = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)
def set_default_time_zone(time_zone):
"""Set a default time zone to be used when none is specified."""
global DEFAULT_TIME_ZONE # pylint: disable=global-statement
assert isinstance(time_zone, dt.tzinfo)
DEFAULT_TIME_ZONE = time_zone
def get_time_zone(time_zone_str):
"""Get time zone from string. Return None if unable to determine."""
try:
return pytz.timezone(time_zone_str)
except pytz.exceptions.UnknownTimeZoneError:
return None
def utcnow():
"""Get now in UTC time."""
return dt.datetime.now(UTC)
def now(time_zone=None):
"""Get now in specified time zone."""
return dt.datetime.now(time_zone or DEFAULT_TIME_ZONE)
def as_utc(dattim):
"""Return a datetime as UTC time.
Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE.
"""
if dattim.tzinfo == UTC:
return dattim
elif dattim.tzinfo is None:
dattim = DEFAULT_TIME_ZONE.localize(dattim)
return dattim.astimezone(UTC)
def as_timestamp(dt_value):
"""Convert a date/time into a unix time (seconds since 1970)."""
if hasattr(dt_value, "timestamp"):
parsed_dt = dt_value
else:
parsed_dt = parse_datetime(str(dt_value))
if not parsed_dt:
raise ValueError("not a valid date/time.")
return parsed_dt.timestamp()
def as_local(dattim):
"""Convert a UTC datetime object to local time zone."""
if dattim.tzinfo == DEFAULT_TIME_ZONE:
return dattim
elif dattim.tzinfo is None:
dattim = UTC.localize(dattim)
return dattim.astimezone(DEFAULT_TIME_ZONE)
def utc_from_timestamp(timestamp):
"""Return a UTC time from a timestamp."""
return dt.datetime.utcfromtimestamp(timestamp).replace(tzinfo=UTC)
def start_of_local_day(dt_or_d=None):
"""Return local datetime object of start of day from date or datetime."""
if dt_or_d is None:
dt_or_d = now().date()
elif isinstance(dt_or_d, dt.datetime):
dt_or_d = dt_or_d.date()
return dt.datetime.combine(dt_or_d, dt.time()).replace(
tzinfo=DEFAULT_TIME_ZONE)
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
def parse_datetime(dt_str):
"""Parse a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
match = DATETIME_RE.match(dt_str)
if not match:
return None
kws = match.groupdict()
if kws['microsecond']:
kws['microsecond'] = kws['microsecond'].ljust(6, '0')
tzinfo = kws.pop('tzinfo')
if tzinfo == 'Z':
tzinfo = UTC
elif tzinfo is not None:
offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
offset_hours = int(tzinfo[1:3])
offset = dt.timedelta(hours=offset_hours, minutes=offset_mins)
if tzinfo[0] == '-':
offset = -offset
tzinfo = dt.timezone(offset)
kws = {k: int(v) for k, v in kws.items() if v is not None}
kws['tzinfo'] = tzinfo
return dt.datetime(**kws)
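# Example (illustrative): parse_datetime('2017-05-04 12:30:45+02:00') returns
# an aware datetime for 12:30:45 on 2017-05-04 with a fixed +02:00 offset,
# while parse_datetime('not a date') returns None.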
def parse_date(dt_str):
"""Convert a date string to a date object."""
try:
return dt.datetime.strptime(dt_str, DATE_STR_FORMAT).date()
except ValueError: # If dt_str did not match our format
return None
def parse_time(time_str):
"""Parse a time string (00:20:00) into Time object.
Return None if invalid.
"""
parts = str(time_str).split(':')
if len(parts) < 2:
return None
try:
hour = int(parts[0])
minute = int(parts[1])
second = int(parts[2]) if len(parts) > 2 else 0
return dt.time(hour, minute, second)
except ValueError:
# ValueError if value cannot be converted to an int or not in range
return None
# Found in this gist: https://gist.github.com/zhangsen/1199964
def get_age(date):
# pylint: disable=too-many-return-statements
"""
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it won't work.
"""
def formatn(number, unit):
"""Add "unit" if it's plural."""
if number == 1:
return "1 %s" % unit
elif number > 1:
return "%d %ss" % (number, unit)
def q_n_r(first, second):
"""Return quotient and remaining."""
return first // second, first % second
delta = now() - date
day = delta.days
second = delta.seconds
year, day = q_n_r(day, 365)
if year > 0:
return formatn(year, 'year')
month, day = q_n_r(day, 30)
if month > 0:
return formatn(month, 'month')
if day > 0:
return formatn(day, 'day')
hour, second = q_n_r(second, 3600)
if hour > 0:
return formatn(hour, 'hour')
minute, second = q_n_r(second, 60)
if minute > 0:
return formatn(minute, 'minute')
if second > 0:
return formatn(second, 'second')
return "0 second"
|
|
# Copyright 2014 Dirk Pranke. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import io
import logging
import sys
from typ import python_2_3_compat
from typ.host import _TeedStream
is_python3 = bool(sys.version_info.major == 3)
if is_python3: # pragma: python3
# pylint: disable=redefined-builtin,invalid-name
unicode = str
class FakeHost(object):
# "too many instance attributes" pylint: disable=R0902
# "redefining built-in" pylint: disable=W0622
# "unused arg" pylint: disable=W0613
python_interpreter = 'python'
is_python3 = bool(sys.version_info.major == 3)
def __init__(self):
self.logger = logging.getLogger()
self.stdin = sys.stdin
self.stdout = sys.stdout
self.stderr = sys.stderr
self.platform = 'linux2'
self.env = {}
self.sep = '/'
self.dirs = set([])
self.files = {}
self.fetches = []
self.fetch_responses = {}
self.written_files = {}
self.last_tmpdir = None
self.current_tmpno = 0
self.mtimes = {}
self.cmds = []
self.cwd = '/tmp'
self._orig_logging_handlers = []
def __getstate__(self):
d = copy.copy(self.__dict__)
del d['stderr']
del d['stdout']
del d['stdin']
del d['logger']
del d['_orig_logging_handlers']
return d
def __setstate__(self, d):
for k, v in d.items():
setattr(self, k, v)
self.logger = logging.getLogger()
self.stdin = io.StringIO()
self.stdout = io.StringIO()
self.stderr = io.StringIO()
def abspath(self, *comps):
relpath = self.join(*comps)
if relpath.startswith('/'):
return relpath
return self.join(self.cwd, relpath)
def add_to_path(self, *comps):
absolute_path = self.abspath(*comps)
if absolute_path not in sys.path:
sys.path.append(absolute_path)
def basename(self, path):
return path.split(self.sep)[-1]
def call(self, argv, stdin=None, env=None):
self.cmds.append(argv)
return 0, '', ''
def call_inline(self, argv):
return self.call(argv)[0]
def chdir(self, *comps):
path = self.join(*comps)
if not path.startswith('/'):
path = self.join(self.cwd, path)
self.cwd = path
def cpu_count(self):
return 1
def dirname(self, path):
return '/'.join(path.split('/')[:-1])
def exists(self, *comps):
path = self.abspath(*comps)
return ((path in self.files and self.files[path] is not None) or
path in self.dirs)
def files_under(self, top):
files = []
top = self.abspath(top)
for f in self.files:
if self.files[f] is not None and f.startswith(top):
files.append(self.relpath(f, top))
return files
def for_mp(self):
return self
def getcwd(self):
return self.cwd
def getenv(self, key, default=None):
return self.env.get(key, default)
def getpid(self):
return 1
def isdir(self, *comps):
path = self.abspath(*comps)
return path in self.dirs
def isfile(self, *comps):
path = self.abspath(*comps)
return path in self.files and self.files[path] is not None
def join(self, *comps):
p = ''
for c in comps:
if c in ('', '.'):
continue
elif c.startswith('/'):
p = c
elif p:
p += '/' + c
else:
p = c
# Handle ./
p = p.replace('/./', '/')
# Handle ../
while '/..' in p:
comps = p.split('/')
idx = comps.index('..')
comps = comps[:idx-1] + comps[idx+1:]
p = '/'.join(comps)
return p
def maybe_make_directory(self, *comps):
path = self.abspath(self.join(*comps))
if path not in self.dirs:
self.dirs.add(path)
def mktempfile(self, delete=True):
curno = self.current_tmpno
self.current_tmpno += 1
f = io.StringIO()
f.name = '__im_tmp/tmpfile_%u' % curno
return f
def mkdtemp(self, suffix='', prefix='tmp', dir=None, **_kwargs):
if dir is None:
dir = self.sep + '__im_tmp'
curno = self.current_tmpno
self.current_tmpno += 1
self.last_tmpdir = self.join(dir, '%s_%u_%s' % (prefix, curno, suffix))
self.dirs.add(self.last_tmpdir)
return self.last_tmpdir
def mtime(self, *comps):
return self.mtimes.get(self.join(*comps), 0)
def print_(self, msg='', end='\n', stream=None):
stream = stream or self.stdout
stream.write(msg + end)
stream.flush()
def read_binary_file(self, *comps):
return self._read(comps)
def read_text_file(self, *comps):
return self._read(comps)
def _read(self, comps):
return self.files[self.abspath(*comps)]
def realpath(self, *comps):
return self.abspath(*comps)
def relpath(self, path, start):
return path.replace(start + '/', '')
def remove(self, *comps):
path = self.abspath(*comps)
self.files[path] = None
self.written_files[path] = None
def rmtree(self, *comps):
path = self.abspath(*comps)
for f in self.files:
if f.startswith(path):
self.files[f] = None
self.written_files[f] = None
self.dirs.remove(path)
def terminal_width(self):
return 80
def splitext(self, path):
idx = path.rfind('.')
if idx == -1:
return (path, '')
return (path[:idx], path[idx:])
def time(self):
return 0
def write_binary_file(self, path, contents):
self._write(path, contents)
def write_text_file(self, path, contents):
self._write(path, contents)
def _write(self, path, contents):
full_path = self.abspath(path)
self.maybe_make_directory(self.dirname(full_path))
self.files[full_path] = contents
self.written_files[full_path] = contents
def fetch(self, url, data=None, headers=None):
resp = self.fetch_responses.get(url, FakeResponse(unicode(''), url))
self.fetches.append((url, data, headers, resp))
return resp
def _tap_output(self):
self.stdout = _TeedStream(self.stdout)
self.stderr = _TeedStream(self.stderr)
if True:
sys.stdout = self.stdout
sys.stderr = self.stderr
def _untap_output(self):
assert isinstance(self.stdout, _TeedStream)
self.stdout = self.stdout.stream
self.stderr = self.stderr.stream
if True:
sys.stdout = self.stdout
sys.stderr = self.stderr
def capture_output(self, divert=True):
self._tap_output()
self._orig_logging_handlers = self.logger.handlers
if self._orig_logging_handlers:
self.logger.handlers = [logging.StreamHandler(self.stderr)]
self.stdout.capture(divert=divert)
self.stderr.capture(divert=divert)
def restore_output(self):
assert isinstance(self.stdout, _TeedStream)
out, err = (self.stdout.restore(), self.stderr.restore())
out = python_2_3_compat.bytes_to_str(out)
err = python_2_3_compat.bytes_to_str(err)
self.logger.handlers = self._orig_logging_handlers
self._untap_output()
return out, err
class FakeResponse(io.StringIO):
def __init__(self, response, url, code=200):
io.StringIO.__init__(self, response)
self._url = url
self.code = code
def geturl(self):
return self._url
def getcode(self):
return self.code
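# A minimal usage sketch (not part of the original module): FakeHost keeps an
# in-memory picture of the filesystem, so host-dependent code can be exercised
# in tests without touching the real disk.
if __name__ == '__main__':
    host = FakeHost()
    host.write_text_file('/tmp/project/README', 'hello')
    assert host.exists('/tmp/project/README')
    assert host.read_text_file('/tmp/project/README') == 'hello'
    assert host.files_under('/tmp/project') == ['README']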
|
|
"""
DogStatsApi is a tool for collecting application metrics without hindering
performance. It collects metrics in the application thread with very little overhead
and allows flushing metrics in process, in a thread or in a greenlet, depending
on your application's needs.
"""
import logging
import socket
from functools import wraps
from contextlib import contextmanager
from time import time
from dogapi.common import get_ec2_instance_id
from dogapi.constants import MetricType
from dogapi.stats.metrics import MetricsAggregator, Counter, Gauge, Histogram
from dogapi.stats.statsd import StatsdAggregator
from dogapi.stats.reporters import HttpReporter
# Loggers
log = logging.getLogger('dd.dogapi')
class DogStatsApi(object):
def __init__(self):
""" Initialize a dogstats object. """
# Don't collect until start is called.
self._disabled = True
def start(self, api_key=None,
flush_interval=10,
roll_up_interval=10,
host=None,
device=None,
api_host=None,
use_ec2_instance_ids=False,
flush_in_thread=True,
flush_in_greenlet=False,
disabled=False,
statsd=False,
statsd_host='localhost',
statsd_port=8125):
"""
        Configure the DogStatsApi instance and, optionally, begin auto-flushing metrics.
:param api_key: Your DataDog API key.
:param flush_interval: The number of seconds to wait between flushes.
:param flush_in_thread: True if you'd like to spawn a thread to flush metrics. It will run every `flush_interval` seconds.
:param flush_in_greenlet: Set to true if you'd like to flush in a gevent greenlet.
"""
self.flush_interval = flush_interval
self.roll_up_interval = roll_up_interval
self.device = device
self._disabled = disabled
self.host = host or socket.gethostname()
if use_ec2_instance_ids:
self.host = get_ec2_instance_id()
self._is_auto_flushing = False
if statsd:
# If we're configured to send to a statsd instance, use an aggregator
# which forwards packets over UDP.
log.info("Initializing dog api to use statsd: %s, %s" % (statsd_host, statsd_port))
self._needs_flush = False
self._aggregator = StatsdAggregator(statsd_host, statsd_port)
else:
            # Otherwise create an aggregator that aggregates metrics
            # in process.
self._needs_flush = True
self._aggregator = MetricsAggregator(self.roll_up_interval)
# The reporter is responsible for sending metrics off to their final destination.
# It's abstracted to support easy unit testing and in the near future, forwarding
# to the datadog agent.
self.reporter = HttpReporter(api_key=api_key, api_host=api_host)
self._is_flush_in_progress = False
self.flush_count = 0
if self._disabled:
log.info("dogapi is disabled. No metrics will flush.")
else:
if flush_in_greenlet:
self._start_flush_greenlet()
elif flush_in_thread:
self._start_flush_thread()
def stop(self):
if not self._is_auto_flushing:
return True
if self._flush_thread:
self._flush_thread.end()
self._is_auto_flushing = False
return True
def gauge(self, metric_name, value, timestamp=None, tags=None, sample_rate=1, host=None):
"""
        Record the current *value* of a metric. The most recent value in
        a given flush interval will be recorded. Optionally, specify a set of
        tags to associate with the metric. This should be used for
        point-in-time values such as total hard disk space, process uptime,
        total number of active users, or number of rows in a database table.
>>> dog_stats_api.gauge('process.uptime', time.time() - process_start_time)
>>> dog_stats_api.gauge('cache.bytes.free', cache.get_free_bytes(), tags=['version:1.0'])
"""
if not self._disabled:
self._aggregator.add_point(metric_name, tags, timestamp or time(), value, Gauge,
sample_rate=sample_rate, host=host)
def increment(self, metric_name, value=1, timestamp=None, tags=None, sample_rate=1, host=None):
"""
Increment the counter by the given *value*. Optionally, specify a list of
*tags* to associate with the metric. This is useful for counting things
such as incrementing a counter each time a page is requested.
>>> dog_stats_api.increment('home.page.hits')
>>> dog_stats_api.increment('bytes.processed', file.size())
"""
if not self._disabled:
self._aggregator.add_point(metric_name, tags, timestamp or time(), value, Counter,
sample_rate=sample_rate, host=host)
def histogram(self, metric_name, value, timestamp=None, tags=None, sample_rate=1, host=None):
"""
Sample a histogram value. Histograms will produce metrics that
describe the distribution of the recorded values, namely the minimum,
maximum, average, count and the 75th, 85th, 95th and 99th percentiles.
Optionally, specify a list of *tags* to associate with the metric.
>>> dog_stats_api.histogram('uploaded_file.size', uploaded_file.size())
"""
if not self._disabled:
self._aggregator.add_point(metric_name, tags, timestamp or time(), value, Histogram,
sample_rate=sample_rate, host=host)
@contextmanager
def timer(self, metric_name, sample_rate=1, tags=None, host=None):
"""
A context manager that will track the distribution of the contained code's run time.
Optionally specify a list of tags to associate with the metric.
::
def get_user(user_id):
with dog_stats_api.timer('user.query.time'):
# Do what you need to ...
pass
# Is equivalent to ...
def get_user(user_id):
start = time.time()
try:
# Do what you need to ...
pass
finally:
dog_stats_api.histogram('user.query.time', time.time() - start)
"""
start = time()
try:
yield
finally:
end = time()
self.histogram(metric_name, end - start, end, tags=tags,
sample_rate=sample_rate, host=host)
def timed(self, metric_name, sample_rate=1, tags=None, host=None):
"""
A decorator that will track the distribution of a function's run time.
Optionally specify a list of tags to associate with the metric.
::
@dog_stats_api.timed('user.query.time')
def get_user(user_id):
# Do what you need to ...
pass
# Is equivalent to ...
start = time.time()
try:
get_user(user_id)
finally:
dog_stats_api.histogram('user.query.time', time.time() - start)
"""
def wrapper(func):
@wraps(func)
def wrapped(*args, **kwargs):
with self.timer(metric_name, sample_rate, tags, host):
result = func(*args, **kwargs)
return result
return wrapped
return wrapper
def flush(self, timestamp=None):
"""
Flush and post all metrics to the server. Note that this is a blocking
call, so it is likely not suitable for user facing processes. In those
cases, it's probably best to flush in a thread or greenlet.
"""
try:
if not self._needs_flush:
return False
if self._is_flush_in_progress:
log.debug("A flush is already in progress. Skipping this one.")
return False
elif self._disabled:
log.info("Not flushing because we're disabled.")
return False
self._is_flush_in_progress = True
metrics = self._get_aggregate_metrics(timestamp or time())
count = len(metrics)
if count:
self.flush_count += 1
log.debug("Flush #%s sending %s metrics" % (self.flush_count, count))
self.reporter.flush(metrics)
else:
log.debug("No metrics to flush. Continuing.")
except:
try:
log.exception("Error flushing metrics")
except:
pass
finally:
self._is_flush_in_progress = False
def _get_aggregate_metrics(self, flush_time=None):
# Get rolled up metrics
rolled_up_metrics = self._aggregator.flush(flush_time)
# FIXME: emit a dictionary from the aggregator
metrics = []
for timestamp, value, name, tags, host in rolled_up_metrics:
if host is None:
host = self.host
metric = {
'metric' : name,
'points' : [[timestamp, value]],
'type': MetricType.Gauge,
'host': host,
'device': self.device,
'tags' : tags
}
metrics.append(metric)
return metrics
def _start_flush_thread(self):
""" Start a thread to flush metrics. """
from dogapi.stats.periodic_timer import PeriodicTimer
if self._is_auto_flushing:
log.info("Autoflushing already started.")
return
self._is_auto_flushing = True
# A small helper for logging and flushing.
def flush():
try:
log.debug("Flushing metrics in thread")
self.flush()
except:
try:
log.exception("Error flushing in thread")
except:
pass
log.info("Starting flush thread with interval %s." % self.flush_interval)
self._flush_thread = PeriodicTimer(self.flush_interval, flush)
self._flush_thread.start()
def _start_flush_greenlet(self):
if self._is_auto_flushing:
log.info("Autoflushing already started.")
return
self._is_auto_flushing = True
import gevent
# A small helper for flushing.
def flush():
while True:
try:
log.debug("Flushing metrics in greenlet")
self.flush()
gevent.sleep(self.flush_interval)
except:
try:
log.exception("Error flushing in greenlet")
except:
pass
log.info("Starting flush greenlet with interval %s." % self.flush_interval)
gevent.spawn(flush)
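# A minimal usage sketch (not part of the original module); the API key and
# metric names below are placeholders:
#
#     stats = DogStatsApi()
#     stats.start(api_key='YOUR_API_KEY', flush_interval=10, flush_in_thread=True)
#     stats.increment('home.page.hits')
#     stats.gauge('cache.bytes.free', 1024, tags=['version:1.0'])
#     with stats.timer('user.query.time'):
#         pass  # timed work goes here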
|
|
#!/usr/bin/env python
from __future__ import print_function
import optparse
import os
import pwd
import signal
import subprocess
import sys
import time
import traceback
from six.moves.urllib.parse import urlunparse
from tornado import httpclient
from tornado import httputil
from tornado import gen
from tornado import web
from tornado.ioloop import IOLoop
from tornado.websocket import WebSocketHandler, websocket_connect
if False:
    from typing import Any, Callable, Generator, List, Optional
if 'posix' in os.name and os.geteuid() == 0:
raise RuntimeError("run-dev.py should not be run as root.")
parser = optparse.OptionParser(r"""
Starts the app listening on localhost, for local development.
This script launches the Django and Tornado servers, then runs a reverse proxy
which proxies requests to both of them. After it's all up and running, browse to
http://localhost:9991/
Note that, while runserver and runtornado have the usual auto-restarting
behavior, the reverse proxy itself does *not* automatically restart on changes
to this file.
""")
TOOLS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(TOOLS_DIR))
from tools.lib.test_script import (
get_provisioning_status,
)
parser.add_option('--test',
action='store_true', dest='test',
help='Use the testing database and ports')
parser.add_option('--interface',
action='store', dest='interface',
default=None, help='Set the IP or hostname for the proxy to listen on')
parser.add_option('--no-clear-memcached',
action='store_false', dest='clear_memcached',
default=True, help='Do not clear memcached')
parser.add_option('--force', dest='force',
action="store_true",
default=False, help='Run command despite possible problems.')
parser.add_option('--enable-tornado-logging', dest='enable_tornado_logging',
action="store_true",
default=False, help='Enable access logs from tornado proxy server.')
(options, arguments) = parser.parse_args()
if not options.force:
ok, msg = get_provisioning_status()
if not ok:
print(msg)
print('If you really know what you are doing, use --force to run anyway.')
sys.exit(1)
if options.interface is None:
user_id = os.getuid()
user_name = pwd.getpwuid(user_id).pw_name
if user_name in ["vagrant", "zulipdev"]:
# In the Vagrant development environment, we need to listen on
# all ports, and it's safe to do so, because Vagrant is only
# exposing certain guest ports (by default just 9991) to the
# host. The same argument applies to the remote development
# servers using username "zulipdev".
options.interface = None
else:
# Otherwise, only listen to requests on localhost for security.
options.interface = "127.0.0.1"
elif options.interface == "":
options.interface = None
base_port = 9991
if options.test:
base_port = 9981
settings_module = "zproject.test_settings"
else:
settings_module = "zproject.settings"
manage_args = ['--settings=%s' % (settings_module,)]
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from scripts.lib.zulip_tools import WARNING, ENDC
proxy_port = base_port
django_port = base_port + 1
tornado_port = base_port + 2
webpack_port = base_port + 3
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
# Clean up stale .pyc files etc.
subprocess.check_call('./tools/clean-repo')
# HACK to fix up node_modules/.bin/handlebars deletion issue
if not os.path.exists("node_modules/.bin/handlebars") and os.path.exists("node_modules/handlebars"):
print("Handlebars binary missing due to rebase past .gitignore fixup; fixing...")
subprocess.check_call(["rm", "-rf", "node_modules/handlebars"])
subprocess.check_call(["npm", "install"])
if options.clear_memcached:
print("Clearing memcached ...")
subprocess.check_call('./scripts/setup/flush-memcached')
# Set up a new process group, so that we can later kill run{server,tornado}
# and all of the processes they spawn.
os.setpgrp()
# Pass --nostatic because we configure static serving ourselves in
# zulip/urls.py.
cmds = [['./tools/compile-handlebars-templates', 'forever'],
        ['./manage.py', 'runserver'] +
manage_args + ['127.0.0.1:%d' % (django_port,)],
['env', 'PYTHONUNBUFFERED=1', './manage.py', 'runtornado'] +
manage_args + ['127.0.0.1:%d' % (tornado_port,)],
['./tools/run-dev-queue-processors'] + manage_args,
['env', 'PGHOST=127.0.0.1', # Force password authentication using .pgpass
'./puppet/zulip/files/postgresql/process_fts_updates']]
if options.test:
# Webpack doesn't support 2 copies running on the same system, so
# in order to support running the Casper tests while a Zulip
# development server is running, we use webpack in production mode
# for the Casper tests.
subprocess.check_call('./tools/webpack')
else:
cmds += [['./tools/webpack', '--watch', '--port', str(webpack_port)]]
for cmd in cmds:
subprocess.Popen(cmd)
def transform_url(protocol, path, query, target_port, target_host):
# type: (str, str, str, int, str) -> str
# generate url with target host
host = ":".join((target_host, str(target_port)))
newpath = urlunparse((protocol, host, path, '', query, ''))
return newpath
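# For illustration (hypothetical values): transform_url('http', '/json/users',
# 'q=1', 9992, '127.0.0.1') returns 'http://127.0.0.1:9992/json/users?q=1'.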
@gen.engine
def fetch_request(url, callback, **kwargs):
# type: (str, Any, **Any) -> Generator[Callable[..., Any], Any, None]
# use large timeouts to handle polling requests
req = httpclient.HTTPRequest(url, connect_timeout=240.0, request_timeout=240.0, **kwargs)
client = httpclient.AsyncHTTPClient()
# wait for response
response = yield gen.Task(client.fetch, req)
callback(response)
class BaseWebsocketHandler(WebSocketHandler):
# target server ip
target_host = '127.0.0.1' # type: str
# target server port
target_port = None # type: int
def __init__(self, *args, **kwargs):
# type: (*Any, **Any) -> None
super(BaseWebsocketHandler, self).__init__(*args, **kwargs)
# define client for target websocket server
self.client = None # type: Any
def get(self, *args, **kwargs):
# type: (*Any, **Any) -> Callable
# use get method from WebsocketHandler
return super(BaseWebsocketHandler, self).get(*args, **kwargs)
def open(self):
# type: () -> None
# setup connection with target websocket server
websocket_url = "ws://{host}:{port}{uri}".format(
host=self.target_host,
port=self.target_port,
uri=self.request.uri
)
request = httpclient.HTTPRequest(websocket_url)
request.headers = self._add_request_headers(['sec-websocket-extensions'])
websocket_connect(request, callback=self.open_callback,
on_message_callback=self.on_client_message)
def open_callback(self, future):
# type: (Any) -> None
# callback on connect with target websocket server
self.client = future.result()
def on_client_message(self, message):
# type: (str) -> None
if not message:
            # if the message is empty, the target websocket server closed the connection
return self.close()
if self.ws_connection:
# send message to client if connection exists
self.write_message(message, False)
def on_message(self, message, binary=False):
# type: (str, bool) -> Optional[Callable]
if not self.client:
# close websocket proxy connection if no connection with target websocket server
return self.close()
self.client.write_message(message, binary)
def check_origin(self, origin):
# type: (str) -> bool
return True
def _add_request_headers(self, exclude_lower_headers_list=None):
# type: (Optional[List[str]]) -> httputil.HTTPHeaders
exclude_lower_headers_list = exclude_lower_headers_list or []
headers = httputil.HTTPHeaders()
for header, v in self.request.headers.get_all():
if header.lower() not in exclude_lower_headers_list:
headers.add(header, v)
return headers
class CombineHandler(BaseWebsocketHandler):
def get(self, *args, **kwargs):
# type: (*Any, **Any) -> Optional[Callable]
if self.request.headers.get("Upgrade", "").lower() == 'websocket':
return super(CombineHandler, self).get(*args, **kwargs)
def head(self):
# type: () -> None
pass
def post(self):
# type: () -> None
pass
def put(self):
# type: () -> None
pass
def patch(self):
# type: () -> None
pass
def options(self):
# type: () -> None
pass
def delete(self):
# type: () -> None
pass
def handle_response(self, response):
# type: (Any) -> None
if response.error and not isinstance(response.error, httpclient.HTTPError):
self.set_status(500)
self.write('Internal server error:\n' + str(response.error))
else:
self.set_status(response.code, response.reason)
self._headers = httputil.HTTPHeaders() # clear tornado default header
for header, v in response.headers.get_all():
if header != 'Content-Length':
                    # some headers appear multiple times, e.g. 'Set-Cookie'
self.add_header(header, v)
if response.body:
# rewrite Content-Length Header by the response
self.set_header('Content-Length', len(response.body))
self.write(response.body)
self.finish()
@web.asynchronous
def prepare(self):
# type: () -> None
if 'X-REAL-IP' not in self.request.headers:
self.request.headers['X-REAL-IP'] = self.request.remote_ip
if self.request.headers.get("Upgrade", "").lower() == 'websocket':
return super(CombineHandler, self).prepare()
url = transform_url(
self.request.protocol,
self.request.path,
self.request.query,
self.target_port,
self.target_host,
)
try:
fetch_request(
url=url,
callback=self.handle_response,
method=self.request.method,
headers=self._add_request_headers(["upgrade-insecure-requests"]),
follow_redirects=False,
body=getattr(self.request, 'body'),
allow_nonstandard_methods=True
)
except httpclient.HTTPError as e:
if hasattr(e, 'response') and e.response:
self.handle_response(e.response)
else:
self.set_status(500)
self.write('Internal server error:\n' + str(e))
self.finish()
class WebPackHandler(CombineHandler):
target_port = webpack_port
class DjangoHandler(CombineHandler):
target_port = django_port
class TornadoHandler(CombineHandler):
target_port = tornado_port
class Application(web.Application):
def __init__(self, enable_logging=False):
# type: (bool) -> None
handlers = [
(r"/json/events.*", TornadoHandler),
(r"/api/v1/events.*", TornadoHandler),
(r"/webpack.*", WebPackHandler),
(r"/sockjs.*", TornadoHandler),
(r"/socket.io.*", WebPackHandler),
(r"/.*", DjangoHandler)
]
super(Application, self).__init__(handlers, enable_logging=enable_logging)
def log_request(self, handler):
# type: (BaseWebsocketHandler) -> None
if self.settings['enable_logging']:
super(Application, self).log_request(handler)
def on_shutdown():
# type: () -> None
IOLoop.instance().stop()
def shutdown_handler(*args, **kwargs):
# type: (*Any, **Any) -> None
io_loop = IOLoop.instance()
if io_loop._callbacks:
io_loop.add_timeout(time.time() + 1, shutdown_handler)
else:
io_loop.stop()
# log which services/ports will be started
print("Starting Zulip services on ports: web proxy: {},".format(proxy_port),
"Django: {}, Tornado: {}".format(django_port, tornado_port), end='')
if options.test:
print("") # no webpack for --test
else:
print(", webpack: {}".format(webpack_port))
print("".join((WARNING,
"Note: only port {} is exposed to the host in a Vagrant environment.".format(
proxy_port), ENDC)))
try:
app = Application(enable_logging=options.enable_tornado_logging)
app.listen(proxy_port, address=options.interface)
ioloop = IOLoop.instance()
for s in (signal.SIGINT, signal.SIGTERM):
signal.signal(s, shutdown_handler)
ioloop.start()
except:
# Print the traceback before we get SIGTERM and die.
traceback.print_exc()
raise
finally:
# Kill everything in our process group.
os.killpg(0, signal.SIGTERM)
|
|
#!/usr/bin/env python3
import os
import time
import signal
import shutil
import json
from submitty_utils import dateutils
import multiprocessing
import contextlib
import traceback
import zipfile
from pathlib import Path
from autograder import grade_item
from autograder import config as submitty_config
# ==================================================================================
JOB_ID = '~WORK~'
# ==================================================================================
# ==================================================================================
def worker_process(
config: submitty_config.Config,
which_machine: str,
address: str,
which_untrusted: str,
my_server: str
):
# verify the DAEMON_USER is running this script
if not int(os.getuid()) == int(config.submitty_users['daemon_uid']):
config.logger.log_message("ERROR: must be run by DAEMON_USER")
raise SystemExit(
"ERROR: the submitty_autograding_worker.py script must be run by the DAEMON_USER"
)
# ignore keyboard interrupts in the worker processes
signal.signal(signal.SIGINT, signal.SIG_IGN)
counter = 0
# The full name of this worker
worker_name = f"{my_server}_{address}_{which_untrusted}"
# Set up key autograding_DONE directories
done_dir = os.path.join(config.submitty['submitty_data_dir'], "autograding_DONE")
done_queue_file = os.path.join(done_dir, f"{worker_name}_queue.json")
results_zip = os.path.join(done_dir, f"{worker_name}_results.zip")
# Set up key autograding_TODO directories
todo_dir = os.path.join(config.submitty['submitty_data_dir'], "autograding_TODO")
autograding_zip = os.path.join(todo_dir, f"{worker_name}_autograding.zip")
submission_zip = os.path.join(todo_dir, f"{worker_name}_submission.zip")
todo_queue_file = os.path.join(todo_dir, f"{worker_name}_queue.json")
    # Establish the directory in which we will do our work
working_directory = os.path.join(
config.submitty['submitty_data_dir'],
'autograding_tmp',
which_untrusted,
"tmp"
)
while True:
if os.path.exists(todo_queue_file):
try:
# Attempt to grade the submission. Get back the location of the results.
results_zip_tmp = grade_item.grade_from_zip(
config,
working_directory,
which_untrusted,
autograding_zip,
submission_zip
)
shutil.copyfile(results_zip_tmp, results_zip)
os.remove(results_zip_tmp)
# At this point, we will assume that grading has progressed successfully enough to
# return a coherent answer, and will say as much in the done queue file
response = {
'status': 'success',
'message': 'Grading completed successfully'
}
except Exception:
# If we threw an error while grading, log it.
config.logger.log_message(
f"ERROR attempting to unzip graded item: {which_machine} "
f"{which_untrusted}. for more details, see traces entry.",
which_untrusted=which_untrusted,
)
config.logger.log_stack_trace(
traceback.format_exc(),
which_untrusted=which_untrusted,
)
# TODO: It is possible that autograding failed after multiple steps.
# In this case, we may be able to salvage a portion of the autograding_results
# directory.
# Because we failed grading, we will respond with an empty results zip.
results_zip_tmp = zipfile.ZipFile(results_zip, 'w')
results_zip_tmp.close()
# We will also respond with a done_queue_file which contains a failure message.
response = {
'status': 'fail',
'message': traceback.format_exc()
}
finally:
# Regardless of if we succeeded or failed, create a done queue file to
# send to the shipper.
with open(todo_queue_file, 'r') as infile:
queue_obj = json.load(infile)
queue_obj["done_time"] = dateutils.write_submitty_date(milliseconds=True)
queue_obj['autograding_status'] = response
queue_obj['errors'] = config.logger.accumulated_traces
with open(done_queue_file, 'w') as outfile:
json.dump(queue_obj, outfile, sort_keys=True, indent=4)
# Clean up temporary files.
with contextlib.suppress(FileNotFoundError):
os.remove(autograding_zip)
with contextlib.suppress(FileNotFoundError):
os.remove(submission_zip)
with contextlib.suppress(FileNotFoundError):
os.remove(todo_queue_file)
# Clear out accumulated stack traces in the logger.
config.logger.accumulated_traces.clear()
counter = 0
else:
if counter >= 10:
print(which_machine, which_untrusted, "wait")
counter = 0
counter += 1
time.sleep(1)
def try_run_worker(
config: submitty_config.Config,
which_machine: str,
address: str,
which_untrusted: str,
my_server: str
):
"""Try and run `worker_process`.
If `worker_process` fails, print a message to the log before letting the thread die.
"""
try:
worker_process(config, which_machine, address, which_untrusted, my_server)
except Exception as e:
config.logger.log_message(
f"FATAL: {which_untrusted} crashed! See traces entry for more details.",
which_untrusted=which_untrusted,
)
config.logger.log_stack_trace(
traceback.format_exc(),
which_untrusted=which_untrusted,
)
# Re-raise the exception so the process doesn't look like it exited OK
raise e
# ==================================================================================
# ==================================================================================
def launch_workers(config, my_name, my_stats):
num_workers = my_stats['num_autograding_workers']
# verify the DAEMON_USER is running this script
if not int(os.getuid()) == int(config.submitty_users['daemon_uid']):
raise SystemExit(
"ERROR: the submitty_autograding_worker.py script must be run by the DAEMON_USER"
)
config.logger.log_message("grade_scheduler.py launched")
# prepare a list of untrusted users to be used by the workers
untrusted_users = multiprocessing.Queue()
for i in range(num_workers):
untrusted_users.put("untrusted" + str(i).zfill(2))
# launch the worker threads
address = my_stats['address']
if address != 'localhost':
which_machine = f"{my_stats['username']}@{address}"
else:
which_machine = address
my_server = my_stats['server_name']
processes = list()
for i in range(0, num_workers):
u = "untrusted" + str(i).zfill(2)
p = multiprocessing.Process(
target=try_run_worker, args=(config, which_machine, address, u, my_server)
)
p.start()
processes.append(p)
# main monitoring loop
try:
while True:
alive = 0
for i in range(0, num_workers):
if processes[i].is_alive():
alive = alive+1
else:
config.logger.log_message(f"ERROR: process {i} is not alive")
if alive != num_workers:
config.logger.log_message(f"ERROR: #workers={num_workers} != #alive={alive}")
time.sleep(60)
except KeyboardInterrupt:
config.logger.log_message("grade_scheduler.py keyboard interrupt")
# just kill everything in this group id right now
        # NOTE: this may be a bug if the grandchildren have a different group id and may not be killed
os.kill(-os.getpid(), signal.SIGKILL)
# run this to check if everything is dead
# ps xao pid,ppid,pgid,sid,comm,user | grep untrust
# everything's dead, including the main process so the rest of this will be ignored
# but this was mostly working...
# terminate the jobs
for i in range(0, num_workers):
processes[i].terminate()
# wait for them to join
for i in range(0, num_workers):
processes[i].join()
config.logger.log_message("grade_scheduler.py terminated")
# ==================================================================================
def read_autograding_worker_json(config: submitty_config.Config, worker_json_path: os.PathLike):
try:
with open(worker_json_path, 'r') as infile:
name_and_stats = json.load(infile)
# grab the key and the value. NOTE: For now there should only ever be one pair.
name = list(name_and_stats.keys())[0]
stats = name_and_stats[name]
except FileNotFoundError as e:
raise SystemExit(
"autograding_worker.json not found. Have you registered this worker with a "
"Submitty host yet?"
) from e
except Exception as e:
config.logger.log_stack_trace(traceback.format_exc())
raise SystemExit(f"ERROR loading autograding_worker.json file: {e}")
return name, stats
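# For illustration only (placeholder values): the file is expected to hold a
# single "name: stats" pair, roughly
#     {"worker_0": {"address": "localhost", "username": "submitty",
#                   "server_name": "main", "num_autograding_workers": 5}}
# launch_workers() above reads address, username, server_name and
# num_autograding_workers from the stats dictionary.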
# ==================================================================================
# Removes any existing files or folders in the autograding_DONE folder.
def cleanup_old_jobs(config: submitty_config.Config):
for file_path in Path(config.submitty['submitty_data_dir'], "autograding_DONE").glob("*"):
file_path = str(file_path)
config.logger.log_message(f"Remove autograding DONE file: {file_path}")
try:
os.remove(file_path)
except Exception:
config.logger.log_stack_trace(traceback.format_exc())
# ==================================================================================
if __name__ == "__main__":
config_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'config')
config = submitty_config.Config.path_constructor(config_dir, JOB_ID, capture_traces=True)
cleanup_old_jobs(config)
print('cleaned up old jobs')
my_name, my_stats = read_autograding_worker_json(
config,
os.path.join(
config.submitty['submitty_data_dir'],
'autograding_TODO',
'autograding_worker.json'
),
)
launch_workers(config, my_name, my_stats)
|
|
from contextlib import contextmanager
import filecmp
import functools
import os
import posix
import stat
import sys
import sysconfig
import tempfile
import time
import uuid
import unittest
from ..xattr import get_all
from ..platform import get_flags
from ..helpers import umount
from .. import platform
# Note: this is used by borg.selftest, do not use or import py.test functionality here.
try:
import llfuse
# Does this version of llfuse support ns precision?
have_fuse_mtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')
except ImportError:
have_fuse_mtime_ns = False
try:
from pytest import raises
except ImportError:
raises = None
has_lchflags = hasattr(os, 'lchflags') or sys.platform.startswith('linux')
try:
with tempfile.NamedTemporaryFile() as file:
platform.set_flags(file.name, stat.UF_NODUMP)
except OSError:
has_lchflags = False
try:
import llfuse
has_llfuse = True or llfuse # avoids "unused import"
except ImportError:
has_llfuse = False
# The mtime get/set precision varies on different OS and Python versions
if 'HAVE_FUTIMENS' in getattr(posix, '_have_functions', []):
st_mtime_ns_round = 0
elif 'HAVE_UTIMES' in sysconfig.get_config_vars():
st_mtime_ns_round = -6
else:
st_mtime_ns_round = -9
if sys.platform.startswith('netbsd'):
st_mtime_ns_round = -4 # only >1 microsecond resolution here?
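# For example, with st_mtime_ns_round == -6 nanosecond timestamps are rounded
# to whole microseconds: round(1234567891, -6) == 1235000000.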
@contextmanager
def unopened_tempfile():
with tempfile.TemporaryDirectory() as tempdir:
yield os.path.join(tempdir, "file")
@functools.lru_cache()
def are_symlinks_supported():
with unopened_tempfile() as filepath:
try:
os.symlink('somewhere', filepath)
if os.stat(filepath, follow_symlinks=False) and os.readlink(filepath) == 'somewhere':
return True
except OSError:
pass
return False
@functools.lru_cache()
def are_hardlinks_supported():
with unopened_tempfile() as file1path, unopened_tempfile() as file2path:
open(file1path, 'w').close()
try:
os.link(file1path, file2path)
stat1 = os.stat(file1path)
stat2 = os.stat(file2path)
if stat1.st_nlink == stat2.st_nlink == 2 and stat1.st_ino == stat2.st_ino:
return True
except OSError:
pass
return False
@functools.lru_cache()
def are_fifos_supported():
with unopened_tempfile() as filepath:
try:
os.mkfifo(filepath)
return True
except OSError:
return False
@functools.lru_cache()
def is_utime_fully_supported():
with unopened_tempfile() as filepath:
# Some filesystems (such as SSHFS) don't support utime on symlinks
if are_symlinks_supported():
os.symlink('something', filepath)
else:
open(filepath, 'w').close()
try:
os.utime(filepath, (1000, 2000), follow_symlinks=False)
new_stats = os.stat(filepath, follow_symlinks=False)
if new_stats.st_atime == 1000 and new_stats.st_mtime == 2000:
return True
        except OSError:
pass
return False
def no_selinux(x):
# selinux fails our FUSE tests, thus ignore selinux xattrs
SELINUX_KEY = 'security.selinux'
if isinstance(x, dict):
return {k: v for k, v in x.items() if k != SELINUX_KEY}
if isinstance(x, list):
return [k for k in x if k != SELINUX_KEY]
class BaseTestCase(unittest.TestCase):
"""
"""
assert_in = unittest.TestCase.assertIn
assert_not_in = unittest.TestCase.assertNotIn
assert_equal = unittest.TestCase.assertEqual
assert_not_equal = unittest.TestCase.assertNotEqual
assert_true = unittest.TestCase.assertTrue
if raises:
assert_raises = staticmethod(raises)
else:
assert_raises = unittest.TestCase.assertRaises
@contextmanager
def assert_creates_file(self, path):
self.assert_true(not os.path.exists(path), '{} should not exist'.format(path))
yield
self.assert_true(os.path.exists(path), '{} should exist'.format(path))
def assert_dirs_equal(self, dir1, dir2, **kwargs):
diff = filecmp.dircmp(dir1, dir2)
self._assert_dirs_equal_cmp(diff, **kwargs)
def _assert_dirs_equal_cmp(self, diff, ignore_bsdflags=False, ignore_xattrs=False, ignore_ns=False):
self.assert_equal(diff.left_only, [])
self.assert_equal(diff.right_only, [])
self.assert_equal(diff.diff_files, [])
self.assert_equal(diff.funny_files, [])
for filename in diff.common:
path1 = os.path.join(diff.left, filename)
path2 = os.path.join(diff.right, filename)
s1 = os.stat(path1, follow_symlinks=False)
s2 = os.stat(path2, follow_symlinks=False)
# Assume path2 is on FUSE if st_dev is different
fuse = s1.st_dev != s2.st_dev
attrs = ['st_uid', 'st_gid', 'st_rdev']
if not fuse or not os.path.isdir(path1):
# dir nlink is always 1 on our FUSE filesystem
attrs.append('st_nlink')
d1 = [filename] + [getattr(s1, a) for a in attrs]
d2 = [filename] + [getattr(s2, a) for a in attrs]
d1.insert(1, oct(s1.st_mode))
d2.insert(1, oct(s2.st_mode))
if not ignore_bsdflags:
d1.append(get_flags(path1, s1))
d2.append(get_flags(path2, s2))
# ignore st_rdev if file is not a block/char device, fixes #203
if not stat.S_ISCHR(s1.st_mode) and not stat.S_ISBLK(s1.st_mode):
d1[4] = None
if not stat.S_ISCHR(s2.st_mode) and not stat.S_ISBLK(s2.st_mode):
d2[4] = None
# If utime isn't fully supported, borg can't set mtime.
# Therefore, we shouldn't test it in that case.
if is_utime_fully_supported():
# Older versions of llfuse do not support ns precision properly
if ignore_ns:
d1.append(int(s1.st_mtime_ns / 1e9))
d2.append(int(s2.st_mtime_ns / 1e9))
elif fuse and not have_fuse_mtime_ns:
d1.append(round(s1.st_mtime_ns, -4))
d2.append(round(s2.st_mtime_ns, -4))
else:
d1.append(round(s1.st_mtime_ns, st_mtime_ns_round))
d2.append(round(s2.st_mtime_ns, st_mtime_ns_round))
if not ignore_xattrs:
d1.append(no_selinux(get_all(path1, follow_symlinks=False)))
d2.append(no_selinux(get_all(path2, follow_symlinks=False)))
self.assert_equal(d1, d2)
for sub_diff in diff.subdirs.values():
self._assert_dirs_equal_cmp(sub_diff, ignore_bsdflags=ignore_bsdflags, ignore_xattrs=ignore_xattrs, ignore_ns=ignore_ns)
@contextmanager
def fuse_mount(self, location, mountpoint, *options):
os.mkdir(mountpoint)
args = ['mount', location, mountpoint] + list(options)
self.cmd(*args, fork=True)
self.wait_for_mount(mountpoint)
yield
umount(mountpoint)
os.rmdir(mountpoint)
# Give the daemon some time to exit
time.sleep(.2)
def wait_for_mount(self, path, timeout=5):
"""Wait until a filesystem is mounted on `path`
"""
timeout += time.time()
while timeout > time.time():
if os.path.ismount(path):
return
time.sleep(.1)
raise Exception('wait_for_mount(%s) timeout' % path)
class changedir:
def __init__(self, dir):
self.dir = dir
def __enter__(self):
self.old = os.getcwd()
os.chdir(self.dir)
def __exit__(self, *args, **kw):
os.chdir(self.old)
class environment_variable:
def __init__(self, **values):
self.values = values
self.old_values = {}
def __enter__(self):
for k, v in self.values.items():
self.old_values[k] = os.environ.get(k)
if v is None:
os.environ.pop(k, None)
else:
os.environ[k] = v
def __exit__(self, *args, **kw):
for k, v in self.old_values.items():
if v is None:
os.environ.pop(k, None)
else:
os.environ[k] = v
class FakeInputs:
"""Simulate multiple user inputs, can be used as input() replacement"""
def __init__(self, inputs):
self.inputs = inputs
def __call__(self, prompt=None):
if prompt is not None:
print(prompt, end='')
try:
return self.inputs.pop(0)
except IndexError:
raise EOFError from None
|
|
# encoding: utf-8
"""
A mixin for :class:`~IPython.core.application.Application` classes that
launch InteractiveShell instances, load extensions, etc.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import glob
from itertools import chain
import os
import sys
from traitlets.config.application import boolean_flag
from traitlets.config.configurable import Configurable
from traitlets.config.loader import Config
from IPython.core.application import SYSTEM_CONFIG_DIRS, ENV_CONFIG_DIRS
from IPython.core import pylabtools
from IPython.utils.contexts import preserve_keys
from IPython.utils.path import filefind
from traitlets import (
Unicode, Instance, List, Bool, CaselessStrEnum, observe,
)
from IPython.terminal import pt_inputhooks
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
gui_keys = tuple(sorted(pt_inputhooks.backends) + sorted(pt_inputhooks.aliases))
backend_keys = sorted(pylabtools.backends.keys())
backend_keys.insert(0, 'auto')
shell_flags = {}
addflag = lambda *args: shell_flags.update(boolean_flag(*args))
addflag('autoindent', 'InteractiveShell.autoindent',
'Turn on autoindenting.', 'Turn off autoindenting.'
)
addflag('automagic', 'InteractiveShell.automagic',
"""Turn on the auto calling of magic commands. Type %%magic at the
IPython prompt for more information.""",
'Turn off the auto calling of magic commands.'
)
addflag('pdb', 'InteractiveShell.pdb',
"Enable auto calling the pdb debugger after every exception.",
"Disable auto calling the pdb debugger after every exception."
)
addflag('pprint', 'PlainTextFormatter.pprint',
"Enable auto pretty printing of results.",
"Disable auto pretty printing of results."
)
addflag('color-info', 'InteractiveShell.color_info',
"""IPython can display information about objects via a set of functions,
and optionally can use colors for this, syntax highlighting
source code and various other elements. This is on by default, but can cause
problems with some pagers. If you see such problems, you can disable the
colours.""",
"Disable using colors for info related things."
)
addflag('ignore-cwd', 'InteractiveShellApp.ignore_cwd',
"Exclude the current working directory from sys.path",
"Include the current working directory in sys.path",
)
nosep_config = Config()
nosep_config.InteractiveShell.separate_in = ''
nosep_config.InteractiveShell.separate_out = ''
nosep_config.InteractiveShell.separate_out2 = ''
shell_flags['nosep']=(nosep_config, "Eliminate all spacing between prompts.")
shell_flags['pylab'] = (
{'InteractiveShellApp' : {'pylab' : 'auto'}},
"""Pre-load matplotlib and numpy for interactive use with
the default matplotlib backend."""
)
shell_flags['matplotlib'] = (
{'InteractiveShellApp' : {'matplotlib' : 'auto'}},
"""Configure matplotlib for interactive use with
the default matplotlib backend."""
)
# it's possible we don't want short aliases for *all* of these:
shell_aliases = dict(
autocall='InteractiveShell.autocall',
colors='InteractiveShell.colors',
logfile='InteractiveShell.logfile',
logappend='InteractiveShell.logappend',
c='InteractiveShellApp.code_to_run',
m='InteractiveShellApp.module_to_run',
ext='InteractiveShellApp.extra_extension',
gui='InteractiveShellApp.gui',
pylab='InteractiveShellApp.pylab',
matplotlib='InteractiveShellApp.matplotlib',
)
shell_aliases['cache-size'] = 'InteractiveShell.cache_size'
#-----------------------------------------------------------------------------
# Main classes and functions
#-----------------------------------------------------------------------------
class InteractiveShellApp(Configurable):
"""A Mixin for applications that start InteractiveShell instances.
Provides configurables for loading extensions and executing files
as part of configuring a Shell environment.
The following methods should be called by the :meth:`initialize` method
of the subclass:
- :meth:`init_path`
- :meth:`init_shell` (to be implemented by the subclass)
- :meth:`init_gui_pylab`
- :meth:`init_extensions`
- :meth:`init_code`
"""
extensions = List(Unicode(),
help="A list of dotted module names of IPython extensions to load."
).tag(config=True)
extra_extension = Unicode('',
help="dotted module name of an IPython extension to load."
).tag(config=True)
reraise_ipython_extension_failures = Bool(False,
help="Reraise exceptions encountered loading IPython extensions?",
).tag(config=True)
# Extensions that are always loaded (not configurable)
default_extensions = List(Unicode(), [u'storemagic']).tag(config=False)
hide_initial_ns = Bool(True,
help="""Should variables loaded at startup (by startup files, exec_lines, etc.)
be hidden from tools like %who?"""
).tag(config=True)
exec_files = List(Unicode(),
help="""List of files to run at IPython startup."""
).tag(config=True)
exec_PYTHONSTARTUP = Bool(True,
help="""Run the file referenced by the PYTHONSTARTUP environment
variable at IPython startup."""
).tag(config=True)
file_to_run = Unicode('',
help="""A file to be run""").tag(config=True)
exec_lines = List(Unicode(),
help="""lines of code to run at IPython startup."""
).tag(config=True)
code_to_run = Unicode('',
help="Execute the given command string."
).tag(config=True)
module_to_run = Unicode('',
help="Run the module as a script."
).tag(config=True)
gui = CaselessStrEnum(gui_keys, allow_none=True,
help="Enable GUI event loop integration with any of {0}.".format(gui_keys)
).tag(config=True)
matplotlib = CaselessStrEnum(backend_keys, allow_none=True,
help="""Configure matplotlib for interactive use with
the default matplotlib backend."""
).tag(config=True)
pylab = CaselessStrEnum(backend_keys, allow_none=True,
help="""Pre-load matplotlib and numpy for interactive use,
selecting a particular matplotlib backend and loop integration.
"""
).tag(config=True)
pylab_import_all = Bool(True,
help="""If true, IPython will populate the user namespace with numpy, pylab, etc.
and an ``import *`` is done from numpy and pylab, when using pylab mode.
When False, pylab mode should not import any names into the user namespace.
"""
).tag(config=True)
ignore_cwd = Bool(
False,
help="""If True, IPython will not add the current working directory to sys.path.
When False, the current working directory is added to sys.path, allowing imports
of modules defined in the current directory."""
).tag(config=True)
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC',
allow_none=True)
# whether interact-loop should start
interact = Bool(True)
user_ns = Instance(dict, args=None, allow_none=True)
@observe('user_ns')
def _user_ns_changed(self, change):
if self.shell is not None:
self.shell.user_ns = change['new']
self.shell.init_user_ns()
def init_path(self):
"""Add current working directory, '', to sys.path
Unlike Python's default, we insert before the first `site-packages`
or `dist-packages` directory,
so that it is after the standard library.
.. versionchanged:: 7.2
Try to insert after the standard library, instead of first.
.. versionchanged:: 8.0
Allow optionally not including the current directory in sys.path
"""
if '' in sys.path or self.ignore_cwd:
return
for idx, path in enumerate(sys.path):
parent, last_part = os.path.split(path)
if last_part in {'site-packages', 'dist-packages'}:
break
else:
# no site-packages or dist-packages found (?!)
# back to original behavior of inserting at the front
idx = 0
sys.path.insert(idx, '')
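    # For illustration (hypothetical paths): if sys.path were
    # ['/usr/lib/python3.8', '/usr/lib/python3.8/lib-dynload',
    #  '/usr/lib/python3/dist-packages'], '' would be inserted at index 2,
    # i.e. after the standard-library entries but before dist-packages.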
def init_shell(self):
raise NotImplementedError("Override in subclasses")
def init_gui_pylab(self):
"""Enable GUI event loop integration, taking pylab into account."""
enable = False
shell = self.shell
if self.pylab:
enable = lambda key: shell.enable_pylab(key, import_all=self.pylab_import_all)
key = self.pylab
elif self.matplotlib:
enable = shell.enable_matplotlib
key = self.matplotlib
elif self.gui:
enable = shell.enable_gui
key = self.gui
if not enable:
return
try:
r = enable(key)
except ImportError:
self.log.warning("Eventloop or matplotlib integration failed. Is matplotlib installed?")
self.shell.showtraceback()
return
except Exception:
self.log.warning("GUI event loop or pylab initialization failed")
self.shell.showtraceback()
return
if isinstance(r, tuple):
gui, backend = r[:2]
self.log.info("Enabling GUI event loop integration, "
"eventloop=%s, matplotlib=%s", gui, backend)
if key == "auto":
print("Using matplotlib backend: %s" % backend)
else:
gui = r
self.log.info("Enabling GUI event loop integration, "
"eventloop=%s", gui)
def init_extensions(self):
"""Load all IPython extensions in IPythonApp.extensions.
This uses the :meth:`ExtensionManager.load_extensions` to load all
the extensions listed in ``self.extensions``.
"""
try:
self.log.debug("Loading IPython extensions...")
extensions = self.default_extensions + self.extensions
if self.extra_extension:
extensions.append(self.extra_extension)
for ext in extensions:
try:
self.log.info("Loading IPython extension: %s" % ext)
self.shell.extension_manager.load_extension(ext)
except:
if self.reraise_ipython_extension_failures:
raise
msg = ("Error in loading extension: {ext}\n"
"Check your config files in {location}".format(
ext=ext,
location=self.profile_dir.location
))
self.log.warning(msg, exc_info=True)
except:
if self.reraise_ipython_extension_failures:
raise
self.log.warning("Unknown error in loading extensions:", exc_info=True)
def init_code(self):
"""run the pre-flight code, specified via exec_lines"""
self._run_startup_files()
self._run_exec_lines()
self._run_exec_files()
# Hide variables defined here from %who etc.
if self.hide_initial_ns:
self.shell.user_ns_hidden.update(self.shell.user_ns)
# command-line execution (ipython -i script.py, ipython -m module)
# should *not* be excluded from %whos
self._run_cmd_line_code()
self._run_module()
        # flush output, so it won't be attached to the first cell
sys.stdout.flush()
sys.stderr.flush()
def _run_exec_lines(self):
"""Run lines of code in IPythonApp.exec_lines in the user's namespace."""
if not self.exec_lines:
return
try:
self.log.debug("Running code from IPythonApp.exec_lines...")
for line in self.exec_lines:
try:
self.log.info("Running code in user namespace: %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warning("Error in executing line in user "
"namespace: %s" % line)
self.shell.showtraceback()
except:
self.log.warning("Unknown error in handling IPythonApp.exec_lines:")
self.shell.showtraceback()
def _exec_file(self, fname, shell_futures=False):
try:
full_filename = filefind(fname, [u'.', self.ipython_dir])
except IOError:
self.log.warning("File not found: %r"%fname)
return
# Make sure that the running script gets a proper sys.argv as if it
# were run from a system shell.
save_argv = sys.argv
sys.argv = [full_filename] + self.extra_args[1:]
try:
if os.path.isfile(full_filename):
self.log.info("Running file in user namespace: %s" %
full_filename)
# Ensure that __file__ is always defined to match Python
# behavior.
with preserve_keys(self.shell.user_ns, '__file__'):
self.shell.user_ns['__file__'] = fname
if full_filename.endswith('.ipy') or full_filename.endswith('.ipynb'):
self.shell.safe_execfile_ipy(full_filename,
shell_futures=shell_futures)
else:
# default to python, even without extension
self.shell.safe_execfile(full_filename,
self.shell.user_ns,
shell_futures=shell_futures,
raise_exceptions=True)
finally:
sys.argv = save_argv
def _run_startup_files(self):
"""Run files from profile startup directory"""
startup_dirs = [self.profile_dir.startup_dir] + [
os.path.join(p, 'startup') for p in chain(ENV_CONFIG_DIRS, SYSTEM_CONFIG_DIRS)
]
startup_files = []
if self.exec_PYTHONSTARTUP and os.environ.get('PYTHONSTARTUP', False) and \
not (self.file_to_run or self.code_to_run or self.module_to_run):
python_startup = os.environ['PYTHONSTARTUP']
self.log.debug("Running PYTHONSTARTUP file %s...", python_startup)
try:
self._exec_file(python_startup)
except:
self.log.warning("Unknown error in handling PYTHONSTARTUP file %s:", python_startup)
self.shell.showtraceback()
for startup_dir in startup_dirs[::-1]:
startup_files += glob.glob(os.path.join(startup_dir, '*.py'))
startup_files += glob.glob(os.path.join(startup_dir, '*.ipy'))
if not startup_files:
return
self.log.debug("Running startup files from %s...", startup_dir)
try:
for fname in sorted(startup_files):
self._exec_file(fname)
except:
self.log.warning("Unknown error in handling startup files:")
self.shell.showtraceback()
def _run_exec_files(self):
"""Run files from IPythonApp.exec_files"""
if not self.exec_files:
return
self.log.debug("Running files in IPythonApp.exec_files...")
try:
for fname in self.exec_files:
self._exec_file(fname)
except:
self.log.warning("Unknown error in handling IPythonApp.exec_files:")
self.shell.showtraceback()
def _run_cmd_line_code(self):
"""Run code or file specified at the command-line"""
if self.code_to_run:
line = self.code_to_run
try:
self.log.info("Running code given at command line (c=): %s" %
line)
self.shell.run_cell(line, store_history=False)
except:
self.log.warning("Error in executing line in user namespace: %s" %
line)
self.shell.showtraceback()
if not self.interact:
self.exit(1)
# Like Python itself, ignore the second if the first of these is present
elif self.file_to_run:
fname = self.file_to_run
if os.path.isdir(fname):
fname = os.path.join(fname, "__main__.py")
if not os.path.exists(fname):
self.log.warning("File '%s' doesn't exist", fname)
if not self.interact:
self.exit(2)
try:
self._exec_file(fname, shell_futures=True)
except:
self.shell.showtraceback(tb_offset=4)
if not self.interact:
self.exit(1)
def _run_module(self):
"""Run module specified at the command-line."""
if self.module_to_run:
# Make sure that the module gets a proper sys.argv as if it were
# run using `python -m`.
save_argv = sys.argv
sys.argv = [sys.executable] + self.extra_args
try:
self.shell.safe_run_module(self.module_to_run,
self.shell.user_ns)
finally:
sys.argv = save_argv
|
|
"""
This module contains functions to:
- solve a single equation for a single variable, in any domain either real or complex.
- solve a system of linear equations with N variables and M equations.
"""
from __future__ import print_function, division
from sympy.core.sympify import sympify
from sympy.core import S, Pow, Dummy, pi, Expr, Wild, Mul, Equality
from sympy.core.numbers import I, Number, Rational, oo
from sympy.core.function import (Lambda, expand, expand_complex)
from sympy.core.relational import Eq
from sympy.simplify.simplify import fraction, trigsimp
from sympy.functions import (log, Abs, tan, cot, sin, cos, sec, csc, exp,
acos, asin, atan, acsc, asec, arg,
Piecewise, piecewise_fold)
from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
HyperbolicFunction)
from sympy.functions.elementary.miscellaneous import real_root
from sympy.sets import (FiniteSet, EmptySet, imageset, Interval, Intersection,
Union, ConditionSet)
from sympy.matrices import Matrix
from sympy.polys import (roots, Poly, degree, together, PolynomialError,
RootOf)
from sympy.solvers.solvers import checksol, denoms
from sympy.utilities import filldedent
import warnings
def invert_real(f_x, y, x):
""" Inverts a real valued function
Reduces the real valued equation ``f(x) = y`` to a set of equations ``{g(x)
= h_1(y), g(x) = h_2(y), ..., g(x) = h_n(y) }`` where ``g(x)`` is a simpler
function than ``f(x)``. The return value is a tuple ``(g(x), set_h)``,
where ``g(x)`` is a function of ``x`` and ``set_h`` is the set of
functions ``{h_1(y), h_2(y), ..., h_n(y)}``.
Here, ``y`` is not necessarily a symbol.
The ``set_h`` contains the functions along with the information about their
domain in which they are valid, through set operations. For instance, if
    ``y = Abs(x) - n`` is inverted, then ``set_h`` doesn't simply
    return ``{-n, n}``, as it doesn't explicitly state the nature of
    ``n``; rather, it will return:
    `Intersection([0, oo), {n}) U Intersection((-oo, 0], {-n})`
Examples
========
>>> from sympy.solvers.solveset import invert_real
>>> from sympy import tan, Abs, exp
>>> from sympy.abc import x, y, n
>>> invert_real(exp(x), 1, x)
(x, {0})
>>> invert_real(tan(x), y, x)
(x, ImageSet(Lambda(_n, _n*pi + atan(y)), Integers()))
* ``set_h`` containing information about the domain
>>> invert_real(Abs(x**31 + x), y, x)
(x**31 + x, Intersection([0, oo), {y}) U Intersection((-oo, 0], {-y}))
>>> invert_real(exp(Abs(x)), y, x)
(x, Intersection([0, oo), {log(y)}) U Intersection((-oo, 0], {-log(y)}))
See Also
========
invert_complex
"""
y = sympify(y)
if not y.has(x):
return _invert_real(f_x, FiniteSet(y), x)
else:
raise ValueError(" y should be independent of x ")
def _invert_real(f, g_ys, symbol):
""" Helper function for invert_real """
if not f.has(symbol):
raise ValueError("Inverse of constant function doesn't exist")
if f is symbol:
return (f, g_ys)
n = Dummy('n')
if hasattr(f, 'inverse') and not isinstance(f, TrigonometricFunction) and \
not isinstance(f, HyperbolicFunction):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_real(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys), symbol)
if isinstance(f, Abs):
return _invert_real(f.args[0],
Union(imageset(Lambda(n, n), g_ys).intersect(Interval(0, oo)),
imageset(Lambda(n, -n), g_ys).intersect(Interval(-oo, 0))),
symbol)
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g != S.Zero:
return _invert_real(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g != S.One:
return _invert_real(h, imageset(Lambda(n, n/g), g_ys), symbol)
if f.is_Pow:
base, expo = f.args
base_has_sym = base.has(symbol)
expo_has_sym = expo.has(symbol)
if not expo_has_sym:
res = imageset(Lambda(n, real_root(n, expo)), g_ys)
if expo.is_rational:
numer, denom = expo.as_numer_denom()
if numer == S.One or numer == - S.One:
return _invert_real(base, res, symbol)
else:
if numer % 2 == 0:
n = Dummy('n')
neg_res = imageset(Lambda(n, -n), res)
return _invert_real(base, res + neg_res, symbol)
else:
return _invert_real(base, res, symbol)
else:
if not base.is_positive:
raise ValueError("x**w where w is irrational is not "
"defined for negative x")
return _invert_real(base, res, symbol)
if not base_has_sym:
return _invert_real(expo, imageset(Lambda(n, log(n)/log(base)),
g_ys), symbol)
if isinstance(f, sin):
n = Dummy('n')
if isinstance(g_ys, FiniteSet):
sin_invs = Union(*[imageset(Lambda(n, n*pi + (-1)**n*asin(g_y)), \
S.Integers) for g_y in g_ys])
return _invert_real(f.args[0], sin_invs, symbol)
if isinstance(f, csc):
n = Dummy('n')
if isinstance(g_ys, FiniteSet):
csc_invs = Union(*[imageset(Lambda(n, n*pi + (-1)**n*acsc(g_y)), \
S.Integers) for g_y in g_ys])
return _invert_real(f.args[0], csc_invs, symbol)
if isinstance(f, cos):
n = Dummy('n')
if isinstance(g_ys, FiniteSet):
cos_invs_f1 = Union(*[imageset(Lambda(n, 2*n*pi + acos(g_y)), \
S.Integers) for g_y in g_ys])
cos_invs_f2 = Union(*[imageset(Lambda(n, 2*n*pi - acos(g_y)), \
S.Integers) for g_y in g_ys])
cos_invs = Union(cos_invs_f1, cos_invs_f2)
return _invert_real(f.args[0], cos_invs, symbol)
if isinstance(f, sec):
n = Dummy('n')
if isinstance(g_ys, FiniteSet):
sec_invs_f1 = Union(*[imageset(Lambda(n, 2*n*pi + asec(g_y)), \
S.Integers) for g_y in g_ys])
sec_invs_f2 = Union(*[imageset(Lambda(n, 2*n*pi - asec(g_y)), \
S.Integers) for g_y in g_ys])
sec_invs = Union(sec_invs_f1, sec_invs_f2)
return _invert_real(f.args[0], sec_invs, symbol)
if isinstance(f, tan) or isinstance(f, cot):
n = Dummy('n')
if isinstance(g_ys, FiniteSet):
tan_cot_invs = Union(*[imageset(Lambda(n, n*pi + f.inverse()(g_y)), \
S.Integers) for g_y in g_ys])
return _invert_real(f.args[0], tan_cot_invs, symbol)
return (f, g_ys)
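# Illustrative note (not part of the original module): inverting x**2 = 4
# through the Pow branch above yields real_root(4, 2) = 2 together with its
# negated image set, so the candidate set becomes {-2, 2}; for an odd power
# such as x**3 = 8 only the real root {2} is produced.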
def invert_complex(f_x, y, x):
""" Inverts a complex valued function.
Reduces the complex valued equation ``f(x) = y`` to a set of equations
``{g(x) = h_1(y), g(x) = h_2(y), ..., g(x) = h_n(y) }`` where ``g(x)`` is
a simpler function than ``f(x)``. The return value is a tuple ``(g(x),
set_h)``, where ``g(x)`` is a function of ``x`` and ``set_h`` is
    the set of functions ``{h_1(y), h_2(y), ..., h_n(y)}``.
    Here, ``y`` is not necessarily a symbol.
    Note that `invert\_complex` and `invert\_real` don't always produce the
    same result even for a seemingly simple function like ``exp(x)``, because
    the complex extension of the real valued ``log`` is multivalued over the
    complex numbers and has infinitely many branches. If you are working with
    real values only, or you are not sure which function to use, you should
    use `invert\_real`.
Examples
========
>>> from sympy.solvers.solveset import invert_complex
>>> from sympy.abc import x, y
>>> from sympy import exp, log
>>> invert_complex(log(x), y, x)
(x, {exp(y)})
>>> invert_complex(log(x), 0, x) # Second parameter is not a symbol
(x, {1})
>>> invert_complex(exp(x), y, x)
(x, ImageSet(Lambda(_n, I*(2*_n*pi + arg(y)) + log(Abs(y))), Integers()))
See Also
========
invert_real
"""
y = sympify(y)
if not y.has(x):
return _invert_complex(f_x, FiniteSet(y), x)
else:
raise ValueError(" y should be independent of x ")
def _invert_complex(f, g_ys, symbol):
""" Helper function for invert_complex """
if not f.has(symbol):
raise ValueError("Inverse of constant function doesn't exist")
if f is symbol:
return (f, g_ys)
n = Dummy('n')
if f.is_Add:
# f = g + h
g, h = f.as_independent(symbol)
if g != S.Zero:
return _invert_complex(h, imageset(Lambda(n, n - g), g_ys), symbol)
if f.is_Mul:
# f = g*h
g, h = f.as_independent(symbol)
if g != S.One:
return _invert_complex(h, imageset(Lambda(n, n/g), g_ys), symbol)
if hasattr(f, 'inverse') and \
not isinstance(f, TrigonometricFunction) and \
not isinstance(f, exp):
if len(f.args) > 1:
raise ValueError("Only functions with one argument are supported.")
return _invert_complex(f.args[0],
imageset(Lambda(n, f.inverse()(n)), g_ys), symbol)
if isinstance(f, exp):
if isinstance(g_ys, FiniteSet):
exp_invs = Union(*[imageset(Lambda(n, I*(2*n*pi + arg(g_y)) +
log(Abs(g_y))), S.Integers)
for g_y in g_ys if g_y != 0])
return _invert_complex(f.args[0], exp_invs, symbol)
return (f, g_ys)
def domain_check(f, symbol, p):
"""Returns False if point p is infinite or any subexpression of f
is infinite or becomes so after replacing symbol with p. If none of
these conditions is met then True will be returned.
Examples
========
>>> from sympy import Mul, oo
>>> from sympy.abc import x
>>> from sympy.solvers.solveset import domain_check
>>> g = 1/(1 + (1/(x + 1))**2)
>>> domain_check(g, x, -1)
False
>>> domain_check(x**2, x, 0)
True
>>> domain_check(1/x, x, oo)
False
* The function relies on the assumption that the original form
of the equation has not been changed by automatic simplification.
>>> domain_check(x/x, x, 0) # x/x is automatically simplified to 1
True
* To deal with automatic evaluations use evaluate=False:
>>> domain_check(Mul(x, 1/x, evaluate=False), x, 0)
False
"""
f, p = sympify(f), sympify(p)
if p.is_infinite:
return False
return _domain_check(f, symbol, p)
def _domain_check(f, symbol, p):
# helper for domain check
if f.is_Atom and f.is_finite:
return True
elif f.subs(symbol, p).is_infinite:
return False
else:
return all([_domain_check(g, symbol, p)
for g in f.args])
def _is_finite_with_finite_vars(f):
"""
Return True if the given expression is finite when all free symbols
(that are not already specified as finite) are made finite.
"""
reps = dict([(s, Dummy(s.name, finite=True, **s.assumptions0))
for s in f.free_symbols if s.is_finite is None])
return f.xreplace(reps).is_finite
def _is_function_class_equation(func_class, f, symbol):
""" Tests whether the equation is an equation of the given function class.
The given equation belongs to the given function class if it is
comprised of functions of the function class which are multiplied by
or added to expressions independent of the symbol. In addition, the
arguments of all such functions must be linear in the symbol as well.
Examples
========
>>> from sympy.solvers.solveset import _is_function_class_equation
>>> from sympy import tan, sin, tanh, sinh, exp
>>> from sympy.abc import x
>>> from sympy.functions.elementary.trigonometric import (TrigonometricFunction,
... HyperbolicFunction)
>>> _is_function_class_equation(TrigonometricFunction, exp(x) + tan(x), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x) + sin(x), x)
True
>>> _is_function_class_equation(TrigonometricFunction, tan(x**2), x)
False
>>> _is_function_class_equation(TrigonometricFunction, tan(x + 2), x)
True
>>> _is_function_class_equation(HyperbolicFunction, tanh(x) + sinh(x), x)
True
"""
if f.is_Mul or f.is_Add:
return all(_is_function_class_equation(func_class, arg, symbol)
for arg in f.args)
if f.is_Pow:
if not f.exp.has(symbol):
return _is_function_class_equation(func_class, f.base, symbol)
else:
return False
if not f.has(symbol):
return True
if isinstance(f, func_class):
try:
g = Poly(f.args[0], symbol)
return g.degree() <= 1
except PolynomialError:
return False
else:
return False
def solveset_real(f, symbol):
""" Solves a real valued equation.
Parameters
==========
f : Expr
The target equation
symbol : Symbol
The variable for which the equation is solved
Returns
=======
Set
A set of values for `symbol` for which `f` is equal to
zero. An `EmptySet` is returned if no solution is found.
    A `ConditionSet` is returned as an unsolved object if algorithms
    to evaluate the complete solution are not yet implemented.
    `solveset_real` claims to be complete in the solution set it
    returns.
Raises
======
NotImplementedError
Algorithms to solve inequalities in complex domain are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report to the github issue tracker.
See Also
    ========
solveset_complex : solver for complex domain
Examples
========
>>> from sympy import Symbol, exp, sin, sqrt, I
>>> from sympy.solvers.solveset import solveset_real
>>> x = Symbol('x', real=True)
>>> a = Symbol('a', real=True, finite=True, positive=True)
>>> solveset_real(x**2 - 1, x)
{-1, 1}
>>> solveset_real(sqrt(5*x + 6) - 2 - x, x)
{-1, 2}
>>> solveset_real(x - I, x)
EmptySet()
>>> solveset_real(x - a, x)
{a}
>>> solveset_real(exp(x) - a, x)
{log(a)}
* In case the equation has infinitely many solutions an infinitely indexed
`ImageSet` is returned.
>>> solveset_real(sin(x) - 1, x)
ImageSet(Lambda(_n, 2*_n*pi + pi/2), Integers())
* If the equation is true for any arbitrary value of the symbol a `S.Reals`
set is returned.
>>> solveset_real(x - x, x)
(-oo, oo)
"""
if not symbol.is_Symbol:
raise ValueError(" %s is not a symbol" % (symbol))
f = sympify(f)
if not isinstance(f, (Expr, Number)):
raise ValueError(" %s is not a valid sympy expression" % (f))
original_eq = f
f = together(f)
if f.has(Piecewise):
f = piecewise_fold(f)
result = EmptySet()
if f.expand().is_zero:
return S.Reals
elif not f.has(symbol):
return EmptySet()
elif f.is_Mul and all([_is_finite_with_finite_vars(m) for m in f.args]):
        # The claim "the solution of f(x)*g(x) == 0 is the union of the
        # solutions of f(x) == 0 and g(x) == 0" is not true in general:
        # g(x) can blow up to infinity at the values where f(x) == 0. To be
        # sure that we are not silently allowing any wrong solutions we use
        # this technique only if both f and g are finite for a finite input.
result = Union(*[solveset_real(m, symbol) for m in f.args])
elif _is_function_class_equation(TrigonometricFunction, f, symbol) or \
_is_function_class_equation(HyperbolicFunction, f, symbol):
result = _solve_real_trig(f, symbol)
elif f.is_Piecewise:
result = EmptySet()
expr_set_pairs = f.as_expr_set_pairs()
for (expr, in_set) in expr_set_pairs:
solns = solveset_real(expr, symbol).intersect(in_set)
result = result + solns
else:
lhs, rhs_s = invert_real(f, 0, symbol)
if lhs == symbol:
result = rhs_s
elif isinstance(rhs_s, FiniteSet):
equations = [lhs - rhs for rhs in rhs_s]
for equation in equations:
if equation == f:
if any(_has_rational_power(g, symbol)[0]
for g in equation.args):
result += _solve_radical(equation,
symbol,
solveset_real)
elif equation.has(Abs):
result += _solve_abs(f, symbol)
else:
result += _solve_as_rational(equation, symbol,
solveset_solver=solveset_real,
as_poly_solver=_solve_as_poly_real)
else:
result += solveset_real(equation, symbol)
else:
result = ConditionSet(symbol, Eq(f, 0), S.Reals)
if isinstance(result, FiniteSet):
result = [s for s in result
if isinstance(s, RootOf)
or domain_check(original_eq, symbol, s)]
return FiniteSet(*result).intersect(S.Reals)
else:
return result.intersect(S.Reals)
def _solve_as_rational(f, symbol, solveset_solver, as_poly_solver):
""" solve rational functions"""
f = together(f, deep=True)
g, h = fraction(f)
if not h.has(symbol):
return as_poly_solver(g, symbol)
else:
valid_solns = solveset_solver(g, symbol)
invalid_solns = solveset_solver(h, symbol)
return valid_solns - invalid_solns
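# Illustrative note (not part of the original module): for
# f = (x**2 - 1)/(x - 1) the numerator gives the candidate solutions {-1, 1}
# and the denominator gives {1}, so the set difference above returns {-1},
# excluding the point where the expression is undefined.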
def _solve_real_trig(f, symbol):
""" Helper to solve trigonometric equations """
f = trigsimp(f)
f = f.rewrite(exp)
f = together(f)
g, h = fraction(f)
y = Dummy('y')
g, h = g.expand(), h.expand()
g, h = g.subs(exp(I*symbol), y), h.subs(exp(I*symbol), y)
if g.has(symbol) or h.has(symbol):
return ConditionSet(symbol, Eq(f, 0), S.Reals)
solns = solveset_complex(g, y) - solveset_complex(h, y)
if isinstance(solns, FiniteSet):
return Union(*[invert_complex(exp(I*symbol), s, symbol)[1]
for s in solns])
elif solns is S.EmptySet:
return S.EmptySet
else:
return ConditionSet(symbol, Eq(f, 0), S.Reals)
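# Illustrative sketch (not part of the original module): for f = sin(x) the
# rewrite in terms of exp(I*x) produces, after the substitution
# y = exp(I*x), a numerator proportional to y**2 - 1; its roots {1, -1} are
# mapped back through invert_complex(exp(I*x), ...), and the union of the
# resulting image sets is x = n*pi, the expected zeros of sin.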
def _solve_as_poly(f, symbol, solveset_solver, invert_func):
"""
Solve the equation using polynomial techniques if it already is a
polynomial equation or, with a change of variables, can be made so.
"""
result = None
if f.is_polynomial(symbol):
solns = roots(f, symbol, cubics=True, quartics=True,
quintics=True, domain='EX')
num_roots = sum(solns.values())
if degree(f, symbol) <= num_roots:
result = FiniteSet(*solns.keys())
else:
poly = Poly(f, symbol)
solns = poly.all_roots()
if poly.degree() <= len(solns):
result = FiniteSet(*solns)
else:
result = ConditionSet(symbol, Eq(f, 0), S.Complexes)
else:
poly = Poly(f)
if poly is None:
result = ConditionSet(symbol, Eq(f, 0), S.Complexes)
gens = [g for g in poly.gens if g.has(symbol)]
if len(gens) == 1:
poly = Poly(poly, gens[0])
gen = poly.gen
deg = poly.degree()
poly = Poly(poly.as_expr(), poly.gen, composite=True)
poly_solns = FiniteSet(*roots(poly, cubics=True, quartics=True,
quintics=True).keys())
if len(poly_solns) < deg:
result = ConditionSet(symbol, Eq(f, 0), S.Complexes)
if gen != symbol:
y = Dummy('y')
lhs, rhs_s = invert_func(gen, y, symbol)
if lhs is symbol:
result = Union(*[rhs_s.subs(y, s) for s in poly_solns])
else:
result = ConditionSet(symbol, Eq(f, 0), S.Complexes)
else:
result = ConditionSet(symbol, Eq(f, 0), S.Complexes)
if result is not None:
if isinstance(result, FiniteSet):
# this is to simplify solutions like -sqrt(-I) to sqrt(2)/2
            # - sqrt(2)*I/2. We are not expanding for solutions with free
            # variables because that makes the solution more complicated. For
# example expand_complex(a) returns re(a) + I*im(a)
if all([s.free_symbols == set() and not isinstance(s, RootOf)
for s in result]):
s = Dummy('s')
result = imageset(Lambda(s, expand_complex(s)), result)
return result
else:
return ConditionSet(symbol, Eq(f, 0), S.Complexes)
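# Illustrative sketch (not part of the original module): an expression such
# as exp(x)**2 - 3*exp(x) + 2 is not a polynomial in x, but Poly can pick
# exp(x) as its single generator; the quadratic's roots {1, 2} are then
# mapped back through the supplied invert_func (invert_real(exp(x), ...) in
# the real case), giving the solutions {0, log(2)}.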
def _solve_as_poly_real(f, symbol):
"""
Solve real valued equation with methods to solve polynomial
equations.
"""
return _solve_as_poly(f, symbol,
solveset_solver=solveset_real,
invert_func=invert_real)
def _solve_as_poly_complex(f, symbol):
"""
Solve complex valued equation with methods to solve polynomial
equations.
"""
return _solve_as_poly(f, symbol,
solveset_solver=solveset_complex,
invert_func=invert_complex)
def _has_rational_power(expr, symbol):
"""
Returns (bool, den) where bool is True if the term has a
non-integer rational power and den is the denominator of the
expression's exponent.
Examples
========
>>> from sympy.solvers.solveset import _has_rational_power
>>> from sympy import sqrt
>>> from sympy.abc import x
>>> _has_rational_power(sqrt(x), x)
(True, 2)
>>> _has_rational_power(x**2, x)
(False, 1)
"""
a, p, q = Wild('a'), Wild('p'), Wild('q')
pattern_match = expr.match(a*p**q)
if pattern_match is None or pattern_match[a] is S.Zero:
return (False, S.One)
elif p not in pattern_match.keys() or a not in pattern_match.keys():
return (False, S.One)
elif isinstance(pattern_match[q], Rational) \
and pattern_match[p].has(symbol):
if not pattern_match[q].q == S.One:
return (True, pattern_match[q].q)
if not isinstance(pattern_match[a], Pow) \
or isinstance(pattern_match[a], Mul):
return (False, S.One)
else:
return _has_rational_power(pattern_match[a], symbol)
def _solve_radical(f, symbol, solveset_solver):
""" Helper function to solve equations with radicals """
from sympy.solvers.solvers import unrad
eq, cov = unrad(f)
if not cov:
result = solveset_solver(eq, symbol) - \
Union(*[solveset_solver(g, symbol) for g in denoms(f, [symbol])])
else:
y, yeq = cov
if not solveset_solver(y - I, y):
yreal = Dummy('yreal', real=True)
yeq = yeq.xreplace({y: yreal})
eq = eq.xreplace({y: yreal})
y = yreal
g_y_s = solveset_solver(yeq, symbol)
f_y_sols = solveset_solver(eq, y)
result = Union(*[imageset(Lambda(y, g_y), f_y_sols)
for g_y in g_y_s])
return FiniteSet(*[s for s in result if checksol(f, symbol, s) is True])
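# Illustrative note (not part of the original module): for f = sqrt(x) - 2,
# unrad yields the polynomial equation x - 4, so the candidate set is {4},
# which checksol confirms; for f = sqrt(x) + 2 the same candidate 4 is
# produced but rejected by checksol, leaving an empty result.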
def _solve_abs(f, symbol):
""" Helper function to solve equation involving absolute value function """
from sympy.solvers.inequalities import solve_univariate_inequality
assert f.has(Abs)
p, q, r = Wild('p'), Wild('q'), Wild('r')
pattern_match = f.match(p*Abs(q) + r)
if not pattern_match[p].is_zero:
f_p, f_q, f_r = pattern_match[p], pattern_match[q], pattern_match[r]
q_pos_cond = solve_univariate_inequality(f_q >= 0, symbol,
relational=False)
q_neg_cond = solve_univariate_inequality(f_q < 0, symbol,
relational=False)
sols_q_pos = solveset_real(f_p*f_q + f_r,
symbol).intersect(q_pos_cond)
sols_q_neg = solveset_real(f_p*(-f_q) + f_r,
symbol).intersect(q_neg_cond)
return Union(sols_q_pos, sols_q_neg)
else:
return ConditionSet(symbol, Eq(f, 0), S.Complexes)
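# Illustrative note (not part of the original module): for f = Abs(x - 1) - 3
# the match gives p = 1, q = x - 1, r = -3; the branch with x - 1 >= 0 yields
# x = 4 and the branch with x - 1 < 0 yields x = -2, so the union is {-2, 4}.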
def solveset_complex(f, symbol):
""" Solve a complex valued equation.
Parameters
==========
f : Expr
The target equation
symbol : Symbol
The variable for which the equation is solved
Returns
=======
Set
    A set of values for `symbol` for which `f` is equal to
zero. An `EmptySet` is returned if no solution is found.
A `ConditionSet` is returned as an unsolved object if algorithms
to evaluate complete solutions are not yet implemented.
`solveset_complex` claims to be complete in the solution set that
it returns.
Raises
======
NotImplementedError
The algorithms to solve inequalities in complex domain are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report to the github issue tracker.
See Also
========
solveset_real: solver for real domain
Examples
========
>>> from sympy import Symbol, exp
>>> from sympy.solvers.solveset import solveset_complex
>>> from sympy.abc import x, a, b, c
>>> solveset_complex(a*x**2 + b*x +c, x)
{-b/(2*a) - sqrt(-4*a*c + b**2)/(2*a), -b/(2*a) + sqrt(-4*a*c + b**2)/(2*a)}
    * Because the complex extensions of real valued functions are
    multivalued, even some simple equations can have infinitely many
    solutions.
>>> solveset_complex(exp(x) - 1, x)
ImageSet(Lambda(_n, 2*_n*I*pi), Integers())
"""
if not symbol.is_Symbol:
raise ValueError(" %s is not a symbol" % (symbol))
f = sympify(f)
original_eq = f
if not isinstance(f, (Expr, Number)):
raise ValueError(" %s is not a valid sympy expression" % (f))
f = together(f)
# Without this equations like a + 4*x**2 - E keep oscillating
# into form a/4 + x**2 - E/4 and (a + 4*x**2 - E)/4
if not fraction(f)[1].has(symbol):
f = expand(f)
if f.is_zero:
return S.Complexes
elif not f.has(symbol):
result = EmptySet()
elif f.is_Mul and all([_is_finite_with_finite_vars(m) for m in f.args]):
result = Union(*[solveset_complex(m, symbol) for m in f.args])
else:
lhs, rhs_s = invert_complex(f, 0, symbol)
if lhs == symbol:
result = rhs_s
elif isinstance(rhs_s, FiniteSet):
equations = [lhs - rhs for rhs in rhs_s]
result = EmptySet()
for equation in equations:
if equation == f:
if any(_has_rational_power(g, symbol)[0]
for g in equation.args):
result += _solve_radical(equation,
symbol,
solveset_complex)
else:
result += _solve_as_rational(equation, symbol,
solveset_solver=solveset_complex,
as_poly_solver=_solve_as_poly_complex)
else:
result += solveset_complex(equation, symbol)
else:
result = ConditionSet(symbol, Eq(f, 0), S.Complexes)
if isinstance(result, FiniteSet):
result = [s for s in result
if isinstance(s, RootOf)
or domain_check(original_eq, symbol, s)]
return FiniteSet(*result)
else:
return result
def solveset(f, symbol=None, domain=S.Complexes):
"""Solves a given inequality or equation with set as output
Parameters
==========
f : Expr or a relational.
The target equation or inequality
symbol : Symbol
The variable for which the equation is solved
domain : Set
The domain over which the equation is solved
Returns
=======
Set
A set of values for `symbol` for which `f` is True or is equal to
zero. An `EmptySet` is returned if no solution is found.
    A `ConditionSet` is returned as an unsolved object if algorithms
    to evaluate the complete solution are not yet implemented.
`solveset` claims to be complete in the solution set that it returns.
Raises
======
NotImplementedError
The algorithms to solve inequalities in complex domain are
not yet implemented.
ValueError
The input is not valid.
RuntimeError
It is a bug, please report to the github issue tracker.
`solveset` uses two underlying functions `solveset_real` and
`solveset_complex` to solve equations. They are the solvers for real and
complex domain respectively. `solveset` ignores the assumptions on the
variable being solved for and instead, uses the `domain` parameter to
decide which solver to use.
See Also
========
solveset_real: solver for real domain
solveset_complex: solver for complex domain
Examples
========
>>> from sympy import exp, Symbol, Eq, pprint, S
>>> from sympy.solvers.solveset import solveset
>>> from sympy.abc import x
* The default domain is complex. Not specifying a domain will lead to the
solving of the equation in the complex domain.
>>> pprint(solveset(exp(x) - 1, x), use_unicode=False)
{2*n*I*pi | n in Integers()}
    * If you want to solve an equation in the real domain via the `solveset`
    interface, specify that the domain is real. Alternatively, use
    `solveset\_real`.
>>> x = Symbol('x')
>>> solveset(exp(x) - 1, x, S.Reals)
{0}
>>> solveset(Eq(exp(x), 1), x, S.Reals)
{0}
* Inequalities can be solved over the real domain only. Use of a complex
domain leads to a NotImplementedError.
>>> solveset(exp(x) > 1, x, S.Reals)
(0, oo)
"""
from sympy.solvers.inequalities import solve_univariate_inequality
if symbol is None:
free_symbols = f.free_symbols
if len(free_symbols) == 1:
symbol = free_symbols.pop()
else:
raise ValueError(filldedent('''
The independent variable must be specified for a
multivariate equation.'''))
elif not symbol.is_Symbol:
raise ValueError('A Symbol must be given, not type %s: %s' % (type(symbol), symbol))
f = sympify(f)
if f is S.false:
return EmptySet()
if f is S.true:
return domain
if isinstance(f, Eq):
from sympy.core import Add
f = Add(f.lhs, - f.rhs, evaluate=False)
if f.is_Relational:
if not domain.is_subset(S.Reals):
raise NotImplementedError("Inequalities in the complex domain are "
"not supported. Try the real domain by"
"setting domain=S.Reals")
try:
result = solve_univariate_inequality(
f, symbol, relational=False).intersection(domain)
except NotImplementedError:
result = ConditionSet(symbol, f, domain)
return result
if isinstance(f, (Expr, Number)):
if domain is S.Reals:
return solveset_real(f, symbol)
elif domain is S.Complexes:
return solveset_complex(f, symbol)
elif domain.is_subset(S.Reals):
return Intersection(solveset_real(f, symbol), domain)
else:
return Intersection(solveset_complex(f, symbol), domain)
###############################################################################
################################ LINSOLVE #####################################
###############################################################################
def linear_eq_to_matrix(equations, *symbols):
r"""
Converts a given System of Equations into Matrix form.
Here `equations` must be a linear system of equations in
`symbols`. The order of symbols in input `symbols` will
determine the order of coefficients in the returned
Matrix.
The Matrix form corresponds to the augmented matrix form.
For example:
.. math:: 4x + 2y + 3z = 1
.. math:: 3x + y + z = -6
.. math:: 2x + 4y + 9z = 2
This system would return `A` & `b` as given below:
::
[ 4 2 3 ] [ 1 ]
A = [ 3 1 1 ] b = [-6 ]
[ 2 4 9 ] [ 2 ]
Examples
========
>>> from sympy.solvers.solveset import linear_eq_to_matrix
>>> from sympy import symbols
>>> x, y, z = symbols('x, y, z')
>>> eqns = [x + 2*y + 3*z - 1, 3*x + y + z + 6, 2*x + 4*y + 9*z - 2]
>>> A, b = linear_eq_to_matrix(eqns, [x, y, z])
>>> A
Matrix([
[1, 2, 3],
[3, 1, 1],
[2, 4, 9]])
>>> b
Matrix([
[ 1],
[-6],
[ 2]])
>>> eqns = [x + z - 1, y + z, x - y]
>>> A, b = linear_eq_to_matrix(eqns, [x, y, z])
>>> A
Matrix([
[1, 0, 1],
[0, 1, 1],
[1, -1, 0]])
>>> b
Matrix([
[1],
[0],
[0]])
* Symbolic coefficients are also supported
>>> a, b, c, d, e, f = symbols('a, b, c, d, e, f')
>>> eqns = [a*x + b*y - c, d*x + e*y - f]
>>> A, B = linear_eq_to_matrix(eqns, x, y)
>>> A
Matrix([
[a, b],
[d, e]])
>>> B
Matrix([
[c],
[f]])
"""
if not symbols:
        raise ValueError('Symbols must be given, for which coefficients '
                         'are to be found.')
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
M = Matrix([symbols])
# initialise Matrix with symbols + 1 columns
M = M.col_insert(len(symbols), Matrix([1]))
row_no = 1
for equation in equations:
f = sympify(equation)
if isinstance(f, Equality):
f = f.lhs - f.rhs
# Extract coeff of symbols
coeff_list = []
for symbol in symbols:
coeff_list.append(f.coeff(symbol))
# append constant term (term free from symbols)
coeff_list.append(-f.as_coeff_add(*symbols)[0])
# insert equations coeff's into rows
M = M.row_insert(row_no, Matrix([coeff_list]))
row_no += 1
    # delete the initial (first) trivial row
M.row_del(0)
A, b = M[:, :-1], M[:, -1:]
return A, b
def linsolve(system, *symbols):
r"""
    Solve a system of N linear equations with M variables, which
    means both under- and overdetermined systems are supported.
    The possible number of solutions is zero, one or infinite.
    If the system is inconsistent (zero solutions) an EmptySet() is
    returned, whereas infinite solutions are represented parametrically
    in terms of the given symbols. For a unique solution a FiniteSet
    containing an ordered tuple is returned.
All Standard input formats are supported:
For the given set of Equations, the respective input types
are given below:
.. math:: 3x + 2y - z = 1
.. math:: 2x - 2y + 4z = -2
.. math:: 2x - y + 2z = 0
* Augmented Matrix Form, `system` given below:
::
[3 2 -1 1]
system = [2 -2 4 -2]
[2 -1 2 0]
* List Of Equations Form
`system = [3x + 2y - z - 1, 2x - 2y + 4z + 2, 2x - y + 2z]`
* Input A & b Matrix Form (from Ax = b) are given as below:
::
[3 2 -1 ] [ 1 ]
A = [2 -2 4 ] b = [ -2 ]
[2 -1 2 ] [ 0 ]
`system = (A, b)`
Symbols to solve for should be given as input in all the
cases either in an iterable or as comma separated arguments.
This is done to maintain consistency in returning solutions
in the form of variable input by the user.
The algorithm used here is Gauss-Jordan elimination, which
    results, after elimination, in a row echelon form matrix.
Returns
=======
    A FiniteSet containing an ordered tuple of values of `symbols`
    for which the `system` has a solution.
    Please note that a FiniteSet is, in general, unordered; the solution
    returned here is not simply a FiniteSet of solutions. Rather, it is a
    FiniteSet whose first and only argument is a tuple of solutions, and
    that tuple is ordered, hence the returned solution is ordered.
    Also note that the solution could have been returned as an ordered
    tuple directly; FiniteSet is just a `{}` wrapper around the tuple.
    It has no other significance except that it maintains a consistent
    output format throughout solveset.
    Returns EmptySet() if the linear system is inconsistent.
Raises
======
ValueError
The input is not valid.
The symbols are not given.
Examples
========
>>> from sympy.solvers.solveset import linsolve
>>> from sympy import Matrix, S
>>> from sympy import symbols
>>> x, y, z = symbols("x, y, z")
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])
>>> b = Matrix([3, 6, 9])
>>> A
Matrix([
[1, 2, 3],
[4, 5, 6],
[7, 8, 10]])
>>> b
Matrix([
[3],
[6],
[9]])
>>> linsolve((A, b), [x, y, z])
{(-1, 2, 0)}
    * Parametric Solution: If the system is underdetermined, the function
    will return a parametric solution in terms of the given symbols.
    Free symbols in the system are returned as they are. For example, in
    the system below `z` is returned as the solution for variable z, which
    means z is a free symbol, i.e. it can take arbitrary values.
>>> A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> b = Matrix([3, 6, 9])
>>> linsolve((A, b), [x, y, z])
{(z - 1, -2*z + 2, z)}
* List of Equations as input
>>> Eqns = [3*x + 2*y - z - 1, 2*x - 2*y + 4*z + 2, - x + S(1)/2*y - z]
>>> linsolve(Eqns, x, y, z)
{(1, -2, -2)}
* Augmented Matrix as input
>>> aug = Matrix([[2, 1, 3, 1], [2, 6, 8, 3], [6, 8, 18, 5]])
>>> aug
Matrix([
[2, 1, 3, 1],
[2, 6, 8, 3],
[6, 8, 18, 5]])
>>> linsolve(aug, x, y, z)
{(3/10, 2/5, 0)}
* Solve for symbolic coefficients
>>> a, b, c, d, e, f = symbols('a, b, c, d, e, f')
>>> eqns = [a*x + b*y - c, d*x + e*y - f]
>>> linsolve(eqns, x, y)
{(-b*(f - c*d/a)/(a*(e - b*d/a)) + c/a, (f - c*d/a)/(e - b*d/a))}
* A degenerate system returns solution as set of given
symbols.
>>> system = Matrix(([0,0,0], [0,0,0], [0,0,0]))
>>> linsolve(system, x, y)
{(x, y)}
"""
if not symbols:
raise ValueError('Symbols must be given, for which solution of the '
'system is to be found.')
if hasattr(symbols[0], '__iter__'):
symbols = symbols[0]
try:
sym = symbols[0].is_Symbol
except AttributeError:
sym = False
if not sym:
raise ValueError('Symbols or iterable of symbols must be given as '
'second argument, not type %s: %s' % (type(symbols[0]), symbols[0]))
# 1). Augmented Matrix input Form
if isinstance(system, Matrix):
A, b = system[:, :-1], system[:, -1:]
elif hasattr(system, '__iter__'):
# 2). A & b as input Form
if len(system) == 2 and system[0].is_Matrix:
A, b = system[0], system[1]
# 3). List of equations Form
if not system[0].is_Matrix:
A, b = linear_eq_to_matrix(system, symbols)
else:
raise ValueError("Invalid arguments")
# Solve using Gauss-Jordan elimination
try:
sol, params, free_syms = A.gauss_jordan_solve(b, freevar=True)
except ValueError:
# No solution
return EmptySet()
# Replace free parameters with free symbols
solution = []
if params:
for s in sol:
for k, v in enumerate(params):
s = s.subs(v, symbols[free_syms[k]])
solution.append(s)
else:
for s in sol:
solution.append(s)
# Return solutions
solution = FiniteSet(tuple(solution))
return solution
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Cloud TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
from six.moves import urllib
from six.moves.urllib.error import URLError
from six.moves.urllib.request import Request
from six.moves.urllib.request import urlopen
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import format_master_url
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import get_accelerator_devices
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
_GOOGLE_API_CLIENT_INSTALLED = True
try:
from googleapiclient import discovery # pylint: disable=g-import-not-at-top
from oauth2client.client import GoogleCredentials # pylint: disable=g-import-not-at-top
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
_GKE_ENV_VARIABLE = 'KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS'
_ENDPOINTS_SEPARATOR = ','
_DEFAULT_ENV_VARIABLE = 'TPU_NAME'
_DISCOVERY_SERVICE_URL_ENV_VARIABLE = 'TPU_API_DISCOVERY_URL'
_TPU_DEVICE_REGEX = re.compile(
r'.*task:(?P<host_id>\d+)/.*device:TPU:(?P<core_id>\d+)$')
_TPU_CONN_RETRIES = 120
DeviceDetails = collections.namedtuple(
'DeviceDetails', ['device_map', 'total_cores'])
@tf_export('distribute.cluster_resolver.TPUClusterResolver')
class TPUClusterResolver(ClusterResolver):
"""Cluster Resolver for Google Cloud TPUs.
This is an implementation of cluster resolvers for the Google Cloud TPU
  service. As Cloud TPUs are in alpha, you will need to specify an API definition
file for this to consume, in addition to a list of Cloud TPUs in your Google
Cloud Platform project.
"""
def _tpuService(self):
"""Creates a new Cloud TPU API object.
This works around an issue where the underlying HTTP connection sometimes
times out when the script has been running for too long. Other methods in
    this object call this method to get a new API object whenever they need
to communicate with the Cloud API.
Returns:
A Google Cloud TPU API object.
"""
if self._service:
return self._service
credentials = self._credentials
if credentials is None or credentials == 'default':
credentials = GoogleCredentials.get_application_default()
if self._discovery_url:
return discovery.build(
'tpu', 'v1alpha1',
credentials=credentials,
discoveryServiceUrl=self._discovery_url)
else:
return discovery.build(
'tpu', 'v1alpha1',
credentials=credentials)
def _requestComputeMetadata(self, path):
req = Request('http://metadata/computeMetadata/v1/%s' % path,
headers={'Metadata-Flavor': 'Google'})
resp = urlopen(req)
return compat.as_bytes(resp.read())
def _shouldResolve(self):
if isinstance(self._should_resolve_override, bool):
return self._should_resolve_override
if (self._tpu == compat.as_bytes('') or
self._tpu == compat.as_bytes('local') or
self._tpu.startswith(compat.as_bytes('/bns')) or
self._tpu.startswith(compat.as_bytes('localhost:')) or
self._tpu.startswith(compat.as_bytes('grpc://')) or
self._tpu.startswith(compat.as_bytes('uptc://'))):
return False
return True
@staticmethod
def _get_device_dict_and_cores(devices):
"""Returns a dict of hosts to cores and total cores given devices names.
Returns a namedtuple with two attributes:
device_map: A map of host_ids to a list of core_ids.
total_cores: The total number of cores within the TPU system.
Args:
devices: A list of devices returned by session.list_devices()
"""
device_map = collections.defaultdict(list)
num_cores = 0
for device in devices:
match = _TPU_DEVICE_REGEX.match(device.name)
if match:
host_id = match.group('host_id')
core_id = match.group('core_id')
device_map[host_id].append(core_id)
num_cores += 1
return DeviceDetails(device_map, num_cores)
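  # Illustrative note (hypothetical device name): a device such as
  # '/job:worker/replica:0/task:2/device:TPU:5' matches _TPU_DEVICE_REGEX
  # above with host_id '2' and core_id '5', so core '5' is appended to
  # device_map['2'] and the running core count is incremented.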
@staticmethod
def _verify_and_return_same_core_count(device_dict):
"""Verifies that every device in device_dict has the same # of cores."""
num_cores_per_host_set = (
{len(core_ids) for core_ids in device_dict.values()})
if len(num_cores_per_host_set) != 1:
      raise RuntimeError('The number of TPU cores on each host is not the '
                         'same. This should never happen. '
                         'Devices: {}'.format(device_dict))
return num_cores_per_host_set.pop()
@staticmethod
def _inGke():
"""When running in GKE, the environment variable will be set."""
return _GKE_ENV_VARIABLE in os.environ
@staticmethod
def _gkeEndpoints():
return os.environ[_GKE_ENV_VARIABLE]
@staticmethod
def _envVarFallback():
if _DEFAULT_ENV_VARIABLE in os.environ:
return os.environ[_DEFAULT_ENV_VARIABLE]
return None
@staticmethod
def _environmentDiscoveryUrl():
return os.environ.get(_DISCOVERY_SERVICE_URL_ENV_VARIABLE)
@staticmethod
def _isRunningInGCE():
"""Checks for GCE presence by attempting to query the metadata service."""
try:
req = Request('http://metadata.google.internal/computeMetadata/v1',
headers={'Metadata-Flavor': 'Google'})
resp = urllib.request.urlopen(req, timeout=1)
info = resp.info()
if 'Metadata-Flavor' in info and info['Metadata-Flavor'] == 'Google':
return True
except URLError:
pass
return False
def __init__(self,
tpu=None,
zone=None,
project=None,
job_name='worker',
coordinator_name=None,
coordinator_address=None,
credentials='default',
service=None,
discovery_url=None):
"""Creates a new TPUClusterResolver object.
The ClusterResolver will then use the parameters to query the Cloud TPU APIs
for the IP addresses and ports of each Cloud TPU listed.
Args:
tpu: A string corresponding to the TPU to use. If the string is the empty
string, the string 'local', or a string that begins with 'grpc://' or
'/bns', then it is assumed to not correspond with a Cloud TPU and will
instead be passed as the session master and no ClusterSpec propagation
will be done. In the future, this may also support a list of strings
when multiple Cloud TPUs are used.
zone: Zone where the TPUs are located. If omitted or empty, we will assume
that the zone of the TPU is the same as the zone of the GCE VM, which we
will try to discover from the GCE metadata service.
project: Name of the GCP project containing Cloud TPUs. If omitted or
empty, we will try to discover the project name of the GCE VM from the
GCE metadata service.
job_name: Name of the TensorFlow job the TPUs belong to.
coordinator_name: The name to use for the coordinator. Set to None if the
coordinator should not be included in the computed ClusterSpec.
      coordinator_address: The address of the coordinator (typically an ip:port
        pair). If set to None while coordinator_name is provided, a TF server
        will be started. If coordinator_name is None, a TF server will not be
        started regardless of coordinator_address.
credentials: GCE Credentials. If None, then we use default credentials
from the oauth2client
service: The GCE API object returned by the googleapiclient.discovery
function. If you specify a custom service object, then the credentials
parameter will be ignored.
discovery_url: A URL template that points to the location of
the discovery service. It should have two parameters {api} and
{apiVersion} that when filled in produce an absolute URL to the
discovery document for that service. The environment variable
'TPU_API_DISCOVERY_URL' will override this.
Raises:
ImportError: If the googleapiclient is not installed.
ValueError: If no TPUs are specified.
RuntimeError: If an empty TPU name is specified and this is running in a
Google Cloud environment.
"""
if isinstance(tpu, list):
if not tpu:
raise ValueError('At least one TPU must be specified.')
if len(tpu) != 1:
raise NotImplementedError(
'Using multiple TPUs in a single session is not yet implemented')
tpu = tpu[0]
in_gke = self._inGke()
# When using GKE with Cloud TPUs, the env variable will be set.
if tpu is None:
if in_gke:
tpu = self._gkeEndpoints()
else:
tpu = self._envVarFallback()
if tpu is None:
raise ValueError('Please provide a TPU Name to connect to.')
self._tpu = compat.as_bytes(tpu) # self._tpu is always bytes
# If we are running in Cloud and don't specify a TPU name
if self._isRunningInGCE() and not self._tpu:
raise RuntimeError('You need to specify a TPU Name if you are running in '
'the Google Cloud environment.')
    # By default the task_type is 'worker' and the task_id is 0 (which is the
# first worker in the task).
self.task_type = job_name
self.task_id = 0
if tpu.startswith('grpc://'):
# Cloud environment, where we are using GRPC to communicate to TPUs.
self._environment = ''
elif tpu == 'local' or not tpu:
# Google environment, where the TPU is attached to the host.
self._environment = 'google'
elif tpu.startswith('/bns') or tpu.startswith('uptc://'):
# Google environment, where we reach the TPU through BNS.
self._environment = 'google'
# If TPU is in the Google environment or exists locally, we don't use any
# RPC layer.
if tpu.startswith('/bns') or tpu.startswith(
'uptc://') or tpu == 'local' or not tpu:
self.rpc_layer = None
else:
self.rpc_layer = 'grpc'
# Setting this overrides the return value of self._shouldResolve()
self._should_resolve_override = None
# We strip out the protocol if it is included, and override the
# shouldResolve function to never resolve. We are adding the protocol back
# in later in self.master().
if self.rpc_layer is not None and tpu.startswith(self.rpc_layer + '://'):
tpu = tpu[len(self.rpc_layer + '://'):]
self._tpu = compat.as_bytes(tpu) # self._tpu is always bytes
self._should_resolve_override = False
# Whether we should actually attempt to contact Cloud APIs
should_resolve = self._shouldResolve()
# We error out if we are in a non-Cloud environment which cannot talk to the
# Cloud APIs using the standard class and a special object is not passed in.
self._service = service
if (self._service is None and should_resolve and
not _GOOGLE_API_CLIENT_INSTALLED):
raise ImportError('googleapiclient and oauth2client must be installed '
'before using the TPU cluster resolver. Execute: '
'`pip install --upgrade google-api-python-client` '
'and `pip install --upgrade oauth2client` to '
'install with pip.')
# We save user-passed credentials, unless the user didn't pass in anything.
self._credentials = credentials
if (credentials == 'default' and should_resolve and
_GOOGLE_API_CLIENT_INSTALLED):
self._credentials = None
# Automatically detect project and zone if unspecified.
if not project and should_resolve:
project = compat.as_str(
self._requestComputeMetadata('project/project-id'))
if not zone and should_resolve:
zone_path = compat.as_str(self._requestComputeMetadata('instance/zone'))
zone = zone_path.split('/')[-1]
self._project = project
self._zone = zone
self._discovery_url = self._environmentDiscoveryUrl() or discovery_url
self._coordinator_name = coordinator_name
if (coordinator_name and not coordinator_address and
(should_resolve or in_gke)):
self._start_local_server()
else:
self._coordinator_address = coordinator_address
def master(self, task_type=None, task_id=None, rpc_layer=None):
"""Get the Master string to be used for the session.
    In the normal case, this returns the grpc path (grpc://1.2.3.4:8470) of
    the first instance in the ClusterSpec returned by the cluster_spec function.
If a non-TPU name is used when constructing a TPUClusterResolver, that will
be returned instead (e.g. If the tpus argument's value when constructing
this TPUClusterResolver was 'grpc://10.240.1.2:8470',
'grpc://10.240.1.2:8470' will be returned).
Args:
task_type: (Optional, string) The type of the TensorFlow task of the
master.
task_id: (Optional, integer) The index of the TensorFlow task of the
master.
rpc_layer: (Optional, string) The RPC protocol TensorFlow should use to
communicate with TPUs.
Returns:
string, the connection string to use when creating a session.
Raises:
ValueError: If none of the TPUs specified exists.
"""
if self._shouldResolve():
# We are going to communicate with the Cloud TPU APIs to get a Cluster.
cluster_spec = self.cluster_spec()
if task_type is not None and task_id is not None:
# task_type and task_id is from the function parameter
master = cluster_spec.task_address(task_type, task_id)
elif self.task_type is not None and self.task_id is not None:
# task_type and task_id is from the object
master = cluster_spec.task_address(self.task_type, self.task_id)
else:
# by default we take the first item in the cluster with the right name
job_tasks = cluster_spec.job_tasks(self.task_type)
if not job_tasks:
raise ValueError('No TPUs with the specified names exist.')
master = job_tasks[0]
else:
if isinstance(self._tpu, (bytes, bytearray)):
master = compat.as_text(self._tpu).split(_ENDPOINTS_SEPARATOR)[0]
else:
master = self._tpu.split(_ENDPOINTS_SEPARATOR)[0]
return format_master_url(master, rpc_layer or self.rpc_layer)
def get_master(self):
return self.master()
def get_job_name(self):
if (self._shouldResolve() or
self._isRunningInGCE()):
return self.task_type
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest TPU information.
We retrieve the information from the GCE APIs every time this method is
called.
Returns:
A ClusterSpec containing host information returned from Cloud TPUs.
Raises:
RuntimeError: If the provided TPU is not healthy.
"""
############################################################################
# There are 5 potential cases this code must handle:
# 1. [Normal case.] We should resolve the TPU name to a set of tasks, and
# a. Create a ClusterSpec that includes the coordinator job
# b. Create a ClusterSpec without the coordinator job.
# 2. [GKE / No API Access.] We should not resolve the TPU name to a set of
# tasks and
# a. Create a ClusterSpec with the coordinator
# b. Create a ClusterSpec without the coordinator
# 3. [Other (legacy non-gRPC).] We should return an empty ClusterSpec.
############################################################################
if self._shouldResolve():
# Case 1.
full_name = 'projects/%s/locations/%s/nodes/%s' % (
self._project, self._zone, compat.as_text(self._tpu))
service = self._tpuService()
request = service.projects().locations().nodes().get(name=full_name)
response = request.execute()
if 'state' in response and response['state'] != 'READY':
raise RuntimeError('TPU "%s" is not yet ready; state: "%s"' %
(compat.as_text(self._tpu), response['state']))
if 'networkEndpoints' in response:
worker_list = [
'%s:%s' % (endpoint['ipAddress'], endpoint['port'])
for endpoint in response['networkEndpoints']
]
else:
# Fall back to the deprecated response format
instance_url = '%s:%s' % (response['ipAddress'], response['port'])
worker_list = [instance_url]
cluster_spec = {self.task_type: worker_list}
else:
if self.rpc_layer is None:
# Case 3.
return None
# Case 2.
tpus = []
for tpu in compat.as_text(self._tpu).split(_ENDPOINTS_SEPARATOR):
# We are working around the fact that GKE environment variable that is
# supplied to us has the protocol string embedded in it, but we want
# to strip it out for the ClusterSpec.
if (self.rpc_layer is not None and
tpu.startswith(self.rpc_layer + '://')):
tpus.append(tpu[len(self.rpc_layer + '://'):])
else:
tpus.append(tpu)
cluster_spec = {self.task_type: tpus}
if self._coordinator_address:
# {1, 2}.a
cluster_spec[self._coordinator_name] = [self._coordinator_address]
return server_lib.ClusterSpec(cluster_spec)
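  # Illustrative note (hypothetical addresses): with a single resolved TPU
  # worker and a coordinator configured, the dict passed to ClusterSpec above
  # is shaped like
  #   {'worker': ['10.240.1.2:8470'], 'coordinator': ['10.128.0.2:34567']}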
def num_accelerators(self,
task_type=None,
task_id=None,
config_proto=None):
"""Returns the number of TPU cores per worker.
    Connects to the master, lists all the devices present in the master,
    and counts them up. Also verifies that the device counts per host in the
    cluster are the same before returning the number of TPU cores per host.
Args:
task_type: Unused.
task_id: Unused.
config_proto: Used to create a connection to a TPU master in order to
retrieve the system metadata.
Raises:
RuntimeError: If we cannot talk to a TPU worker after retrying or if the
number of TPU devices per host is different.
"""
retry_count = 1
# TODO(b/120564445): Replace with standard library for retries.
while True:
try:
device_details = TPUClusterResolver._get_device_dict_and_cores(
get_accelerator_devices(self.master(), config_proto=config_proto))
break
except errors.DeadlineExceededError:
error_message = ('Failed to connect to master. The TPU might not be '
'ready (e.g. still scheduling) or the master '
'address is incorrect: got (%s)' % self.master())
if retry_count <= _TPU_CONN_RETRIES:
logging.warning(error_message)
logging.warning('Retrying (%d/%d)...', retry_count, _TPU_CONN_RETRIES)
retry_count += 1
else:
raise RuntimeError(error_message)
if device_details.total_cores:
return {'TPU': TPUClusterResolver._verify_and_return_same_core_count(
device_details.device_map)}
return {'TPU': 0}
@property
def environment(self):
"""Returns the current environment which TensorFlow is running in."""
return self._environment
def _start_local_server(self):
address = compat.as_text(self._requestComputeMetadata(
'instance/network-interfaces/0/ip'))
self._server = server_lib.Server(
{
'local': ['0.0.0.0:0']
}, protocol='grpc', config=None, start=True)
# self._server.target is of the form: grpc://ipaddress:port
target = compat.as_bytes(self._server.target)
splits = target.split(compat.as_bytes(':'))
assert len(splits) == 3, self._server.target
assert splits[0] == compat.as_bytes('grpc'), self._server.target
self._coordinator_port = compat.as_text(splits[2])
self._coordinator_address = '%s:%s' % (
address, compat.as_text(self._coordinator_port))
def __deepcopy__(self, memo):
# TODO(b/73668574): Remove this once RunConfig avoids performing deepcopy.
return self
|
|
import asyncio
import os
import importlib.util
import importlib.machinery
import sys
import types
import typing
import traceback
from mitmproxy import addonmanager
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import command
from mitmproxy import eventsequence
from mitmproxy import ctx
import mitmproxy.types as mtypes
def load_script(path: str) -> types.ModuleType:
fullname = "__mitmproxy_script__.{}".format(
os.path.splitext(os.path.basename(path))[0]
)
# the fullname is not unique among scripts, so if there already is an existing script with said
# fullname, remove it.
sys.modules.pop(fullname, None)
oldpath = sys.path
sys.path.insert(0, os.path.dirname(path))
m = None
try:
loader = importlib.machinery.SourceFileLoader(fullname, path)
spec = importlib.util.spec_from_loader(fullname, loader=loader)
m = importlib.util.module_from_spec(spec)
loader.exec_module(m)
if not getattr(m, "name", None):
m.name = path # type: ignore
except Exception as e:
script_error_handler(path, e, msg=str(e))
finally:
sys.path[:] = oldpath
return m
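# Illustrative note (hypothetical path): loading '/home/user/addons/block.py'
# produces the module name '__mitmproxy_script__.block', and the script's
# directory is prepended to sys.path only for the duration of the import.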
def script_error_handler(path, exc, msg="", tb=False):
"""
Handles all the user's script errors with
an optional traceback
"""
exception = type(exc).__name__
if msg:
exception = msg
lineno = ""
if hasattr(exc, "lineno"):
lineno = str(exc.lineno)
log_msg = "in script {}:{} {}".format(path, lineno, exception)
if tb:
etype, value, tback = sys.exc_info()
tback = addonmanager.cut_traceback(tback, "invoke_addon")
log_msg = log_msg + "\n" + "".join(traceback.format_exception(etype, value, tback))
ctx.log.error(log_msg)
ReloadInterval = 1
class Script:
"""
An addon that manages a single script.
"""
def __init__(self, path: str, reload: bool) -> None:
self.name = "scriptmanager:" + path
self.path = path
self.fullpath = os.path.expanduser(
path.strip("'\" ")
)
self.ns = None
if not os.path.isfile(self.fullpath):
raise exceptions.OptionsError('No such script')
self.reloadtask = None
if reload:
self.reloadtask = asyncio.ensure_future(self.watcher())
else:
self.loadscript()
def done(self):
if self.reloadtask:
self.reloadtask.cancel()
@property
def addons(self):
return [self.ns] if self.ns else []
def loadscript(self):
ctx.log.info("Loading script %s" % self.path)
if self.ns:
ctx.master.addons.remove(self.ns)
self.ns = None
with addonmanager.safecall():
ns = load_script(self.fullpath)
ctx.master.addons.register(ns)
self.ns = ns
if self.ns:
# We're already running, so we have to explicitly register and
# configure the addon
ctx.master.addons.invoke_addon(self.ns, "running")
ctx.master.addons.invoke_addon(
self.ns,
"configure",
ctx.options.keys()
)
async def watcher(self):
last_mtime = 0
while True:
try:
mtime = os.stat(self.fullpath).st_mtime
except FileNotFoundError:
ctx.log.info("Removing script %s" % self.path)
scripts = list(ctx.options.scripts)
scripts.remove(self.path)
ctx.options.update(scripts=scripts)
return
if mtime > last_mtime:
self.loadscript()
last_mtime = mtime
await asyncio.sleep(ReloadInterval)
class ScriptLoader:
"""
An addon that manages loading scripts from options.
"""
def __init__(self):
self.is_running = False
self.addons = []
def load(self, loader):
loader.add_option(
"scripts", typing.Sequence[str], [],
"Execute a script."
)
def running(self):
self.is_running = True
@command.command("script.run")
def script_run(self, flows: typing.Sequence[flow.Flow], path: mtypes.Path) -> None:
"""
Run a script on the specified flows. The script is configured with
the current options and all lifecycle events for each flow are
simulated. Note that the load event is not invoked.
"""
if not os.path.isfile(path):
ctx.log.error('No such script: %s' % path)
return
mod = load_script(path)
if mod:
with addonmanager.safecall():
ctx.master.addons.invoke_addon(mod, "running")
ctx.master.addons.invoke_addon(
mod,
"configure",
ctx.options.keys()
)
for f in flows:
for evt, arg in eventsequence.iterate(f):
ctx.master.addons.invoke_addon(mod, evt, arg)
def configure(self, updated):
if "scripts" in updated:
for s in ctx.options.scripts:
if ctx.options.scripts.count(s) > 1:
raise exceptions.OptionsError("Duplicate script")
for a in self.addons[:]:
if a.path not in ctx.options.scripts:
ctx.log.info("Un-loading script: %s" % a.path)
ctx.master.addons.remove(a)
self.addons.remove(a)
# The machinations below are to ensure that:
# - Scripts remain in the same order
            #   - Scripts are not initialized unnecessarily. If only a
# script's order in the script list has changed, it is just
# moved.
current = {}
for a in self.addons:
current[a.path] = a
ordered = []
newscripts = []
for s in ctx.options.scripts:
if s in current:
ordered.append(current[s])
else:
sc = Script(s, True)
ordered.append(sc)
newscripts.append(sc)
self.addons = ordered
for s in newscripts:
ctx.master.addons.register(s)
if self.is_running:
# If we're already running, we configure and tell the addon
# we're up and running.
ctx.master.addons.invoke_addon(s, "running")
|
|
import urllib2
import pandas as pd
import argparse
moduli_validi = ['442', '451', '433', '343', '352', '532', '541']
def download_voti(giornata):
'''
    This method downloads from Pianetafantacalcio the .xls file containing the grades for the selected matchday.
:param giornata:
:return:
'''
results = 'http://www.pianetafantacalcio.it/Voti-Ufficiali-Excel.asp?giornataScelta=' + str(giornata)
sock = urllib2.urlopen(results)
excel = sock.read()
sock.close()
return excel
def estrai_coach(line):
'''
    Extract the coach's name from the file containing the formations
    :param line:
    :return:
    '''
    # Remove the literal 'Coach:' prefix; str.lstrip() strips a character
    # set rather than a prefix and could eat letters of the name.
    return line[len('Coach:'):].strip()
def estrai_titolari(line):
'''
    Extract the list of players fielded as starters
    :param line:
    :return:
    '''
    # Remove the literal 'Titolari: ' prefix; str.lstrip() strips a character
    # set and could also eat leading letters of the first player's name.
    s = line[len('Titolari: '):]
s = s.split(', ', 11)
return s
def estrai_panchina(line):
'''
    Extract the list of players named as substitutes on the bench
    :param line:
    :return:
    '''
    # Remove the literal 'Panchina: ' prefix instead of using lstrip (which
    # strips a character set, not a prefix).
    s = line[len('Panchina: '):]
s = s.split(', ', 7)
return s
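# Illustrative note (assumption, inferred from the parsers above): the
# formations file is expected to contain, for each team, three consecutive
# lines of the form
#   Coach: <coach name>
#   Titolari: <name>, <name>, ...   (the starting players)
#   Panchina: <name>, <name>, ...   (the bench players)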
def voti_fantacalcio(input, dataframe):
with open(input, 'r') as infile:
lines = infile.read().splitlines()
for i in range (0, len(lines)):
if "Coach" in lines[i]:
coach = estrai_coach(lines[i])
try:
titolari = estrai_titolari(lines[i+1])
panchina = estrai_panchina(lines[i+2])
except IndexError:
titolari = None
panchina = None
print "TEAM SCORE: " + str(calcola_risultato(coach, titolari, panchina, dataframe)) + '\n'
def parse_html(voti):
'''
    Parse the file containing the grades and return a DataFrame (pandas library)
:param voti:
:return:
'''
data = pd.read_html(voti, thousands=None)
return data[0]
def print_dict(dict):
'''
    Helper method to format a dictionary's keys for printing.
:param dict:
:return:
'''
s = ""
for x in dict:
s += x + " - "
return s
def modificatore_difesa(difensori, portiere):
'''
    Compute the defense modifier. It is applied only if at least 4 defenders received a grade.
    The average grade of the goalkeeper and the 3 best defenders is computed (with 5 fielded
    defenders, the goalkeeper and the 4 best are averaged).
    If average < 6 : 0 points
    If 6 <= average < 6.5 : 1 point
    If 6.5 <= average < 7 : 3 points
    If average >= 7 : 6 points
    Bonuses and maluses are taken into account in this modifier!
:param difensori:
:param portiere:
:return:
'''
modif = 0.0
if len(difensori) == 4:
for key,value in portiere.iteritems():
modif += value
low = min(difensori, key=difensori.get)
del difensori[low]
for value in difensori.itervalues():
modif+=value
modif = modif/4.0
elif len(difensori) == 5:
for key, value in portiere.iteritems():
modif+=value
low = min(difensori, key=difensori.get)
del difensori[low]
for value in difensori.itervalues():
modif+=value
modif = modif/5.0
else:
print "Modificatore di Difesa: NO (meno di 3 difensori)"
return 0
if modif >= 6.0 and modif < 6.5:
print "Modificatore di Difesa: SI (1pt - " + str(modif) + ")"
return 1
elif modif >= 6.5 and modif < 7.0:
print "Modificatore di Difesa: SI (3pt - " + str(modif) + ")"
return 3
elif modif >= 7.0:
print "Modificatore di Difesa: SI (6pt - " + str(modif) + ")"
return 6
else:
print "Modificatore di Difesa: NO (" + str(modif) + " < 6)"
return 0
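# Worked example (illustrative only): goalkeeper 6.5 and defenders graded
# 7, 6.5, 6 and 5.5 -> the lowest defender (5.5) is dropped and the average
# (6.5 + 7 + 6.5 + 6) / 4 = 6.5 falls in the [6.5, 7) band, so 3 bonus
# points are returned.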
def modificatore_centrocampo(centrocampisti):
'''
    Compute the midfield modifier. Applied only if there are at least 5 midfielders. The midfielder
    with the lowest grade receives, as final grade, the average grade of the other 4 midfielders.
    Bonuses and maluses are taken into account in this modifier!
:param centrocampisti:
:return:
'''
modif = 0.0
lowest = min(centrocampisti, key=centrocampisti.get)
low_val = float(centrocampisti.get(lowest))
if len(centrocampisti)== 5:
del centrocampisti[lowest]
for value in centrocampisti.itervalues():
modif += value
tot = modif/4.0
print "Modificatore di Centrocampo: SI (+" + str(tot) + ")"
return tot-low_val
else:
print "Modificatore di Centrocampo: NO"
return 0
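# Worked example with hypothetical ratings (illustrative only):
# centrocampisti = {'C1': 5.5, 'C2': 6.0, 'C3': 6.5, 'C4': 7.0, 'C5': 7.0}. The lowest midfielder (5.5) is
# removed, the average of the other four is (6.0 + 6.5 + 7.0 + 7.0) / 4 = 6.625, and the function returns
# 6.625 - 5.5 = 1.125 extra points for the team.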
def calcola_voti_base(coach, portiere, difensori, centrocampisti, attaccanti):
'''
Sum the ratings of the individual players.
:param coach:
:param portiere:
:param difensori:
:param centrocampisti:
:param attaccanti:
:return:
'''
final_score = 0.0
for value in portiere.itervalues():
final_score += value
for value in difensori.itervalues():
final_score += value
for value in centrocampisti.itervalues():
final_score += value
for value in attaccanti.itervalues():
final_score += value
print "Portiere: " + print_dict(portiere)
print "Difesa: " + print_dict(difensori)
print "Centrocampo: " + print_dict(centrocampisti)
print "Attacco: " + print_dict(attaccanti)
return final_score
def controllo_portiere(portiere, panchina):
'''
Check that at least one goalkeeper was fielded, either as a starter or on the bench.
:param portiere:
:param panchina:
:return:
'''
if len(portiere) == 1:
return portiere
elif len(portiere) < 1:
for (x,y,z) in panchina:
if (y) == 'P':
portiere[x] = z
return portiere
else:
portiere['NO KEEPER'] = 0.0
return portiere
def controllo_squadra(difensori, centrocampisti, attaccanti, panchina):
'''
Implements the substitutions using the "free bench" (panchina libera) rule. Every starter who did not get a
rating is replaced by the first bench player who did get one. The substitution is independent of the role,
so the team module can change dynamically with respect to the starting formation. However, the final module
must still be one of the modules allowed by fantacalcio ['442', '451', '433', '343', '352', '532', '541'].
See the illustrative comment after this function for a worked example.
:param difensori:
:param centrocampisti:
:param attaccanti:
:param panchina:
:return:
'''
modulo = "" + str(len(difensori)) + str(len(centrocampisti)) + str(len(attaccanti))
if modulo in moduli_validi or len(panchina) == 0:
print "Modulo Finale Utilizzato: " + modulo
return (difensori,centrocampisti,attaccanti)
else:
if len(difensori) <= 2:
for a in panchina:
(x,y,z) = a
if y == 'D':
difensori[x] = z
panchina.pop(panchina.index(a))
return controllo_squadra(difensori,centrocampisti,attaccanti,panchina)
elif len(centrocampisti) <=2:
for a in panchina:
(x,y,z) = a
if y == 'C':
centrocampisti[x] = z
panchina.pop(panchina.index(a))
return controllo_squadra(difensori,centrocampisti,attaccanti,panchina)
elif len(attaccanti) == 0:
for a in panchina:
(x,y,z) = a
if y == 'A':
attaccanti[x] = z
panchina.pop(panchina.index(a))
return controllo_squadra(difensori,centrocampisti,attaccanti,panchina)
else:
for a in panchina:
(x,y,z) = a
if y == 'D' and len(difensori) < 5:
difensori[x] = z
panchina.pop(panchina.index(a))
return controllo_squadra(difensori, centrocampisti, attaccanti, panchina)
elif y == 'C' and len(centrocampisti) < 5:
centrocampisti[x] = z
panchina.pop(panchina.index(a))
return controllo_squadra(difensori, centrocampisti, attaccanti, panchina)
elif y == 'A' and len(attaccanti)<3:
attaccanti[x] = z
panchina.pop(panchina.index(a))
return controllo_squadra(difensori, centrocampisti, attaccanti, panchina)
else:
panchina.pop(panchina.index(a))
return controllo_squadra(difensori, centrocampisti, attaccanti, panchina)
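# Illustrative example of the "panchina libera" rule (hypothetical lineup): if only 3 defenders, 3 midfielders
# and 2 forwards received a rating, the module would be 332, which is not in moduli_validi. The function then
# scans the bench in order and promotes rated players (up to 5 defenders, 5 midfielders, 3 forwards) until a
# valid module such as 442 or 433 is reached, or the bench runs out.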
def substitutions(portiere, difensori, centrocampisti, attaccanti, panchina):
portiere = controllo_portiere(portiere, panchina)
(dif, centr, att) = controllo_squadra(difensori,centrocampisti,attaccanti, panchina)
return (portiere, dif, centr, att)
def calcola_risultato(coach, titolari, panchina, dataframe):
'''
Compute the final team score for one lineup.
:param coach:
:param titolari:
:param panchina:
:param dataframe:
:return:
'''
print "FORMAZIONE: " + coach
voti_panchina = [('Empty', 'E', '0.0')] * len(panchina)
final_score = 0.0
portiere = {}
difensori = {}
centrocampisti = {}
attaccanti = {}
for index, row in dataframe.iterrows():
a = unicode(row[1]) #unicode of the football player name
for x in titolari:
substitute = 0
if (x.upper() in a) and (row[6] != 's,v,' or (row[6] == 's,v,' and row[2] == 'P')):
player_score = float(row[32].replace(',','.'))
final_score += player_score
if row[2] == 'P':
portiere[row[1]] = player_score
elif row[2] == 'D':
difensori[row[1]] = player_score
elif row[2] == 'C':
centrocampisti[row[1]] = player_score
elif row[2] == 'A':
attaccanti[row[1]] = player_score
else:
raise NotImplementedError("Not defined Role")
else:
substitute += 1
for y in panchina:
if y.upper() in a:
voto = float(row[32].replace(',','.'))
voti_panchina.pop(panchina.index(y))
voti_panchina.insert(panchina.index(y),(row[1], row[2], voto))
(portiere, difensori, centrocampisti, attaccanti) = substitutions(portiere, difensori, centrocampisti, attaccanti, voti_panchina)
final_score = calcola_voti_base(coach, portiere, difensori, centrocampisti, attaccanti)
final_score += modificatore_centrocampo(centrocampisti)
final_score += modificatore_difesa(difensori, portiere)
return final_score
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Calcolo Voti Fantacalcio")
parser.add_argument("giornata", type=int, help="Giornata di Campionato (Serie A)")
parser.add_argument("file", help="File con la/e formazione/i")
args = parser.parse_args()
giornata = args.giornata
voti = download_voti(giornata)
d = parse_html(voti)
voti_fantacalcio(args.file, d)
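# Example invocation (illustrative; assumes this Python 2 script is saved as fantacalcio.py, a hypothetical name):
#   python2 fantacalcio.py 12 formazioni.txt
# This downloads the official ratings for matchday 12 and prints the score of every lineup in formazioni.txt.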
|
|
from mw import Timestamp  # Timestamp (mediawiki-utilities) is referenced in process_seconds_since() below
from . import modifiers
from ..datasources import parent_revision, revision
from .feature import Feature
from .revision import bytes as revision_bytes
from .util import MARKUP_RE, NUMERIC_RE, SYMBOLIC_RE
################################## Bytes #######################################
def process_bytes(parent_revision_metadata):
return parent_revision_metadata.bytes \
if parent_revision_metadata is not None else 0
bytes = Feature("parent_revision.bytes", process_bytes,
returns=int, depends_on=[parent_revision.metadata])
"""
Represents size of parent revision's content in bytes.
:Returns:
int
:Example:
.. code-block:: python
>>> from revscoring.features import parent_revision
>>> list(extractor.extract(655097130, [parent_revision.bytes]))
[23731]
"""
def process_was_same_user(parent_revision_metadata, revision_metadata):
parent_user_id = parent_revision_metadata.user_id \
if parent_revision_metadata is not None else None
parent_user_text = parent_revision_metadata.user_text \
if parent_revision_metadata is not None else None
return (parent_user_id is not None and
parent_user_id == revision_metadata.user_id) or \
(parent_user_text is not None and
parent_user_text == revision_metadata.user_text)
was_same_user = Feature("parent_revision.was_same_user", process_was_same_user,
returns=bool,
depends_on=[parent_revision.metadata,
revision.metadata])
"""
Represents whether the last edit was made by this user or not.
:Returns:
bool
:Example:
.. code-block:: python
>>> from revscoring.features import parent_revision
>>> list(extractor.extract(655097130, [parent_revision.was_same_user]))
[False]
"""
def process_seconds_since(parent_revision_metadata, revision_metadata):
revision_timestamp = revision_metadata.timestamp \
if revision_metadata is not None else Timestamp(0)
previous_timestamp = parent_revision_metadata.timestamp \
if parent_revision_metadata is not None and \
parent_revision_metadata.timestamp is not None \
else revision_timestamp
return revision_timestamp - previous_timestamp
seconds_since = Feature("parent_revision.seconds_since", process_seconds_since,
returns=int,
depends_on=[parent_revision.metadata,
revision.metadata])
"""
Represents time between this edit and the last edit in seconds.
:Returns:
int
:Example:
.. code-block:: python
>>> from revscoring.features import parent_revision
>>> list(extractor.extract(655097130, [parent_revision.seconds_since]))
[822837]
"""
################################# Characters #################################
def process_chars(parent_revision_text):
return len(parent_revision_text or "")
chars = Feature("parent_revision.chars", process_chars,
returns=int, depends_on=[parent_revision.text])
"""
Represents number of characters in parent revision's content.
:Returns:
int
:Example:
.. code-block:: python
>>> from revscoring.features import parent_revision
>>> list(extractor.extract(655097130, [parent_revision.chars]))
[23719]
"""
def process_markup_chars(parent_revision_text):
parent_revision_text = parent_revision_text or ""
return sum(len(m.group(0)) for m in MARKUP_RE.finditer(parent_revision_text))
markup_chars = Feature("parent_revision.markup_chars", process_markup_chars,
returns=int, depends_on=[parent_revision.text])
"""
Represents number of markup characters in parent revision's content.
:Returns:
int
:Example:
.. code-block:: python
>>> from revscoring.features import parent_revision
>>> list(extractor.extract(655097130, [parent_revision.markup_chars]))
[700]
"""
proportion_of_markup_chars = markup_chars / modifiers.max(chars, 1)
"""
Represents ratio of markup characters compared to all characters in parent
revision's content.
:Returns:
float
:Example:
.. code-block:: python
>>> from revscoring.features import parent_revision
>>> list(extractor.extract(655097130, [parent_revision.proportion_of_markup_chars]))
[0.02951220540494962]
"""
def process_numeric_chars(parent_revision_text):
parent_revision_text = parent_revision_text or ""
return sum(len(m.group(0)) for m in NUMERIC_RE.finditer(parent_revision_text))
numeric_chars = Feature("parent_revision.numeric_chars", process_numeric_chars,
returns=int, depends_on=[parent_revision.text])
"""
Represents number of numeric characters in parent revision's content.
:Returns:
int
:Example:
.. code-block:: python
>>> from revscoring.features import parent_revision
>>> list(extractor.extract(655097130, [parent_revision.numeric_chars]))
[203]
"""
proportion_of_numeric_chars = numeric_chars / modifiers.max(chars, 1)
"""
Represents ratio of numeric characters compared to all characters in parent
revision.
:Returns:
float
:Example:
.. code-block:: python
>>> from revscoring.features import parent_revision
>>> list(extractor.extract(655097130, [parent_revision.proportion_of_numeric_chars]))
[0.008558539567435389]
"""
def process_symbolic_chars(parent_revision_text):
parent_revision_text = parent_revision_text or ""
return sum(len(m.group(0)) for m in SYMBOLIC_RE.finditer(parent_revision_text))
symbolic_chars = Feature("parent_revision.symbolic_chars",
process_symbolic_chars,
returns=int, depends_on=[parent_revision.text])
"""
Represents number of symbolic characters in parent revision's content.
:Returns:
int
:Example:
.. code-block:: python
>>> from revscoring.features import parent_revision
>>> list(extractor.extract(655097130, [parent_revision.symbolic_chars]))
[2539]
"""
proportion_of_symbolic_chars = symbolic_chars / modifiers.max(chars, 1)
"""
Represents ratio of symbolic characters compared to all characters in parent
revision.
:Returns:
float
:Example:
.. code-block:: python
>>> from revscoring.features import parent_revision
>>> list(extractor.extract(655097130, [parent_revision.proportion_of_symbolic_chars]))
[0.10704498503309583]
"""
def process_uppercase_chars(parent_revision_text):
parent_revision_text = parent_revision_text or ""
return sum(c.lower() != c for c in parent_revision_text)
uppercase_chars = Feature("parent_revision.uppercase_chars",
process_uppercase_chars,
returns=int, depends_on=[parent_revision.text])
"""
Represents number of uppercase characters in parent revision's content.
:Returns:
int
:Example:
.. code-block:: python
>>> from revscoring.features import parent_revision
>>> list(extractor.extract(655097130, [parent_revision.uppercase_chars]))
[733]
"""
proportion_of_uppercase_chars = uppercase_chars / modifiers.max(chars, 1)
"""
Represents ratio of uppercase characters compared to all characters in parent
revision.
:Returns:
float
:Example:
.. code-block:: python
>>> from revscoring.features import parent_revision
>>> list(extractor.extract(655097130, [parent_revision.proportion_of_uppercase_chars]))
[0.030903495088325815]
"""
|
|
# Copyright 2016 The Gemmlowp Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""64bit ARM/NEON assembly emitter.
Used by code generators to produce ARM assembly with NEON SIMD code.
Provides tools for easier register management: named register variable
allocation/deallocation, and offers a more procedural/structured approach
to generating assembly.
"""
_WIDE_TYPES = {
8: 16,
16: 32,
32: 64,
'8': '16',
'16': '32',
'32': '64',
'i8': 'i16',
'i16': 'i32',
'i32': 'i64',
'u8': 'u16',
'u16': 'u32',
'u32': 'u64',
's8': 's16',
's16': 's32',
's32': 's64'
}
_NARROW_TYPES = {
64: 32,
32: 16,
16: 8,
'64': '32',
'32': '16',
'16': '8',
'i64': 'i32',
'i32': 'i16',
'i16': 'i8',
'u64': 'u32',
'u32': 'u16',
'u16': 'u8',
's64': 's32',
's32': 's16',
's16': 's8'
}
_TYPE_BITS = {
8: 8,
16: 16,
32: 32,
64: 64,
'8': 8,
'16': 16,
'32': 32,
'64': 64,
'i8': 8,
'i16': 16,
'i32': 32,
'i64': 64,
'u8': 8,
'u16': 16,
'u32': 32,
'u64': 64,
's8': 8,
's16': 16,
's32': 32,
's64': 64,
'f32': 32,
'f64': 64,
'b': 8,
'h': 16,
's': 32,
'd': 64
}
class Error(Exception):
"""Module level error."""
class RegisterAllocationError(Error):
"""Cannot alocate registers."""
class LaneError(Error):
"""Wrong lane number."""
class RegisterSubtypeError(Error):
"""The register needs to be lane-typed."""
class ArgumentError(Error):
"""Wrong argument."""
def _AppendType(type_name, register):
"""Calculates sizes and attaches the type information to the register."""
if register.register_type is not 'v':
raise ArgumentError('Only vector registers can have type appended.')
if type_name in set([8, '8', 'i8', 's8', 'u8']):
subtype = 'b'
subtype_bits = 8
elif type_name in set([16, '16', 'i16', 's16', 'u16']):
subtype = 'h'
subtype_bits = 16
elif type_name in set([32, '32', 'i32', 's32', 'u32', 'f32']):
subtype = 's'
subtype_bits = 32
elif type_name in set([64, '64', 'i64', 's64', 'u64', 'f64']):
subtype = 'd'
subtype_bits = 64
else:
raise ArgumentError('Unknown type: %s' % type_name)
new_register = register.Copy()
new_register.register_subtype = subtype
new_register.register_subtype_count = register.register_bits / subtype_bits
return new_register
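# For example (illustrative values): appending type 'u8' to a 128-bit vector register v0 yields the typed
# name 'v0.16b' (16 lanes of 8 bits), while appending 's32' to a 64-bit register v1 yields 'v1.2s'.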
def _UnsignedType(type_name):
return type_name in set(['u8', 'u16', 'u32', 'u64'])
def _FloatType(type_name):
return type_name in set(['f32', 'f64'])
def _WideType(type_name):
if type_name in _WIDE_TYPES.keys():
return _WIDE_TYPES[type_name]
else:
raise ArgumentError('No wide type for: %s' % type_name)
def _NarrowType(type_name):
if type_name in _NARROW_TYPES.keys():
return _NARROW_TYPES[type_name]
else:
raise ArgumentError('No narrow type for: %s' % type_name)
def _LoadStoreSize(register):
if register.lane is None:
return register.register_bits
else:
return register.lane_bits
def _MakeCompatibleDown(reg_1, reg_2, reg_3):
bits = min([reg_1.register_bits, reg_2.register_bits, reg_3.register_bits])
return (_Cast(bits, reg_1), _Cast(bits, reg_2), _Cast(bits, reg_3))
def _MakeCompatibleUp(reg_1, reg_2, reg_3):
bits = max([reg_1.register_bits, reg_2.register_bits, reg_3.register_bits])
return (_Cast(bits, reg_1), _Cast(bits, reg_2), _Cast(bits, reg_3))
def _Cast(bits, reg):
if reg.register_bits is bits:
return reg
else:
new_reg = reg.Copy()
new_reg.register_bits = bits
return new_reg
def _TypeBits(type_name):
if type_name in _TYPE_BITS.keys():
return _TYPE_BITS[type_name]
else:
raise ArgumentError('Unknown type: %s' % type_name)
def _RegisterList(list_type, registers):
lanes = list(set([register.lane for register in registers]))
if len(lanes) > 1:
raise ArgumentError('Cannot mix lanes on a register list.')
typed_registers = [_AppendType(list_type, register) for register in registers]
if lanes[0] is None:
return '{%s}' % ', '.join(map(str, typed_registers))
elif lanes[0] is -1:
raise ArgumentError('Cannot construct a list with all lane indexing.')
else:
typed_registers_nolane = [register.Copy() for register in typed_registers]
for register in typed_registers_nolane:
register.lane = None
register.register_subtype_count = None
return '{%s}[%d]' % (', '.join(map(str, typed_registers_nolane)), lanes[0])
class _GeneralRegister(object):
"""Arm v8 general register: (x|w)n."""
def __init__(self,
register_bits,
number,
dereference=False,
dereference_increment=False):
self.register_type = 'r'
self.register_bits = register_bits
self.number = number
self.dereference = dereference
self.dereference_increment = dereference_increment
def Copy(self):
return _GeneralRegister(self.register_bits, self.number, self.dereference,
self.dereference_increment)
def __repr__(self):
if self.register_bits is 64:
text = 'x%d' % self.number
elif self.register_bits <= 32:
text = 'w%d' % self.number
else:
raise RegisterSubtypeError('Wrong bits (%d) for general register: %d' %
(self.register_bits, self.number))
if self.dereference:
return '[%s]' % text
else:
return text
class _MappedParameter(object):
"""Object representing a C variable mapped to a register."""
def __init__(self,
name,
register_bits=64,
dereference=False,
dereference_increment=False):
self.name = name
self.register_bits = register_bits
self.dereference = dereference
self.dereference_increment = dereference_increment
def Copy(self):
return _MappedParameter(self.name, self.register_bits, self.dereference,
self.dereference_increment)
def __repr__(self):
if self.register_bits is 64:
text = '%%x[%s]' % self.name
elif self.register_bits <= 32:
text = '%%w[%s]' % self.name
else:
raise RegisterSubtypeError('Wrong bits (%d) for mapped parameter: %s' %
(self.register_bits, self.name))
if self.dereference:
return '[%s]' % text
else:
return text
class _VectorRegister(object):
"""Arm v8 vector register Vn.TT."""
def __init__(self,
register_bits,
number,
register_subtype=None,
register_subtype_count=None,
lane=None,
lane_bits=None):
self.register_type = 'v'
self.register_bits = register_bits
self.number = number
self.register_subtype = register_subtype
self.register_subtype_count = register_subtype_count
self.lane = lane
self.lane_bits = lane_bits
def Copy(self):
return _VectorRegister(self.register_bits, self.number,
self.register_subtype, self.register_subtype_count,
self.lane, self.lane_bits)
def __repr__(self):
if self.register_subtype is None:
raise RegisterSubtypeError('Register: %s%d has no lane types defined.' %
(self.register_type, self.number))
if (self.register_subtype_count is None or (self.lane is not None and
self.lane is not -1)):
typed_name = '%s%d.%s' % (self.register_type, self.number,
self.register_subtype)
else:
typed_name = '%s%d.%d%s' % (self.register_type, self.number,
self.register_subtype_count,
self.register_subtype)
if self.lane is None or self.lane is -1:
return typed_name
elif self.lane >= 0 and self.lane < self.register_subtype_count:
return '%s[%d]' % (typed_name, self.lane)
else:
raise LaneError('Wrong lane: %d for: %s' % (self.lane, typed_name))
class _ImmediateConstant(object):
def __init__(self, value):
self.register_type = 'i'
self.value = value
def Copy(self):
return _ImmediateConstant(self.value)
def __repr__(self):
return '#%d' % self.value
class _NeonRegisters64Bit(object):
"""Utility that keeps track of used 32bit ARM/NEON registers."""
def __init__(self):
self.vector = set()
self.vector_ever = set()
self.general = set()
self.general_ever = set()
self.parameters = dict()
self.output_parameters = dict()
def MapParameter(self, parameter, parameter_value=None):
if not parameter_value:
parameter_value = parameter
self.parameters[parameter] = (parameter_value, 'r')
return _MappedParameter(parameter)
def MapMemoryParameter(self, parameter, parameter_value=None):
if not parameter_value:
parameter_value = parameter
self.parameters[parameter] = (parameter_value, 'm')
return _MappedParameter(parameter)
def MapOutputParameter(self, parameter, parameter_value=None):
if not parameter_value:
parameter_value = parameter
self.output_parameters[parameter] = (parameter_value, '+r')
return _MappedParameter(parameter)
def _VectorRegisterNum(self, min_val=0):
for i in range(min_val, 32):
if i not in self.vector:
self.vector.add(i)
self.vector_ever.add(i)
return i
raise RegisterAllocationError('Not enough vector registers.')
def DoubleRegister(self, min_val=0):
return _VectorRegister(64, self._VectorRegisterNum(min_val))
def QuadRegister(self, min_val=0):
return _VectorRegister(128, self._VectorRegisterNum(min_val))
def GeneralRegister(self):
for i in range(0, 30):
if i not in self.general:
self.general.add(i)
self.general_ever.add(i)
return _GeneralRegister(64, i)
raise RegisterAllocationError('Not enough general registers.')
def MappedParameters(self):
return [x for x in self.parameters.items()]
def MappedOutputParameters(self):
return [x for x in self.output_parameters.items()]
def Clobbers(self):
return (
['x%d' % i
for i in self.general_ever] + ['v%d' % i for i in self.vector_ever])
def FreeRegister(self, register):
if register.register_type == 'v':
assert register.number in self.vector
self.vector.remove(register.number)
elif register.register_type == 'r':
assert register.number in self.general
self.general.remove(register.number)
else:
raise RegisterAllocationError('Register not allocated: %s%d' %
(register.register_type, register.number))
def FreeRegisters(self, registers):
for register in registers:
self.FreeRegister(register)
class NeonEmitter64(object):
"""Emits ARM/NEON 64bit assembly opcodes."""
def __init__(self, debug=False):
self.ops = {}
self.indent = ''
self.debug = debug
def PushIndent(self, delta_indent=' '):
self.indent += delta_indent
def PopIndent(self, delta=2):
self.indent = self.indent[:-delta]
def EmitIndented(self, what):
print self.indent + what
def PushOp(self, op):
if op in self.ops.keys():
self.ops[op] += 1
else:
self.ops[op] = 1
def ClearCounters(self):
self.ops.clear()
def EmitNewline(self):
print ''
def EmitPreprocessor1(self, op, param):
print '#%s %s' % (op, param)
def EmitPreprocessor(self, op):
print '#%s' % op
def EmitInclude(self, include):
self.EmitPreprocessor1('include', include)
def EmitCall1(self, function, param):
self.EmitIndented('%s(%s);' % (function, param))
def EmitAssert(self, assert_expression):
if self.debug:
self.EmitCall1('assert', assert_expression)
def EmitHeaderBegin(self, header_name, includes):
self.EmitPreprocessor1('ifndef', (header_name + '_H_').upper())
self.EmitPreprocessor1('define', (header_name + '_H_').upper())
self.EmitNewline()
if includes:
for include in includes:
self.EmitInclude(include)
self.EmitNewline()
def EmitHeaderEnd(self):
self.EmitPreprocessor('endif')
def EmitCode(self, code):
self.EmitIndented('%s;' % code)
def EmitFunctionBeginA(self, function_name, params, return_type):
self.EmitIndented('%s %s(%s) {' %
(return_type, function_name,
', '.join(['%s %s' % (t, n) for (t, n) in params])))
self.PushIndent()
def EmitFunctionEnd(self):
self.PopIndent()
self.EmitIndented('}')
def EmitAsmBegin(self):
self.EmitIndented('asm volatile(')
self.PushIndent()
def EmitAsmMapping(self, elements):
if elements:
self.EmitIndented(': ' + ', '.join(
['[%s] "%s"(%s)' % (k, v[1], v[0]) for (k, v) in elements]))
else:
self.EmitIndented(':')
def EmitClobbers(self, elements):
if elements:
self.EmitIndented(': ' + ', '.join(['"%s"' % c for c in elements]))
else:
self.EmitIndented(':')
def EmitAsmEnd(self, registers):
self.EmitAsmMapping(registers.MappedOutputParameters())
self.EmitAsmMapping(registers.MappedParameters())
self.EmitClobbers(registers.Clobbers() + ['cc', 'memory'])
self.PopIndent()
self.EmitIndented(');')
def EmitComment(self, comment):
self.EmitIndented('// ' + comment)
def EmitNumericalLabel(self, label):
self.EmitIndented('"%d:"' % label)
def EmitOp1(self, op, param1):
self.PushOp(op)
self.EmitIndented('"%s %s\\n"' % (op, param1))
def EmitOp2(self, op, param1, param2):
self.PushOp(op)
self.EmitIndented('"%s %s, %s\\n"' % (op, param1, param2))
def EmitOp3(self, op, param1, param2, param3):
self.PushOp(op)
self.EmitIndented('"%s %s, %s, %s\\n"' % (op, param1, param2, param3))
def EmitAdd(self, destination, source, param):
self.EmitOp3('add', destination, source, param)
def EmitSubs(self, destination, source, param):
self.EmitOp3('subs', destination, source, param)
def EmitSub(self, destination, source, param):
self.EmitOp3('sub', destination, source, param)
def EmitMul(self, destination, source, param):
self.EmitOp3('mul', destination, source, param)
def EmitMov(self, param1, param2):
self.EmitOp2('mov', param1, param2)
def EmitVMovl(self, mov_type, destination, source):
wide_type = _WideType(mov_type)
destination = _AppendType(wide_type, destination)
source = _AppendType(mov_type, _Cast(source.register_bits / 2, source))
if _UnsignedType(mov_type):
self.EmitOp2('uxtl', destination, source)
else:
self.EmitOp2('sxtl', destination, source)
def EmitVMovl2(self, mov_type, destination_1, destination_2, source):
wide_type = _WideType(mov_type)
if (destination_1.register_bits != source.register_bits or
destination_2.register_bits != source.register_bits):
raise ArgumentError('Register sizes do not match.')
if _UnsignedType(mov_type):
self.EmitOp2('uxtl2',
_AppendType(wide_type, destination_2),
_AppendType(mov_type, source))
self.EmitOp2('uxtl',
_AppendType(wide_type, destination_1),
_AppendType(mov_type,
_Cast(source.register_bits / 2, source)))
else:
self.EmitOp2('sxtl2',
_AppendType(wide_type, destination_2),
_AppendType(mov_type, source))
self.EmitOp2('sxtl',
_AppendType(wide_type, destination_1),
_AppendType(mov_type,
_Cast(source.register_bits / 2, source)))
def EmitVMax(self, max_type, destination, source_1, source_2):
if _UnsignedType(max_type):
self.EmitOp3('umax',
_AppendType(max_type, destination),
_AppendType(max_type, source_1),
_AppendType(max_type, source_2))
else:
self.EmitOp3('smax',
_AppendType(max_type, destination),
_AppendType(max_type, source_1),
_AppendType(max_type, source_2))
def EmitVMin(self, min_type, destination, source_1, source_2):
if _UnsignedType(min_type):
self.EmitOp3('umin',
_AppendType(min_type, destination),
_AppendType(min_type, source_1),
_AppendType(min_type, source_2))
else:
self.EmitOp3('smin',
_AppendType(min_type, destination),
_AppendType(min_type, source_1),
_AppendType(min_type, source_2))
def EmitBeqBack(self, label):
self.EmitOp1('beq', '%db' % label)
def EmitBeqFront(self, label):
self.EmitOp1('beq', '%df' % label)
def EmitBgtBack(self, label):
self.EmitOp1('bgt', '%db' % label)
def EmitBgtFront(self, label):
self.EmitOp1('bgt', '%df' % label)
def EmitBleBack(self, label):
self.EmitOp1('ble', '%db' % label)
def EmitBleFront(self, label):
self.EmitOp1('ble', '%df' % label)
def EmitBneBack(self, label):
self.EmitOp1('bne', '%db' % label)
def EmitBneFront(self, label):
self.EmitOp1('bne', '%df' % label)
def EmitVAdd(self, add_type, destination, source_1, source_2):
destination, source_1, source_2 = _MakeCompatibleDown(destination, source_1,
source_2)
if _FloatType(add_type):
self.EmitOp3('fadd',
_AppendType(add_type, destination),
_AppendType(add_type, source_1),
_AppendType(add_type, source_2))
else:
self.EmitOp3('add',
_AppendType(add_type, destination),
_AppendType(add_type, source_1),
_AppendType(add_type, source_2))
def EmitVAddw(self, add_type, destination, source_1, source_2):
wide_type = _WideType(add_type)
destination = _AppendType(wide_type, destination)
source_1 = _AppendType(wide_type, source_1)
source_2 = _AppendType(add_type, source_2)
if _UnsignedType(add_type):
self.EmitOp3('uaddw', destination, source_1, source_2)
else:
self.EmitOp3('saddw', destination, source_1, source_2)
def EmitVSub(self, sub_type, destination, source_1, source_2):
destination, source_1, source_2 = _MakeCompatibleDown(destination, source_1,
source_2)
if _FloatType(sub_type):
self.EmitOp3('fsub',
_AppendType(sub_type, destination),
_AppendType(sub_type, source_1),
_AppendType(sub_type, source_2))
else:
self.EmitOp3('sub',
_AppendType(sub_type, destination),
_AppendType(sub_type, source_1),
_AppendType(sub_type, source_2))
def EmitVCvt(self, cvt_to, cvt_from, destination, source):
if cvt_to == 'f32' and cvt_from == 's32':
self.EmitOp2('scvtf',
_AppendType('f32', destination), _AppendType('s32', source))
elif cvt_to == 'f32' and cvt_from == 'u32':
self.EmitOp2('ucvtf',
_AppendType('f32', destination), _AppendType('u32', source))
elif cvt_to == 's32' and cvt_from == 'f32':
self.EmitOp2('fcvtzs',
_AppendType('s32', destination), _AppendType('f32', source))
else:
raise ArgumentError('Convert not supported, to: %s from: %s' % (cvt_to,
cvt_from))
def EmitVDup(self, dup_type, destination, source):
if (isinstance(source, _GeneralRegister) or
isinstance(source, _MappedParameter)):
self.EmitOp2('dup',
_AppendType(dup_type, destination),
_Cast(_TypeBits(dup_type), source))
else:
self.EmitOp2('dup',
_AppendType(dup_type, destination),
_AppendType(dup_type, source))
def EmitVMov(self, mov_type, destination, source):
if isinstance(source, _ImmediateConstant):
self.EmitOp2('movi', _AppendType(mov_type, destination), source)
elif (isinstance(source, _GeneralRegister) or
isinstance(source, _MappedParameter)):
self.EmitOp2('mov',
_AppendType(mov_type, destination),
_Cast(_TypeBits(mov_type), source))
else:
self.EmitOp2('mov', _AppendType(8, destination), _AppendType(8, source))
def EmitVQmovn(self, mov_type, destination, source):
narrow_type = _NarrowType(mov_type)
if destination.register_bits * 2 == source.register_bits:
self.EmitOp2('sqxtn',
_AppendType(narrow_type, destination),
_AppendType(mov_type, source))
elif destination.register_bits == source.register_bits:
self.EmitOp2('sqxtn',
_AppendType(narrow_type,
_Cast(destination.register_bits / 2,
destination)),
_AppendType(mov_type, source))
def EmitVQmovn2(self, mov_type, destination, source_1, source_2):
narrow_type = _NarrowType(mov_type)
if (destination.register_bits != source_1.register_bits or
destination.register_bits != source_2.register_bits):
raise ArgumentError('Register sizes do not match.')
self.EmitOp2('sqxtn',
_AppendType(narrow_type,
_Cast(destination.register_bits / 2, destination)),
_AppendType(mov_type, source_1))
self.EmitOp2('sqxtn2',
_AppendType(narrow_type, destination),
_AppendType(mov_type, source_2))
def EmitVQmovun(self, mov_type, destination, source):
narrow_type = _NarrowType(mov_type)
if destination.register_bits * 2 == source.register_bits:
self.EmitOp2('sqxtun',
_AppendType(narrow_type, destination),
_AppendType(mov_type, source))
elif destination.register_bits == source.register_bits:
self.EmitOp2('sqxtun',
_AppendType(narrow_type,
_Cast(destination.register_bits / 2,
destination)),
_AppendType(mov_type, source))
def EmitVQmovun2(self, mov_type, destination, source_1, source_2):
narrow_type = _NarrowType(mov_type)
if (destination.register_bits != source_1.register_bits or
destination.register_bits != source_2.register_bits):
raise ArgumentError('Register sizes do not match.')
self.EmitOp2('sqxtun',
_AppendType(narrow_type,
_Cast(destination.register_bits / 2, destination)),
_AppendType(mov_type, source_1))
self.EmitOp2('sqxtun2',
_AppendType(narrow_type, destination),
_AppendType(mov_type, source_2))
def EmitVMul(self, mul_type, destination, source_1, source_2):
destination, source_1, source_2 = _MakeCompatibleDown(destination, source_1,
source_2)
if _FloatType(mul_type):
self.EmitOp3('fmul',
_AppendType(mul_type, destination),
_AppendType(mul_type, source_1),
_AppendType(mul_type, source_2))
else:
self.EmitOp3('mul',
_AppendType(mul_type, destination),
_AppendType(mul_type, source_1),
_AppendType(mul_type, source_2))
def EmitVMulScalar(self, mul_type, destination, source_1, source_2):
self.EmitOp3('mul',
_AppendType(mul_type, destination),
_AppendType(mul_type, source_1),
_AppendType(mul_type, source_2))
def EmitVMull(self, mul_type, destination, source_1, source_2):
wide_type = _WideType(mul_type)
if _UnsignedType(mul_type):
self.EmitOp3('umull',
_AppendType(wide_type, destination),
_AppendType(mul_type, source_1),
_AppendType(mul_type, source_2))
else:
self.EmitOp3('smull',
_AppendType(wide_type, destination),
_AppendType(mul_type, source_1),
_AppendType(mul_type, source_2))
def EmitVPadd(self, add_type, destination, source_1, source_2):
self.EmitOp3('addp',
_AppendType(add_type, destination),
_AppendType(add_type, source_1),
_AppendType(add_type, source_2))
def EmitVPaddl(self, add_type, destination, source):
wide_type = _WideType(add_type)
if _UnsignedType(add_type):
self.EmitOp2('uaddlp',
_AppendType(wide_type, destination),
_AppendType(add_type, source))
else:
self.EmitOp2('saddlp',
_AppendType(wide_type, destination),
_AppendType(add_type, source))
def EmitVPadal(self, add_type, destination, source):
wide_type = _WideType(add_type)
if _UnsignedType(add_type):
self.EmitOp2('uadalp',
_AppendType(wide_type, destination),
_AppendType(add_type, source))
else:
self.EmitOp2('sadalp',
_AppendType(wide_type, destination),
_AppendType(add_type, source))
def EmitLdr(self, register, value):
self.EmitOp2('ldr', _Cast(32, register), value)
def EmitVLoad(self, load_no, load_type, destination, source):
self.EmitVLoadA(load_no, load_type, [destination], source)
def EmitVLoadA(self, load_no, load_type, destinations, source):
if source.dereference_increment:
increment = sum(
[_LoadStoreSize(destination) for destination in destinations]) / 8
self.EmitVLoadAPostIncrement(load_no, load_type, destinations, source,
self.ImmediateConstant(increment))
else:
self.EmitVLoadAPostIncrement(load_no, load_type, destinations, source,
None)
def EmitVLoadAPostIncrement(self, load_no, load_type, destinations, source,
increment):
"""Generate assembly to load memory to registers and increment source."""
if len(destinations) == 1 and destinations[0].lane is -1:
destination = '{%s}' % _AppendType(load_type, destinations[0])
if increment:
self.EmitOp3('ld%dr' % load_no, destination, source, increment)
else:
self.EmitOp2('ld%dr' % load_no, destination, source)
return
destination_list = _RegisterList(load_type, destinations)
if increment:
self.EmitOp3('ld%d' % load_no, destination_list, source, increment)
else:
self.EmitOp2('ld%d' % load_no, destination_list, source)
def EmitVLoadAE(self,
load_type,
elem_count,
destinations,
source,
alignment=None):
"""Generate assembly to load an array of elements of given size."""
bits_to_load = load_type * elem_count
min_bits = min([destination.register_bits for destination in destinations])
max_bits = max([destination.register_bits for destination in destinations])
if min_bits is not max_bits:
raise ArgumentError('Cannot mix double and quad loads.')
if len(destinations) * min_bits < bits_to_load:
raise ArgumentError('Too few destinations: %d to load %d bits.' %
(len(destinations), bits_to_load))
leftover_loaded = 0
while bits_to_load > 0:
if bits_to_load >= 4 * min_bits:
self.EmitVLoadA(1, 32, destinations[:4],
self.DereferenceIncrement(source, alignment))
bits_to_load -= 4 * min_bits
destinations = destinations[4:]
elif bits_to_load >= 3 * min_bits:
self.EmitVLoadA(1, 32, destinations[:3],
self.DereferenceIncrement(source, alignment))
bits_to_load -= 3 * min_bits
destinations = destinations[3:]
elif bits_to_load >= 2 * min_bits:
self.EmitVLoadA(1, 32, destinations[:2],
self.DereferenceIncrement(source, alignment))
bits_to_load -= 2 * min_bits
destinations = destinations[2:]
elif bits_to_load >= min_bits:
self.EmitVLoad(1, 32, destinations[0],
self.DereferenceIncrement(source, alignment))
bits_to_load -= min_bits
destinations = destinations[1:]
elif bits_to_load >= 64:
self.EmitVLoad(1, 32,
_Cast(64, destinations[0]),
self.DereferenceIncrement(source))
bits_to_load -= 64
leftover_loaded += 64
elif bits_to_load >= 32:
self.EmitVLoad(1, 32,
self.Lane(32, destinations[0], leftover_loaded / 32),
self.DereferenceIncrement(source))
bits_to_load -= 32
leftover_loaded += 32
elif bits_to_load >= 16:
self.EmitVLoad(1, 16,
self.Lane(16, destinations[0], leftover_loaded / 16),
self.DereferenceIncrement(source))
bits_to_load -= 16
leftover_loaded += 16
elif bits_to_load is 8:
self.EmitVLoad(1, 8,
self.Lane(8, destinations[0], leftover_loaded / 8),
self.DereferenceIncrement(source))
bits_to_load -= 8
leftover_loaded += 8
else:
raise ArgumentError('Wrong leftover: %d' % bits_to_load)
def EmitVLoadE(self, load_type, count, destination, source, alignment=None):
self.EmitVLoadAE(load_type, count, [destination], source, alignment)
def EmitVLoadAllLanes(self, load_no, load_type, destination, source):
new_destination = destination.Copy()
new_destination.lane = -1
new_destination.lane_bits = load_type
self.EmitVLoad(load_no, load_type, new_destination, source)
def EmitVLoadOffset(self, load_no, load_type, destination, source, offset):
self.EmitVLoadOffsetA(load_no, load_type, [destination], source, offset)
def EmitVLoadOffsetA(self, load_no, load_type, destinations, source, offset):
assert len(destinations) <= 4
self.EmitOp3('ld%d' % load_no,
_RegisterList(load_type, destinations), source, offset)
def EmitPld(self, load_address_register):
self.EmitOp2('prfm', 'pldl1keep', '[%s]' % load_address_register)
def EmitPldOffset(self, load_address_register, offset):
self.EmitOp2('prfm', 'pldl1keep',
'[%s, %s]' % (load_address_register, offset))
def EmitVShl(self, shift_type, destination, source, shift):
self.EmitOp3('sshl',
_AppendType(shift_type, destination),
_AppendType(shift_type, source), _AppendType('i32', shift))
def EmitVStore(self, store_no, store_type, source, destination):
self.EmitVStoreA(store_no, store_type, [source], destination)
def EmitVStoreA(self, store_no, store_type, sources, destination):
if destination.dereference_increment:
increment = sum([_LoadStoreSize(source) for source in sources]) / 8
self.EmitVStoreAPostIncrement(store_no, store_type, sources, destination,
self.ImmediateConstant(increment))
else:
self.EmitVStoreAPostIncrement(store_no, store_type, sources, destination,
None)
def EmitVStoreAPostIncrement(self, store_no, store_type, sources, destination,
increment):
source_list = _RegisterList(store_type, sources)
if increment:
self.EmitOp3('st%d' % store_no, source_list, destination, increment)
else:
self.EmitOp2('st%d' % store_no, source_list, destination)
def EmitVStoreAE(self,
store_type,
elem_count,
sources,
destination,
alignment=None):
"""Generate assembly to store an array of elements of given size."""
bits_to_store = store_type * elem_count
min_bits = min([source.register_bits for source in sources])
max_bits = max([source.register_bits for source in sources])
if min_bits is not max_bits:
raise ArgumentError('Cannot mix double and quad stores.')
if len(sources) * min_bits < bits_to_store:
raise ArgumentError('Too few sources: %d to store %d bits.' %
(len(sources), bits_to_store))
leftover_stored = 0
while bits_to_store > 0:
if bits_to_store >= 4 * min_bits:
self.EmitVStoreA(1, 32, sources[:4],
self.DereferenceIncrement(destination, alignment))
bits_to_store -= 4 * min_bits
sources = sources[4:]
elif bits_to_store >= 3 * min_bits:
self.EmitVStoreA(1, 32, sources[:3],
self.DereferenceIncrement(destination, alignment))
bits_to_store -= 3 * min_bits
sources = sources[3:]
elif bits_to_store >= 2 * min_bits:
self.EmitVStoreA(1, 32, sources[:2],
self.DereferenceIncrement(destination, alignment))
bits_to_store -= 2 * min_bits
sources = sources[2:]
elif bits_to_store >= min_bits:
self.EmitVStore(1, 32, sources[0],
self.DereferenceIncrement(destination, alignment))
bits_to_store -= min_bits
sources = sources[1:]
elif bits_to_store >= 64:
self.EmitVStore(1, 32,
_Cast(64, sources[0]),
self.DereferenceIncrement(destination, alignment))
bits_to_store -= 64
leftover_stored += 64
elif bits_to_store >= 32:
self.EmitVStore(1, 32,
self.Lane(32, sources[0], leftover_stored / 32),
self.DereferenceIncrement(destination))
bits_to_store -= 32
leftover_stored += 32
elif bits_to_store >= 16:
self.EmitVStore(1, 16,
self.Lane(16, sources[0], leftover_stored / 16),
self.DereferenceIncrement(destination))
bits_to_store -= 16
leftover_stored += 16
elif bits_to_store >= 8:
self.EmitVStore(1, 8,
self.Lane(8, sources[0], leftover_stored / 8),
self.DereferenceIncrement(destination))
bits_to_store -= 8
leftover_stored += 8
else:
raise ArgumentError('Wrong leftover: %d' % bits_to_store)
def EmitVStoreE(self, store_type, count, source, destination, alignment=None):
self.EmitVStoreAE(store_type, count, [source], destination, alignment)
def EmitVStoreOffset(self, store_no, store_type, source, destination, offset):
self.EmitVStoreOffsetA(store_no, store_type, [source], destination, offset)
def EmitVStoreOffsetA(self, store_no, store_type, sources, destination,
offset):
self.EmitOp3('st%d' % store_no,
_RegisterList(store_type, sources), destination, offset)
def EmitVStoreOffsetE(self, store_type, count, source, destination, offset):
if store_type is not 32:
raise ArgumentError('Unsupported store_type: %d' % store_type)
if count == 1:
self.EmitVStoreOffset(1, 32,
self.Lane(32, source, 0),
self.Dereference(destination, None), offset)
elif count == 2:
self.EmitVStoreOffset(1, 32,
_Cast(64, source),
self.Dereference(destination, None), offset)
elif count == 3:
self.EmitVStore(1, 32,
_Cast(64, source),
self.DereferenceIncrement(destination, None))
self.EmitVStoreOffset(1, 32,
self.Lane(32, source, 2),
self.Dereference(destination, None), offset)
self.EmitSub(destination, destination, self.ImmediateConstant(8))
elif count == 4:
self.EmitVStoreOffset(1, 32, source,
self.Dereference(destination, None), offset)
else:
raise ArgumentError('Too many elements: %d' % count)
def EmitVSumReduce(self, reduce_type, elem_count, reduce_count, destinations,
sources):
"""Generate assembly to perform n-fold horizontal sum reduction."""
if reduce_type is not 'u32':
raise ArgumentError('Unsupported reduce: %s' % reduce_type)
if (elem_count + 3) / 4 > len(destinations):
raise ArgumentError('Too few destinations: %d (%d needed)' %
(len(destinations), (elem_count + 3) / 4))
if elem_count * reduce_count > len(sources) * 4:
raise ArgumentError('Too few sources: %d' % len(sources))
if reduce_count <= 1:
raise ArgumentError('Unsupported reduce_count: %d' % reduce_count)
sources = [_Cast(128, source) for source in sources]
destinations = [_Cast(128, destination) for destination in destinations]
while reduce_count > 1:
if len(sources) % 2 == 1:
sources.append(sources[-1])
if reduce_count == 2:
for i in range(len(destinations)):
self.EmitVPadd(reduce_type, destinations[i], sources[2 * i],
sources[2 * i + 1])
return
else:
sources_2 = []
for i in range(len(sources) / 2):
self.EmitVPadd(reduce_type, sources[2 * i], sources[2 * i],
sources[2 * i + 1])
sources_2.append(sources[2 * i])
reduce_count /= 2
sources = sources_2
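# Illustrative trace (hypothetical registers): with reduce_type='u32', elem_count=4, reduce_count=4, one quad
# destination d and four quad sources s0..s3, the first pass emits addp(s0, s0, s1) and addp(s2, s2, s3), the
# second pass emits addp(d, s0, s2); lane i of d then holds the horizontal sum of the four u32 lanes of the
# original source si.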
def EmitVUzp1(self, uzp_type, destination, source_1, source_2):
self.EmitOp3('uzp1',
_AppendType(uzp_type, destination),
_AppendType(uzp_type, source_1),
_AppendType(uzp_type, source_2))
def EmitVUzp2(self, uzp_type, destination, source_1, source_2):
self.EmitOp3('uzp2',
_AppendType(uzp_type, destination),
_AppendType(uzp_type, source_1),
_AppendType(uzp_type, source_2))
def EmitVUzp(self, uzp_type, destination_1, destination_2, source_1,
source_2):
self.EmitVUzp1(uzp_type, destination_1, source_1, source_2)
self.EmitVUzp2(uzp_type, destination_2, source_1, source_2)
def EmitVTrn1(self, trn_type, destination, source_1, source_2):
self.EmitOp3('trn1',
_AppendType(trn_type, destination),
_AppendType(trn_type, source_1),
_AppendType(trn_type, source_2))
def EmitVTrn2(self, trn_type, destination, source_1, source_2):
self.EmitOp3('trn2',
_AppendType(trn_type, destination),
_AppendType(trn_type, source_1),
_AppendType(trn_type, source_2))
def EmitVTrn(self, trn_type, destination_1, destination_2, source_1,
source_2):
self.EmitVTrn1(trn_type, destination_1, source_1, source_2)
self.EmitVTrn2(trn_type, destination_2, source_1, source_2)
def EmitColBlockStride(self, cols, stride, new_stride):
assert cols in [1, 2, 3, 4, 5, 6, 7, 8]
if cols in [5, 6, 7]:
self.EmitSub(new_stride, stride, self.ImmediateConstant(4))
def EmitLoadColBlock(self, registers, load_type, cols, elements, block,
input_address, stride):
assert cols is len(block)
assert load_type is 8
input_deref = self.Dereference(input_address, None)
input_deref_increment = self.DereferenceIncrement(input_address, None)
if cols is 1:
for i in range(elements):
self.EmitVLoadOffset(1, 8,
self.Lane(8, block[0], i), input_deref, stride)
self.EmitPld(input_address)
return block
elif cols is 2:
temp = [registers.DoubleRegister() for unused_i in range(2)]
for i in range(elements):
self.EmitVLoadOffset(1, 16,
self.Lane(16, block[i / 4], i % 4), input_deref,
stride)
self.EmitPld(input_address)
self.EmitVUzp(8, temp[0], temp[1], block[0], block[1])
registers.FreeRegisters(block)
return temp
elif cols is 3:
for i in range(elements):
self.EmitVLoadOffsetA(3, 8, [self.Lane(8, row, i) for row in block],
input_deref, stride)
self.EmitPld(input_address)
return block
elif cols is 4:
temp = [registers.DoubleRegister() for unused_i in range(4)]
for i in range(elements):
self.EmitVLoadOffset(1, 32,
self.Lane(32, block[i % 4], i / 4), input_deref,
stride)
self.EmitPld(input_address)
self.EmitVTrn(16, temp[0], temp[2], block[0], block[2])
self.EmitVTrn(16, temp[1], temp[3], block[1], block[3])
self.EmitVTrn(8, block[0], block[1], temp[0], temp[1])
self.EmitVTrn(8, block[2], block[3], temp[2], temp[3])
registers.FreeRegisters(temp)
return block
elif cols is 5:
temp = [registers.DoubleRegister() for unused_i in range(4)]
for i in range(elements):
self.EmitVLoad(1, 32,
self.Lane(32, block[i % 4], i / 4),
input_deref_increment)
self.EmitVLoadOffset(1, 8,
self.Lane(8, block[4], i), input_deref, stride)
self.EmitPld(input_address)
self.EmitVTrn(16, temp[0], temp[2], block[0], block[2])
self.EmitVTrn(16, temp[1], temp[3], block[1], block[3])
self.EmitVTrn(8, block[0], block[1], temp[0], temp[1])
self.EmitVTrn(8, block[2], block[3], temp[2], temp[3])
registers.FreeRegisters(temp)
return block
elif cols is 6:
temp = [registers.DoubleRegister() for unused_i in range(6)]
for i in range(elements):
self.EmitVLoad(1, 32,
self.Lane(32, block[i % 4], i / 4),
input_deref_increment)
self.EmitVLoadOffset(1, 16,
self.Lane(16, block[4 + i / 4], i % 4),
input_deref, stride)
self.EmitPld(input_address)
self.EmitVTrn(16, temp[0], temp[2], block[0], block[2])
self.EmitVTrn(16, temp[1], temp[3], block[1], block[3])
self.EmitVUzp(8, temp[4], temp[5], block[4], block[5])
self.EmitVTrn(8, block[0], block[1], temp[0], temp[1])
self.EmitVTrn(8, block[2], block[3], temp[2], temp[3])
registers.FreeRegisters(
[block[4], block[5], temp[0], temp[1], temp[2], temp[3]])
return [block[0], block[1], block[2], block[3], temp[4], temp[5]]
elif cols is 7:
temp = [registers.DoubleRegister() for unused_i in range(4)]
for i in range(elements):
self.EmitVLoad(1, 32,
self.Lane(32, block[i % 4], i / 4),
input_deref_increment)
self.EmitVLoadOffsetA(3, 8,
[self.Lane(8, row, i) for row in block[4:]],
input_deref, stride)
self.EmitPld(input_address)
self.EmitVTrn1(16, temp[0], block[0], block[2])
self.EmitVTrn2(16, temp[2], block[0], block[2])
self.EmitVTrn1(16, temp[1], block[1], block[3])
self.EmitVTrn2(16, temp[3], block[1], block[3])
self.EmitVTrn1(8, block[0], temp[0], temp[1])
self.EmitVTrn2(8, block[1], temp[0], temp[1])
self.EmitVTrn1(8, block[2], temp[2], temp[3])
self.EmitVTrn2(8, block[3], temp[2], temp[3])
registers.FreeRegisters(temp)
return block
elif cols is 8:
temp = [registers.DoubleRegister() for unused_i in range(8)]
for i in range(elements):
self.EmitVLoadOffset(1, 32, block[i], input_deref, stride)
self.EmitPld(input_address)
self.EmitVTrn(8, temp[0], temp[1], block[0], block[1])
self.EmitVTrn(8, temp[2], temp[3], block[2], block[3])
self.EmitVTrn(8, temp[4], temp[5], block[4], block[5])
self.EmitVTrn(8, temp[6], temp[7], block[6], block[7])
self.EmitVTrn(16, block[0], block[2], temp[0], temp[2])
self.EmitVTrn(16, block[1], block[3], temp[1], temp[3])
self.EmitVTrn(16, block[4], block[6], temp[4], temp[6])
self.EmitVTrn(16, block[5], block[7], temp[5], temp[7])
self.EmitVTrn(32, temp[0], temp[4], block[0], block[4])
self.EmitVTrn(32, temp[1], temp[5], block[1], block[5])
self.EmitVTrn(32, temp[2], temp[6], block[2], block[6])
self.EmitVTrn(32, temp[3], temp[7], block[3], block[7])
registers.FreeRegisters(block)
return temp
else:
assert False
def Dereference(self, value, unused_alignment=None):
new_value = value.Copy()
new_value.dereference = True
return new_value
def DereferenceIncrement(self, value, alignment=None):
new_value = self.Dereference(value, alignment).Copy()
new_value.dereference_increment = True
return new_value
def ImmediateConstant(self, value):
return _ImmediateConstant(value)
def AllLanes(self, value):
return '%s[]' % value
def Lane(self, bits, value, lane):
new_value = value.Copy()
if bits * (lane + 1) > new_value.register_bits:
raise ArgumentError('Lane too big: (%d + 1) x %d > %d' %
(lane, bits, new_value.register_bits))
new_value.lane = lane
new_value.lane_bits = bits
return new_value
def CreateRegisters(self):
return _NeonRegisters64Bit()
|
|
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the volume RPC API.
"""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from cinder.objects import base as objects_base
from cinder import rpc
from cinder.volume import utils
CONF = cfg.CONF
class VolumeAPI(object):
"""Client side of the volume rpc API.
API version history:
1.0 - Initial version.
1.1 - Adds clone volume option to create_volume.
1.2 - Add publish_service_capabilities() method.
1.3 - Pass all image metadata (not just ID) in copy_volume_to_image.
1.4 - Add request_spec, filter_properties and
allow_reschedule arguments to create_volume().
1.5 - Add accept_transfer.
1.6 - Add extend_volume.
1.7 - Adds host_name parameter to attach_volume()
to allow attaching to host rather than instance.
1.8 - Add migrate_volume, rename_volume.
1.9 - Add new_user and new_project to accept_transfer.
1.10 - Add migrate_volume_completion, remove rename_volume.
1.11 - Adds mode parameter to attach_volume()
to support volume read-only attaching.
1.12 - Adds retype.
1.13 - Adds create_export.
1.14 - Adds reservation parameter to extend_volume().
1.15 - Adds manage_existing and unmanage_only flag to delete_volume.
1.16 - Removes create_export.
1.17 - Add replica option to create_volume, promote_replica and
sync_replica.
1.18 - Adds create_consistencygroup, delete_consistencygroup,
create_cgsnapshot, and delete_cgsnapshot. Also adds
the consistencygroup_id parameter in create_volume.
1.19 - Adds update_migrated_volume
1.20 - Adds support for sending objects over RPC in create_snapshot()
and delete_snapshot()
1.21 - Adds update_consistencygroup.
1.22 - Adds create_consistencygroup_from_src.
1.23 - Adds attachment_id to detach_volume.
1.24 - Removed duplicated parameters: snapshot_id, image_id,
source_volid, source_replicaid, consistencygroup_id and
cgsnapshot_id from create_volume. All of them are already
passed either in request_spec or available in the DB.
1.25 - Add source_cg to create_consistencygroup_from_src.
1.26 - Adds support for sending objects over RPC in
create_consistencygroup(), create_consistencygroup_from_src(),
update_consistencygroup() and delete_consistencygroup().
1.27 - Adds support for replication V2
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic=None):
super(VolumeAPI, self).__init__()
target = messaging.Target(topic=CONF.volume_topic,
version=self.BASE_RPC_API_VERSION)
serializer = objects_base.CinderObjectSerializer()
self.client = rpc.get_client(target, '1.27', serializer=serializer)
def create_consistencygroup(self, ctxt, group, host):
new_host = utils.extract_host(host)
cctxt = self.client.prepare(server=new_host, version='1.26')
cctxt.cast(ctxt, 'create_consistencygroup',
group=group)
def delete_consistencygroup(self, ctxt, group):
host = utils.extract_host(group.host)
cctxt = self.client.prepare(server=host, version='1.26')
cctxt.cast(ctxt, 'delete_consistencygroup',
group=group)
def update_consistencygroup(self, ctxt, group, add_volumes=None,
remove_volumes=None):
host = utils.extract_host(group.host)
cctxt = self.client.prepare(server=host, version='1.26')
cctxt.cast(ctxt, 'update_consistencygroup',
group=group,
add_volumes=add_volumes,
remove_volumes=remove_volumes)
def create_consistencygroup_from_src(self, ctxt, group, cgsnapshot=None,
source_cg=None):
new_host = utils.extract_host(group.host)
cctxt = self.client.prepare(server=new_host, version='1.26')
cctxt.cast(ctxt, 'create_consistencygroup_from_src',
group=group,
cgsnapshot_id=cgsnapshot['id'] if cgsnapshot else None,
source_cg=source_cg)
def create_cgsnapshot(self, ctxt, group, cgsnapshot):
host = utils.extract_host(group['host'])
cctxt = self.client.prepare(server=host, version='1.26')
cctxt.cast(ctxt, 'create_cgsnapshot',
group=group,
cgsnapshot_id=cgsnapshot['id'])
def delete_cgsnapshot(self, ctxt, cgsnapshot, host):
new_host = utils.extract_host(host)
cctxt = self.client.prepare(server=new_host, version='1.18')
cctxt.cast(ctxt, 'delete_cgsnapshot',
cgsnapshot_id=cgsnapshot['id'])
def create_volume(self, ctxt, volume, host, request_spec,
filter_properties, allow_reschedule=True):
new_host = utils.extract_host(host)
cctxt = self.client.prepare(server=new_host, version='1.24')
request_spec_p = jsonutils.to_primitive(request_spec)
cctxt.cast(ctxt, 'create_volume',
volume_id=volume['id'],
request_spec=request_spec_p,
filter_properties=filter_properties,
allow_reschedule=allow_reschedule)
def delete_volume(self, ctxt, volume, unmanage_only=False):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.15')
cctxt.cast(ctxt, 'delete_volume',
volume_id=volume['id'],
unmanage_only=unmanage_only)
def create_snapshot(self, ctxt, volume, snapshot):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host)
cctxt.cast(ctxt, 'create_snapshot', volume_id=volume['id'],
snapshot=snapshot)
def delete_snapshot(self, ctxt, snapshot, host):
new_host = utils.extract_host(host)
cctxt = self.client.prepare(server=new_host)
cctxt.cast(ctxt, 'delete_snapshot', snapshot=snapshot)
def attach_volume(self, ctxt, volume, instance_uuid, host_name,
mountpoint, mode):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.11')
return cctxt.call(ctxt, 'attach_volume',
volume_id=volume['id'],
instance_uuid=instance_uuid,
host_name=host_name,
mountpoint=mountpoint,
mode=mode)
def detach_volume(self, ctxt, volume, attachment_id):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.20')
return cctxt.call(ctxt, 'detach_volume', volume_id=volume['id'],
attachment_id=attachment_id)
def copy_volume_to_image(self, ctxt, volume, image_meta):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.3')
cctxt.cast(ctxt, 'copy_volume_to_image', volume_id=volume['id'],
image_meta=image_meta)
def initialize_connection(self, ctxt, volume, connector):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host)
return cctxt.call(ctxt, 'initialize_connection',
volume_id=volume['id'],
connector=connector)
def terminate_connection(self, ctxt, volume, connector, force=False):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host)
return cctxt.call(ctxt, 'terminate_connection', volume_id=volume['id'],
connector=connector, force=force)
def publish_service_capabilities(self, ctxt):
cctxt = self.client.prepare(fanout=True, version='1.2')
cctxt.cast(ctxt, 'publish_service_capabilities')
def accept_transfer(self, ctxt, volume, new_user, new_project):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.9')
return cctxt.call(ctxt, 'accept_transfer', volume_id=volume['id'],
new_user=new_user, new_project=new_project)
def extend_volume(self, ctxt, volume, new_size, reservations):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.14')
cctxt.cast(ctxt, 'extend_volume', volume_id=volume['id'],
new_size=new_size, reservations=reservations)
def migrate_volume(self, ctxt, volume, dest_host, force_host_copy):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.8')
host_p = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
cctxt.cast(ctxt, 'migrate_volume', volume_id=volume['id'],
host=host_p, force_host_copy=force_host_copy)
def migrate_volume_completion(self, ctxt, volume, new_volume, error):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.10')
return cctxt.call(ctxt, 'migrate_volume_completion',
volume_id=volume['id'],
new_volume_id=new_volume['id'],
error=error)
def retype(self, ctxt, volume, new_type_id, dest_host,
migration_policy='never', reservations=None):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.12')
host_p = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
cctxt.cast(ctxt, 'retype', volume_id=volume['id'],
new_type_id=new_type_id, host=host_p,
migration_policy=migration_policy,
reservations=reservations)
def manage_existing(self, ctxt, volume, ref):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.15')
cctxt.cast(ctxt, 'manage_existing', volume_id=volume['id'], ref=ref)
def promote_replica(self, ctxt, volume):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.17')
cctxt.cast(ctxt, 'promote_replica', volume_id=volume['id'])
def reenable_replication(self, ctxt, volume):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.17')
cctxt.cast(ctxt, 'reenable_replication', volume_id=volume['id'])
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
host = utils.extract_host(new_volume['host'])
cctxt = self.client.prepare(server=host, version='1.19')
cctxt.call(ctxt,
'update_migrated_volume',
volume=volume,
new_volume=new_volume,
volume_status=original_volume_status)
def enable_replication(self, ctxt, volume):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.27')
cctxt.cast(ctxt, 'enable_replication', volume=volume)
def disable_replication(self, ctxt, volume):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.27')
cctxt.cast(ctxt, 'disable_replication',
volume=volume)
def failover_replication(self,
ctxt,
volume,
secondary=None):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.27')
cctxt.cast(ctxt, 'failover_replication',
volume=volume,
secondary=secondary)
def list_replication_targets(self, ctxt, volume):
new_host = utils.extract_host(volume['host'])
cctxt = self.client.prepare(server=new_host, version='1.27')
return cctxt.call(ctxt, 'list_replication_targets', volume=volume)
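# Illustrative usage (a sketch, not part of the original module; `rpcapi` is
# assumed to be an instance of the enclosing client class, and `ctxt`,
# `volume`, and `connector` come from the caller). Methods built on
# cctxt.cast() such as extend_volume return immediately, while methods built
# on cctxt.call() such as initialize_connection block until the volume
# manager on the target host replies:
#
#   rpcapi.extend_volume(ctxt, volume, new_size=20, reservations=None)
#   conn_info = rpcapi.initialize_connection(ctxt, volume, connector)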
|
|
#!/usr/bin/env python
import argparse
import logging
import os.path
import sys
import numpy as np
import numpy.random as rnd
import pexpect
from qrsim.tcpclient import UAVControls
from scipy.stats import gaussian_kde
import tables
import behaviors
from config import instantiate, load_config
from client import TaskPlumeClient
from recorder import ControlsRecorder, ErrorRecorder, store_obj, \
TargetsRecorder, TaskPlumeRecorder
logger = logging.getLogger(__name__)
class FilterLevelAboveOrEqual(object):
def __init__(self, level):
self.level = level
def filter(self, record):
return record.levelno < self.level
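# Note: despite the name, this filter *keeps* records strictly below the given
# level; QRSimApplication.main() below attaches it to the stdout handler so
# that WARNING and above are emitted only on stderr.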
class Controller(object):
def __init__(self, client, controller, movement_behavior):
self.client = client
self.controller = controller
self.movement_behavior = movement_behavior
self.recorders = []
def add_recorder(self, recorder):
self.recorders.append(recorder)
def init_new_sim(self, seed):
self.client.reset_seed(seed)
self.client.reset()
# Ensure that all simulator variables have been set
self.step_keeping_position()
def run(self, num_steps):
for step in xrange(num_steps):
logger.info('Step %i', step + 1)
for recorder in self.recorders:
recorder.record()
self.controller.step(self.client.noisy_state)
controls = self.movement_behavior.get_controls(
self.client.noisy_state)
self.client.step(self.client.timestep, controls)
def step_keeping_position(self):
c = UAVControls(self.client.numUAVs, 'vel')
c.U.fill(0.0)
self.client.step(self.client.timestep, c)
def do_simulation_run(trial, output_filename, conf, client):
rnd.seed(conf['pyseedlist'][trial])
with tables.openFile(output_filename, 'w') as fileh:
tbl = fileh.createVLArray(
'/', 'conf', tables.ObjectAtom(),
title='Configuration used to generate the stored data.')
tbl.append(conf)
fileh.createArray(
'/', 'repeat', [trial], title='Number of repeat run.')
num_steps = conf['duration_in_steps']
kernel = instantiate(*conf['kernel'])
predictor = instantiate(*conf['predictor'], prefix_args=(kernel,))
if 'bounds' in conf:
predictor.bounds = conf['bounds']
if 'priors' in conf:
for i in range(len(conf['priors'])):
predictor.priors[i] = instantiate(*conf['priors'][i])
recorder = TaskPlumeRecorder(fileh, client, predictor, num_steps)
err_recorder = ErrorRecorder(fileh, client, predictor, num_steps)
updater = instantiate(
*conf['updater'], predictor=predictor, plume_recorder=recorder)
acq_behavior = behaviors.AcquisitionFnTargetChooser(
instantiate(*conf['acquisition_fn'], predictor=predictor),
conf['area'], conf['margin'], conf['grid_resolution'])
if 'noise_search' in conf:
if conf['noise_search'] == 'wind':
tc_factory = behaviors.WindBasedPartialSurroundFactory(
client, conf['area'], conf['margin'])
else:
tc_factory = behaviors.SurroundAreaFactory(
conf['area'], conf['margin'])
surrounder = behaviors.SurroundUntilFound(updater, tc_factory)
surrounder.observers.append(recorder)
target_chooser = behaviors.ChainTargetChoosers(
[surrounder, acq_behavior])
maxv = 4
else:
target_chooser = behaviors.ChainTargetChoosers([
behaviors.SurroundArea(conf['area'], conf['margin']),
acq_behavior])
maxv = 6
controller = behaviors.FollowWaypoints(
target_chooser, conf['target_precision'],
behaviors.VelocityTowardsWaypointController(
maxv, maxv, target_chooser.get_effective_area()))
controller.observers.append(updater)
behavior = controller.velocity_controller
if conf['full_record']:
client = ControlsRecorder(fileh, client, num_steps)
sim_controller = Controller(client, controller, behavior)
sim_controller.init_new_sim(conf['seedlist'][trial])
recorder.init(conf)
err_recorder.init(conf)
sim_controller.add_recorder(recorder)
sim_controller.add_recorder(err_recorder)
if hasattr(behavior, 'targets') and conf['full_record']:
targets_recorder = TargetsRecorder(
fileh, behavior, client.numUAVs, num_steps)
targets_recorder.init()
sim_controller.add_recorder(targets_recorder)
try:
sim_controller.run(num_steps)
except Exception as err:
err_tbl = fileh.createVLArray(
'/', 'exception', tables.ObjectAtom(),
title='Exception which was raised.')
err_tbl.append(err)
raise
finally:
try:
if conf['full_record']:
store_obj(fileh, fileh.createGroup('/', 'gp'), predictor)
else:
recorder.prune()
except:
pass
def get_correction_factor(trial, conf, client):
rnd.seed(conf['pyseedlist'][trial])
with tables.openFile('tmp', 'w') as fileh:
tbl = fileh.createVLArray(
'/', 'conf', tables.ObjectAtom(),
title='Configuration used to generate the stored data.')
tbl.append(conf)
fileh.createArray(
'/', 'repeat', [trial], title='Number of repeat run.')
num_steps = conf['duration_in_steps']
kernel = instantiate(*conf['kernel'])
predictor = instantiate(*conf['predictor'], prefix_args=(kernel,))
if 'bounds' in conf:
predictor.bounds = conf['bounds']
if 'priors' in conf:
for i in range(len(conf['priors'])):
predictor.priors[i] = instantiate(*conf['priors'][i])
recorder = TaskPlumeRecorder(fileh, client, predictor, 1)
err_recorder = ErrorRecorder(fileh, client, predictor, 1)
target_chooser = behaviors.ChainTargetChoosers([
behaviors.SurroundArea(conf['area'], conf['margin']),
behaviors.AcquisitionFnTargetChooser(
instantiate(*conf['acquisition_fn'], predictor=predictor),
conf['area'], conf['margin'], conf['grid_resolution'])])
controller = behaviors.FollowWaypoints(
target_chooser, conf['target_precision'])
updater = instantiate(
*conf['updater'], predictor=predictor, plume_recorder=recorder)
controller.observers.append(updater)
behavior = controller.velocity_controller
if conf['full_record']:
client = ControlsRecorder(fileh, client, num_steps)
sim_controller = Controller(client, controller, behavior)
sim_controller.init_new_sim(conf['seedlist'][trial])
recorder.init(conf)
err_recorder.init(conf)
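# Correction factor: combines the extent ("volume") of conf['area'] with a
# Gaussian kernel density estimate evaluated at the recorded test locations,
# i.e. sqrt(len(test_x) / sum(1 / kde(test_x)**2) / volume).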
volume = np.product(np.diff(conf['area'], axis=1))
print volume
test_x = err_recorder.test_x.T
return np.sqrt(len(test_x) / np.sum(
1.0 / gaussian_kde(test_x)(test_x) ** 2) / volume)
class QRSimApplication(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
self.parser.add_argument(
'-q', '--quiet', action='store_true',
help='Reduce output verbosity.')
self.parser.add_argument(
'-c', '--config', nargs=1, type=str, help='Configuration to load.')
self.parser.add_argument(
'-H', '--host', nargs=1, type=str,
help='Host running QRSim. If not given, an attempt will be made to launch '
            'an instance locally and connect to it.')
self.parser.add_argument(
'-P', '--port', nargs=1, type=int, default=[10000],
help='Port on which QRSim instance is listening.')
self.parser.add_argument(
'output_dir', nargs=1, type=str, help='Output directory.')
def main(self):
args = self.parser.parse_args()
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.addFilter(FilterLevelAboveOrEqual(logging.WARNING))
stderr_handler = logging.StreamHandler(sys.stderr)
stderr_handler.setLevel(logging.WARNING)
root_logger = logging.getLogger()
if args.quiet:
root_logger.setLevel(logging.WARNING)
else:
root_logger.setLevel(logging.INFO)
root_logger.addHandler(stdout_handler)
root_logger.addHandler(stderr_handler)
conf = load_config(args.config[0])
with TaskPlumeClient() as client:
if args.host is not None:
client.connect_to(args.host[0], args.port[0])
else:
qrsim = pexpect.spawn(
'matlab -nodesktop -nosplash -r "'
"cd(fileparts(which('QRSimTCPServer')));"
"QRSimTCPServer(0);"
'quit;"',
timeout=120)
qrsim.logfile = sys.stdout
qrsim.expect(r'Listening on port: (\d+)')
port = int(qrsim.match.group(1))
client.connect_to('127.0.0.1', port)
client.init(conf['task'])
return self._run_application(args, conf, client)
def _run_application(self, args, conf, client):
raise NotImplementedError()
class Plume(QRSimApplication):
def __init__(self):
super(Plume, self).__init__()
self.parser.add_argument(
'-o', '--output', nargs=1, type=str, default=['plume'],
help='Output file name without extension (will be added '
'automatically).')
self.parser.add_argument(
'-t', '--trial', nargs=1, type=int, required=False,
help='Only run the given trial.')
self.parser.add_argument(
'--error-correction', action='store_true',
help='Store error correction factors.')
def _run_application(self, args, conf, client):
clean = True
if args.trial is not None:
trials = args.trial
else:
trials = xrange(conf['repeats'])
err_cor = []
for i in trials:
try:
if args.error_correction:
err_cor.append(get_correction_factor(i, conf, client))
else:
output_filename = os.path.join(
args.output_dir[0], args.output[0] + '.%i.h5' % i)
do_simulation_run(i, output_filename, conf, client)
except:
logger.exception('Repeat failed.', exc_info=True)
clean = False
if len(err_cor) > 0:
output_filename = os.path.join(
args.output_dir[0], args.output[0] + '.errcor.h5')
with tables.openFile(output_filename, 'w') as fileh:
fileh.createArray(
'/', 'errcor', err_cor, title='Error correction.')
return clean
if __name__ == '__main__':
if Plume().main():
sys.exit(os.EX_OK)
else:
sys.exit(os.EX_SOFTWARE)
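# Example invocation (a sketch; the script and config file names are
# hypothetical, only the flags defined above are real):
#
#   python plume.py -c experiment_conf.py -o plume -t 0 results/
#
# Without -H/--host the script spawns a local MATLAB QRSim instance via
# pexpect and connects to the port it reports.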
|
|
# -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jun 17 2015)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.adv
import wx.xrc
wx.ID_ADDFILE = 1000
wx.ID_REMOVEFILE = 1001
wx.ID_VIEWFILE = 1002
wx.ID_MONTECARLOCHECKBOX = 1003
wx.ID_CLUSTERCHECKBOX = 1004
wx.ID_OTHERSUFFIXCHECKBOX = 1005
wx.ID_SAVELOGCHECKBOX = 1006
wx.ID_START = 1007
###########################################################################
## Class MainFrame
###########################################################################
class MainFrame ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( -1,-1 ), style = wx.CLOSE_BOX|wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
self.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_3DLIGHT ) )
MainSizer = wx.FlexGridSizer( 2, 1, 0, 0 )
MainSizer.SetFlexibleDirection( wx.BOTH )
MainSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
TopSizer = wx.GridBagSizer( 0, 0 )
TopSizer.SetFlexibleDirection( wx.BOTH )
TopSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.InputPanel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.InputPanel.SetMinSize( wx.Size( 400,200 ) )
InputPanelSizer = wx.StaticBoxSizer( wx.StaticBox( self.InputPanel, wx.ID_ANY, u"Input files" ), wx.VERTICAL )
InputButtonSizer = wx.GridSizer( 1, 3, 0, 0 )
self.AddButton = wx.Button( InputPanelSizer.GetStaticBox(), wx.ID_ADDFILE, u"&Add...", wx.Point( -1,-1 ), wx.DefaultSize, 0 )
self.AddButton.SetDefault()
InputButtonSizer.Add( self.AddButton, 1, wx.ALL, 5 )
self.RemoveButton = wx.Button( InputPanelSizer.GetStaticBox(), wx.ID_REMOVEFILE, u"&Remove", wx.Point( -1,-1 ), wx.DefaultSize, 0 )
InputButtonSizer.Add( self.RemoveButton, 0, wx.ALL, 5 )
self.ViewButton = wx.Button( InputPanelSizer.GetStaticBox(), wx.ID_VIEWFILE, u"&View", wx.DefaultPosition, wx.DefaultSize, 0 )
InputButtonSizer.Add( self.ViewButton, 0, wx.ALL, 5 )
InputPanelSizer.Add( InputButtonSizer, 0, 0, 5 )
self.InputFileListCtrl = wx.ListCtrl( InputPanelSizer.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LC_REPORT )
self.InputFileListCtrl.SetMinSize( wx.Size( 400,-1 ) )
InputPanelSizer.Add( self.InputFileListCtrl, 1, wx.ALL|wx.EXPAND, 5 )
self.InputPanel.SetSizer( InputPanelSizer )
self.InputPanel.Layout()
InputPanelSizer.Fit( self.InputPanel )
TopSizer.Add( self.InputPanel, wx.GBPosition( 0, 0 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.EXPAND, 5 )
self.OptionsPanel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
OptionsPanelSizer = wx.StaticBoxSizer( wx.StaticBox( self.OptionsPanel, wx.ID_ANY, u"Options" ), wx.VERTICAL )
self.OptionsNotebook = wx.Notebook( OptionsPanelSizer.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
self.AnalysisOptionsTab = wx.Panel( self.OptionsNotebook, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
AnalysisOptionsSizer = wx.FlexGridSizer( 0, 1, 0, 0 )
AnalysisOptionsSizer.SetFlexibleDirection( wx.BOTH )
AnalysisOptionsSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
GenAnalysisOptionsSizer = wx.FlexGridSizer( 2, 3, 0, 0 )
GenAnalysisOptionsSizer.SetFlexibleDirection( wx.BOTH )
GenAnalysisOptionsSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.SpatResLabel = wx.StaticText( self.AnalysisOptionsTab, wx.ID_ANY, u"Spatial resolution:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.SpatResLabel.Wrap( -1 )
self.SpatResLabel.SetToolTip( u"Spatial resolution of the point pattern" )
GenAnalysisOptionsSizer.Add( self.SpatResLabel, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.SpatResSpinCtrl = wx.SpinCtrl( self.AnalysisOptionsTab, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,-1 ), wx.SP_ARROW_KEYS, 0, 1000, 25 )
self.SpatResSpinCtrl.SetToolTip( u"Spatial resolution of the point pattern" )
GenAnalysisOptionsSizer.Add( self.SpatResSpinCtrl, 0, wx.ALL, 5 )
self.SpatResUnitLabel = wx.StaticText( self.AnalysisOptionsTab, wx.ID_ANY, u"metric units", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_LEFT )
self.SpatResUnitLabel.Wrap( -1 )
self.SpatResUnitLabel.SetToolTip( u"Spatial resolution of the point pattern" )
GenAnalysisOptionsSizer.Add( self.SpatResUnitLabel, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5 )
self.ShellWidthLabel = wx.StaticText( self.AnalysisOptionsTab, wx.ID_ANY, u"Shell width:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.ShellWidthLabel.Wrap( -1 )
self.ShellWidthLabel.SetToolTip( u"Points farther than this from the postsynaptic element are discarded" )
GenAnalysisOptionsSizer.Add( self.ShellWidthLabel, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.ShellWidthSpinCtrl = wx.SpinCtrl( self.AnalysisOptionsTab, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,-1 ), wx.SP_ARROW_KEYS, 0, 1000, 200 )
self.ShellWidthSpinCtrl.SetToolTip( u"Points farther than this from the postsynaptic element are discarded" )
GenAnalysisOptionsSizer.Add( self.ShellWidthSpinCtrl, 0, wx.ALL, 5 )
self.ShellWidthUnitLabel = wx.StaticText( self.AnalysisOptionsTab, wx.ID_ANY, u"metric units", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_LEFT )
self.ShellWidthUnitLabel.Wrap( -1 )
self.ShellWidthUnitLabel.SetToolTip( u"Points farther than this from the postsynaptic element are discarded" )
GenAnalysisOptionsSizer.Add( self.ShellWidthUnitLabel, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
AnalysisOptionsSizer.Add( GenAnalysisOptionsSizer, 1, wx.EXPAND|wx.TOP|wx.RIGHT, 5 )
InterpointSizer = wx.StaticBoxSizer( wx.StaticBox( self.AnalysisOptionsTab, wx.ID_ANY, u"Interpoint distances" ), wx.VERTICAL )
InterpointSizer2 = wx.GridBagSizer( 0, 0 )
InterpointSizer2.SetFlexibleDirection( wx.BOTH )
InterpointSizer2.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
InterpointModeChoiceChoices = []
self.InterpointModeChoice = wx.Choice( InterpointSizer.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, InterpointModeChoiceChoices, 0 )
self.InterpointModeChoice.SetSelection( 0 )
self.InterpointModeChoice.SetToolTip( u"Type of distance to calculate" )
InterpointSizer2.Add( self.InterpointModeChoice, wx.GBPosition( 1, 1 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.EXPAND, 5 )
InterpointRelationsCheckListBoxChoices = [wx.EmptyString]
self.InterpointRelationsCheckListBox = wx.CheckListBox( InterpointSizer.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, InterpointRelationsCheckListBoxChoices, 0|wx.HSCROLL )
InterpointSizer2.Add( self.InterpointRelationsCheckListBox, wx.GBPosition( 2, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.InterpointRelationsLabel = wx.StaticText( InterpointSizer.GetStaticBox(), wx.ID_ANY, u"Distances to\ndetermine:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.InterpointRelationsLabel.Wrap( -1 )
InterpointSizer2.Add( self.InterpointRelationsLabel, wx.GBPosition( 2, 0 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.InterpointModeLabel = wx.StaticText( InterpointSizer.GetStaticBox(), wx.ID_ANY, u"Distance mode:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.InterpointModeLabel.Wrap( -1 )
self.InterpointModeLabel.SetToolTip( u"Type of distance to calculate" )
InterpointSizer2.Add( self.InterpointModeLabel, wx.GBPosition( 1, 0 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.InterpointCheckBox = wx.CheckBox( InterpointSizer.GetStaticBox(), wx.ID_ANY, u"Calculate interpoint distances", wx.DefaultPosition, wx.DefaultSize, 0 )
InterpointSizer2.Add( self.InterpointCheckBox, wx.GBPosition( 0, 0 ), wx.GBSpan( 1, 2 ), wx.ALL|wx.EXPAND, 5 )
InterpointShortLatDistSizer = wx.FlexGridSizer( 2, 2, 0, 0 )
InterpointShortLatDistSizer.SetFlexibleDirection( wx.BOTH )
InterpointShortLatDistSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.ShortestDistCheckBox = wx.CheckBox( InterpointSizer.GetStaticBox(), wx.ID_ANY, u"Shortest distance", wx.DefaultPosition, wx.DefaultSize, 0 )
self.ShortestDistCheckBox.SetToolTip( u"Shortest distance between the points" )
InterpointShortLatDistSizer.Add( self.ShortestDistCheckBox, 0, wx.ALL, 5 )
self.LateralDistCheckBox = wx.CheckBox( InterpointSizer.GetStaticBox(), wx.ID_ANY, u"Distance along profile border", wx.DefaultPosition, wx.DefaultSize, 0 )
self.LateralDistCheckBox.SetToolTip( u"Lateral distance along profile border between the projections of the points on the border" )
InterpointShortLatDistSizer.Add( self.LateralDistCheckBox, 0, wx.ALL, 5 )
InterpointSizer2.Add( InterpointShortLatDistSizer, wx.GBPosition( 3, 0 ), wx.GBSpan( 1, 3 ), wx.EXPAND, 5 )
InterpointSizer.Add( InterpointSizer2, 1, wx.EXPAND, 5 )
AnalysisOptionsSizer.Add( InterpointSizer, 1, wx.EXPAND|wx.ALL, 5 )
MonteCarloSizer = wx.StaticBoxSizer( wx.StaticBox( self.AnalysisOptionsTab, wx.ID_ANY, u"Monte Carlo simulations" ), wx.VERTICAL )
MonteCarloSizer2 = wx.GridBagSizer( 0, 0 )
MonteCarloSizer2.SetFlexibleDirection( wx.BOTH )
MonteCarloSizer2.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.MonteCarloCheckBox = wx.CheckBox( MonteCarloSizer.GetStaticBox(), wx.ID_MONTECARLOCHECKBOX, u"Perform Monte Carlo simulations", wx.DefaultPosition, wx.DefaultSize, 0 )
self.MonteCarloCheckBox.SetToolTip( u"Generate a random point pattern evenly distributed over the profile (plus the shell defined by the skipping distance)" )
MonteCarloSizer2.Add( self.MonteCarloCheckBox, wx.GBPosition( 0, 0 ), wx.GBSpan( 1, 2 ), wx.ALL|wx.EXPAND, 5 )
self.MonteCarloRunsLabel = wx.StaticText( MonteCarloSizer.GetStaticBox(), wx.ID_ANY, u"Number of runs:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.MonteCarloRunsLabel.Wrap( -1 )
self.MonteCarloRunsLabel.SetToolTip( u"Number of point patterns to generate for each profile" )
MonteCarloSizer2.Add( self.MonteCarloRunsLabel, wx.GBPosition( 1, 0 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.MonteCarloRunsSpinCtrl = wx.SpinCtrl( MonteCarloSizer.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,-1 ), wx.SP_ARROW_KEYS, 1, 999, 99 )
self.MonteCarloRunsSpinCtrl.SetToolTip( u"Number of point patterns to generate for each profile" )
MonteCarloSizer2.Add( self.MonteCarloRunsSpinCtrl, wx.GBPosition( 1, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.SimulationWindowLabel = wx.StaticText( MonteCarloSizer.GetStaticBox(), wx.ID_ANY, u"Simulation window:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.SimulationWindowLabel.Wrap( -1 )
self.SimulationWindowLabel.SetToolTip( u"The region over which simulated points are generated" )
MonteCarloSizer2.Add( self.SimulationWindowLabel, wx.GBPosition( 2, 0 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
SimulationWindowChoiceChoices = []
self.SimulationWindowChoice = wx.Choice( MonteCarloSizer.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, SimulationWindowChoiceChoices, 0 )
self.SimulationWindowChoice.SetSelection( 0 )
self.SimulationWindowChoice.SetToolTip( u"The region over which simulated points are generated" )
MonteCarloSizer2.Add( self.SimulationWindowChoice, wx.GBPosition( 2, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.StrictLocCheckBox = wx.CheckBox( MonteCarloSizer.GetStaticBox(), wx.ID_ANY, u"Strict localization in window", wx.DefaultPosition, wx.DefaultSize, 0)
self.StrictLocCheckBox.SetToolTip(u"If checked, points located outside the window are excluded, even if they are within the spatial resolution of the border")
MonteCarloSizer2.Add(self.StrictLocCheckBox, wx.GBPosition(3, 1), wx.GBSpan(1, 1), wx.ALL, 5)
MonteCarloSizer.Add( MonteCarloSizer2, 1, wx.EXPAND, 5 )
AnalysisOptionsSizer.Add( MonteCarloSizer, 1, wx.EXPAND|wx.ALL, 5 )
ClusterSizer = wx.StaticBoxSizer( wx.StaticBox( self.AnalysisOptionsTab, wx.ID_ANY, u"Clusters" ), wx.VERTICAL )
ClusterSizer2 = wx.GridBagSizer( 0, 0 )
ClusterSizer2.SetFlexibleDirection( wx.BOTH )
ClusterSizer2.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.ClusterCheckBox = wx.CheckBox( ClusterSizer.GetStaticBox(), wx.ID_CLUSTERCHECKBOX, u"Determine point clusters", wx.DefaultPosition, wx.DefaultSize, 0 )
self.ClusterCheckBox.SetToolTip( u"Partition points into clusters" )
ClusterSizer2.Add( self.ClusterCheckBox, wx.GBPosition( 0, 0 ), wx.GBSpan( 1, 2 ), wx.ALL, 5 )
self.ClusterDistLabel = wx.StaticText( ClusterSizer.GetStaticBox(), wx.ID_ANY, u"Within-cluster distance:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.ClusterDistLabel.Wrap( -1 )
self.ClusterDistLabel.SetToolTip( u"Two points closer than this distance from each other are assigned to the same cluster" )
ClusterSizer2.Add( self.ClusterDistLabel, wx.GBPosition( 1, 0 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL|wx.ALIGN_RIGHT, 5 )
self.ClusterDistSpinCtrl = wx.SpinCtrl( ClusterSizer.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,-1 ), wx.SP_ARROW_KEYS, 1, 1000, 50 )
self.ClusterDistSpinCtrl.SetToolTip( u"Two points closer than this distance from each other are assigned to the same cluster" )
ClusterSizer2.Add( self.ClusterDistSpinCtrl, wx.GBPosition( 1, 1 ), wx.GBSpan( 1, 1 ), wx.ALL, 5 )
self.ClusterDistUnitLabel = wx.StaticText( ClusterSizer.GetStaticBox(), wx.ID_ANY, u"metric units", wx.DefaultPosition, wx.DefaultSize, wx.ALIGN_LEFT )
self.ClusterDistUnitLabel.Wrap( -1 )
self.ClusterDistUnitLabel.SetToolTip( u"Two points closer than this distance from each other are assigned to the same cluster" )
ClusterSizer2.Add( self.ClusterDistUnitLabel, wx.GBPosition( 1, 2 ), wx.GBSpan( 1, 1 ), wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
ClusterSizer.Add( ClusterSizer2, 1, wx.EXPAND, 5 )
AnalysisOptionsSizer.Add( ClusterSizer, 1, wx.EXPAND|wx.ALL, 5 )
self.AnalysisOptionsTab.SetSizer( AnalysisOptionsSizer )
self.AnalysisOptionsTab.Layout()
AnalysisOptionsSizer.Fit( self.AnalysisOptionsTab )
self.OptionsNotebook.AddPage( self.AnalysisOptionsTab, u"Analysis", True )
self.OutputOptionsTab = wx.Panel( self.OptionsNotebook, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.OutputOptionsTab.SetToolTip( u"Note: Excel output may not be available" )
OutputOptionsSizer = wx.FlexGridSizer( 3, 2, 0, 0 )
OutputOptionsSizer.SetFlexibleDirection( wx.BOTH )
OutputOptionsSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.GenerateOutputLabel = wx.StaticText( self.OutputOptionsTab, wx.ID_ANY, u"Output to generate:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.GenerateOutputLabel.Wrap( -1 )
OutputOptionsSizer.Add( self.GenerateOutputLabel, 0, wx.ALL, 5 )
OutputCheckListBoxChoices = [u"Profile summary", u"Particle summary", u"Random summary", u"Session summary"]
self.OutputCheckListBox = wx.CheckListBox( self.OutputOptionsTab, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, OutputCheckListBoxChoices, 0 )
OutputOptionsSizer.Add( self.OutputCheckListBox, 0, wx.ALL, 5 )
OutputFormatRadioBoxChoices = [ u"Excel", u"Comma-delimited text", u"Tab-delimited text" ]
self.OutputFormatRadioBox = wx.RadioBox( self.OutputOptionsTab, wx.ID_ANY, u"Output format", wx.DefaultPosition, wx.DefaultSize, OutputFormatRadioBoxChoices, 1, wx.RA_SPECIFY_COLS )
self.OutputFormatRadioBox.SetSelection( 0 )
OutputOptionsSizer.Add( self.OutputFormatRadioBox, 1, wx.ALL|wx.EXPAND, 5 )
IfOutputExistsRadioBoxChoices = [ u"Enumerate", u"Overwrite" ]
self.IfOutputExistsRadioBox = wx.RadioBox( self.OutputOptionsTab, wx.ID_ANY, u"If output file exists", wx.DefaultPosition, wx.DefaultSize, IfOutputExistsRadioBoxChoices, 1, wx.RA_SPECIFY_COLS )
self.IfOutputExistsRadioBox.SetSelection( 0 )
OutputOptionsSizer.Add( self.IfOutputExistsRadioBox, 1, wx.ALL|wx.EXPAND, 5 )
OutputFileSuffixBox = wx.StaticBoxSizer( wx.StaticBox( self.OutputOptionsTab, wx.ID_ANY, u"Output file suffix" ), wx.VERTICAL )
self.DateSuffixCheckBox = wx.CheckBox( OutputFileSuffixBox.GetStaticBox(), wx.ID_ANY, u"Today's date", wx.DefaultPosition, wx.DefaultSize, 0 )
self.DateSuffixCheckBox.SetValue(True)
OutputFileSuffixBox.Add( self.DateSuffixCheckBox, 0, wx.ALL, 5 )
OtherSuffixSizer = wx.FlexGridSizer( 2, 2, 0, 0 )
OtherSuffixSizer.AddGrowableCol( 1 )
OtherSuffixSizer.SetFlexibleDirection( wx.BOTH )
OtherSuffixSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.OtherSuffixCheckBox = wx.CheckBox( OutputFileSuffixBox.GetStaticBox(), wx.ID_OTHERSUFFIXCHECKBOX, u"Other:", wx.DefaultPosition, wx.DefaultSize, 0 )
OtherSuffixSizer.Add( self.OtherSuffixCheckBox, 0, wx.TOP|wx.BOTTOM|wx.LEFT|wx.ALIGN_CENTER_VERTICAL, 5 )
self.OtherSuffixTextCtrl = wx.TextCtrl( OutputFileSuffixBox.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
self.OtherSuffixTextCtrl.SetMaxLength( 0 )
OtherSuffixSizer.Add( self.OtherSuffixTextCtrl, 0, wx.TOP|wx.BOTTOM|wx.RIGHT, 5 )
OutputFileSuffixBox.Add( OtherSuffixSizer, 1, wx.EXPAND, 5 )
OutputOptionsSizer.Add( OutputFileSuffixBox, 1, wx.EXPAND|wx.ALL, 5 )
self.OutputOptionsTab.SetSizer( OutputOptionsSizer )
self.OutputOptionsTab.Layout()
OutputOptionsSizer.Fit( self.OutputOptionsTab )
self.OptionsNotebook.AddPage( self.OutputOptionsTab, u"Output", False )
self.LogOptionsTab = wx.Panel( self.OptionsNotebook, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
LogOptionsSizer = wx.FlexGridSizer( 2, 1, 0, 0 )
LogOptionsSizer.SetFlexibleDirection( wx.BOTH )
LogOptionsSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
SaveLogSizer = wx.FlexGridSizer( 2, 2, 0, 0 )
SaveLogSizer.AddGrowableCol( 1 )
SaveLogSizer.SetFlexibleDirection( wx.BOTH )
SaveLogSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.SaveLogCheckBox = wx.CheckBox( self.LogOptionsTab, wx.ID_SAVELOGCHECKBOX, u"Save as:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.SaveLogCheckBox.SetValue(True)
SaveLogSizer.Add( self.SaveLogCheckBox, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
self.LogFilePickerCtrl = wx.FilePickerCtrl( self.LogOptionsTab, wx.ID_ANY, u"Synapse.log", u"Select a file", u"*.log", wx.DefaultPosition, wx.DefaultSize, wx.FLP_USE_TEXTCTRL )
SaveLogSizer.Add( self.LogFilePickerCtrl, 1, wx.ALL|wx.EXPAND, 5 )
LogOptionsSizer.Add( SaveLogSizer, 1, wx.EXPAND, 5 )
IfLogExistsSizer = wx.FlexGridSizer( 2, 2, 0, 0 )
IfLogExistsSizer.SetFlexibleDirection( wx.BOTH )
IfLogExistsSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
IfLogExistsRadioBoxChoices = [ u"Enumerate", u"Overwrite", u"Append" ]
self.IfLogExistsRadioBox = wx.RadioBox( self.LogOptionsTab, wx.ID_ANY, u"If log file exists", wx.DefaultPosition, wx.DefaultSize, IfLogExistsRadioBoxChoices, 1, wx.RA_SPECIFY_COLS )
self.IfLogExistsRadioBox.SetSelection( 0 )
IfLogExistsSizer.Add( self.IfLogExistsRadioBox, 0, wx.ALL, 5 )
LogOptionsSizer.Add( IfLogExistsSizer, 1, wx.EXPAND, 5 )
self.LogOptionsTab.SetSizer( LogOptionsSizer )
self.LogOptionsTab.Layout()
LogOptionsSizer.Fit( self.LogOptionsTab )
self.OptionsNotebook.AddPage( self.LogOptionsTab, u"Logging", False )
OptionsPanelSizer.Add( self.OptionsNotebook, 1, wx.EXPAND |wx.ALL, 5 )
self.SetOptionsAsDefaultButton = wx.Button( OptionsPanelSizer.GetStaticBox(), wx.ID_ANY, u"Set options as default", wx.DefaultPosition, wx.DefaultSize, 0 )
OptionsPanelSizer.Add( self.SetOptionsAsDefaultButton, 0, wx.ALL, 5 )
self.OptionsPanel.SetSizer( OptionsPanelSizer )
self.OptionsPanel.Layout()
OptionsPanelSizer.Fit( self.OptionsPanel )
TopSizer.Add( self.OptionsPanel, wx.GBPosition( 0, 1 ), wx.GBSpan( 2, 1 ), wx.EXPAND |wx.ALL, 5 )
self.LogPanel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
LogSizer = wx.StaticBoxSizer( wx.StaticBox( self.LogPanel, wx.ID_ANY, u"Log" ), wx.VERTICAL )
self.LogTextCtrl = wx.TextCtrl( LogSizer.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( -1,-1 ), wx.HSCROLL|wx.TE_MULTILINE )
self.LogTextCtrl.SetMaxLength( 0 )
self.LogTextCtrl.SetFont( wx.Font( 8, 70, 90, 90, False, wx.EmptyString ) )
LogSizer.Add( self.LogTextCtrl, 1, wx.ALL|wx.EXPAND, 5 )
self.LogPanel.SetSizer( LogSizer )
self.LogPanel.Layout()
LogSizer.Fit( self.LogPanel )
TopSizer.Add( self.LogPanel, wx.GBPosition( 1, 0 ), wx.GBSpan( 1, 1 ), wx.EXPAND |wx.ALL, 5 )
TopSizer.AddGrowableRow( 0 )
MainSizer.Add( TopSizer, 1, wx.EXPAND, 5 )
BottomSizer = wx.FlexGridSizer( 1, 1, 0, 0 )
BottomSizer.AddGrowableCol( 0 )
BottomSizer.AddGrowableRow( 0 )
BottomSizer.SetFlexibleDirection( wx.BOTH )
BottomSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_NONE )
self.MainButtonPanel = wx.Panel( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
MainButtonSizer = wx.FlexGridSizer( 2, 2, 0, 0 )
MainButtonSizer.AddGrowableRow( 0 )
MainButtonSizer.SetFlexibleDirection( wx.BOTH )
MainButtonSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
MainButtonSizer.Add( 0, 0, 1, wx.EXPAND, 5 )
self.StartButton = wx.Button( self.MainButtonPanel, wx.ID_START, u"&Start", wx.DefaultPosition, wx.DefaultSize, 0 )
MainButtonSizer.Add( self.StartButton, 0, wx.ALL, 5 )
self.AboutButton = wx.Button( self.MainButtonPanel, wx.ID_ABOUT, u"A&bout...", wx.DefaultPosition, wx.DefaultSize, 0 )
MainButtonSizer.Add( self.AboutButton, 1, wx.ALL|wx.ALIGN_RIGHT, 5 )
self.ExitButton = wx.Button( self.MainButtonPanel, wx.ID_EXIT, u"&Exit", wx.DefaultPosition, wx.DefaultSize, 0 )
MainButtonSizer.Add( self.ExitButton, 1, wx.ALL|wx.ALIGN_RIGHT, 5 )
self.MainButtonPanel.SetSizer( MainButtonSizer )
self.MainButtonPanel.Layout()
MainButtonSizer.Fit( self.MainButtonPanel )
BottomSizer.Add( self.MainButtonPanel, 1, wx.ALL|wx.ALIGN_RIGHT, 5 )
MainSizer.Add( BottomSizer, 1, wx.EXPAND|wx.ALIGN_RIGHT, 5 )
self.SetSizer( MainSizer )
self.Layout()
MainSizer.Fit( self )
self.StatusBar = self.CreateStatusBar( 1, 0, wx.ID_ANY )
# Connect Events
self.AddButton.Bind( wx.EVT_BUTTON, self.OnAddFile )
self.RemoveButton.Bind( wx.EVT_BUTTON, self.OnRemoveFile )
self.ViewButton.Bind( wx.EVT_BUTTON, self.OnViewFile )
self.InterpointCheckBox.Bind( wx.EVT_CHECKBOX, self.OnInterpointCheckbox )
self.MonteCarloCheckBox.Bind( wx.EVT_CHECKBOX, self.OnMonteCarloCheckBox )
self.SimulationWindowChoice.Bind( wx.EVT_CHOICE, self.OnSimulationWindowChoice )
self.ClusterCheckBox.Bind( wx.EVT_CHECKBOX, self.OnClusterCheckBox )
self.OtherSuffixCheckBox.Bind( wx.EVT_CHECKBOX, self.OnOtherSuffixCheckBox )
self.SaveLogCheckBox.Bind( wx.EVT_CHECKBOX, self.OnSaveLogCheckBox )
self.LogFilePickerCtrl.Bind( wx.EVT_FILEPICKER_CHANGED, self.OnSaveLogCheckBox )
self.SetOptionsAsDefaultButton.Bind( wx.EVT_BUTTON, self.OnSetOptionsAsDefault )
self.StartButton.Bind( wx.EVT_BUTTON, self.OnStart )
self.AboutButton.Bind( wx.EVT_BUTTON, self.OnAbout )
self.ExitButton.Bind( wx.EVT_BUTTON, self.OnClose )
def __del__( self ):
pass
# Virtual event handlers, override them in your derived class
def OnAddFile( self, event ):
event.Skip()
def OnRemoveFile( self, event ):
event.Skip()
def OnViewFile( self, event ):
event.Skip()
def OnInterpointCheckbox( self, event ):
event.Skip()
def OnMonteCarloCheckBox( self, event ):
event.Skip()
def OnSimulationWindowChoice( self, event ):
event.Skip()
def OnClusterCheckBox( self, event ):
event.Skip()
def OnOtherSuffixCheckBox( self, event ):
event.Skip()
def OnSaveLogCheckBox( self, event ):
event.Skip()
def OnSetOptionsAsDefault( self, event ):
event.Skip()
def OnStart( self, event ):
event.Skip()
def OnAbout( self, event ):
event.Skip()
def OnClose( self, event ):
event.Skip()
###########################################################################
## Class ViewFileDialog
###########################################################################
class ViewFileDialog ( wx.Dialog ):
def __init__( self, parent ):
wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"View input file", pos = wx.DefaultPosition, size = wx.DefaultSize, style = wx.DEFAULT_DIALOG_STYLE )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
ViewFileSizer = wx.FlexGridSizer( 2, 1, 0, 0 )
ViewFileSizer.SetFlexibleDirection( wx.BOTH )
ViewFileSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.ViewFileTextCtrl = wx.TextCtrl( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 400,400 ), wx.HSCROLL|wx.TE_MULTILINE|wx.TE_READONLY )
self.ViewFileTextCtrl.SetMaxLength( 0 )
ViewFileSizer.Add( self.ViewFileTextCtrl, 0, wx.ALL, 5 )
ViewFileStdButtonSizer = wx.StdDialogButtonSizer()
self.ViewFileStdButtonSizerOK = wx.Button( self, wx.ID_OK )
ViewFileStdButtonSizer.AddButton( self.ViewFileStdButtonSizerOK )
ViewFileStdButtonSizer.Realize();
ViewFileSizer.Add( ViewFileStdButtonSizer, 1, wx.EXPAND|wx.ALL, 5 )
self.SetSizer( ViewFileSizer )
self.Layout()
ViewFileSizer.Fit( self )
self.Centre( wx.BOTH )
# Connect Events
self.ViewFileStdButtonSizerOK.Bind( wx.EVT_BUTTON, self.OnClose )
def __del__( self ):
pass
# Virtual event handlers, override them in your derived class
def OnClose( self, event ):
event.Skip()
###########################################################################
## Class AboutDialog
###########################################################################
class AboutDialog ( wx.Dialog ):
def __init__( self, parent ):
wx.Dialog.__init__ ( self, parent, id = wx.ID_ANY, title = u"About", pos = wx.DefaultPosition, size = wx.Size( -1,-1 ), style = wx.DEFAULT_DIALOG_STYLE )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
AboutSizer = wx.FlexGridSizer( 0, 1, 0, 0 )
AboutSizer.AddGrowableRow( 2 )
AboutSizer.SetFlexibleDirection( wx.BOTH )
AboutSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
TopSizer = wx.FlexGridSizer( 0, 2, 10, 10 )
TopSizer.SetFlexibleDirection( wx.BOTH )
TopSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.InitialSpaceSizer = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.InitialSpaceSizer.Wrap( -1 )
self.InitialSpaceSizer.SetMinSize( wx.Size( -1,5 ) )
TopSizer.Add( self.InitialSpaceSizer, 0, wx.ALL, 5 )
TopSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
TopSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
TitleSizer = wx.FlexGridSizer( 1, 3, 0, 0 )
TitleSizer.AddGrowableCol( 2 )
TitleSizer.SetFlexibleDirection( wx.BOTH )
TitleSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.IconBitmap = wx.StaticBitmap( self, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.DefaultSize, 0 )
TitleSizer.Add( self.IconBitmap, 0, wx.ALL, 5 )
self.SmallSpacer = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, 0 )
self.SmallSpacer.Wrap( -1 )
TitleSizer.Add( self.SmallSpacer, 0, wx.ALL, 5 )
self.TitleLabel = wx.StaticText( self, wx.ID_ANY, u"TitleLabel", wx.DefaultPosition, wx.DefaultSize, 0 )
self.TitleLabel.Wrap( -1 )
self.TitleLabel.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), 70, 90, 92, False, wx.EmptyString ) )
TitleSizer.Add( self.TitleLabel, 1, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 )
TopSizer.Add( TitleSizer, 1, wx.EXPAND|wx.RIGHT, 5 )
TopSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
TopSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
TopSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
self.VersionLabel = wx.StaticText( self, wx.ID_ANY, u"VersionLabel", wx.DefaultPosition, wx.DefaultSize, 0 )
self.VersionLabel.Wrap( -1 )
self.VersionLabel.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), 70, 90, 90, False, wx.EmptyString ) )
TopSizer.Add( self.VersionLabel, 0, wx.ALIGN_BOTTOM|wx.TOP|wx.RIGHT|wx.LEFT, 5 )
TopSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
self.LastModLabel = wx.StaticText( self, wx.ID_ANY, u"LastModLabel", wx.DefaultPosition, wx.DefaultSize, 0 )
self.LastModLabel.Wrap( -1 )
TopSizer.Add( self.LastModLabel, 0, wx.RIGHT|wx.LEFT, 5 )
TopSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
self.CopyrightLabel = wx.StaticText( self, wx.ID_ANY, u"CopyrightLabel", wx.DefaultPosition, wx.DefaultSize, 0 )
self.CopyrightLabel.Wrap( -1 )
TopSizer.Add( self.CopyrightLabel, 0, wx.RIGHT|wx.LEFT, 5 )
TopSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
self.LicenseLabel = wx.StaticText( self, wx.ID_ANY, u"LicenseLabel", wx.DefaultPosition, wx.DefaultSize, 0 )
self.LicenseLabel.Wrap( -1 )
TopSizer.Add( self.LicenseLabel, 0, wx.BOTTOM|wx.RIGHT|wx.LEFT, 5 )
TopSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
HyperLinksSizer = wx.FlexGridSizer( 2, 2, 0, 0 )
HyperLinksSizer.AddGrowableCol( 1 )
HyperLinksSizer.SetFlexibleDirection( wx.HORIZONTAL )
HyperLinksSizer.SetNonFlexibleGrowMode( wx.FLEX_GROWMODE_SPECIFIED )
self.EmailLabel = wx.StaticText( self, wx.ID_ANY, u"E-mail:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.EmailLabel.Wrap( -1 )
HyperLinksSizer.Add( self.EmailLabel, 0, wx.ALL, 5 )
self.EmailHyperlink = wx.adv.HyperlinkCtrl( self, wx.ID_ANY, u"EmailHyperlink", wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.adv.HL_DEFAULT_STYLE )
HyperLinksSizer.Add( self.EmailHyperlink, 0, wx.ALL, 5 )
self.WebLabel = wx.StaticText( self, wx.ID_ANY, u"Web:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.WebLabel.Wrap( -1 )
HyperLinksSizer.Add( self.WebLabel, 0, wx.ALL, 5 )
self.WebHyperlink = wx.adv.HyperlinkCtrl( self, wx.ID_ANY, u"WebHyperlink", wx.EmptyString, wx.DefaultPosition, wx.DefaultSize, wx.adv.HL_DEFAULT_STYLE )
HyperLinksSizer.Add( self.WebHyperlink, 0, wx.ALL, 5 )
TopSizer.Add( HyperLinksSizer, 1, wx.EXPAND|wx.BOTTOM|wx.RIGHT, 5 )
TopSizer.Add( ( 0, 0), 1, wx.EXPAND, 5 )
AboutSizer.Add( TopSizer, 1, wx.EXPAND, 5 )
self.Staticline = wx.StaticLine( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.LI_HORIZONTAL )
AboutSizer.Add( self.Staticline, 0, wx.EXPAND |wx.ALL, 5 )
OK_SdbSizer = wx.StdDialogButtonSizer()
self.OK_SdbSizerOK = wx.Button( self, wx.ID_OK )
OK_SdbSizer.AddButton( self.OK_SdbSizerOK )
OK_SdbSizer.Realize();
AboutSizer.Add( OK_SdbSizer, 1, wx.ALIGN_CENTER_HORIZONTAL|wx.ALL, 5 )
self.SetSizer( AboutSizer )
self.Layout()
AboutSizer.Fit( self )
self.Centre( wx.BOTH )
# Connect Events
self.OK_SdbSizerOK.Bind( wx.EVT_BUTTON, self.OnClose )
def __del__( self ):
pass
# Virtual event handlers, override them in your derived class
def OnClose( self, event ):
event.Skip()
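# Typical use (a sketch, consistent with the generator's comments above): the
# application subclasses these generated frames/dialogs and overrides the
# virtual event handlers instead of editing this file, e.g.
#
#   class SynapseMainFrame(MainFrame):  # hypothetical subclass name
#       def OnStart(self, event):
#           self.StatusBar.SetStatusText('Running...')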
|
|
import astropy.wcs
import astropy.units as u
import astropy.coordinates as astroCoords
import numpy as np
import matplotlib.mlab as mlab
import astropy.convolution as conv
import matplotlib.pyplot as plt
from createImage import createImage as ci
from astropy.io import fits
from scipy.spatial.distance import euclidean
from sklearn.cluster import DBSCAN
from skimage import measure
#from pathos.multiprocessing import ProcessPool as Pool
from pathos.threading import ThreadPool as Pool
class analyzeImage(object):
def calcArrayLimits(self, imShape, centerX, centerY, scaleFactor, sigmaArr):
xmin = int(centerX-(scaleFactor*sigmaArr[0]))
xmax = int(1+centerX+(scaleFactor*sigmaArr[0]))
ymin = int(centerY-(scaleFactor*sigmaArr[1]))
ymax = int(1+centerY+(scaleFactor*sigmaArr[1]))
if ((xmin < 0) | (ymin < 0) | (xmax >= imShape[0]) | (ymax >= imShape[1])):
maxXOff = xmax-imShape[0]+1
maxYOff = ymax-imShape[1]+1
minXOff = xmin*(-1.)
minYOff = ymin*(-1.)
offset = np.max([maxXOff, maxYOff, minXOff, minYOff])
xmin += offset
xmax -= offset
ymin += offset
ymax -= offset
else:
offset = None
return xmin, xmax, ymin, ymax, offset
def createAperture(self, imShape, locationArray, radius, mask=False):
"""
Create a circular aperture for an image. Aperture area will be 1's
and all area outside will be 0's. Just multiply aperture by image to get
everything outside aperture masked out.
Parameters
----------
imShape: list, [2], required
The row, column dimensions of the image.
locationArray: list, [Nx2], required
The locations in the image where apertures should be centered.
radius: float, required
The radius of the circular aperture in pixels.
mask: boolean, optional, default=False
If true, then aperture area inside is set to 0's and outside to 1's
making this a mask of the area instead.
Returns
-------
apertureArray: numpy array
Array of the same size as imShape but with 1's inside the aperture and
0's outside unless mask is set to True then it is the opposite.
"""
apertureArray = np.zeros((imShape))
if len(np.shape(locationArray)) < 2:
locationArray = [locationArray]
for center in locationArray:
centerX = center[0]
centerY = center[1]
for ix in range(0, int(imShape[0])):
for iy in range(0, int(imShape[1])):
distX = centerX - ix
distY = centerY - iy
if np.sqrt((distX**2)+(distY**2)) <= radius:
apertureArray[ix, iy] = 1.
if mask==True:
apertureArray -= 1
apertureArray = np.abs(apertureArray)
return apertureArray
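# Example (a sketch; the values are illustrative): multiplying an image by the
# returned array keeps only pixels within 5 px of (32, 32).
#
#   aperture = analyzeImage().createAperture([64, 64], [[32., 32.]], 5.)
#   masked = image * aperture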
def trackSingleObject(self, imageArray, gaussSigma):
objectCoords = []
for image in imageArray:
newImage = ci().convolveGaussian(image, gaussSigma)
maxIdx = np.argmax(newImage)
objectCoords.append(np.unravel_index(maxIdx, np.shape(newImage)))
return objectCoords
def plotSingleTrajectory(self, imageArray, gaussSigma):
objCoords = self.trackSingleObject(imageArray, gaussSigma)
fig = plt.figure(figsize=(12,12))
plt.plot(np.array(objCoords)[:,0], np.array(objCoords)[:,1], '-ko')
plt.xlim((0, np.shape(imageArray[0])[0]))
plt.ylim((0, np.shape(imageArray[0])[1]))
return fig
def calcSNR(self, image, centerArr, gaussSigma, background, imSize, apertureScale=1.6):
if isinstance(background, np.ndarray):
backgroundArray = background
else:
backgroundArray = np.ones((imSize))*background
# Default apertureScale is 1.6; see derivation here: http://wise2.ipac.caltech.edu/staff/fmasci/GaussApRadius.pdf
aperture = self.createAperture(imSize, centerArr, apertureScale*gaussSigma[0])
sourceCounts = np.sum(image*aperture)
print sourceCounts
if sourceCounts < 0:
sourceCounts = 0.0
noiseCounts = np.sum(backgroundArray*aperture)
print noiseCounts
snr = sourceCounts/np.sqrt(noiseCounts)
#snr = sourceCounts/np.sqrt(sourceCounts+noiseCounts)
return snr
def calcTheorySNR(self, sourceFlux, centerArr, gaussSigma, background, imSize, apertureScale=1.6):
if isinstance(background, np.ndarray):
backgroundArray = background
else:
backgroundArray = np.ones((imSize))*background
sourceTemplate = ci().createGaussianSource(centerArr, gaussSigma, imSize, sourceFlux)
aperture = self.createAperture(imSize, centerArr, apertureScale*gaussSigma[0])
sourceCounts = np.sum(sourceTemplate*aperture)
noiseCounts = np.sum(backgroundArray*aperture)
snr = sourceCounts/np.sqrt(sourceCounts+noiseCounts)
return snr
def addMask(self, imageArray, locations, gaussSigma):
maskedArray = np.zeros((np.shape(imageArray)))
scaleFactor = 4.
i = 0
for image in imageArray:
maskedArray[i] = image * self.createAperture(np.shape(image), locations, scaleFactor*gaussSigma, mask=True)
i+=1
return maskedArray
def return_ra_dec(self, t0_pos, t0_vel, image_times, t0_mjd, wcs,
position_error, telescope_code):
"""
Return a set of ra and dec coordinates for a trajectory.
Used as input into Bernstein and Khushalani (2000) orbit fitting
code found here: http://www.physics.upenn.edu/~garyb/#software
Parameters
----------
t0_pos: numpy array, [2], required
The starting x,y pixel location
t0_vel: numpy array, [2], required
The x,y velocity of the object in pixels/hr.
image_times: numpy array, required
An array containing the image times in hours with the first image at
time 0.
t0_mjd: numpy array, required
The MJD times of each image.
wcs: astropy.wcs.wcs instance, required
The astropy.wcs instance of the first image.
position_error: numpy array, required
The position error in the observations in arcsec.
telescope_code: int, required
The telescope code for Bernstein and Khushalani (2000)
orbit fitting software. (Subaru is 568).
Returns
-------
ra_dec_coords: numpy array
Array of strings with the (mjd, ra, dec,
position_error, telescope_code) for each image in the trajectory.
"""
pixel_vals = []
for time_pt in image_times:
pixel_vals.append(t0_pos + t0_vel*time_pt)
pixel_vals = np.array(pixel_vals)
coord_vals = astroCoords.SkyCoord.from_pixel(pixel_vals[:,0], pixel_vals[:,1], wcs)
coord_list = coord_vals.to_string('hmsdms')
output_list = []
for coord_val, mjd, err_val in zip(coord_list, t0_mjd, position_error):
coord_ra, coord_dec = coord_val.split(' ')
ra_h = coord_ra.split('h')[0]
ra_m = coord_ra.split('m')[0].split('h')[1]
ra_s = str('%.4f') % float(coord_ra.split('s')[0].split('m')[1])
dec_d = coord_dec.split('d')[0]
dec_m = coord_dec.split('m')[0].split('d')[1]
dec_s = str('%.4f') % float(coord_dec.split('s')[0].split('m')[1])
output_list.append(str('%.4f' + ' ' + '%s:%s:%s' + ' ' + '%s:%s:%s' +
' ' + '%.2f %i') % (mjd+2400000.5, ra_h, ra_m,
ra_s, dec_d, dec_m, dec_s,
err_val, telescope_code))
ra_dec_coords = np.array(output_list, dtype=str)
return ra_dec_coords
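# Each output row is formatted as "<JD> <ra h:m:s> <dec d:m:s> <err> <code>";
# note that the first field is the Julian date (the input MJD plus 2400000.5),
# not the MJD itself.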
def createPostageStamp(self, imageArray, objectStartArr, velArr,
timeArr, stamp_width):
"""
Create postage stamp image coadds of potential objects traveling along
a trajectory.
Parameters
----------
imageArray: numpy array, required
The masked input images.
objectStartArr: numpy array, required
An array with the starting location of the object in pixels.
velArr: numpy array, required
The x,y velocity in pixels/hr. of the object trajectory.
timeArr: numpy array, required
The time in hours of each image starting from 0 at the first image.
stamp_width: numpy array or list, [2], required
The row, column dimensions of the desired output image.
Returns
-------
stampImage: numpy array
The coadded postage stamp.
singleImagesArray: numpy array
The postage stamps that were added together to create the coadd.
"""
singleImagesArray = []
stampWidth = np.array(stamp_width, dtype=int)
#print stampWidth
stampImage = np.zeros(stampWidth)
if len(np.shape(imageArray)) < 3:
imageArray = [imageArray]
measureCoords = ci().calcCenters(np.array(objectStartArr), np.array(velArr), timeArr)
if len(np.shape(measureCoords)) < 2:
measureCoords = [measureCoords]
off_edge = []
for centerCoords in measureCoords:
if (centerCoords[0] + stampWidth[0]/2 + 1) > np.shape(imageArray[0])[1]:
#raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')
off_edge.append(True)
elif (centerCoords[0] - stampWidth[0]/2) < 0:
#raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')
off_edge.append(True)
elif (centerCoords[1] + stampWidth[1]/2 + 1) > np.shape(imageArray[0])[0]:
#raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')
off_edge.append(True)
elif (centerCoords[1] - stampWidth[1]/2) < 0:
#raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')
off_edge.append(True)
else:
off_edge.append(False)
i=0
for image in imageArray:
if off_edge[i] is False:
xmin = int(np.rint(measureCoords[i,1]-stampWidth[0]/2))
xmax = int(xmin + stampWidth[0])
ymin = int(np.rint(measureCoords[i,0]-stampWidth[1]/2))
ymax = int(ymin + stampWidth[1])
#print xmin, xmax, ymin, ymax
single_stamp = image[xmin:xmax, ymin:ymax]
single_stamp[np.isnan(single_stamp)] = 0.
single_stamp[np.isinf(single_stamp)] = 0.
stampImage += single_stamp
singleImagesArray.append(single_stamp)
else:
single_stamp = np.zeros((stampWidth))
singleImagesArray.append(single_stamp)
i+=1
return stampImage, singleImagesArray
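# Example (a sketch; the array names are assumed): coadd 25x25 stamps along a
# candidate trajectory starting at (x0, y0) with velocity (vx, vy) in px/hr.
#
#   coadd, stamps = analyzeImage().createPostageStamp(
#       masked_images, [x0, y0], [vx, vy], image_times, [25., 25.])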
def plotTrajectory(self, results_arr, image_times, raw_im,
im_plot_args=None, traj_plot_args=None):
"""
Plot an object's trajectory along a section of one of the
original masked images.
Parameters
----------
results_arr: numpy recarray, required
The results output from findObjects in searchImage.
image_times: numpy array, required
An array containing the image times in hours with the first image at
time 0.
raw_im: numpy array, required
One of the masked original images. See loadMaskedImages
in searchImage.py.
im_plot_args: dict, optional
Plotting arguments for the masked image.
traj_plot_args: dict, optional
Scatter plot arguments for the trajectory on top of masked image.
Returns
-------
ax: matplotlib axes instance
Returns instance after plt.imshow and plt.plot
"""
t0_pos = [results_arr['t0_x'], results_arr['t0_y']]
pixel_vel = [results_arr['v_x'], results_arr['v_y']]
coords = [np.array(t0_pos) +
np.array([pixel_vel[0]*it, pixel_vel[1]*it])
for it in image_times]
coords = np.array(coords)
default_im_plot_args = {'cmap': 'Greys_r', 'origin': 'lower'}
default_traj_plot_args = {'marker': 'o', 'c': 'r'}
if im_plot_args is not None:
default_im_plot_args.update(im_plot_args)
im_plot_args = default_im_plot_args
if traj_plot_args is not None:
default_traj_plot_args.update(traj_plot_args)
traj_plot_args = default_traj_plot_args
ax = plt.gca()
plt.imshow(raw_im, **im_plot_args)
plt.plot(coords[:, 0], coords[:, 1], **traj_plot_args)
plt.xlim((t0_pos[0]-25, t0_pos[0]+75))
plt.ylim((t0_pos[1]-25, t0_pos[1]+75))
return ax
def plotLightCurves(self, im_array, results_arr, image_times):
"""
Plots light curve of trajectory using array of masked images.
Parameters
----------
im_array: numpy array, required
The masked original images. See loadMaskedImages
in searchImage.py.
results_arr: numpy recarray, required
The results output from findObjects in searchImage.
image_times: numpy array, required
An array containing the image times in hours with the first image at
time 0.
Returns
-------
ax: matplotlib axes instance
The axes instance where the plt.plot of the lightcurve was drawn.
"""
coords = self.calc_traj_coords(results_arr, image_times)
aperture = self.createAperture([11,11], [5., 5.],
1., mask=False)
ax = plt.gca()
#plt.plot(image_times, [np.sum(im_array[x][coords[x,1]-5:coords[x,1]+6,
# coords[x,0]-5:coords[x,0]+6]*aperture)
plt.plot(image_times, [im_array[x][coords[x,1], coords[x,0]]
for x in range(0, len(image_times))], '-o')
ax.set_xlabel('Time (days)')
ax.set_ylabel('Flux')
return ax
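# Both plot helpers draw on the current matplotlib axes; a typical pattern
# (sketch, with assumed variable names) is:
#
#   ax = analyzeImage().plotTrajectory(best_result, image_times, masked_images[0])
#   plt.show()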
def calc_traj_coords(self, results_arr, image_times):
"""
Calculate the image coordinates of the trajectory of an object.
Parameters
----------
results_arr: numpy recarray, required
The results output from findObjects in searchImage.
image_times: numpy array, required
An array containing the image times in hours with the first image at
time 0.
Returns
-------
traj_coords: numpy array
The x,y coordinates of the trajectory in each image.
"""
t0_pos = [results_arr['t0_x'], results_arr['t0_y']]
pixel_vel = [results_arr['v_x'], results_arr['v_y']]
coords = [np.array(t0_pos) +
np.array([pixel_vel[0]*it, pixel_vel[1]*it])
for it in image_times]
traj_coords = np.array(coords, dtype=int)
return traj_coords
def clusterResults(self, results, dbscan_args=None):
"""
Use scikit-learn algorithm of density-based spatial clustering of
applications with noise (DBSCAN)
(http://scikit-learn.org/stable/modules/generated/
sklearn.cluster.DBSCAN.html)
to cluster the results of the likelihood image search using starting
location, total velocity and slope of trajectory.
Parameters
----------
results: numpy recarray, required
The results output from findObjects in searchImage.
dbscan_args: dict, optional
Additional arguments for the DBSCAN instance. See options in link
above.
Returns
-------
db_cluster: DBSCAN instance
DBSCAN instance with clustering completed. To get cluster labels use
db_cluster.labels_
top_vals: list of integers
The indices in the results array where the most likely object in each
cluster is located.
"""
default_dbscan_args = dict(eps=0.1, min_samples=1)
if dbscan_args is not None:
default_dbscan_args.update(dbscan_args)
dbscan_args = default_dbscan_args
slope_arr = []
intercept_arr = []
t0x_arr = []
t0y_arr = []
vel_total_arr = []
vx_arr = []
vel_x_arr = []
vel_y_arr = []
for target_num in range(len(results)):
t0x = results['t0_x'][target_num]
t0x_arr.append(t0x)
t0y = results['t0_y'][target_num]
t0y_arr.append(t0y)
v0x = results['v_x'][target_num]
vel_x_arr.append(v0x)
v0y = results['v_y'][target_num]
vel_y_arr.append(v0y)
db_cluster = DBSCAN(**dbscan_args)
scaled_t0x = t0x_arr - np.min(t0x_arr)
if np.max(scaled_t0x) > 0.:
scaled_t0x = scaled_t0x/np.max(scaled_t0x)
scaled_t0y = t0y_arr - np.min(t0y_arr)
if np.max(scaled_t0y) > 0.:
scaled_t0y = scaled_t0y/np.max(scaled_t0y)
scaled_vx = vel_x_arr - np.min(vel_x_arr)
if np.max(scaled_vx) > 0.:
scaled_vx /= np.max(scaled_vx)
scaled_vy = vel_y_arr - np.min(vel_y_arr)
if np.max(scaled_vy) > 0.:
scaled_vy /= np.max(scaled_vy)
db_cluster.fit(np.array([scaled_t0x, scaled_t0y,
scaled_vx, scaled_vy
], dtype=np.float).T)
top_vals = []
for cluster_num in np.unique(db_cluster.labels_):
cluster_vals = np.where(db_cluster.labels_ == cluster_num)[0]
top_vals.append(cluster_vals[0])
return db_cluster, top_vals
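# The clustering features are the min-max scaled starting positions and
# velocity components of each trajectory; top_vals keeps the first member of
# each DBSCAN label, which is the most likely candidate only if `results` is
# already sorted by decreasing likelihood (as the docstring assumes).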
def filter_results(self, im_array, results, image_times, model, psf_sigma=1.0,
batch_size = 32, chunk_size = 10000):
"""
Filter the search results, keeping only candidates classified as real
objects. The active implementation applies self.circularity_test to each
result in parallel via a thread pool; the earlier keras-model classifier
operating on coadded postage stamps is retained below as commented-out code.
Parameters
----------
im_array: numpy array, required
The masked original images. See loadMaskedImages
in searchImage.py.
        results: numpy recarray, required
            The results output from findObjects in searchImage.
        image_times: numpy array, required
            An array containing the image times in DAYS with the first image at
            time 0.
            Note: This is different from other methods, so the units here
            may change. Watch this documentation.
        model: keras model, required
            A previously trained model loaded from an hdf5 file.
            Note: the active code path below classifies stamps with
            circularity_test; the model is only used by the commented-out
            neural-network branch.
        psf_sigma: float, optional
            Gaussian sigma of the PSF in pixels, used by circularity_test.
        batch_size: int, optional
            Batch size for keras predict.
        chunk_size: int, optional
            Number of results per chunk, used for progress reporting and by
            the commented-out chunked branch.
        Returns
        -------
        filtered_results: numpy array
            An edited version of results with only the rows where
            true objects were classified.
"""
keep_objects = np.array([])
total_chunks = np.ceil(len(results)/float(chunk_size))
chunk_num = 1
circle_vals = []
enumerated_results = list(enumerate(results))
self.im_array = im_array
self.image_times = image_times
self.psf_sigma = psf_sigma
# for chunk_start in range(0, len(results), chunk_size):
# test_class = []
# p_stamp_arr = []
# #circle_chunk = []
# for imNum in range(chunk_start, chunk_start+chunk_size):
# try:
# p_stamp = self.createPostageStamp(im_array,
# list(results[['t0_x', 't0_y']][imNum]),
# np.array(list(results[['v_x', 'v_y']][imNum])),
# image_times, [25., 25.])[0]
# p_stamp = np.array(p_stamp)
# p_stamp[np.isnan(p_stamp)] = 0.
# p_stamp[np.isinf(p_stamp)] = 0.
# #p_stamp -= np.min(p_stamp)
# #p_stamp /= np.max(p_stamp)
# #p_stamp
# image_thresh = np.max(p_stamp)*0.5
# image = (p_stamp > image_thresh)*1.
# #pre_image = p_stamp > image_thresh
# #image = np.array(pre_image*1.)
# mom = measure.moments(image)
# cr = mom[0,1]/mom[0,0]
# cc = mom[1,0]/mom[0,0]
# #moments = measure.moments(image, order=3)
# #cr = moments[0,1]/moments[0,0]
# #cc = moments[1,0]/moments[0,0]
# cent_mom = measure.moments_central(image, cr, cc, order=4)
# norm_mom = measure.moments_normalized(cent_mom)
# hu_mom = measure.moments_hu(norm_mom)
# #p_stamp_arr.append(hu_mom)
# #print moments[0,0], measure.perimeter(image)
# #circularity = (4*np.pi*moments[0,0])/(measure.perimeter(image)**2.)
# #circularity = (cent_mom[0,0]**2.)/(2.*np.pi*(cent_mom[2,0] + cent_mom[0,2]))
# circularity = (1/(2.*np.pi))*(1/hu_mom[0])
# #circularity = (cent_mom[0,0]**2.)/(2*np.pi*(cent_mom[2,0] + cent_mom[0,2]))
# psf_sigma = psf_sigma
# gaussian_fwhm = psf_sigma*2.35
# fwhm_area = np.pi*(gaussian_fwhm/2.)**2.
# #print circularity, cr, cc
# if ((circularity > 0.6) & (cr > 10.) & (cr < 14.) & (cc > 10.) & (cc < 14.) &
# (cent_mom[0,0] < (9.0*fwhm_area)) & (cent_mom[0,0] > 3.0)): #Use 200% error margin on psf_sigma for now
# # test_class.append(1.)
# # print circularity, cr, cc, moments[0,0]
# #else:
# # test_class.append(0.)
# test_class.append(1.)
# else:
# test_class.append(0.)
# circle_vals.append([circularity, cr, cc, cent_mom[0,0], image_thresh])
# #print circularity, cr, cc, cent_mom[0,0], image_thresh
# except:
# #p_stamp_arr.append(np.ones((25, 25)))
# p_stamp_arr.append(np.zeros(7))
# test_class.append(0.)
# circle_vals.append([0., 0., 0., 0., 0.])
# continue
# p_stamp_arr = np.array(p_stamp_arr)#.reshape(chunk_size, 625)
#test_class = model.predict_classes(p_stamp_arr, batch_size=batch_size,
# verbose=1)
pool = Pool(nodes=8)
test_classes = pool.map(self.circularity_test, enumerated_results)
test_classes = np.array(test_classes).T
keep_idx = test_classes[0][np.where(np.array(test_classes[1]) > .5)]# + chunk_start
        print(keep_idx)
        #print np.where(np.array(test_class) > .5)
        print(test_classes[0][np.where(np.array(test_classes[1]) > .5)])
keep_objects = keep_idx#np.append(keep_objects, keep_idx)
#circle_vals[keep_idx] = np.array(circle_chunk)
print "Finished chunk %i of %i" % (chunk_num, total_chunks)
chunk_num += 1
# keep_objects = np.arange(len(results))
        filtered_results = results[np.array(keep_objects, dtype=int)]
#circle_vals = np.array(circle_vals)
#circle_vals_keep = circle_vals[np.array(keep_objects, dtype=np.int)]
return filtered_results#, circle_vals_keep
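    # Usage sketch (assumed variable names): with masked images, search results,
    # day-spaced times and a loaded keras model,
    # `kept = self.filter_results(im_array, results, image_times, model)`
    # returns only the rows that pass the circularity screen above.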
def circularity_test(self, result_row):#, im_array, image_times, psf_sigma):
im_array = self.im_array
if result_row[0] % 5000 == 0.:
            print(result_row[0])
try:
p_stamp = self.createPostageStamp(im_array,
list([result_row[1]['t0_x'],
result_row[1]['t0_y']]),
np.array(list([result_row[1]['v_x'],
result_row[1]['v_y']])),
self.image_times, [25., 25.])[0]
p_stamp = np.array(p_stamp)
p_stamp[np.isnan(p_stamp)] = 0.
p_stamp[np.isinf(p_stamp)] = 0.
#p_stamp -= np.min(p_stamp)
#p_stamp /= np.max(p_stamp)
#p_stamp
image_thresh = np.max(p_stamp)*0.5
image = (p_stamp > image_thresh)*1.
            rprop = measure.regionprops(np.array(image, dtype=int), intensity_image=p_stamp)[0]
label_test, max_label = measure.label(image, return_num=True)
max_conn = 0
keep_label = 1
            for label_num in range(1, max_label + 1):
if len(np.where(label_test == label_num)[0]) > max_conn:
max_conn = len(np.where(label_test == label_num)[0])
keep_label = label_num
image = (label_test == keep_label)*1.
#pre_image = p_stamp > image_thresh
#image = np.array(pre_image*1.)
mom = measure.moments(image)
if mom[0,0] > 0.:
cr = mom[0,1]/mom[0,0]
cc = mom[1,0]/mom[0,0]
#cr = 12
#cc = 12
#moments = measure.moments(image, order=3)
#cr = moments[0,1]/moments[0,0]
#cc = moments[1,0]/moments[0,0]
cent_mom = measure.moments_central(image, cr, cc, order=4)
norm_mom = measure.moments_normalized(cent_mom)
hu_mom = measure.moments_hu(norm_mom)
#p_stamp_arr.append(hu_mom)
#print moments[0,0], measure.perimeter(image)
#circularity = (4*np.pi*moments[0,0])/(measure.perimeter(image)**2.)
#circularity = (cent_mom[0,0]**2.)/(2.*np.pi*(cent_mom[2,0] + cent_mom[0,2]))
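                # For a filled disk the first Hu moment is 1/(2*pi), so the
                # ratio below is ~1 for circular blobs and smaller for
                # elongated ones.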
if hu_mom[0] > 0.:
#if rprop['perimeter'] > 0.:
circularity = (1/(2.*np.pi))*(1/hu_mom[0])
# circularity = (1/(2.*np.pi))*(1/rprop['weighted_moments_hu'][0])
# circularity = (4*np.pi*rprop['area'])/(rprop['perimeter']**2.)
else:
circularity = 0.
else:
circularity = 0.
#print result_row[0], circularity
#circularity = (cent_mom[0,0]**2.)/(2*np.pi*(cent_mom[2,0] + cent_mom[0,2]))
psf_sigma = self.psf_sigma
gaussian_fwhm = psf_sigma*2.35
fwhm_area = np.pi*(gaussian_fwhm/2.)**2.
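            # fwhm_area approximates the pixel area of a PSF-sized disk
            # (Gaussian FWHM = 2.355*sigma; 2.35 is used above). The size cut
            # below keeps blobs smaller than 9x this area and larger than
            # 4 pixels.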
wcr, wcc = rprop['weighted_centroid']
if ((circularity > 0.7) & (cr > 10.) & (cr < 14.) & (cc > 10.) & (cc < 14.) &
# if ((circularity > 0.4) & (circularity < 4.) & (cr > 10.) & (cr < 14.) & (cc > 10.) & (cc < 14.) &
(cent_mom[0,0] < (9.0*fwhm_area)) & (cent_mom[0,0] > 4.0)): #Use 200% error margin on psf_sigma for now
# test_class.append(1.)
# print circularity, cr, cc, cent_mom[0,0]
#else:
# test_class.append(0.)
test_class = 1.
#print circularity, cr, cc, cent_mom[0,0]
else:
test_class = 0.
        except Exception:
test_class = 0.
return [result_row[0], test_class]
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import warnings
import numpy as np
from math import pi
import unittest
import os
from monty.os.path import which
from pymatgen.analysis.local_env import ValenceIonicRadiusEvaluator, \
VoronoiNN, JmolNN, MinimumDistanceNN, OpenBabelNN, CovalentBondNN,\
MinimumOKeeffeNN, MinimumVIRENN, \
get_neighbors_of_site_with_index, site_is_of_motif_type, \
NearNeighbors, LocalStructOrderParams, BrunnerNN_reciprocal, \
BrunnerNN_real, BrunnerNN_relative, EconNN, CrystalNN, CutOffDictNN, \
Critic2NN, solid_angle
from pymatgen import Element, Molecule, Structure, Lattice
from pymatgen.util.testing import PymatgenTest
try:
import openbabel as ob
import pybel as pb
except ImportError:
pb = None
ob = None
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class ValenceIonicRadiusEvaluatorTest(PymatgenTest):
def setUp(self):
"""
Setup MgO rocksalt structure for testing Vacancy
"""
mgo_latt = [[4.212, 0, 0], [0, 4.212, 0], [0, 0, 4.212]]
mgo_specie = ["Mg"] * 4 + ["O"] * 4
mgo_frac_cord = [[0, 0, 0], [0.5, 0.5, 0], [0.5, 0, 0.5], [0, 0.5, 0.5],
[0.5, 0, 0], [0, 0.5, 0], [0, 0, 0.5], [0.5, 0.5, 0.5]]
self._mgo_uc = Structure(mgo_latt, mgo_specie, mgo_frac_cord, True,
True)
self._mgo_valrad_evaluator = ValenceIonicRadiusEvaluator(self._mgo_uc)
def test_valences_ionic_structure(self):
valence_dict = self._mgo_valrad_evaluator.valences
for val in list(valence_dict.values()):
self.assertTrue(val in {2, -2})
def test_radii_ionic_structure(self):
radii_dict = self._mgo_valrad_evaluator.radii
for rad in list(radii_dict.values()):
self.assertTrue(rad in {0.86, 1.26})
def tearDown(self):
del self._mgo_uc
del self._mgo_valrad_evaluator
class VoronoiNNTest(PymatgenTest):
def setUp(self):
self.s = self.get_structure('LiFePO4')
self.nn = VoronoiNN(targets=[Element("O")])
self.s_sic = self.get_structure('Si')
self.s_sic["Si"] = {'Si': 0.5, 'C': 0.5}
self.nn_sic = VoronoiNN()
def test_get_voronoi_polyhedra(self):
self.assertEqual(len(self.nn.get_voronoi_polyhedra(self.s, 0).items()), 8)
def test_get_cn(self):
self.assertAlmostEqual(self.nn.get_cn(
self.s, 0, use_weights=True), 5.809265748999465, 7)
self.assertAlmostEqual(self.nn_sic.get_cn(
self.s_sic, 0, use_weights=True), 4.5381161643940668, 7)
def test_get_coordinated_sites(self):
self.assertEqual(len(self.nn.get_nn(self.s, 0)), 8)
def test_volume(self):
self.nn.targets = None
volume = 0
for n in range(len(self.s)):
for nn in self.nn.get_voronoi_polyhedra(self.s, n).values():
volume += nn['volume']
self.assertAlmostEqual(self.s.volume, volume)
def test_solid_angle(self):
self.nn.targets = None
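        # The solid angles subtended by all Voronoi facets around a site
        # should tile the full sphere, i.e. sum to 4*pi steradians.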
for n in range(len(self.s)):
angle = 0
for nn in self.nn.get_voronoi_polyhedra(self.s, n).values():
angle += nn['solid_angle']
self.assertAlmostEqual(4 * np.pi, angle)
self.assertEqual(solid_angle([0,0,0], [[1,0,0],[-1,0,0],[0,1,0]]), pi)
def test_nn_shell(self):
# First, make a SC lattice. Make my math easier
s = Structure([[1, 0, 0], [0, 1, 0], [0, 0, 1]], ['Cu'], [[0, 0, 0]])
# Get the 1NN shell
self.nn.targets = None
nns = self.nn.get_nn_shell_info(s, 0, 1)
self.assertEqual(6, len(nns))
# Test the 2nd NN shell
nns = self.nn.get_nn_shell_info(s, 0, 2)
self.assertEqual(18, len(nns))
self.assertArrayAlmostEqual([1] * 6,
[x['weight'] for x in nns if
max(np.abs(x['image'])) == 2])
self.assertArrayAlmostEqual([2] * 12,
[x['weight'] for x in nns if
max(np.abs(x['image'])) == 1])
# Test the 3rd NN shell
nns = self.nn.get_nn_shell_info(s, 0, 3)
for nn in nns:
# Check that the coordinates were set correctly
self.assertArrayAlmostEqual(nn['site'].frac_coords, nn['image'])
# Test with a structure that has unequal faces
cscl = Structure(Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[2.1045, 2.1045, 2.1045], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.nn.weight = 'area'
nns = self.nn.get_nn_shell_info(cscl, 0, 1)
self.assertEqual(14, len(nns))
self.assertEqual(6, np.isclose([x['weight'] for x in nns],
0.125/0.32476).sum()) # Square faces
self.assertEqual(8, np.isclose([x['weight'] for x in nns], 1).sum())
nns = self.nn.get_nn_shell_info(cscl, 0, 2)
        # Weight of getting back onto its own site
# Square-square hop: 6*5 options times (0.125/0.32476)^2 weight each
# Hex-hex hop: 8*7 options times 1 weight each
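        # Total: 30 * (0.125/0.32476)**2 + 56 * 1 ~ 4.444 + 56 = 60.444,
        # matching the expected value below.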
self.assertAlmostEqual(60.4444,
np.sum([x['weight'] for x in nns if x['site_index'] == 0]),
places=3)
def test_adj_neighbors(self):
# Make a simple cubic structure
s = Structure([[1, 0, 0], [0, 1, 0], [0, 0, 1]], ['Cu'], [[0, 0, 0]])
# Compute the NNs with adjacency
self.nn.targets = None
neighbors = self.nn.get_voronoi_polyhedra(s, 0)
# Each neighbor has 4 adjacent neighbors, all orthogonal
for nn_key, nn_info in neighbors.items():
self.assertEqual(4, len(nn_info['adj_neighbors']))
for adj_key in nn_info['adj_neighbors']:
self.assertEqual(0, np.dot(nn_info['normal'], neighbors[adj_key]['normal']))
def test_all_at_once(self):
# Get all of the sites for LiFePO4
all_sites = self.nn.get_all_voronoi_polyhedra(self.s)
# Make sure they are the same as the single-atom ones
for i, site in enumerate(all_sites):
# Compute the tessellation using only one site
by_one = self.nn.get_voronoi_polyhedra(self.s, i)
            # Match the coordinates of the neighbors, as site matching does not seem to work?
all_coords = np.sort([x['site'].coords for x in site.values()], axis=0)
by_one_coords = np.sort([x['site'].coords for x in by_one.values()], axis=0)
self.assertArrayAlmostEqual(all_coords, by_one_coords)
# Test the nn_info operation
all_nn_info = self.nn.get_all_nn_info(self.s)
for i, info in enumerate(all_nn_info):
# Compute using the by-one method
by_one = self.nn.get_nn_info(self.s, i)
# Get the weights
all_weights = sorted([x['weight'] for x in info])
by_one_weights = sorted([x['weight'] for x in by_one])
self.assertArrayAlmostEqual(all_weights, by_one_weights)
def test_Cs2O(self):
"""A problematic structure in the Materials Project"""
strc = Structure([[4.358219, 0.192833, 6.406960], [2.114414, 3.815824, 6.406960],
[0.311360, 0.192833, 7.742498]],
['O', 'Cs', 'Cs'],
[[0, 0, 0], [0.264318, 0.264318, 0.264318], [0.735682, 0.735682, 0.735682]],
coords_are_cartesian=False)
# Compute the voronoi tessellation
result = VoronoiNN().get_all_voronoi_polyhedra(strc)
self.assertEqual(3, len(result))
def test_filtered(self):
nn = VoronoiNN(weight='area')
# Make a bcc crystal
bcc = Structure([[1, 0, 0], [0, 1, 0], [0, 0, 1]], ['Cu', 'Cu'],
[[0, 0, 0], [0.5, 0.5, 0.5]], coords_are_cartesian=False)
# Compute the weight of the little face
big_face_area = np.sqrt(3) * 3 / 2 * (2 / 4 / 4)
small_face_area = 0.125
little_weight = small_face_area / big_face_area
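        # In the bcc Voronoi cell (a truncated octahedron) the 8 hexagonal
        # faces point toward nearest neighbors and the 6 smaller square faces
        # toward second neighbors; with weight='area' the square faces get
        # weight little_weight relative to the largest (hexagonal) face.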
# Run one test where you get the small neighbors
nn.tol = little_weight * 0.99
nns = nn.get_nn_info(bcc, 0)
self.assertEqual(14, len(nns))
# Run a second test where we screen out little faces
nn.tol = little_weight * 1.01
nns = nn.get_nn_info(bcc, 0)
self.assertEqual(8, len(nns))
# Make sure it works for the `get_all` operation
all_nns = nn.get_all_nn_info(bcc * [2, 2, 2])
self.assertEqual([8,]*16, [len(x) for x in all_nns])
def tearDown(self):
del self.s
del self.nn
class JmolNNTest(PymatgenTest):
def setUp(self):
self.jmol = JmolNN()
self.jmol_update = JmolNN(el_radius_updates={"Li": 1})
def test_get_nn(self):
s = self.get_structure('LiFePO4')
# Test the default near-neighbor finder.
nsites_checked = 0
for site_idx, site in enumerate(s):
if site.specie == Element("Li"):
self.assertEqual(self.jmol.get_cn(s, site_idx), 0)
nsites_checked += 1
elif site.specie == Element("Fe"):
self.assertEqual(self.jmol.get_cn(s, site_idx), 6)
nsites_checked += 1
elif site.specie == Element("P"):
self.assertEqual(self.jmol.get_cn(s, site_idx), 4)
nsites_checked += 1
self.assertEqual(nsites_checked, 12)
# Test a user override that would cause Li to show up as 6-coordinated
self.assertEqual(self.jmol_update.get_cn(s, 0), 6)
# Verify get_nn function works
self.assertEqual(len(self.jmol_update.get_nn(s, 0)), 6)
def tearDown(self):
del self.jmol
del self.jmol_update
class OpenBabelNNTest(PymatgenTest):
def setUp(self):
self.benzene = Molecule.from_file(os.path.join(test_dir, "benzene.xyz"))
self.acetylene = Molecule.from_file(os.path.join(test_dir, "acetylene.xyz"))
@unittest.skipIf((not (ob and pb)) or (not which("babel")),
"OpenBabel not installed.")
def test_nn_orders(self):
strat = OpenBabelNN()
acetylene = strat.get_nn_info(self.acetylene, 0)
self.assertEqual(acetylene[0]["weight"], 3)
self.assertEqual(acetylene[1]["weight"], 1)
# Currently, benzene bonds register either as double or single,
# not aromatic
# Instead of searching for aromatic bonds, we check that bonds are
# detected in the same way from both sides
self.assertEqual(strat.get_nn_info(self.benzene, 0)[0]["weight"],
strat.get_nn_info(self.benzene, 1)[0]["weight"])
@unittest.skipIf((not (ob and pb)) or (not which("babel")),
"OpenBabel not installed.")
def test_nn_length(self):
strat = OpenBabelNN(order=False)
benzene_bonds = strat.get_nn_info(self.benzene, 0)
c_bonds = [b for b in benzene_bonds if str(b["site"].specie) == "C"]
h_bonds = [b for b in benzene_bonds if str(b["site"].specie) == "H"]
self.assertAlmostEqual(c_bonds[0]["weight"], 1.41, 2)
self.assertAlmostEqual(h_bonds[0]["weight"], 1.02, 2)
self.assertAlmostEqual(strat.get_nn_info(self.acetylene, 0)[0]["weight"],
1.19,
2)
def tearDown(self):
del self.benzene
del self.acetylene
class CovalentBondNNTest(PymatgenTest):
def setUp(self):
self.benzene = Molecule.from_file(os.path.join(test_dir, "benzene.xyz"))
self.acetylene = Molecule.from_file(os.path.join(test_dir, "acetylene.xyz"))
def test_nn_orders(self):
strat = CovalentBondNN()
acetylene = strat.get_nn_info(self.acetylene, 0)
self.assertEqual(acetylene[0]["weight"], 3)
self.assertEqual(acetylene[1]["weight"], 1)
benzene = strat.get_nn_info(self.benzene, 0)
self.assertAlmostEqual(benzene[0]["weight"], 1.6596, places=4)
def test_nn_length(self):
strat = CovalentBondNN(order=False)
benzene_bonds = strat.get_nn_info(self.benzene, 0)
c_bonds = [b for b in benzene_bonds if str(b["site"].specie) == "C"]
h_bonds = [b for b in benzene_bonds if str(b["site"].specie) == "H"]
self.assertAlmostEqual(c_bonds[0]["weight"], 1.41, 2)
self.assertAlmostEqual(h_bonds[0]["weight"], 1.02, 2)
acetylene = strat.get_nn_info(self.acetylene, 0)
self.assertAlmostEqual(acetylene[0]["weight"], 1.19, places=2)
def tearDown(self):
del self.benzene
del self.acetylene
class MiniDistNNTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.nacl = Structure(
Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012],
[0, 0, 4.025]]), ["Na1+", "Cl1-"], [[0, 0, 0],
[2.324, 1.643, 4.025]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.cscl = Structure(
Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[2.105, 2.105, 2.105], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.mos2 = Structure(
Lattice([[3.19, 0, 0], [-1.595, 2.763, 0], [0, 0, 17.44]]),
['Mo', 'S', 'S'], [[-1e-06, 1.842, 3.72], [1.595, 0.92, 5.29], \
[1.595, 0.92, 2.155]], coords_are_cartesian=True)
def test_all_nn_classes(self):
self.assertAlmostEqual(MinimumDistanceNN(cutoff=5, get_all_sites=True).get_cn(
self.cscl, 0), 14)
self.assertAlmostEqual(MinimumDistanceNN().get_cn(
self.diamond, 0), 4)
self.assertAlmostEqual(MinimumDistanceNN().get_cn(
self.nacl, 0), 6)
self.assertAlmostEqual(MinimumDistanceNN(tol=0.01).get_cn(
self.cscl, 0), 8)
self.assertAlmostEqual(MinimumDistanceNN(tol=0.1).get_cn(
self.mos2, 0), 6)
for image in MinimumDistanceNN(tol=0.1).get_nn_images(self.mos2, 0):
self.assertTrue(image in [(0, 0, 0), (0, 1, 0), (-1, 0, 0),
(0, 0, 0), (0, 1, 0), (-1, 0, 0)])
self.assertAlmostEqual(MinimumOKeeffeNN(tol=0.01).get_cn(
self.diamond, 0), 4)
self.assertAlmostEqual(MinimumOKeeffeNN(tol=0.01).get_cn(
self.nacl, 0), 6)
self.assertAlmostEqual(MinimumOKeeffeNN(tol=0.01).get_cn(
self.cscl, 0), 8)
self.assertAlmostEqual(MinimumVIRENN(tol=0.01).get_cn(
self.diamond, 0), 4)
self.assertAlmostEqual(MinimumVIRENN(tol=0.01).get_cn(
self.nacl, 0), 6)
self.assertAlmostEqual(MinimumVIRENN(tol=0.01).get_cn(
self.cscl, 0), 8)
self.assertAlmostEqual(BrunnerNN_reciprocal(tol=0.01).get_cn(
self.diamond, 0), 4)
self.assertAlmostEqual(BrunnerNN_reciprocal(tol=0.01).get_cn(
self.nacl, 0), 6)
self.assertAlmostEqual(BrunnerNN_reciprocal(tol=0.01).get_cn(
self.cscl, 0), 14)
self.assertAlmostEqual(BrunnerNN_relative(tol=0.01).get_cn(
self.diamond, 0), 16)
self.assertAlmostEqual(BrunnerNN_relative(tol=0.01).get_cn(
self.nacl, 0), 18)
self.assertAlmostEqual(BrunnerNN_relative(tol=0.01).get_cn(
self.cscl, 0), 8)
self.assertAlmostEqual(BrunnerNN_real(tol=0.01).get_cn(
self.diamond, 0), 16)
self.assertAlmostEqual(BrunnerNN_real(tol=0.01).get_cn(
self.nacl, 0), 18)
self.assertAlmostEqual(BrunnerNN_real(tol=0.01).get_cn(
self.cscl, 0), 8)
self.assertAlmostEqual(EconNN(tol=0.01).get_cn(
self.diamond, 0), 4)
self.assertAlmostEqual(EconNN(tol=0.01).get_cn(
self.nacl, 0), 6)
self.assertAlmostEqual(EconNN(tol=0.01).get_cn(
self.cscl, 0), 14)
self.assertAlmostEqual(VoronoiNN(tol=0.5).get_cn(
self.diamond, 0), 4)
self.assertAlmostEqual(VoronoiNN(tol=0.5).get_cn(
self.nacl, 0), 6)
self.assertAlmostEqual(VoronoiNN(tol=0.5).get_cn(
self.cscl, 0), 8)
def test_get_local_order_params(self):
nn = MinimumDistanceNN()
ops = nn.get_local_order_parameters(self.diamond, 0)
self.assertAlmostEqual(ops['tetrahedral'], 0.9999934389036574)
ops = nn.get_local_order_parameters(self.nacl, 0)
self.assertAlmostEqual(ops['octahedral'], 0.9999995266669)
def tearDown(self):
del self.diamond
del self.nacl
del self.cscl
del self.mos2
class MotifIdentificationTest(PymatgenTest):
def setUp(self):
self.silicon = Structure(
Lattice.from_lengths_and_angles(
[5.47, 5.47, 5.47],
[90.0, 90.0, 90.0]),
["Si", "Si", "Si", "Si", "Si", "Si", "Si", "Si"],
[[0.000000, 0.000000, 0.500000],
[0.750000, 0.750000, 0.750000],
[0.000000, 0.500000, 1.000000],
[0.750000, 0.250000, 0.250000],
[0.500000, 0.000000, 1.000000],
[0.250000, 0.750000, 0.250000],
[0.500000, 0.500000, 0.500000],
[0.250000, 0.250000, 0.750000]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.nacl = Structure(
Lattice([[3.485, 0, 2.012], [1.162, 3.286, 2.012],
[0, 0, 4.025]]), ["Na1+", "Cl1-"], [[0, 0, 0],
[2.324, 1.643, 4.025]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.cscl = Structure(
Lattice([[4.209, 0, 0], [0, 4.209, 0], [0, 0, 4.209]]),
["Cl1-", "Cs1+"], [[2.105, 2.105, 2.105], [0, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.square_pyramid = Structure(
Lattice([[100, 0, 0], [0, 100, 0], [0, 0, 100]]),
["C", "C", "C", "C", "C", "C"], [
[0, 0, 0], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0], \
[0, 0, 1]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.trigonal_bipyramid = Structure(
Lattice([[100, 0, 0], [0, 100, 0], [0, 0, 100]]),
["P", "Cl", "Cl", "Cl", "Cl", "Cl"], [
[0, 0, 0], [0, 0, 2.14], [0, 2.02, 0], [1.74937, -1.01, 0], \
[-1.74937, -1.01, 0], [0, 0, -2.14]], validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
def test_site_is_of_motif_type(self):
for i in range(self.diamond.num_sites):
self.assertEqual(site_is_of_motif_type(
self.diamond, i), "tetrahedral")
for i in range(self.nacl.num_sites):
self.assertEqual(site_is_of_motif_type(
self.nacl, i), "octahedral")
for i in range(self.cscl.num_sites):
self.assertEqual(site_is_of_motif_type(
self.cscl, i), "bcc")
self.assertEqual(site_is_of_motif_type(
self.square_pyramid, 0), "square pyramidal")
for i in range(1, self.square_pyramid.num_sites):
self.assertEqual(site_is_of_motif_type(
self.square_pyramid, i), "unrecognized")
self.assertEqual(site_is_of_motif_type(
self.trigonal_bipyramid, 0), "trigonal bipyramidal")
for i in range(1, self.trigonal_bipyramid.num_sites):
self.assertEqual(site_is_of_motif_type(
self.trigonal_bipyramid, i), "unrecognized")
def test_get_neighbors_of_site_with_index(self):
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0)), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.nacl, 0)), 6)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.cscl, 0)), 8)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, delta=0.01)), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, cutoff=6)), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, approach="voronoi")), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, approach="min_OKeeffe")), 4)
self.assertEqual(len(get_neighbors_of_site_with_index(
self.diamond, 0, approach="min_VIRE")), 4)
def tearDown(self):
del self.silicon
del self.diamond
del self.nacl
del self.cscl
class NearNeighborTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264],
[0, 0, 2.528]]), ["C0+", "C0+"], [[2.554, 1.806, 4.423],
[0.365, 0.258, 0.632]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
def set_nn_info(self):
        # check conformance
        # implicitly assumes that all NearNeighbors subclasses
        # will correctly identify bonds in diamond; if one can't,
        # there are probably bigger problems
subclasses = NearNeighbors.__subclasses__()
for subclass in subclasses:
# Critic2NN has external dependency, is tested separately
if 'Critic2' not in str(subclass):
nn_info = subclass().get_nn_info(self.diamond, 0)
self.assertEqual(nn_info[0]['site_index'], 1)
self.assertEqual(nn_info[0]['image'][0], 1)
def tearDown(self):
del self.diamond
class LocalStructOrderParamsTest(PymatgenTest):
def setUp(self):
self.single_bond = Structure(
Lattice.from_lengths_and_angles(
[10, 10, 10], [90, 90, 90]),
["H", "H", "H"], [[1, 0, 0], [0, 0, 0], [6, 0, 0]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.linear = Structure(
Lattice.from_lengths_and_angles(
[10, 10, 10], [90, 90, 90]),
["H", "H", "H"], [[1, 0, 0], [0, 0, 0], [2, 0, 0]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.bent45 = Structure(
Lattice.from_lengths_and_angles(
[10, 10, 10], [90, 90, 90]), ["H", "H", "H"],
[[0, 0, 0], [0.707, 0.707, 0], [0.707, 0, 0]],
validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=True,
site_properties=None)
self.cubic = Structure(
Lattice.from_lengths_and_angles(
[1, 1, 1], [90, 90, 90]),
["H"], [[0, 0, 0]], validate_proximity=False,
to_unit_cell=False, coords_are_cartesian=False,
site_properties=None)
self.bcc = Structure(
Lattice.from_lengths_and_angles(
[1, 1, 1], [90, 90, 90]),
["H", "H"], [[0, 0, 0], [0.5, 0.5, 0.5]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.fcc = Structure(
Lattice.from_lengths_and_angles(
[1, 1, 1], [90, 90, 90]), ["H", "H", "H", "H"],
[[0, 0, 0], [0, 0.5, 0.5], [0.5, 0, 0.5], [0.5, 0.5, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.hcp = Structure(
Lattice.from_lengths_and_angles(
[1, 1, 1.633], [90, 90, 120]), ["H", "H"],
[[0.3333, 0.6667, 0.25], [0.6667, 0.3333, 0.75]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.diamond = Structure(
Lattice.from_lengths_and_angles(
[1, 1, 1], [90, 90, 90]), ["H", "H", "H", "H", "H", "H", "H", "H"],
[[0, 0, 0.5], [0.75, 0.75, 0.75], [0, 0.5, 0], [0.75, 0.25, 0.25],
[0.5, 0, 0], [0.25, 0.75, 0.25], [0.5, 0.5, 0.5],
[0.25, 0.25, 0.75]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=False, site_properties=None)
self.trigonal_off_plane = Structure(
Lattice.from_lengths_and_angles(
[100, 100, 100], [90, 90, 90]),
["H", "H", "H", "H"],
[[0.50, 0.50, 0.50], [0.25, 0.75, 0.25], \
[0.25, 0.25, 0.75], [0.75, 0.25, 0.25]], \
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.regular_triangle = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H"],
[[15, 15.28867, 15.65], [14.5, 15, 15], [15.5, 15, 15], \
[15, 15.866, 15]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.trigonal_planar = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H"],
[[15, 15.28867, 15], [14.5, 15, 15], [15.5, 15, 15], \
[15, 15.866, 15]], validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.square_planar = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H", "H"],
[[15, 15, 15], [14.75, 14.75, 15], [14.75, 15.25, 15], \
[15.25, 14.75, 15], [15.25, 15.25, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.square = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H", "H"],
[[15, 15, 15.707], [14.75, 14.75, 15], [14.75, 15.25, 15], \
[15.25, 14.75, 15], [15.25, 15.25, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.T_shape = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H"],
[[15, 15, 15], [15, 15, 15.5], [15, 15.5, 15],
[15, 14.5, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.square_pyramid = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["H", "H", "H", "H", "H", "H"],
[[15, 15, 15], [15, 15, 15.3535], [14.75, 14.75, 15],
[14.75, 15.25, 15], [15.25, 14.75, 15], [15.25, 15.25, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.pentagonal_planar = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["Xe", "F", "F", "F", "F", "F"],
[[0, -1.6237, 0], [1.17969, 0, 0], [-1.17969, 0, 0], \
[1.90877, -2.24389, 0], [-1.90877, -2.24389, 0], [0, -3.6307, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.pentagonal_pyramid = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["Xe", "F", "F", "F", "F", "F", "F"],
[[0, -1.6237, 0], [0, -1.6237, 1.17969], [1.17969, 0, 0], \
[-1.17969, 0, 0], [1.90877, -2.24389, 0], \
[-1.90877, -2.24389, 0], [0, -3.6307, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.pentagonal_bipyramid = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]),
["Xe", "F", "F", "F", "F", "F", "F", "F"],
[[0, -1.6237, 0], [0, -1.6237, -1.17969], \
[0, -1.6237, 1.17969], [1.17969, 0, 0], \
[-1.17969, 0, 0], [1.90877, -2.24389, 0], \
[-1.90877, -2.24389, 0], [0, -3.6307, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.hexagonal_planar = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]),
["H", "C", "C", "C", "C", "C", "C"],
[[0, 0, 0], [0.71, 1.2298, 0],
[-0.71, 1.2298, 0], [0.71, -1.2298, 0], [-0.71, -1.2298, 0],
[1.4199, 0, 0], [-1.4199, 0, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.hexagonal_pyramid = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), \
["H", "Li", "C", "C", "C", "C", "C", "C"],
[[0, 0, 0], [0, 0, 1.675], [0.71, 1.2298, 0], \
[-0.71, 1.2298, 0], [0.71, -1.2298, 0], [-0.71, -1.2298, 0], \
[1.4199, 0, 0], [-1.4199, 0, 0]], \
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.hexagonal_bipyramid = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), \
["H", "Li", "Li", "C", "C", "C", "C", "C", "C"],
[[0, 0, 0], [0, 0, 1.675], [0, 0, -1.675], \
[0.71, 1.2298, 0], [-0.71, 1.2298, 0], \
[0.71, -1.2298, 0], [-0.71, -1.2298, 0], \
[1.4199, 0, 0], [-1.4199, 0, 0]], \
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.trigonal_pyramid = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["P", "Cl", "Cl", "Cl", "Cl"],
[[0, 0, 0], [0, 0, 2.14], [0, 2.02, 0],
[1.74937, -1.01, 0], [-1.74937, -1.01, 0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.trigonal_bipyramidal = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]), ["P", "Cl", "Cl", "Cl", "Cl", "Cl"],
[[0, 0, 0], [0, 0, 2.14], [0, 2.02, 0],
[1.74937, -1.01, 0], [-1.74937, -1.01, 0], [0, 0, -2.14]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.cuboctahedron = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]),
["H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H", "H"],
[[15, 15, 15], [15, 14.5, 14.5], [15, 14.5, 15.5],
[15, 15.5, 14.5], [15, 15.5, 15.5],
[14.5, 15, 14.5], [14.5, 15, 15.5], [15.5, 15, 14.5], [15.5, 15, 15.5],
[14.5, 14.5, 15], [14.5, 15.5, 15], [15.5, 14.5, 15], [15.5, 15.5, 15]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.see_saw_rect = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]),
["H", "H", "H", "H", "H"],
[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, -1.0 , 0.0],
[0.0, 0.0, -1.0], [-1.0, 0.0, 0.0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
self.sq_face_capped_trig_pris = Structure(
Lattice.from_lengths_and_angles(
[30, 30, 30], [90, 90, 90]),
["H", "H", "H", "H", "H", "H", "H", "H"],
[[0, 0, 0], [-0.6546536707079771, -0.37796447300922725, 0.6546536707079771],
[0.6546536707079771, -0.37796447300922725, 0.6546536707079771],
[0.0, 0.7559289460184545, 0.6546536707079771],
[-0.6546536707079771, -0.37796447300922725, -0.6546536707079771],
[0.6546536707079771, -0.37796447300922725, -0.6546536707079771],
[0.0, 0.7559289460184545, -0.6546536707079771], [0.0, -1.0, 0.0]],
validate_proximity=False, to_unit_cell=False,
coords_are_cartesian=True, site_properties=None)
def test_init(self):
self.assertIsNotNone(
LocalStructOrderParams(["cn"], parameters=None, cutoff=0.99))
parameters = [{'norm': 2}]
lostops = LocalStructOrderParams(["cn"], parameters=parameters)
tmp = lostops.get_parameters(0)
parameters[0]['norm'] = 3
self.assertEqual(tmp, lostops.get_parameters(0))
def test_get_order_parameters(self):
# Set up everything.
op_types = ["cn", "bent", "bent", "tet", "oct", "bcc", "q2", "q4", \
"q6", "reg_tri", "sq", "sq_pyr_legacy", "tri_bipyr", "sgl_bd", \
"tri_plan", "sq_plan", "pent_plan", "sq_pyr", "tri_pyr", \
"pent_pyr", "hex_pyr", "pent_bipyr", "hex_bipyr", "T", "cuboct", \
"see_saw_rect", "hex_plan_max", "tet_max", "oct_max", "tri_plan_max", "sq_plan_max", \
"pent_plan_max", "cuboct_max", "tet_max", "sq_face_cap_trig_pris"]
op_params = [None for i in range(len(op_types))]
op_params[1] = {'TA': 1, 'IGW_TA': 1./0.0667}
op_params[2] = {'TA': 45./180, 'IGW_TA': 1./0.0667}
op_params[33] = {'TA': 0.6081734479693927, 'IGW_TA': 18.33, "fac_AA": 1.5, "exp_cos_AA": 2}
ops_044 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.44)
ops_071 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.71)
ops_087 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.87)
ops_099 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=0.99)
ops_101 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=1.01)
ops_501 = LocalStructOrderParams(op_types, parameters=op_params, cutoff=5.01)
ops_voro = LocalStructOrderParams(op_types, parameters=op_params)
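        # The int(op * 1000) comparisons below check each order parameter to
        # roughly three decimal places; a value of 1000 means the motif is a
        # perfect match (order parameter ~1.0).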
# Single bond.
op_vals = ops_101.get_order_parameters(self.single_bond, 0)
self.assertAlmostEqual(int(op_vals[13] * 1000), 1000)
op_vals = ops_501.get_order_parameters(self.single_bond, 0)
self.assertAlmostEqual(int(op_vals[13] * 1000), 799)
op_vals = ops_101.get_order_parameters(self.linear, 0)
self.assertAlmostEqual(int(op_vals[13] * 1000), 0)
# Linear motif.
op_vals = ops_101.get_order_parameters(self.linear, 0)
self.assertAlmostEqual(int(op_vals[1] * 1000), 1000)
# 45 degrees-bent motif.
op_vals = ops_101.get_order_parameters(self.bent45, 0)
self.assertAlmostEqual(int(op_vals[2] * 1000), 1000)
# T-shape motif.
op_vals = ops_101.get_order_parameters(
self.T_shape, 0, indices_neighs=[1,2,3])
self.assertAlmostEqual(int(op_vals[23] * 1000), 1000)
# Cubic structure.
op_vals = ops_099.get_order_parameters(self.cubic, 0)
self.assertAlmostEqual(op_vals[0], 0.0)
self.assertIsNone(op_vals[3])
self.assertIsNone(op_vals[4])
self.assertIsNone(op_vals[5])
self.assertIsNone(op_vals[6])
self.assertIsNone(op_vals[7])
self.assertIsNone(op_vals[8])
op_vals = ops_101.get_order_parameters(self.cubic, 0)
self.assertAlmostEqual(op_vals[0], 6.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 23)
self.assertAlmostEqual(int(op_vals[4] * 1000), 1000)
self.assertAlmostEqual(int(op_vals[5] * 1000), 333)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 763)
self.assertAlmostEqual(int(op_vals[8] * 1000), 353)
self.assertAlmostEqual(int(op_vals[28] * 1000), 1000)
# Bcc structure.
op_vals = ops_087.get_order_parameters(self.bcc, 0)
self.assertAlmostEqual(op_vals[0], 8.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 200)
self.assertAlmostEqual(int(op_vals[4] * 1000), 145)
self.assertAlmostEqual(int(op_vals[5] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 509)
self.assertAlmostEqual(int(op_vals[8] * 1000), 628)
# Fcc structure.
op_vals = ops_071.get_order_parameters(self.fcc, 0)
self.assertAlmostEqual(op_vals[0], 12.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 36)
self.assertAlmostEqual(int(op_vals[4] * 1000), 78)
self.assertAlmostEqual(int(op_vals[5] * 1000), -2)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 190)
self.assertAlmostEqual(int(op_vals[8] * 1000), 574)
# Hcp structure.
op_vals = ops_101.get_order_parameters(self.hcp, 0)
self.assertAlmostEqual(op_vals[0], 12.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 33)
self.assertAlmostEqual(int(op_vals[4] * 1000), 82)
self.assertAlmostEqual(int(op_vals[5] * 1000), -26)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 97)
self.assertAlmostEqual(int(op_vals[8] * 1000), 484)
# Diamond structure.
op_vals = ops_044.get_order_parameters(self.diamond, 0)
self.assertAlmostEqual(op_vals[0], 4.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 1000)
self.assertAlmostEqual(int(op_vals[4] * 1000), 37)
self.assertAlmostEqual(op_vals[5], 0.75)
self.assertAlmostEqual(int(op_vals[6] * 1000), 0)
self.assertAlmostEqual(int(op_vals[7] * 1000), 509)
self.assertAlmostEqual(int(op_vals[8] * 1000), 628)
self.assertAlmostEqual(int(op_vals[27] * 1000), 1000)
# Trigonal off-plane molecule.
op_vals = ops_044.get_order_parameters(self.trigonal_off_plane, 0)
self.assertAlmostEqual(op_vals[0], 3.0)
self.assertAlmostEqual(int(op_vals[3] * 1000), 1000)
self.assertAlmostEqual(int(op_vals[33] * 1000), 1000)
# Trigonal-planar motif.
op_vals = ops_101.get_order_parameters(self.trigonal_planar, 0)
self.assertEqual(int(op_vals[0] + 0.5), 3)
self.assertAlmostEqual(int(op_vals[14] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[29] * 1000 + 0.5), 1000)
# Regular triangle motif.
op_vals = ops_101.get_order_parameters(self.regular_triangle, 0)
self.assertAlmostEqual(int(op_vals[9] * 1000), 999)
# Square-planar motif.
op_vals = ops_101.get_order_parameters(self.square_planar, 0)
self.assertAlmostEqual(int(op_vals[15] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[30] * 1000 + 0.5), 1000)
# Square motif.
op_vals = ops_101.get_order_parameters(self.square, 0)
self.assertAlmostEqual(int(op_vals[10] * 1000), 1000)
# Pentagonal planar.
op_vals = ops_101.get_order_parameters(
self.pentagonal_planar.sites, 0, indices_neighs=[1,2,3,4,5])
self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 126)
self.assertAlmostEqual(int(op_vals[16] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[31] * 1000 + 0.5), 1000)
# Trigonal pyramid motif.
op_vals = ops_101.get_order_parameters(
self.trigonal_pyramid, 0, indices_neighs=[1,2,3,4])
self.assertAlmostEqual(int(op_vals[18] * 1000 + 0.5), 1000)
# Square pyramid motif.
op_vals = ops_101.get_order_parameters(self.square_pyramid, 0)
self.assertAlmostEqual(int(op_vals[11] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 667)
self.assertAlmostEqual(int(op_vals[17] * 1000 + 0.5), 1000)
# Pentagonal pyramid motif.
op_vals = ops_101.get_order_parameters(
self.pentagonal_pyramid, 0, indices_neighs=[1,2,3,4,5,6])
self.assertAlmostEqual(int(op_vals[19] * 1000 + 0.5), 1000)
# Hexagonal pyramid motif.
op_vals = ops_101.get_order_parameters(
self.hexagonal_pyramid, 0, indices_neighs=[1,2,3,4,5,6,7])
self.assertAlmostEqual(int(op_vals[20] * 1000 + 0.5), 1000)
# Trigonal bipyramidal.
op_vals = ops_101.get_order_parameters(
self.trigonal_bipyramidal.sites, 0, indices_neighs=[1,2,3,4,5])
self.assertAlmostEqual(int(op_vals[12] * 1000 + 0.5), 1000)
# Pentagonal bipyramidal.
op_vals = ops_101.get_order_parameters(
self.pentagonal_bipyramid.sites, 0,
indices_neighs=[1,2,3,4,5,6,7])
self.assertAlmostEqual(int(op_vals[21] * 1000 + 0.5), 1000)
# Hexagonal bipyramid motif.
op_vals = ops_101.get_order_parameters(
self.hexagonal_bipyramid, 0, indices_neighs=[1,2,3,4,5,6,7,8])
self.assertAlmostEqual(int(op_vals[22] * 1000 + 0.5), 1000)
# Cuboctahedral motif.
op_vals = ops_101.get_order_parameters(
self.cuboctahedron, 0, indices_neighs=[i for i in range(1, 13)])
self.assertAlmostEqual(int(op_vals[24] * 1000 + 0.5), 1000)
self.assertAlmostEqual(int(op_vals[32] * 1000 + 0.5), 1000)
# See-saw motif.
op_vals = ops_101.get_order_parameters(
self.see_saw_rect, 0, indices_neighs=[i for i in range(1, 5)])
self.assertAlmostEqual(int(op_vals[25] * 1000 + 0.5), 1000)
# Hexagonal planar motif.
op_vals = ops_101.get_order_parameters(
self.hexagonal_planar, 0, indices_neighs=[1,2,3,4,5,6])
self.assertAlmostEqual(int(op_vals[26] * 1000 + 0.5), 1000)
# Square face capped trigonal prism.
op_vals = ops_101.get_order_parameters(
self.sq_face_capped_trig_pris, 0,
indices_neighs=[i for i in range(1, 8)])
self.assertAlmostEqual(int(op_vals[34] * 1000 + 0.5), 1000)
# Test providing explicit neighbor lists.
op_vals = ops_101.get_order_parameters(self.bcc, 0, indices_neighs=[1])
self.assertIsNotNone(op_vals[0])
self.assertIsNone(op_vals[3])
with self.assertRaises(ValueError):
ops_101.get_order_parameters(self.bcc, 0, indices_neighs=[2])
def tearDown(self):
del self.single_bond
del self.linear
del self.bent45
del self.cubic
del self.fcc
del self.bcc
del self.hcp
del self.diamond
del self.regular_triangle
del self.square
del self.square_pyramid
del self.trigonal_off_plane
del self.trigonal_pyramid
del self.trigonal_planar
del self.square_planar
del self.pentagonal_pyramid
del self.hexagonal_pyramid
del self.pentagonal_bipyramid
del self.T_shape
del self.cuboctahedron
del self.see_saw_rect
class CrystalNNTest(PymatgenTest):
def setUp(self):
self.lifepo4 = self.get_structure('LiFePO4')
self.lifepo4.add_oxidation_state_by_guess()
self.he_bcc = self.get_structure('He_BCC')
self.he_bcc.add_oxidation_state_by_guess()
self.prev_warnings = warnings.filters
warnings.simplefilter("ignore")
def tearDown(self):
warnings.filters = self.prev_warnings
def test_sanity(self):
with self.assertRaises(ValueError):
cnn = CrystalNN()
cnn.get_cn(self.lifepo4, 0, use_weights=True)
with self.assertRaises(ValueError):
cnn = CrystalNN(weighted_cn=True)
cnn.get_cn(self.lifepo4, 0, use_weights=False)
def test_discrete_cn(self):
cnn = CrystalNN()
cn_array = []
expected_array = [6, 6, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4]
for idx, _ in enumerate(self.lifepo4):
cn_array.append(cnn.get_cn(self.lifepo4, idx))
self.assertSequenceEqual(cn_array, expected_array)
def test_weighted_cn(self):
cnn = CrystalNN(weighted_cn=True)
cn_array = []
expected_array = [5.863, 5.8716, 5.863 , 5.8716, 5.7182, 5.7182, 5.719,
5.7181, 3.991 , 3.991 , 3.991 , 3.9907, 3.5997, 3.525,
3.4133, 3.4714, 3.4727, 3.4133, 3.525 , 3.5997,
3.5997, 3.525 , 3.4122, 3.4738, 3.4728, 3.4109,
3.5259, 3.5997]
for idx, _ in enumerate(self.lifepo4):
cn_array.append(cnn.get_cn(self.lifepo4, idx, use_weights=True))
self.assertArrayAlmostEqual(expected_array, cn_array, 2)
def test_weighted_cn_no_oxid(self):
cnn = CrystalNN(weighted_cn=True)
cn_array = []
expected_array = [5.8962, 5.8996, 5.8962, 5.8996, 5.7195, 5.7195,
5.7202, 5.7194, 4.0012, 4.0012, 4.0012, 4.0009,
3.3897, 3.2589, 3.1218, 3.1914, 3.1914, 3.1218,
3.2589, 3.3897, 3.3897, 3.2589, 3.1207, 3.1924,
3.1915, 3.1207, 3.2598, 3.3897]
s = self.lifepo4.copy()
s.remove_oxidation_states()
for idx, _ in enumerate(s):
cn_array.append(cnn.get_cn(s, idx, use_weights=True))
self.assertArrayAlmostEqual(expected_array, cn_array, 2)
def test_fixed_length(self):
cnn = CrystalNN(fingerprint_length=30)
nndata = cnn.get_nn_data(self.lifepo4, 0)
self.assertEqual(len(nndata.cn_weights), 30)
self.assertEqual(len(nndata.cn_nninfo), 30)
def test_cation_anion(self):
cnn = CrystalNN(weighted_cn=True, cation_anion=True)
self.assertAlmostEqual(cnn.get_cn(self.lifepo4, 0, use_weights=True),
5.8630, 2)
def test_x_diff_weight(self):
cnn = CrystalNN(weighted_cn=True, x_diff_weight=0)
self.assertAlmostEqual(cnn.get_cn(self.lifepo4, 0, use_weights=True),
5.8630, 2)
def test_noble_gas_material(self):
cnn = CrystalNN()
self.assertEqual(cnn.get_cn(self.he_bcc, 0, use_weights=False), 0)
cnn = CrystalNN(distance_cutoffs=(1.25, 5))
self.assertEqual(cnn.get_cn(self.he_bcc, 0, use_weights=False), 8)
def test_shifted_sites(self):
cnn = CrystalNN()
sites = [[0., 0.2, 0.2], [0, 0, 0]]
struct = Structure([7, 0, 0, 0, 7, 0, 0, 0, 7], ['I'] * len(sites), sites)
bonded_struct = cnn.get_bonded_structure(struct)
sites_shifted = [[1., 0.2, 0.2], [0, 0, 0]]
struct_shifted = Structure([7, 0, 0, 0, 7, 0, 0, 0, 7], ['I'] * len(sites_shifted),
sites_shifted)
bonded_struct_shifted = cnn.get_bonded_structure(struct_shifted)
self.assertEqual(len(bonded_struct.get_connected_sites(0)),
len(bonded_struct_shifted.get_connected_sites(0)))
class CutOffDictNNTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 2.528]]),
["C", "C"], [[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],
coords_are_cartesian=True
)
self.prev_warnings = warnings.filters
warnings.simplefilter("ignore")
def tearDown(self):
warnings.filters = self.prev_warnings
def test_cn(self):
nn = CutOffDictNN({('C', 'C'): 2})
self.assertEqual(nn.get_cn(self.diamond, 0), 4)
nn_null = CutOffDictNN()
self.assertEqual(nn_null.get_cn(self.diamond, 0), 0)
def test_from_preset(self):
nn = CutOffDictNN.from_preset("vesta_2019")
self.assertEqual(nn.get_cn(self.diamond, 0), 4)
# test error thrown on unknown preset
self.assertRaises(ValueError, CutOffDictNN.from_preset, "test")
@unittest.skipIf(not which('critic2'), "critic2 executable not present")
class Critic2NNTest(PymatgenTest):
def setUp(self):
self.diamond = Structure(
Lattice([[2.189, 0, 1.264], [0.73, 2.064, 1.264], [0, 0, 2.528]]),
["C", "C"], [[2.554, 1.806, 4.423], [0.365, 0.258, 0.632]],
coords_are_cartesian=True
)
self.prev_warnings = warnings.filters
warnings.simplefilter("ignore")
def tearDown(self):
warnings.filters = self.prev_warnings
def test_cn(self):
nn = Critic2NN()
#self.assertEqual(nn.get_cn(self.diamond, 0), 4)
if __name__ == '__main__':
unittest.main()
|
|
"""
SoftLayer.tests.managers.vs_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:license: MIT, see LICENSE for more details.
"""
import mock
import SoftLayer
from SoftLayer import exceptions
from SoftLayer import fixtures
from SoftLayer import testing
class VSTests(testing.TestCase):
def set_up(self):
self.vs = SoftLayer.VSManager(self.client,
SoftLayer.OrderingManager(self.client))
def test_list_instances(self):
results = self.vs.list_instances(hourly=True, monthly=True)
for result in results:
self.assertIn(result['id'], [100, 104])
self.assert_called_with('SoftLayer_Account', 'getVirtualGuests')
def test_list_instances_neither(self):
results = self.vs.list_instances(hourly=False, monthly=False)
for result in results:
self.assertIn(result['id'], [100, 104])
self.assert_called_with('SoftLayer_Account', 'getVirtualGuests')
def test_list_instances_monthly(self):
results = self.vs.list_instances(hourly=False, monthly=True)
for result in results:
self.assertIn(result['id'], [100])
self.assert_called_with('SoftLayer_Account', 'getMonthlyVirtualGuests')
def test_list_instances_hourly(self):
results = self.vs.list_instances(hourly=True, monthly=False)
for result in results:
self.assertIn(result['id'], [104])
self.assert_called_with('SoftLayer_Account', 'getHourlyVirtualGuests')
def test_list_instances_with_filters(self):
self.vs.list_instances(
hourly=True,
monthly=True,
tags=['tag1', 'tag2'],
cpus=2,
memory=1024,
hostname='hostname',
domain='example.com',
local_disk=True,
datacenter='dal05',
nic_speed=100,
public_ip='1.2.3.4',
private_ip='4.3.2.1',
)
_filter = {
'virtualGuests': {
'datacenter': {
'name': {'operation': '_= dal05'}},
'domain': {'operation': '_= example.com'},
'tagReferences': {
'tag': {'name': {
'operation': 'in',
'options': [{
'name': 'data', 'value': ['tag1', 'tag2']}]}}},
'maxCpu': {'operation': 2},
'localDiskFlag': {'operation': True},
'maxMemory': {'operation': 1024},
'hostname': {'operation': '_= hostname'},
'networkComponents': {'maxSpeed': {'operation': 100}},
'primaryIpAddress': {'operation': '_= 1.2.3.4'},
'primaryBackendIpAddress': {'operation': '_= 4.3.2.1'}
}
}
self.assert_called_with('SoftLayer_Account', 'getVirtualGuests',
filter=_filter)
def test_resolve_ids_ip(self):
_id = self.vs._get_ids_from_ip('172.16.240.2')
self.assertEqual(_id, [100, 104])
def test_resolve_ids_ip_private(self):
# Now simulate a private IP test
mock = self.set_mock('SoftLayer_Account', 'getVirtualGuests')
mock.side_effect = [[], [{'id': 99}]]
_id = self.vs._get_ids_from_ip('10.0.1.87')
self.assertEqual(_id, [99])
def test_resolve_ids_ip_invalid(self):
_id = self.vs._get_ids_from_ip('nope')
self.assertEqual(_id, [])
def test_resolve_ids_hostname(self):
_id = self.vs._get_ids_from_hostname('vs-test1')
self.assertEqual(_id, [100, 104])
def test_get_instance(self):
result = self.vs.get_instance(100)
self.assertEqual(fixtures.SoftLayer_Virtual_Guest.getObject, result)
self.assert_called_with('SoftLayer_Virtual_Guest', 'getObject',
identifier=100)
def test_get_create_options(self):
results = self.vs.get_create_options()
self.assertEqual(
fixtures.SoftLayer_Virtual_Guest.getCreateObjectOptions, results)
def test_cancel_instance(self):
result = self.vs.cancel_instance(1)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Virtual_Guest', 'deleteObject',
identifier=1)
def test_reload_instance(self):
self.vs.reload_instance(1)
self.assert_called_with('SoftLayer_Virtual_Guest',
'reloadOperatingSystem',
args=('FORCE', {}),
identifier=1)
def test_reload_instance_posturi_sshkeys(self):
post_uri = 'http://test.sftlyr.ws/test.sh'
self.vs.reload_instance(1, post_uri=post_uri, ssh_keys=[1701])
args = ('FORCE', {'customProvisionScriptUri': post_uri,
'sshKeyIds': [1701]})
self.assert_called_with('SoftLayer_Virtual_Guest',
'reloadOperatingSystem',
args=args,
identifier=1)
def test_reload_instance_with_new_os(self):
self.vs.reload_instance(1, image_id=1234)
args = ('FORCE', {'imageTemplateId': 1234})
self.assert_called_with('SoftLayer_Virtual_Guest',
'reloadOperatingSystem',
args=args,
identifier=1)
@mock.patch('SoftLayer.managers.vs.VSManager._generate_create_dict')
def test_create_verify(self, create_dict):
create_dict.return_value = {'test': 1, 'verify': 1}
self.vs.verify_create_instance(test=1, verify=1, tags=['test', 'tags'])
create_dict.assert_called_once_with(test=1, verify=1)
self.assert_called_with('SoftLayer_Virtual_Guest',
'generateOrderTemplate',
args=({'test': 1, 'verify': 1},))
@mock.patch('SoftLayer.managers.vs.VSManager._generate_create_dict')
def test_create_instance(self, create_dict):
create_dict.return_value = {'test': 1, 'verify': 1}
self.vs.create_instance(test=1, verify=1, tags='dev,green')
create_dict.assert_called_once_with(test=1, verify=1)
self.assert_called_with('SoftLayer_Virtual_Guest', 'createObject',
args=({'test': 1, 'verify': 1},))
self.assert_called_with('SoftLayer_Virtual_Guest', 'setTags',
args=('dev,green',),
identifier=100)
def test_create_instances(self):
self.vs.create_instances([{'cpus': 1,
'memory': 1024,
'hostname': 'server',
'domain': 'example.com',
'tags': 'dev,green'}])
args = ([{'domain': 'example.com',
'hourlyBillingFlag': True,
'localDiskFlag': True,
'maxMemory': 1024,
'hostname': 'server',
'startCpus': 1}],)
self.assert_called_with('SoftLayer_Virtual_Guest', 'createObjects',
args=args)
self.assert_called_with('SoftLayer_Virtual_Guest', 'setTags',
args=('dev,green',),
identifier=100)
def test_generate_os_and_image(self):
self.assertRaises(
ValueError,
self.vs._generate_create_dict,
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code=1,
image_id=1,
)
def test_generate_missing(self):
self.assertRaises(ValueError, self.vs._generate_create_dict)
self.assertRaises(ValueError, self.vs._generate_create_dict, cpus=1)
def test_generate_basic(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
)
assert_data = {
'startCpus': 1,
'maxMemory': 1,
'hostname': 'test',
'domain': 'example.com',
'localDiskFlag': True,
'operatingSystemReferenceCode': "STRING",
'hourlyBillingFlag': True,
}
self.assertEqual(data, assert_data)
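    # _generate_create_dict translates the friendly keyword arguments into the
    # payload that create_instance/verify_create_instance hand to the
    # SoftLayer_Virtual_Guest createObject/generateOrderTemplate calls, as
    # exercised in test_create_instance and test_create_verify above.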
def test_generate_monthly(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
hourly=False,
)
assert_data = {
'hourlyBillingFlag': False,
'startCpus': 1,
'maxMemory': 1,
'hostname': 'test',
'domain': 'example.com',
'localDiskFlag': True,
'operatingSystemReferenceCode': "STRING",
}
self.assertEqual(data, assert_data)
def test_generate_image_id(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
image_id="45",
)
assert_data = {
'startCpus': 1,
'maxMemory': 1,
'hostname': 'test',
'domain': 'example.com',
'localDiskFlag': True,
'blockDeviceTemplateGroup': {"globalIdentifier": "45"},
'hourlyBillingFlag': True,
}
self.assertEqual(data, assert_data)
def test_generate_dedicated(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
dedicated=True,
)
assert_data = {
'startCpus': 1,
'maxMemory': 1,
'hostname': 'test',
'domain': 'example.com',
'localDiskFlag': True,
'operatingSystemReferenceCode': "STRING",
'hourlyBillingFlag': True,
'dedicatedAccountHostOnlyFlag': True,
}
self.assertEqual(data, assert_data)
def test_generate_datacenter(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
datacenter="sng01",
)
assert_data = {
'startCpus': 1,
'maxMemory': 1,
'hostname': 'test',
'domain': 'example.com',
'localDiskFlag': True,
'operatingSystemReferenceCode': "STRING",
'hourlyBillingFlag': True,
'datacenter': {"name": 'sng01'},
}
self.assertEqual(data, assert_data)
def test_generate_public_vlan(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
public_vlan=1,
)
assert_data = {
'startCpus': 1,
'maxMemory': 1,
'hostname': 'test',
'domain': 'example.com',
'localDiskFlag': True,
'operatingSystemReferenceCode': "STRING",
'hourlyBillingFlag': True,
'primaryNetworkComponent': {"networkVlan": {"id": 1}},
}
self.assertEqual(data, assert_data)
def test_generate_private_vlan(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
private_vlan=1,
)
assert_data = {
'startCpus': 1,
'maxMemory': 1,
'hostname': 'test',
'domain': 'example.com',
'localDiskFlag': True,
'operatingSystemReferenceCode': "STRING",
'hourlyBillingFlag': True,
'primaryBackendNetworkComponent': {"networkVlan": {"id": 1}},
}
self.assertEqual(data, assert_data)
def test_generate_userdata(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
userdata="ICANHAZVSI",
)
assert_data = {
'startCpus': 1,
'maxMemory': 1,
'hostname': 'test',
'domain': 'example.com',
'localDiskFlag': True,
'operatingSystemReferenceCode': "STRING",
'hourlyBillingFlag': True,
'userData': [{'value': "ICANHAZVSI"}],
}
self.assertEqual(data, assert_data)
def test_generate_network(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
nic_speed=9001,
)
assert_data = {
'startCpus': 1,
'maxMemory': 1,
'hostname': 'test',
'domain': 'example.com',
'localDiskFlag': True,
'operatingSystemReferenceCode': "STRING",
'hourlyBillingFlag': True,
'networkComponents': [{'maxSpeed': 9001}],
}
self.assertEqual(data, assert_data)
def test_generate_private_network_only(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
nic_speed=9001,
private=True
)
assert_data = {
'startCpus': 1,
'maxMemory': 1,
'hostname': 'test',
'domain': 'example.com',
'localDiskFlag': True,
'operatingSystemReferenceCode': "STRING",
'privateNetworkOnlyFlag': True,
'hourlyBillingFlag': True,
'networkComponents': [{'maxSpeed': 9001}],
}
self.assertEqual(data, assert_data)
def test_generate_post_uri(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
            post_uri='https://example.com/bootstrap.sh',
)
assert_data = {
'startCpus': 1,
'maxMemory': 1,
'hostname': 'test',
'domain': 'example.com',
'localDiskFlag': True,
'operatingSystemReferenceCode': "STRING",
'hourlyBillingFlag': True,
'postInstallScriptUri': 'https://example.com/bootstrap.sh',
}
self.assertEqual(data, assert_data)
def test_generate_sshkey(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
ssh_keys=[543],
)
assert_data = {
'startCpus': 1,
'maxMemory': 1,
'hostname': 'test',
'domain': 'example.com',
'localDiskFlag': True,
'operatingSystemReferenceCode': "STRING",
'hourlyBillingFlag': True,
'sshKeys': [{'id': 543}],
}
self.assertEqual(data, assert_data)
def test_generate_no_disks(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING"
)
self.assertEqual(data.get('blockDevices'), None)
def test_generate_single_disk(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
disks=[50]
)
assert_data = {
'blockDevices': [
{"device": "0", "diskImage": {"capacity": 50}}]
}
self.assertTrue(data.get('blockDevices'))
self.assertEqual(data['blockDevices'], assert_data['blockDevices'])
def test_generate_multi_disk(self):
data = self.vs._generate_create_dict(
cpus=1,
memory=1,
hostname='test',
domain='example.com',
os_code="STRING",
disks=[50, 70, 100]
)
assert_data = {
'blockDevices': [
{"device": "0", "diskImage": {"capacity": 50}},
{"device": "2", "diskImage": {"capacity": 70}},
{"device": "3", "diskImage": {"capacity": 100}}]
}
self.assertTrue(data.get('blockDevices'))
self.assertEqual(data['blockDevices'], assert_data['blockDevices'])
def test_change_port_speed_public(self):
result = self.vs.change_port_speed(1, True, 100)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Virtual_Guest',
'setPublicNetworkInterfaceSpeed',
identifier=1,
args=(100,))
def test_change_port_speed_private(self):
result = self.vs.change_port_speed(2, False, 10)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Virtual_Guest',
'setPrivateNetworkInterfaceSpeed',
identifier=2,
args=(10,))
def test_rescue(self):
# Test rescue environment
result = self.vs.rescue(1234)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Virtual_Guest',
'executeRescueLayer',
identifier=1234)
def test_edit_metadata(self):
# Test editing user data
result = self.vs.edit(100, userdata='my data')
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Virtual_Guest', 'setUserMetadata',
identifier=100,
args=(['my data'],))
def test_edit_blank(self):
# Now test a blank edit
self.assertTrue(self.vs.edit(100))
def test_edit_full(self):
result = self.vs.edit(100,
hostname='new-host',
domain='new.sftlyr.ws',
notes='random notes')
self.assertEqual(result, True)
args = ({
'hostname': 'new-host',
'domain': 'new.sftlyr.ws',
'notes': 'random notes',
},)
self.assert_called_with('SoftLayer_Virtual_Guest', 'editObject',
identifier=100,
args=args)
def test_edit_tags(self):
# Test tag support
result = self.vs.edit(100, tags='dev,green')
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Virtual_Guest', 'setTags',
identifier=100,
args=('dev,green',))
def test_edit_tags_blank(self):
result = self.vs.edit(100, tags='')
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Virtual_Guest', 'setTags',
identifier=100,
args=('',))
def test_captures(self):
# capture only the OS disk
result = self.vs.capture(1, 'a')
expected = fixtures.SoftLayer_Virtual_Guest.createArchiveTransaction
self.assertEqual(result, expected)
args = ('a', [], None)
self.assert_called_with('SoftLayer_Virtual_Guest',
'createArchiveTransaction',
args=args,
identifier=1)
def test_capture_additional_disks(self):
# capture all the disks, minus the swap
# make sure the data is carried along with it
result = self.vs.capture(1, 'a', additional_disks=True)
expected = fixtures.SoftLayer_Virtual_Guest.createArchiveTransaction
self.assertEqual(result, expected)
args = ('a', [{'device': 0, 'mountType': 'Disk', 'uuid': 1},
{'device': 3, 'mountType': 'Disk', 'uuid': 3}], None)
self.assert_called_with('SoftLayer_Virtual_Guest',
'createArchiveTransaction',
args=args,
identifier=1)
def test_upgrade(self):
mock = self.set_mock('SoftLayer_Product_Package', 'getAllObjects')
mock.return_value = [
{'id': 46, 'name': 'Virtual Servers',
'description': 'Virtual Server Instances',
'type': {'keyName': 'VIRTUAL_SERVER_INSTANCE'}, 'isActive': 1},
]
# test single upgrade
result = self.vs.upgrade(1, cpus=4, public=False)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Product_Order', 'placeOrder')
call = self.calls('SoftLayer_Product_Order', 'placeOrder')[0]
order_container = call.args[0]
self.assertEqual(order_container['prices'], [{'id': 1007}])
self.assertEqual(order_container['virtualGuests'], [{'id': 1}])
def test_upgrade_blank(self):
# Now test a blank upgrade
result = self.vs.upgrade(1)
self.assertEqual(result, False)
self.assertEqual(self.calls('SoftLayer_Product_Order', 'placeOrder'),
[])
def test_upgrade_full(self):
# Test an upgrade with all parameters
result = self.vs.upgrade(1,
cpus=4,
memory=2,
nic_speed=1000,
public=True)
self.assertEqual(result, True)
self.assert_called_with('SoftLayer_Product_Order', 'placeOrder')
call = self.calls('SoftLayer_Product_Order', 'placeOrder')[0]
order_container = call.args[0]
self.assertIn({'id': 1144}, order_container['prices'])
self.assertIn({'id': 1133}, order_container['prices'])
self.assertIn({'id': 1122}, order_container['prices'])
self.assertEqual(order_container['virtualGuests'], [{'id': 1}])
def test_upgrade_skips_location_based_prices(self):
# Test that no prices that have locationGroupId set are used
self.assertRaises(exceptions.SoftLayerError,
self.vs.upgrade, 1, cpus=55, memory=2, public=True)
def test_get_item_id_for_upgrade(self):
item_id = 0
package_items = self.client['Product_Package'].getItems(id=46)
for item in package_items:
if ((item['prices'][0]['categories'][0]['id'] == 3)
and (item.get('capacity') == '2')):
item_id = item['prices'][0]['id']
break
self.assertEqual(1133, item_id)
class VSWaitReadyGoTests(testing.TestCase):
def set_up(self):
self.client = mock.MagicMock()
self.vs = SoftLayer.VSManager(self.client)
self.guestObject = self.client['Virtual_Guest'].getObject
@mock.patch('SoftLayer.managers.vs.VSManager.wait_for_ready')
def test_wait_interface(self, ready):
# verify interface to wait_for_ready is intact
self.vs.wait_for_transaction(1, 1)
ready.assert_called_once_with(1, 1, delay=1, pending=True)
def test_active_not_provisioned(self):
# active transaction and no provision date should be false
self.guestObject.side_effect = [
{'activeTransaction': {'id': 1}},
]
value = self.vs.wait_for_ready(1, 0)
self.assertFalse(value)
def test_active_and_provisiondate(self):
# active transaction and provision date should be True
self.guestObject.side_effect = [
{'activeTransaction': {'id': 1},
'provisionDate': 'aaa'},
]
value = self.vs.wait_for_ready(1, 1)
self.assertTrue(value)
def test_active_provision_pending(self):
# active transaction and provision date
# and pending should be false
self.guestObject.side_effect = [
{'activeTransaction': {'id': 1}, 'provisionDate': 'aaa'},
]
value = self.vs.wait_for_ready(1, 0, pending=True)
self.assertFalse(value)
def test_active_reload(self):
# actively running reload
self.guestObject.side_effect = [
{
'activeTransaction': {'id': 1},
'provisionDate': 'aaa',
'lastOperatingSystemReload': {'id': 1},
},
]
value = self.vs.wait_for_ready(1, 0)
self.assertFalse(value)
def test_reload_no_pending(self):
# reload complete, maintenance transactions
self.guestObject.side_effect = [
{
'activeTransaction': {'id': 2},
'provisionDate': 'aaa',
'lastOperatingSystemReload': {'id': 1},
},
]
value = self.vs.wait_for_ready(1, 1)
self.assertTrue(value)
def test_reload_pending(self):
# reload complete, pending maintenance transactions
self.guestObject.side_effect = [
{
'activeTransaction': {'id': 2},
'provisionDate': 'aaa',
'lastOperatingSystemReload': {'id': 1},
},
]
value = self.vs.wait_for_ready(1, 0, pending=True)
self.assertFalse(value)
@mock.patch('time.sleep')
def test_ready_iter_once_incomplete(self, _sleep):
self.guestObject = self.client['Virtual_Guest'].getObject
# no iteration, false
self.guestObject.side_effect = [
{'activeTransaction': {'id': 1}},
]
value = self.vs.wait_for_ready(1, 0)
self.assertFalse(value)
self.assertFalse(_sleep.called)
@mock.patch('time.sleep')
def test_iter_once_complete(self, _sleep):
# no iteration, true
self.guestObject.side_effect = [
{'provisionDate': 'aaa'},
]
value = self.vs.wait_for_ready(1, 1)
self.assertTrue(value)
self.assertFalse(_sleep.called)
@mock.patch('time.sleep')
def test_iter_four_complete(self, _sleep):
# test 4 iterations with positive match
self.guestObject.side_effect = [
{'activeTransaction': {'id': 1}},
{'activeTransaction': {'id': 1}},
{'activeTransaction': {'id': 1}},
{'provisionDate': 'aaa'},
]
value = self.vs.wait_for_ready(1, 4)
self.assertTrue(value)
_sleep.assert_has_calls([mock.call(1), mock.call(1), mock.call(1)])
self.guestObject.assert_has_calls([
mock.call(id=1, mask=mock.ANY), mock.call(id=1, mask=mock.ANY),
mock.call(id=1, mask=mock.ANY), mock.call(id=1, mask=mock.ANY),
])
@mock.patch('time.time')
@mock.patch('time.sleep')
def test_iter_two_incomplete(self, _sleep, _time):
# test 2 iterations, with no matches
self.guestObject.side_effect = [
{'activeTransaction': {'id': 1}},
{'activeTransaction': {'id': 1}},
{'provisionDate': 'aaa'}
]
_time.side_effect = [0, 1, 2]
value = self.vs.wait_for_ready(1, 2)
self.assertFalse(value)
_sleep.assert_called_once_with(1)
self.guestObject.assert_has_calls([
mock.call(id=1, mask=mock.ANY),
mock.call(id=1, mask=mock.ANY),
])
@mock.patch('time.time')
@mock.patch('time.sleep')
def test_iter_20_incomplete(self, _sleep, _time):
"""Wait for up to 20 seconds (sleeping for 10 seconds) for a server."""
self.guestObject.return_value = {'activeTransaction': {'id': 1}}
_time.side_effect = [0, 10, 20]
value = self.vs.wait_for_ready(1, 20, delay=10)
self.assertFalse(value)
self.guestObject.assert_has_calls([mock.call(id=1, mask=mock.ANY)])
_sleep.assert_has_calls([mock.call(10)])
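# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test suite above): how the
# wait_for_ready() behaviour exercised by these tests might be used against a
# live account. The environment-based client constructor and the guest id
# 12345 are assumptions for this example, not fixtures from this file.
# ---------------------------------------------------------------------------
def _example_wait_for_ready():
    import SoftLayer
    client = SoftLayer.create_client_from_env()
    vs_manager = SoftLayer.VSManager(client)
    # Poll for up to 600 seconds, checking every 10 seconds, until the guest
    # has a provision date and no blocking active transaction.
    if vs_manager.wait_for_ready(12345, 600, delay=10):
        print('Guest 12345 is ready')
    else:
        print('Timed out waiting for guest 12345')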
|
|
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
# Maintainer: joaander / All Developers are free to add commands for new
# features
"""Compute system properties."""
from hoomd.md import _md
from hoomd.operation import Compute
from hoomd.data.parameterdicts import ParameterDict
from hoomd.logging import log
import hoomd
class _Thermo(Compute):
def __init__(self, filter):
self._filter = filter
class ThermodynamicQuantities(_Thermo):
"""Compute thermodynamic properties of a group of particles.
Args:
filter (``hoomd.filter``): Particle filter to compute thermodynamic
properties for.
:py:class:`ThermodynamicQuantities` acts on a given group of particles and
calculates thermodynamic properties of those particles when requested. All
specified :py:class:`ThermodynamicQuantities` objects can be added to a
logger for logging during a simulation,
see :py:class:`hoomd.logging.Logger` for more details.
Examples::
f = filter.Type('A')
compute.ThermodynamicQuantities(filter=f)
"""
def __init__(self, filter):
super().__init__(filter)
def _attach(self):
if isinstance(self._simulation.device, hoomd.device.CPU):
thermo_cls = _md.ComputeThermo
else:
thermo_cls = _md.ComputeThermoGPU
group = self._simulation.state._get_group(self._filter)
self._cpp_obj = thermo_cls(self._simulation.state._cpp_sys_def, group)
super()._attach()
@log(requires_run=True)
def kinetic_temperature(self):
""":math:`kT_k`, instantaneous thermal energy of the group \
:math:`[\\mathrm{energy}]`.
Calculated as:
.. math::
kT_k = 2 \\cdot \\frac{K}{N_{\\mathrm{dof}}}
"""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.kinetic_temperature
@log(requires_run=True)
def pressure(self):
""":math:`P`, instantaneous pressure of the group \
:math:`[\\mathrm{pressure}]`.
Calculated as:
.. math::
P = \\frac{ 2 \\cdot K_{\\mathrm{trans}} + W }{D \\cdot V},
where :math:`D` is the dimensionality of the system, :math:`V` is the
total volume of the simulation box (or area in 2D), and :math:`W` is
calculated as:
.. math::
W = \\frac{1}{2} \\sum_{i \\in \\mathrm{filter}} \\sum_{j}
\\vec{F}_{ij} \\cdot \\vec{r_{ij}} + \\sum_{k} \\vec{F}_{k} \\cdot
\\vec{r_{k}},
where :math:`i` and :math:`j` are particle tags, :math:`\\vec{F}_{ij}`
are pairwise forces between particles and :math:`\\vec{F}_k` are forces
due to explicit constraints, implicit rigid body constraints, external
walls, and fields.
"""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.pressure
@log(category='sequence', requires_run=True)
def pressure_tensor(self):
"""Instantaneous pressure tensor of the group \
:math:`[\\mathrm{pressure}]`.
(:math:`P_{xx}`, :math:`P_{xy}`, :math:`P_{xz}`, :math:`P_{yy}`,
:math:`P_{yz}`, :math:`P_{zz}`). Calculated as:
.. math::
P_{ij} = \\left[\\sum_{k \\in \\mathrm{filter}} m_k
\\vec{v}_{k,i} \\cdot \\vec{v}_{k,j} + \\sum_{k \\in
\\mathrm{filter}} \\sum_{l} \\frac{1}{2} \\left(\\vec{r}_{kl,i}
\\cdot \\vec{F}_{kl,j} + \\vec{r}_{kl,j} \\cdot \\vec{F}_{kl,i}
\\right) \\right]/V
where :math:`V` is the total simulation box volume (or area in 2D).
"""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.pressure_tensor
@log(requires_run=True)
def kinetic_energy(self):
""":math:`K`, total kinetic energy of particles in the group \
:math:`[\\mathrm{energy}]`.
.. math::
K = K_{\\mathrm{rot}} + K_{\\mathrm{trans}}
"""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.kinetic_energy
@log(requires_run=True)
def translational_kinetic_energy(self):
r""":math:`K_{\mathrm{trans}}`.
Translational kinetic energy of all particles in the group
:math:`[\mathrm{energy}]`.
.. math::
K_{\mathrm{trans}} = \frac{1}{2}\sum_{i \in \mathrm{filter}}
m_i|\vec{v}_i|^2
"""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.translational_kinetic_energy
@log(requires_run=True)
def rotational_kinetic_energy(self):
r""":math:`K_{\mathrm{rot}}`.
Rotational kinetic energy of all particles in the group
:math:`[\mathrm{energy}]`.
Calculated as:
.. math::
K_{\mathrm{rot}} = \frac{1}{2} \sum_{i \in \mathrm{filter}}
\frac{L_{x,i}^2}{I_{x,i}} + \frac{L_{y,i}^2}{I_{y,i}} +
\frac{L_{z,i}^2}{I_{z,i}},
where :math:`I` is the moment of inertia and :math:`L` is the angular
momentum in the (diagonal) reference frame of the particle.
"""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.rotational_kinetic_energy
@log(requires_run=True)
def potential_energy(self):
r""":math:`U`.
Potential energy that the group contributes to the entire system
:math:`[\mathrm{energy}]`.
The potential energy is calculated as a sum of per-particle energy
contributions:
.. math::
U = \sum_{i \in \mathrm{filter}} U_i,
where :math:`U_i` is defined as:
.. math::
U_i = U_{\mathrm{pair}, i} + U_{\mathrm{bond}, i} +
U_{\mathrm{angle}, i} + U_{\mathrm{dihedral}, i} +
U_{\mathrm{improper}, i} + U_{\mathrm{external}, i} +
U_{\mathrm{other}, i}
and each term on the RHS is calculated as:
.. math::
U_{\mathrm{pair}, i} &= \frac{1}{2} \sum_j V_{\mathrm{pair}, ij}
U_{\mathrm{bond}, i} &= \frac{1}{2} \sum_{(j, k) \in
\mathrm{bonds}} V_{\mathrm{bond}, jk}
U_{\mathrm{angle}, i} &= \frac{1}{3} \sum_{(j, k, l) \in
\mathrm{angles}} V_{\mathrm{angle}, jkl}
U_{\mathrm{dihedral}, i} &= \frac{1}{4} \sum_{(j, k, l, m) \in
\mathrm{dihedrals}} V_{\mathrm{dihedral}, jklm}
U_{\mathrm{improper}, i} &= \frac{1}{4} \sum_{(j, k, l, m) \in
\mathrm{impropers}} V_{\mathrm{improper}, jklm}
In each summation above, the indices go over all particles and we only
use terms where one of the summation indices (:math:`j`, :math:`k`,
:math:`l`, or :math:`m`) is equal to :math:`i`. External and other
potentials are summed similar to the other terms using per-particle
contributions.
"""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.potential_energy
@log(requires_run=True)
def degrees_of_freedom(self):
r""":math:`N_{\mathrm{dof}}`.
Number of degrees of freedom given to the group by its integration
method.
Calculated as:
.. math::
N_{\mathrm{dof}} = N_{\mathrm{dof, trans}}
+ N_{\mathrm{dof, rot}}
"""
return self._cpp_obj.degrees_of_freedom
@log(requires_run=True)
def translational_degrees_of_freedom(self):
r""":math:`N_{\mathrm{dof, trans}}`.
Number of translational degrees of freedom given to the group by its
integration method.
When using a single integration method that is momentum conserving and
operates on all particles,
:math:`N_{\mathrm{dof, trans}} = DN - D - N_{constraints}`, where
:math:`D` is the dimensionality of the system.
Note:
The removal of :math:`D` degrees of freedom accounts for the fixed
center of mass in using periodic boundary conditions. When the
*filter* in :py:class:`ThermodynamicQuantities` selects a subset
of all particles, the removed degrees of freedom are spread
proportionately.
"""
return self._cpp_obj.translational_degrees_of_freedom
@log(requires_run=True)
def rotational_degrees_of_freedom(self):
r""":math:`N_{\mathrm{dof, rot}}`.
Number of rotational degrees of freedom given to the group by its
integration method.
"""
return self._cpp_obj.rotational_degrees_of_freedom
@log(requires_run=True)
def num_particles(self):
""":math:`N`, number of particles in the group."""
return self._cpp_obj.num_particles
@log(requires_run=True)
def volume(self):
""":math:`V`, volume of the simulation box (area in 2D) \
:math:`[\\mathrm{length}^{d}]`.
Where :math:`d` is the dimensionality of the system.
"""
return self._cpp_obj.volume
class HarmonicAveragedThermodynamicQuantities(Compute):
"""Compute harmonic averaged thermodynamic properties of particles.
Args:
filter (``hoomd.filter``): Particle filter to compute thermodynamic
properties for.
kT (float): Temperature of the system :math:`[\\mathrm{energy}]`.
harmonic_pressure (float): Harmonic contribution to the pressure
:math:`[\\mathrm{pressure}]`. If omitted, the HMA pressure can
still be computed, but will be similar in precision to
the conventional pressure.
:py:class:`HarmonicAveragedThermodynamicQuantities` acts on a given group
of particles and calculates harmonically mapped average (HMA) properties
of those particles when requested. HMA computes properties more precisely
(with less variance) for atomic crystals in NVT simulations. The presence
of diffusion (vacancy hopping, etc.) will prevent HMA from providing
improvement. HMA tracks displacements from the lattice positions, which
are saved either during the first call to `Simulation.run` or when the compute
is first added to the simulation, whichever occurs last.
Note:
`HarmonicAveragedThermodynamicQuantities` is an implementation of the
methods section of Sabry G. Moustafa, Andrew J. Schultz, and David A.
Kofke. (2015). "Very fast averaging of thermal properties of crystals
by molecular simulation". Phys. Rev. E 92, 043303
doi:10.1103/PhysRevE.92.043303
Examples::
hma = hoomd.compute.HarmonicAveragedThermodynamicQuantities(
filter=hoomd.filter.Type('A'), kT=1.0)
Attributes:
filter (hoomd.filter.ParticleFilter): Subset of particles to compute
thermodynamic properties for.
kT (hoomd.variant.Variant): Temperature of the system
:math:`[\\mathrm{energy}]`.
harmonic_pressure (float): Harmonic contribution to the pressure
:math:`[\\mathrm{pressure}]`.
"""
def __init__(self, filter, kT, harmonic_pressure=0):
# store metadata
param_dict = ParameterDict(kT=float(kT),
harmonic_pressure=float(harmonic_pressure))
# set defaults
self._param_dict.update(param_dict)
self._filter = filter
# initialize base class
super().__init__()
def _attach(self):
if isinstance(self._simulation.device, hoomd.device.CPU):
thermoHMA_cls = _md.ComputeThermoHMA
else:
thermoHMA_cls = _md.ComputeThermoHMAGPU
group = self._simulation.state._get_group(self._filter)
self._cpp_obj = thermoHMA_cls(self._simulation.state._cpp_sys_def,
group, self.kT, self.harmonic_pressure)
super()._attach()
@log(requires_run=True)
def potential_energy(self):
"""Average potential energy :math:`[\\mathrm{energy}]`."""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.potential_energy
@log(requires_run=True)
def pressure(self):
"""Average pressure :math:`[\\mathrm{pressure}]`."""
self._cpp_obj.compute(self._simulation.timestep)
return self._cpp_obj.pressure
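# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, never called from this module): attaching
# ThermodynamicQuantities to a simulation and exposing its quantities through
# a Logger, as mentioned in the class docstring above. The snapshot file name
# "init.gsd" is a placeholder.
# ---------------------------------------------------------------------------
def _example_thermo_logging():
    sim = hoomd.Simulation(device=hoomd.device.CPU(), seed=1)
    sim.create_state_from_gsd(filename="init.gsd")
    thermo = ThermodynamicQuantities(filter=hoomd.filter.All())
    sim.operations.computes.append(thermo)
    logger = hoomd.logging.Logger()
    logger.add(thermo, quantities=["kinetic_energy", "pressure", "volume"])
    # Quantities decorated with requires_run=True can only be queried once
    # the simulation has run and the compute is attached.
    sim.run(0)
    print(thermo.kinetic_energy, thermo.pressure)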
|
|
import socket
import sys
from threading import Thread
import json
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
# Declare socket parameters
host = "192.168.1.84"
port = 60000
print (host)
print (port)
n = 1
global existMoments
existMoments = float(0.0)
global image
global M
class client(Thread):
def __init__(self, socket, address):
Thread.__init__(self)
self.sock = socket
self.addr = address
self.start()
def run(self):
while 1:
print('Client connected\n')
msg_from_robot = self.sock.recv(1024).decode()
print('Robot sent:', msg_from_robot)
perform_robot_dance()
#self.sock.close()
def startClientServerThreads():
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serversocket.bind((host, port))
serversocket.listen(5)
print ('server started and listening')
try:
while 1:
clientsocket, address = serversocket.accept()
client(clientsocket, address)
except (KeyboardInterrupt, SystemExit):
sys.exit()
def captureAndLoadImage():
capComm= str("frame.jpg")
os.system("fswebcam -r 507x456 --no-banner " + capComm)
capImg= cv2.imread(capComm, -1)
global n
n = n+1
return capImg
def showImage(capImg):
cv2.imshow('img', capImg)
cv2.waitKey(0)
cv2.destroyAllWindows()
def colorAndCornerRecognition(capImg):
#showImage(capImg)
#capImg = cv2.fastNlMeansDenoisingColored(capImg, None, 10, 10, 7, 21)
#showImage(capImg)
plt.imshow(capImg), plt.show()
hsv = cv2.cvtColor(capImg, cv2.COLOR_BGR2HSV)
lower_color = np.array([40, 50, 50])
upper_color = np.array([80, 255, 255])
mask = cv2.inRange(hsv, lower_color, upper_color)
res = cv2.bitwise_and(capImg, capImg, mask=mask)
capImg = res
#showImage(capImg)
capImg = cv2.fastNlMeansDenoisingColored(capImg, None, 10, 10, 7, 21)
#showImage(capImg)
#gray = cv2.cvtColor(capImg,cv2.COLOR_BGR2GRAY)
#surf = cv2.xfeatures2d.SURF_create(1000)
#kp, des = surf.detectAndCompute(capImg, None) #finds keypoints and descriptors in capImg
#print (kp)
#capImg = cv2.drawKeypoints(capImg, kp, None, (255,0,0), 4)
# These names are shared as module-level globals elsewhere in this script,
# so declare them once, before any of them is read in this function.
global image, M, existMoments
while (existMoments == 0.0):
#cv2.drawContours(image, contours, 0, (0,255,0), 3)
#showImage(capImg)
#capImg = cv2.copyMakeBorder(capImg,10,10,10,10,cv2.BORDER_CONSTANT,value = [255,255,255])
img = cv2.cvtColor(capImg,cv2.COLOR_BGR2GRAY)
#showImage(img)
ret, thresh = cv2.threshold(img, 15, 250, cv2.THRESH_BINARY)
#showImage(thresh)
image, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
print('Num of Contours ', len(contours))
global cnt
#print (contours)
'''for x in range (0, len(contours)):
cnt = contours[x]
#print (cnt)
global M
M = cv2.moments(cnt)
global existMoments
existMoments = float(M['m10'])
if (existMoments != 0.0):
cv2.drawContours(image, contours, 0, (0,255,0), 3)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
#print (M)
print ('cx is', cx, ' cy is ', cy)'''
# Start in the middle of the contour list and walk outwards in both
# directions until a contour with non-zero moments is found.
avgContours = len(contours) // 2
for i in range(0, avgContours):
print ("i is ", i, "avg is ", avgContours )
cnt = contours[avgContours + i]
M = cv2.moments(cnt)
existMoments = float(M['m10'])
if (existMoments != 0.0):
cv2.drawContours(image, contours, 0, (0,255,0), 3)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
print ('cx is', cx, ' cy is ', cy)
break
cnt = contours[avgContours - i]
M = cv2.moments(cnt)
existMoments = float(M['m10'])
if (existMoments != 0.0):
cv2.drawContours(image, contours, 0, (0,255,0), 3)
cx = int(M['m10']/M['m00'])
cy = int(M['m01']/M['m00'])
print ('cx is', cx, ' cy is ', cy)
break
#capImg = cv2.fastNlMeansDenoisingColored(capImg, None, 15, 15, 7, 31)
showImage(image)
#cx = int(M['m10']/M['m00'])
#cy = int(M['m01']/M['m00'])
#area = cv2.contourArea(cnt)
#capImg = image
#showImage(capImg)
#print (M)
#print (cx)
#print (cy)
#print (area)
'''corners = cv2.goodFeaturesToTrack(gray,2,0.01,10)
corners = np.int0(corners)'''
'''for i in contours:
x,y, z = i.ravel()
cv2.circle(capImg,(x,y),30,(0,0,255),-1)
showImage(capImg)'''
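def contourCentroid(contour):
    # Small helper sketch (not called by the existing code): the standard
    # OpenCV centroid computation the search loop above works towards. The
    # spatial moments m10/m00 and m01/m00 give the x and y coordinates of a
    # contour's centre of mass; contours with zero area have no centroid.
    moments = cv2.moments(contour)
    if moments['m00'] == 0.0:
        return None
    return (int(moments['m10'] / moments['m00']),
            int(moments['m01'] / moments['m00']))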
def computeImage(capImg):
return
def perform_robot_dance():
limit = input("enter amount of times you want to capture: ")
limit = int(limit)
for i in range (0, limit):
#while (1):
capImg = captureAndLoadImage()
colorAndCornerRecognition(capImg)
#computeImage(capImg)
msg_to_robot = '[1000][3][0.270][0.635][0.020]'
#self.sock.send(msg_to_robot.encode())
print (msg_to_robot)
# Real code
#startClientServerThreads()
perform_robot_dance()
|
|
# -*- coding: utf-8 -*-
#
# Desc: This file is part of the ecromedos Document Preparation System
# Author: Tobias Koch <[email protected]>
# License: MIT
# URL: http://www.ecromedos.net
#
import os, re
from lxml import etree
import com.lepture.mistune as mistune
from net.ecromedos.configreader import ECMDSConfigReader
from net.ecromedos.dtdresolver import ECMDSDTDResolver
class ECMLRendererError(Exception):
pass
class ECMLRenderer(mistune.Renderer):
def __init__(self, config):
mistune.Renderer.__init__(self)
self.section_level = 0
self.footnotes_map = {}
self.config = config
#end function
# BLOCK ELEMENTS
def block_code(self, code, language=None):
if language is None:
language = "text"
return """<listing><code syntax="%(lang)s" strip="yes"
tabspaces="4">%(code)s</code></listing>""" % {
"lang": language,
"code": mistune.escape(code)
}
#end function
def block_quote(self, text):
return "<blockquote>%s</blockquote>" % text
def block_html(self, ecml):
return ecml
def header(self, text, level, raw=None):
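# Worked example of the bookkeeping below: if the previous heading was at
# level 3 and this one is at level 2, diff is -1, so two open <section>
# elements are closed (the level-3 and level-2 sections) before a fresh
# level-2 section is opened. A heading that jumps more than one level
# deeper than the current section (diff > 1) is rejected.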
retval = ""
diff = level - self.section_level
if diff > 1:
msg = "Heading '%s' skips a section level." % text
raise ECMLRendererError(msg)
else:
sign = int(diff > 0) - int(diff < 0)
diff = sign * diff
# we close until we reach the new level
if sign <= 0:
for i in range(diff+1):
retval += "</section>"
#end if
#end if
retval += '<section level="%d">' % level
retval += '<title>%s</title>' % text
self.section_level = level
return retval
#end function
def hrule(self):
return ""
def list(self, body, ordered=True):
if ordered:
return "<ol>%s</ol>" % body
else:
return "<ul>%s</ul>" % body
#end function
def list_item(self, text):
return "<li>%s</li>" % text
def paragraph(self, text):
return "<p>%s</p>" % text
def table(self, header, body):
return """\
<table print-width="100%%" screen-width="940px" align="left"
frame="rowsep,colsep" print-rulewidth="1pt" screen-rulewidth="1px"
rulecolor="#ffffff">
<thead>
%s
</thead>
<tbody>
%s
</tbody>
</table>""" % (header, body)
#end function
def table_row(self, content):
return '<tr valign="top">%s</tr>' % content
def table_cell(self, content, **flags):
align = flags['align']
width = flags.get('width')
attributes = ""
if align:
attributes += ' align="%s"' % align
if width:
attributes += ' width="%s"' % width
return '<td%s>%s</td>' % (attributes, content)
#end function
# INLINE ELEMENTS
def autolink(self, link, is_email=False):
link = mistune.escape(link)
href = "mailto:%s" % link if is_email else link
return '<link url="%s">%s</link>' % (href, link)
#end function
def codespan(self, text):
return "<tt>%s</tt>" % mistune.escape(text)
def double_emphasis(self, text):
return "<b>%s</b>" % text
def emphasis(self, text):
return "<i>%s</i>" % text
def image(self, src, title, text):
src = mistune.escape_link(src)
text = mistune.escape(text, quote=True)
if title:
title = mistune.escape(title, quote=True)
ecml = """\
<figure align="left">
<caption>%s</caption>
<img src="%s" print-width="100%%" screen-width="940px"/>
</figure>
""" % (title, src)
else:
ecml = """\
<figure align="left">
<img src="%s" print-width="100%%" screen-width="940px"/>
</figure>
""" % (src,)
#end if
return ecml
#end function
def linebreak(self):
return "<br/>"
def newline(self):
return ""
def footnote_ref(self, key, index):
return '<footnote-ref idref="%s"/>' % mistune.escape(key)
def footnote_item(self, key, text):
self.footnotes_map[key] = text
return ""
#end function
def footnotes(self, text):
return ""
def link(self, link, title, text):
link = mistune.escape_link(link)
return '<link url="%s">%s</link>' % (link, text)
#end function
def strikethrough(self, text):
return text
def text(self, text):
return mistune.escape(text)
def inline_html(self, ecml):
return ecml
#end class
class MarkdownConverterError(Exception):
pass
class MarkdownConverter(ECMDSDTDResolver, ECMDSConfigReader):
DOCUMENT_TEMPLATE = """\
<!DOCTYPE %(document_type)s SYSTEM "http://www.ecromedos.net/dtd/3.0/ecromedos.dtd">
<%(document_type)s bcor="%(bcor)s" div="%(div)s" lang="%(lang)s" papersize="%(papersize)s" parskip="%(parskip)s" secnumdepth="%(secnumdepth)s" secsplitdepth="%(secsplitdepth)s">
%(header)s
%(legal)s
<make-toc depth="%(tocdepth)s" lof="%(have_lof)s" lot="%(have_lot)s" lol="%(have_lol)s"/>
%(contents)s
</%(document_type)s>
"""
def __init__(self, options):
ECMDSConfigReader.__init__(self)
ECMDSDTDResolver. __init__(self)
self.readConfig(options)
self.document_settings = {
"document_type": "report",
"bcor": "0cm",
"div": "16",
"lang": "en_US",
"papersize": "a4",
"parskip": "half",
"secnumdepth": "2",
"secsplitdepth": "1",
"header": "",
"tocdepth": "5",
"have_lof": "no",
"have_lot": "no",
"have_lol": "no",
"contents": "",
"legal": ""
}
self.user_settings = options
#end function
def convert(self, string):
# initial conversion happening here
renderer = ECMLRenderer(self.config)
markdown = mistune.Markdown(renderer=renderer)
contents = markdown(self.parse_preamble(string))
footnotes = renderer.footnotes_map
def inline_markdown(s_):
t_ = etree.fromstring(markdown(s_))
# Maybe there can be a more elegant solution for this?
v_ = etree.tostring(t_, pretty_print=True,
encoding="unicode")
v_ = re.sub(r"^\<p\>|\</p\>$", "", v_,
flags=re.MULTILINE)
return v_.strip()
#end inline function
for k, v in self.document_settings.items():
if not v or isinstance(v, str) and not v.strip():
continue
if k == "legal":
self.document_settings["legal"] = \
"<legal>" + \
markdown(v) + \
"</legal>"
elif k == "author":
for i in range(len(v)):
v[i] = inline_markdown(v[i])
else:
v = re.sub(r"\s+", " ", v, flags=re.MULTILINE).strip()
self.document_settings[k] = inline_markdown(v)
#end if
header = self.generate_header(self.document_settings)
# close all open sections
for i in range(renderer.section_level):
contents += "</section>"
self.document_settings["header"] = header
self.document_settings["contents"] = contents
self.document_settings["footnotes"] = footnotes
contents = MarkdownConverter.DOCUMENT_TEMPLATE % self.document_settings
# parse XML to do post-processing
parser = etree.XMLParser(
load_dtd=True,
remove_blank_text=True
)
parser.resolvers.add(self)
tree = etree.fromstring(contents, parser=parser)
# fix footnotes, tables, section names...
tree = self.post_process(tree)
# return pretty-printed result
return etree.tostring(tree, pretty_print=True, encoding="unicode")
#end function
def parse_preamble(self, string):
document_settings = {}
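# The preamble is a block of "key: value" lines delimited by rows of
# dashes at the very start of the document, e.g. (illustrative values):
#
#   ---
#   title: My Document
#   author: Jane Doe
#   author: John Doe
#   ---
#
# Repeated keys such as "author" accumulate into a list and continuation
# lines are appended to the last key seen; everything after the closing
# dashes is handed to the markdown renderer unchanged.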
m = re.match(r"\A---+\s*?$.*?^---+\s*?$", string,
flags=re.MULTILINE|re.DOTALL)
if not m:
return string
m = m.group(0)
k = ""
v = ""
for line in m.strip("-").splitlines(True):
if re.match(r"^\S+.*:.*$", line):
k, v = line.split(":", 1)
if k != "author":
document_settings[k] = v
else:
document_settings.setdefault(k, []).append(v)
elif k:
if k != "author":
document_settings[k] += line
else:
document_settings[k][-1] += line
#end if
#end for
self.document_settings.update(document_settings)
self.document_settings.update(self.user_settings)
self.validate_settings(self.document_settings)
return string[len(m):]
#end function
def generate_header(self, settings):
header_elements = [
"subject",
"title",
"subtitle",
"author",
"date",
"publisher",
"dedication"
]
header = ""
for element_name in header_elements:
if element_name == "title":
header += "<title>%s</title>\n" % settings.get("title", "")
elif element_name == "author":
for author in settings.get("author", []):
header += "<author>%s</author>\n" % author
else:
element_text = settings.get(element_name, "")
if element_text:
header += "<%s>%s</%s>\n" % \
(element_name, element_text , element_name)
#end if
#end ifs
#end for
return "<head>\n%s</head>" % header
#end function
def validate_settings(self, settings):
pass
def post_process(self, root_node):
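# Manual depth-first, document-order walk over the element tree: visit a
# node, rewrite it if it is one of the special tags handled below, then
# descend into its first child; once a subtree is exhausted, continue with
# the next sibling of the nearest ancestor that still has one.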
node = root_node
while node is not None:
if node.tag == "footnote-ref":
node = self.__fix_footnote(node)
elif node.tag == "section":
node = self.__fix_section(node)
elif node.tag == "table":
node = self.__fix_table(node)
elif node.tag == "thead":
node = self.__fix_thead(node)
elif node.tag == "tbody":
node = self.__fix_tbody(node)
elif node.tag == "figure":
node = self.__fix_figure(node)
elif node.tag == "img":
node = self.__fix_img(node)
#end if
if len(node) != 0:
node = node[0]
continue
while node is not None:
following_sibling = node.getnext()
if following_sibling is not None:
node = following_sibling
break
node = node.getparent()
#end while
#end while
return root_node
#end function
# PRIVATE
def __fix_footnote(self, ref_node):
footnotes = self.document_settings["footnotes"]
footnote_ref = ref_node.get("idref", None)
footnote_def = footnotes.get(footnote_ref, None)
if footnote_def is None:
raise MarkdownConverterError(
"Unresolved footnote reference '%s'" % footnote_ref)
#end if
try:
footnote = etree.fromstring(footnote_def)
except etree.XMLSyntaxError as e:
raise MarkdownConverterError(
"Footnote '%s' is not a valid XML fragment." % footnote_ref)
#end try
if footnote.tag != "p":
raise MarkdownConverterError(
"Footnote '%s' is an invalid block element." % footnote_ref)
#end if
footnote.tag = "footnote"
footnote.tail = ref_node.tail
ref_node.getparent().replace(ref_node, footnote)
return footnote
#end function
def __fix_section(self, section_node):
document_type = self.document_settings["document_type"]
if document_type == "article":
section_names = [
"section",
"subsection",
"subsubsection",
"minisection"
]
else:
section_names = [
"chapter",
"section",
"subsection",
"subsubsection",
"minisection"
]
#end if
level = int(section_node.attrib["level"]) - 1
section_node.tag = section_names[level]
del section_node.attrib["level"]
return section_node
#end function
def __fix_table(self, table_node):
if table_node.xpath("colgroup"):
return table_node
header_cells = table_node.xpath("thead/tr/td")
widths = [int(c.attrib["width"]) for c in header_cells]
total_width = sum(widths)
widths = [w * 100.0 / float(total_width) for w in widths]
total_width += len(widths) - 1
print_width = int(total_width / 80.0 * 100.0)
screen_width = int(940.0 * print_width / 100.0)
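# Worked example: header cells with width attributes 20 and 40 give column
# widths of ~33.3% and ~66.7%; total_width becomes 61, so the table is
# rendered at int(61 / 80.0 * 100.0) = 76% of the print text width and
# int(940 * 76 / 100) = 714px on screen.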
table_node.attrib["print-width"] = str(print_width) + "%"
table_node.attrib["screen-width"] = str(screen_width) + "px"
colgroup_node = etree.Element("colgroup")
for i, cell in enumerate(header_cells):
# create a col entry for each column
col_node = etree.Element("col")
col_node.attrib["width"] = str(widths[i]) + "%"
colgroup_node.append(col_node)
# wrap the content in a <b> tag
cell.tag = "b"
new_td_element = etree.Element("td")
cell.getparent().replace(cell, new_td_element)
new_td_element.append(cell)
# copy attributes
for k, v in cell.attrib.items():
if k == "width":
continue
new_td_element.set(k, v)
cell.attrib.clear()
# set the background color of table header
new_td_element.attrib["color"] = "#bbbbbb"
#end for
body_cells = table_node.xpath("tbody/tr/td")
# set background color of table body cells
for cell in body_cells:
cell.attrib["color"] = "#ddeeff"
# insert the newly-created colgroup element
table_node.insert(0, colgroup_node)
return table_node
#end function
def __fix_thead(self, thead_node):
header_row = thead_node.xpath("tr")[0]
header_row.tag = "th"
thead_node.getparent().replace(thead_node, header_row)
return header_row
#end function
def __fix_tbody(self, tbody_node):
table_node = tbody_node.getparent()
body_rows = tbody_node.xpath("tr")
for row in body_rows:
table_node.append(row)
table_node.remove(tbody_node)
return body_rows[0]
#end function
def __fix_figure(self, figure_node):
section_elements = {
"chapter": 1,
"section": 1,
"subsection": 1,
"subsubsection": 1,
"minisection": 1,
"preface": 1,
"abstract": 1,
"appendix": 1
}
parent = figure_node.getparent()
grandparent = parent.getparent()
if not section_elements.get(grandparent.tag, None):
raise MarkdownConverterError("The parent or grandparent of image "\
"'%s' is not a sectioning element." % figure_node.get("alt"))
if etree.tostring(parent, method="text", encoding="unicode")\
.strip() == "":
grandparent.replace(parent, figure_node)
else:
figure_node.attrib["align"] = "left"
img_node = figure_node.xpath("img")[0]
img_node.attrib["print-width"] = "50%"
img_node.attrib["screen-width"] = "460px"
#end if
return figure_node
#end function
def __fix_img(self, img_node):
src = img_node.attrib["src"]
if os.path.isabs(src) or os.path.isfile(src):
return img_node
if not "input_dir" in self.config:
return img_node
input_dir = self.config["input_dir"]
img_node.attrib["src"] = os.path.normpath(os.path.join(input_dir, src))
return img_node
#end function
#end class
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding index on 'Comment', fields ['date']
db.create_index('vkontakte_board_comment', ['date'])
# Adding index on 'Topic', fields ['updated']
db.create_index('vkontakte_board_topic', ['updated'])
# Adding index on 'Topic', fields ['created']
db.create_index('vkontakte_board_topic', ['created'])
def backwards(self, orm):
# Removing index on 'Topic', fields ['created']
db.delete_index('vkontakte_board_topic', ['created'])
# Removing index on 'Topic', fields ['updated']
db.delete_index('vkontakte_board_topic', ['updated'])
# Removing index on 'Comment', fields ['date']
db.delete_index('vkontakte_board_comment', ['date'])
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'vkontakte_board.comment': {
'Meta': {'ordering': "['remote_id']", 'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics_comments'", 'to': "orm['vkontakte_users.User']"}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'50'"}),
'text': ('django.db.models.fields.TextField', [], {}),
'topic': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['vkontakte_board.Topic']"})
},
'vkontakte_board.topic': {
'Meta': {'ordering': "['remote_id']", 'object_name': 'Topic'},
'comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics_created'", 'to': "orm['vkontakte_users.User']"}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_comment': ('django.db.models.fields.TextField', [], {}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics'", 'to': "orm['vkontakte_groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fixed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_comment': ('django.db.models.fields.TextField', [], {}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'50'"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'topics_updated'", 'to': "orm['vkontakte_users.User']"})
},
'vkontakte_groups.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '800'}),
'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['vkontakte_users.User']", 'symmetrical': 'False'})
},
'vkontakte_places.city': {
'Meta': {'ordering': "['name']", 'object_name': 'City'},
'area': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cities'", 'null': 'True', 'to': "orm['vkontakte_places.Country']"}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
},
'vkontakte_places.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
},
'vkontakte_users.user': {
'Meta': {'ordering': "['remote_id']", 'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {}),
'activity': ('django.db.models.fields.TextField', [], {}),
'albums': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'audios': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'bdate': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'books': ('django.db.models.fields.TextField', [], {}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_places.City']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'counters_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_places.Country']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'faculty': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'faculty_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'followers': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'friends_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followers_users'", 'symmetrical': 'False', 'to': "orm['vkontakte_users.User']"}),
'games': ('django.db.models.fields.TextField', [], {}),
'graduation': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'has_mobile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'home_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interests': ('django.db.models.fields.TextField', [], {}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'livejournal': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'movies': ('django.db.models.fields.TextField', [], {}),
'mutual_friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'notes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_medium_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'photo_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'rate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'relation': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'sex': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'skype': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'subscriptions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'sum_counters': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'timezone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'tv': ('django.db.models.fields.TextField', [], {}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'university': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'university_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'user_photos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'user_videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'wall_comments': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'vkontakte_wall.comment': {
'Meta': {'ordering': "['post', '-date']", 'object_name': 'Comment'},
'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['contenttypes.ContentType']"}),
'author_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'from_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'wall_comments'", 'to': "orm['vkontakte_wall.Post']"}),
'raw_html': ('django.db.models.fields.TextField', [], {}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}),
'reply_for_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'replies'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'reply_for_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_wall.Comment']", 'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wall_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_comments'", 'to': "orm['contenttypes.ContentType']"}),
'wall_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'vkontakte_wall.post': {
'Meta': {'ordering': "['wall_owner_id', '-date']", 'object_name': 'Post'},
'attachments': ('django.db.models.fields.TextField', [], {}),
'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_posts'", 'to': "orm['contenttypes.ContentType']"}),
'author_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'copy_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_copy_posts'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'copy_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'copy_post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['vkontakte_wall.Post']", 'null': 'True'}),
'copy_text': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'fetched': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'geo': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'like_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'like_posts'", 'blank': 'True', 'to': "orm['vkontakte_users.User']"}),
'likes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'media': ('django.db.models.fields.TextField', [], {}),
'online': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'post_source': ('django.db.models.fields.TextField', [], {}),
'raw_html': ('django.db.models.fields.TextField', [], {}),
'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': "'20'"}),
'reply_count': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'repost_users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'repost_posts'", 'blank': 'True', 'to': "orm['vkontakte_users.User']"}),
'reposts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'signer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'text': ('django.db.models.fields.TextField', [], {}),
'wall_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_posts'", 'to': "orm['contenttypes.ContentType']"}),
'wall_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {})
}
}
complete_apps = ['vkontakte_board']
|
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.
"""Test recovery from a crash during chainstate writing.
- 4 nodes
* node0, node1, and node2 will have different dbcrash ratios, and different
dbcache sizes
* node3 will be a regular node, with no crashing.
* The nodes will not connect to each other.
- use default test framework starting chain. initialize starting_tip_height to
tip height.
- Main loop:
* generate lots of transactions on node3, enough to fill up a block.
* uniformly randomly pick a tip height from starting_tip_height to
tip_height; with probability 1/(height_difference+4), invalidate this block.
* mine enough blocks to overtake tip_height at start of loop.
* for each node in [node0,node1,node2]:
- for each mined block:
* submit block to node
* if node crashed on/after submitting:
- restart until recovery succeeds
- check that utxo matches node3 using gettxoutsetinfo
"""
import errno
import http.client
import random
import sys
import time
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, ToHex
from test_framework.test_framework import PivxTestFramework
from test_framework.util import assert_equal, create_confirmed_utxos, hex_str_to_bytes
HTTP_DISCONNECT_ERRORS = [http.client.CannotSendRequest]
try:
HTTP_DISCONNECT_ERRORS.append(http.client.RemoteDisconnected)
except AttributeError:
pass
class ChainstateWriteCrashTest(PivxTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.rpc_timewait = 600
self.setup_clean_chain = False
# Need a bit of extra time for the nodes to start up for this test
self.chain_params = ['-nuparams=v5_shield:90000', '-nuparams=PIVX_v4.0:90000',
'-nuparams=PIVX_v3.4:90000', '-nuparams=Zerocoin_Public:90000',
'-nuparams=Zerocoin_v2:90000', '-nuparams=Zerocoin:90000',
'-nuparams=PoS_v2:90000', '-nuparams=PoS:90000']
# Set -maxmempool=0 to turn off mempool memory sharing with dbcache
# Set -rpcservertimeout=900 to reduce socket disconnects in this
# long-running test
self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900"] + self.chain_params
# Set different crash ratios and cache sizes. Note that not all of
# -dbcache goes to the in-memory coins cache.
self.node0_args = ["-dbcrashratio=8", "-dbcache=4", "-dbbatchsize=200000"] + self.base_args
self.node1_args = ["-dbcrashratio=16", "-dbcache=8", "-dbbatchsize=200000"] + self.base_args
self.node2_args = ["-dbcrashratio=24", "-dbcache=16", "-dbbatchsize=200000"] + self.base_args
# Node3 is a normal node with default args, except will mine full blocks
self.node3_args = ["-blockmaxsize=1999000"] + self.chain_params # future: back port blockmaxweight
self.extra_args = [self.node0_args, self.node1_args, self.node2_args, self.node3_args]
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
# Leave them unconnected, we'll use submitblock directly in this test
def restart_node(self, node_index, expected_tip):
"""Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash.
Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up
after 120 seconds. Returns the utxo hash of the given node."""
time_start = time.time()
while time.time() - time_start < 120:
try:
# Any of these RPC calls could throw due to node crash
self.start_node(node_index)
self.nodes[node_index].waitforblock(expected_tip)
utxo_hash = self.nodes[node_index].gettxoutsetinfo()['hash_serialized_2']
return utxo_hash
except:
# An exception here should mean the node is about to crash.
# If pivxd exits, then try again. wait_for_node_exit()
# should raise an exception if pivxd doesn't exit.
self.wait_for_node_exit(node_index, timeout=10)
self.crashed_on_restart += 1
time.sleep(1)
# If we got here, pivxd isn't coming back up on restart. Could be a
# bug in pivxd, or we've gotten unlucky with our dbcrash ratio --
# perhaps we generated a test case that blew up our cache?
# TODO: If this happens a lot, we should try to restart without -dbcrashratio
# and make sure that recovery happens.
raise AssertionError("Unable to successfully restart node %d in allotted time" % node_index)
def submit_block_catch_error(self, node_index, block):
"""Try submitting a block to the given node.
Catch any exceptions that indicate the node has crashed.
Returns true if the block was submitted successfully; false otherwise."""
try:
self.nodes[node_index].submitblock(block)
return True
except http.client.BadStatusLine as e:
# Prior to 3.5 BadStatusLine('') was raised for a remote disconnect error.
if sys.version_info[0] == 3 and sys.version_info[1] < 5 and e.line == "''":
self.log.debug("node %d submitblock raised exception: %s", node_index, e)
return False
else:
raise
except tuple(HTTP_DISCONNECT_ERRORS) as e:
self.log.debug("node %d submitblock raised exception: %s", node_index, e)
return False
except OSError as e:
self.log.debug("node %d submitblock raised OSError exception: errno=%s", node_index, e.errno)
if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET, errno.EPROTOTYPE]:
# The node has likely crashed
return False
else:
# Unexpected exception, raise
raise
def sync_node3blocks(self, block_hashes):
"""Use submitblock to sync node3's chain with the other nodes
If submitblock fails, restart the node and get the new utxo hash.
If any nodes crash while updating, we'll compare utxo hashes to
ensure recovery was successful."""
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
# Retrieve all the blocks from node3
blocks = []
for block_hash in block_hashes:
blocks.append([block_hash, self.nodes[3].getblock(block_hash, False)])
# Deliver each block to each other node
for i in range(3):
nodei_utxo_hash = None
self.log.debug("Syncing blocks to node %d", i)
for (block_hash, block) in blocks:
# Get the block from node3, and submit to node_i
self.log.debug("submitting block %s", block_hash)
if not self.submit_block_catch_error(i, block):
# TODO: more carefully check that the crash is due to -dbcrashratio
# (change the exit code perhaps, and check that here?)
self.wait_for_node_exit(i, timeout=30)
self.log.debug("Restarting node %d after block hash %s", i, block_hash)
nodei_utxo_hash = self.restart_node(i, block_hash)
assert nodei_utxo_hash is not None
self.restart_counts[i] += 1
else:
# Clear it out after successful submitblock calls -- the cached
# utxo hash will no longer be correct
nodei_utxo_hash = None
# Check that the utxo hash matches node3's utxo set
# NOTE: we only check the utxo set if we had to restart the node
# after the last block submitted:
# - checking the utxo hash causes a cache flush, which we don't
# want to do every time; so
# - we only update the utxo cache after a node restart, since flushing
# the cache is a no-op at that point
if nodei_utxo_hash is not None:
self.log.debug("Checking txoutsetinfo matches for node %d", i)
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def verify_utxo_hash(self):
"""Verify that the utxo hash of each node matches node3.
Restart any nodes that crash while querying."""
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized_2']
self.log.info("Verifying utxo hash matches for all nodes")
for i in range(3):
try:
nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()['hash_serialized_2']
except OSError:
# probably a crash on db flushing
nodei_utxo_hash = self.restart_node(i, self.nodes[3].getbestblockhash())
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def generate_small_transactions(self, node, count, utxo_list):
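        """Create up to `count` small transactions from utxo_list, each spending two
        utxos into three equal-value outputs, then sign and broadcast them via `node`."""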
FEE = 10000 # TODO: replace this with node relay fee based calculation
num_transactions = 0
random.shuffle(utxo_list)
while len(utxo_list) >= 2 and num_transactions < count:
tx = CTransaction()
input_amount = 0
for _ in range(2):
utxo = utxo_list.pop()
tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
input_amount += int(utxo['amount'] * COIN)
output_amount = (input_amount - FEE) // 3
if output_amount <= 0:
# Sanity check -- if we chose inputs that are too small, skip
continue
for _ in range(3):
tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
# Sign and send the transaction to get into the mempool
tx_signed_hex = node.signrawtransaction(ToHex(tx))['hex']
node.sendrawtransaction(tx_signed_hex)
num_transactions += 1
def run_test(self):
# Track test coverage statistics
self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2
self.crashed_on_restart = 0 # Track count of crashes during recovery
# Start by creating a lot of utxos on node3
initial_height = self.nodes[3].getblockcount()
utxo_list = create_confirmed_utxos(self.nodes[3].getnetworkinfo()['relayfee'], self.nodes[3], 5000)
self.log.info("Prepped %d utxo entries", len(utxo_list))
# Sync these blocks with the other nodes
block_hashes_to_sync = []
for height in range(initial_height + 1, self.nodes[3].getblockcount() + 1):
block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
self.log.debug("Syncing %d blocks with other nodes", len(block_hashes_to_sync))
# Syncing the blocks could cause nodes to crash, so the test begins here.
self.sync_node3blocks(block_hashes_to_sync)
starting_tip_height = self.nodes[3].getblockcount()
# Main test loop:
# each time through the loop, generate a bunch of transactions,
# and then either mine a single new block on the tip, or some-sized reorg.
for i in range(40):
self.log.info("Iteration %d, generating 2500 transactions %s", i, self.restart_counts)
# Generate a bunch of small-ish transactions
self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
# Pick a random block between current tip, and starting tip
current_height = self.nodes[3].getblockcount()
random_height = random.randint(starting_tip_height, current_height)
self.log.debug("At height %d, considering height %d", current_height, random_height)
if random_height > starting_tip_height:
# Randomly reorg from this point with some probability (1/4 for
# tip, 1/5 for tip-1, ...)
if random.random() < 1.0 / (current_height + 4 - random_height):
self.log.debug("Invalidating block at height %d", random_height)
self.nodes[3].invalidateblock(self.nodes[3].getblockhash(random_height))
# Now generate new blocks until we pass the old tip height
self.log.debug("Mining longer tip")
block_hashes = []
while current_height + 1 > self.nodes[3].getblockcount():
block_hashes.extend(self.nodes[3].generate(min(10, current_height + 1 - self.nodes[3].getblockcount())))
self.log.debug("Syncing %d new blocks...", len(block_hashes))
self.sync_node3blocks(block_hashes)
utxo_list = self.nodes[3].listunspent()
self.log.debug("Node3 utxo count: %d", len(utxo_list))
# Check that the utxo hashes agree with node3
# Useful side effect: each utxo cache gets flushed here, so that we
# won't get crashes on shutdown at the end of the test.
self.verify_utxo_hash()
# Check the test coverage
self.log.info("Restarted nodes: %s; crashes on restart: %d", self.restart_counts, self.crashed_on_restart)
# If no nodes were restarted, we didn't test anything.
assert self.restart_counts != [0, 0, 0]
# Make sure we tested the case of crash-during-recovery.
assert self.crashed_on_restart > 0
# Warn if any of the nodes escaped restart.
for i in range(3):
if self.restart_counts[i] == 0:
self.log.warning("Node %d never crashed during utxo flush!", i)
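# Illustrative helper (not used by the test itself): the reorg step in run_test
# invalidates a randomly chosen height with probability
# 1 / (current_height + 4 - random_height), i.e. 1/4 when the chosen height is the
# tip, 1/5 for tip-1, and so on, so deeper reorgs are increasingly unlikely.
def example_reorg_probability(current_height, random_height):
    """Return the invalidation probability used above (illustration only)."""
    return 1.0 / (current_height + 4 - random_height)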
if __name__ == "__main__":
ChainstateWriteCrashTest().main()
|
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
""" A module for viewing the details of all available devices.
"""
import math
from qiskit.exceptions import QiskitError
try:
# pylint: disable=import-error,no-name-in-module
from qiskit.providers.ibmq import IBMQ, IBMQBackend
except ImportError:
pass
def get_unique_backends():
"""Gets the unique backends that are available.
Returns:
list: Unique available backends.
Raises:
QiskitError: No backends available.
"""
backends = []
for provider in IBMQ.providers():
for backend in provider.backends():
backends.append(backend)
unique_hardware_backends = []
unique_names = []
for back in backends:
if back.name() not in unique_names and not back.configuration().simulator:
unique_hardware_backends.append(back)
unique_names.append(back.name())
if not unique_hardware_backends:
raise QiskitError('No backends available.')
return unique_hardware_backends
def backend_monitor(backend):
"""Monitor a single IBMQ backend.
Args:
backend (IBMQBackend): Backend to monitor.
Raises:
QiskitError: Input is not a IBMQ backend.
"""
if not isinstance(backend, IBMQBackend):
raise QiskitError('Input variable is not of type IBMQBackend.')
config = backend.configuration().to_dict()
status = backend.status().to_dict()
config_dict = {**status, **config}
if not config['simulator']:
props = backend.properties().to_dict()
print(backend.name())
print('='*len(backend.name()))
print('Configuration')
print('-'*13)
offset = ' '
upper_list = ['n_qubits', 'operational',
'status_msg', 'pending_jobs',
'backend_version', 'basis_gates',
'local', 'simulator']
lower_list = list(set(config_dict.keys()).difference(upper_list))
# Remove gates because they are in a different tab
lower_list.remove('gates')
for item in upper_list+lower_list:
print(offset+item+':', config_dict[item])
# Stop here if simulator
if config['simulator']:
return
print()
qubit_header = 'Qubits [Name / Freq / T1 / T2 / U1 err / U2 err / U3 err / Readout err]'
print(qubit_header)
print('-'*len(qubit_header))
sep = ' / '
for qub in range(len(props['qubits'])):
name = 'Q%s' % qub
qubit_data = props['qubits'][qub]
gate_data = [g for g in props['gates'] if g['qubits'] == [qub]]
t1_info = qubit_data[0]
t2_info = qubit_data[1]
freq_info = qubit_data[2]
readout_info = qubit_data[3]
freq = str(round(freq_info['value'], 5))+' '+freq_info['unit']
        T1 = str(round(t1_info['value'], 5)) + ' ' + t1_info['unit']
        T2 = str(round(t2_info['value'], 5)) + ' ' + t2_info['unit']
        # Defaults in case a u1/u2/u3 gate entry is missing for this qubit
        U1 = U2 = U3 = 'Unknown'
        for gd in gate_data:
if gd['gate'] == 'u1':
U1 = str(round(gd['parameters'][0]['value'], 5))
break
for gd in gate_data:
if gd['gate'] == 'u2':
U2 = str(round(gd['parameters'][0]['value'], 5))
break
for gd in gate_data:
if gd['gate'] == 'u3':
U3 = str(round(gd['parameters'][0]['value'], 5))
break
readout_error = str(round(readout_info['value'], 5))
qstr = sep.join([name, freq, T1, T2, U1, U2, U3, readout_error])
print(offset+qstr)
print()
multi_qubit_gates = [g for g in props['gates'] if len(g['qubits']) > 1]
multi_header = 'Multi-Qubit Gates [Name / Type / Gate Error]'
print(multi_header)
print('-'*len(multi_header))
    for gate in multi_qubit_gates:
qubits = gate['qubits']
ttype = gate['gate']
error = round(gate['parameters'][0]['value'], 5)
mstr = sep.join(["{}{}_{}".format(ttype, qubits[0], qubits[1]), ttype, str(error)])
print(offset+mstr)
def backend_overview():
"""Gives overview information on all the IBMQ
backends that are available.
"""
unique_hardware_backends = get_unique_backends()
_backends = []
# Sort backends by operational or not
    for back in unique_hardware_backends:
if back.status().operational:
_backends = [back] + _backends
else:
_backends = _backends + [back]
stati = [back.status() for back in _backends]
idx = list(range(len(_backends)))
pending = [s.pending_jobs for s in stati]
_, least_idx = zip(*sorted(zip(pending, idx)))
# Make sure least pending is operational
for ind in least_idx:
if stati[ind].operational:
least_pending_idx = ind
break
num_rows = math.ceil(len(_backends)/3)
count = 0
num_backends = len(_backends)
for _ in range(num_rows):
max_len = 0
str_list = ['']*8
for idx in range(3):
offset = ' ' * 10 if idx else ''
config = _backends[count].configuration().to_dict()
props = _backends[count].properties().to_dict()
n_qubits = config['n_qubits']
str_list[0] += (' '*(max_len-len(str_list[0]))+offset)
str_list[0] += _backends[count].name()
str_list[1] += (' '*(max_len-len(str_list[1]))+offset)
str_list[1] += '-'*len(_backends[count].name())
str_list[2] += (' '*(max_len-len(str_list[2]))+offset)
str_list[2] += 'Num. Qubits: %s' % config['n_qubits']
str_list[3] += (' '*(max_len-len(str_list[3]))+offset)
str_list[3] += 'Pending Jobs: %s' % stati[count].pending_jobs
str_list[4] += (' '*(max_len-len(str_list[4]))+offset)
str_list[4] += 'Least busy: %s' % (count == least_pending_idx)
str_list[5] += (' '*(max_len-len(str_list[5]))+offset)
str_list[5] += 'Operational: %s' % stati[count].operational
str_list[6] += (' '*(max_len-len(str_list[6]))+offset)
str_list[6] += 'Avg. T1: %s' % round(sum([q[0]['value']
for q in props['qubits']])/n_qubits, 1)
str_list[7] += (' '*(max_len-len(str_list[7]))+offset)
str_list[7] += 'Avg. T2: %s' % round(sum([q[1]['value']
for q in props['qubits']])/n_qubits, 1)
count += 1
if count == num_backends:
break
max_len = max([len(s) for s in str_list])
print("\n".join(str_list))
print('\n'*2)
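# Illustrative usage sketch (assumes an IBM Quantum account has already been saved
# locally via IBMQ.save_account(); illustration only, not part of the original module):
if __name__ == "__main__":
    IBMQ.load_account()
    backend_overview()
    for example_backend in get_unique_backends():
        backend_monitor(example_backend)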
|
|
'''
Created on 16.08.10
@author: klizardin
The MIT License (MIT)
Copyright (c) 2016 klizardin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import os
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
def save_layer(file_name,obj_name,value,consts):
#file_name = consts.get_file_name_by_index(indx,file_name)
file_name = file_name % (obj_name,)
numpy.save(file = file_name, arr = value)
return
def load_layer(file_name,obj_name,consts):
#file_name = consts.get_file_name_by_index(indx,file_name)
file_name = file_name % (obj_name,)
if not os.path.isfile(path = file_name):
return None
return numpy.asarray(a = numpy.load(file = file_name),dtype=theano.config.floatX)
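# Illustrative round-trip (hypothetical template name; not part of the original code):
# ``file_name`` must contain a single ``%s`` placeholder which is filled with the
# object name, e.g. "example_%s.npy" becomes "example_W0.npy" on disk.
def example_layer_roundtrip(template="example_%s.npy"):
    """Save and reload one layer to show the template convention (illustration only)."""
    value = numpy.zeros((2, 3), dtype=theano.config.floatX)
    save_layer(template, "W0", value, consts=None)    # writes example_W0.npy
    return load_layer(template, "W0", consts=None)    # reads it back, or None if missing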
class ApproxNet(object):
'''
The deep net for regression
'''
def __create_layer(self, numpy_rng, batch_size, layer_size, W, b, prev_layer, i):
if not W or not W[i]:
delta = numpy.sqrt(6 / (float(prev_layer) + float(layer_size)))
            initial_W = numpy.asarray(
                numpy_rng.uniform(
                    low = -delta,
                    high = delta,
                    size = (prev_layer, layer_size)),
                dtype = theano.config.floatX)
self.W.append(theano.shared(value = initial_W, name = 'W' + str(i)))
#print("W%d size = (%d,%d)" % (i,prev_layer, layer_size))
else:
self.W.append(W[i])
if not b or not b[i]:
self.b.append(theano.shared(value = numpy.zeros(layer_size, dtype=theano.config.floatX),name = 'b'+str(i)))
#print("b%d size = (%d,%d)" % (i,1,layer_size))
else:
self.b.append(b[i])
self.Result.append(theano.shared(value = numpy.zeros((batch_size,layer_size), dtype=theano.config.floatX),name = 'Result'+str(i)))
#print("Result%d size = (%d,%d)" % (i,batch_size,layer_size))
return layer_size
def __create_hidden_layers(self, numpy_rng, batch_size, hidden_count, hidden_size, W, b, prev_layer,base_i):
for i in numpy.arange(hidden_count):
prev_layer = self.__create_layer(numpy_rng, batch_size, hidden_size, W, b, prev_layer, base_i+i)
return prev_layer
def __get_processed(self, input_x):
"""
Computes the values of the encoded layer
"""
data = input_x
for idx in numpy.arange(self.hidden_count):
self.Result[idx] = self.hidden_activation(T.dot(data, self.W[idx]) + self.b[idx])
data = self.Result[idx]
self.Result[self.hidden_count] = T.tanh(T.dot(data, self.W[self.hidden_count]) + self.b[self.hidden_count])
return self.Result[self.hidden_count]
def __get_L1(self):
self.L1 = 0
        if len(self.W)==0:
            return self.L1
for W in self.W:
self.L1 = self.L1 + T.mean(T.abs_(W))
return self.L1/len(self.W)
def __get_L2(self):
self.L2 = 0
if len(self.W)==0:
return self.L2
for W in self.W:
self.L2 = self.L2 + T.mean(T.sqr(W))
return self.L2/len(self.W)
    def __get_cost_updates(self, target,learning_rate,L1_decay,L2_decay):
        """ This function computes the cost and the updates for one training
        step of the net """
y = self.__get_processed(self.input_x)
        # note : we average over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        L = T.mean(T.sqr(y-target),axis=1)
        # note : L is now a vector, where each element is the mean squared
        #        error between the prediction and the target for the
        #        corresponding example of the minibatch. We need to compute
        #        the average of all these to get the cost of the minibatch
cost = T.mean(L) + self.__get_L2() * L2_decay + self.__get_L1() * L1_decay
# compute the gradients of the cost of the `dA` with respect
# to its parameters
gparams = T.grad(cost, self.params)
# generate the list of updates
updates = []
updates.extend([
(param, param - learning_rate * gparam)
for param, gparam in zip(self.params, gparams)
])
return (cost, updates)
def __get_run(self):
return self.__get_processed(self.input_x)
def __init__(self
,batch_size
,input_size
,output_size
,hidden_count,hidden_size,hidden_activation
,numpy_rng
,theano_rng = None
,L1_decay = 0
,L2_decay = 0
,W = None
,b = None
,input_x = None
,target_y = None
,result_y = None
):
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
self.theano_rng = theano_rng
self.input_size = input_size
self.output_size = output_size
self.hidden_count = hidden_count
        self.hidden_size = hidden_size
self.hidden_activation = hidden_activation
if not input_x:
input_x = T.matrix(name="x",dtype=theano.config.floatX)
if not target_y:
target_y = T.matrix(name="target",dtype=theano.config.floatX)
if not result_y:
result_y = T.matrix(name="y",dtype=theano.config.floatX)
self.input_x = input_x
self.target_y = target_y
self.result_y = result_y
self.W = []
self.b = []
self.Result = []
prev_layer = input_size
prev_layer = self.__create_hidden_layers(numpy_rng, batch_size, hidden_count, hidden_size, W, b, prev_layer,0)
prev_layer = self.__create_layer(numpy_rng, batch_size, output_size, W, b, prev_layer, hidden_count)
self.params = []
self.params.extend(self.W)
self.params.extend(self.b)
self.learning_rate = T.scalar(name = "learning_rate",dtype=theano.config.floatX)
self.L1 = T.scalar(name = "L1",dtype=theano.config.floatX)
self.L2 = T.scalar(name = "L2",dtype=theano.config.floatX)
# create functions of deep net
cost,updates = self.__get_cost_updates(target = self.target_y, learning_rate = self.learning_rate,L1_decay = L1_decay,L2_decay = L2_decay)
self.train_fn = theano.function(inputs = [self.input_x,self.target_y,self.learning_rate],outputs = [cost],updates=updates)
self.result_y = self.__get_run()
self.run_fn = theano.function(inputs=[self.input_x],outputs=[self.result_y])
return
def save_state(self,file_name,consts):
i = 0;
for W in self.W:
save_layer(file_name,"W"+str(i),W.get_value(),consts)
i=i+1
i = 0
for b in self.b:
save_layer(file_name,"b" + str(i),b.get_value(),consts)
i=i+1
return
def load_state(self,file_name,consts):
i = 0;
for W in self.W:
layer = load_layer(file_name,"W"+str(i),consts)
if layer is None:
return False
W.set_value(layer)
i=i+1
i = 0
for b in self.b:
layer = load_layer(file_name,"b" + str(i),consts)
if layer is None:
return False
b.set_value(layer)
i=i+1
return True
def print_state(self):
i = 0;
for W in self.W:
print("W"+str(i));
print(W.get_value())
i=i+1
i = 0
        for b in self.b:
            print("b" + str(i))
            print(b.get_value())
            i=i+1
#i = 0
#for result in self.Result:
# print("Result"+str(i))
# print(result.get_value())
return
class AutoEncoder(object):
'''
The auto encoder deep net.
'''
def __create_layer(self, numpy_rng, mini_batch_size, layer_size, W, b, prev_layer, i):
if not W or not W[i]:
delta = numpy.sqrt(6 / (float(prev_layer) + float(layer_size)))
initial_W = numpy.asarray(
numpy_rng.uniform(
low = -delta,
high = delta,
size = (prev_layer, layer_size))
,dtype=theano.config.floatX
)
self.W.append(theano.shared(value = initial_W, name = 'W' + str(i)))
#print("W%d size = (%d,%d)" % (i,prev_layer, layer_size))
else:
self.W.append(W[i])
if not b or not b[i]:
self.b.append(theano.shared(value = numpy.zeros(layer_size, dtype=theano.config.floatX),name = 'b'+str(i)))
#print("b%d size = (%d,%d)" % (i,1,layer_size))
else:
self.b.append(b[i])
self.Result.append(theano.shared(value = numpy.zeros((mini_batch_size,layer_size), dtype=theano.config.floatX),name = 'Result'+str(i)))
#print("Result%d size = (%d,%d)" % (i,mini_batch_size,layer_size))
return layer_size
def __create_hidden_layers(self, numpy_rng, mini_batch_size, hidden_count, hidden_size, W, b, prev_layer,base_i):
for i in numpy.arange(hidden_count):
prev_layer = self.__create_layer(numpy_rng, mini_batch_size, hidden_size, W, b, prev_layer, base_i+i)
return prev_layer
    def __get_corrupted_input(self, input_x, corruption_level):
        """This function keeps ``1-corruption_level`` entries of the inputs the
        same and zeroes out a randomly selected subset of size ``corruption_level``
Note : first argument of theano.rng.binomial is the shape(size) of
random numbers that it should produce
second argument is the number of trials
third argument is the probability of success of any trial
this will produce an array of 0s and 1s where 1 has a
probability of 1 - ``corruption_level`` and 0 with
``corruption_level``
        The binomial function returns an int64 data type by
        default. int64 multiplied by the input
        type (floatX) always returns float64. To keep all data
        in floatX when floatX is float32, we set the dtype of
        the binomial to floatX. As in our case the value of
        the binomial is always 0 or 1, this doesn't change the
        result. This is needed to allow the gpu to work
        correctly, as it only supports float32 for now.
"""
return self.theano_rng.binomial(
size=input_x.shape, n=1,
p= 1 - corruption_level,
dtype=theano.config.floatX) * input_x
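    # Illustrative note: with corruption_level = 0.3 the binomial mask keeps each
    # entry of input_x with probability 0.7 and zeroes it otherwise, e.g. a
    # hypothetical draw [1, 0, 1, 1] turns [0.2, 0.5, 0.1, 0.9] into [0.2, 0.0, 0.1, 0.9];
    # the mask is drawn in floatX so the product stays in floatX on the GPU.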
def __get_encoded(self, input_x):
"""
Computes the values of the encoded layer
"""
data = input_x
for idx in numpy.arange(self.hidden_count):
self.Result[idx] = self.activation(T.dot(data, self.W[idx]) + self.b[idx])
data = self.Result[idx]
self.Result[self.hidden_count] = T.tanh(T.dot(data, self.W[self.hidden_count]) + self.b[self.hidden_count])*float(0.5)
return self.Result[self.hidden_count]
def __get_reconstructed(self,encoded):
"""
Computes the values of the result layer
"""
data = encoded
base_i = self.hidden_count+1
for idx in numpy.arange(self.hidden_count):
self.Result[base_i+idx] = self.activation(T.dot(data, self.W[base_i+idx]) + self.b[base_i+idx])
data = self.Result[base_i+idx]
self.Result[base_i+self.hidden_count] = T.tanh(T.dot(data, self.W[base_i+self.hidden_count]) + self.b[base_i+self.hidden_count])
return self.Result[base_i+self.hidden_count]
def __get_L1(self):
self.L1 = 0
        if len(self.W)==0:
            return self.L1
for W in self.W:
self.L1 = self.L1 + T.mean(T.abs_(W))
return self.L1/len(self.W)
def __get_L2(self):
self.L2 = 0
if len(self.W)==0:
return self.L2
for W in self.W:
self.L2 = self.L2 + T.mean(T.sqr(W))
return self.L2/len(self.W)
    def __get_cost_updates(self, corruption_level, learning_rate,L1_decay,L2_decay):
        """ This function computes the cost and the updates for one training
        step of the dA """
tilde_x = self.__get_corrupted_input(self.input_x, corruption_level)
y = self.__get_encoded(tilde_x)
z = self.__get_reconstructed(y)
        # note : we average over the size of a datapoint; if we are using
        #        minibatches, L will be a vector, with one entry per
        #        example in minibatch
        #L = - T.sum(self.input_x * T.log(z) + (1 - self.input_x) * T.log(1 - z), axis=1)
        L = T.mean(T.sqr(z-self.input_x),axis=1)
        # note : L is now a vector, where each element is the mean squared
        #        reconstruction error of the corresponding example of the
        #        minibatch. We need to compute the average of all these to
        #        get the cost of the minibatch
cost = T.mean(L) + self.__get_L2() * L2_decay + self.__get_L1() * L1_decay
# compute the gradients of the cost of the `dA` with respect
# to its parameters
gparams = T.grad(cost, self.params)
# generate the list of updates
updates = []
updates.extend([
(param, param - learning_rate * gparam)
for param, gparam in zip(self.params, gparams)
])
return (cost, updates)
def __get_run(self):
return self.__get_encoded(self.input_x)
def __init__(self,
mini_batch_size,
input_size,encoded_size,
hidden_count,hidden_size,activation,
L1_decay,L2_decay,
numpy_rng,
theano_rng = None,
W = None,
b = None,
input_x = None
):
'''
Constructor
'''
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
self.theano_rng = theano_rng
self.input_size = input_size
self.encoded_size = encoded_size
self.hidden_count = hidden_count
        self.hidden_size = hidden_size
self.activation = activation
if not input_x:
input_x = T.matrix(name="x",dtype=theano.config.floatX)
self.input_x = input_x
self.W = []
self.b = []
self.Result = []
prev_layer = input_size
prev_layer = self.__create_hidden_layers(numpy_rng, mini_batch_size, hidden_count, hidden_size, W, b, prev_layer,0)
prev_layer = self.__create_layer(numpy_rng, mini_batch_size, encoded_size, W, b, prev_layer, hidden_count)
prev_layer = self.__create_hidden_layers(numpy_rng, mini_batch_size, hidden_count, hidden_size, W, b, prev_layer,hidden_count+1)
prev_layer = self.__create_layer(numpy_rng, mini_batch_size, input_size, W, b, prev_layer, 2*hidden_count+1)
self.params = []
self.params.extend(self.W)
self.params.extend(self.b)
self.learning_rate = T.scalar(name = "learning_rate",dtype=theano.config.floatX)
        self.corruption_level = T.scalar(name = "corruption_level",dtype=theano.config.floatX)
self.L1 = T.scalar(name = "L1",dtype=theano.config.floatX)
self.L2 = T.scalar(name = "L2",dtype=theano.config.floatX)
# create functions of autoencoder
cost,updates = self.__get_cost_updates(corruption_level = self.corruption_level, learning_rate = self.learning_rate,L1_decay = L1_decay,L2_decay = L2_decay)
self.train_fn = theano.function(inputs = [self.input_x,self.learning_rate,self.corruption_level],outputs = [cost],updates=updates)
self.encoded = self.__get_run()
self.get_encoded_fn = theano.function(inputs=[self.input_x],outputs=[self.encoded])
return
def save_state(self,file_name,consts):
i = 0;
for W in self.W:
save_layer(file_name,"W"+str(i),W.get_value(),consts)
i=i+1
i = 0
for b in self.b:
save_layer(file_name,"b" + str(i),b.get_value(),consts)
i=i+1
return
def load_state(self,file_name,consts):
i = 0;
for W in self.W:
layer = load_layer(file_name,"W"+str(i),consts)
if layer is None:
return False
W.set_value(layer)
i=i+1
i = 0
for b in self.b:
layer = load_layer(file_name,"b" + str(i),consts)
if layer is None:
return False
b.set_value(layer)
i=i+1
return True
def print_state(self):
i = 0;
for W in self.W:
print("W"+str(i));
print(W.get_value())
i=i+1
i = 0
        for b in self.b:
            print("b" + str(i))
            print(b.get_value())
            i=i+1
#i = 0
#for result in self.Result:
# print("Result"+str(i))
# print(result.get_value())
return
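# Illustrative construction sketch (all sizes and hyperparameters below are arbitrary
# example values, not defaults taken from this module):
def example_autoencoder_usage():
    rng = numpy.random.RandomState(1234)
    encoder = AutoEncoder(mini_batch_size=32,
                          input_size=64, encoded_size=8,
                          hidden_count=2, hidden_size=32, activation=T.nnet.sigmoid,
                          L1_decay=0.0, L2_decay=1e-4,
                          numpy_rng=rng)
    batch = numpy.zeros((32, 64), dtype=theano.config.floatX)
    cost = encoder.train_fn(batch, 0.01, 0.1)    # inputs: x, learning_rate, corruption_level
    encoded = encoder.get_encoded_fn(batch)      # encoded representation of the batch
    return cost, encoded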
|
|
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from pybuilder.cli import (parse_options,
ColoredStdOutLogger,
CommandLineUsageException,
StdOutLogger,
length_of_longest_string,
print_list_of_tasks,
get_failure_message)
from pybuilder.core import Logger
from pybuilder.errors import PyBuilderException
from test_utils import Mock, patch, call
@patch("pybuilder.cli.print_text_line", return_value=None)
class TaskListTests(unittest.TestCase):
def setUp(self):
def __eq__(self, other):
return False
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return False
self.mock_reactor = Mock()
self.mock_reactor.project.name = "any-project-name"
self.task_1 = Mock()
self.task_1.__eq__ = __eq__
self.task_1.__ne__ = __ne__
self.task_1.__lt__ = __lt__
self.task_1.name = "task-1"
self.task_1.description = ""
self.task_1.dependencies = []
self.task_2 = Mock()
self.task_2.__eq__ = __eq__
self.task_2.__ne__ = __ne__
self.task_2.__lt__ = __lt__
self.task_2.name = "task-2"
self.task_2.description = ""
self.task_2.dependencies = []
self.mock_reactor.get_tasks.return_value = [self.task_1, self.task_2]
def test_should_render_minimal_task_list_when_in_quiet_mode(self, print_text_line):
print_list_of_tasks(self.mock_reactor, quiet=True)
print_text_line.assert_called_with('task-1:<no description available>\ntask-2:<no description available>')
def test_should_render_verbose_task_list_without_descriptions_and_dependencies(self, print_text_line):
print_list_of_tasks(self.mock_reactor, quiet=False)
print_text_line.assert_has_calls([call('Tasks found for project "any-project-name":'),
call(' task-1 - <no description available>'),
call(' task-2 - <no description available>')])
def test_should_render_verbose_task_list_with_dependencies(self, print_text_line):
self.task_1.dependencies = ["any-dependency", "any-other-dependency"]
print_list_of_tasks(self.mock_reactor, quiet=False)
print_text_line.assert_has_calls([call('Tasks found for project "any-project-name":'),
call(' task-1 - <no description available>'),
call(' depends on tasks: any-dependency any-other-dependency'),
call(' task-2 - <no description available>')])
def test_should_render_verbose_task_list_with_descriptions(self, print_text_line):
self.task_1.description = ["any", "description", "for", "task", "1"]
self.task_2.description = ["any", "description", "for", "task", "2"]
print_list_of_tasks(self.mock_reactor, quiet=False)
print_text_line.assert_has_calls([call('Tasks found for project "any-project-name":'),
call(' task-1 - any description for task 1'),
call(' task-2 - any description for task 2')])
class StdOutLoggerTest(unittest.TestCase):
def setUp(self):
self.stdout_logger = StdOutLogger()
def test_should_return_debug_message_when_debug_level_given(self):
actual_message = self.stdout_logger._level_to_string(Logger.DEBUG)
self.assertEqual(actual_message, "[DEBUG]")
def test_should_return_info_message_when_info_level_given(self):
actual_message = self.stdout_logger._level_to_string(Logger.INFO)
self.assertEqual(actual_message, "[INFO] ")
def test_should_return_warning_message_when_warning_level_given(self):
actual_message = self.stdout_logger._level_to_string(Logger.WARN)
self.assertEqual(actual_message, "[WARN] ")
def test_should_return_error_message_when_any_not_defined_level_given(self):
actual_message = self.stdout_logger._level_to_string(-1)
self.assertEqual(actual_message, "[ERROR]")
class ColoredStdOutLoggerTest(unittest.TestCase):
def setUp(self):
self.colored_stdout_logger = ColoredStdOutLogger()
def test_should_return_italic_debug_message_when_debug_level_given(self):
actual_message = self.colored_stdout_logger._level_to_string(Logger.DEBUG)
self.assertEqual(actual_message, "\x1b[2m[DEBUG]\x1b[0m")
def test_should_return_bold_info_message_when_info_level_given(self):
actual_message = self.colored_stdout_logger._level_to_string(Logger.INFO)
self.assertEqual(actual_message, "\x1b[1m[INFO] \x1b[0m")
def test_should_return_brown_and_bold_warning_message_when_warning_level_given(self):
actual_message = self.colored_stdout_logger._level_to_string(Logger.WARN)
self.assertEqual(actual_message, "\x1b[1;33m[WARN] \x1b[0m")
def test_should_return_bold_and_red_error_message_when_any_not_defined_level_given(self):
actual_message = self.colored_stdout_logger._level_to_string(-1)
self.assertEqual(actual_message, "\x1b[1;31m[ERROR]\x1b[0m")
class ParseOptionsTest(unittest.TestCase):
def assert_options(self, options, **overrides):
self.assertEqual(options.project_directory,
overrides.get("project_directory", "."))
self.assertEqual(options.debug,
overrides.get("debug", False))
self.assertEqual(options.quiet,
overrides.get("quiet", False))
self.assertEqual(options.list_tasks,
overrides.get("list_tasks", False))
self.assertEqual(options.no_color,
overrides.get("no_color", False))
self.assertEqual(options.property_overrides,
overrides.get("property_overrides", {}))
self.assertEqual(options.start_project,
overrides.get("start_project", False))
def test_should_parse_empty_arguments(self):
options, arguments = parse_options([])
self.assert_options(options)
self.assertEqual([], arguments)
def test_should_parse_task_list_without_options(self):
options, arguments = parse_options(["clean", "spam"])
self.assert_options(options)
self.assertEqual(["clean", "spam"], arguments)
def test_should_parse_start_project_without_options(self):
options, arguments = parse_options(["clean", "spam"])
self.assert_options(options)
self.assertEqual(["clean", "spam"], arguments)
def test_should_parse_empty_arguments_with_option(self):
options, arguments = parse_options(["-X"])
self.assert_options(options, debug=True)
self.assertEqual([], arguments)
def test_should_parse_arguments_and_option(self):
options, arguments = parse_options(["-X", "-D", "spam", "eggs"])
self.assert_options(options, debug=True, project_directory="spam")
self.assertEqual(["eggs"], arguments)
def test_should_set_property(self):
options, arguments = parse_options(["-P", "spam=eggs"])
self.assert_options(options, property_overrides={"spam": "eggs"})
self.assertEqual([], arguments)
def test_should_set_property_with_equals_sign(self):
options, arguments = parse_options(["-P", "spam==eg=gs"])
self.assert_options(options, property_overrides={"spam": "=eg=gs"})
self.assertEqual([], arguments)
def test_should_set_multiple_properties(self):
options, arguments = parse_options(["-P", "spam=eggs",
"-P", "foo=bar"])
self.assert_options(options, property_overrides={"spam": "eggs",
"foo": "bar"})
self.assertEqual([], arguments)
def test_should_abort_execution_when_property_definition_has_syntax_error(self):
self.assertRaises(
CommandLineUsageException, parse_options, ["-P", "spam"])
def test_should_parse_single_environment(self):
options, arguments = parse_options(["-E", "spam"])
self.assert_options(options, environments=["spam"])
self.assertEqual([], arguments)
def test_should_parse_multiple_environments(self):
options, arguments = parse_options(["-E", "spam", "-E", "eggs"])
self.assert_options(options, environments=["spam", "eggs"])
self.assertEqual([], arguments)
def test_should_parse_empty_environments(self):
options, arguments = parse_options([])
self.assert_options(options, environments=[])
self.assertEqual([], arguments)
class LengthOfLongestStringTests(unittest.TestCase):
def test_should_return_zero_when_list_is_empty(self):
self.assertEqual(0, length_of_longest_string([]))
def test_should_return_one_when_list_contains_string_with_no_characters(self):
self.assertEqual(0, length_of_longest_string([""]))
def test_should_return_one_when_list_contains_string_with_single_character(self):
self.assertEqual(1, length_of_longest_string(["a"]))
def test_should_return_four_when_list_contains_egg_and_spam(self):
self.assertEqual(4, length_of_longest_string(["egg", "spam"]))
def test_should_return_four_when_list_contains_foo_bar_egg_and_spam(self):
self.assertEqual(4, length_of_longest_string(["egg", "spam", "foo", "bar"]))
class ErrorHandlingTests(unittest.TestCase):
def test_generic_error_message(self):
try:
raise Exception("test")
except Exception:
self.assertRegexpMatches(get_failure_message(), r"Exception: test \(cli_tests.py\:\d+\)")
def test_pyb_error_message(self):
try:
raise PyBuilderException("test")
except Exception:
self.assertRegexpMatches(get_failure_message(), r"test \(cli_tests.py\:\d+\)")
|
|
from typing import Type, Container
import unittest
import pytest
import numpy as np
import sklearn.metrics
from autosklearn.pipeline.util import _test_regressor, _test_regressor_iterative_fit
from autosklearn.pipeline.constants import SPARSE
from autosklearn.pipeline.components.regression.libsvm_svr import LibSVM_SVR
from autosklearn.pipeline.components.regression import _regressors, RegressorChoice
from test.test_pipeline.ignored_warnings import regressor_warnings, ignore_warnings
class BaseRegressionComponentTest(unittest.TestCase):
res = None
module = None
sk_module = None
# Hyperparameter which is increased by iterative_fit
step_hyperparameter = None
# Magic command to not run tests on base class
__test__ = False
def test_default_boston(self):
if self.__class__ == BaseRegressionComponentTest:
return
for _ in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, n_calls = _test_regressor(
dataset="boston",
Regressor=self.module
)
score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
# Special treatment for Gaussian Process Regression
if "default_boston_le_ge" in self.res:
upper, lower = self.res["default_boston_le_ge"]
assert lower <= score <= upper
else:
fixture = self.res["default_boston"]
places = self.res.get("default_boston_places", 7)
if score < -1e10:
score = np.log(-score)
fixture = np.log(-fixture)
self.assertAlmostEqual(fixture, score, places)
if "boston_n_calls" in self.res:
expected = self.res["boston_n_calls"]
if isinstance(expected, Container):
assert n_calls in expected
else:
assert n_calls == expected
def test_default_boston_iterative_fit(self):
if self.__class__ == BaseRegressionComponentTest:
return
if not hasattr(self.module, 'iterative_fit'):
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, regressor = _test_regressor_iterative_fit(
dataset="boston",
Regressor=self.module
)
score = sklearn.metrics.r2_score(targets, predictions)
fixture = self.res["default_boston_iterative"]
places = self.res.get("default_boston_iterative_places", 7)
if score < -1e10:
print(f"score = {score}, fixture = {fixture}")
score = np.log(-score)
fixture = np.log(-fixture)
self.assertAlmostEqual(fixture, score, places)
if self.step_hyperparameter is not None:
param_name = self.step_hyperparameter['name']
default = self.step_hyperparameter['value']
value = getattr(regressor.estimator, param_name)
expected = self.res.get("boston_iterative_n_iter", default)
            # To allow for MLPRegressor, which is currently non-deterministic,
            # we can have multiple values
if isinstance(expected, Container):
assert value in expected
else:
assert value == expected
def test_default_boston_iterative_sparse_fit(self):
if self.__class__ == BaseRegressionComponentTest:
return
if not hasattr(self.module, 'iterative_fit'):
return
if SPARSE not in self.module.get_properties()["input"]:
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, _ = _test_regressor_iterative_fit(
dataset="boston",
Regressor=self.module,
sparse=True
)
self.assertAlmostEqual(self.res["default_boston_iterative_sparse"],
sklearn.metrics.r2_score(targets,
predictions),
places=self.res.get(
"default_boston_iterative_sparse_places", 7))
def test_default_boston_sparse(self):
if self.__class__ == BaseRegressionComponentTest:
return
if SPARSE not in self.module.get_properties()["input"]:
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, _ = _test_regressor(
dataset="boston",
Regressor=self.module,
sparse=True
)
self.assertAlmostEqual(self.res["default_boston_sparse"],
sklearn.metrics.r2_score(targets,
predictions),
places=self.res.get(
"default_boston_sparse_places", 7))
def test_default_diabetes(self):
if self.__class__ == BaseRegressionComponentTest:
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, n_calls = _test_regressor(
dataset="diabetes",
Regressor=self.module
)
self.assertAlmostEqual(self.res["default_diabetes"],
sklearn.metrics.r2_score(targets,
predictions),
places=self.res.get(
"default_diabetes_places", 7))
if self.res.get("diabetes_n_calls"):
self.assertEqual(self.res["diabetes_n_calls"], n_calls)
def test_default_diabetes_iterative_fit(self):
if self.__class__ == BaseRegressionComponentTest:
return
if not hasattr(self.module, 'iterative_fit'):
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, _ = _test_regressor_iterative_fit(
dataset="diabetes",
Regressor=self.module
)
self.assertAlmostEqual(self.res["default_diabetes_iterative"],
sklearn.metrics.r2_score(targets,
predictions),
places=self.res.get(
"default_diabetes_iterative_places", 7))
def test_default_diabetes_iterative_sparse_fit(self):
if self.__class__ == BaseRegressionComponentTest:
return
if not hasattr(self.module, 'iterative_fit'):
return
if SPARSE not in self.module.get_properties()["input"]:
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, regressor = _test_regressor_iterative_fit(
dataset="diabetes",
Regressor=self.module,
sparse=True
)
self.assertAlmostEqual(self.res["default_diabetes_iterative_sparse"],
sklearn.metrics.r2_score(targets,
predictions),
places=self.res.get(
"default_diabetes_iterative_sparse_places", 7))
if self.step_hyperparameter is not None:
self.assertEqual(
getattr(regressor.estimator, self.step_hyperparameter['name']),
self.res.get("diabetes_iterative_n_iter", self.step_hyperparameter['value'])
)
def test_default_diabetes_sparse(self):
if self.__class__ == BaseRegressionComponentTest:
return
if SPARSE not in self.module.get_properties()["input"]:
return
for i in range(2):
with ignore_warnings(regressor_warnings):
predictions, targets, _ = _test_regressor(
dataset="diabetes",
Regressor=self.module,
sparse=True
)
self.assertAlmostEqual(self.res["default_diabetes_sparse"],
sklearn.metrics.r2_score(targets,
predictions),
places=self.res.get(
"default_diabetes_sparse_places", 7))
def test_module_idempotent(self):
""" Fitting twice with the same config gives the same model params.
This is only valid when the random_state passed is an int. If a
RandomState object is passed then repeated calls to fit will have
different results. See the section on "Controlling Randomness" in the
sklearn docs.
https://scikit-learn.org/0.24/common_pitfalls.html#controlling-randomness
"""
if self.__class__ == BaseRegressionComponentTest:
return
regressor_cls = self.module
X = np.array([
[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5],
[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5],
[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5],
[0.5, 0.5], [0.5, 0.5], [0.5, 0.5], [0.5, 0.5],
])
y = np.array([
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1,
1, 1, 1, 1,
])
# We ignore certain keys when comparing
param_keys_ignored = ['base_estimator']
# We use the default config + sampled ones
configuration_space = regressor_cls.get_hyperparameter_search_space()
default = configuration_space.get_default_configuration()
sampled = [configuration_space.sample_configuration() for _ in range(2)]
for seed, config in enumerate([default] + sampled):
model_args = {"random_state": seed, **config}
regressor = regressor_cls(**model_args)
# Get the parameters on the first and second fit with config params
# Also compare their random state
with ignore_warnings(regressor_warnings):
params_first = regressor.fit(X.copy(), y.copy()).estimator.get_params()
if hasattr(regressor.estimator, 'random_state'):
rs_1 = regressor.random_state
rs_estimator_1 = regressor.estimator.random_state
with ignore_warnings(regressor_warnings):
params_second = regressor.fit(X.copy(), y.copy()).estimator.get_params()
if hasattr(regressor.estimator, 'random_state'):
rs_2 = regressor.random_state
rs_estimator_2 = regressor.estimator.random_state
# Remove keys we don't wish to include in the comparison
for params in [params_first, params_second]:
for key in param_keys_ignored:
if key in params:
del params[key]
# They should have equal parameters
self.assertEqual(params_first, params_second,
f"Failed with model args {model_args}")
if (
hasattr(regressor.estimator, 'random_state')
and not isinstance(regressor, LibSVM_SVR)
):
# sklearn.svm.SVR has it as an attribute but does not use it and
# defaults it to None, even if a value is passed in
assert all([
seed == random_state
for random_state in [rs_1, rs_estimator_1, rs_2, rs_estimator_2]
])
@pytest.mark.parametrize("regressor", _regressors.values())
@pytest.mark.parametrize("X", [np.array([[1, 2, 3]] * 20)])
@pytest.mark.parametrize("y", [np.array([1] * 20)])
def test_fit_and_predict_with_1d_targets_as_1d(
regressor: Type[RegressorChoice],
X: np.ndarray,
y: np.ndarray
) -> None:
"""Test that all pipelines work with 1d target types
Parameters
----------
regressor: RegressorChoice
The regressor to test
X: np.ndarray
The features
y: np.ndarray
The 1d targets
Expects
-------
* Should be able to fit with 1d targets
    * Should be able to predict with 1d targets
* Should have predictions with the same shape as y
"""
assert len(X) == len(y)
assert y.ndim == 1
config_space = regressor.get_hyperparameter_search_space()
default_config = config_space.get_default_configuration()
model = regressor(random_state=0, **default_config)
with ignore_warnings(regressor_warnings):
model.fit(X, y)
predictions = model.predict(X)
assert predictions.shape == y.shape
@pytest.mark.parametrize("regressor", _regressors.values())
@pytest.mark.parametrize("X", [np.array([[1, 2, 3]] * 20)])
@pytest.mark.parametrize("y", [np.array([[1]] * 20)])
def test_fit_and_predict_with_1d_targets_as_2d(
regressor: Type[RegressorChoice],
X: np.ndarray,
y: np.ndarray
) -> None:
"""Test that all pipelines work with 1d target types when they are wrapped as 2d
Parameters
----------
regressor: RegressorChoice
The regressor to test
X: np.ndarray
The features
y: np.ndarray
The 1d targets wrapped as 2d
Expects
-------
* Should be able to fit with 1d targets wrapped in 2d
* Should be able to predict 1d targets wrapped in 2d
* Should return 1d predictions
* Should have predictions with the same length as the y
"""
assert len(X) == len(y)
assert y.ndim == 2 and y.shape[1] == 1
config_space = regressor.get_hyperparameter_search_space()
default_config = config_space.get_default_configuration()
model = regressor(random_state=0, **default_config)
with ignore_warnings(regressor_warnings):
model.fit(X, y)
predictions = model.predict(X)
assert predictions.ndim == 1
assert len(predictions) == len(y)
@pytest.mark.parametrize("regressor", [
regressor
for regressor in _regressors.values()
if regressor.get_properties()['handles_multilabel']
])
@pytest.mark.parametrize("X", [np.array([[1, 2, 3]] * 20)])
@pytest.mark.parametrize("y", [np.array([[1, 1, 1]] * 20)])
def test_fit_and_predict_with_2d_targets(
regressor: Type[RegressorChoice],
X: np.ndarray,
y: np.ndarray
) -> None:
"""Test that all pipelines work with 2d target types
Parameters
----------
regressor: RegressorChoice
The regressor to test
X: np.ndarray
The features
y: np.ndarray
The 2d targets
Expects
-------
* Should be able to fit with 2d targets
* Should be able to predict with 2d targets
* Should have predictions with the same shape as y
"""
assert len(X) == len(y)
assert y.ndim == 2 and y.shape[1] > 1
config_space = regressor.get_hyperparameter_search_space()
default_config = config_space.get_default_configuration()
model = regressor(random_state=0, **default_config)
with ignore_warnings(regressor_warnings):
model.fit(X, y)
predictions = model.predict(X)
assert predictions.shape == y.shape
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.rdf
~~~~~~~~~~~~~~~~~~~
Lexers for semantic web and RDF query languages and markup.
:copyright: Copyright 2006-2017 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, default
from pygments.token import Keyword, Punctuation, String, Number, Operator, Generic, \
Whitespace, Name, Literal, Comment, Text
__all__ = ['SparqlLexer', 'TurtleLexer']
class SparqlLexer(RegexLexer):
"""
Lexer for `SPARQL <http://www.w3.org/TR/rdf-sparql-query/>`_ query language.
.. versionadded:: 2.0
"""
name = 'SPARQL'
aliases = ['sparql']
filenames = ['*.rq', '*.sparql']
mimetypes = ['application/sparql-query']
# character group definitions ::
PN_CHARS_BASE_GRP = (u'a-zA-Z'
u'\u00c0-\u00d6'
u'\u00d8-\u00f6'
u'\u00f8-\u02ff'
u'\u0370-\u037d'
u'\u037f-\u1fff'
u'\u200c-\u200d'
u'\u2070-\u218f'
u'\u2c00-\u2fef'
u'\u3001-\ud7ff'
u'\uf900-\ufdcf'
u'\ufdf0-\ufffd')
PN_CHARS_U_GRP = (PN_CHARS_BASE_GRP + '_')
PN_CHARS_GRP = (PN_CHARS_U_GRP +
r'\-' +
r'0-9' +
u'\u00b7' +
u'\u0300-\u036f' +
u'\u203f-\u2040')
HEX_GRP = '0-9A-Fa-f'
PN_LOCAL_ESC_CHARS_GRP = r' _~.\-!$&"()*+,;=/?#@%'
# terminal productions ::
PN_CHARS_BASE = '[' + PN_CHARS_BASE_GRP + ']'
PN_CHARS_U = '[' + PN_CHARS_U_GRP + ']'
PN_CHARS = '[' + PN_CHARS_GRP + ']'
HEX = '[' + HEX_GRP + ']'
PN_LOCAL_ESC_CHARS = '[' + PN_LOCAL_ESC_CHARS_GRP + ']'
IRIREF = r'<(?:[^<>"{}|^`\\\x00-\x20])*>'
BLANK_NODE_LABEL = '_:[0-9' + PN_CHARS_U_GRP + '](?:[' + PN_CHARS_GRP + \
'.]*' + PN_CHARS + ')?'
PN_PREFIX = PN_CHARS_BASE + '(?:[' + PN_CHARS_GRP + '.]*' + PN_CHARS + ')?'
VARNAME = u'[0-9' + PN_CHARS_U_GRP + '][' + PN_CHARS_U_GRP + \
u'0-9\u00b7\u0300-\u036f\u203f-\u2040]*'
PERCENT = '%' + HEX + HEX
PN_LOCAL_ESC = r'\\' + PN_LOCAL_ESC_CHARS
PLX = '(?:' + PERCENT + ')|(?:' + PN_LOCAL_ESC + ')'
PN_LOCAL = ('(?:[' + PN_CHARS_U_GRP + ':0-9' + ']|' + PLX + ')' +
'(?:(?:[' + PN_CHARS_GRP + '.:]|' + PLX + ')*(?:[' +
PN_CHARS_GRP + ':]|' + PLX + '))?')
EXPONENT = r'[eE][+-]?\d+'
# Lexer token definitions ::
tokens = {
'root': [
(r'\s+', Text),
# keywords ::
(r'((?i)select|construct|describe|ask|where|filter|group\s+by|minus|'
r'distinct|reduced|from\s+named|from|order\s+by|desc|asc|limit|'
r'offset|bindings|load|clear|drop|create|add|move|copy|'
r'insert\s+data|delete\s+data|delete\s+where|delete|insert|'
r'using\s+named|using|graph|default|named|all|optional|service|'
r'silent|bind|union|not\s+in|in|as|having|to|prefix|base)\b', Keyword),
(r'(a)\b', Keyword),
# IRIs ::
('(' + IRIREF + ')', Name.Label),
# blank nodes ::
('(' + BLANK_NODE_LABEL + ')', Name.Label),
# # variables ::
('[?$]' + VARNAME, Name.Variable),
# prefixed names ::
(r'(' + PN_PREFIX + ')?(\:)(' + PN_LOCAL + ')?',
bygroups(Name.Namespace, Punctuation, Name.Tag)),
# function names ::
(r'((?i)str|lang|langmatches|datatype|bound|iri|uri|bnode|rand|abs|'
r'ceil|floor|round|concat|strlen|ucase|lcase|encode_for_uri|'
r'contains|strstarts|strends|strbefore|strafter|year|month|day|'
r'hours|minutes|seconds|timezone|tz|now|md5|sha1|sha256|sha384|'
r'sha512|coalesce|if|strlang|strdt|sameterm|isiri|isuri|isblank|'
r'isliteral|isnumeric|regex|substr|replace|exists|not\s+exists|'
r'count|sum|min|max|avg|sample|group_concat|separator)\b',
Name.Function),
# boolean literals ::
(r'(true|false)', Keyword.Constant),
# double literals ::
(r'[+\-]?(\d+\.\d*' + EXPONENT + '|\.?\d+' + EXPONENT + ')', Number.Float),
# decimal literals ::
(r'[+\-]?(\d+\.\d*|\.\d+)', Number.Float),
# integer literals ::
(r'[+\-]?\d+', Number.Integer),
# operators ::
(r'(\|\||&&|=|\*|\-|\+|/|!=|<=|>=|!|<|>)', Operator),
# punctuation characters ::
(r'[(){}.;,:^\[\]]', Punctuation),
# line comments ::
(r'#[^\n]*', Comment),
# strings ::
(r'"""', String, 'triple-double-quoted-string'),
(r'"', String, 'single-double-quoted-string'),
(r"'''", String, 'triple-single-quoted-string'),
(r"'", String, 'single-single-quoted-string'),
],
'triple-double-quoted-string': [
(r'"""', String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String, 'string-escape'),
],
'single-double-quoted-string': [
(r'"', String, 'end-of-string'),
(r'[^"\\\n]+', String),
(r'\\', String, 'string-escape'),
],
'triple-single-quoted-string': [
(r"'''", String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String.Escape, 'string-escape'),
],
'single-single-quoted-string': [
(r"'", String, 'end-of-string'),
(r"[^'\\\n]+", String),
(r'\\', String, 'string-escape'),
],
'string-escape': [
(r'u' + HEX + '{4}', String.Escape, '#pop'),
(r'U' + HEX + '{8}', String.Escape, '#pop'),
(r'.', String.Escape, '#pop'),
],
'end-of-string': [
(r'(@)([a-zA-Z]+(?:-[a-zA-Z0-9]+)*)',
bygroups(Operator, Name.Function), '#pop:2'),
(r'\^\^', Operator, '#pop:2'),
default('#pop:2'),
],
}
class TurtleLexer(RegexLexer):
"""
Lexer for `Turtle <http://www.w3.org/TR/turtle/>`_ data language.
.. versionadded:: 2.1
"""
name = 'Turtle'
aliases = ['turtle']
filenames = ['*.ttl']
mimetypes = ['text/turtle', 'application/x-turtle']
flags = re.IGNORECASE
patterns = {
'PNAME_NS': r'((?:[a-z][\w-]*)?\:)', # Simplified character range
'IRIREF': r'(<[^<>"{}|^`\\\x00-\x20]*>)'
}
# PNAME_NS PN_LOCAL (with simplified character range)
patterns['PrefixedName'] = r'%(PNAME_NS)s([a-z][\w-]*)' % patterns
tokens = {
'root': [
(r'\s+', Whitespace),
# Base / prefix
(r'(@base|BASE)(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
bygroups(Keyword, Whitespace, Name.Variable, Whitespace,
Punctuation)),
(r'(@prefix|PREFIX)(\s+)%(PNAME_NS)s(\s+)%(IRIREF)s(\s*)(\.?)' % patterns,
bygroups(Keyword, Whitespace, Name.Namespace, Whitespace,
Name.Variable, Whitespace, Punctuation)),
# The shorthand predicate 'a'
(r'(?<=\s)a(?=\s)', Keyword.Type),
# IRIREF
(r'%(IRIREF)s' % patterns, Name.Variable),
# PrefixedName
(r'%(PrefixedName)s' % patterns,
bygroups(Name.Namespace, Name.Tag)),
# Comment
(r'#[^\n]+', Comment),
(r'\b(true|false)\b', Literal),
(r'[+\-]?\d*\.\d+', Number.Float),
(r'[+\-]?\d*(:?\.\d+)?E[+\-]?\d+', Number.Float),
(r'[+\-]?\d+', Number.Integer),
(r'[\[\](){}.;,:^]', Punctuation),
(r'"""', String, 'triple-double-quoted-string'),
(r'"', String, 'single-double-quoted-string'),
(r"'''", String, 'triple-single-quoted-string'),
(r"'", String, 'single-single-quoted-string'),
],
'triple-double-quoted-string': [
(r'"""', String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String, 'string-escape'),
],
'single-double-quoted-string': [
(r'"', String, 'end-of-string'),
(r'[^"\\\n]+', String),
(r'\\', String, 'string-escape'),
],
'triple-single-quoted-string': [
(r"'''", String, 'end-of-string'),
(r'[^\\]+', String),
(r'\\', String, 'string-escape'),
],
'single-single-quoted-string': [
(r"'", String, 'end-of-string'),
(r"[^'\\\n]+", String),
(r'\\', String, 'string-escape'),
],
'string-escape': [
(r'.', String, '#pop'),
],
'end-of-string': [
(r'(@)([a-z]+(:?-[a-z0-9]+)*)',
bygroups(Operator, Generic.Emph), '#pop:2'),
(r'(\^\^)%(IRIREF)s' % patterns, bygroups(Operator, Generic.Emph), '#pop:2'),
(r'(\^\^)%(PrefixedName)s' % patterns,
bygroups(Operator, Generic.Emph, Generic.Emph), '#pop:2'),
default('#pop:2'),
],
}
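# Illustrative usage sketch (the query below is a made-up example):
if __name__ == "__main__":
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    EXAMPLE_QUERY = 'SELECT ?name WHERE { ?person <http://example.org/name> ?name . } LIMIT 10'
    print(highlight(EXAMPLE_QUERY, SparqlLexer(), TerminalFormatter()))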
|
|
#
# Copyright 2013 Rackspace Hosting.
#
# Author: Monsyne Dragon <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fnmatch
import os
import jsonpath_rw
from oslo.config import cfg
from oslo.utils import timeutils
import six
import yaml
from ceilometer.event.storage import models
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
OPTS = [
cfg.StrOpt('definitions_cfg_file',
default="event_definitions.yaml",
help="Configuration file for event definitions."
),
cfg.BoolOpt('drop_unmatched_notifications',
default=False,
help='Drop notifications if no event definition matches. '
'(Otherwise, we convert them with just the default traits)'),
]
cfg.CONF.register_opts(OPTS, group='event')
LOG = log.getLogger(__name__)
class EventDefinitionException(Exception):
def __init__(self, message, definition_cfg):
super(EventDefinitionException, self).__init__(message)
self.definition_cfg = definition_cfg
def __str__(self):
return '%s %s: %s' % (self.__class__.__name__,
self.definition_cfg, self.message)
class TraitDefinition(object):
def __init__(self, name, trait_cfg, plugin_manager):
self.cfg = trait_cfg
self.name = name
type_name = trait_cfg.get('type', 'text')
if 'plugin' in trait_cfg:
plugin_cfg = trait_cfg['plugin']
if isinstance(plugin_cfg, six.string_types):
plugin_name = plugin_cfg
plugin_params = {}
else:
try:
plugin_name = plugin_cfg['name']
except KeyError:
raise EventDefinitionException(
_('Plugin specified, but no plugin name supplied for '
'trait %s') % name, self.cfg)
plugin_params = plugin_cfg.get('parameters')
if plugin_params is None:
plugin_params = {}
try:
plugin_ext = plugin_manager[plugin_name]
except KeyError:
raise EventDefinitionException(
_('No plugin named %(plugin)s available for '
'trait %(trait)s') % dict(plugin=plugin_name,
trait=name), self.cfg)
plugin_class = plugin_ext.plugin
self.plugin = plugin_class(**plugin_params)
else:
self.plugin = None
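        # Illustrative note (hypothetical plugin name and parameters): a trait's
        # ``plugin`` entry may be either a bare name such as ``plugin: split`` or a
        # mapping like ``plugin: {name: split, parameters: {segment: 0}}``; the
        # parameters dict is passed to the plugin class as keyword arguments.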
if 'fields' not in trait_cfg:
raise EventDefinitionException(
_("Required field in trait definition not specified: "
"'%s'") % 'fields',
self.cfg)
fields = trait_cfg['fields']
if not isinstance(fields, six.string_types):
# NOTE(mdragon): if not a string, we assume a list.
if len(fields) == 1:
fields = fields[0]
else:
fields = '|'.join('(%s)' % path for path in fields)
try:
self.fields = jsonpath_rw.parse(fields)
except Exception as e:
raise EventDefinitionException(
_("Parse error in JSONPath specification "
"'%(jsonpath)s' for %(trait)s: %(err)s")
% dict(jsonpath=fields, trait=name, err=e), self.cfg)
self.trait_type = models.Trait.get_type_by_name(type_name)
if self.trait_type is None:
raise EventDefinitionException(
_("Invalid trait type '%(type)s' for trait %(trait)s")
% dict(type=type_name, trait=name), self.cfg)
def _get_path(self, match):
if match.context is not None:
for path_element in self._get_path(match.context):
yield path_element
yield str(match.path)
def to_trait(self, notification_body):
values = [match for match in self.fields.find(notification_body)
if match.value is not None]
if self.plugin is not None:
value_map = [('.'.join(self._get_path(match)), match.value) for
match in values]
value = self.plugin.trait_value(value_map)
else:
value = values[0].value if values else None
if value is None:
return None
# NOTE(mdragon): some openstack projects (mostly Nova) emit ''
# for null fields for things like dates.
if self.trait_type != models.Trait.TEXT_TYPE and value == '':
return None
value = models.Trait.convert_value(self.trait_type, value)
return models.Trait(self.name, self.trait_type, value)
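# Illustrative sketch (not part of the original module): how a TraitDefinition
# extracts a value with JSONPath. The trait name, config, and notification
# payload below are hypothetical and only show the shape of the data; with no
# plugin configured, the first matching non-None field is used.
#
#   td = TraitDefinition('instance_id',
#                        {'type': 'text', 'fields': 'payload.instance_id'},
#                        plugin_manager={})
#   trait = td.to_trait({'payload': {'instance_id': 'abc-123'}})
#   # trait.name == 'instance_id'; trait.value == 'abc-123'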
class EventDefinition(object):
DEFAULT_TRAITS = dict(
service=dict(type='text', fields='publisher_id'),
request_id=dict(type='text', fields='_context_request_id'),
tenant_id=dict(type='text', fields=['payload.tenant_id',
'_context_tenant']),
)
def __init__(self, definition_cfg, trait_plugin_mgr):
self._included_types = []
self._excluded_types = []
self.traits = dict()
self.cfg = definition_cfg
try:
event_type = definition_cfg['event_type']
traits = definition_cfg['traits']
except KeyError as err:
raise EventDefinitionException(
_("Required field %s not specified") % err.args[0], self.cfg)
if isinstance(event_type, six.string_types):
event_type = [event_type]
for t in event_type:
if t.startswith('!'):
self._excluded_types.append(t[1:])
else:
self._included_types.append(t)
if self._excluded_types and not self._included_types:
self._included_types.append('*')
for trait_name in self.DEFAULT_TRAITS:
self.traits[trait_name] = TraitDefinition(
trait_name,
self.DEFAULT_TRAITS[trait_name],
trait_plugin_mgr)
for trait_name in traits:
self.traits[trait_name] = TraitDefinition(
trait_name,
traits[trait_name],
trait_plugin_mgr)
def included_type(self, event_type):
for t in self._included_types:
if fnmatch.fnmatch(event_type, t):
return True
return False
def excluded_type(self, event_type):
for t in self._excluded_types:
if fnmatch.fnmatch(event_type, t):
return True
return False
def match_type(self, event_type):
return (self.included_type(event_type)
and not self.excluded_type(event_type))
@property
def is_catchall(self):
return '*' in self._included_types and not self._excluded_types
@staticmethod
def _extract_when(body):
"""Extract the generated datetime from the notification."""
        # NOTE: I am keeping the logic the same as it was in the collector.
        # However, *ALL* notifications should have a 'timestamp' field; it's
        # part of the notification envelope spec. If this was put here because
        # some openstack project is generating notifications without a
        # timestamp, then that needs to be filed as a bug with the offending
        # project. (mdragon)
when = body.get('timestamp', body.get('_context_timestamp'))
if when:
return timeutils.normalize_time(timeutils.parse_isotime(when))
return timeutils.utcnow()
def to_event(self, notification_body):
event_type = notification_body['event_type']
message_id = notification_body['message_id']
when = self._extract_when(notification_body)
traits = (self.traits[t].to_trait(notification_body)
for t in self.traits)
# Only accept non-None value traits ...
traits = [trait for trait in traits if trait is not None]
event = models.Event(message_id, event_type, when, traits)
return event
class NotificationEventsConverter(object):
"""Notification Event Converter
The NotificationEventsConverter handles the conversion of Notifications
from openstack systems into Ceilometer Events.
The conversion is handled according to event definitions in a config file.
    The config is a list of event definitions. Order is significant: a
    notification will be processed according to the LAST definition that
    matches its event_type. (We use the last matching definition because that
    allows you to use YAML merge syntax in the definitions file.)
Each definition is a dictionary with the following keys (all are
required):
- event_type: this is a list of notification event_types this definition
will handle. These can be wildcarded with unix shell glob (not regex!)
wildcards.
An exclusion listing (starting with a '!') will exclude any types listed
from matching. If ONLY exclusions are listed, the definition will match
anything not matching the exclusions.
      This item can also be a string, which will be taken as equivalent to a
      one-item list.
Examples:
* ['compute.instance.exists'] will only match
          compute.instance.exists notifications
* "compute.instance.exists" Same as above.
* ["image.create", "image.delete"] will match
image.create and image.delete, but not anything else.
* "compute.instance.*" will match
compute.instance.create.start but not image.upload
* ['*.start','*.end', '!scheduler.*'] will match
compute.instance.create.start, and image.delete.end,
but NOT compute.instance.exists or
scheduler.run_instance.start
* '!image.*' matches any notification except image
notifications.
* ['*', '!image.*'] same as above.
- traits: (dict) The keys are trait names, the values are the trait
definitions. Each trait definition is a dictionary with the following
keys:
      - type (optional): The data type for this trait (as a string). Valid
        options are 'text', 'int', 'float' and 'datetime'; defaults to 'text'
        if not specified.
      - fields: a path specification for the field(s) in the notification you
        wish to extract. The paths can be specified with a dot syntax
        (e.g. 'payload.host'); dictionary syntax (e.g. 'payload[host]') is
        also supported.
In either case, if the key for the field you are looking for contains
special characters, like '.', it will need to be quoted (with double
or single quotes) like so::
"payload.image_meta.'org.openstack__1__architecture'"
The syntax used for the field specification is a variant of JSONPath,
and is fairly flexible.
(see: https://github.com/kennknowles/python-jsonpath-rw for more info)
        Specifications can be written to match multiple possible fields; the
        value for the trait will be derived from the matching fields that
        exist and have non-null (i.e. not None) values in the notification.
        By default the value will be taken from the first such field (plugins
        can alter that, if they wish).
        This configuration value is normally a string; for convenience, it can
        be specified as a list of specifications, which will be OR'ed together
        (a union query in jsonpath terms).
- plugin (optional): (dictionary) with the following keys:
- name: (string) name of a plugin to load
- parameters: (optional) Dictionary of keyword args to pass
to the plugin on initialization. See documentation on each plugin to
see what arguments it accepts.
        For convenience, this value can also be specified as a string, which
        is interpreted as a plugin name to be loaded with no parameters.
"""
def __init__(self, events_config, trait_plugin_mgr, add_catchall=True):
self.definitions = [
EventDefinition(event_def, trait_plugin_mgr)
for event_def in reversed(events_config)]
if add_catchall and not any(d.is_catchall for d in self.definitions):
event_def = dict(event_type='*', traits={})
self.definitions.append(EventDefinition(event_def,
trait_plugin_mgr))
def to_event(self, notification_body):
event_type = notification_body['event_type']
message_id = notification_body['message_id']
edef = None
for d in self.definitions:
if d.match_type(event_type):
edef = d
break
if edef is None:
msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)')
% dict(type=event_type, msgid=message_id))
if cfg.CONF.event.drop_unmatched_notifications:
LOG.debug(msg)
else:
# If drop_unmatched_notifications is False, this should
# never happen. (mdragon)
LOG.error(msg)
return None
return edef.to_event(notification_body)
def get_config_file():
config_file = cfg.CONF.event.definitions_cfg_file
if not os.path.exists(config_file):
config_file = cfg.CONF.find_file(config_file)
return config_file
def setup_events(trait_plugin_mgr):
"""Setup the event definitions from yaml config file."""
config_file = get_config_file()
if config_file is not None:
LOG.debug(_("Event Definitions configuration file: %s"), config_file)
with open(config_file) as cf:
config = cf.read()
try:
events_config = yaml.safe_load(config)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = (_("Invalid YAML syntax in Event Definitions file "
"%(file)s at line: %(line)s, column: %(column)s.")
% dict(file=config_file,
line=mark.line + 1,
column=mark.column + 1))
else:
errmsg = (_("YAML error reading Event Definitions file "
"%(file)s")
% dict(file=config_file))
LOG.error(errmsg)
raise
else:
LOG.debug(_("No Event Definitions configuration file found!"
" Using default config."))
events_config = []
LOG.info(_("Event Definitions: %s"), events_config)
allow_drop = cfg.CONF.event.drop_unmatched_notifications
return NotificationEventsConverter(events_config,
trait_plugin_mgr,
add_catchall=not allow_drop)
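# Rough usage sketch (hypothetical values, shown only for illustration): given
# a trait plugin manager, setup_events() returns a converter that turns a
# notification body into an Event, or None if the notification is dropped.
#
#   converter = setup_events(trait_plugin_mgr)  # trait_plugin_mgr built elsewhere
#   event = converter.to_event({
#       'event_type': 'compute.instance.create.start',
#       'message_id': 'some-uuid',
#       'timestamp': '2014-01-01T00:00:00Z',
#       'publisher_id': 'compute.host1',
#       'payload': {'tenant_id': 'tenant-1'},
#   })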
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A helper class for inferring Distribution shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
class _ShapeUtil(object):
"""Class which helps infer/identify subsets of tensor dimensions.
Terminology:
Recall that a `Tensor` has:
shape: sizes of tensor dimensions,
ndims: size of shape; number of tensor dimensions,
dims: indexes into shape; useful for transpose, reduce.
Tensors sampled from a `Distribution` can be partitioned by:
sample dims: indexes independent, identically distributed (iid) draws,
batch dims: indexes non-identical draws,
event dims: indexes coordinates of a single draw.
The sample, batch, and event dimensions constitute the entirety of a
`Tensor` shape. The dimensions are always in sample, batch, event order.
Assumptions:
We assume that batch_ndims and event_ndims are statically known for both
creating this object and for inputs to its functions.
TODO(jvdillon): Relax this assumption and support fully unknown shape.
We also assume that the `Tensor` rank is static, i.e., `x.get_shape().ndims
is not None`.
Possible use-cases:
~ Sample dimensions:
Computing summary statistics, i.e., the average is a reduction over sample
dimensions.
~ Batch dimensions:
Log-likelihood under model predicted location:
```python
mu = ... # vector of predictions, one for each covariate.
neg_log_likelihood = -tf.reduce_mean(
Normal(loc=mu, scale=1).log_pdf(x),
reduce_dims=[0])
```
Monte Carlo estimation of a marginal probability:
Average over batch dimensions where batch dimensions are associated with
random draws of a prior.
E.g., suppose we want to find the Monte Carlo estimate of the marginal
distribution of a Normal with a random Laplace location:
```
P(X=x) = integral P(X=x|y) P(Y=y) dy
~= 1/n sum_{i=1}^n P(X=x|y_i), y_i ~iid Laplace(0,1)
= tf.reduce_mean(Normal(loc=Laplace(0, 1).sample_n(n=1000),
scale=tf.ones([1000, 1])).pdf(x),
reduce_dims=[0])
```
The `Laplace` distribution generates a tensor of shape [1000, 1]. When fed
to a `Normal`, this is interpreted as 1000 different locations, i.e.,
1000 non-identical Normals. Therefore a single call to pdf(x) yields 1000
probabilities, one for every location. The average over this batch yields
the marginal.
~ Event dimensions:
Computing the determinant of the Jacobian of a function of a random
variable involves a reduction over event dimensions.
Examples:
Write S, B, E for sample shape, batch shape, and event shape (resp.).
```python
x.get_shape() == S + B + E # For statically known x shape.
# 100 iid samples from one multivariate Normal with two
# degrees of freedom (DF).
mu = [0., 0]
sigma = [[1., 0],
[0, 1]]
X = MultivariateNormal(loc=mu, scale=sigma).sample_n(n=100)
# S = [100]
# B = []
# E = [2]
# 100 iid samples from one Wishart with 2x2 DF.
sigma = [[1., 0],
[0, 1]]
X = Wishart(scale=sigma).sample_n(n=100)
# S = [100]
# B = []
# E = [2, 2]
# 100 iid samples (with shape [2, 50]) from two, non-identical bivariate
# Normal distributions.
mu = ... # shape(2, 2)
sigma = ... # shape(2, 2, 2)
X = MultivariateNormal(loc=mu, scale=sigma).sample(shape=[2, 50])
# S = [2, 50]
# B = [2]
# E = [2]
```
"""
def __init__(self, batch_ndims=None, event_ndims=None, name='ShapeUtil'):
"""Construct ShapeUtil with known sample, batch, and/or event ndims.
Typically, batch_ndims and event_ndims are fixed throughout the lifetime of
a Distribution.
Args:
batch_ndims: number of dims (rank) of the batch portion of indexes of a
        `Tensor`. A "batch" is a non-identical distribution, i.e., Normal with
different parameters.
event_ndims: number of dims (rank) of the event portion of indexes of a
`Tensor`. An "event" is what is sampled from a distribution, i.e., a
trivariate Normal has an event shape of [3] and a 4 dimensional Wishart
has an event shape of [4, 4].
name: `String`. The name to give Ops created by this class.
Raises:
ValueError: if batch_ndims or event_ndims are invalid.
"""
    if batch_ndims < 0:
      raise ValueError('must specify non-negative batch_ndims(%d)'
                       % batch_ndims)
    if batch_ndims > 0 and event_ndims < 1:
      raise ValueError('must specify positive event_ndims(%d) when '
                       'batch_ndims(%d) is positive'
                       % (event_ndims, batch_ndims))
# TODO(jvdillon): Support batches of scalars.
self._name = name
self._batch_ndims = batch_ndims
self._event_ndims = event_ndims
@property
def name(self):
"""Name given to ops created by this class."""
return self._name
@property
def batch_ndims(self):
"""Returns number of dimensions corresponding to non-identical draws."""
return self._batch_ndims
@property
def event_ndims(self):
"""Returns number of dimensions needed to index a sample's coordinates."""
return self._event_ndims
def get_ndims(self, x, name='get_ndims'):
"""Get tensor ndims (rank).
Args:
x: `Tensor`.
name: `String`. The name to give this op.
Raises:
ValueError: if ndims is not statically known.
Returns:
`Scalar` number of dimensions associated with a `Tensor`.
"""
if x is None:
raise ValueError('Input was None which does not have known ndims.')
with ops.name_scope(self.name):
with ops.op_scope([x], name):
ndims = ops.convert_to_tensor(x).get_shape().ndims
if ndims is None:
          raise ValueError('ShapeUtil assumes a static number of '
                           'dimensions; got None')
return ndims
def get_sample_ndims(self, x):
"""Returns number of dimensions corresponding to iid draws.
Args:
x: `Tensor`.
Raises:
ValueError: if batch_ndims or event_ndims are not statically known.
ValueError: if static sample_ndims does not match inferred
Returns:
Scalar number of dimensions associated with a sample.
"""
ndims = self.get_ndims(x)
sample_ndims = ndims - self.batch_ndims - self.event_ndims
if sample_ndims < 0:
      raise ValueError('expected batch_ndims(%d) + event_ndims(%d) <= ndims(%d)'
                       % (self.batch_ndims, self.event_ndims, ndims))
return sample_ndims
def get_dims(self, x, sample=True, batch=True, event=True):
"""Returns subset of tensor's dimension indexes (indexes into shape).
Args:
x: `Tensor`.
sample: `Boolean`. Include sample dimensions or not.
batch: `Boolean`. Include batch dimensions or not.
event: `Boolean`. Include event dimensions or not.
Raises:
ValueError: if `x.get_shape().ndims` is `None`
Returns:
List enumerating requested dimensions.
"""
ndims = self.get_ndims(x)
if sample and batch and event:
return list(range(ndims))
sample_start = 0
batch_start = self.get_sample_ndims(x)
event_start = batch_start + self.batch_ndims
sample_shape = list(range(sample_start, batch_start)) if sample else []
batch_shape = list(range(batch_start, event_start)) if batch else []
event_shape = list(range(event_start, ndims)) if event else []
return sample_shape + batch_shape + event_shape
def get_shape(self, x, sample=True, batch=True, event=True, name='get_shape'):
"""Returns subset of tensor's shape (size of dimensions).
Args:
x: `Tensor`.
sample: `Boolean`. Include sample shape or not.
batch: `Boolean`. Include batch shape or not.
event: `Boolean`. Include event shape or not.
name: `String`. The name to give this op.
Raises:
ValueError: if `x.get_shape().ndims` is `None`
Returns:
List describing event shape if known statically, `Tensor` otherwise.
"""
if not sample and not batch and not event:
return []
with ops.name_scope(self._name):
with ops.op_scope([x], name):
x = ops.convert_to_tensor(x)
shape = (x.get_shape().as_list()
if x.get_shape().is_fully_defined()
else array_ops.shape(x))
if sample and batch and event:
return shape
sample_start = 0
batch_start = self.get_sample_ndims(x)
event_start = batch_start + self.batch_ndims
sample_shape = shape[sample_start:batch_start] if sample else []
batch_shape = shape[batch_start:event_start] if batch else []
event_shape = shape[event_start:] if event else []
if not batch and not event:
return sample_shape
if not sample and not event:
return batch_shape
if not sample and not batch:
return event_shape
if x.get_shape().is_fully_defined():
return sample_shape + batch_shape + event_shape
else:
return array_ops.concat(0, [sample_shape, batch_shape, event_shape])
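  # Worked example (values chosen for illustration, not from the docstrings):
  # for x with static shape [100, 3, 2] and _ShapeUtil(batch_ndims=1,
  # event_ndims=1), sample_ndims = 3 - 1 - 1 = 1, so:
  #   get_shape(x, sample=True,  batch=False, event=False)  ->  [100]
  #   get_shape(x, sample=False, batch=True,  event=False)  ->  [3]
  #   get_shape(x, sample=False, batch=False, event=True)   ->  [2]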
def get_sample_dims(self, x):
"""Returns dimension indexes corresponding to sample.
Convenience function; identical to:
```python
get_dims(x, sample=True, batch=False, event=False)
```
Args:
x: `Tensor`.
Raises:
ValueError: if `x.get_shape().ndims` is `None`
Returns:
List enumerating sample dimensions.
"""
return self.get_dims(x, sample=True, batch=False, event=False)
def get_batch_dims(self, x):
"""Returns dimension indexes corresponding to batch.
Convenience function; identical to:
```python
get_dims(x, sample=False, batch=True, event=False)
```
Args:
x: `Tensor`.
Raises:
ValueError: if `x.get_shape().ndims` is `None`
Returns:
List enumerating batch dimensions.
"""
return self.get_dims(x, sample=False, batch=True, event=False)
def get_event_dims(self, x):
"""Returns dimension indexes corresponding to event.
Convenience function; identical to:
```python
get_dims(x, sample=False, batch=False, event=True)
```
Args:
x: `Tensor`.
Raises:
ValueError: if `x.get_shape().ndims` is `None`
Returns:
List enumerating event dimensions.
"""
return self.get_dims(x, sample=False, batch=False, event=True)
def get_sample_shape(self, x):
"""Returns shape corresponding to sample.
Convenience function; identical to:
```python
get_shape(x, sample=True, batch=False, event=False)
```
Args:
x: `Tensor`.
Returns:
List describing sample shape if known statically, `Tensor` otherwise.
"""
return self.get_shape(x, sample=True, batch=False, event=False)
def get_batch_shape(self, x):
"""Returns shape corresponding to batch.
Convenience function; identical to:
```python
get_shape(x, sample=False, batch=True, event=False)
```
Args:
x: `Tensor`.
Returns:
List describing batch shape if known statically, `Tensor` otherwise.
"""
return self.get_shape(x, sample=False, batch=True, event=False)
def get_event_shape(self, x):
"""Returns shape corresponding to event.
Convenience function; identical to:
```python
get_shape(x, sample=False, batch=False, event=True)
```
Args:
x: `Tensor`.
Returns:
List describing event shape if known statically, `Tensor` otherwise.
"""
return self.get_shape(x, sample=False, batch=False, event=True)
|
|
# Copyright 2013 Mirantis, Inc.
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient import api_versions as cinder_api_versions
from cinderclient import apiclient as cinder_apiclient
from cinderclient import exceptions as cinder_exception
from cinderclient.v2 import limits as cinder_limits
from keystoneauth1 import loading as ks_loading
from keystoneauth1 import session
from keystoneclient import exceptions as keystone_exception
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
import nova.conf
from nova import context
from nova import exception
from nova import test
from nova.tests.unit.fake_instance import fake_instance_obj
from nova.volume import cinder
CONF = nova.conf.CONF
class FakeVolume(object):
def __init__(self, volume_id, size=1, attachments=None, multiattach=False):
self.id = volume_id
self.name = 'volume_name'
self.description = 'volume_description'
self.status = 'available'
self.created_at = timeutils.utcnow()
self.size = size
self.availability_zone = 'nova'
self.attachments = attachments or []
self.volume_type = 99
self.bootable = False
self.snapshot_id = 'snap_id_1'
self.metadata = {}
self.multiattach = multiattach
def get(self, volume_id):
        return self.id
class FakeSnapshot(object):
def __init__(self, snapshot_id, volume_id, size=1):
self.id = snapshot_id
self.name = 'snapshot_name'
self.description = 'snapshot_description'
self.status = 'available'
self.size = size
self.created_at = timeutils.utcnow()
self.progress = '99%'
self.volume_id = volume_id
self.project_id = 'fake_project'
class FakeVolumeType(object):
def __init__(self, volume_type_name, volume_type_id):
self.id = volume_type_id
self.name = volume_type_name
class FakeAttachment(object):
def __init__(self):
self.id = uuids.attachment_id
self.status = 'attaching'
self.instance = uuids.instance_uuid
self.volume_id = uuids.volume_id
self.attached_at = timeutils.utcnow()
self.detached_at = None
self.attach_mode = 'rw'
self.connection_info = {'driver_volume_type': 'fake_type',
'target_lun': '1',
'foo': 'bar',
'attachment_id': uuids.attachment_id}
self.att = {'id': self.id,
'status': self.status,
'instance': self.instance,
'volume_id': self.volume_id,
'attached_at': self.attached_at,
'detached_at': self.detached_at,
'attach_mode': self.attach_mode,
'connection_info': self.connection_info}
def get(self, key, default=None):
return self.att.get(key, default)
def __setitem__(self, key, value):
self.att[key] = value
def __getitem__(self, key):
return self.att[key]
def to_dict(self):
return self.att
class CinderApiTestCase(test.NoDBTestCase):
def setUp(self):
super(CinderApiTestCase, self).setUp()
self.api = cinder.API()
self.ctx = context.get_admin_context()
@mock.patch('nova.volume.cinder.cinderclient')
def test_get(self, mock_cinderclient):
volume_id = 'volume_id1'
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.get(self.ctx, volume_id)
mock_cinderclient.assert_called_once_with(self.ctx, microversion=None)
mock_volumes.get.assert_called_once_with(volume_id)
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_failed_notfound(self, mock_cinderclient):
mock_cinderclient.return_value.volumes.get.side_effect = (
cinder_exception.NotFound(404, '404'))
self.assertRaises(exception.VolumeNotFound,
self.api.get, self.ctx, 'id1')
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_failed_badrequest(self, mock_cinderclient):
mock_cinderclient.return_value.volumes.get.side_effect = (
cinder_exception.BadRequest(400, '400'))
self.assertRaises(exception.InvalidInput,
self.api.get, self.ctx, 'id1')
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_failed_connection_failed(self, mock_cinderclient):
mock_cinderclient.return_value.volumes.get.side_effect = (
cinder_exception.ConnectionError(''))
self.assertRaises(exception.CinderConnectionFailed,
self.api.get, self.ctx, 'id1')
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_with_shared_targets(self, mock_cinderclient):
"""Tests getting a volume at microversion 3.48 which includes the
shared_targets and service_uuid parameters in the volume response body.
"""
mock_volume = mock.MagicMock(
shared_targets=False, service_uuid=uuids.service_uuid)
mock_volumes = mock.MagicMock()
mock_volumes.get.return_value = mock_volume
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
vol = self.api.get(self.ctx, uuids.volume_id, microversion='3.48')
mock_cinderclient.assert_called_once_with(
self.ctx, microversion='3.48')
mock_volumes.get.assert_called_once_with(uuids.volume_id)
self.assertIn('shared_targets', vol)
self.assertFalse(vol['shared_targets'])
self.assertEqual(uuids.service_uuid, vol['service_uuid'])
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=exception.CinderAPIVersionNotAvailable(
version='3.48'))
def test_get_microversion_not_supported(self, mock_cinderclient):
"""Tests getting a volume at microversion 3.48 but that version
is not available.
"""
self.assertRaises(exception.CinderAPIVersionNotAvailable,
self.api.get, self.ctx, uuids.volume_id,
microversion='3.48')
@mock.patch('nova.volume.cinder.cinderclient')
def test_create(self, mock_cinderclient):
volume = FakeVolume('id1')
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
mock_volumes.create.return_value = volume
created_volume = self.api.create(self.ctx, 1, '', '')
self.assertEqual('id1', created_volume['id'])
self.assertEqual(1, created_volume['size'])
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.create.assert_called_once_with(1, availability_zone=None,
description='',
imageRef=None,
metadata=None, name='',
snapshot_id=None,
volume_type=None)
@mock.patch('nova.volume.cinder.cinderclient')
def test_create_failed(self, mock_cinderclient):
mock_cinderclient.return_value.volumes.create.side_effect = (
cinder_exception.BadRequest(400, '400'))
self.assertRaises(exception.InvalidInput,
self.api.create, self.ctx, 1, '', '')
@mock.patch('nova.volume.cinder.cinderclient')
def test_create_failed_not_found(self, mock_cinderclient):
mock_cinderclient.return_value.volumes.create.side_effect = (
cinder_exception.NotFound(404, 'Volume type can not be found.'))
ex = self.assertRaises(exception.NotFound,
self.api.create, self.ctx, 1, '', '')
self.assertEqual('Volume type can not be found.', str(ex))
@mock.patch('nova.volume.cinder.cinderclient')
def test_create_over_quota_failed(self, mock_cinderclient):
mock_cinderclient.return_value.volumes.create.side_effect = (
cinder_exception.OverLimit(413))
self.assertRaises(exception.OverQuota, self.api.create, self.ctx,
1, '', '')
mock_cinderclient.return_value.volumes.create.assert_called_once_with(
1, imageRef=None, availability_zone=None,
volume_type=None, description='', snapshot_id=None, name='',
metadata=None)
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_all(self, mock_cinderclient):
volume1 = FakeVolume('id1')
volume2 = FakeVolume('id2')
volume_list = [volume1, volume2]
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
mock_volumes.list.return_value = volume_list
volumes = self.api.get_all(self.ctx)
self.assertEqual(2, len(volumes))
self.assertEqual(['id1', 'id2'], [vol['id'] for vol in volumes])
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.list.assert_called_once_with(detailed=True,
search_opts={})
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_all_with_search(self, mock_cinderclient):
volume1 = FakeVolume('id1')
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
mock_volumes.list.return_value = [volume1]
volumes = self.api.get_all(self.ctx, search_opts={'id': 'id1'})
self.assertEqual(1, len(volumes))
self.assertEqual('id1', volumes[0]['id'])
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.list.assert_called_once_with(detailed=True,
search_opts={'id': 'id1'})
@mock.patch.object(cinder.az, 'get_instance_availability_zone',
return_value='zone1')
def test_check_availability_zone_differs(self, mock_get_instance_az):
self.flags(cross_az_attach=False, group='cinder')
volume = {'id': uuids.volume_id,
'status': 'available',
'attach_status': 'detached',
'availability_zone': 'zone2'}
instance = fake_instance_obj(self.ctx)
# Simulate _provision_instances in the compute API; the instance is not
# created in the API so the instance will not have an id attribute set.
delattr(instance, 'id')
self.assertRaises(exception.InvalidVolume,
self.api.check_availability_zone,
self.ctx, volume, instance)
mock_get_instance_az.assert_called_once_with(self.ctx, instance)
@mock.patch('nova.volume.cinder.cinderclient')
def test_reserve_volume(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.reserve_volume(self.ctx, 'id1')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.reserve.assert_called_once_with('id1')
@mock.patch('nova.volume.cinder.cinderclient')
def test_unreserve_volume(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.unreserve_volume(self.ctx, 'id1')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.unreserve.assert_called_once_with('id1')
@mock.patch('nova.volume.cinder.cinderclient')
def test_begin_detaching(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.begin_detaching(self.ctx, 'id1')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.begin_detaching.assert_called_once_with('id1')
@mock.patch('nova.volume.cinder.cinderclient')
def test_roll_detaching(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.roll_detaching(self.ctx, 'id1')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.roll_detaching.assert_called_once_with('id1')
@mock.patch('nova.volume.cinder.cinderclient')
def test_attach(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.attach(self.ctx, 'id1', 'uuid', 'point')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point',
mode='rw')
@mock.patch('nova.volume.cinder.cinderclient')
def test_attach_with_mode(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.attach(self.ctx, 'id1', 'uuid', 'point', mode='ro')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.attach.assert_called_once_with('id1', 'uuid', 'point',
mode='ro')
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_create(self, mock_cinderclient):
"""Tests the happy path for creating a volume attachment without a
mountpoint.
"""
attachment_ref = {'id': uuids.attachment_id,
'connection_info': {}}
expected_attachment_ref = {'id': uuids.attachment_id,
'connection_info': {}}
mock_cinderclient.return_value.attachments.create.return_value = (
attachment_ref)
result = self.api.attachment_create(
self.ctx, uuids.volume_id, uuids.instance_id)
self.assertEqual(expected_attachment_ref, result)
mock_cinderclient.return_value.attachments.create.\
assert_called_once_with(uuids.volume_id, None, uuids.instance_id)
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_create_with_mountpoint(self, mock_cinderclient):
"""Tests the happy path for creating a volume attachment with a
mountpoint.
"""
attachment_ref = {'id': uuids.attachment_id,
'connection_info': {}}
expected_attachment_ref = {'id': uuids.attachment_id,
'connection_info': {}}
mock_cinderclient.return_value.attachments.create.return_value = (
attachment_ref)
original_connector = {'host': 'fake-host'}
updated_connector = dict(original_connector, mountpoint='/dev/vdb')
result = self.api.attachment_create(
self.ctx, uuids.volume_id, uuids.instance_id,
connector=original_connector, mountpoint='/dev/vdb')
self.assertEqual(expected_attachment_ref, result)
# Make sure the original connector wasn't modified.
self.assertNotIn('mountpoint', original_connector)
# Make sure the mountpoint was passed through via the connector.
mock_cinderclient.return_value.attachments.create.\
assert_called_once_with(uuids.volume_id, updated_connector,
uuids.instance_id)
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_create_volume_not_found(self, mock_cinderclient):
"""Tests that the translate_volume_exception decorator is used."""
# fake out the volume not found error
mock_cinderclient.return_value.attachments.create.side_effect = (
cinder_exception.NotFound(404))
self.assertRaises(exception.VolumeNotFound, self.api.attachment_create,
self.ctx, uuids.volume_id, uuids.instance_id)
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=exception.CinderAPIVersionNotAvailable(
version='3.44'))
def test_attachment_create_unsupported_api_version(self,
mock_cinderclient):
"""Tests that CinderAPIVersionNotAvailable is passed back through
if 3.44 isn't available.
"""
self.assertRaises(exception.CinderAPIVersionNotAvailable,
self.api.attachment_create,
self.ctx, uuids.volume_id, uuids.instance_id)
mock_cinderclient.assert_called_once_with(self.ctx, '3.44')
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_update(self, mock_cinderclient):
"""Tests the happy path for updating a volume attachment without
a mountpoint.
"""
fake_attachment = FakeAttachment()
connector = {'host': 'fake-host'}
expected_attachment_ref = {
'id': uuids.attachment_id,
'volume_id': fake_attachment.volume_id,
'attach_mode': 'rw',
'connection_info': {
'attached_at': fake_attachment.attached_at,
'data': {'foo': 'bar', 'target_lun': '1'},
'detached_at': None,
'driver_volume_type': 'fake_type',
'instance': fake_attachment.instance,
'status': 'attaching',
'volume_id': fake_attachment.volume_id}}
mock_cinderclient.return_value.attachments.update.return_value = (
fake_attachment)
result = self.api.attachment_update(
self.ctx, uuids.attachment_id, connector=connector)
self.assertEqual(expected_attachment_ref, result)
# Make sure the connector wasn't modified.
self.assertNotIn('mountpoint', connector)
mock_cinderclient.return_value.attachments.update.\
assert_called_once_with(uuids.attachment_id, connector)
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_update_with_mountpoint(self, mock_cinderclient):
"""Tests the happy path for updating a volume attachment with
a mountpoint.
"""
fake_attachment = FakeAttachment()
original_connector = {'host': 'fake-host'}
updated_connector = dict(original_connector, mountpoint='/dev/vdb')
expected_attachment_ref = {
'id': uuids.attachment_id,
'volume_id': fake_attachment.volume_id,
'attach_mode': 'rw',
'connection_info': {
'attached_at': fake_attachment.attached_at,
'data': {'foo': 'bar', 'target_lun': '1'},
'detached_at': None,
'driver_volume_type': 'fake_type',
'instance': fake_attachment.instance,
'status': 'attaching',
'volume_id': fake_attachment.volume_id}}
mock_cinderclient.return_value.attachments.update.return_value = (
fake_attachment)
result = self.api.attachment_update(
self.ctx, uuids.attachment_id, connector=original_connector,
mountpoint='/dev/vdb')
self.assertEqual(expected_attachment_ref, result)
# Make sure the original connector wasn't modified.
self.assertNotIn('mountpoint', original_connector)
# Make sure the mountpoint was passed through via the connector.
mock_cinderclient.return_value.attachments.update.\
assert_called_once_with(uuids.attachment_id, updated_connector)
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_update_attachment_not_found(self, mock_cinderclient):
"""Tests that the translate_attachment_exception decorator is used."""
# fake out the volume not found error
mock_cinderclient.return_value.attachments.update.side_effect = (
cinder_exception.NotFound(404))
self.assertRaises(exception.VolumeAttachmentNotFound,
self.api.attachment_update,
self.ctx, uuids.attachment_id,
connector={'host': 'fake-host'})
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_update_attachment_no_connector(self,
mock_cinderclient):
"""Tests that the translate_cinder_exception decorator is used."""
# fake out the volume bad request error
mock_cinderclient.return_value.attachments.update.side_effect = (
cinder_exception.BadRequest(400))
self.assertRaises(exception.InvalidInput,
self.api.attachment_update,
self.ctx, uuids.attachment_id, connector=None)
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=exception.CinderAPIVersionNotAvailable(
version='3.44'))
def test_attachment_update_unsupported_api_version(self,
mock_cinderclient):
"""Tests that CinderAPIVersionNotAvailable is passed back through
if 3.44 isn't available.
"""
self.assertRaises(exception.CinderAPIVersionNotAvailable,
self.api.attachment_update,
self.ctx, uuids.attachment_id, connector={})
mock_cinderclient.assert_called_once_with(self.ctx, '3.44',
skip_version_check=True)
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_delete(self, mock_cinderclient):
mock_attachments = mock.MagicMock()
mock_cinderclient.return_value = \
mock.MagicMock(attachments=mock_attachments)
attachment_id = uuids.attachment
self.api.attachment_delete(self.ctx, attachment_id)
mock_cinderclient.assert_called_once_with(self.ctx, '3.44',
skip_version_check=True)
mock_attachments.delete.assert_called_once_with(attachment_id)
@mock.patch('nova.volume.cinder.LOG')
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_delete_failed(self, mock_cinderclient, mock_log):
mock_cinderclient.return_value.attachments.delete.side_effect = (
cinder_exception.NotFound(404, '404'))
attachment_id = uuids.attachment
ex = self.assertRaises(exception.VolumeAttachmentNotFound,
self.api.attachment_delete,
self.ctx,
attachment_id)
self.assertEqual(404, ex.code)
self.assertIn(attachment_id, str(ex))
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=exception.CinderAPIVersionNotAvailable(
version='3.44'))
def test_attachment_delete_unsupported_api_version(self,
mock_cinderclient):
"""Tests that CinderAPIVersionNotAvailable is passed back through
if 3.44 isn't available.
"""
self.assertRaises(exception.CinderAPIVersionNotAvailable,
self.api.attachment_delete,
self.ctx, uuids.attachment_id)
mock_cinderclient.assert_called_once_with(self.ctx, '3.44',
skip_version_check=True)
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=cinder_apiclient.exceptions.InternalServerError)
def test_attachment_delete_internal_server_error(self, mock_cinderclient):
self.assertRaises(cinder_apiclient.exceptions.InternalServerError,
self.api.attachment_delete,
self.ctx, uuids.attachment_id)
self.assertEqual(5, mock_cinderclient.call_count)
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_delete_internal_server_error_do_not_raise(
self, mock_cinderclient):
# generate exception, and then have a normal return on the next retry
mock_cinderclient.return_value.attachments.delete.side_effect = [
cinder_apiclient.exceptions.InternalServerError, None]
attachment_id = uuids.attachment
self.api.attachment_delete(self.ctx, attachment_id)
self.assertEqual(2, mock_cinderclient.call_count)
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=cinder_exception.BadRequest(code=400))
def test_attachment_delete_bad_request_exception(self, mock_cinderclient):
self.assertRaises(exception.InvalidInput,
self.api.attachment_delete,
self.ctx, uuids.attachment_id)
self.assertEqual(1, mock_cinderclient.call_count)
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_complete(self, mock_cinderclient):
mock_attachments = mock.MagicMock()
mock_cinderclient.return_value = \
mock.MagicMock(attachments=mock_attachments)
attachment_id = uuids.attachment
self.api.attachment_complete(self.ctx, attachment_id)
mock_cinderclient.assert_called_once_with(self.ctx, '3.44',
skip_version_check=True)
mock_attachments.complete.assert_called_once_with(attachment_id)
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_complete_failed(self, mock_cinderclient):
mock_cinderclient.return_value.attachments.complete.side_effect = (
cinder_exception.NotFound(404, '404'))
attachment_id = uuids.attachment
ex = self.assertRaises(exception.VolumeAttachmentNotFound,
self.api.attachment_complete,
self.ctx,
attachment_id)
self.assertEqual(404, ex.code)
self.assertIn(attachment_id, str(ex))
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=exception.CinderAPIVersionNotAvailable(
version='3.44'))
def test_attachment_complete_unsupported_api_version(self,
mock_cinderclient):
"""Tests that CinderAPIVersionNotAvailable is passed back.
If microversion 3.44 isn't available that should result in a
CinderAPIVersionNotAvailable exception.
"""
self.assertRaises(exception.CinderAPIVersionNotAvailable,
self.api.attachment_complete,
self.ctx, uuids.attachment_id)
mock_cinderclient.assert_called_once_with(self.ctx, '3.44',
skip_version_check=True)
@mock.patch('nova.volume.cinder.cinderclient')
def test_detach(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(version='2',
volumes=mock_volumes)
self.api.detach(self.ctx, 'id1', instance_uuid='fake_uuid',
attachment_id='fakeid')
mock_cinderclient.assert_called_with(self.ctx)
mock_volumes.detach.assert_called_once_with('id1', 'fakeid')
@mock.patch('nova.volume.cinder.cinderclient')
def test_detach_no_attachment_id(self, mock_cinderclient):
attachment = {'server_id': 'fake_uuid',
'attachment_id': 'fakeid'
}
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(version='2',
volumes=mock_volumes)
mock_cinderclient.return_value.volumes.get.return_value = \
FakeVolume('id1', attachments=[attachment])
self.api.detach(self.ctx, 'id1', instance_uuid='fake_uuid')
mock_cinderclient.assert_called_with(self.ctx, microversion=None)
mock_volumes.detach.assert_called_once_with('id1', None)
@mock.patch('nova.volume.cinder.cinderclient')
def test_detach_no_attachment_id_multiattach(self, mock_cinderclient):
attachment = {'server_id': 'fake_uuid',
'attachment_id': 'fakeid'
}
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(version='2',
volumes=mock_volumes)
mock_cinderclient.return_value.volumes.get.return_value = \
FakeVolume('id1', attachments=[attachment], multiattach=True)
self.api.detach(self.ctx, 'id1', instance_uuid='fake_uuid')
mock_cinderclient.assert_called_with(self.ctx, microversion=None)
mock_volumes.detach.assert_called_once_with('id1', 'fakeid')
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=cinder_apiclient.exceptions.InternalServerError)
def test_detach_internal_server_error(self, mock_cinderclient):
self.assertRaises(cinder_apiclient.exceptions.InternalServerError,
self.api.detach,
self.ctx, 'id1', instance_uuid='fake_uuid')
self.assertEqual(5, mock_cinderclient.call_count)
@mock.patch('nova.volume.cinder.cinderclient')
def test_detach_internal_server_error_do_not_raise(
self, mock_cinderclient):
# generate exception, and then have a normal return on the next retry
mock_cinderclient.return_value.volumes.detach.side_effect = [
cinder_apiclient.exceptions.InternalServerError, None]
self.api.detach(self.ctx, 'id1', instance_uuid='fake_uuid',
attachment_id='fakeid')
self.assertEqual(2, mock_cinderclient.call_count)
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=cinder_exception.BadRequest(code=400))
def test_detach_bad_request_exception(self, mock_cinderclient):
self.assertRaises(exception.InvalidInput,
self.api.detach,
self.ctx, 'id1', instance_uuid='fake_uuid')
self.assertEqual(1, mock_cinderclient.call_count)
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_get(self, mock_cinderclient):
mock_attachment = mock.MagicMock()
mock_cinderclient.return_value = \
mock.MagicMock(attachments=mock_attachment)
attachment_id = uuids.attachment
self.api.attachment_get(self.ctx, attachment_id)
mock_cinderclient.assert_called_once_with(self.ctx, '3.44',
skip_version_check=True)
mock_attachment.show.assert_called_once_with(attachment_id)
@mock.patch('nova.volume.cinder.cinderclient')
def test_attachment_get_failed(self, mock_cinderclient):
mock_cinderclient.return_value.attachments.show.side_effect = (
cinder_exception.NotFound(404, '404'))
attachment_id = uuids.attachment
ex = self.assertRaises(exception.VolumeAttachmentNotFound,
self.api.attachment_get,
self.ctx,
attachment_id)
self.assertEqual(404, ex.code)
self.assertIn(attachment_id, str(ex))
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=exception.CinderAPIVersionNotAvailable(
version='3.44'))
def test_attachment_get_unsupported_api_version(self, mock_cinderclient):
"""Tests that CinderAPIVersionNotAvailable is passed back.
If microversion 3.44 isn't available that should result in a
CinderAPIVersionNotAvailable exception.
"""
self.assertRaises(exception.CinderAPIVersionNotAvailable,
self.api.attachment_get,
self.ctx, uuids.attachment_id)
mock_cinderclient.assert_called_once_with(self.ctx, '3.44',
skip_version_check=True)
@mock.patch('nova.volume.cinder.cinderclient')
def test_initialize_connection(self, mock_cinderclient):
connection_info = {'foo': 'bar'}
mock_cinderclient.return_value.volumes. \
initialize_connection.return_value = connection_info
volume_id = 'fake_vid'
connector = {'host': 'fakehost1'}
actual = self.api.initialize_connection(self.ctx, volume_id, connector)
expected = connection_info
expected['connector'] = connector
self.assertEqual(expected, actual)
mock_cinderclient.return_value.volumes. \
initialize_connection.assert_called_once_with(volume_id, connector)
@mock.patch('nova.volume.cinder.LOG')
@mock.patch('nova.volume.cinder.cinderclient')
def test_initialize_connection_exception_no_code(
self, mock_cinderclient, mock_log):
mock_cinderclient.return_value.volumes. \
initialize_connection.side_effect = (
cinder_exception.ClientException(500, "500"))
mock_cinderclient.return_value.volumes. \
terminate_connection.side_effect = (
test.TestingException)
connector = {'host': 'fakehost1'}
self.assertRaises(cinder_exception.ClientException,
self.api.initialize_connection,
self.ctx,
'id1',
connector)
self.assertIsNone(mock_log.error.call_args_list[1][0][1]['code'])
@mock.patch('nova.volume.cinder.cinderclient')
def test_initialize_connection_rollback(self, mock_cinderclient):
mock_cinderclient.return_value.volumes.\
initialize_connection.side_effect = (
cinder_exception.ClientException(500, "500"))
connector = {'host': 'host1'}
ex = self.assertRaises(cinder_exception.ClientException,
self.api.initialize_connection,
self.ctx,
'id1',
connector)
self.assertEqual(500, ex.code)
mock_cinderclient.return_value.volumes.\
terminate_connection.assert_called_once_with('id1', connector)
@mock.patch('nova.volume.cinder.cinderclient')
def test_initialize_connection_no_rollback(self, mock_cinderclient):
mock_cinderclient.return_value.volumes.\
initialize_connection.side_effect = test.TestingException
connector = {'host': 'host1'}
self.assertRaises(test.TestingException,
self.api.initialize_connection,
self.ctx,
'id1',
connector)
self.assertFalse(mock_cinderclient.return_value.volumes.
terminate_connection.called)
@mock.patch('nova.volume.cinder.cinderclient')
def test_terminate_connection(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.terminate_connection(self.ctx, 'id1', 'connector')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.terminate_connection.assert_called_once_with('id1',
'connector')
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=cinder_apiclient.exceptions.InternalServerError)
def test_terminate_connection_internal_server_error(
self, mock_cinderclient):
self.assertRaises(cinder_apiclient.exceptions.InternalServerError,
self.api.terminate_connection,
self.ctx, 'id1', 'connector')
self.assertEqual(5, mock_cinderclient.call_count)
@mock.patch('nova.volume.cinder.cinderclient')
def test_terminate_connection_internal_server_error_do_not_raise(
self, mock_cinderclient):
# generate exception, and then have a normal return on the next retry
mock_cinderclient.return_value.volumes.terminate_connection.\
side_effect = [cinder_apiclient.exceptions.InternalServerError,
None]
self.api.terminate_connection(self.ctx, 'id1', 'connector')
self.assertEqual(2, mock_cinderclient.call_count)
@mock.patch('nova.volume.cinder.cinderclient',
side_effect=cinder_exception.BadRequest(code=400))
def test_terminate_connection_bad_request_exception(
self, mock_cinderclient):
self.assertRaises(exception.InvalidInput,
self.api.terminate_connection,
self.ctx, 'id1', 'connector')
self.assertEqual(1, mock_cinderclient.call_count)
@mock.patch('nova.volume.cinder.cinderclient')
def test_delete(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.delete(self.ctx, 'id1')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.delete.assert_called_once_with('id1')
def test_update(self):
self.assertRaises(NotImplementedError,
self.api.update, self.ctx, '', '')
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_absolute_limits_forbidden(self, cinderclient):
"""Tests to make sure we gracefully handle a Forbidden error raised
from python-cinderclient when getting limits.
"""
cinderclient.return_value.limits.get.side_effect = (
cinder_exception.Forbidden(403))
self.assertRaises(
exception.Forbidden, self.api.get_absolute_limits, self.ctx)
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_absolute_limits(self, cinderclient):
"""Tests the happy path of getting the absolute limits."""
expected_limits = {
"totalSnapshotsUsed": 0,
"maxTotalBackups": 10,
"maxTotalVolumeGigabytes": 1000,
"maxTotalSnapshots": 10,
"maxTotalBackupGigabytes": 1000,
"totalBackupGigabytesUsed": 0,
"maxTotalVolumes": 10,
"totalVolumesUsed": 0,
"totalBackupsUsed": 0,
"totalGigabytesUsed": 0
}
limits_obj = cinder_limits.Limits(None, {'absolute': expected_limits})
cinderclient.return_value.limits.get.return_value = limits_obj
actual_limits = self.api.get_absolute_limits(self.ctx)
self.assertDictEqual(expected_limits, actual_limits)
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_snapshot(self, mock_cinderclient):
snapshot_id = 'snapshot_id'
mock_volume_snapshots = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(
volume_snapshots=mock_volume_snapshots)
self.api.get_snapshot(self.ctx, snapshot_id)
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volume_snapshots.get.assert_called_once_with(snapshot_id)
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_snapshot_failed_notfound(self, mock_cinderclient):
mock_cinderclient.return_value.volume_snapshots.get.side_effect = (
cinder_exception.NotFound(404, '404'))
self.assertRaises(exception.SnapshotNotFound,
self.api.get_snapshot, self.ctx, 'snapshot_id')
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_snapshot_connection_failed(self, mock_cinderclient):
mock_cinderclient.return_value.volume_snapshots.get.side_effect = (
cinder_exception.ConnectionError(''))
self.assertRaises(exception.CinderConnectionFailed,
self.api.get_snapshot, self.ctx, 'snapshot_id')
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_all_snapshots(self, mock_cinderclient):
snapshot1 = FakeSnapshot('snapshot_id1', 'id1')
snapshot2 = FakeSnapshot('snapshot_id2', 'id2')
snapshot_list = [snapshot1, snapshot2]
mock_volume_snapshots = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(
volume_snapshots=mock_volume_snapshots)
mock_volume_snapshots.list.return_value = snapshot_list
snapshots = self.api.get_all_snapshots(self.ctx)
self.assertEqual(2, len(snapshots))
self.assertEqual(['snapshot_id1', 'snapshot_id2'],
[snap['id'] for snap in snapshots])
self.assertEqual(['id1', 'id2'],
[snap['volume_id'] for snap in snapshots])
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volume_snapshots.list.assert_called_once_with(detailed=True)
@mock.patch('nova.volume.cinder.cinderclient')
def test_create_snapshot(self, mock_cinderclient):
snapshot = FakeSnapshot('snapshot_id1', 'id1')
mock_volume_snapshots = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(
volume_snapshots=mock_volume_snapshots)
mock_volume_snapshots.create.return_value = snapshot
created_snapshot = self.api.create_snapshot(self.ctx,
'id1',
'name',
'description')
self.assertEqual('snapshot_id1', created_snapshot['id'])
self.assertEqual('id1', created_snapshot['volume_id'])
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volume_snapshots.create.assert_called_once_with('id1', False,
'name',
'description')
@mock.patch('nova.volume.cinder.cinderclient')
def test_create_force(self, mock_cinderclient):
snapshot = FakeSnapshot('snapshot_id1', 'id1')
mock_volume_snapshots = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(
volume_snapshots=mock_volume_snapshots)
mock_volume_snapshots.create.return_value = snapshot
created_snapshot = self.api.create_snapshot_force(self.ctx,
'id1',
'name',
'description')
self.assertEqual('snapshot_id1', created_snapshot['id'])
self.assertEqual('id1', created_snapshot['volume_id'])
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volume_snapshots.create.assert_called_once_with('id1', True,
'name',
'description')
@mock.patch('nova.volume.cinder.cinderclient')
def test_delete_snapshot(self, mock_cinderclient):
mock_volume_snapshots = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(
volume_snapshots=mock_volume_snapshots)
self.api.delete_snapshot(self.ctx, 'snapshot_id')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volume_snapshots.delete.assert_called_once_with('snapshot_id')
@mock.patch('nova.volume.cinder.cinderclient')
def test_update_snapshot_status(self, mock_cinderclient):
mock_volume_snapshots = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(
volume_snapshots=mock_volume_snapshots)
self.api.update_snapshot_status(self.ctx, 'snapshot_id', 'error')
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volume_snapshots.update_snapshot_status.assert_called_once_with(
'snapshot_id', {'status': 'error', 'progress': '90%'})
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_all_volume_types(self, mock_cinderclient):
volume_type1 = FakeVolumeType('lvm_1', 'volume_type_id1')
volume_type2 = FakeVolumeType('lvm_2', 'volume_type_id2')
volume_type_list = [volume_type1, volume_type2]
mock_volume_types = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(
volume_types=mock_volume_types)
mock_volume_types.list.return_value = volume_type_list
volume_types = self.api.get_all_volume_types(self.ctx)
self.assertEqual(2, len(volume_types))
self.assertEqual(['volume_type_id1', 'volume_type_id2'],
[vol_type['id'] for vol_type in volume_types])
self.assertEqual(['lvm_1', 'lvm_2'],
[vol_type['name'] for vol_type in volume_types])
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volume_types.list.assert_called_once_with()
@mock.patch('nova.volume.cinder.cinderclient')
def test_get_volume_encryption_metadata(self, mock_cinderclient):
mock_volumes = mock.MagicMock()
mock_cinderclient.return_value = mock.MagicMock(volumes=mock_volumes)
self.api.get_volume_encryption_metadata(self.ctx,
{'encryption_key_id':
'fake_key'})
mock_cinderclient.assert_called_once_with(self.ctx)
mock_volumes.get_encryption_metadata.assert_called_once_with(
{'encryption_key_id': 'fake_key'})
def test_translate_cinder_exception_no_error(self):
my_func = mock.Mock()
my_func.__name__ = 'my_func'
my_func.return_value = 'foo'
res = cinder.translate_cinder_exception(my_func)('fizzbuzz',
'bar', 'baz')
self.assertEqual('foo', res)
my_func.assert_called_once_with('fizzbuzz', 'bar', 'baz')
def test_translate_cinder_exception_cinder_connection_error(self):
self._do_translate_cinder_exception_test(
cinder_exception.ConnectionError,
exception.CinderConnectionFailed)
def test_translate_cinder_exception_keystone_connection_error(self):
self._do_translate_cinder_exception_test(
keystone_exception.ConnectionError,
exception.CinderConnectionFailed)
def test_translate_cinder_exception_cinder_bad_request(self):
self._do_translate_cinder_exception_test(
cinder_exception.BadRequest(400, '400'),
exception.InvalidInput)
def test_translate_cinder_exception_keystone_bad_request(self):
self._do_translate_cinder_exception_test(
keystone_exception.BadRequest,
exception.InvalidInput)
def test_translate_cinder_exception_cinder_forbidden(self):
self._do_translate_cinder_exception_test(
cinder_exception.Forbidden(403, '403'),
exception.Forbidden)
def test_translate_cinder_exception_keystone_forbidden(self):
self._do_translate_cinder_exception_test(
keystone_exception.Forbidden,
exception.Forbidden)
def test_translate_mixed_exception_over_limit(self):
self._do_translate_mixed_exception_test(
cinder_exception.OverLimit(''),
exception.OverQuota)
def test_translate_mixed_exception_volume_not_found(self):
self._do_translate_mixed_exception_test(
cinder_exception.NotFound(''),
exception.VolumeNotFound)
def test_translate_mixed_exception_keystone_not_found(self):
self._do_translate_mixed_exception_test(
keystone_exception.NotFound,
exception.VolumeNotFound)
def test_translate_create_exception_keystone_not_found(self):
self._do_translate_create_exception_test(
keystone_exception.NotFound,
exception.NotFound)
def test_translate_create_exception_volume_not_found(self):
self._do_translate_create_exception_test(
cinder_exception.NotFound('Volume type could not be found'),
exception.NotFound)
def _do_translate_cinder_exception_test(self, raised_exc, expected_exc):
self._do_translate_exception_test(raised_exc, expected_exc,
cinder.translate_cinder_exception)
def _do_translate_mixed_exception_test(self, raised_exc, expected_exc):
self._do_translate_exception_test(raised_exc, expected_exc,
cinder.translate_mixed_exceptions)
def _do_translate_create_exception_test(self, raised_exc, expected_exc):
self._do_translate_exception_test(raised_exc, expected_exc,
cinder.translate_create_exception)
def _do_translate_exception_test(self, raised_exc, expected_exc, wrapper):
my_func = mock.Mock()
my_func.__name__ = 'my_func'
my_func.side_effect = raised_exc
self.assertRaises(expected_exc, wrapper(my_func), 'foo', 'bar', 'baz')
class CinderClientTestCase(test.NoDBTestCase):
"""Used to test constructing a cinder client object at various versions."""
def setUp(self):
super(CinderClientTestCase, self).setUp()
cinder.reset_globals()
self.ctxt = context.RequestContext('fake-user', 'fake-project')
# Mock out the keystoneauth stuff.
self.mock_session = mock.Mock(autospec=session.Session)
patcher = mock.patch('keystoneauth1.loading.'
'load_session_from_conf_options',
return_value=self.mock_session)
patcher.start()
self.addCleanup(patcher.stop)
@mock.patch('cinderclient.client.get_volume_api_from_url',
return_value='3')
def test_create_v3_client_no_microversion(self, get_volume_api):
"""Tests that creating a v3 client, which is the default, and without
specifying a microversion will default to 3.0 as the version to use.
"""
client = cinder.cinderclient(self.ctxt)
self.assertEqual(cinder_api_versions.APIVersion('3.0'),
client.api_version)
get_volume_api.assert_called_once_with(
self.mock_session.get_endpoint.return_value)
@mock.patch('nova.volume.cinder._get_highest_client_server_version',
# Fake the case that cinder is really old.
return_value=cinder_api_versions.APIVersion('2.0'))
@mock.patch('cinderclient.client.get_volume_api_from_url',
return_value='3')
def test_create_v3_client_with_microversion_too_new(self,
get_volume_api,
get_highest_version):
"""Tests that creating a v3 client and requesting a microversion that
is either too new for the server (or client) to support raises an
exception.
"""
self.assertRaises(exception.CinderAPIVersionNotAvailable,
cinder.cinderclient, self.ctxt, microversion='3.44')
get_volume_api.assert_called_once_with(
self.mock_session.get_endpoint.return_value)
get_highest_version.assert_called_once_with(
self.ctxt, self.mock_session.get_endpoint.return_value)
@mock.patch('nova.volume.cinder._get_highest_client_server_version',
return_value=cinder_api_versions.APIVersion(
cinder_api_versions.MAX_VERSION))
@mock.patch('cinderclient.client.get_volume_api_from_url',
return_value='3')
def test_create_v3_client_with_microversion_available(self,
get_volume_api,
get_highest_version):
"""Tests that creating a v3 client and requesting a microversion that
is available in the server and supported by the client will result in
creating a Client object with the requested microversion.
"""
client = cinder.cinderclient(self.ctxt, microversion='3.44')
self.assertEqual(cinder_api_versions.APIVersion('3.44'),
client.api_version)
get_volume_api.assert_called_once_with(
self.mock_session.get_endpoint.return_value)
get_highest_version.assert_called_once_with(
self.ctxt, self.mock_session.get_endpoint.return_value)
@mock.patch('nova.volume.cinder._get_highest_client_server_version',
new_callable=mock.NonCallableMock) # asserts not called
@mock.patch('cinderclient.client.get_volume_api_from_url',
return_value='3')
def test_create_v3_client_with_microversion_skip_version_check(
self, get_volume_api, get_highest_version):
"""Tests that creating a v3 client and requesting a microversion
but asking to skip the version discovery check is honored.
"""
client = cinder.cinderclient(self.ctxt, microversion='3.44',
skip_version_check=True)
self.assertEqual(cinder_api_versions.APIVersion('3.44'),
client.api_version)
get_volume_api.assert_called_once_with(
self.mock_session.get_endpoint.return_value)
@mock.patch('nova.volume.cinder.LOG.error')
@mock.patch.object(ks_loading, 'load_auth_from_conf_options')
def test_load_auth_plugin_failed(self, mock_load_from_conf, mock_log_err):
mock_load_from_conf.return_value = None
self.assertRaises(cinder_exception.Unauthorized,
cinder._load_auth_plugin, CONF)
mock_log_err.assert_called()
self.assertIn('The [cinder] section of your nova configuration file',
mock_log_err.call_args[0][0])
@mock.patch('nova.volume.cinder._ADMIN_AUTH')
def test_admin_context_without_token(self,
mock_admin_auth):
mock_admin_auth.return_value = '_FAKE_ADMIN_AUTH'
admin_ctx = context.get_admin_context()
params = cinder._get_cinderclient_parameters(admin_ctx)
self.assertEqual(params[0], mock_admin_auth)
|
|
import bpy, mathutils
from macouno import mesh_extras
# Select all faces (or deselect)
def all(invert=False):
for f in bpy.context.active_object.data.faces:
f.select = True
return
# Deselect all faces
def none():
for f in bpy.context.active_object.data.faces:
f.select = False
return
# Faces connected to the current selection
def connected(extend=False):
mesh = bpy.context.active_object.data
if len(mesh.faces):
# Get a list of all vertices in selected faces
vList = []
for f in mesh.faces:
if f.select:
vList.extend(f.vertices)
if len(vList):
# For every deselected face, see if it shares a vert with a selected face
selFaces = []
for f in mesh.faces:
if not f.select:
for v in f.vertices:
if v in vList:
selFaces.append(f)
break
if len(selFaces):
# Select only the connected faces
if not extend:
for f in mesh.faces:
if f in selFaces:
f.select = True
else:
f.select = False
# Add the connected faces to the current selection
else:
for f in selFaces:
f.select=True
return
# Innermost faces
def innermost(invert=False):
mesh = bpy.context.active_object.data
# No use continuing if there are no faces
if len(mesh.faces):
# Get a list with the selected items
oItems = mesh_extras.get_selected('faces',False)
oLen = len(oItems)
# No use continuing unless there are both selected and deselected items
if oLen and mesh_extras.has_selected('faces',True):
nItems = oItems
nLen = oLen
while True:
cLen = nLen
cItems = nItems
# Deselect the outermost items
outermost(True)
nItems = mesh_extras.get_selected('faces',False)
nLen = len(nItems)
if nLen >= cLen or not nLen:
break
# If we have a list with items, and it's smaller than the original
if cLen and cLen < oLen:
for item in oItems:
if not invert and item in cItems:
item.select = True
elif invert and not item in cItems:
item.select = True
return
# Select the outermost items in the current selection
# mesh=mesh data, invert = True or False
def outermost(invert=False):
mesh = bpy.context.active_object.data
if len(mesh.faces):
# Get a list of all vertices in deselected faces
vList = []
for f in mesh.faces:
if not f.select:
vList.extend(f.vertices)
if len(vList):
# For every selected face, see if it shares a vert with a deselected face
selFaces = []
for f in mesh.faces:
if f.select:
for v in f.vertices:
if v in vList:
selFaces.append(f)
break
if len(selFaces):
# Keep only the outermost faces selected
if not invert:
for f in mesh.faces:
if f in selFaces:
f.select = True
else:
f.select = False
# Deselect the outermost faces (when inverting)
else:
for f in selFaces:
f.select=False
return
# Select checkered faces
def checkered(seed=0, extend=False):
import random
random.seed(str(seed))
mesh = bpy.context.active_object.data
# Two lists of faces: selFaces to be selected, unFaces to be deselected
selFaces = []
unFaces = list(mesh.faces)
# Put 1 face in the list of selected faces (and remove from unselected faces)
f = random.choice(unFaces)
selFaces.append(f)
unFaces.remove(f)
preSel = len(selFaces)
# Make sure there are faces before we continue!
if not preSel:
return
postSel = preSel + 1
start = True
# As long as we keep selecting more faces we have to continue (to cover the whole mesh)
while postSel > preSel:
if start:
start = False
else:
preSel = postSel
# Add the faces at the corners of the current selection to this one
selFaces, unFaces = addCornered(selFaces, unFaces)
postSel = len(selFaces)
# Select the faces we found!
selectFaces(selFaces, extend)
return
def addCornered(selFaces, unFaces):
# Loop through the unselected faces to find out if they should be selected
for f in unFaces:
verts = f.vertices
sel = 0
# Check against the selected faces
for fs in selFaces:
# Find the verts shared between these faces
intersection = [v for v in verts if v in fs.vertices]
intLen = len(intersection)
# If there's just the one intersection it's a corner connection
if intLen == 1 and sel == 0:
sel = 1
# If there's more than one... it's sharing an edge
elif intLen > 1:
sel = 2
break
# If it's just a corner
if sel == 1:
selFaces.append(f)
unFaces.remove(f)
return selFaces, unFaces
# Select all the faces in a certain vertex group
def in_group(group,extend=False):
groupId = group.index
mesh = bpy.context.active_object.data
selFaces = []
# Find all the faces with all verts (3 or more) in this group
for f in mesh.faces:
grCnt = 0
for v in f.vertices:
vert = mesh.vertices[v]
try:
for g in vert.groups:
if g.group == groupId:
grCnt += 1
break
except:
pass
if grCnt == len(f.vertices):
selFaces.append(f)
selectFaces(selFaces, extend)
return
# Go select a list of faces (if they need to be, depending on settings and situations)
def selectFaces(selFaces, extend=False):
mesh = bpy.context.active_object.data
hasSelected = mesh_extras.contains_selected_item(mesh.faces)
for f in mesh.faces:
# We extend and have a selection, so we just need to select extra stuff
# Selecting what is already selected does no harm
if extend and hasSelected:
if f in selFaces:
f.select = True
# If we already have a selection and we don't extend.. we just deselect what is selected
elif hasSelected:
if not f in selFaces:
f.select = False
# If we have no selection yet.. we only select what's in the list
else:
if f in selFaces:
f.select = True
return
# Select by direction
def by_direction(direction, divergence, extend=False):
mesh = bpy.context.active_object.data
direction = mathutils.Vector(direction)
hasSelected = mesh_extras.contains_selected_item(mesh.faces)
# Make sure there's an actual direction
if direction.length:
# Loop through all the given faces
for f in mesh.faces:
isSelected = f.select
s = selectCheck(isSelected, hasSelected, extend)
d = deselectCheck(isSelected, hasSelected, extend)
angle = direction.angle(f.normal)
if s and angle <= divergence:
f.select = True
elif d and angle > divergence:
f.select = False
return
# Do a semi random select based on a number
def liberal(key='', extend=False):
from macouno import liberty
lib = liberty.liberty('bool', key)
mesh = bpy.context.active_object.data
hasSelected = mesh_extras.contains_selected_item(mesh.faces)
# Loop through all the given faces
for f in mesh.faces:
s = selectCheck(f.select, hasSelected, extend)
d = deselectCheck(f.select, hasSelected, extend)
# Semi-randomly decide whether to select or deselect this face
if s and lib.Choose('bool'):
f.select = True
if d and not lib.Choose('bool'):
f.select = False
return
# Make sure there are less faces selected than the limit
def limit(limit=1, key=''):
from macouno import liberty
lib = liberty.liberty('string', key)
nuFaces = lib.makeDict(mesh_extras.get_selected_faces())
nuLen = len(nuFaces)
while nuLen > limit:
deFace = lib.Choose('select',nuFaces)
deFace.select = False
nuFaces = lib.makeDict(mesh_extras.get_selected_faces())
nuLen = len(nuFaces)
return
# See if the current item should be selected or not
def selectCheck(isSelected, hasSelected, extend):
# If we are extending or nothing is selected we want to select
if extend or not hasSelected:
return True
return False
# See if the current item should be deselected or not
def deselectCheck(isSelected, hasSelected, extend):
# If something is selected and we're not extending we want to deselect
if hasSelected and not extend:
return True
return False
|
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from nose.plugins.skip import SkipTest
from nose.tools import assert_true, assert_equal, assert_false
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access
from hadoop import pseudo_hdfs4
from hadoop.pseudo_hdfs4 import is_live_cluster
from liboozie.oozie_api_tests import OozieServerProvider
from oozie.tests import OozieBase
from pig.models import create_or_update_script, PigScript
from pig.api import OozieApi, get
class TestPigBase(object):
SCRIPT_ATTRS = {
'id': 1000,
'name': 'Test',
'script': 'A = LOAD "$data"; STORE A INTO "$output";',
'parameters': [],
'resources': [],
'hadoopProperties': []
}
def setUp(self):
self.c = make_logged_in_client(is_superuser=False)
grant_access("test", "test", "pig")
self.user = User.objects.get(username='test')
def create_script(self):
return create_script(self.user)
def create_script(user, xattrs=None):
attrs = {'user': user}
attrs.update(TestPigBase.SCRIPT_ATTRS)
if xattrs is not None:
attrs.update(xattrs)
return create_or_update_script(**attrs)
class TestMock(TestPigBase):
def test_create_script(self):
pig_script = self.create_script()
assert_equal('Test', pig_script.dict['name'])
def test_check_hcatalogs_sharelib(self):
api = get(None, None, self.user)
pig_script = self.create_script()
# Regular
wf = api._create_workflow(pig_script, '[]')
assert_false({'name': u'oozie.action.sharelib.for.pig', 'value': u'pig,hcatalog'} in wf.find_all_parameters(), wf.find_all_parameters())
# With HCat
pig_script.update_from_dict({
'script':"""
a = LOAD 'sample_07' USING org.apache.hcatalog.pig.HCatLoader();
dump a;
"""})
pig_script.save()
wf = api._create_workflow(pig_script, '[]')
assert_true({'name': u'oozie.action.sharelib.for.pig', 'value': u'pig,hcatalog'} in wf.find_all_parameters(), wf.find_all_parameters())
def test_editor_view(self):
response = self.c.get(reverse('pig:app'))
assert_true('Unsaved script' in response.content)
def test_save(self):
attrs = {'user': self.user,}
attrs.update(TestPigBase.SCRIPT_ATTRS)
attrs['parameters'] = json.dumps(TestPigBase.SCRIPT_ATTRS['parameters'])
attrs['resources'] = json.dumps(TestPigBase.SCRIPT_ATTRS['resources'])
attrs['hadoopProperties'] = json.dumps(TestPigBase.SCRIPT_ATTRS['hadoopProperties'])
# Save
self.c.post(reverse('pig:save'), data=attrs, follow=True)
# Update
self.c.post(reverse('pig:save'), data=attrs, follow=True)
def parse_oozie_logs(self):
api = get(None, None, self.user)
assert_equal(
'''Run pig script using PigRunner.run() for Pig version 0.8+
Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported)
compiled Jun 30 2013, 03:40:22
Run pig script using PigRunner.run() for Pig version 0.8+
2013-10-09 17:30:39,709 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported) compiled Jun 30 2013, 03:40:22
2013-10-09 17:30:39,709 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported) compiled Jun 30 2013, 03:40:22
2013-10-09 17:30:39,710 [main] INFO org.apache.pig.Main - Logging error messages to: /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/pig-job_1381360805876_0001.log
2013-10-09 17:30:39,710 [main] INFO org.apache.pig.Main - Logging error messages to: /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/pig-job_1381360805876_0001.log
2013-10-09 17:30:39,739 [main] WARN org.apache.hadoop.conf.Configuration - dfs.df.interval is deprecated. Instead, use fs.df.interval
2013-10-09 17:30:39,739 [main] WARN org.apache.hadoop.conf.Configuration - mapred.task.tracker.http.address is deprecated. Instead, use mapreduce.tasktracker.http.address
2013-10-09 17:30:39,833 [main] INFO org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to map-reduce job tracker at: localhost:8032
hdfs://localhost:8020/user/romain/.Trash <dir>
hdfs://localhost:8020/user/romain/examples <dir>
hdfs://localhost:8020/user/romain/tweets <dir>
hdfs://localhost:8020/user/romain/wordcount.jar<r 1> 3165
hdfs://localhost:8020/user/romain/words <dir>
hdfs://localhost:8020/user/romain/yelp <dir>''', api._match_logs({'logs': [None, OOZIE_LOGS]}))
class TestWithHadoop(OozieBase):
def setUp(self):
super(TestWithHadoop, self).setUp()
# FIXME (HUE-2562): The tests unfortunately require superuser at the
# moment, but should be rewritten to not need it.
self.c = make_logged_in_client(is_superuser=True)
grant_access("test", "test", "pig")
self.user = User.objects.get(username='test')
self.c.post(reverse('pig:install_examples'))
self.cluster = pseudo_hdfs4.shared_cluster()
self.api = OozieApi(self.cluster.fs, self.cluster.jt, self.user)
def test_create_workflow(self):
xattrs = {
'parameters': [
{'name': 'output', 'value': self.cluster.fs_prefix + '/test_pig_script_workflow'},
{'name': '-param', 'value': 'input=/data'}, # Alternative way for params
{'name': '-optimizer_off', 'value': 'SplitFilter'},
{'name': '-v', 'value': ''},
],
'resources': [
{'type': 'file', 'value': '/tmp/file'},
{'type': 'archive', 'value': '/tmp/file.zip'},
],
'hadoopProperties': [
{'name': 'mapred.map.tasks.speculative.execution', 'value': 'false'},
{'name': 'mapred.job.queue', 'value': 'fast'},
]
}
pig_script = create_script(self.user, xattrs)
output_path = self.cluster.fs_prefix + '/test_pig_script_2'
params = json.dumps([
{'name': 'output', 'value': output_path},
])
workflow = self.api._create_workflow(pig_script, params)
pig_action = workflow.start.get_child('to').get_full_node()
assert_equal([
{u'type': u'argument', u'value': u'-param'}, {u'type': u'argument', u'value': u'output=%s' % output_path},
{u'type': u'argument', u'value': u'-param'}, {u'type': u'argument', u'value': u'input=/data'},
{u'type': u'argument', u'value': u'-optimizer_off'}, {u'type': u'argument', u'value': u'SplitFilter'},
{u'type': u'argument', u'value': u'-v'},
], pig_action.get_params())
assert_equal([
{u'name': u'mapred.map.tasks.speculative.execution', u'value': u'false'},
{u'name': u'mapred.job.queue', u'value': u'fast'},
], pig_action.get_properties())
assert_equal(['/tmp/file'], pig_action.get_files())
assert_equal([
{u'dummy': u'', u'name': u'/tmp/file.zip'},
], pig_action.get_archives())
def wait_until_completion(self, pig_script_id, timeout=300.0, step=5, expected_status='SUCCEEDED'):
script = PigScript.objects.get(id=pig_script_id)
job_id = script.dict['job_id']
response = self.c.get(reverse('pig:watch', args=[job_id]))
response = json.loads(response.content)
start = time.time()
while response['workflow']['status'] in ['PREP', 'RUNNING'] and time.time() - start < timeout:
time.sleep(step)
response = self.c.get(reverse('pig:watch', args=[job_id]))
response = json.loads(response.content)
logs = OozieServerProvider.oozie.get_job_log(job_id)
if response['workflow']['status'] != expected_status:
msg = "[%d] %s took more than %d to complete or %s: %s" % (time.time(), job_id, timeout, response['workflow']['status'], logs)
self.api.stop(job_id)
raise Exception(msg)
return pig_script_id
def test_submit(self):
if is_live_cluster():
raise SkipTest('HUE-2909: Skipping because test is not reentrant')
script = PigScript.objects.get(id=1100713)
script_dict = script.dict
post_data = {
'id': script.id,
'name': script_dict['name'],
'script': script_dict['script'],
'user': script.owner,
'parameters': json.dumps(script_dict['parameters']),
'resources': json.dumps(script_dict['resources']),
'hadoopProperties': json.dumps(script_dict['hadoopProperties']),
'submissionVariables': json.dumps([{"name": "output", "value": self.cluster.fs_prefix + '/test_pig_script_submit'}]),
}
response = self.c.post(reverse('pig:run'), data=post_data, follow=True)
job_id = json.loads(response.content)['id']
self.wait_until_completion(job_id)
def test_stop(self):
script = PigScript.objects.get(id=1100713)
script_dict = script.dict
post_data = {
'id': script.id,
'name': script_dict['name'],
'script': script_dict['script'],
'user': script.owner,
'parameters': json.dumps(script_dict['parameters']),
'resources': json.dumps(script_dict['resources']),
'hadoopProperties': json.dumps(script_dict['hadoopProperties']),
'submissionVariables': json.dumps([{"name": "output", "value": self.cluster.fs_prefix + '/test_pig_script_stop'}]),
}
submit_response = self.c.post(reverse('pig:run'), data=post_data, follow=True)
script = PigScript.objects.get(id=json.loads(submit_response.content)['id'])
assert_true(script.dict['job_id'], script.dict)
self.c.post(reverse('pig:stop'), data={'id': script.id}, follow=True)
self.wait_until_completion(json.loads(submit_response.content)['id'], expected_status='KILLED')
OOZIE_LOGS =""" Log Type: stdout
Log Length: 117627
Oozie Launcher starts
Heart beat
Starting the execution of prepare actions
Completed the execution of prepare actions successfully
Files in current dir:/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/.
======================
File: commons-cli-1.2.jar
File: antlr-runtime-3.4.jar
File: stringtemplate-3.2.1.jar
File: script.pig
File: jyson-1.0.2.jar
Oozie Java/Map-Reduce/Pig action launcher-job configuration
=================================================================
Workflow job id : 0000000-131009162028638-oozie-oozi-W
Workflow action id: 0000000-131009162028638-oozie-oozi-W@pig
Classpath :
------------------------
/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002
/etc/hadoop/conf
/usr/lib/hadoop/hadoop-nfs-2.1.0-cdh5.0.0-SNAPSHOT.jar
/usr/lib/hadoop/hadoop-common-2.1.0-cdh5.0.0-SNAPSHOT.jar
/usr/lib/hadoop/hadoop-auth-2.1.0-cdh5.0.0-SNAPSHOT.jar
/usr/lib/hadoop/hadoop-common.jar
/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/jyson-1.0.2.jar
------------------------
Main class : org.apache.oozie.action.hadoop.PigMain
Maximum output : 2048
Arguments :
Java System Properties:
------------------------
#
#Wed Oct 09 17:30:39 PDT 2013
java.runtime.name=Java(TM) SE Runtime Environment
awt.toolkit=sun.awt.X11.XToolkit
java.vm.info=mixed mode
java.version=1.7.0_40
java.ext.dirs=/usr/lib/jvm/java-7-oracle/jre/lib/ext\:/usr/java/packages/lib/ext
sun.boot.class.path=/usr/lib/jvm/java-7-oracle/jre/lib/resources.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/rt.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/sunrsasign.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/jsse.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/jce.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/charsets.jar\:/usr/lib/jvm/java-7-oracle/jre/lib/jfr.jar\:/usr/lib/jvm/java-7-oracle/jre/classes
java.vendor=Oracle Corporation
file.separator=/
oozie.launcher.job.id=job_1381360805876_0001
oozie.action.stats.properties=/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/stats.properties
java.vendor.url.bug=http\://bugreport.sun.com/bugreport/
sun.io.unicode.encoding=UnicodeLittle
sun.cpu.endian=little
sun.cpu.isalist=
------------------------
=================================================================
>>> Invoking Main class now >>>
Oozie Pig action configuration
=================================================================
------------------------
Setting env property for mapreduce.job.credentials.binary to:/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/container_tokens
------------------------
pig.properties:
--------------------
mapreduce.job.ubertask.enable : false
yarn.resourcemanager.max-completed-applications : 10000
yarn.resourcemanager.delayed.delegation-token.removal-interval-ms : 30000
yarn.nodemanager.delete.debug-delay-sec : 0
hadoop.ssl.require.client.cert : false
dfs.datanode.max.transfer.threads : 4096
--------------------
Pig script [script.pig] content:
------------------------
ls
------------------------
Current (local) dir = /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002
Pig command arguments :
-file
script.pig
-log4jconf
/var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/piglog4j.properties
-logfile
pig-job_1381360805876_0001.log
=================================================================
>>> Invoking Pig command line now >>>
Run pig script using PigRunner.run() for Pig version 0.8+
Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported)
compiled Jun 30 2013, 03:40:22
Run pig script using PigRunner.run() for Pig version 0.8+
2013-10-09 17:30:39,709 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported) compiled Jun 30 2013, 03:40:22
2013-10-09 17:30:39,709 [main] INFO org.apache.pig.Main - Apache Pig version 0.11.0-cdh4.4.0-SNAPSHOT (rexported) compiled Jun 30 2013, 03:40:22
2013-10-09 17:30:39,710 [main] INFO org.apache.pig.Main - Logging error messages to: /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/pig-job_1381360805876_0001.log
2013-10-09 17:30:39,710 [main] INFO org.apache.pig.Main - Logging error messages to: /var/lib/hadoop-yarn/cache/yarn/nm-local-dir/usercache/romain/appcache/application_1381360805876_0001/container_1381360805876_0001_01_000002/pig-job_1381360805876_0001.log
2013-10-09 17:30:39,739 [main] WARN org.apache.hadoop.conf.Configuration - dfs.df.interval is deprecated. Instead, use fs.df.interval
2013-10-09 17:30:39,739 [main] WARN org.apache.hadoop.conf.Configuration - mapred.task.tracker.http.address is deprecated. Instead, use mapreduce.tasktracker.http.address
2013-10-09 17:30:39,833 [main] INFO org.apache.pig.backend.hadoop.executionengine.HExecutionEngine - Connecting to map-reduce job tracker at: localhost:8032
hdfs://localhost:8020/user/romain/.Trash <dir>
hdfs://localhost:8020/user/romain/examples <dir>
hdfs://localhost:8020/user/romain/tweets <dir>
hdfs://localhost:8020/user/romain/wordcount.jar<r 1> 3165
hdfs://localhost:8020/user/romain/words <dir>
hdfs://localhost:8020/user/romain/yelp <dir>
<<< Invocation of Pig command completed <<<
Hadoop Job IDs executed by Pig:
<<< Invocation of Main class completed <<<
Oozie Launcher ends
2013-10-09 17:30:40,009 [main] INFO org.apache.hadoop.mapred.Task - Task:attempt_1381360805876_0001_m_000000_0 is done. And is in the process of committing
2013-10-09 17:30:40,087 [main] INFO org.apache.hadoop.mapred.Task - Task attempt_1381360805876_0001_m_000000_0 is allowed to commit now
2013-10-09 17:30:40,094 [main] INFO org.apache.hadoop.mapreduce.lib.output.FileOutputCommitter - Saved output of task 'attempt_1381360805876_0001_m_000000_0' to hdfs://localhost:8020/user/romain/oozie-oozi/0000000-131009162028638-oozie-oozi-W/pig--pig/output/_temporary/1/task_1381360805876_0001_m_000000
2013-10-09 17:30:40,153 [main] INFO org.apache.hadoop.mapred.Task - Task 'attempt_1381360805876_0001_m_000000_0' done.
2013-10-09 17:30:40,254 [main] INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl - Stopping MapTask metrics system...
2013-10-09 17:30:40,257 [main] INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl - MapTask metrics system stopped.
2013-10-09 17:30:40,257 [main] INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl - MapTask metrics system shutdown complete.
"""
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Xcode supports build variable substitutions and CPP; sadly, that doesn't work
# because:
#
# 1. Xcode wants to do the Info.plist work before it runs any build phases,
# this means if we were to generate a .h file for INFOPLIST_PREFIX_HEADER
# we'd have to put it in another target so it runs in time.
# 2. Xcode also doesn't check to see if the header being used as a prefix for
# the Info.plist has changed. So even if we updated it, it's only looking
# at the modtime of the info.plist to see if that's changed.
#
# So, we work around all of this by making a script build phase that will run
# during the app build, and simply update the info.plist in place. This way
# by the time the app target is done, the info.plist is correct.
#
from __future__ import print_function
import optparse
import os
import plistlib
import re
import subprocess
import sys
import tempfile
TOP = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
def _ConvertPlist(source_plist, output_plist, fmt):
"""Convert |source_plist| to |fmt| and save as |output_plist|."""
assert sys.version_info.major == 2, "Use plistlib directly in Python 3"
return subprocess.call(
['plutil', '-convert', fmt, '-o', output_plist, source_plist])
def _GetOutput(args):
"""Runs a subprocess and waits for termination. Returns (stdout, returncode)
of the process. stderr is attached to the parent."""
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
stdout, _ = proc.communicate()
return stdout.decode('UTF-8'), proc.returncode
def _RemoveKeys(plist, *keys):
"""Removes a varargs of keys from the plist."""
for key in keys:
try:
del plist[key]
except KeyError:
pass
def _ApplyVersionOverrides(version, keys, overrides, separator='.'):
"""Applies version overrides.
Given a |version| string as "a.b.c.d" (assuming a default separator) with
version components named by |keys| then overrides any value that is present
in |overrides|.
>>> _ApplyVersionOverrides('a.b', ['major', 'minor'], {'minor': 'd'})
'a.d'
"""
if not overrides:
return version
version_values = version.split(separator)
for i, (key, value) in enumerate(zip(keys, version_values)):
if key in overrides:
version_values[i] = overrides[key]
return separator.join(version_values)
def _GetVersion(version_format, values, overrides=None):
"""Generates a version number according to |version_format| using the values
from |values| or |overrides| if given."""
result = version_format
for key in values:
if overrides and key in overrides:
value = overrides[key]
else:
value = values[key]
result = result.replace('@%s@' % key, value)
return result
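# Hedged illustration of the substitution performed above (not used by the
# script itself): _GetVersion('@MAJOR@.@BUILD@', {'MAJOR': '1', 'BUILD': '42'})
# returns '1.42'; a key present in |overrides| takes precedence over |values|.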
def _AddVersionKeys(plist, version_format_for_key, version=None,
overrides=None, vivaldi_build=None):
"""Adds the product version number into the plist. Returns True on success and
False on error. The error will be printed to stderr."""
if not version:
# Pull in the Chrome version number.
VERSION_TOOL = os.path.join(TOP, 'build/util/version.py')
VERSION_FILE = os.path.join(TOP, 'chrome/VERSION')
VIVALDI_VERSION_FILE = os.path.join(TOP, '../VIVALDI_VERSION')
(stdout, retval) = _GetOutput([
VERSION_TOOL, '-f', VERSION_FILE,
'-f', VIVALDI_VERSION_FILE,
'-e', 'VIVALDI_BUILD='+(vivaldi_build or '1'),
'-t', '@VIVALDI_MAJOR@.@VIVALDI_MINOR@.@VIVALDI_NIGHTLY@.@VIVALDI_BUILD@'
])
# If the command finished with a non-zero return code, then report the
# error up.
if retval != 0:
return False
version = stdout.strip()
# Parse the given version number, which should be in MAJOR.MINOR.BUILD.PATCH
# format (where each value is a number). Note that str.isdigit() returns
# True if the string is composed only of digits (and thus matches the \d+ regexp).
groups = version.split('.')
if len(groups) != 4 or not all(element.isdigit() for element in groups):
print('Invalid version string specified: "%s"' % version, file=sys.stderr)
return False
values = dict(zip(('MAJOR', 'MINOR', 'BUILD', 'PATCH'), groups))
for key in version_format_for_key:
plist[key] = _GetVersion(version_format_for_key[key], values, overrides)
# Return with no error.
return True
def _DoSCMKeys(plist, add_keys):
"""Adds the SCM information, visible in about:version, to property list. If
|add_keys| is True, it will insert the keys, otherwise it will remove them."""
scm_revision = None
if add_keys:
# Pull in the Chrome revision number.
VERSION_TOOL = os.path.join(TOP, 'build/util/version.py')
LASTCHANGE_FILE = os.path.join(TOP, 'build/util/LASTCHANGE')
(stdout, retval) = _GetOutput(
[VERSION_TOOL, '-f', LASTCHANGE_FILE, '-t', '@LASTCHANGE@'])
if retval:
return False
scm_revision = stdout.rstrip()
# See if the operation failed.
_RemoveKeys(plist, 'SCMRevision')
if scm_revision != None:
plist['SCMRevision'] = scm_revision
elif add_keys:
print('Could not determine SCM revision. This may be OK.', file=sys.stderr)
return True
def _AddBreakpadKeys(plist, branding, platform, staging):
"""Adds the Breakpad keys. This must be called AFTER _AddVersionKeys() and
also requires the |branding| argument."""
plist['BreakpadReportInterval'] = '3600' # Deliberately a string.
plist['BreakpadProduct'] = '%s_%s' % (branding, platform)
plist['BreakpadProductDisplay'] = branding
if staging:
plist['BreakpadURL'] = 'https://clients2.google.com/cr/staging_report'
else:
plist['BreakpadURL'] = 'https://clients2.google.com/cr/report'
# These are both deliberately strings and not boolean.
plist['BreakpadSendAndExit'] = 'YES'
plist['BreakpadSkipConfirm'] = 'YES'
def _RemoveBreakpadKeys(plist):
"""Removes any set Breakpad keys."""
_RemoveKeys(plist, 'BreakpadURL', 'BreakpadReportInterval', 'BreakpadProduct',
'BreakpadProductDisplay', 'BreakpadVersion',
'BreakpadSendAndExit', 'BreakpadSkipConfirm')
def _TagSuffixes():
# Keep this list sorted in the order that tag suffix components are to
# appear in a tag value. That is to say, it should be sorted per ASCII.
components = ('full', )
assert tuple(sorted(components)) == components
components_len = len(components)
combinations = 1 << components_len
tag_suffixes = []
for combination in range(0, combinations):
tag_suffix = ''
for component_index in range(0, components_len):
if combination & (1 << component_index):
tag_suffix += '-' + components[component_index]
tag_suffixes.append(tag_suffix)
return tag_suffixes
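# Hedged illustration: with components == ('full',) the loop above enumerates
# both bit combinations, so _TagSuffixes() returns ['', '-full'].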
def _AddKeystoneKeys(plist, bundle_identifier, base_tag):
"""Adds the Keystone keys. This must be called AFTER _AddVersionKeys() and
also requires the |bundle_identifier| argument (com.example.product)."""
plist['KSVersion'] = plist['CFBundleShortVersionString']
plist['KSProductID'] = bundle_identifier
plist['KSUpdateURL'] = 'https://tools.google.com/service/update2'
_RemoveKeys(plist, 'KSChannelID')
if base_tag != '':
plist['KSChannelID'] = base_tag
for tag_suffix in _TagSuffixes():
if tag_suffix:
plist['KSChannelID' + tag_suffix] = base_tag + tag_suffix
def _RemoveKeystoneKeys(plist):
"""Removes any set Keystone keys."""
_RemoveKeys(plist, 'KSVersion', 'KSProductID', 'KSUpdateURL')
tag_keys = ['KSChannelID']
for tag_suffix in _TagSuffixes():
tag_keys.append('KSChannelID' + tag_suffix)
_RemoveKeys(plist, *tag_keys)
def _AddGTMKeys(plist, platform):
"""Adds the GTM metadata keys. This must be called AFTER _AddVersionKeys()."""
plist['GTMUserAgentID'] = plist['CFBundleName']
if platform == 'ios':
plist['GTMUserAgentVersion'] = plist['CFBundleVersion']
else:
plist['GTMUserAgentVersion'] = plist['CFBundleShortVersionString']
def _RemoveGTMKeys(plist):
"""Removes any set GTM metadata keys."""
_RemoveKeys(plist, 'GTMUserAgentID', 'GTMUserAgentVersion')
def _AddSparkleKeys(plist, vivaldi_release_kind):
"""Adds the Sparkle keys."""
plist['SUScheduledCheckInterval'] = 86400
plist['SUEnableAutomaticChecks'] = 'YES'
plist['SUAllowsAutomaticUpdates'] = 'YES'
if vivaldi_release_kind == 'vivaldi_final':
plist['SUFeedURL'] = 'https://update.vivaldi.com/update/1.0/public/mac/appcast.xml'
elif vivaldi_release_kind == 'vivaldi_snapshot':
plist['SUFeedURL'] = 'https://update.vivaldi.com/update/1.0/snapshot/mac/appcast.xml'
else: #vivaldi_sopranos
plist['SUFeedURL'] = 'https://update.vivaldi.com/update/1.0/sopranos_new/mac/appcast.xml'
def _RemoveSparkleKeys(plist):
"""Removes any set Sparkle keys."""
_RemoveKeys(plist,
'SUScheduledCheckInterval',
'SUEnableAutomaticChecks',
'SUAllowsAutomaticUpdates',
'SUFeedURL')
def Main(argv):
parser = optparse.OptionParser('%prog [options]')
parser.add_option('--plist',
dest='plist_path',
action='store',
type='string',
default=None,
help='The path of the plist to tweak.')
parser.add_option('--output', dest='plist_output', action='store',
type='string', default=None, help='If specified, the path to output ' + \
'the tweaked plist, rather than overwriting the input.')
parser.add_option('--breakpad',
dest='use_breakpad',
action='store',
type='int',
default=False,
help='Enable Breakpad [1 or 0]')
parser.add_option(
'--breakpad_staging',
dest='use_breakpad_staging',
action='store_true',
default=False,
help='Use staging breakpad to upload reports. Ignored if --breakpad=0.')
parser.add_option('--keystone',
dest='use_keystone',
action='store',
type='int',
default=False,
help='Enable Keystone [1 or 0]')
parser.add_option('--keystone-base-tag',
default='',
help='Base Keystone tag to set')
parser.add_option('--scm',
dest='add_scm_info',
action='store',
type='int',
default=True,
help='Add SCM metadata [1 or 0]')
parser.add_option('--branding',
dest='branding',
action='store',
type='string',
default=None,
help='The branding of the binary')
parser.add_option('--bundle_id',
dest='bundle_identifier',
action='store',
type='string',
default=None,
help='The bundle id of the binary')
parser.add_option('--platform',
choices=('ios', 'mac'),
default='mac',
help='The target platform of the bundle')
parser.add_option('--add-gtm-metadata',
dest='add_gtm_info',
action='store',
type='int',
default=False,
help='Add GTM metadata [1 or 0]')
parser.add_option(
'--version-overrides',
action='append',
help='Key-value pair to override specific component of version '
'like key=value (can be passed multiple time to configure '
'more than one override)')
parser.add_option('--format',
choices=('binary1', 'xml1'),
default='xml1',
help='Format to use when writing property list '
'(default: %(default)s)')
parser.add_option('--version',
dest='version',
action='store',
type='string',
default=None,
help='The version string [major.minor.build.patch]')
parser.add_option('--vivaldi-build', dest='vivaldi_build', action='store', type='string',
default=None, help='The build number string')
parser.add_option('--vivaldi-release-kind', dest='vivaldi_release_kind', action='store', type='string',
default=None, help='The type of Vivaldi build')
parser.add_option('--sparkle', dest='use_sparkle', action='store',
type='int', default=False, help='Enable Sparkle [1 or 0]')
(options, args) = parser.parse_args(argv)
if len(args) > 0:
print(parser.get_usage(), file=sys.stderr)
return 1
if not options.plist_path:
print('No --plist specified.', file=sys.stderr)
return 1
# Read the plist into its parsed format. Convert the file to 'xml1' as
# plistlib only supports that format in Python 2.7.
with tempfile.NamedTemporaryFile() as temp_info_plist:
if sys.version_info.major == 2:
retcode = _ConvertPlist(options.plist_path, temp_info_plist.name, 'xml1')
if retcode != 0:
return retcode
plist = plistlib.readPlist(temp_info_plist.name)
else:
with open(options.plist_path, 'rb') as f:
plist = plistlib.load(f)
# Convert overrides.
overrides = {}
if options.version_overrides:
for pair in options.version_overrides:
if not '=' in pair:
print('Invalid value for --version-overrides:', pair, file=sys.stderr)
return 1
key, value = pair.split('=', 1)
overrides[key] = value
if key not in ('MAJOR', 'MINOR', 'BUILD', 'PATCH'):
print('Unsupported key for --version-overrides:', key, file=sys.stderr)
return 1
if options.platform == 'mac':
version_format_for_key = {
# Add public version info so "Get Info" works.
'CFBundleShortVersionString': '@MAJOR@.@MINOR@.@BUILD@.@PATCH@',
# Honor the 429496.72.95 limit. The maximum comes from splitting
# 2^32 - 1 into 6, 2, 2 digits. The limitation was present in Tiger;
# it may have been fixed in a later OS release, but that hasn't been
# verified (it's easy enough to find out with "lsregister -dump").
# http://lists.apple.com/archives/carbon-dev/2006/Jun/msg00139.html
# BUILD will always be an increasing value, so BUILD.PATCH gives us
# something unique that meets what LS wants.
'CFBundleVersion': '@MAJOR@.@MINOR@.@BUILD@.@PATCH@',
}
else:
version_format_for_key = {
'CFBundleShortVersionString': '@MAJOR@.@BUILD@.@PATCH@',
'CFBundleVersion': '@MAJOR@.@MINOR@.@BUILD@.@PATCH@'
}
if options.use_breakpad:
version_format_for_key['BreakpadVersion'] = \
'@MAJOR@.@MINOR@.@BUILD@.@PATCH@'
# Insert the product version.
if not _AddVersionKeys(plist,
version_format_for_key,
version=options.version,
overrides=overrides,
vivaldi_build=options.vivaldi_build):
return 2
# Add Breakpad if configured to do so.
if options.use_breakpad:
if options.branding is None:
print('Use of Breakpad requires branding.', file=sys.stderr)
return 1
# Map "target_os" passed from gn via the --platform parameter
# to the platform as known by breakpad.
platform = {'mac': 'Mac', 'ios': 'iOS'}[options.platform]
_AddBreakpadKeys(plist, options.branding, platform,
options.use_breakpad_staging)
else:
_RemoveBreakpadKeys(plist)
# Add Keystone if configured to do so.
if options.use_keystone:
if options.bundle_identifier is None:
print('Use of Keystone requires the bundle id.', file=sys.stderr)
return 1
_AddKeystoneKeys(plist, options.bundle_identifier,
options.keystone_base_tag)
else:
_RemoveKeystoneKeys(plist)
# Add Sparkle.
if options.use_sparkle:
_AddSparkleKeys(plist, options.vivaldi_release_kind)
else:
_RemoveSparkleKeys(plist)
# Adds or removes any SCM keys.
if not _DoSCMKeys(plist, options.add_scm_info):
return 3
# Add GTM metadata keys.
if options.add_gtm_info:
_AddGTMKeys(plist, options.platform)
else:
_RemoveGTMKeys(plist)
output_path = options.plist_path
if options.plist_output is not None:
output_path = options.plist_output
# Now that all keys have been mutated, rewrite the file.
# Convert Info.plist to the format requested by the --format flag. Any
# format would work on Mac, but iOS requires a specific format.
if sys.version_info.major == 2:
with tempfile.NamedTemporaryFile() as temp_info_plist:
plistlib.writePlist(plist, temp_info_plist.name)
return _ConvertPlist(temp_info_plist.name, output_path, options.format)
with open(output_path, 'wb') as f:
plist_format = {'binary1': plistlib.FMT_BINARY, 'xml1': plistlib.FMT_XML}
plistlib.dump(plist, f, fmt=plist_format[options.format])
if __name__ == '__main__':
# TODO(https://crbug.com/941669): Temporary workaround until all scripts use
# python3 by default.
if sys.version_info[0] < 3:
os.execvp('python3', ['python3'] + sys.argv)
sys.exit(Main(sys.argv[1:]))
|
|
# This file is part of the REMOTE API
#
# Copyright 2006-2014 Coppelia Robotics GmbH. All rights reserved.
# [email protected]
# www.coppeliarobotics.com
#
# The REMOTE API is licensed under the terms of GNU GPL:
#
# -------------------------------------------------------------------
# The REMOTE API is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# THE REMOTE API IS DISTRIBUTED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
# WARRANTY. THE USER WILL USE IT AT HIS/HER OWN RISK. THE ORIGINAL
# AUTHORS AND COPPELIA ROBOTICS GMBH WILL NOT BE LIABLE FOR DATA LOSS,
# DAMAGES, LOSS OF PROFITS OR ANY OTHER KIND OF LOSS WHILE USING OR
# MISUSING THIS SOFTWARE.
#
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the REMOTE API. If not, see <http://www.gnu.org/licenses/>.
# -------------------------------------------------------------------
#
# This file was automatically created for V-REP release V3.2.0 on Feb. 3rd 2015
import os
pyDirectory = os.path.dirname(os.path.abspath(__file__))
import platform
import struct
from ctypes import *
from vrepConst import *
#load library
libsimx = None
if platform.system() =='Windows':
libsimx = CDLL(pyDirectory+"/remoteApi.dll")
elif platform.system() == 'Darwin':
libsimx = CDLL(pyDirectory+"/remoteApi.dylib")
else:
libsimx = CDLL(pyDirectory+"/remoteApi.so")
#ctypes wrapper prototypes
c_GetJointPosition = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetJointPosition", libsimx))
c_SetJointPosition = CFUNCTYPE(c_int32,c_int32, c_int32, c_float, c_int32)(("simxSetJointPosition", libsimx))
c_GetJointMatrix = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetJointMatrix", libsimx))
c_SetSphericalJointMatrix = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxSetSphericalJointMatrix", libsimx))
c_SetJointTargetVelocity = CFUNCTYPE(c_int32,c_int32, c_int32, c_float, c_int32)(("simxSetJointTargetVelocity", libsimx))
c_SetJointTargetPosition = CFUNCTYPE(c_int32,c_int32, c_int32, c_float, c_int32)(("simxSetJointTargetPosition", libsimx))
c_GetJointForce = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetJointForce", libsimx))
c_SetJointForce = CFUNCTYPE(c_int32,c_int32, c_int32, c_float, c_int32)(("simxSetJointForce", libsimx))
c_ReadForceSensor = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_ubyte), POINTER(c_float), POINTER(c_float), c_int32)(("simxReadForceSensor", libsimx))
c_BreakForceSensor = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32)(("simxBreakForceSensor", libsimx))
c_ReadVisionSensor = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_ubyte), POINTER(POINTER(c_float)), POINTER(POINTER(c_int32)), c_int32)(("simxReadVisionSensor", libsimx))
c_GetObjectHandle = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_int32), c_int32)(("simxGetObjectHandle", libsimx))
c_GetVisionSensorImage = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), POINTER(POINTER(c_byte)), c_ubyte, c_int32)(("simxGetVisionSensorImage", libsimx))
c_SetVisionSensorImage = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_byte), c_int32, c_ubyte, c_int32)(("simxSetVisionSensorImage", libsimx))
c_GetVisionSensorDepthBuffer= CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), POINTER(POINTER(c_float)), c_int32)(("simxGetVisionSensorDepthBuffer", libsimx))
c_GetObjectChild = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetObjectChild", libsimx))
c_GetObjectParent = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetObjectParent", libsimx))
c_ReadProximitySensor = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_ubyte), POINTER(c_float), POINTER(c_int32), POINTER(c_float), c_int32)(("simxReadProximitySensor", libsimx))
c_LoadModel = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_ubyte, POINTER(c_int32), c_int32)(("simxLoadModel", libsimx))
c_LoadUI = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_ubyte, POINTER(c_int32), POINTER(POINTER(c_int32)), c_int32)(("simxLoadUI", libsimx))
c_LoadScene = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_ubyte, c_int32)(("simxLoadScene", libsimx))
c_StartSimulation = CFUNCTYPE(c_int32,c_int32, c_int32)(("simxStartSimulation", libsimx))
c_PauseSimulation = CFUNCTYPE(c_int32,c_int32, c_int32)(("simxPauseSimulation", libsimx))
c_StopSimulation = CFUNCTYPE(c_int32,c_int32, c_int32)(("simxStopSimulation", libsimx))
c_GetUIHandle = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_int32), c_int32)(("simxGetUIHandle", libsimx))
c_GetUISlider = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetUISlider", libsimx))
c_SetUISlider = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_int32, c_int32)(("simxSetUISlider", libsimx))
c_GetUIEventButton = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), POINTER(c_int32), c_int32)(("simxGetUIEventButton", libsimx))
c_GetUIButtonProperty = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetUIButtonProperty", libsimx))
c_SetUIButtonProperty = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_int32, c_int32)(("simxSetUIButtonProperty", libsimx))
c_AddStatusbarMessage = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32)(("simxAddStatusbarMessage", libsimx))
c_AuxiliaryConsoleOpen = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32, c_int32, POINTER(c_int32), POINTER(c_int32), POINTER(c_float), POINTER(c_float), POINTER(c_int32), c_int32)(("simxAuxiliaryConsoleOpen", libsimx))
c_AuxiliaryConsoleClose = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32)(("simxAuxiliaryConsoleClose", libsimx))
c_AuxiliaryConsolePrint = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_char), c_int32)(("simxAuxiliaryConsolePrint", libsimx))
c_AuxiliaryConsoleShow = CFUNCTYPE(c_int32,c_int32, c_int32, c_ubyte, c_int32)(("simxAuxiliaryConsoleShow", libsimx))
c_GetObjectOrientation = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetObjectOrientation", libsimx))
c_GetObjectPosition = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetObjectPosition", libsimx))
c_SetObjectOrientation = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_float), c_int32)(("simxSetObjectOrientation", libsimx))
c_SetObjectPosition = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_float), c_int32)(("simxSetObjectPosition", libsimx))
c_SetObjectParent = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_ubyte, c_int32)(("simxSetObjectParent", libsimx))
c_SetUIButtonLabel = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_char), POINTER(c_char), c_int32)(("simxSetUIButtonLabel", libsimx))
c_GetLastErrors = CFUNCTYPE(c_int32,c_int32, POINTER(c_int32), POINTER(POINTER(c_char)), c_int32)(("simxGetLastErrors", libsimx))
c_GetArrayParameter = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetArrayParameter", libsimx))
c_SetArrayParameter = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxSetArrayParameter", libsimx))
c_GetBooleanParameter = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_ubyte), c_int32)(("simxGetBooleanParameter", libsimx))
c_SetBooleanParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_ubyte, c_int32)(("simxSetBooleanParameter", libsimx))
c_GetIntegerParameter = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetIntegerParameter", libsimx))
c_SetIntegerParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_int32)(("simxSetIntegerParameter", libsimx))
c_GetFloatingParameter = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetFloatingParameter", libsimx))
c_SetFloatingParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_float, c_int32)(("simxSetFloatingParameter", libsimx))
c_GetStringParameter = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(POINTER(c_char)), c_int32)(("simxGetStringParameter", libsimx))
c_GetCollisionHandle = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_int32), c_int32)(("simxGetCollisionHandle", libsimx))
c_GetDistanceHandle = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_int32), c_int32)(("simxGetDistanceHandle", libsimx))
c_ReadCollision = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_ubyte), c_int32)(("simxReadCollision", libsimx))
c_ReadDistance = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxReadDistance", libsimx))
c_RemoveObject = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32)(("simxRemoveObject", libsimx))
c_RemoveModel = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32)(("simxRemoveModel", libsimx))
c_RemoveUI = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32)(("simxRemoveUI", libsimx))
c_CloseScene = CFUNCTYPE(c_int32,c_int32, c_int32)(("simxCloseScene", libsimx))
c_GetObjects = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), POINTER(POINTER(c_int32)), c_int32)(("simxGetObjects", libsimx))
c_DisplayDialog = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_char), c_int32, POINTER(c_char), POINTER(c_float), POINTER(c_float), POINTER(c_int32), POINTER(c_int32), c_int32)(("simxDisplayDialog", libsimx))
c_EndDialog = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32)(("simxEndDialog", libsimx))
c_GetDialogInput = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(POINTER(c_char)), c_int32)(("simxGetDialogInput", libsimx))
c_GetDialogResult = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetDialogResult", libsimx))
c_CopyPasteObjects = CFUNCTYPE(c_int32,c_int32, POINTER(c_int32), c_int32, POINTER(POINTER(c_int32)), POINTER(c_int32), c_int32)(("simxCopyPasteObjects", libsimx))
c_GetObjectSelection = CFUNCTYPE(c_int32,c_int32, POINTER(POINTER(c_int32)), POINTER(c_int32), c_int32)(("simxGetObjectSelection", libsimx))
c_SetObjectSelection = CFUNCTYPE(c_int32,c_int32, POINTER(c_int32), c_int32, c_int32)(("simxSetObjectSelection", libsimx))
c_ClearFloatSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32)(("simxClearFloatSignal", libsimx))
c_ClearIntegerSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32)(("simxClearIntegerSignal", libsimx))
c_ClearStringSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32)(("simxClearStringSignal", libsimx))
c_GetFloatSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_float), c_int32)(("simxGetFloatSignal", libsimx))
c_GetIntegerSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_int32), c_int32)(("simxGetIntegerSignal", libsimx))
c_GetStringSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(POINTER(c_ubyte)), POINTER(c_int32), c_int32)(("simxGetStringSignal", libsimx))
c_SetFloatSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_float, c_int32)(("simxSetFloatSignal", libsimx))
c_SetIntegerSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32, c_int32)(("simxSetIntegerSignal", libsimx))
c_SetStringSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_ubyte), c_int32, c_int32)(("simxSetStringSignal", libsimx))
c_AppendStringSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_ubyte), c_int32, c_int32)(("simxAppendStringSignal", libsimx))
c_WriteStringStream = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_ubyte), c_int32, c_int32)(("simxWriteStringStream", libsimx))
c_GetObjectFloatParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetObjectFloatParameter", libsimx))
c_SetObjectFloatParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_float, c_int32)(("simxSetObjectFloatParameter", libsimx))
c_GetObjectIntParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetObjectIntParameter", libsimx))
c_SetObjectIntParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_int32, c_int32)(("simxSetObjectIntParameter", libsimx))
c_GetModelProperty = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetModelProperty", libsimx))
c_SetModelProperty = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_int32)(("simxSetModelProperty", libsimx))
c_Start = CFUNCTYPE(c_int32,POINTER(c_char), c_int32, c_ubyte, c_ubyte, c_int32, c_int32)(("simxStart", libsimx))
c_Finish = CFUNCTYPE(None, c_int32)(("simxFinish", libsimx))
c_GetPingTime = CFUNCTYPE(c_int32,c_int32, POINTER(c_int32))(("simxGetPingTime", libsimx))
c_GetLastCmdTime = CFUNCTYPE(c_int32,c_int32)(("simxGetLastCmdTime", libsimx))
c_SynchronousTrigger = CFUNCTYPE(c_int32,c_int32)(("simxSynchronousTrigger", libsimx))
c_Synchronous = CFUNCTYPE(c_int32,c_int32, c_ubyte)(("simxSynchronous", libsimx))
c_PauseCommunication = CFUNCTYPE(c_int32,c_int32, c_ubyte)(("simxPauseCommunication", libsimx))
c_GetInMessageInfo = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32))(("simxGetInMessageInfo", libsimx))
c_GetOutMessageInfo = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32))(("simxGetOutMessageInfo", libsimx))
c_GetConnectionId = CFUNCTYPE(c_int32,c_int32)(("simxGetConnectionId", libsimx))
c_CreateBuffer = CFUNCTYPE(POINTER(c_ubyte), c_int32)(("simxCreateBuffer", libsimx))
c_ReleaseBuffer = CFUNCTYPE(None, c_void_p)(("simxReleaseBuffer", libsimx))
c_TransferFile = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_char), c_int32, c_int32)(("simxTransferFile", libsimx))
c_EraseFile = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32)(("simxEraseFile", libsimx))
c_GetAndClearStringSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(POINTER(c_ubyte)), POINTER(c_int32), c_int32)(("simxGetAndClearStringSignal", libsimx))
c_ReadStringStream = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(POINTER(c_ubyte)), POINTER(c_int32), c_int32)(("simxReadStringStream", libsimx))
c_CreateDummy = CFUNCTYPE(c_int32,c_int32, c_float, POINTER(c_ubyte), POINTER(c_int32), c_int32)(("simxCreateDummy", libsimx))
c_Query = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_ubyte), c_int32, POINTER(c_char), POINTER(POINTER(c_ubyte)), POINTER(c_int32), c_int32)(("simxQuery", libsimx))
c_GetObjectGroupData = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_int32), POINTER(POINTER(c_int32)), POINTER(c_int32), POINTER(POINTER(c_int32)), POINTER(c_int32), POINTER(POINTER(c_float)), POINTER(c_int32), POINTER(POINTER(c_char)), c_int32)(("simxGetObjectGroupData", libsimx))
c_GetObjectVelocity = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), POINTER(c_float), c_int32)(("simxGetObjectVelocity", libsimx))
#API functions
def simxGetJointPosition(clientID, jointHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
position = c_float()
return c_GetJointPosition(clientID, jointHandle, byref(position), operationMode), position.value
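# NOTE: illustrative usage sketch, not part of the original bindings. It shows the
# (returnCode, value) tuple convention shared by the wrappers below: element 0 is the
# simx return code (0 == simx_return_ok), the remaining elements are the decoded
# out-parameters. The opmode constants are passed in explicitly here; in real code they
# normally come from vrepConst, and a simulator must be reachable for the call to succeed.
def _example_stream_joint_position(clientID, jointHandle, opmode_streaming, opmode_buffer):
    # The first call only registers the data stream; its value is not yet meaningful.
    simxGetJointPosition(clientID, jointHandle, opmode_streaming)
    # Subsequent calls read the latest streamed value from the client-side buffer.
    err, position = simxGetJointPosition(clientID, jointHandle, opmode_buffer)
    return position if err == 0 else None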
def simxSetJointPosition(clientID, jointHandle, position, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetJointPosition(clientID, jointHandle, position, operationMode)
def simxGetJointMatrix(clientID, jointHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
matrix = (c_float*12)()
ret = c_GetJointMatrix(clientID, jointHandle, matrix, operationMode)
arr = []
for i in range(12):
arr.append(matrix[i])
return ret, arr
def simxSetSphericalJointMatrix(clientID, jointHandle, matrix, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
matrix = (c_float*12)(*matrix)
return c_SetSphericalJointMatrix(clientID, jointHandle, matrix, operationMode)
def simxSetJointTargetVelocity(clientID, jointHandle, targetVelocity, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetJointTargetVelocity(clientID, jointHandle, targetVelocity, operationMode)
def simxSetJointTargetPosition(clientID, jointHandle, targetPosition, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetJointTargetPosition(clientID, jointHandle, targetPosition, operationMode)
def simxJointGetForce(clientID, jointHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
force = c_float()
return c_GetJointForce(clientID, jointHandle, byref(force), operationMode), force.value
def simxGetJointForce(clientID, jointHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
force = c_float()
return c_GetJointForce(clientID, jointHandle, byref(force), operationMode), force.value
def simxSetJointForce(clientID, jointHandle, force, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetJointForce(clientID, jointHandle, force, operationMode)
def simxReadForceSensor(clientID, forceSensorHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
state = c_ubyte()
forceVector = (c_float*3)()
torqueVector = (c_float*3)()
ret = c_ReadForceSensor(clientID, forceSensorHandle, byref(state), forceVector, torqueVector, operationMode)
arr1 = []
for i in range(3):
arr1.append(forceVector[i])
arr2 = []
for i in range(3):
arr2.append(torqueVector[i])
    return ret, state.value, arr1, arr2  # state is a c_ubyte; .value is already an int
def simxBreakForceSensor(clientID, forceSensorHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_BreakForceSensor(clientID, forceSensorHandle, operationMode)
def simxReadVisionSensor(clientID, sensorHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
detectionState = c_ubyte()
auxValues = pointer(c_float())
auxValuesCount = pointer(c_int())
ret = c_ReadVisionSensor(clientID, sensorHandle, byref(detectionState), byref(auxValues), byref(auxValuesCount), operationMode)
auxValues2 = []
if ret == 0:
s = 0
for i in range(auxValuesCount[0]):
auxValues2.append(auxValues[s:s+auxValuesCount[i+1]])
s += auxValuesCount[i+1]
#free C buffers
c_ReleaseBuffer(auxValues)
c_ReleaseBuffer(auxValuesCount)
return ret, bool(detectionState.value!=0), auxValues2
def simxGetObjectHandle(clientID, objectName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
handle = c_int()
return c_GetObjectHandle(clientID, objectName, byref(handle), operationMode), handle.value
def simxGetVisionSensorImage(clientID, sensorHandle, options, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
resolution = (c_int*2)()
c_image = pointer(c_byte())
bytesPerPixel = 3
    if (options & 1) != 0:  # bit 0 set -> grayscale image, 1 byte per pixel
bytesPerPixel = 1
ret = c_GetVisionSensorImage(clientID, sensorHandle, resolution, byref(c_image), options, operationMode)
reso = []
image = []
if (ret == 0):
image = [None]*resolution[0]*resolution[1]*bytesPerPixel
for i in range(resolution[0] * resolution[1] * bytesPerPixel):
image[i] = c_image[i]
for i in range(2):
reso.append(resolution[i])
return ret, reso, image
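# NOTE: illustrative sketch, not part of the original bindings. simxGetVisionSensorImage
# returns the image as a flat list of signed bytes; turning it into a (resY, resX, 3)
# array is a common follow-up step. numpy is an extra dependency assumed only for this
# example, and the reshape assumes the RGB case (options bit 0 cleared).
def _example_image_to_array(resolution, image):
    import numpy as np
    arr = np.asarray(image, dtype=np.int16).astype(np.uint8)  # c_byte is signed; wrap to 0..255
    return arr.reshape((resolution[1], resolution[0], 3))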
def simxSetVisionSensorImage(clientID, sensorHandle, image, options, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
size = len(image)
image_bytes = (c_byte*size)(*image)
return c_SetVisionSensorImage(clientID, sensorHandle, image_bytes, size, options, operationMode)
def simxGetVisionSensorDepthBuffer(clientID, sensorHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
c_buffer = pointer(c_float())
resolution = (c_int*2)()
ret = c_GetVisionSensorDepthBuffer(clientID, sensorHandle, resolution, byref(c_buffer), operationMode)
reso = []
buffer = []
if (ret == 0):
buffer = [None]*resolution[0]*resolution[1]
for i in range(resolution[0] * resolution[1]):
buffer[i] = c_buffer[i]
for i in range(2):
reso.append(resolution[i])
return ret, reso, buffer
def simxGetObjectChild(clientID, parentObjectHandle, childIndex, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
childObjectHandle = c_int()
return c_GetObjectChild(clientID, parentObjectHandle, childIndex, byref(childObjectHandle), operationMode), childObjectHandle.value
def simxGetObjectParent(clientID, childObjectHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
parentObjectHandle = c_int()
return c_GetObjectParent(clientID, childObjectHandle, byref(parentObjectHandle), operationMode), parentObjectHandle.value
def simxReadProximitySensor(clientID, sensorHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
detectionState = c_ubyte()
detectedObjectHandle = c_int()
detectedPoint = (c_float*3)()
detectedSurfaceNormalVector = (c_float*3)()
ret = c_ReadProximitySensor(clientID, sensorHandle, byref(detectionState), detectedPoint, byref(detectedObjectHandle), detectedSurfaceNormalVector, operationMode)
arr1 = []
for i in range(3):
arr1.append(detectedPoint[i])
arr2 = []
for i in range(3):
arr2.append(detectedSurfaceNormalVector[i])
return ret, bool(detectionState.value!=0), arr1, detectedObjectHandle.value, arr2
def simxLoadModel(clientID, modelPathAndName, options, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
baseHandle = c_int()
return c_LoadModel(clientID, modelPathAndName, options, byref(baseHandle), operationMode), baseHandle.value
def simxLoadUI(clientID, uiPathAndName, options, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
count = c_int()
uiHandles = pointer(c_int())
ret = c_LoadUI(clientID, uiPathAndName, options, byref(count), byref(uiHandles), operationMode)
handles = []
if ret == 0:
for i in range(count.value):
handles.append(uiHandles[i])
#free C buffers
c_ReleaseBuffer(uiHandles)
return ret, handles
def simxLoadScene(clientID, scenePathAndName, options, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_LoadScene(clientID, scenePathAndName, options, operationMode)
def simxStartSimulation(clientID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_StartSimulation(clientID, operationMode)
def simxPauseSimulation(clientID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_PauseSimulation(clientID, operationMode)
def simxStopSimulation(clientID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_StopSimulation(clientID, operationMode)
def simxGetUIHandle(clientID, uiName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
handle = c_int()
return c_GetUIHandle(clientID, uiName, byref(handle), operationMode), handle.value
def simxGetUISlider(clientID, uiHandle, uiButtonID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
position = c_int()
return c_GetUISlider(clientID, uiHandle, uiButtonID, byref(position), operationMode), position.value
def simxSetUISlider(clientID, uiHandle, uiButtonID, position, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetUISlider(clientID, uiHandle, uiButtonID, position, operationMode)
def simxGetUIEventButton(clientID, uiHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
uiEventButtonID = c_int()
auxValues = (c_int*2)()
ret = c_GetUIEventButton(clientID, uiHandle, byref(uiEventButtonID), auxValues, operationMode)
arr = []
for i in range(2):
arr.append(auxValues[i])
return ret, uiEventButtonID.value, arr
def simxGetUIButtonProperty(clientID, uiHandle, uiButtonID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
prop = c_int()
return c_GetUIButtonProperty(clientID, uiHandle, uiButtonID, byref(prop), operationMode), prop.value
def simxSetUIButtonProperty(clientID, uiHandle, uiButtonID, prop, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetUIButtonProperty(clientID, uiHandle, uiButtonID, prop, operationMode)
def simxAddStatusbarMessage(clientID, message, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_AddStatusbarMessage(clientID, message, operationMode)
def simxAuxiliaryConsoleOpen(clientID, title, maxLines, mode, position, size, textColor, backgroundColor, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
consoleHandle = c_int()
    if position is not None:
        c_position = (c_int*2)(*position)
    else:
        c_position = None
    if size is not None:
        c_size = (c_int*2)(*size)
    else:
        c_size = None
    if textColor is not None:
        c_textColor = (c_float*3)(*textColor)
    else:
        c_textColor = None
    if backgroundColor is not None:
        c_backgroundColor = (c_float*3)(*backgroundColor)
    else:
        c_backgroundColor = None
return c_AuxiliaryConsoleOpen(clientID, title, maxLines, mode, c_position, c_size, c_textColor, c_backgroundColor, byref(consoleHandle), operationMode), consoleHandle.value
def simxAuxiliaryConsoleClose(clientID, consoleHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_AuxiliaryConsoleClose(clientID, consoleHandle, operationMode)
def simxAuxiliaryConsolePrint(clientID, consoleHandle, txt, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_AuxiliaryConsolePrint(clientID, consoleHandle, txt, operationMode)
def simxAuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_AuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode)
def simxGetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
eulerAngles = (c_float*3)()
ret = c_GetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, eulerAngles, operationMode)
arr = []
for i in range(3):
arr.append(eulerAngles[i])
return ret, arr
def simxGetObjectPosition(clientID, objectHandle, relativeToObjectHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
position = (c_float*3)()
ret = c_GetObjectPosition(clientID, objectHandle, relativeToObjectHandle, position, operationMode)
arr = []
for i in range(3):
arr.append(position[i])
return ret, arr
def simxSetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, eulerAngles, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
angles = (c_float*3)(*eulerAngles)
return c_SetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, angles, operationMode)
def simxSetObjectPosition(clientID, objectHandle, relativeToObjectHandle, position, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
c_position = (c_float*3)(*position)
return c_SetObjectPosition(clientID, objectHandle, relativeToObjectHandle, c_position, operationMode)
def simxSetObjectParent(clientID, objectHandle, parentObject, keepInPlace, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetObjectParent(clientID, objectHandle, parentObject, keepInPlace, operationMode)
def simxSetUIButtonLabel(clientID, uiHandle, uiButtonID, upStateLabel, downStateLabel, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetUIButtonLabel(clientID, uiHandle, uiButtonID, upStateLabel, downStateLabel, operationMode)
def simxGetLastErrors(clientID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
errors =[]
errorCnt = c_int()
errorStrings = pointer(c_char())
ret = c_GetLastErrors(clientID, byref(errorCnt), byref(errorStrings), operationMode)
if ret == 0:
s = 0
for i in range(errorCnt.value):
a = bytearray()
while errorStrings[s] != '\0':
a.append(errorStrings[s])
s += 1
s += 1 #skip null
errors.append(str(a))
return ret, errors
def simxGetArrayParameter(clientID, paramIdentifier, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
paramValues = (c_float*3)()
ret = c_GetArrayParameter(clientID, paramIdentifier, paramValues, operationMode)
arr = []
for i in range(3):
arr.append(paramValues[i])
return ret, arr
def simxSetArrayParameter(clientID, paramIdentifier, paramValues, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
c_paramValues = (c_float*3)(*paramValues)
return c_SetArrayParameter(clientID, paramIdentifier, c_paramValues, operationMode)
def simxGetBooleanParameter(clientID, paramIdentifier, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
paramValue = c_ubyte()
return c_GetBooleanParameter(clientID, paramIdentifier, byref(paramValue), operationMode), bool(paramValue.value!=0)
def simxSetBooleanParameter(clientID, paramIdentifier, paramValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetBooleanParameter(clientID, paramIdentifier, paramValue, operationMode)
def simxGetIntegerParameter(clientID, paramIdentifier, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
paramValue = c_int()
return c_GetIntegerParameter(clientID, paramIdentifier, byref(paramValue), operationMode), paramValue.value
def simxSetIntegerParameter(clientID, paramIdentifier, paramValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetIntegerParameter(clientID, paramIdentifier, paramValue, operationMode)
def simxGetFloatingParameter(clientID, paramIdentifier, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
paramValue = c_float()
return c_GetFloatingParameter(clientID, paramIdentifier, byref(paramValue), operationMode), paramValue.value
def simxSetFloatingParameter(clientID, paramIdentifier, paramValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetFloatingParameter(clientID, paramIdentifier, paramValue, operationMode)
def simxGetStringParameter(clientID, paramIdentifier, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
paramValue = pointer(c_char())
ret = c_GetStringParameter(clientID, paramIdentifier, byref(paramValue), operationMode)
a = bytearray()
if ret == 0:
i = 0
while paramValue[i] != '\0':
a.append(paramValue[i])
i=i+1
return ret, str(a)
def simxGetCollisionHandle(clientID, collisionObjectName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
handle = c_int()
return c_GetCollisionHandle(clientID, collisionObjectName, byref(handle), operationMode), handle.value
def simxGetDistanceHandle(clientID, distanceObjectName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
handle = c_int()
return c_GetDistanceHandle(clientID, distanceObjectName, byref(handle), operationMode), handle.value
def simxReadCollision(clientID, collisionObjectHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
collisionState = c_ubyte()
return c_ReadCollision(clientID, collisionObjectHandle, byref(collisionState), operationMode), bool(collisionState.value!=0)
def simxReadDistance(clientID, distanceObjectHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
minimumDistance = c_float()
return c_ReadDistance(clientID, distanceObjectHandle, byref(minimumDistance), operationMode), minimumDistance.value
def simxRemoveObject(clientID, objectHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_RemoveObject(clientID, objectHandle, operationMode)
def simxRemoveModel(clientID, objectHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_RemoveModel(clientID, objectHandle, operationMode)
def simxRemoveUI(clientID, uiHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_RemoveUI(clientID, uiHandle, operationMode)
def simxCloseScene(clientID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_CloseScene(clientID, operationMode)
def simxGetObjects(clientID, objectType, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
objectCount = c_int()
objectHandles = pointer(c_int())
ret = c_GetObjects(clientID, objectType, byref(objectCount), byref(objectHandles), operationMode)
handles = []
if ret == 0:
for i in range(objectCount.value):
handles.append(objectHandles[i])
return ret, handles
def simxDisplayDialog(clientID, titleText, mainText, dialogType, initialText, titleColors, dialogColors, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
    if titleColors is not None:
        c_titleColors = (c_float*6)(*titleColors)
    else:
        c_titleColors = None
    if dialogColors is not None:
        c_dialogColors = (c_float*6)(*dialogColors)
    else:
        c_dialogColors = None
c_dialogHandle = c_int()
c_uiHandle = c_int()
return c_DisplayDialog(clientID, titleText, mainText, dialogType, initialText, c_titleColors, c_dialogColors, byref(c_dialogHandle), byref(c_uiHandle), operationMode), c_dialogHandle.value, c_uiHandle.value
def simxEndDialog(clientID, dialogHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_EndDialog(clientID, dialogHandle, operationMode)
def simxGetDialogInput(clientID, dialogHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
inputText = pointer(c_char())
ret = c_GetDialogInput(clientID, dialogHandle, byref(inputText), operationMode)
a = bytearray()
if ret == 0:
i = 0
while inputText[i] != '\0':
a.append(inputText[i])
i = i+1
return ret, str(a)
def simxGetDialogResult(clientID, dialogHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
result = c_int()
return c_GetDialogResult(clientID, dialogHandle, byref(result), operationMode), result.value
def simxCopyPasteObjects(clientID, objectHandles, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
c_objectHandles = (c_int*len(objectHandles))(*objectHandles)
newObjectCount = c_int()
newObjectHandles = pointer(c_int())
ret = c_CopyPasteObjects(clientID, c_objectHandles, len(objectHandles), byref(newObjectHandles), byref(newObjectCount), operationMode)
newobj = []
if ret == 0:
for i in range(newObjectCount.value):
newobj.append(newObjectHandles[i])
return ret, newobj
def simxGetObjectSelection(clientID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
objectCount = c_int()
objectHandles = pointer(c_int())
ret = c_GetObjectSelection(clientID, byref(objectHandles), byref(objectCount), operationMode)
newobj = []
if ret == 0:
for i in range(objectCount.value):
newobj.append(objectHandles[i])
return ret, newobj
def simxSetObjectSelection(clientID, objectHandles, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
c_objectHandles = (c_int*len(objectHandles))(*objectHandles)
return c_SetObjectSelection(clientID, c_objectHandles, len(objectHandles), operationMode)
def simxClearFloatSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_ClearFloatSignal(clientID, signalName, operationMode)
def simxClearIntegerSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_ClearIntegerSignal(clientID, signalName, operationMode)
def simxClearStringSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_ClearStringSignal(clientID, signalName, operationMode)
def simxGetFloatSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
signalValue = c_float()
return c_GetFloatSignal(clientID, signalName, byref(signalValue), operationMode), signalValue.value
def simxGetIntegerSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
signalValue = c_int()
return c_GetIntegerSignal(clientID, signalName, byref(signalValue), operationMode), signalValue.value
def simxGetStringSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
    signalLength = c_int()
signalValue = pointer(c_ubyte())
ret = c_GetStringSignal(clientID, signalName, byref(signalValue), byref(signalLength), operationMode)
a = bytearray()
if ret == 0:
for i in range(signalLength.value):
a.append(signalValue[i])
return ret, str(a)
def simxGetAndClearStringSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
    signalLength = c_int()
signalValue = pointer(c_ubyte())
ret = c_GetAndClearStringSignal(clientID, signalName, byref(signalValue), byref(signalLength), operationMode)
a = bytearray()
if ret == 0:
for i in range(signalLength.value):
a.append(signalValue[i])
return ret, str(a)
def simxReadStringStream(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
    signalLength = c_int()
signalValue = pointer(c_ubyte())
ret = c_ReadStringStream(clientID, signalName, byref(signalValue), byref(signalLength), operationMode)
a = bytearray()
if ret == 0:
for i in range(signalLength.value):
a.append(signalValue[i])
return ret, str(a)
def simxSetFloatSignal(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetFloatSignal(clientID, signalName, signalValue, operationMode)
def simxSetIntegerSignal(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetIntegerSignal(clientID, signalName, signalValue, operationMode)
def simxSetStringSignal(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetStringSignal(clientID, signalName, signalValue, len(signalValue), operationMode)
def simxAppendStringSignal(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_AppendStringSignal(clientID, signalName, signalValue, len(signalValue), operationMode)
def simxWriteStringStream(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_WriteStringStream(clientID, signalName, signalValue, len(signalValue), operationMode)
def simxGetObjectFloatParameter(clientID, objectHandle, parameterID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
parameterValue = c_float()
return c_GetObjectFloatParameter(clientID, objectHandle, parameterID, byref(parameterValue), operationMode), parameterValue.value
def simxSetObjectFloatParameter(clientID, objectHandle, parameterID, parameterValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetObjectFloatParameter(clientID, objectHandle, parameterID, parameterValue, operationMode)
def simxGetObjectIntParameter(clientID, objectHandle, parameterID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
parameterValue = c_int()
return c_GetObjectIntParameter(clientID, objectHandle, parameterID, byref(parameterValue), operationMode), parameterValue.value
def simxSetObjectIntParameter(clientID, objectHandle, parameterID, parameterValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetObjectIntParameter(clientID, objectHandle, parameterID, parameterValue, operationMode)
def simxGetModelProperty(clientID, objectHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
prop = c_int()
return c_GetModelProperty(clientID, objectHandle, byref(prop), operationMode), prop.value
def simxSetModelProperty(clientID, objectHandle, prop, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetModelProperty(clientID, objectHandle, prop, operationMode)
def simxStart(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_Start(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs)
def simxFinish(clientID):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_Finish(clientID)
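# NOTE: illustrative sketch, not part of the original bindings. Typical client lifecycle:
# close stale connections, open a new one with simxStart, do the work, and always call
# simxFinish. The address and port are assumptions (19997 is the customary continuous
# remote API port); the work callback receives whatever opmode constant it needs.
def _example_connect(do_work, opmode):
    simxFinish(-1)  # close any connection left over from a previous run
    clientID = simxStart('127.0.0.1', 19997, True, True, 5000, 5)
    if clientID == -1:
        return None
    try:
        return do_work(clientID, opmode)
    finally:
        simxFinish(clientID)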
def simxGetPingTime(clientID):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
pingTime = c_int()
return c_GetPingTime(clientID, byref(pingTime)), pingTime.value
def simxGetLastCmdTime(clientID):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_GetLastCmdTime(clientID)
def simxSynchronousTrigger(clientID):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SynchronousTrigger(clientID)
def simxSynchronous(clientID, enable):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_Synchronous(clientID, enable)
def simxPauseCommunication(clientID, enable):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_PauseCommunication(clientID, enable)
def simxGetInMessageInfo(clientID, infoType):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
info = c_int()
return c_GetInMessageInfo(clientID, infoType, byref(info)), info.value
def simxGetOutMessageInfo(clientID, infoType):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
info = c_int()
return c_GetOutMessageInfo(clientID, infoType, byref(info)), info.value
def simxGetConnectionId(clientID):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_GetConnectionId(clientID)
def simxCreateBuffer(bufferSize):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_CreateBuffer(bufferSize)
def simxReleaseBuffer(buffer):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_ReleaseBuffer(buffer)
def simxTransferFile(clientID, filePathAndName, fileName_serverSide, timeOut, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_TransferFile(clientID, filePathAndName, fileName_serverSide, timeOut, operationMode)
def simxEraseFile(clientID, fileName_serverSide, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_EraseFile(clientID, fileName_serverSide, operationMode)
def simxCreateDummy(clientID, size, color, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
handle = c_int()
    if color is not None:
        c_color = (c_ubyte*12)(*color)
    else:
        c_color = None
return c_CreateDummy(clientID, size, c_color, byref(handle), operationMode), handle.value
def simxQuery(clientID, signalName, signalValue, retSignalName, timeOutInMs):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
    retSignalLength = c_int()
retSignalValue = pointer(c_ubyte())
ret = c_Query(clientID, signalName, signalValue, len(signalValue), retSignalName, byref(retSignalValue), byref(retSignalLength), timeOutInMs)
a = bytearray()
if ret == 0:
for i in range(retSignalLength.value):
a.append(retSignalValue[i])
return ret, str(a)
def simxGetObjectGroupData(clientID, objectType, dataType, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
handles =[]
intData =[]
floatData =[]
stringData =[]
handlesC = c_int()
handlesP = pointer(c_int())
intDataC = c_int()
intDataP = pointer(c_int())
floatDataC = c_int()
floatDataP = pointer(c_float())
stringDataC = c_int()
stringDataP = pointer(c_char())
ret = c_GetObjectGroupData(clientID, objectType, dataType, byref(handlesC), byref(handlesP), byref(intDataC), byref(intDataP), byref(floatDataC), byref(floatDataP), byref(stringDataC), byref(stringDataP), operationMode)
if ret == 0:
for i in range(handlesC.value):
handles.append(handlesP[i])
for i in range(intDataC.value):
intData.append(intDataP[i])
for i in range(floatDataC.value):
floatData.append(floatDataP[i])
s = 0
for i in range(stringDataC.value):
a = bytearray()
while stringDataP[s] != '\0':
a.append(stringDataP[s])
s += 1
s += 1 #skip null
stringData.append(str(a))
return ret, handles, intData, floatData, stringData
def simxGetObjectVelocity(clientID, objectHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
linearVel = (c_float*3)()
angularVel = (c_float*3)()
ret = c_GetObjectVelocity(clientID, objectHandle, linearVel, angularVel, operationMode)
arr1 = []
for i in range(3):
arr1.append(linearVel[i])
arr2 = []
for i in range(3):
arr2.append(angularVel[i])
return ret, arr1, arr2
def simxPackInts(intList):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
s=''
for i in range(len(intList)):
s+=struct.pack('<i',intList[i])
return s
def simxUnpackInts(intsPackedInString):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
b=[]
    for i in range(len(intsPackedInString)//4):
b.append(struct.unpack('<i',intsPackedInString[4*i:4*(i+1)])[0])
return b
def simxPackFloats(floatList):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
s=''
for i in range(len(floatList)):
s+=struct.pack('<f',floatList[i])
return s
def simxUnpackFloats(floatsPackedInString):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
b=[]
    for i in range(len(floatsPackedInString)//4):
b.append(struct.unpack('<f',floatsPackedInString[4*i:4*(i+1)])[0])
return b
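# NOTE: illustrative sketch, not part of the original bindings. The pack/unpack helpers
# above produce little-endian byte strings, which is the form expected by the string
# signal functions; the signal name and the opmode are arbitrary assumptions here.
def _example_send_floats(clientID, values, opmode):
    packed = simxPackFloats(values)  # e.g. [1.0, 2.5] -> an 8-byte string
    return simxSetStringSignal(clientID, 'exampleFloats', packed, opmode)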
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AccessPolicy
from ._models_py3 import AppendPositionAccessConditions
from ._models_py3 import ArrowConfiguration
from ._models_py3 import ArrowField
from ._models_py3 import BlobFlatListSegment
from ._models_py3 import BlobHTTPHeaders
from ._models_py3 import BlobHierarchyListSegment
from ._models_py3 import BlobItemInternal
from ._models_py3 import BlobMetadata
from ._models_py3 import BlobName
from ._models_py3 import BlobPrefix
from ._models_py3 import BlobPropertiesInternal
from ._models_py3 import BlobTag
from ._models_py3 import BlobTags
from ._models_py3 import Block
from ._models_py3 import BlockList
from ._models_py3 import BlockLookupList
from ._models_py3 import ClearRange
from ._models_py3 import ContainerCpkScopeInfo
from ._models_py3 import ContainerItem
from ._models_py3 import ContainerProperties
from ._models_py3 import CorsRule
from ._models_py3 import CpkInfo
from ._models_py3 import CpkScopeInfo
from ._models_py3 import DelimitedTextConfiguration
from ._models_py3 import FilterBlobItem
from ._models_py3 import FilterBlobSegment
from ._models_py3 import GeoReplication
from ._models_py3 import JsonTextConfiguration
from ._models_py3 import KeyInfo
from ._models_py3 import LeaseAccessConditions
from ._models_py3 import ListBlobsFlatSegmentResponse
from ._models_py3 import ListBlobsHierarchySegmentResponse
from ._models_py3 import ListContainersSegmentResponse
from ._models_py3 import Logging
from ._models_py3 import Metrics
from ._models_py3 import ModifiedAccessConditions
from ._models_py3 import PageList
from ._models_py3 import PageRange
from ._models_py3 import QueryFormat
from ._models_py3 import QueryRequest
from ._models_py3 import QuerySerialization
from ._models_py3 import RetentionPolicy
from ._models_py3 import SequenceNumberAccessConditions
from ._models_py3 import SignedIdentifier
from ._models_py3 import SourceModifiedAccessConditions
from ._models_py3 import StaticWebsite
from ._models_py3 import StorageError
from ._models_py3 import StorageServiceProperties
from ._models_py3 import StorageServiceStats
from ._models_py3 import UserDelegationKey
except (SyntaxError, ImportError):
from ._models import AccessPolicy # type: ignore
from ._models import AppendPositionAccessConditions # type: ignore
from ._models import ArrowConfiguration # type: ignore
from ._models import ArrowField # type: ignore
from ._models import BlobFlatListSegment # type: ignore
from ._models import BlobHTTPHeaders # type: ignore
from ._models import BlobHierarchyListSegment # type: ignore
from ._models import BlobItemInternal # type: ignore
from ._models import BlobMetadata # type: ignore
from ._models import BlobName # type: ignore
from ._models import BlobPrefix # type: ignore
from ._models import BlobPropertiesInternal # type: ignore
from ._models import BlobTag # type: ignore
from ._models import BlobTags # type: ignore
from ._models import Block # type: ignore
from ._models import BlockList # type: ignore
from ._models import BlockLookupList # type: ignore
from ._models import ClearRange # type: ignore
from ._models import ContainerCpkScopeInfo # type: ignore
from ._models import ContainerItem # type: ignore
from ._models import ContainerProperties # type: ignore
from ._models import CorsRule # type: ignore
from ._models import CpkInfo # type: ignore
from ._models import CpkScopeInfo # type: ignore
from ._models import DelimitedTextConfiguration # type: ignore
from ._models import FilterBlobItem # type: ignore
from ._models import FilterBlobSegment # type: ignore
from ._models import GeoReplication # type: ignore
from ._models import JsonTextConfiguration # type: ignore
from ._models import KeyInfo # type: ignore
from ._models import LeaseAccessConditions # type: ignore
from ._models import ListBlobsFlatSegmentResponse # type: ignore
from ._models import ListBlobsHierarchySegmentResponse # type: ignore
from ._models import ListContainersSegmentResponse # type: ignore
from ._models import Logging # type: ignore
from ._models import Metrics # type: ignore
from ._models import ModifiedAccessConditions # type: ignore
from ._models import PageList # type: ignore
from ._models import PageRange # type: ignore
from ._models import QueryFormat # type: ignore
from ._models import QueryRequest # type: ignore
from ._models import QuerySerialization # type: ignore
from ._models import RetentionPolicy # type: ignore
from ._models import SequenceNumberAccessConditions # type: ignore
from ._models import SignedIdentifier # type: ignore
from ._models import SourceModifiedAccessConditions # type: ignore
from ._models import StaticWebsite # type: ignore
from ._models import StorageError # type: ignore
from ._models import StorageServiceProperties # type: ignore
from ._models import StorageServiceStats # type: ignore
from ._models import UserDelegationKey # type: ignore
from ._azure_blob_storage_enums import (
AccessTier,
AccessTierOptional,
AccessTierRequired,
AccountKind,
ArchiveStatus,
BlobExpiryOptions,
BlobImmutabilityPolicyMode,
BlobType,
BlockListType,
CopyStatusType,
DeleteSnapshotsOptionType,
EncryptionAlgorithmType,
GeoReplicationStatusType,
LeaseDurationType,
LeaseStateType,
LeaseStatusType,
ListBlobsIncludeItem,
ListContainersIncludeType,
PremiumPageBlobAccessTier,
PublicAccessType,
QueryFormatType,
RehydratePriority,
SequenceNumberActionType,
SkuName,
StorageErrorCode,
)
__all__ = [
'AccessPolicy',
'AppendPositionAccessConditions',
'ArrowConfiguration',
'ArrowField',
'BlobFlatListSegment',
'BlobHTTPHeaders',
'BlobHierarchyListSegment',
'BlobItemInternal',
'BlobMetadata',
'BlobName',
'BlobPrefix',
'BlobPropertiesInternal',
'BlobTag',
'BlobTags',
'Block',
'BlockList',
'BlockLookupList',
'ClearRange',
'ContainerCpkScopeInfo',
'ContainerItem',
'ContainerProperties',
'CorsRule',
'CpkInfo',
'CpkScopeInfo',
'DelimitedTextConfiguration',
'FilterBlobItem',
'FilterBlobSegment',
'GeoReplication',
'JsonTextConfiguration',
'KeyInfo',
'LeaseAccessConditions',
'ListBlobsFlatSegmentResponse',
'ListBlobsHierarchySegmentResponse',
'ListContainersSegmentResponse',
'Logging',
'Metrics',
'ModifiedAccessConditions',
'PageList',
'PageRange',
'QueryFormat',
'QueryRequest',
'QuerySerialization',
'RetentionPolicy',
'SequenceNumberAccessConditions',
'SignedIdentifier',
'SourceModifiedAccessConditions',
'StaticWebsite',
'StorageError',
'StorageServiceProperties',
'StorageServiceStats',
'UserDelegationKey',
'AccessTier',
'AccessTierOptional',
'AccessTierRequired',
'AccountKind',
'ArchiveStatus',
'BlobExpiryOptions',
'BlobImmutabilityPolicyMode',
'BlobType',
'BlockListType',
'CopyStatusType',
'DeleteSnapshotsOptionType',
'EncryptionAlgorithmType',
'GeoReplicationStatusType',
'LeaseDurationType',
'LeaseStateType',
'LeaseStatusType',
'ListBlobsIncludeItem',
'ListContainersIncludeType',
'PremiumPageBlobAccessTier',
'PublicAccessType',
'QueryFormatType',
'RehydratePriority',
'SequenceNumberActionType',
'SkuName',
'StorageErrorCode',
]
|
|
"""
Utility functions for computational geometry
Built around shapely.
"""
from __future__ import print_function, division, absolute_import
import math
from math import pi, sqrt
from numbers import Number as _Number
import array
from shapely.geometry import (
LineString, Polygon, Point, box, asPoint,
asPolygon, MultiPoint, MultiLineString
)
from shapely.geometry.polygon import LinearRing
from shapely.affinity import rotate
from shapely.topology import TopologicalError as _TopologicalError
from . import util
from six.moves import map
def _normalize_point(p):
if isinstance(p, (tuple, list)):
if isinstance(p[0], array.array):
if len(p[0]) == 1:
return p[0][0], p[1][0]
else:
raise TypeError("only points supported")
else:
if all(isinstance(n, _Number) for n in p):
return p
else:
raise TypeError("each point must be a tuple (x, y) of float")
elif hasattr(p, "x") and hasattr(p, "y"):
return (p.x, p.y)
else:
raise TypeError("point not understood")
def _normalize_points(points):
if all(isinstance(p, _Number) for p in points):
points = util.window(points, 2, 2)
coords = list(map(_normalize_point, points))
return coords
###############################################
#
# Helpers to create shapely geometries
#
################################################
def linestr(*points):
"""
create a line-segment from the given points
Example
=======
>>> l = linestr((0, 0), (1, 1), (2, -1))
>>> l.bounds
(0.0, -1.0, 2.0, 1.0)
>>> [coord for coord in l.coords]
[(0.0, 0.0), (1.0, 1.0), (2.0, -1.0)]
"""
coords = _normalize_points(points)
return LineString(coords)
def rect_poly(x0, y0, x1, y1):
"""
a rectangular polygon (filled)
"""
return box(x0, y0, x1, y1)
def rect_line(x0, y0, x1, y1):
"""
the perimeter of a rectangle, without any dimensions.
"""
return LinearRing([(x0, y0), (x1, y0), (x1, y1), (x0, y1)])
def circle(centerx, centery, radius):
return Point(centerx, centery).buffer(radius)
def line_at_x(line, x):
_, y0, _, y1 = line.bounds
linex = LineString([(x, y0), (x, y1)])
return line.intersection(linex)
def ring(centerx, centery, radius, width):
"""
a circular ring
"""
c_out = Point(centerx, centery).buffer(radius)
c_in = Point(centerx, centery).buffer(radius - width)
return c_out.difference(c_in)
def line(x0, y0, x1, y1, width=None):
l = LineString([(x0, y0), (x1, y1)])
if width is not None:
l = l.buffer(width)
return l
def linering(*points):
"""
create a LinearRing (a closed unfilled polygon) from the points given.
A LinearRing is the "border" of a polygon
Example
=======
linering((0, 1), Point(1, 7), (10, 2))
each point is a tuple (x, y) or (x, y, z) or a Point
"""
coords = _normalize_points(points)
return LinearRing(coords)
def line_extrapolate_point(l, p, length):
"""
Return a Point p2 which would extend the line `l` so that it
would have a length of `length`
l: a line
p: a point within that line
length: the length that a line from p to p2 would have
"""
p = Point(*_normalize_point(p))
a = line_angle_at(l, p)
if a > pi:
a = a % pi
p2 = Point(p.x, p.y + length)
c = l.centroid
if p.x < c.x:
if p.y < c.y:
angle = pi-a
else:
angle = a
    elif p.x > c.x:
        angle = -a
else:
if p.y < c.y:
angle = a + pi
elif p.y > c.y:
angle = a % pi
else:
angle = 100
p3 = rotate(p2, math.degrees(angle), origin=p)
return p3
def line_extend(l, p, distance):
p2 = line_extrapolate_point(l, p, distance)
l2 = linestr(p, p2)
return l.union(l2)
def test_line_extrapolate_point():
f = line_extrapolate_point
assert f(linestr(0, 0, 1, 0), (0, 0), 1).equals(Point(-1, 0))
assert f(linestr(0, 0, 1, 0), (1, 0), 1).equals(Point(2, 0))
assert f(linestr(0, 0, 1, 1), (1, 1), 1).almost_equals(Point(1+sqrt(0.5), 1+sqrt(0.5)))
assert f(linestr(0, 0, 1, 1), (0, 0), 1).almost_equals(Point(-sqrt(0.5), -sqrt(0.5)))
assert f(linestr(0, 1, 1, 0), (0, 1), 1).almost_equals(Point(-sqrt(0.5), 1+sqrt(0.5)))
assert f(linestr(0, 1, 1, 0), (1, 0), 1).almost_equals(Point(1+sqrt(0.5), -sqrt(0.5)))
assert f(linestr(0, 0, 0, 1), (0, 1), 1).equals(Point(0, 2))
assert f(linestr(0, 0, 0, 1), (0, 0), 1).equals(Point(0, -1))
def tube(points, diam, wallwidth=0.05, begin='closed', end='flat'):
"""
create a tube.
A tube is a set of two parallel lines, where the edges are either
closed (curved), open, or flat
"""
l = linestr(*points)
return linestr_to_tube(l, diam=diam, wallwidth=wallwidth, begin=begin, end=end)
def linestr_to_tube(l, diam, wallwidth=0.05, begin='closed', end='flat'):
"""
convert a linestring to a tube
l: a line string
diam: inner diameter of the tube
wallwidth: width of the wall of the tube
begin, end: one of 'closed', 'flat', 'open'.
Indicates the shape of the extremes.
"""
r = diam * 0.5
t = l.buffer(r+wallwidth).difference(l.buffer(r))
def get_mask(l, p):
p = _normalize_point(p)
total_diam = (r+wallwidth)*2
perp0 = perpendicular_at(l, p, total_diam)
p2 = line_extrapolate_point(l, p, (r+wallwidth)*1.01)
perp1 = perpendicular_at(l, p2, total_diam)
mask = asPolygon(
linering(perp0.coords[0], perp0.coords[1], perp1.coords[1], perp1.coords[0])
).convex_hull
return mask
if begin == 'open':
mask = get_mask(l, l.coords[0])
t = t.difference(mask)
elif begin == 'flat':
mask = get_mask(l, l.coords[0])
t = t.union(mask)
if end == 'open':
mask = get_mask(l, l.coords[-1])
t = t.difference(mask)
if end == 'flat':
mask = get_mask(l, l.coords[-1])
t = t.union(mask)
return t
tube_from_line = linestr_to_tube
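# NOTE: illustrative sketch, not part of the original module. It shows the intent of
# tube()/linestr_to_tube(): buffer a polyline into a ring-shaped wall and shape the two
# extremes independently. The coordinates and sizes below are arbitrary.
def _example_tube():
    path = [(0, 0), (2, 0), (2, 2)]
    t = tube(path, diam=0.5, wallwidth=0.05, begin='closed', end='open')
    return t.bounds  # bounding box of the resulting wall geometry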
def perpendicular_at(line, point, length):
"""
line: a linestring
point: a point within the line at which to search for a perpendicular line
length: length of the line
"""
point = asPoint(point)
E = 1e-8
if line.intersects(point):
refpoint = point
else:
r = 16
while True:
refpoint = point.buffer(line.distance(point)+E, resolution=r).exterior.intersection(line)
if not refpoint.is_empty:
break
else:
r = r * 2
assert not refpoint.is_empty
a = line_angle_at(line, refpoint)
a2 = a + pi/2
p2 = Point(point.x, point.y + length*0.5)
p3 = rotate(p2, -math.degrees(a2), origin=point)
p4 = rotate(p2, (180 - math.degrees(a2)), origin=point)
l = linestr(p3, p4)
return l
def line_angle_at(line, point, h=0.001):
"""
    return the angle of `line` at the given `point`.
    If the point is not on the line, return the angle at the
    nearest point on the line.
"""
point = Point(*_normalize_point(point))
if not line.intersects(point):
point = nearest_point(line, point)
bufdist = min(line.length, h)
c = point.buffer(bufdist).exterior
points = c.intersection(line)
if isinstance(points, Point):
# only one intersection, point is one of the extremes
a = points
b = line.intersection(point.buffer(bufdist*2).exterior)
if not isinstance(b, Point):
b = b[0]
else:
assert len(points) == 2
a, b = points
return angle_from_points(a.centroid, b.centroid)
def angle_at(geom, point, h=0.00001):
if not isinstance(point, Point):
point = Point(*point)
geomext = edge(geom)
if geomext.contains(point):
nearest = point
else:
nearest = nearest_point(geomext, point)
c = nearest.buffer(h).exterior.intersection(geomext)
if c.is_empty:
return angle_at(geom, nearest, h*3)
if isinstance(c, MultiPoint):
a, b = c[:2]
return angle_from_points(a, b)
elif isinstance(c, LineString):
a = Point(*c.coords[0])
b = Point(*c.coords[-1])
return angle_from_points(a, b)
elif isinstance(c, MultiLineString):
a = c[0].centroid
        b = c[-1].centroid  # distinct from a when several pieces intersect; using c[0] twice made the angle degenerate
return angle_from_points(a, b)
else:
        raise ValueError("unexpected geometry type from intersection: %s" % type(c).__name__)
def angle_from_points(a, b):
"""
the angle between the points a and b in radians
0: a and b are aligned with the y axis
Example
=======
>>> angle_from_points((0, 0), (1, 1)) # 45 degrees
0.7853981633974482
>>> angle_from_points((0, 0), (0, 1)) # 0 deg
0.0
North = 0
NB: convert to 360 with 'degrees' --> degrees(0.7853981633974482) = 45
"""
a, b = _normalize_points((a, b))
ax, ay = a
bx, by = b
A = by - ay
O = bx - ax
H = sqrt(O**2 + A**2)
if H == 0:
return 0
sin_alpha = O/H
alpha = math.asin(sin_alpha)
if by < ay:
if alpha > 0:
alpha += pi
else:
alpha = pi + abs(alpha)
alpha = alpha % (pi*2)
return alpha
def edge(geom):
"""
return a polygon representing the edge of `geom`
"""
h = 1e-8
    try:
        geomext = geom.exterior
    except AttributeError:
        try:
            geomext = geom.buffer(h).exterior
        except AttributeError:
            geomext = geom
return geomext
def nearest_point(geom, p, eps=None):
"""
find the point in `geom` which is nearest from point `p`
eps: epsilon
"""
MINDIST = 1e-16
if not isinstance(p, Point):
p = Point(*p)
if geom.contains(p):
return p
if eps is None:
eps = geom.distance(p) * 0.0001
dist = geom.distance(p)
if dist < MINDIST:
dist = 1e-12
try:
circunf = p.buffer(dist+eps)
p2 = circunf.exterior.intersection(geom)
except _TopologicalError:
return nearest_point(geom, p, eps*3)
if circunf.contains(geom):
# we are probably near the centroid of the geom, inside it
n = geom.representative_point()
assert abs(n.distance(p) - geom.distance(p)) < 1e-3
return n
if p2.is_empty:
# eps is too small, try with a larger one
return nearest_point(geom, p, eps*6)
if isinstance(p2, MultiPoint):
a = p2[0]
b = p2[1]
p3 = linestr(a, b).centroid
elif isinstance(p2, Polygon):
p3 = linestr(p2.centroid, p).intersection(p2.exterior)
elif isinstance(p2, LineString):
p3 = p2.centroid
elif isinstance(p2, MultiLineString):
p3 = p2[0].centroid
else:
raise TypeError("other geometries not supported YET")
assert not p3.is_empty and isinstance(p3, Point)
return p3
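# NOTE: illustrative sketch, not part of the original module. nearest_point probes
# outward from `p` with growing buffers until the buffer boundary touches `geom`; for a
# simple axis-aligned case the result should be close to the obvious projection.
def _example_nearest_point():
    square = rect_poly(0, 0, 1, 1)
    p = nearest_point(square, (2, 0.5))
    return round(p.x, 3), round(p.y, 3)  # expected to be roughly (1.0, 0.5)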
def holes(geom):
"""
return the geometry which would fill the holes in geom
"""
return tight_envelope(geom).difference(geom)
def tight_envelope(geom):
"""
return the geometry which builds an envelope around `geom`
"""
    if hasattr(geom, 'geoms'):
        g0 = max((sub.envelope.area, sub) for sub in geom.geoms)[1]
        g00 = asPolygon(g0.exterior)
    elif isinstance(geom, Polygon):
        g00 = asPolygon(geom.exterior)
    else:
        raise TypeError("geometry type not supported: %s" % type(geom).__name__)
    return g00
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import calendar
import math
import platform
import re
import sys
from collections import defaultdict
from . import wcwidth
from .displaying import colorme, FormattedValue, DEFAULT_VALUE_COLORS
from cassandra.cqltypes import EMPTY
from cassandra.util import datetime_from_timestamp
from util import UTC
is_win = platform.system() == 'Windows'
unicode_controlchars_re = re.compile(r'[\x00-\x31\x7f-\xa0]')
controlchars_re = re.compile(r'[\x00-\x31\x7f-\xff]')
def _show_control_chars(match):
txt = repr(match.group(0))
if txt.startswith('u'):
txt = txt[2:-1]
else:
txt = txt[1:-1]
return txt
bits_to_turn_red_re = re.compile(r'\\([^uUx]|u[0-9a-fA-F]{4}|x[0-9a-fA-F]{2}|U[0-9a-fA-F]{8})')
def _make_turn_bits_red_f(color1, color2):
def _turn_bits_red(match):
txt = match.group(0)
if txt == '\\\\':
return '\\'
return color1 + txt + color2
return _turn_bits_red
default_null_placeholder = 'null'
default_float_precision = 3
default_colormap = DEFAULT_VALUE_COLORS
empty_colormap = defaultdict(lambda: '')
def format_by_type(cqltype, val, encoding, colormap=None, addcolor=False,
nullval=None, date_time_format=None, float_precision=None):
if nullval is None:
nullval = default_null_placeholder
if val is None:
return colorme(nullval, colormap, 'error')
if addcolor is False:
colormap = empty_colormap
elif colormap is None:
colormap = default_colormap
if date_time_format is None:
date_time_format = DateTimeFormat()
if float_precision is None:
float_precision = default_float_precision
return format_value(cqltype, val, encoding=encoding, colormap=colormap,
date_time_format=date_time_format, float_precision=float_precision,
nullval=nullval)
def color_text(bval, colormap, displaywidth=None):
# note that here, we render natural backslashes as just backslashes,
# in the same color as surrounding text, when using color. When not
# using color, we need to double up the backslashes so it's not
# ambiguous. This introduces the unique difficulty of having different
# display widths for the colored and non-colored versions. To avoid
# adding the smarts to handle that into FormattedValue, we just
# make an explicit check to see if a null colormap is being used or
# not.
if displaywidth is None:
displaywidth = len(bval)
tbr = _make_turn_bits_red_f(colormap['blob'], colormap['text'])
coloredval = colormap['text'] + bits_to_turn_red_re.sub(tbr, bval) + colormap['reset']
if colormap['text']:
displaywidth -= bval.count(r'\\')
return FormattedValue(bval, coloredval, displaywidth)
DEFAULT_NANOTIME_FORMAT = '%H:%M:%S.%N'
DEFAULT_DATE_FORMAT = '%Y-%m-%d'
DEFAULT_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S%z'
if platform.system() == 'Windows':
# Windows' strftime lacks %z, so fall back to the %Z timezone name there
DEFAULT_TIMESTAMP_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
class DateTimeFormat():
def __init__(self, timestamp_format=DEFAULT_TIMESTAMP_FORMAT, date_format=DEFAULT_DATE_FORMAT, nanotime_format=DEFAULT_NANOTIME_FORMAT):
self.timestamp_format = timestamp_format
self.date_format = date_format
self.nanotime_format = nanotime_format
def format_value_default(val, colormap, **_):
val = str(val)
escapedval = val.replace('\\', '\\\\')
bval = controlchars_re.sub(_show_control_chars, escapedval)
return color_text(bval, colormap)
# Mapping cql type base names ("int", "map", etc) to formatter functions,
# making format_value a generic function
_formatters = {}
def format_value(type, val, **kwargs):
if val == EMPTY:
return format_value_default('', **kwargs)
formatter = _formatters.get(type.__name__, format_value_default)
return formatter(val, **kwargs)
def formatter_for(typname):
def registrator(f):
_formatters[typname] = f
return f
return registrator
@formatter_for('bytearray')
def format_value_blob(val, colormap, **_):
bval = '0x' + ''.join('%02x' % c for c in val)
return colorme(bval, colormap, 'blob')
formatter_for('buffer')(format_value_blob)
def format_python_formatted_type(val, colormap, color, quote=False):
bval = str(val)
if quote:
bval = "'%s'" % bval
return colorme(bval, colormap, color)
@formatter_for('Decimal')
def format_value_decimal(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'decimal')
@formatter_for('UUID')
def format_value_uuid(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'uuid')
@formatter_for('inet')
def formatter_value_inet(val, colormap, quote=False, **_):
return format_python_formatted_type(val, colormap, 'inet', quote=quote)
@formatter_for('bool')
def format_value_boolean(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'boolean')
def format_floating_point_type(val, colormap, float_precision, **_):
if math.isnan(val):
bval = 'NaN'
elif math.isinf(val):
bval = 'Infinity' if val > 0 else '-Infinity'
else:
exponent = int(math.log10(abs(val))) if abs(val) > sys.float_info.epsilon else -sys.maxsize - 1
if -4 <= exponent < float_precision:
# when this is true %g will not use scientific notation,
# increasing precision should not change this decision
# so we increase the precision to take into account the
# digits to the left of the decimal point
float_precision = float_precision + exponent + 1
bval = '%.*g' % (float_precision, val)
return colorme(bval, colormap, 'float')
formatter_for('float')(format_floating_point_type)
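# Worked example for the precision adjustment above: with the default
# float_precision of 3, formatting 12.345 gives exponent == 1, so the precision
# grows to 3 + 1 + 1 = 5 and '%.5g' renders '12.345' rather than the truncated
# '12.3' that '%.3g' would produce.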
def format_integer_type(val, colormap, **_):
# base-10 only for now; support others?
bval = str(val)
return colorme(bval, colormap, 'int')
formatter_for('long')(format_integer_type)
formatter_for('int')(format_integer_type)
@formatter_for('datetime')
def format_value_timestamp(val, colormap, date_time_format, quote=False, **_):
bval = strftime(date_time_format.timestamp_format, calendar.timegm(val.utctimetuple()))
if quote:
bval = "'%s'" % bval
return colorme(bval, colormap, 'timestamp')
def strftime(time_format, seconds):
tzless_dt = datetime_from_timestamp(seconds)
return tzless_dt.replace(tzinfo=UTC()).strftime(time_format)
@formatter_for('Date')
def format_value_date(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'date')
@formatter_for('Time')
def format_value_time(val, colormap, **_):
return format_python_formatted_type(val, colormap, 'time')
@formatter_for('str')
def format_value_text(val, encoding, colormap, quote=False, **_):
escapedval = val.replace(u'\\', u'\\\\')
if quote:
escapedval = escapedval.replace("'", "''")
escapedval = unicode_controlchars_re.sub(_show_control_chars, escapedval)
bval = escapedval.encode(encoding, 'backslashreplace')
if quote:
bval = "'%s'" % bval
displaywidth = wcwidth.wcswidth(bval.decode(encoding))
return color_text(bval, colormap, displaywidth)
# name alias
formatter_for('unicode')(format_value_text)
def format_simple_collection(val, lbracket, rbracket, encoding,
colormap, date_time_format, float_precision, nullval):
subs = [format_value(type(sval), sval, encoding=encoding, colormap=colormap,
date_time_format=date_time_format, float_precision=float_precision,
nullval=nullval, quote=True)
for sval in val]
bval = lbracket + ', '.join(sval.strval for sval in subs) + rbracket
lb, sep, rb = [colormap['collection'] + s + colormap['reset']
for s in (lbracket, ', ', rbracket)]
coloredval = lb + sep.join(sval.coloredval for sval in subs) + rb
displaywidth = 2 * len(subs) + sum(sval.displaywidth for sval in subs)
return FormattedValue(bval, coloredval, displaywidth)
@formatter_for('list')
def format_value_list(val, encoding, colormap, date_time_format, float_precision, nullval, **_):
return format_simple_collection(val, '[', ']', encoding, colormap,
date_time_format, float_precision, nullval)
@formatter_for('tuple')
def format_value_tuple(val, encoding, colormap, date_time_format, float_precision, nullval, **_):
return format_simple_collection(val, '(', ')', encoding, colormap,
date_time_format, float_precision, nullval)
@formatter_for('set')
def format_value_set(val, encoding, colormap, date_time_format, float_precision, nullval, **_):
return format_simple_collection(sorted(val), '{', '}', encoding, colormap,
date_time_format, float_precision, nullval)
formatter_for('frozenset')(format_value_set)
formatter_for('sortedset')(format_value_set)
@formatter_for('dict')
def format_value_map(val, encoding, colormap, date_time_format, float_precision, nullval, **_):
def subformat(v):
return format_value(type(v), v, encoding=encoding, colormap=colormap,
date_time_format=date_time_format, float_precision=float_precision,
nullval=nullval, quote=True)
subs = [(subformat(k), subformat(v)) for (k, v) in sorted(val.items())]
bval = '{' + ', '.join(k.strval + ': ' + v.strval for (k, v) in subs) + '}'
lb, comma, colon, rb = [colormap['collection'] + s + colormap['reset']
for s in ('{', ', ', ': ', '}')]
coloredval = lb \
+ comma.join(k.coloredval + colon + v.coloredval for (k, v) in subs) \
+ rb
displaywidth = 4 * len(subs) + sum(k.displaywidth + v.displaywidth for (k, v) in subs)
return FormattedValue(bval, coloredval, displaywidth)
formatter_for('OrderedDict')(format_value_map)
formatter_for('OrderedMap')(format_value_map)
formatter_for('OrderedMapSerializedKey')(format_value_map)
def format_value_utype(val, encoding, colormap, date_time_format, float_precision, nullval, **_):
def format_field_value(v):
if v is None:
return colorme(nullval, colormap, 'error')
return format_value(type(v), v, encoding=encoding, colormap=colormap,
date_time_format=date_time_format, float_precision=float_precision,
nullval=nullval, quote=True)
def format_field_name(name):
return format_value_text(name, encoding=encoding, colormap=colormap, quote=False)
subs = [(format_field_name(k), format_field_value(v)) for (k, v) in val._asdict().items()]
bval = '{' + ', '.join(k.strval + ': ' + v.strval for (k, v) in subs) + '}'
lb, comma, colon, rb = [colormap['collection'] + s + colormap['reset']
for s in ('{', ', ', ': ', '}')]
coloredval = lb \
+ comma.join(k.coloredval + colon + v.coloredval for (k, v) in subs) \
+ rb
displaywidth = 4 * len(subs) + sum(k.displaywidth + v.displaywidth for (k, v) in subs)
return FormattedValue(bval, coloredval, displaywidth)
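# Minimal usage sketch (hedged): with coloring disabled, format_by_type returns a
# FormattedValue whose strval is the plain rendering, e.g.
#
#   format_by_type(int, 42, 'utf-8').strval          # -> '42'
#   format_by_type(dict, {'a': 1}, 'utf-8').strval   # -> "{'a': 1}"
#
# The first argument is only consulted for its __name__ when looking up
# _formatters, so the Python type of the value is normally what gets passed in.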
|
|
import logging
import logging.config
import os
import re
import warnings
from asyncio import CancelledError, Protocol, ensure_future, get_event_loop
from collections import defaultdict, deque
from functools import partial
from inspect import getmodulename, isawaitable, signature, stack
from socket import socket
from ssl import Purpose, SSLContext, create_default_context
from traceback import format_exc
from typing import Any, Dict, Optional, Type, Union
from urllib.parse import urlencode, urlunparse
from sanic import reloader_helpers
from sanic.asgi import ASGIApp
from sanic.blueprint_group import BlueprintGroup
from sanic.config import BASE_LOGO, Config
from sanic.constants import HTTP_METHODS
from sanic.exceptions import SanicException, ServerError, URLBuildError
from sanic.handlers import ErrorHandler
from sanic.log import LOGGING_CONFIG_DEFAULTS, error_logger, logger
from sanic.response import HTTPResponse, StreamingHTTPResponse
from sanic.router import Router
from sanic.server import (
AsyncioServer,
HttpProtocol,
Signal,
serve,
serve_multiple,
)
from sanic.static import register as static_register
from sanic.testing import SanicASGITestClient, SanicTestClient
from sanic.views import CompositionView
from sanic.websocket import ConnectionClosed, WebSocketProtocol
class Sanic:
def __init__(
self,
name=None,
router=None,
error_handler=None,
load_env=True,
request_class=None,
strict_slashes=False,
log_config=None,
configure_logging=True,
):
# Get name from previous stack frame
if name is None:
warnings.warn(
"Sanic(name=None) is deprecated and None value support "
"for `name` will be removed in the next release. "
"Please use Sanic(name='your_application_name') instead.",
DeprecationWarning,
stacklevel=2,
)
frame_records = stack()[1]
name = getmodulename(frame_records[1])
# logging
if configure_logging:
logging.config.dictConfig(log_config or LOGGING_CONFIG_DEFAULTS)
self.name = name
self.asgi = False
self.router = router or Router()
self.request_class = request_class
self.error_handler = error_handler or ErrorHandler()
self.config = Config(load_env=load_env)
self.request_middleware = deque()
self.response_middleware = deque()
self.blueprints = {}
self._blueprint_order = []
self.configure_logging = configure_logging
self.debug = None
self.sock = None
self.strict_slashes = strict_slashes
self.listeners = defaultdict(list)
self.is_stopping = False
self.is_running = False
self.is_request_stream = False
self.websocket_enabled = False
self.websocket_tasks = set()
self.named_request_middleware = {}
self.named_response_middleware = {}
# Register alternative method names
self.go_fast = self.run
@property
def loop(self):
"""Synonymous with asyncio.get_event_loop().
Only supported when using the `app.run` method.
"""
if not self.is_running and self.asgi is False:
raise SanicException(
"Loop can only be retrieved after the app has started "
"running. Not supported with `create_server` function"
)
return get_event_loop()
# -------------------------------------------------------------------- #
# Registration
# -------------------------------------------------------------------- #
def add_task(self, task):
"""Schedule a task to run later, after the loop has started.
Different from asyncio.ensure_future in that it does not
also return a future, and the actual ensure_future call
is delayed until before server start.
:param task: future, coroutine or awaitable
"""
try:
if callable(task):
try:
self.loop.create_task(task(self))
except TypeError:
self.loop.create_task(task())
else:
self.loop.create_task(task)
except SanicException:
@self.listener("before_server_start")
def run(app, loop):
if callable(task):
try:
loop.create_task(task(self))
except TypeError:
loop.create_task(task())
else:
loop.create_task(task)
# Decorator
def listener(self, event):
"""Create a listener from a decorated function.
:param event: event to listen to
"""
def decorator(listener):
self.listeners[event].append(listener)
return listener
return decorator
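# Usage sketch (hedged): listeners receive the app and the event loop, e.g.
#
#   @app.listener('before_server_start')
#   async def setup_db(app, loop):
#       app.db = await connect_to_db()   # connect_to_db is a hypothetical helper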
def register_listener(self, listener, event):
"""
Register the listener for a given event.
:param listener: callable i.e. setup_db(app, loop)
:param event: when to register listener i.e. 'before_server_start'
:return: listener
"""
return self.listener(event)(listener)
# Decorator
def route(
self,
uri,
methods=frozenset({"GET"}),
host=None,
strict_slashes=None,
stream=False,
version=None,
name=None,
):
"""Decorate a function to be registered as a route
:param uri: path of the URL
:param methods: list or tuple of methods allowed
:param host:
:param strict_slashes:
:param stream:
:param version:
:param name: user defined route name for url_for
:return: tuple of routes, decorated function
"""
# Fix case where the user did not prefix the URL with a /
# and will probably get confused as to why it's not working
if not uri.startswith("/"):
uri = "/" + uri
if stream:
self.is_request_stream = True
if strict_slashes is None:
strict_slashes = self.strict_slashes
def response(handler):
if isinstance(handler, tuple):
# if a handler fn is already wrapped in a route, the handler
# variable will be a tuple of (existing routes, handler fn)
routes, handler = handler
else:
routes = []
args = list(signature(handler).parameters.keys())
if not args:
handler_name = handler.__name__
raise ValueError(
f"Required parameter `request` missing "
f"in the {handler_name}() route?"
)
if stream:
handler.is_stream = stream
routes.extend(
self.router.add(
uri=uri,
methods=methods,
handler=handler,
host=host,
strict_slashes=strict_slashes,
version=version,
name=name,
)
)
return routes, handler
return response
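# Usage sketch (hedged, `json` from sanic.response):
#
#   @app.route('/items/<item_id:int>')
#   async def get_item(request, item_id):
#       return json({'item_id': item_id})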
# Shorthand method decorators
def get(
self, uri, host=None, strict_slashes=None, version=None, name=None
):
"""
Add an API URL under the **GET** *HTTP* method
:param uri: URL to be tagged to **GET** method of *HTTP*
:param host: Host IP or FQDN for the service to use
:param strict_slashes: Instruct :class:`Sanic` to check if the request
URLs need to terminate with a */*
:param version: API Version
:param name: Unique name that can be used to identify the Route
:return: Object decorated with :func:`route` method
"""
return self.route(
uri,
methods=frozenset({"GET"}),
host=host,
strict_slashes=strict_slashes,
version=version,
name=name,
)
def post(
self,
uri,
host=None,
strict_slashes=None,
stream=False,
version=None,
name=None,
):
"""
Add an API URL under the **POST** *HTTP* method
:param uri: URL to be tagged to **POST** method of *HTTP*
:param host: Host IP or FQDN for the service to use
:param strict_slashes: Instruct :class:`Sanic` to check if the request
URLs need to terminate with a */*
:param version: API Version
:param name: Unique name that can be used to identify the Route
:return: Object decorated with :func:`route` method
"""
return self.route(
uri,
methods=frozenset({"POST"}),
host=host,
strict_slashes=strict_slashes,
stream=stream,
version=version,
name=name,
)
def put(
self,
uri,
host=None,
strict_slashes=None,
stream=False,
version=None,
name=None,
):
"""
Add an API URL under the **PUT** *HTTP* method
:param uri: URL to be tagged to **PUT** method of *HTTP*
:param host: Host IP or FQDN for the service to use
:param strict_slashes: Instruct :class:`Sanic` to check if the request
URLs need to terminate with a */*
:param version: API Version
:param name: Unique name that can be used to identify the Route
:return: Object decorated with :func:`route` method
"""
return self.route(
uri,
methods=frozenset({"PUT"}),
host=host,
strict_slashes=strict_slashes,
stream=stream,
version=version,
name=name,
)
def head(
self, uri, host=None, strict_slashes=None, version=None, name=None
):
return self.route(
uri,
methods=frozenset({"HEAD"}),
host=host,
strict_slashes=strict_slashes,
version=version,
name=name,
)
def options(
self, uri, host=None, strict_slashes=None, version=None, name=None
):
"""
Add an API URL under the **OPTIONS** *HTTP* method
:param uri: URL to be tagged to **OPTIONS** method of *HTTP*
:param host: Host IP or FQDN for the service to use
:param strict_slashes: Instruct :class:`Sanic` to check if the request
URLs need to terminate with a */*
:param version: API Version
:param name: Unique name that can be used to identify the Route
:return: Object decorated with :func:`route` method
"""
return self.route(
uri,
methods=frozenset({"OPTIONS"}),
host=host,
strict_slashes=strict_slashes,
version=version,
name=name,
)
def patch(
self,
uri,
host=None,
strict_slashes=None,
stream=False,
version=None,
name=None,
):
"""
Add an API URL under the **PATCH** *HTTP* method
:param uri: URL to be tagged to **PATCH** method of *HTTP*
:param host: Host IP or FQDN for the service to use
:param strict_slashes: Instruct :class:`Sanic` to check if the request
URLs need to terminate with a */*
:param version: API Version
:param name: Unique name that can be used to identify the Route
:return: Object decorated with :func:`route` method
"""
return self.route(
uri,
methods=frozenset({"PATCH"}),
host=host,
strict_slashes=strict_slashes,
stream=stream,
version=version,
name=name,
)
def delete(
self, uri, host=None, strict_slashes=None, version=None, name=None
):
"""
Add an API URL under the **DELETE** *HTTP* method
:param uri: URL to be tagged to **DELETE** method of *HTTP*
:param host: Host IP or FQDN for the service to use
:param strict_slashes: Instruct :class:`Sanic` to check if the request
URLs need to terminate with a */*
:param version: API Version
:param name: Unique name that can be used to identify the Route
:return: Object decorated with :func:`route` method
"""
return self.route(
uri,
methods=frozenset({"DELETE"}),
host=host,
strict_slashes=strict_slashes,
version=version,
name=name,
)
def add_route(
self,
handler,
uri,
methods=frozenset({"GET"}),
host=None,
strict_slashes=None,
version=None,
name=None,
stream=False,
):
"""A helper method to register class instance or
functions as a handler to the application url
routes.
:param handler: function or class instance
:param uri: path of the URL
:param methods: list or tuple of methods allowed, these are overridden
if using a HTTPMethodView
:param host:
:param strict_slashes:
:param version:
:param name: user defined route name for url_for
:param stream: boolean specifying if the handler is a stream handler
:return: function or class instance
"""
# Handle HTTPMethodView differently
if hasattr(handler, "view_class"):
methods = set()
for method in HTTP_METHODS:
_handler = getattr(handler.view_class, method.lower(), None)
if _handler:
methods.add(method)
if hasattr(_handler, "is_stream"):
stream = True
# handle composition view differently
if isinstance(handler, CompositionView):
methods = handler.handlers.keys()
for _handler in handler.handlers.values():
if hasattr(_handler, "is_stream"):
stream = True
break
if strict_slashes is None:
strict_slashes = self.strict_slashes
self.route(
uri=uri,
methods=methods,
host=host,
strict_slashes=strict_slashes,
stream=stream,
version=version,
name=name,
)(handler)
return handler
# Decorator
def websocket(
self, uri, host=None, strict_slashes=None, subprotocols=None, name=None
):
"""
Decorate a function to be registered as a websocket route
:param uri: path of the URL
:param host: Host IP or FQDN details
:param strict_slashes: If the API endpoint needs to terminate
with a "/" or not
:param subprotocols: optional list of str with supported subprotocols
:param name: A unique name assigned to the URL so that it can
be used with :func:`url_for`
:return: tuple of routes, decorated function
"""
self.enable_websocket()
# Fix case where the user did not prefix the URL with a /
# and will probably get confused as to why it's not working
if not uri.startswith("/"):
uri = "/" + uri
if strict_slashes is None:
strict_slashes = self.strict_slashes
def response(handler):
if isinstance(handler, tuple):
# if a handler fn is already wrapped in a route, the handler
# variable will be a tuple of (existing routes, handler fn)
routes, handler = handler
else:
routes = []
async def websocket_handler(request, *args, **kwargs):
request.app = self
if not getattr(handler, "__blueprintname__", False):
request.endpoint = handler.__name__
else:
request.endpoint = (
getattr(handler, "__blueprintname__", "")
+ handler.__name__
)
if self.asgi:
ws = request.transport.get_websocket_connection()
else:
protocol = request.transport.get_protocol()
protocol.app = self
ws = await protocol.websocket_handshake(
request, subprotocols
)
# schedule the application handler
# its future is kept in self.websocket_tasks in case it
# needs to be cancelled due to the server being stopped
fut = ensure_future(handler(request, ws, *args, **kwargs))
self.websocket_tasks.add(fut)
try:
await fut
except (CancelledError, ConnectionClosed):
pass
finally:
self.websocket_tasks.remove(fut)
await ws.close()
routes.extend(
self.router.add(
uri=uri,
handler=websocket_handler,
methods=frozenset({"GET"}),
host=host,
strict_slashes=strict_slashes,
name=name,
)
)
return routes, handler
return response
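# Usage sketch (hedged): the wrapped handler receives the request and the
# websocket connection, e.g. a trivial echo endpoint:
#
#   @app.websocket('/feed')
#   async def feed(request, ws):
#       while True:
#           data = await ws.recv()
#           await ws.send(data)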
def add_websocket_route(
self,
handler,
uri,
host=None,
strict_slashes=None,
subprotocols=None,
name=None,
):
"""
A helper method to register a function as a websocket route.
:param handler: a callable function or instance of a class
that can handle the websocket request
:param host: Host IP or FQDN details
:param uri: URL path that will be mapped to the websocket
handler
:param strict_slashes: If the API endpoint needs to terminate
with a "/" or not
:param subprotocols: Subprotocols to be used with websocket
handshake
:param name: A unique name assigned to the URL so that it can
be used with :func:`url_for`
:return: Objected decorated by :func:`websocket`
"""
if strict_slashes is None:
strict_slashes = self.strict_slashes
return self.websocket(
uri,
host=host,
strict_slashes=strict_slashes,
subprotocols=subprotocols,
name=name,
)(handler)
def enable_websocket(self, enable=True):
"""Enable or disable the support for websocket.
Websocket is enabled automatically if websocket routes are
added to the application.
"""
if not self.websocket_enabled:
# if the server is stopped, we want to cancel any ongoing
# websocket tasks, to allow the server to exit promptly
@self.listener("before_server_stop")
def cancel_websocket_tasks(app, loop):
for task in self.websocket_tasks:
task.cancel()
self.websocket_enabled = enable
# Decorator
def exception(self, *exceptions):
"""Decorate a function to be registered as a handler for exceptions
:param exceptions: exceptions
:return: decorated function
"""
def response(handler):
for exception in exceptions:
if isinstance(exception, (tuple, list)):
for e in exception:
self.error_handler.add(e, handler)
else:
self.error_handler.add(exception, handler)
return handler
return response
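# Usage sketch (hedged, NotFound from sanic.exceptions, text from sanic.response):
#
#   @app.exception(NotFound)
#   async def ignore_404s(request, exception):
#       return text(f"Page not found: {request.url}")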
def register_middleware(self, middleware, attach_to="request"):
"""
Register an application level middleware that will be attached
to all the API URLs registered under this application.
This method is internally invoked by the :func:`middleware`
decorator provided at the app level.
:param middleware: Callback method to be attached to the
middleware
:param attach_to: The state at which the middleware needs to be
invoked in the lifecycle of an *HTTP Request*.
**request** - Invoke before the request is processed
**response** - Invoke before the response is returned back
:return: decorated method
"""
if attach_to == "request":
if middleware not in self.request_middleware:
self.request_middleware.append(middleware)
if attach_to == "response":
if middleware not in self.response_middleware:
self.response_middleware.appendleft(middleware)
return middleware
def register_named_middleware(
self, middleware, route_names, attach_to="request"
):
if attach_to == "request":
for _rn in route_names:
if _rn not in self.named_request_middleware:
self.named_request_middleware[_rn] = deque()
if middleware not in self.named_request_middleware[_rn]:
self.named_request_middleware[_rn].append(middleware)
if attach_to == "response":
for _rn in route_names:
if _rn not in self.named_response_middleware:
self.named_response_middleware[_rn] = deque()
if middleware not in self.named_response_middleware[_rn]:
self.named_response_middleware[_rn].append(middleware)
# Decorator
def middleware(self, middleware_or_request):
"""
Decorate and register middleware to be called before a request.
Can either be called as *@app.middleware* or
*@app.middleware('request')*
:param: middleware_or_request: Optional parameter to use for
identifying which type of middleware is being registered.
"""
# Detect which way this was called, @middleware or @middleware('AT')
if callable(middleware_or_request):
return self.register_middleware(middleware_or_request)
else:
return partial(
self.register_middleware, attach_to=middleware_or_request
)
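# Usage sketch (hedged): both spellings register middleware.
#
#   @app.middleware                      # defaults to 'request'
#   async def log_request(request):
#       print("incoming:", request.path)
#
#   @app.middleware('response')
#   async def add_header(request, response):
#       response.headers['x-served-by'] = 'sanic'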
# Static Files
def static(
self,
uri,
file_or_directory,
pattern=r"/?.+",
use_modified_since=True,
use_content_range=False,
stream_large_files=False,
name="static",
host=None,
strict_slashes=None,
content_type=None,
):
"""
Register a root to serve files from. The input can either be a
file or a directory. This method will enable an easy and simple way
to setup the :class:`Route` necessary to serve the static files.
:param uri: URL path to be used for serving static content
:param file_or_directory: Path for the Static file/directory with
static files
:param pattern: Regex Pattern identifying the valid static files
:param use_modified_since: If true, send file modified time, and return
not modified if the browser's matches the server's
:param use_content_range: If true, process header for range requests
and sends the file part that is requested
:param stream_large_files: If true, use the
:func:`StreamingHTTPResponse.file_stream` handler rather
than the :func:`HTTPResponse.file` handler to send the file.
If this is an integer, this represents the threshold size to
switch to :func:`StreamingHTTPResponse.file_stream`
:param name: user defined name used for url_for
:param host: Host IP or FQDN for the service to use
:param strict_slashes: Instruct :class:`Sanic` to check if the request
URLs need to terminate with a */*
:param content_type: user defined content type for header
:return: None
"""
static_register(
self,
uri,
file_or_directory,
pattern,
use_modified_since,
use_content_range,
stream_large_files,
name,
host,
strict_slashes,
content_type,
)
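# Usage sketch (hedged): a directory and a single file can both be mounted.
#
#   app.static('/static', './static')
#   app.static('/favicon.ico', './static/favicon.ico', name='favicon')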
def blueprint(self, blueprint, **options):
"""Register a blueprint on the application.
:param blueprint: Blueprint object or (list, tuple) thereof
:param options: option dictionary with blueprint defaults
:return: Nothing
"""
if isinstance(blueprint, (list, tuple, BlueprintGroup)):
for item in blueprint:
self.blueprint(item, **options)
return
if blueprint.name in self.blueprints:
assert self.blueprints[blueprint.name] is blueprint, (
'A blueprint with the name "%s" is already registered. '
"Blueprint names must be unique." % (blueprint.name,)
)
else:
self.blueprints[blueprint.name] = blueprint
self._blueprint_order.append(blueprint)
blueprint.register(self, options)
def register_blueprint(self, *args, **kwargs):
"""
Proxy method provided for invoking the :func:`blueprint` method
.. note::
To be deprecated in 1.0. Use :func:`blueprint` instead.
:param args: Blueprint object or (list, tuple) thereof
:param kwargs: option dictionary with blueprint defaults
:return: None
"""
if self.debug:
warnings.simplefilter("default")
warnings.warn(
"Use of register_blueprint will be deprecated in "
"version 1.0. Please use the blueprint method"
" instead",
DeprecationWarning,
)
return self.blueprint(*args, **kwargs)
def url_for(self, view_name: str, **kwargs):
r"""Build a URL based on a view name and the values provided.
In order to build a URL, all request parameters must be supplied as
keyword arguments, and each parameter must pass the test for the
specified parameter type. If these conditions are not met, a
`URLBuildError` will be thrown.
Keyword arguments that are not request parameters will be included in
the output URL's query string.
:param view_name: string referencing the view name
:param \**kwargs: keys and values that are used to build request
parameters and query string arguments.
:return: the built URL
Raises:
URLBuildError
"""
# find the route by the supplied view name
kw: Dict[str, str] = {}
# special static files url_for
if view_name == "static":
kw.update(name=kwargs.pop("name", "static"))
elif view_name.endswith(".static"): # blueprint.static
kwargs.pop("name", None)
kw.update(name=view_name)
uri, route = self.router.find_route_by_view_name(view_name, **kw)
if not (uri and route):
raise URLBuildError(
f"Endpoint with name `{view_name}` was not found"
)
# If the route has host defined, split that off
# TODO: Retain netloc and path separately in Route objects
host = uri.find("/")
if host > 0:
host, uri = uri[:host], uri[host:]
else:
host = None
if view_name == "static" or view_name.endswith(".static"):
filename = kwargs.pop("filename", None)
# it's static folder
if "<file_uri:" in uri:
folder_ = uri.split("<file_uri:", 1)[0]
if folder_.endswith("/"):
folder_ = folder_[:-1]
if filename.startswith("/"):
filename = filename[1:]
uri = f"{folder_}/{filename}"
if uri != "/" and uri.endswith("/"):
uri = uri[:-1]
out = uri
# find all the parameters we will need to build in the URL
matched_params = re.findall(self.router.parameter_pattern, uri)
# _method is only a placeholder now, don't know how to support it
kwargs.pop("_method", None)
anchor = kwargs.pop("_anchor", "")
# _external need SERVER_NAME in config or pass _server arg
external = kwargs.pop("_external", False)
scheme = kwargs.pop("_scheme", "")
if scheme and not external:
raise ValueError("When specifying _scheme, _external must be True")
netloc = kwargs.pop("_server", None)
if netloc is None and external:
netloc = host or self.config.get("SERVER_NAME", "")
if external:
if not scheme:
if ":" in netloc[:8]:
scheme = netloc[:8].split(":", 1)[0]
else:
scheme = "http"
if "://" in netloc[:8]:
netloc = netloc.split("://", 1)[-1]
for match in matched_params:
name, _type, pattern = self.router.parse_parameter_string(match)
# we only want to match against each individual parameter
specific_pattern = f"^{pattern}$"
supplied_param = None
if name in kwargs:
supplied_param = kwargs.get(name)
del kwargs[name]
else:
raise URLBuildError(
f"Required parameter `{name}` was not passed to url_for"
)
supplied_param = str(supplied_param)
# determine if the parameter supplied by the caller passes the test
# in the URL
passes_pattern = re.match(specific_pattern, supplied_param)
if not passes_pattern:
if _type != str:
type_name = _type.__name__
msg = (
f'Value "{supplied_param}" '
f"for parameter `{name}` does not "
f"match pattern for type `{type_name}`: {pattern}"
)
else:
msg = (
f'Value "{supplied_param}" for parameter `{name}` '
f"does not satisfy pattern {pattern}"
)
raise URLBuildError(msg)
# replace the parameter in the URL with the supplied value
replacement_regex = f"(<{name}.*?>)"
out = re.sub(replacement_regex, supplied_param, out)
# parse the remainder of the keyword arguments into a querystring
query_string = urlencode(kwargs, doseq=True) if kwargs else ""
# scheme://netloc/path;parameters?query#fragment
out = urlunparse((scheme, netloc, out, "", query_string, anchor))
return out
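# Usage sketch (hedged): given
#
#   @app.route('/posts/<post_id:int>')
#   async def post_detail(request, post_id): ...
#
# the URL can be rebuilt from the handler name:
#
#   app.url_for('post_detail', post_id=5)          # -> '/posts/5'
#   app.url_for('post_detail', post_id=5, q='x')   # -> '/posts/5?q=x'
#   app.url_for('post_detail', post_id=5,
#               _external=True, _server='example.com')
#                                                  # -> 'http://example.com/posts/5'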
# -------------------------------------------------------------------- #
# Request Handling
# -------------------------------------------------------------------- #
def converted_response_type(self, response):
"""
No implementation provided.
"""
pass
async def handle_request(self, request, write_callback, stream_callback):
"""Take a request from the HTTP Server and return a response object
to be sent back The HTTP Server only expects a response object, so
exception handling must be done here
:param request: HTTP Request object
:param write_callback: Synchronous response function to be
called with the response as the only argument
:param stream_callback: Coroutine that handles streaming a
StreamingHTTPResponse if produced by the handler.
:return: Nothing
"""
# Define `response` var here to remove warnings about
# allocation before assignment below.
response = None
cancelled = False
name = None
try:
# Fetch handler from router
handler, args, kwargs, uri, name = self.router.get(request)
# -------------------------------------------- #
# Request Middleware
# -------------------------------------------- #
response = await self._run_request_middleware(
request, request_name=name
)
# No middleware results
if not response:
# -------------------------------------------- #
# Execute Handler
# -------------------------------------------- #
request.uri_template = uri
if handler is None:
raise ServerError(
(
"'None' was returned while requesting a "
"handler from the router"
)
)
else:
if not getattr(handler, "__blueprintname__", False):
request.endpoint = self._build_endpoint_name(
handler.__name__
)
else:
request.endpoint = self._build_endpoint_name(
getattr(handler, "__blueprintname__", ""),
handler.__name__,
)
# Run response handler
response = handler(request, *args, **kwargs)
if isawaitable(response):
response = await response
except CancelledError:
# If response handler times out, the server handles the error
# and cancels the handle_request job.
# In this case, the transport is already closed and we cannot
# issue a response.
response = None
cancelled = True
except Exception as e:
# -------------------------------------------- #
# Response Generation Failed
# -------------------------------------------- #
try:
response = self.error_handler.response(request, e)
if isawaitable(response):
response = await response
except Exception as e:
if isinstance(e, SanicException):
response = self.error_handler.default(
request=request, exception=e
)
elif self.debug:
response = HTTPResponse(
f"Error while "
f"handling error: {e}\nStack: {format_exc()}",
status=500,
)
else:
response = HTTPResponse(
"An error occurred while handling an error", status=500
)
finally:
# -------------------------------------------- #
# Response Middleware
# -------------------------------------------- #
# Don't run response middleware if response is None
if response is not None:
try:
response = await self._run_response_middleware(
request, response, request_name=name
)
except CancelledError:
# Response middleware can timeout too, as above.
response = None
cancelled = True
except BaseException:
error_logger.exception(
"Exception occurred in one of response "
"middleware handlers"
)
if cancelled:
raise CancelledError()
# pass the response to the correct callback
if write_callback is None or isinstance(
response, StreamingHTTPResponse
):
if stream_callback:
await stream_callback(response)
else:
# Should only end here IF it is an ASGI websocket.
# TODO:
# - Add exception handling
pass
else:
write_callback(response)
# -------------------------------------------------------------------- #
# Testing
# -------------------------------------------------------------------- #
@property
def test_client(self):
return SanicTestClient(self)
@property
def asgi_client(self):
return SanicASGITestClient(self)
# -------------------------------------------------------------------- #
# Execution
# -------------------------------------------------------------------- #
def run(
self,
host: Optional[str] = None,
port: Optional[int] = None,
debug: bool = False,
ssl: Union[dict, SSLContext, None] = None,
sock: Optional[socket] = None,
workers: int = 1,
protocol: Type[Protocol] = None,
backlog: int = 100,
stop_event: Any = None,
register_sys_signals: bool = True,
access_log: Optional[bool] = None,
**kwargs: Any,
) -> None:
"""Run the HTTP Server and listen until keyboard interrupt or term
signal. On termination, drain connections before closing.
:param host: Address to host on
:type host: str
:param port: Port to host on
:type port: int
:param debug: Enables debug output (slows server)
:type debug: bool
:param ssl: SSLContext, or location of certificate and key
for SSL encryption of worker(s)
:type ssl: SSLContext or dict
:param sock: Socket for the server to accept connections from
:type sock: socket
:param workers: Number of worker processes to spawn
:type workers: int
:param protocol: Subclass of asyncio Protocol class
:type protocol: type[Protocol]
:param backlog: a number of unaccepted connections that the system
will allow before refusing new connections
:type backlog: int
:param stop_event: event to be triggered
before stopping the app - deprecated
:type stop_event: None
:param register_sys_signals: Register SIG* events
:type register_sys_signals: bool
:param access_log: Enables writing access logs (slows server)
:type access_log: bool
:return: Nothing
"""
if "loop" in kwargs:
raise TypeError(
"loop is not a valid argument. To use an existing loop, "
"change to create_server().\nSee more: "
"https://sanic.readthedocs.io/en/latest/sanic/deploying.html"
"#asynchronous-support"
)
# Default auto_reload to false
auto_reload = False
# If debug is set, default it to true (unless on windows)
if debug and os.name == "posix":
auto_reload = True
# Allow for overriding either of the defaults
auto_reload = kwargs.get("auto_reload", auto_reload)
if sock is None:
host, port = host or "127.0.0.1", port or 8000
if protocol is None:
protocol = (
WebSocketProtocol if self.websocket_enabled else HttpProtocol
)
if stop_event is not None:
if debug:
warnings.simplefilter("default")
warnings.warn(
"stop_event will be removed from future versions.",
DeprecationWarning,
)
# if access_log is passed explicitly change config.ACCESS_LOG
if access_log is not None:
self.config.ACCESS_LOG = access_log
server_settings = self._helper(
host=host,
port=port,
debug=debug,
ssl=ssl,
sock=sock,
workers=workers,
protocol=protocol,
backlog=backlog,
register_sys_signals=register_sys_signals,
auto_reload=auto_reload,
)
try:
self.is_running = True
self.is_stopping = False
if workers > 1 and os.name != "posix":
logger.warn(
f"Multiprocessing is currently not supported on {os.name},"
" using workers=1 instead"
)
workers = 1
if workers == 1:
if auto_reload and os.name != "posix":
# This condition must be removed after implementing
# auto reloader for other operating systems.
raise NotImplementedError
if (
auto_reload
and os.environ.get("SANIC_SERVER_RUNNING") != "true"
):
reloader_helpers.watchdog(2)
else:
serve(**server_settings)
else:
serve_multiple(server_settings, workers)
except BaseException:
error_logger.exception(
"Experienced exception while trying to serve"
)
raise
finally:
self.is_running = False
logger.info("Server Stopped")
def stop(self):
"""This kills the Sanic"""
if not self.is_stopping:
self.is_stopping = True
get_event_loop().stop()
async def create_server(
self,
host: Optional[str] = None,
port: Optional[int] = None,
debug: bool = False,
ssl: Union[dict, SSLContext, None] = None,
sock: Optional[socket] = None,
protocol: Type[Protocol] = None,
backlog: int = 100,
stop_event: Any = None,
access_log: Optional[bool] = None,
return_asyncio_server=False,
asyncio_server_kwargs=None,
) -> Optional[AsyncioServer]:
"""
Asynchronous version of :func:`run`.
This method will take care of the operations necessary to invoke
the *before_start* events via :func:`trigger_events` method invocation
before starting the *sanic* app in Async mode.
.. note::
This does not support multiprocessing and is not the preferred
way to run a :class:`Sanic` application.
:param host: Address to host on
:type host: str
:param port: Port to host on
:type port: int
:param debug: Enables debug output (slows server)
:type debug: bool
:param ssl: SSLContext, or location of certificate and key
for SSL encryption of worker(s)
:type ssl: SSLContext or dict
:param sock: Socket for the server to accept connections from
:type sock: socket
:param protocol: Subclass of asyncio Protocol class
:type protocol: type[Protocol]
:param backlog: a number of unaccepted connections that the system
will allow before refusing new connections
:type backlog: int
:param stop_event: event to be triggered
before stopping the app - deprecated
:type stop_event: None
:param access_log: Enables writing access logs (slows server)
:type access_log: bool
:param return_asyncio_server: flag that defines whether there's a need
to return asyncio.Server or
start it serving right away
:type return_asyncio_server: bool
:param asyncio_server_kwargs: key-value arguments for
asyncio/uvloop create_server method
:type asyncio_server_kwargs: dict
:return: AsyncioServer if return_asyncio_server is true, else Nothing
"""
if sock is None:
host, port = host or "127.0.0.1", port or 8000
if protocol is None:
protocol = (
WebSocketProtocol if self.websocket_enabled else HttpProtocol
)
if stop_event is not None:
if debug:
warnings.simplefilter("default")
warnings.warn(
"stop_event will be removed from future versions.",
DeprecationWarning,
)
# if access_log is passed explicitly change config.ACCESS_LOG
if access_log is not None:
self.config.ACCESS_LOG = access_log
server_settings = self._helper(
host=host,
port=port,
debug=debug,
ssl=ssl,
sock=sock,
loop=get_event_loop(),
protocol=protocol,
backlog=backlog,
run_async=return_asyncio_server,
)
# Trigger before_start events
await self.trigger_events(
server_settings.get("before_start", []),
server_settings.get("loop"),
)
return await serve(
asyncio_server_kwargs=asyncio_server_kwargs, **server_settings
)
async def trigger_events(self, events, loop):
"""Trigger events (functions or async)
:param events: one or more sync or async functions to execute
:param loop: event loop
"""
for event in events:
result = event(loop)
if isawaitable(result):
await result
async def _run_request_middleware(self, request, request_name=None):
# Skipping the loop when no middleware is registered (the `if` below)
# measurably speeds up request handling
named_middleware = self.named_request_middleware.get(
request_name, deque()
)
applicable_middleware = self.request_middleware + named_middleware
if applicable_middleware:
for middleware in applicable_middleware:
response = middleware(request)
if isawaitable(response):
response = await response
if response:
return response
return None
async def _run_response_middleware(
self, request, response, request_name=None
):
named_middleware = self.named_response_middleware.get(
request_name, deque()
)
applicable_middleware = self.response_middleware + named_middleware
if applicable_middleware:
for middleware in applicable_middleware:
_response = middleware(request, response)
if isawaitable(_response):
_response = await _response
if _response:
response = _response
break
return response
def _helper(
self,
host=None,
port=None,
debug=False,
ssl=None,
sock=None,
workers=1,
loop=None,
protocol=HttpProtocol,
backlog=100,
stop_event=None,
register_sys_signals=True,
run_async=False,
auto_reload=False,
):
"""Helper function used by `run` and `create_server`."""
if isinstance(ssl, dict):
# try common aliases for the certificate and key entries
cert = ssl.get("cert") or ssl.get("certificate")
key = ssl.get("key") or ssl.get("keyfile")
if cert is None or key is None:
raise ValueError("SSLContext or certificate and key required.")
context = create_default_context(purpose=Purpose.CLIENT_AUTH)
context.load_cert_chain(cert, keyfile=key)
ssl = context
if stop_event is not None:
if debug:
warnings.simplefilter("default")
warnings.warn(
"stop_event will be removed from future versions.",
DeprecationWarning,
)
if self.config.PROXIES_COUNT and self.config.PROXIES_COUNT < 0:
raise ValueError(
"PROXIES_COUNT cannot be negative. "
"https://sanic.readthedocs.io/en/latest/sanic/config.html"
"#proxy-configuration"
)
self.error_handler.debug = debug
self.debug = debug
server_settings = {
"protocol": protocol,
"host": host,
"port": port,
"sock": sock,
"ssl": ssl,
"app": self,
"signal": Signal(),
"loop": loop,
"register_sys_signals": register_sys_signals,
"backlog": backlog,
}
# -------------------------------------------- #
# Register start/stop events
# -------------------------------------------- #
for event_name, settings_name, reverse in (
("before_server_start", "before_start", False),
("after_server_start", "after_start", False),
("before_server_stop", "before_stop", True),
("after_server_stop", "after_stop", True),
):
listeners = self.listeners[event_name].copy()
if reverse:
listeners.reverse()
# Prepend sanic to the arguments when listeners are triggered
listeners = [partial(listener, self) for listener in listeners]
server_settings[settings_name] = listeners
if self.configure_logging and debug:
logger.setLevel(logging.DEBUG)
if (
self.config.LOGO
and os.environ.get("SANIC_SERVER_RUNNING") != "true"
):
logger.debug(
self.config.LOGO
if isinstance(self.config.LOGO, str)
else BASE_LOGO
)
if run_async:
server_settings["run_async"] = True
# Serve
if host and port and os.environ.get("SANIC_SERVER_RUNNING") != "true":
proto = "http"
if ssl is not None:
proto = "https"
logger.info(f"Goin' Fast @ {proto}://{host}:{port}")
return server_settings
def _build_endpoint_name(self, *parts):
parts = [self.name, *parts]
return ".".join(parts)
# -------------------------------------------------------------------- #
# ASGI
# -------------------------------------------------------------------- #
async def __call__(self, scope, receive, send):
"""To be ASGI compliant, our instance must be a callable that accepts
three arguments: scope, receive, send. See the ASGI reference for more
details: https://asgi.readthedocs.io/en/latest/"""
self.asgi = True
asgi_app = await ASGIApp.create(self, scope, receive, send)
await asgi_app()
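# Usage sketch (hedged): the same instance can be served standalone or by any
# ASGI server.
#
#   app = Sanic("my_app")
#   ...
#   app.run(host="0.0.0.0", port=8000)
#
#   # or, since the instance is an ASGI callable (assumes uvicorn is installed):
#   #   uvicorn my_module:app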
|
|
try:
from urllib import quote, quote_plus, unquote_plus
except ImportError:
from urllib.parse import quote, quote_plus, unquote_plus #@UnresolvedImport
import socket
import os
import threading
import time
from _pydev_bundle import pydev_localhost
import subprocess
import sys
IS_PY3K = sys.version_info[0] >= 3
# Note: copied (don't import, because we want this to stay independent of the actual code for backward compatibility).
CMD_RUN = 101
CMD_LIST_THREADS = 102
CMD_THREAD_CREATE = 103
CMD_THREAD_KILL = 104
CMD_THREAD_SUSPEND = 105
CMD_THREAD_RUN = 106
CMD_STEP_INTO = 107
CMD_STEP_OVER = 108
CMD_STEP_RETURN = 109
CMD_GET_VARIABLE = 110
CMD_SET_BREAK = 111
CMD_REMOVE_BREAK = 112
CMD_EVALUATE_EXPRESSION = 113
CMD_GET_FRAME = 114
CMD_EXEC_EXPRESSION = 115
CMD_WRITE_TO_CONSOLE = 116
CMD_CHANGE_VARIABLE = 117
CMD_RUN_TO_LINE = 118
CMD_RELOAD_CODE = 119
CMD_GET_COMPLETIONS = 120
# Note: renumbered (conflicted on merge)
CMD_CONSOLE_EXEC = 121
CMD_ADD_EXCEPTION_BREAK = 122
CMD_REMOVE_EXCEPTION_BREAK = 123
CMD_LOAD_SOURCE = 124
CMD_ADD_DJANGO_EXCEPTION_BREAK = 125
CMD_REMOVE_DJANGO_EXCEPTION_BREAK = 126
CMD_SET_NEXT_STATEMENT = 127
CMD_SMART_STEP_INTO = 128
CMD_EXIT = 129
CMD_SIGNATURE_CALL_TRACE = 130
CMD_SET_PY_EXCEPTION = 131
CMD_GET_FILE_CONTENTS = 132
CMD_SET_PROPERTY_TRACE = 133
# Pydev debug console commands
CMD_EVALUATE_CONSOLE_EXPRESSION = 134
CMD_RUN_CUSTOM_OPERATION = 135
CMD_GET_BREAKPOINT_EXCEPTION = 136
CMD_STEP_CAUGHT_EXCEPTION = 137
CMD_SEND_CURR_EXCEPTION_TRACE = 138
CMD_SEND_CURR_EXCEPTION_TRACE_PROCEEDED = 139
CMD_IGNORE_THROWN_EXCEPTION_AT = 140
CMD_ENABLE_DONT_TRACE = 141
CMD_SHOW_CONSOLE = 142
CMD_GET_ARRAY = 143
CMD_STEP_INTO_MY_CODE = 144
CMD_GET_CONCURRENCY_EVENT = 145
CMD_VERSION = 501
CMD_RETURN = 502
CMD_ERROR = 901
# Verbosity switches for these tests. Keeping them True makes failures much easier
# to diagnose; they have been turned off in the past when the extra output/memory
# became a problem on CI (subprocess uses fork, which can fail on constrained machines).
SHOW_WRITES_AND_READS = True
SHOW_OTHER_DEBUG_INFO = True
SHOW_STDOUT = True
try:
from thread import start_new_thread
except ImportError:
from _thread import start_new_thread # @UnresolvedImport
try:
xrange
except:
xrange = range
#=======================================================================================================================
# ReaderThread
#=======================================================================================================================
class ReaderThread(threading.Thread):
def __init__(self, sock):
threading.Thread.__init__(self)
self.setDaemon(True)
self.sock = sock
self.last_received = ''
self.all_received = []
self._kill = False
def run(self):
last_printed = None
try:
buf = ''
while not self._kill:
l = self.sock.recv(1024)
if IS_PY3K:
l = l.decode('utf-8')
self.all_received.append(l)
buf += l
while '\n' in buf:
# Print each part...
i = buf.index('\n')+1
self.last_received = buf[:i]
buf = buf[i:]
if SHOW_WRITES_AND_READS:
if last_printed != self.last_received.strip():
last_printed = self.last_received.strip()
print('Test Reader Thread Received %s' % last_printed)
except:
pass # ok, finished it
finally:
del self.all_received[:]
def do_kill(self):
self._kill = True
if hasattr(self, 'sock'):
self.sock.close()
class DebuggerRunner(object):
def get_command_line(self):
'''
Returns the base command line (i.e.: ['python.exe', '-u'])
'''
raise NotImplementedError
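# A concrete runner usually only needs to point at the interpreter under test,
# e.g. (hypothetical subclass, mirroring the docstring above):
#
#   class PythonDebuggerRunner(DebuggerRunner):
#       def get_command_line(self):
#           return [sys.executable, '-u']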
def add_command_line_args(self, args):
writer_thread = self.writer_thread
port = int(writer_thread.port)
localhost = pydev_localhost.get_localhost()
ret = args + [
writer_thread.get_pydevd_file(),
'--DEBUG_RECORD_SOCKET_READS',
'--qt-support',
'--client',
localhost,
'--port',
str(port),
]
if writer_thread.IS_MODULE:
ret += ['--module']
ret = ret + ['--file'] + writer_thread.get_command_line_args()
return ret
def check_case(self, writer_thread_class):
writer_thread = writer_thread_class()
try:
writer_thread.start()
for _i in xrange(40000):
if hasattr(writer_thread, 'port'):
break
time.sleep(.01)
self.writer_thread = writer_thread
args = self.get_command_line()
args = self.add_command_line_args(args)
if SHOW_OTHER_DEBUG_INFO:
print('executing', ' '.join(args))
ret = self.run_process(args, writer_thread)
finally:
writer_thread.do_kill()
writer_thread.log = []
stdout = ret['stdout']
stderr = ret['stderr']
writer_thread.additional_output_checks(''.join(stdout), ''.join(stderr))
return ret
def create_process(self, args, writer_thread):
process = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=writer_thread.get_cwd() if writer_thread is not None else '.',
env=writer_thread.get_environ() if writer_thread is not None else None,
)
return process
def run_process(self, args, writer_thread):
process = self.create_process(args, writer_thread)
stdout = []
stderr = []
finish = [False]
try:
def read(stream, buffer):
for line in stream.readlines():
if finish[0]:
return
if IS_PY3K:
line = line.decode('utf-8')
if SHOW_STDOUT:
sys.stdout.write('stdout: %s' % (line,))
buffer.append(line)
start_new_thread(read, (process.stdout, stdout))
if SHOW_OTHER_DEBUG_INFO:
print('Both processes started')
# The process may finish while the writer thread is still running (or the other
# way around), so poll in a loop and give it a few more chances to finish
# successfully.
check = 0
while True:
if process.poll() is not None:
break
else:
if writer_thread is not None:
if not writer_thread.isAlive():
if writer_thread.FORCE_KILL_PROCESS_WHEN_FINISHED_OK:
process.kill()
continue
check += 1
if check == 20:
print('Warning: writer thread exited and process still did not.')
if check == 100:
process.kill()
time.sleep(.2)
self.fail_with_message(
"The other process should've exited but still didn't (timeout for process to exit).",
stdout, stderr, writer_thread
)
time.sleep(.2)
if writer_thread is not None:
if not writer_thread.FORCE_KILL_PROCESS_WHEN_FINISHED_OK:
poll = process.poll()
if poll < 0:
self.fail_with_message(
"The other process exited with error code: " + str(poll), stdout, stderr, writer_thread)
if stdout is None:
self.fail_with_message(
"The other process may still be running -- and didn't give any output.", stdout, stderr, writer_thread)
check = 0
while 'TEST SUCEEDED' not in ''.join(stdout):
check += 1
if check == 50:
self.fail_with_message("TEST SUCEEDED not found in stdout.", stdout, stderr, writer_thread)
time.sleep(.1)
for _i in xrange(100):
if not writer_thread.finished_ok:
time.sleep(.1)
if not writer_thread.finished_ok:
self.fail_with_message(
"The thread that was doing the tests didn't finish successfully.", stdout, stderr, writer_thread)
finally:
finish[0] = True
return {'stdout':stdout, 'stderr':stderr}
def fail_with_message(self, msg, stdout, stderr, writerThread):
raise AssertionError(msg+
"\n\n===========================\nStdout: \n"+''.join(stdout)+
"\n\n===========================\nStderr:"+''.join(stderr)+
"\n\n===========================\nLog:\n"+'\n'.join(getattr(writerThread, 'log', [])))
#=======================================================================================================================
# AbstractWriterThread
#=======================================================================================================================
class AbstractWriterThread(threading.Thread):
FORCE_KILL_PROCESS_WHEN_FINISHED_OK = False
IS_MODULE = False
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(True)
self.finished_ok = False
self._next_breakpoint_id = 0
self.log = []
def additional_output_checks(self, stdout, stderr):
pass
def get_environ(self):
return None
def get_pydevd_file(self):
dirname = os.path.dirname(__file__)
dirname = os.path.dirname(dirname)
return os.path.abspath(os.path.join(dirname, 'pydevd.py'))
def get_cwd(self):
return os.path.dirname(self.get_pydevd_file())
def get_command_line_args(self):
return [self.TEST_FILE]
def do_kill(self):
if hasattr(self, 'server_socket'):
self.server_socket.close()
if hasattr(self, 'reader_thread'):
# if it's not created, it's not there...
self.reader_thread.do_kill()
if hasattr(self, 'sock'):
self.sock.close()
def write(self, s):
self.log.append('write: %s' % (s,))
last = self.reader_thread.last_received
if SHOW_WRITES_AND_READS:
print('Test Writer Thread Written %s' % (s,))
msg = s + '\n'
if IS_PY3K:
msg = msg.encode('utf-8')
self.sock.send(msg)
time.sleep(0.2)
i = 0
while last == self.reader_thread.last_received and i < 10:
i += 1
time.sleep(0.1)
def start_socket(self, port=0):
if SHOW_WRITES_AND_READS:
print('start_socket')
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', port))
self.port = s.getsockname()[1]
s.listen(1)
if SHOW_WRITES_AND_READS:
print('Waiting in socket.accept()')
self.server_socket = s
newSock, addr = s.accept()
if SHOW_WRITES_AND_READS:
print('Test Writer Thread Socket:', newSock, addr)
reader_thread = self.reader_thread = ReaderThread(newSock)
reader_thread.start()
self.sock = newSock
self._sequence = -1
# initial command is always the version
self.write_version()
self.log.append('start_socket')
def next_breakpoint_id(self):
self._next_breakpoint_id += 1
return self._next_breakpoint_id
def next_seq(self):
self._sequence += 2
return self._sequence
def wait_for_new_thread(self):
i = 0
# wait for hit breakpoint
while not '<xml><thread name="' in self.reader_thread.last_received or '<xml><thread name="pydevd.' in self.reader_thread.last_received:
i += 1
time.sleep(1)
if i >= 15:
raise AssertionError('After %s seconds, a thread was not created.' % i)
# we have something like <xml><thread name="MainThread" id="12103472" /></xml>
splitted = self.reader_thread.last_received.split('"')
thread_id = splitted[3]
return thread_id
def wait_for_breakpoint_hit(self, reason='111', get_line=False, get_name=False):
'''
108 is over
109 is return
111 is breakpoint
'''
self.log.append('Start: wait_for_breakpoint_hit')
i = 0
# wait for hit breakpoint
last = self.reader_thread.last_received
while not ('stop_reason="%s"' % reason) in last:
i += 1
time.sleep(1)
last = self.reader_thread.last_received
if i >= 10:
raise AssertionError('After %s seconds, a break with reason: %s was not hit. Found: %s' % \
(i, reason, last))
# we have something like <xml><thread id="12152656" stop_reason="111"><frame id="12453120" name="encode" ...
splitted = last.split('"')
thread_id = splitted[1]
frameId = splitted[7]
name = splitted[9]
if get_line:
self.log.append('End(0): wait_for_breakpoint_hit: %s' % (last,))
try:
if not get_name:
return thread_id, frameId, int(splitted[13])
else:
return thread_id, frameId, int(splitted[13]), name
except:
raise AssertionError('Error with: %s, %s, %s.\nLast: %s.\n\nAll: %s\n\nSplitted: %s' % (
thread_id, frameId, splitted[13], last, '\n'.join(self.reader_thread.all_received), splitted))
self.log.append('End(1): wait_for_breakpoint_hit: %s' % (last,))
if not get_name:
return thread_id, frameId
else:
return thread_id, frameId, name
def wait_for_custom_operation(self, expected):
i = 0
# wait for custom operation response, the response is double encoded
expectedEncoded = quote(quote_plus(expected))
while not expectedEncoded in self.reader_thread.last_received:
i += 1
time.sleep(1)
if i >= 10:
raise AssertionError('After %s seconds, the custom operation not received. Last found:\n%s\nExpected (encoded)\n%s' %
(i, self.reader_thread.last_received, expectedEncoded))
return True
def wait_for_evaluation(self, expected):
return self._wait_for(expected, 'the expected evaluation was not found')
def wait_for_vars(self, expected):
i = 0
        # wait for the expected vars to show up
while not expected in self.reader_thread.last_received:
i += 1
time.sleep(1)
if i >= 10:
raise AssertionError('After %s seconds, the vars were not found. Last found:\n%s' %
(i, self.reader_thread.last_received))
return True
def wait_for_var(self, expected):
self._wait_for(expected, 'the var was not found')
def _wait_for(self, expected, error_msg):
'''
:param expected:
If a list we'll work with any of the choices.
'''
if not isinstance(expected, (list, tuple)):
expected = [expected]
i = 0
found = False
while not found:
last = self.reader_thread.last_received
for e in expected:
if e in last:
found = True
break
last = unquote_plus(last)
for e in expected:
if e in last:
found = True
break
# We actually quote 2 times on the backend...
last = unquote_plus(last)
for e in expected:
if e in last:
found = True
break
if found:
break
i += 1
time.sleep(1)
if i >= 10:
raise AssertionError('After %s seconds, %s. Last found:\n%s' %
(i, error_msg, last))
return True
def wait_for_multiple_vars(self, expected_vars):
i = 0
        # wait for all the expected vars to show up
while True:
for expected in expected_vars:
if expected not in self.reader_thread.last_received:
break # Break out of loop (and don't get to else)
else:
return True
i += 1
time.sleep(1)
if i >= 10:
raise AssertionError('After %s seconds, the vars were not found. Last found:\n%s' %
(i, self.reader_thread.last_received))
return True
def write_make_initial_run(self):
self.write("101\t%s\t" % self.next_seq())
self.log.append('write_make_initial_run')
def write_version(self):
self.write("501\t%s\t1.0\tWINDOWS\tID" % self.next_seq())
def get_main_filename(self):
return self.TEST_FILE
def write_add_breakpoint(self, line, func):
'''
@param line: starts at 1
'''
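        # The command written below is tab-separated:
        #   111  <seq>  <breakpoint_id>  <type>  <file>  <line>  <func>  None  None
        # where 'python-line' is used as the type; the two trailing None values are
        # presumably the condition and expression fields.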
breakpoint_id = self.next_breakpoint_id()
self.write("111\t%s\t%s\t%s\t%s\t%s\t%s\tNone\tNone" % (self.next_seq(), breakpoint_id, 'python-line', self.get_main_filename(), line, func))
self.log.append('write_add_breakpoint: %s line: %s func: %s' % (breakpoint_id, line, func))
return breakpoint_id
def write_add_exception_breakpoint(self, exception):
self.write("122\t%s\t%s" % (self.next_seq(), exception))
self.log.append('write_add_exception_breakpoint: %s' % (exception,))
def write_remove_breakpoint(self, breakpoint_id):
self.write("112\t%s\t%s\t%s\t%s" % (self.next_seq(), 'python-line', self.get_main_filename(), breakpoint_id))
def write_change_variable(self, thread_id, frame_id, varname, value):
self.write("117\t%s\t%s\t%s\t%s\t%s\t%s" % (self.next_seq(), thread_id, frame_id, 'FRAME', varname, value))
def write_get_frame(self, thread_id, frameId):
self.write("114\t%s\t%s\t%s\tFRAME" % (self.next_seq(), thread_id, frameId))
self.log.append('write_get_frame')
def write_get_variable(self, thread_id, frameId, var_attrs):
self.write("110\t%s\t%s\t%s\tFRAME\t%s" % (self.next_seq(), thread_id, frameId, var_attrs))
def write_step_over(self, thread_id):
self.write("108\t%s\t%s" % (self.next_seq(), thread_id,))
def write_step_in(self, thread_id):
self.write("107\t%s\t%s" % (self.next_seq(), thread_id,))
def write_step_return(self, thread_id):
self.write("109\t%s\t%s" % (self.next_seq(), thread_id,))
def write_suspend_thread(self, thread_id):
self.write("105\t%s\t%s" % (self.next_seq(), thread_id,))
def write_run_thread(self, thread_id):
self.log.append('write_run_thread')
self.write("106\t%s\t%s" % (self.next_seq(), thread_id,))
def write_kill_thread(self, thread_id):
self.write("104\t%s\t%s" % (self.next_seq(), thread_id,))
def write_set_next_statement(self, thread_id, line, func_name):
self.write("%s\t%s\t%s\t%s\t%s" % (CMD_SET_NEXT_STATEMENT, self.next_seq(), thread_id, line, func_name,))
def write_debug_console_expression(self, locator):
self.write("%s\t%s\t%s" % (CMD_EVALUATE_CONSOLE_EXPRESSION, self.next_seq(), locator))
def write_custom_operation(self, locator, style, codeOrFile, operation_fn_name):
self.write("%s\t%s\t%s||%s\t%s\t%s" % (CMD_RUN_CUSTOM_OPERATION, self.next_seq(), locator, style, codeOrFile, operation_fn_name))
def write_evaluate_expression(self, locator, expression):
self.write("113\t%s\t%s\t%s\t1" % (self.next_seq(), locator, expression))
def write_enable_dont_trace(self, enable):
if enable:
enable = 'true'
else:
enable = 'false'
self.write("%s\t%s\t%s" % (CMD_ENABLE_DONT_TRACE, self.next_seq(), enable))
def _get_debugger_test_file(filename):
try:
rPath = os.path.realpath # @UndefinedVariable
except:
# jython does not support os.path.realpath
# realpath is a no-op on systems without islink support
rPath = os.path.abspath
return os.path.normcase(rPath(os.path.join(os.path.dirname(__file__), filename)))
def get_free_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((pydev_localhost.get_localhost(), 0))
_, port = s.getsockname()
s.close()
return port
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hmac
import os
import sys
from io import BytesIO
from hashlib import sha1
import mock
from mock import Mock
from mock import PropertyMock
import libcloud.utils.files # NOQA: F401
from libcloud.utils.py3 import ET
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import parse_qs
from libcloud.utils.py3 import StringIO
from libcloud.utils.py3 import PY3
from libcloud.utils.files import exhaust_iterator
from libcloud.common.types import InvalidCredsError
from libcloud.common.types import LibcloudError, MalformedResponseError
from libcloud.storage.base import Container, Object
from libcloud.storage.types import ContainerDoesNotExistError
from libcloud.storage.types import ContainerError
from libcloud.storage.types import ContainerIsNotEmptyError
from libcloud.storage.types import InvalidContainerNameError
from libcloud.storage.types import ObjectDoesNotExistError
from libcloud.storage.types import ObjectHashMismatchError
from libcloud.storage.drivers.s3 import BaseS3Connection, S3SignatureV4Connection
from libcloud.storage.drivers.s3 import S3StorageDriver, S3USWestStorageDriver
from libcloud.storage.drivers.s3 import CHUNK_SIZE
from libcloud.utils.py3 import b
from libcloud.test import MockHttp # pylint: disable-msg=E0611 # noqa
from libcloud.test import unittest, make_response, generate_random_data
from libcloud.test.file_fixtures import StorageFileFixtures # pylint: disable-msg=E0611
from libcloud.test.secrets import STORAGE_S3_PARAMS
from libcloud.test.storage.base import BaseRangeDownloadMockHttp
class S3MockHttp(BaseRangeDownloadMockHttp, unittest.TestCase):
fixtures = StorageFileFixtures("s3")
base_headers = {}
def _UNAUTHORIZED(self, method, url, body, headers):
return (
httplib.UNAUTHORIZED,
"",
self.base_headers,
httplib.responses[httplib.OK],
)
def _DIFFERENT_REGION(self, method, url, body, headers):
return (
httplib.MOVED_PERMANENTLY,
"",
self.base_headers,
httplib.responses[httplib.OK],
)
def _list_containers_EMPTY(self, method, url, body, headers):
body = self.fixtures.load("list_containers_empty.xml")
return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK])
def _list_containers_TOKEN(self, method, url, body, headers):
if "x-amz-security-token" in headers:
assert headers["x-amz-security-token"] == "asdf"
body = self.fixtures.load("list_containers_empty.xml")
return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK])
def _list_containers(self, method, url, body, headers):
body = self.fixtures.load("list_containers.xml")
return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK])
def _test_container_EMPTY(self, method, url, body, headers):
body = self.fixtures.load("list_container_objects_empty.xml")
return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK])
def _test_container(self, method, url, body, headers):
body = self.fixtures.load("list_container_objects.xml")
return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK])
def _test_container_ITERATOR(self, method, url, body, headers):
if url.find("3.zip") == -1:
# First part of the response (first 3 objects)
file_name = "list_container_objects_not_exhausted1.xml"
else:
file_name = "list_container_objects_not_exhausted2.xml"
body = self.fixtures.load(file_name)
return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK])
def _test2_get_object(self, method, url, body, headers):
body = self.fixtures.load("list_container_objects.xml")
return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK])
def _test2_test_get_object(self, method, url, body, headers):
# test_get_object_success
body = self.fixtures.load("list_containers.xml")
headers = {
"content-type": "application/zip",
"etag": '"e31208wqsdoj329jd"',
"x-amz-meta-rabbits": "monkeys",
"content-length": "12345",
"last-modified": "Thu, 13 Sep 2012 07:13:22 GMT",
}
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _test2_get_object_no_content_length(self, method, url, body, headers):
# test_get_object_unable_to_determine_object_size
body = self.fixtures.load("list_containers.xml")
headers = {
"content-type": "application/zip",
"etag": '"e31208wqsdoj329jd"',
"x-amz-meta-rabbits": "monkeys",
"last-modified": "Thu, 13 Sep 2012 07:13:22 GMT",
}
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _test2_test_get_object_no_content_length(self, method, url, body, headers):
# test_get_object_unable_to_determine_object_size
body = self.fixtures.load("list_containers.xml")
headers = {
"content-type": "application/zip",
"etag": '"e31208wqsdoj329jd"',
"x-amz-meta-rabbits": "monkeys",
"last-modified": "Thu, 13 Sep 2012 07:13:22 GMT",
}
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _new_container_INVALID_NAME(self, method, url, body, headers):
# test_create_container
return (httplib.BAD_REQUEST, body, headers, httplib.responses[httplib.OK])
def _new_container_ALREADY_EXISTS(self, method, url, body, headers):
# test_create_container
return (httplib.CONFLICT, body, headers, httplib.responses[httplib.OK])
def _new_container(self, method, url, body, headers):
# test_create_container, test_delete_container
if method == "PUT":
status = httplib.OK
elif method == "DELETE":
status = httplib.NO_CONTENT
return (status, body, headers, httplib.responses[httplib.OK])
def _new_container_DOESNT_EXIST(self, method, url, body, headers):
# test_delete_container
return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.OK])
def _new_container_NOT_EMPTY(self, method, url, body, headers):
# test_delete_container
return (httplib.CONFLICT, body, headers, httplib.responses[httplib.OK])
def _test1_get_container(self, method, url, body, headers):
body = self.fixtures.load("list_container_objects.xml")
return (httplib.OK, body, self.base_headers, httplib.responses[httplib.OK])
def _container1_get_container(self, method, url, body, headers):
return (
httplib.NOT_FOUND,
"",
self.base_headers,
httplib.responses[httplib.NOT_FOUND],
)
def _test_inexistent_get_object(self, method, url, body, headers):
return (
httplib.NOT_FOUND,
"",
self.base_headers,
httplib.responses[httplib.NOT_FOUND],
)
def _foo_bar_container(self, method, url, body, headers):
# test_delete_container
return (httplib.NO_CONTENT, body, headers, httplib.responses[httplib.OK])
def _foo_bar_container_NOT_FOUND(self, method, url, body, headers):
# test_delete_container_not_found
return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.OK])
def _foo_bar_container_foo_bar_object_NOT_FOUND(self, method, url, body, headers):
# test_delete_object_not_found
return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.OK])
def _foo_bar_container_foo_bar_object_DELETE(self, method, url, body, headers):
# test_delete_object
return (httplib.NO_CONTENT, body, headers, httplib.responses[httplib.OK])
def _foo_bar_container_foo_test_stream_data(self, method, url, body, headers):
# test_upload_object_via_stream
body = ""
headers = {"etag": '"0cc175b9c0f1b6a831c399e269772661"'}
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _foo_bar_container_foo_test_stream_data_MULTIPART(
self, method, url, body, headers
):
if method == "POST":
if "uploadId" in url:
# Complete multipart request
body = self.fixtures.load("complete_multipart.xml")
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
else:
# Initiate multipart request
body = self.fixtures.load("initiate_multipart.xml")
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
elif method == "DELETE":
# Abort multipart request
return (
httplib.NO_CONTENT,
"",
headers,
httplib.responses[httplib.NO_CONTENT],
)
else:
# Upload chunk multipart request
headers = {"etag": '"0cc175b9c0f1b6a831c399e269772661"'}
return (httplib.OK, "", headers, httplib.responses[httplib.OK])
def _foo_bar_container_LIST_MULTIPART(self, method, url, body, headers):
query_string = urlparse.urlsplit(url).query
query = parse_qs(query_string)
if "key-marker" not in query:
body = self.fixtures.load("list_multipart_1.xml")
else:
body = self.fixtures.load("list_multipart_2.xml")
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _foo_bar_container_my_divisor_LIST_MULTIPART(self, method, url, body, headers):
body = ""
return (
httplib.NO_CONTENT,
body,
headers,
httplib.responses[httplib.NO_CONTENT],
)
def _foo_bar_container_my_movie_m2ts_LIST_MULTIPART(
self, method, url, body, headers
):
body = ""
return (
httplib.NO_CONTENT,
body,
headers,
httplib.responses[httplib.NO_CONTENT],
)
def parse_body(self):
if len(self.body) == 0 and not self.parse_zero_length_body:
return self.body
try:
try:
body = ET.XML(self.body)
except ValueError:
                # lxml wants bytes and the tests are basically hard-coded to str
body = ET.XML(self.body.encode("utf-8"))
except Exception:
raise MalformedResponseError(
"Failed to parse XML", body=self.body, driver=self.connection.driver
)
return body
def _foo_bar_container_foo_bar_object(self, method, url, body, headers):
# test_download_object_success
body = generate_random_data(1000)
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _foo_bar_container_foo_bar_object_range(self, method, url, body, headers):
# test_download_object_range_success
body = "0123456789123456789"
self.assertTrue("Range" in headers)
self.assertEqual(headers["Range"], "bytes=5-6")
start_bytes, end_bytes = self._get_start_and_end_bytes_from_range_str(
headers["Range"], body
)
return (
httplib.PARTIAL_CONTENT,
body[start_bytes : end_bytes + 1],
headers,
httplib.responses[httplib.PARTIAL_CONTENT],
)
def _foo_bar_container_foo_bar_object_range_stream(
self, method, url, body, headers
):
# test_download_object_range_as_stream_success
body = "0123456789123456789"
self.assertTrue("Range" in headers)
self.assertEqual(headers["Range"], "bytes=4-6")
start_bytes, end_bytes = self._get_start_and_end_bytes_from_range_str(
headers["Range"], body
)
return (
httplib.PARTIAL_CONTENT,
body[start_bytes : end_bytes + 1],
headers,
httplib.responses[httplib.PARTIAL_CONTENT],
)
def _foo_bar_container_foo_bar_object_NO_BUFFER(self, method, url, body, headers):
# test_download_object_data_is_not_buffered_in_memory
body = generate_random_data(1000)
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _foo_bar_container_foo_test_upload(self, method, url, body, headers):
# test_upload_object_success
body = ""
headers = {"etag": '"0cc175b9c0f1b6a831c399e269772661"'}
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
def _foo_bar_container_foo_bar_object_INVALID_SIZE(
self, method, url, body, headers
):
# test_upload_object_invalid_file_size
body = ""
return (httplib.OK, body, headers, httplib.responses[httplib.OK])
class S3Tests(unittest.TestCase):
driver_type = S3StorageDriver
driver_args = STORAGE_S3_PARAMS
mock_response_klass = S3MockHttp
@classmethod
def create_driver(self):
return self.driver_type(*self.driver_args)
def setUp(self):
self.driver_type.connectionCls.conn_class = self.mock_response_klass
self.mock_response_klass.type = None
self.driver = self.create_driver()
self._file_path = os.path.abspath(__file__) + ".temp"
def tearDown(self):
self._remove_test_file()
def _remove_test_file(self):
try:
os.unlink(self._file_path)
except OSError:
pass
def test_clean_object_name(self):
# Ensure ~ is not URL encoded
# See https://github.com/apache/libcloud/issues/1452 for details
cleaned = self.driver._clean_object_name(name="valid")
self.assertEqual(cleaned, "valid")
cleaned = self.driver._clean_object_name(name="valid/~")
self.assertEqual(cleaned, "valid/~")
cleaned = self.driver._clean_object_name(name="valid/~%foo ")
self.assertEqual(cleaned, "valid/~%25foo%20")
def test_invalid_credentials(self):
self.mock_response_klass.type = "UNAUTHORIZED"
try:
self.driver.list_containers()
except InvalidCredsError as e:
self.assertEqual(True, isinstance(e, InvalidCredsError))
else:
self.fail("Exception was not thrown")
def test_token(self):
self.mock_response_klass.type = "list_containers_TOKEN"
self.driver = self.driver_type(*self.driver_args, token="asdf")
self.driver.list_containers()
def test_signature(self):
secret_key = "ssssh!"
sig = BaseS3Connection.get_auth_signature(
method="GET",
headers={"foo": "bar", "content-type": "TYPE!", "x-aws-test": "test_value"},
params={"hello": "world"},
expires=None,
secret_key=secret_key,
path="/",
vendor_prefix="x-aws",
)
string_to_sign = "GET\n\nTYPE!\n\nx-aws-test:test_value\n/"
b64_hmac = base64.b64encode(
hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest()
)
expected_sig = b64_hmac.decode("utf-8")
self.assertEqual(sig, expected_sig)
def test_bucket_is_located_in_different_region(self):
self.mock_response_klass.type = "DIFFERENT_REGION"
try:
self.driver.list_containers()
except LibcloudError:
pass
else:
self.fail("Exception was not thrown")
def test_list_containers_empty(self):
self.mock_response_klass.type = "list_containers_EMPTY"
containers = self.driver.list_containers()
self.assertEqual(len(containers), 0)
def test_list_containers_success(self):
self.mock_response_klass.type = "list_containers"
containers = self.driver.list_containers()
self.assertEqual(len(containers), 2)
self.assertTrue("creation_date" in containers[1].extra)
def test_list_container_objects_empty(self):
self.mock_response_klass.type = "EMPTY"
container = Container(name="test_container", extra={}, driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 0)
def test_list_container_objects_success(self):
self.mock_response_klass.type = None
container = Container(name="test_container", extra={}, driver=self.driver)
objects = self.driver.list_container_objects(container=container)
self.assertEqual(len(objects), 1)
obj = [o for o in objects if o.name == "1.zip"][0]
self.assertEqual(obj.hash, "4397da7a7649e8085de9916c240e8166")
self.assertEqual(obj.size, 1234567)
self.assertEqual(obj.container.name, "test_container")
self.assertEqual(obj.extra["last_modified"], "2011-04-09T19:05:18.000Z")
self.assertTrue("owner" in obj.meta_data)
def test_list_container_objects_iterator_has_more(self):
self.mock_response_klass.type = "ITERATOR"
container = Container(name="test_container", extra={}, driver=self.driver)
objects = self.driver.list_container_objects(container=container)
obj = [o for o in objects if o.name == "1.zip"][0]
self.assertEqual(obj.hash, "4397da7a7649e8085de9916c240e8166")
self.assertEqual(obj.size, 1234567)
self.assertEqual(obj.container.name, "test_container")
self.assertTrue(obj in objects)
self.assertEqual(len(objects), 5)
def test_list_container_objects_with_prefix(self):
self.mock_response_klass.type = None
container = Container(name="test_container", extra={}, driver=self.driver)
objects = self.driver.list_container_objects(
container=container, prefix="test_prefix"
)
self.assertEqual(len(objects), 1)
obj = [o for o in objects if o.name == "1.zip"][0]
self.assertEqual(obj.hash, "4397da7a7649e8085de9916c240e8166")
self.assertEqual(obj.size, 1234567)
self.assertEqual(obj.container.name, "test_container")
self.assertTrue("owner" in obj.meta_data)
def test_get_container_doesnt_exist(self):
self.mock_response_klass.type = "get_container"
try:
self.driver.get_container(container_name="container1")
except ContainerDoesNotExistError:
pass
else:
self.fail("Exception was not thrown")
def test_get_container_success(self):
self.mock_response_klass.type = "get_container"
container = self.driver.get_container(container_name="test1")
self.assertTrue(container.name, "test1")
def test_get_object_cdn_url(self):
self.mock_response_klass.type = "get_object"
obj = self.driver.get_object(container_name="test2", object_name="test")
# cdn urls can only be generated using a V4 connection
if issubclass(self.driver.connectionCls, S3SignatureV4Connection):
cdn_url = self.driver.get_object_cdn_url(obj, ex_expiry=12)
url = urlparse.urlparse(cdn_url)
query = urlparse.parse_qs(url.query)
self.assertEqual(len(query["X-Amz-Signature"]), 1)
self.assertGreater(len(query["X-Amz-Signature"][0]), 0)
self.assertEqual(query["X-Amz-Expires"], ["43200"])
else:
with self.assertRaises(NotImplementedError):
self.driver.get_object_cdn_url(obj)
def test_get_object_container_doesnt_exist(self):
# This method makes two requests which makes mocking the response a bit
# trickier
self.mock_response_klass.type = "get_object"
try:
self.driver.get_object(container_name="test-inexistent", object_name="test")
except ContainerDoesNotExistError:
pass
else:
self.fail("Exception was not thrown")
def test_get_object_success(self):
# This method makes two requests which makes mocking the response a bit
# trickier
self.mock_response_klass.type = "get_object"
obj = self.driver.get_object(container_name="test2", object_name="test")
self.assertEqual(obj.name, "test")
self.assertEqual(obj.container.name, "test2")
self.assertEqual(obj.size, 12345)
self.assertEqual(obj.hash, "e31208wqsdoj329jd")
self.assertEqual(obj.extra["last_modified"], "Thu, 13 Sep 2012 07:13:22 GMT")
self.assertEqual(obj.extra["content_type"], "application/zip")
self.assertEqual(obj.meta_data["rabbits"], "monkeys")
def test_get_object_unable_to_determine_object_size(self):
self.mock_response_klass.type = "get_object_no_content_length"
expected_msg = "Can not deduce object size from headers"
self.assertRaisesRegex(
KeyError,
expected_msg,
self.driver.get_object,
container_name="test2",
object_name="test",
)
def test_create_container_bad_request(self):
# invalid container name, returns a 400 bad request
self.mock_response_klass.type = "INVALID_NAME"
try:
self.driver.create_container(container_name="new_container")
except ContainerError:
pass
else:
self.fail("Exception was not thrown")
def test_create_container_already_exists(self):
# container with this name already exists
self.mock_response_klass.type = "ALREADY_EXISTS"
try:
self.driver.create_container(container_name="new-container")
except InvalidContainerNameError:
pass
else:
self.fail("Exception was not thrown")
def test_create_container_success(self):
# success
self.mock_response_klass.type = None
name = "new_container"
container = self.driver.create_container(container_name=name)
self.assertEqual(container.name, name)
def test_delete_container_doesnt_exist(self):
container = Container(name="new_container", extra=None, driver=self.driver)
self.mock_response_klass.type = "DOESNT_EXIST"
try:
self.driver.delete_container(container=container)
except ContainerDoesNotExistError:
pass
else:
self.fail("Exception was not thrown")
def test_delete_container_not_empty(self):
container = Container(name="new_container", extra=None, driver=self.driver)
self.mock_response_klass.type = "NOT_EMPTY"
try:
self.driver.delete_container(container=container)
except ContainerIsNotEmptyError:
pass
else:
self.fail("Exception was not thrown")
# success
self.mock_response_klass.type = None
self.assertTrue(self.driver.delete_container(container=container))
def test_delete_container_not_found(self):
self.mock_response_klass.type = "NOT_FOUND"
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
try:
self.driver.delete_container(container=container)
except ContainerDoesNotExistError:
pass
else:
self.fail("Container does not exist but an exception was not" + "thrown")
def test_delete_container_success(self):
self.mock_response_klass.type = None
container = Container(name="new_container", extra=None, driver=self.driver)
self.assertTrue(self.driver.delete_container(container=container))
def test_download_object_success(self):
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
obj = Object(
name="foo_bar_object",
size=1000,
hash=None,
extra={},
container=container,
meta_data=None,
driver=self.driver_type,
)
destination_path = self._file_path
result = self.driver.download_object(
obj=obj,
destination_path=destination_path,
overwrite_existing=True,
delete_on_failure=True,
)
self.assertTrue(result)
def test_download_object_range_success(self):
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
obj = Object(
name="foo_bar_object_range",
size=19,
hash=None,
extra={},
container=container,
meta_data=None,
driver=self.driver_type,
)
destination_path = self._file_path
result = self.driver.download_object_range(
obj=obj,
destination_path=destination_path,
start_bytes=5,
end_bytes=7,
overwrite_existing=True,
delete_on_failure=True,
)
self.assertTrue(result)
with open(self._file_path, "r") as fp:
content = fp.read()
self.assertEqual(content, "56")
def test_download_object_range_as_stream_success(self):
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
obj = Object(
name="foo_bar_object_range_stream",
size=19,
hash=None,
extra={},
container=container,
meta_data=None,
driver=self.driver_type,
)
iterator = self.driver.download_object_range_as_stream(
obj=obj, start_bytes=4, end_bytes=7
)
content = exhaust_iterator(iterator)
self.assertEqual(content, b"456")
def test_download_object_data_is_not_buffered_in_memory(self):
        # Test case which verifies that the response.body attribute is not accessed
        # and, as such, the whole response body is not buffered into RAM.
        # If the content is consumed and the response.content attribute is accessed,
        # an exception will be thrown and the test will fail.
mock_response = Mock(name="mock response")
mock_response.headers = {}
mock_response.status_code = 200
msg = '"content" attribute was accessed but it shouldn\'t have been'
type(mock_response).content = PropertyMock(
name="mock content attribute", side_effect=Exception(msg)
)
mock_response.iter_content.return_value = StringIO("a" * 1000)
self.driver.connection.connection.getresponse = Mock()
self.driver.connection.connection.getresponse.return_value = mock_response
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
obj = Object(
name="foo_bar_object_NO_BUFFER",
size=1000,
hash=None,
extra={},
container=container,
meta_data=None,
driver=self.driver_type,
)
destination_path = self._file_path
result = self.driver.download_object(
obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True,
)
self.assertTrue(result)
def test_download_object_as_stream_data_is_not_buffered_in_memory(self):
        # Test case which verifies that the response.response attribute is not accessed
        # and, as such, the whole response body is not buffered into RAM.
        # If the content is consumed and the response.content attribute is accessed,
        # an exception will be thrown and the test will fail.
mock_response = Mock(name="mock response")
mock_response.headers = {}
mock_response.status = 200
msg1 = '"response" attribute was accessed but it shouldn\'t have been'
msg2 = '"content" attribute was accessed but it shouldn\'t have been'
type(mock_response).response = PropertyMock(
name="mock response attribute", side_effect=Exception(msg1)
)
type(mock_response).content = PropertyMock(
name="mock content attribute", side_effect=Exception(msg2)
)
mock_response.iter_content.return_value = StringIO("a" * 1000)
self.driver.connection.request = Mock()
self.driver.connection.request.return_value = mock_response
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
obj = Object(
name="foo_bar_object_NO_BUFFER",
size=1000,
hash=None,
extra={},
container=container,
meta_data=None,
driver=self.driver_type,
)
result = self.driver.download_object_as_stream(obj=obj)
result = exhaust_iterator(result)
if PY3:
result = result.decode("utf-8")
self.assertEqual(result, "a" * 1000)
def test_download_object_invalid_file_size(self):
self.mock_response_klass.type = "INVALID_SIZE"
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
obj = Object(
name="foo_bar_object",
size=1000,
hash=None,
extra={},
container=container,
meta_data=None,
driver=self.driver_type,
)
destination_path = self._file_path
result = self.driver.download_object(
obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True,
)
self.assertFalse(result)
def test_download_object_invalid_file_already_exists(self):
self.mock_response_klass.type = "INVALID_SIZE"
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
obj = Object(
name="foo_bar_object",
size=1000,
hash=None,
extra={},
container=container,
meta_data=None,
driver=self.driver_type,
)
destination_path = os.path.abspath(__file__)
try:
self.driver.download_object(
obj=obj,
destination_path=destination_path,
overwrite_existing=False,
delete_on_failure=True,
)
except LibcloudError:
pass
else:
self.fail("Exception was not thrown")
@unittest.skip("The MockHttp classes cannot support this test at present")
def test_download_object_as_stream_success(self):
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
obj = Object(
name="foo_bar_object",
size=1000,
hash=None,
extra={},
container=container,
meta_data=None,
driver=self.driver_type,
)
def mock_get_object(
self, obj, callback, callback_kwargs, response, success_status_code=None
):
return response._response.iter_content(1024)
old_func = self.driver_type._get_object
self.driver_type._get_object = mock_get_object
try:
stream = self.driver.download_object_as_stream(obj=obj, chunk_size=1024)
self.assertTrue(hasattr(stream, "__iter__"))
finally:
self.driver_type._get_object = old_func
def test_upload_object_invalid_ex_storage_class(self):
        # An invalid ex_storage_class value is rejected with a ValueError
file_path = os.path.abspath(__file__)
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
object_name = "foo_test_upload"
try:
self.driver.upload_object(
file_path=file_path,
container=container,
object_name=object_name,
verify_hash=True,
ex_storage_class="invalid-class",
)
except ValueError as e:
self.assertTrue(str(e).lower().find("invalid storage class") != -1)
else:
self.fail("Exception was not thrown")
def test_upload_object_invalid_hash1(self):
        # The hash computed for the uploaded data does not match the ETag
        # returned in the response, so ObjectHashMismatchError is raised
def upload_file(
self,
object_name=None,
content_type=None,
request_path=None,
request_method=None,
headers=None,
file_path=None,
stream=None,
):
headers = {"etag": '"foobar"'}
return {
"response": make_response(200, headers=headers),
"bytes_transferred": 1000,
"data_hash": "hash343hhash89h932439jsaa89",
}
old_func = self.driver_type._upload_object
self.driver_type._upload_object = upload_file
file_path = os.path.abspath(__file__)
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
object_name = "foo_test_upload"
try:
self.driver.upload_object(
file_path=file_path,
container=container,
object_name=object_name,
verify_hash=True,
)
except ObjectHashMismatchError:
pass
else:
self.fail("Invalid hash was returned but an exception was not thrown")
finally:
self.driver_type._upload_object = old_func
def test_upload_object_invalid_hash2(self):
# Invalid hash is detected when comparing hash provided in the response
# ETag header
def upload_file(
self,
object_name=None,
content_type=None,
request_path=None,
request_method=None,
headers=None,
file_path=None,
stream=None,
):
headers = {"etag": '"hash343hhash89h932439jsaa89"'}
return {
"response": make_response(200, headers=headers),
"bytes_transferred": 1000,
"data_hash": "0cc175b9c0f1b6a831c399e269772661",
}
old_func = self.driver_type._upload_object
self.driver_type._upload_object = upload_file
file_path = os.path.abspath(__file__)
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
object_name = "foo_test_upload"
try:
self.driver.upload_object(
file_path=file_path,
container=container,
object_name=object_name,
verify_hash=True,
)
except ObjectHashMismatchError:
pass
else:
self.fail("Invalid hash was returned but an exception was not thrown")
finally:
self.driver_type._upload_object = old_func
def test_upload_object_invalid_hash_kms_encryption(self):
# Hash check should be skipped when AWS KMS server side encryption is
# used
def upload_file(
self,
object_name=None,
content_type=None,
request_path=None,
request_method=None,
headers=None,
file_path=None,
stream=None,
):
headers = {"etag": "blahblah", "x-amz-server-side-encryption": "aws:kms"}
return {
"response": make_response(200, headers=headers),
"bytes_transferred": 1000,
"data_hash": "hash343hhash89h932439jsaa81",
}
old_func = self.driver_type._upload_object
self.driver_type._upload_object = upload_file
file_path = os.path.abspath(__file__)
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
object_name = "foo_test_upload"
try:
self.driver.upload_object(
file_path=file_path,
container=container,
object_name=object_name,
verify_hash=True,
)
finally:
self.driver_type._upload_object = old_func
def test_upload_object_success(self):
def upload_file(
self,
object_name=None,
content_type=None,
request_path=None,
request_method=None,
headers=None,
file_path=None,
stream=None,
):
return {
"response": make_response(
200, headers={"etag": "0cc175b9c0f1b6a831c399e269772661"}
),
"bytes_transferred": 1000,
"data_hash": "0cc175b9c0f1b6a831c399e269772661",
}
self.mock_response_klass.type = None
old_func = self.driver_type._upload_object
self.driver_type._upload_object = upload_file
file_path = os.path.abspath(__file__)
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
object_name = "foo_test_upload"
extra = {"meta_data": {"some-value": "foobar"}}
obj = self.driver.upload_object(
file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=True,
)
self.assertEqual(obj.name, "foo_test_upload")
self.assertEqual(obj.size, 1000)
self.assertTrue("some-value" in obj.meta_data)
self.driver_type._upload_object = old_func
def test_upload_object_with_acl(self):
def upload_file(
self,
object_name=None,
content_type=None,
request_path=None,
request_method=None,
headers=None,
file_path=None,
stream=None,
):
headers = {"etag": "0cc175b9c0f1b6a831c399e269772661"}
return {
"response": make_response(200, headers=headers),
"bytes_transferred": 1000,
"data_hash": "0cc175b9c0f1b6a831c399e269772661",
}
self.mock_response_klass.type = None
old_func = self.driver_type._upload_object
self.driver_type._upload_object = upload_file
file_path = os.path.abspath(__file__)
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
object_name = "foo_test_upload"
extra = {"acl": "public-read"}
obj = self.driver.upload_object(
file_path=file_path,
container=container,
object_name=object_name,
extra=extra,
verify_hash=True,
)
self.assertEqual(obj.name, "foo_test_upload")
self.assertEqual(obj.size, 1000)
self.assertEqual(obj.extra["acl"], "public-read")
self.driver_type._upload_object = old_func
def test_upload_empty_object_via_stream(self):
if self.driver.supports_s3_multipart_upload:
self.mock_response_klass.type = "MULTIPART"
else:
self.mock_response_klass.type = None
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
object_name = "foo_test_stream_data"
iterator = BytesIO(b(""))
extra = {"content_type": "text/plain"}
obj = self.driver.upload_object_via_stream(
container=container, object_name=object_name, iterator=iterator, extra=extra
)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, 0)
def test_upload_small_object_via_stream(self):
if self.driver.supports_s3_multipart_upload:
self.mock_response_klass.type = "MULTIPART"
else:
self.mock_response_klass.type = None
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
object_name = "foo_test_stream_data"
iterator = BytesIO(b("234"))
extra = {"content_type": "text/plain"}
obj = self.driver.upload_object_via_stream(
container=container, object_name=object_name, iterator=iterator, extra=extra
)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, 3)
def test_upload_big_object_via_stream(self):
if self.driver.supports_s3_multipart_upload:
self.mock_response_klass.type = "MULTIPART"
else:
self.mock_response_klass.type = None
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
object_name = "foo_test_stream_data"
iterator = BytesIO(b("234" * CHUNK_SIZE))
extra = {"content_type": "text/plain"}
obj = self.driver.upload_object_via_stream(
container=container, object_name=object_name, iterator=iterator, extra=extra
)
self.assertEqual(obj.name, object_name)
self.assertEqual(obj.size, CHUNK_SIZE * 3)
def test_upload_object_via_stream_guess_file_mime_type(self):
if self.driver.supports_s3_multipart_upload:
self.mock_response_klass.type = "MULTIPART"
else:
self.mock_response_klass.type = None
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
object_name = "foo_test_stream_data"
iterator = BytesIO(b("234"))
with mock.patch(
"libcloud.utils.files.guess_file_mime_type", autospec=True
) as mock_guess_file_mime_type:
mock_guess_file_mime_type.return_value = ("application/zip", None)
self.driver.upload_object_via_stream(
container=container, object_name=object_name, iterator=iterator
)
mock_guess_file_mime_type.assert_called_with(object_name)
def test_upload_object_via_stream_abort(self):
if not self.driver.supports_s3_multipart_upload:
return
self.mock_response_klass.type = "MULTIPART"
def _faulty_iterator():
for i in range(0, 5):
yield str(i)
raise RuntimeError("Error in fetching data")
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
object_name = "foo_test_stream_data"
iterator = _faulty_iterator()
extra = {"content_type": "text/plain"}
try:
self.driver.upload_object_via_stream(
container=container,
object_name=object_name,
iterator=iterator,
extra=extra,
)
except Exception:
pass
return
def test_s3_list_multipart_uploads(self):
if not self.driver.supports_s3_multipart_upload:
return
self.mock_response_klass.type = "LIST_MULTIPART"
S3StorageDriver.RESPONSES_PER_REQUEST = 2
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
for upload in self.driver.ex_iterate_multipart_uploads(container):
self.assertNotEqual(upload.key, None)
self.assertNotEqual(upload.id, None)
self.assertNotEqual(upload.created_at, None)
self.assertNotEqual(upload.owner, None)
self.assertNotEqual(upload.initiator, None)
def test_s3_abort_multipart_uploads(self):
if not self.driver.supports_s3_multipart_upload:
return
self.mock_response_klass.type = "LIST_MULTIPART"
S3StorageDriver.RESPONSES_PER_REQUEST = 2
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
self.driver.ex_cleanup_all_multipart_uploads(container)
def test_delete_object_not_found(self):
self.mock_response_klass.type = "NOT_FOUND"
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
obj = Object(
name="foo_bar_object",
size=1234,
hash=None,
extra=None,
meta_data=None,
container=container,
driver=self.driver,
)
try:
self.driver.delete_object(obj=obj)
except ObjectDoesNotExistError:
pass
else:
self.fail("Exception was not thrown")
def test_delete_object_success(self):
self.mock_response_klass.type = "DELETE"
container = Container(name="foo_bar_container", extra={}, driver=self.driver)
obj = Object(
name="foo_bar_object",
size=1234,
hash=None,
extra=None,
meta_data=None,
container=container,
driver=self.driver,
)
result = self.driver.delete_object(obj=obj)
self.assertTrue(result)
def test_region_keyword_argument(self):
# Default region
driver = S3StorageDriver(*self.driver_args)
self.assertEqual(driver.region, "us-east-1")
self.assertEqual(driver.connection.host, "s3.amazonaws.com")
# Custom region
driver = S3StorageDriver(*self.driver_args, region="us-west-2")
self.assertEqual(driver.region, "us-west-2")
self.assertEqual(driver.connection.host, "s3-us-west-2.amazonaws.com")
# Verify class instance and class variables don't get mixed up
driver1 = S3StorageDriver(*self.driver_args, region="us-west-2")
self.assertEqual(driver1.region, "us-west-2")
self.assertEqual(driver1.connection.host, "s3-us-west-2.amazonaws.com")
driver2 = S3StorageDriver(*self.driver_args, region="ap-south-1")
self.assertEqual(driver2.region, "ap-south-1")
self.assertEqual(driver2.connection.host, "s3-ap-south-1.amazonaws.com")
self.assertEqual(driver1.region, "us-west-2")
self.assertEqual(driver1.connection.host, "s3-us-west-2.amazonaws.com")
# Test all supported regions
for region in S3StorageDriver.list_regions():
driver = S3StorageDriver(*self.driver_args, region=region)
self.assertEqual(driver.region, region)
# Invalid region
expected_msg = "Invalid or unsupported region: foo"
self.assertRaisesRegex(
ValueError, expected_msg, S3StorageDriver, *self.driver_args, region="foo"
)
        # host argument still takes precedence over region
driver3 = S3StorageDriver(
*self.driver_args, region="ap-south-1", host="host1.bar.com"
)
self.assertEqual(driver3.region, "ap-south-1")
self.assertEqual(driver3.connection.host, "host1.bar.com")
driver4 = S3StorageDriver(*self.driver_args, host="host2.bar.com")
self.assertEqual(driver4.connection.host, "host2.bar.com")
def test_deprecated_driver_class_per_region(self):
driver = S3USWestStorageDriver(*self.driver_args)
self.assertEqual(driver.region, "us-west-1")
if __name__ == "__main__":
sys.exit(unittest.main())
|
|
"""
homeassistant.components.proximity
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Component to monitor the proximity of devices to a particular zone and the
direction of travel.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/proximity/
"""
import logging
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_state_change
from homeassistant.util.location import distance
DEPENDENCIES = ['zone', 'device_tracker']
DOMAIN = 'proximity'
# Default tolerance
DEFAULT_TOLERANCE = 1
# Default zone
DEFAULT_PROXIMITY_ZONE = 'home'
# Entity attributes
ATTR_DIST_FROM = 'dist_to_zone'
ATTR_DIR_OF_TRAVEL = 'dir_of_travel'
ATTR_NEAREST = 'nearest'
_LOGGER = logging.getLogger(__name__)
def setup(hass, config): # pylint: disable=too-many-locals,too-many-statements
""" Get the zones and offsets from configuration.yaml. """
ignored_zones = []
if 'ignored_zones' in config[DOMAIN]:
for variable in config[DOMAIN]['ignored_zones']:
ignored_zones.append(variable)
# Get the devices from configuration.yaml
if 'devices' not in config[DOMAIN]:
_LOGGER.error('devices not found in config')
return False
proximity_devices = []
for variable in config[DOMAIN]['devices']:
proximity_devices.append(variable)
# Get the direction of travel tolerance from configuration.yaml
tolerance = config[DOMAIN].get('tolerance', DEFAULT_TOLERANCE)
# Get the zone to monitor proximity to from configuration.yaml
proximity_zone = config[DOMAIN].get('zone', DEFAULT_PROXIMITY_ZONE)
entity_id = DOMAIN + '.' + proximity_zone
proximity_zone = 'zone.' + proximity_zone
state = hass.states.get(proximity_zone)
zone_friendly_name = (state.name).lower()
# set the default values
dist_to_zone = 'not set'
dir_of_travel = 'not set'
nearest = 'not set'
proximity = Proximity(hass, zone_friendly_name, dist_to_zone,
dir_of_travel, nearest, ignored_zones,
proximity_devices, tolerance, proximity_zone)
proximity.entity_id = entity_id
proximity.update_ha_state()
# Main command to monitor proximity of devices
track_state_change(hass, proximity_devices,
proximity.check_proximity_state_change)
return True
class Proximity(Entity): # pylint: disable=too-many-instance-attributes
""" Represents a Proximity. """
def __init__(self, hass, zone_friendly_name, dist_to, dir_of_travel,
nearest, ignored_zones, proximity_devices, tolerance,
proximity_zone):
# pylint: disable=too-many-arguments
self.hass = hass
self.friendly_name = zone_friendly_name
self.dist_to = dist_to
self.dir_of_travel = dir_of_travel
self.nearest = nearest
self.ignored_zones = ignored_zones
self.proximity_devices = proximity_devices
self.tolerance = tolerance
self.proximity_zone = proximity_zone
@property
def name(self):
"""Return the name of the entity."""
return self.friendly_name
@property
def state(self):
""" Returns the state. """
return self.dist_to
@property
def unit_of_measurement(self):
""" Unit of measurement of this entity. """
return "km"
@property
def state_attributes(self):
""" Returns the state attributes. """
return {
ATTR_DIR_OF_TRAVEL: self.dir_of_travel,
ATTR_NEAREST: self.nearest,
}
def check_proximity_state_change(self, entity, old_state, new_state):
# pylint: disable=too-many-branches,too-many-statements,too-many-locals
""" Function to perform the proximity checking. """
entity_name = new_state.name
devices_to_calculate = False
devices_in_zone = ''
zone_state = self.hass.states.get(self.proximity_zone)
proximity_latitude = zone_state.attributes.get('latitude')
proximity_longitude = zone_state.attributes.get('longitude')
# Check for devices in the monitored zone
for device in self.proximity_devices:
device_state = self.hass.states.get(device)
if device_state.state not in self.ignored_zones:
devices_to_calculate = True
# Check the location of all devices
if (device_state.state).lower() == (self.friendly_name).lower():
device_friendly = device_state.name
if devices_in_zone != '':
devices_in_zone = devices_in_zone + ', '
devices_in_zone = devices_in_zone + device_friendly
# No-one to track so reset the entity
if not devices_to_calculate:
self.dist_to = 'not set'
self.dir_of_travel = 'not set'
self.nearest = 'not set'
self.update_ha_state()
return
# At least one device is in the monitored zone so update the entity
if devices_in_zone != '':
self.dist_to = 0
self.dir_of_travel = 'arrived'
self.nearest = devices_in_zone
self.update_ha_state()
return
# We can't check proximity because latitude and longitude don't exist
if 'latitude' not in new_state.attributes:
return
# Collect distances to the zone for all devices
distances_to_zone = {}
for device in self.proximity_devices:
# Ignore devices in an ignored zone
device_state = self.hass.states.get(device)
if device_state.state in self.ignored_zones:
continue
# Ignore devices if proximity cannot be calculated
if 'latitude' not in device_state.attributes:
continue
# Calculate the distance to the proximity zone
dist_to_zone = distance(proximity_latitude,
proximity_longitude,
device_state.attributes['latitude'],
device_state.attributes['longitude'])
# Add the device and distance to a dictionary
distances_to_zone[device] = round(dist_to_zone / 1000, 1)
# Loop through each of the distances collected and work out the closest
closest_device = ''
dist_to_zone = 1000000
for device in distances_to_zone:
if distances_to_zone[device] < dist_to_zone:
closest_device = device
dist_to_zone = distances_to_zone[device]
# If the closest device is one of the other devices
if closest_device != entity:
self.dist_to = round(distances_to_zone[closest_device])
self.dir_of_travel = 'unknown'
device_state = self.hass.states.get(closest_device)
self.nearest = device_state.name
self.update_ha_state()
return
        # Stop if we cannot calculate the direction of travel (i.e. we don't
        # have a previous state with a latitude and longitude)
if old_state is None or 'latitude' not in old_state.attributes:
self.dist_to = round(distances_to_zone[entity])
self.dir_of_travel = 'unknown'
self.nearest = entity_name
self.update_ha_state()
return
# Reset the variables
distance_travelled = 0
# Calculate the distance travelled
old_distance = distance(proximity_latitude, proximity_longitude,
old_state.attributes['latitude'],
old_state.attributes['longitude'])
new_distance = distance(proximity_latitude, proximity_longitude,
new_state.attributes['latitude'],
new_state.attributes['longitude'])
distance_travelled = round(new_distance - old_distance, 1)
# Check for tolerance
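        # distance() works in metres, so with e.g. the default tolerance of 1 a
        # change of -5 reads as 'towards' and +0.5 as 'stationary'
        # (illustrative numbers only)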
if distance_travelled < self.tolerance * -1:
direction_of_travel = 'towards'
elif distance_travelled > self.tolerance:
direction_of_travel = 'away_from'
else:
direction_of_travel = 'stationary'
# Update the proximity entity
self.dist_to = round(dist_to_zone)
self.dir_of_travel = direction_of_travel
self.nearest = entity_name
self.update_ha_state()
_LOGGER.debug('proximity.%s update entity: distance=%s: direction=%s: '
'device=%s', self.friendly_name, round(dist_to_zone),
direction_of_travel, entity_name)
_LOGGER.info('%s: proximity calculation complete', entity_name)
|
|
"""
Tests for the crochet APIs.
"""
from __future__ import absolute_import
import threading
import subprocess
import time
import gc
import sys
import weakref
import tempfile
import os
import imp
import inspect
from unittest import SkipTest
from twisted.trial.unittest import TestCase
from twisted.internet.defer import succeed, Deferred, fail, CancelledError
from twisted.python.failure import Failure
from twisted.python import threadable
from twisted.python.runtime import platform
from .._eventloop import (
EventLoop, EventualResult, TimeoutError, ResultRegistry, ReactorStopped)
from .test_setup import FakeReactor
from .. import (
_main, setup, retrieve_result, _store, no_setup,
run_in_reactor, wait_for)
from ..tests import crochet_directory
if platform.type == "posix":
try:
from twisted.internet.process import reapAllProcesses
except (SyntaxError, ImportError):
if sys.version_info < (3, 3, 0):
raise
else:
# Process support is still not ported to Python 3 on some versions
# of Twisted.
reapAllProcesses = None
else:
# waitpid() is only necessary on POSIX:
reapAllProcesses = None
class ResultRegistryTests(TestCase):
"""
Tests for ResultRegistry.
"""
def test_stopped_registered(self):
"""
        ResultRegistry.stop() fires registered EventualResults with
ReactorStopped.
"""
registry = ResultRegistry()
er = EventualResult(None, None)
registry.register(er)
registry.stop()
self.assertRaises(ReactorStopped, er.wait, timeout=0)
def test_stopped_new_registration(self):
"""
        After ResultRegistry.stop() is called, subsequent register() calls
raise ReactorStopped.
"""
registry = ResultRegistry()
er = EventualResult(None, None)
registry.stop()
self.assertRaises(ReactorStopped, registry.register, er)
def test_stopped_already_have_result(self):
"""
        ResultRegistry.stop() has no impact on registered EventualResults
        which already have a result.
"""
registry = ResultRegistry()
er = EventualResult(succeed(123), None)
registry.register(er)
registry.stop()
self.assertEqual(er.wait(0.1), 123)
self.assertEqual(er.wait(0.1), 123)
self.assertEqual(er.wait(0.1), 123)
def test_weakref(self):
"""
Registering an EventualResult with a ResultRegistry does not prevent
it from being garbage collected.
"""
registry = ResultRegistry()
er = EventualResult(None, None)
registry.register(er)
ref = weakref.ref(er)
del er
gc.collect()
self.assertIdentical(ref(), None)
def test_runs_with_lock(self):
"""
All code in ResultRegistry.stop() and register() is protected by a
lock.
"""
self.assertTrue(ResultRegistry.stop.synchronized)
self.assertTrue(ResultRegistry.register.synchronized)
def append_in_thread(a_list, f, *args, **kwargs):
"""
    Call a function in a thread and append its result to the given list.
    Only returns once the thread has actually started.
    Returns a threading.Event that will be set when the action is done.
"""
started = threading.Event()
done = threading.Event()
def go():
started.set()
try:
result = f(*args, **kwargs)
except Exception as e:
a_list.extend([False, e])
else:
a_list.extend([True, result])
done.set()
threading.Thread(target=go).start()
started.wait()
return done
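# Sketch of how the tests below use append_in_thread (names taken from this file):
#
#     result_list = []
#     done = append_in_thread(result_list, dr.wait, 100)  # start the blocking wait
#     d.callback(345)                                      # fire from this thread
#     done.wait(100)
#     assert result_list == [True, 345]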
class EventualResultTests(TestCase):
"""
Tests for EventualResult.
"""
def setUp(self):
self.patch(threadable, "isInIOThread", lambda: False)
def test_success_result(self):
"""
wait() returns the value the Deferred fired with.
"""
dr = EventualResult(succeed(123), None)
self.assertEqual(dr.wait(0.1), 123)
def test_later_success_result(self):
"""
wait() returns the value the Deferred fired with, in the case where
the Deferred is fired after wait() is called.
"""
d = Deferred()
dr = EventualResult(d, None)
result_list = []
done = append_in_thread(result_list, dr.wait, 100)
time.sleep(0.1)
# At this point dr.wait() should have started:
d.callback(345)
done.wait(100)
self.assertEqual(result_list, [True, 345])
def test_success_result_twice(self):
"""
        A second call to wait() returns the same value as the first call.
"""
dr = EventualResult(succeed(123), None)
self.assertEqual(dr.wait(0.1), 123)
self.assertEqual(dr.wait(0.1), 123)
def test_failure_result(self):
"""
wait() raises the exception the Deferred fired with.
"""
dr = EventualResult(fail(RuntimeError()), None)
self.assertRaises(RuntimeError, dr.wait, 0.1)
def test_later_failure_result(self):
"""
wait() raises the exception the Deferred fired with, in the case
where the Deferred is fired after wait() is called.
"""
d = Deferred()
dr = EventualResult(d, None)
result_list = []
done = append_in_thread(result_list, dr.wait, 100)
time.sleep(0.1)
d.errback(RuntimeError())
done.wait(100)
self.assertEqual(
(result_list[0], result_list[1].__class__), (False, RuntimeError))
def test_failure_result_twice(self):
"""
        A second call to wait() raises the same exception as the first call.
"""
dr = EventualResult(fail(ZeroDivisionError()), None)
self.assertRaises(ZeroDivisionError, dr.wait, 0.1)
self.assertRaises(ZeroDivisionError, dr.wait, 0.1)
def test_timeout(self):
"""
If no result is available, wait(timeout) will throw a TimeoutError.
"""
start = time.time()
dr = EventualResult(Deferred(), None)
self.assertRaises(TimeoutError, dr.wait, timeout=0.03)
self.assertTrue(abs(time.time() - start - 0.03) < 0.005)
def test_timeout_twice(self):
"""
If no result is available, a second call to wait(timeout) will also
result in a TimeoutError exception.
"""
dr = EventualResult(Deferred(), None)
self.assertRaises(TimeoutError, dr.wait, timeout=0.01)
self.assertRaises(TimeoutError, dr.wait, timeout=0.01)
def test_timeout_then_result(self):
"""
If a result becomes available after a timeout, a second call to
wait() will return it.
"""
d = Deferred()
dr = EventualResult(d, None)
self.assertRaises(TimeoutError, dr.wait, timeout=0.01)
d.callback(u"value")
self.assertEqual(dr.wait(0.1), u"value")
self.assertEqual(dr.wait(0.1), u"value")
def test_reactor_thread_disallowed(self):
"""
wait() cannot be called from the reactor thread.
"""
self.patch(threadable, "isInIOThread", lambda: True)
d = Deferred()
dr = EventualResult(d, None)
self.assertRaises(RuntimeError, dr.wait, 0)
def test_cancel(self):
"""
cancel() cancels the wrapped Deferred, running cancellation in the
event loop thread.
"""
reactor = FakeReactor()
cancelled = []
def error(f):
cancelled.append(reactor.in_call_from_thread)
cancelled.append(f)
d = Deferred().addErrback(error)
dr = EventualResult(d, _reactor=reactor)
dr.cancel()
self.assertTrue(cancelled[0])
self.assertIsInstance(cancelled[1].value, CancelledError)
def test_stash(self):
"""
EventualResult.stash() stores the object in the global ResultStore.
"""
dr = EventualResult(Deferred(), None)
uid = dr.stash()
self.assertIdentical(dr, _store.retrieve(uid))
def test_original_failure(self):
"""
original_failure() returns the underlying Failure of the Deferred
wrapped by the EventualResult.
"""
try:
1 / 0
except ZeroDivisionError:
f = Failure()
dr = EventualResult(fail(f), None)
self.assertIdentical(dr.original_failure(), f)
def test_original_failure_no_result(self):
"""
If there is no result yet, original_failure() returns None.
"""
dr = EventualResult(Deferred(), None)
self.assertIdentical(dr.original_failure(), None)
def test_original_failure_not_error(self):
"""
If the result is not an error, original_failure() returns None.
"""
dr = EventualResult(succeed(3), None)
self.assertIdentical(dr.original_failure(), None)
def test_error_logged_no_wait(self):
"""
If the result is an error and wait() was never called, the error will
be logged once the EventualResult is garbage-collected.
"""
dr = EventualResult(fail(ZeroDivisionError()), None)
del dr
gc.collect()
excs = self.flushLoggedErrors(ZeroDivisionError)
self.assertEqual(len(excs), 1)
def test_error_logged_wait_timeout(self):
"""
If the result is an error and wait() was called but timed out, the
error will be logged once the EventualResult is garbage-collected.
"""
d = Deferred()
dr = EventualResult(d, None)
try:
dr.wait(0)
except TimeoutError:
pass
d.errback(ZeroDivisionError())
del dr
if sys.version_info[0] == 2:
sys.exc_clear()
gc.collect()
excs = self.flushLoggedErrors(ZeroDivisionError)
self.assertEqual(len(excs), 1)
def test_error_after_gc_logged(self):
"""
If the result is an error that occurs after all user references to the
EventualResult are lost, the error is still logged.
"""
d = Deferred()
dr = EventualResult(d, None)
del dr
d.errback(ZeroDivisionError())
gc.collect()
excs = self.flushLoggedErrors(ZeroDivisionError)
self.assertEqual(len(excs), 1)
def test_control_c_is_possible(self):
"""
If you're wait()ing on an EventualResult in the main thread, make sure the
KeyboardInterrupt happens in a timely manner.
"""
if platform.type != "posix":
raise SkipTest("I don't have the energy to fight Windows semantics.")
program = """\
import os, threading, signal, time, sys
import crochet
crochet.setup()
from twisted.internet.defer import Deferred
if sys.platform.startswith('win'):
signal.signal(signal.SIGBREAK, signal.default_int_handler)
sig_int=signal.CTRL_BREAK_EVENT
sig_kill=signal.SIGTERM
else:
sig_int=signal.SIGINT
sig_kill=signal.SIGKILL
def interrupt():
time.sleep(0.1) # Make sure we've hit wait()
os.kill(os.getpid(), sig_int)
time.sleep(1)
# Still running, test shall fail...
os.kill(os.getpid(), sig_kill)
t = threading.Thread(target=interrupt, daemon=True)
t.start()
d = Deferred()
e = crochet.EventualResult(d, None)
try:
e.wait(10000)
except KeyboardInterrupt:
sys.exit(23)
"""
kw = {'cwd': crochet_directory}
# on Windows the only way to interrupt a subprocess reliably is to
# create a new process group:
# http://docs.python.org/2/library/subprocess.html#subprocess.CREATE_NEW_PROCESS_GROUP
if platform.type.startswith('win'):
kw['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
process = subprocess.Popen([sys.executable, "-c", program], **kw)
self.assertEqual(process.wait(), 23)
def test_connect_deferred(self):
"""
If an EventualResult is created with None,
EventualResult._connect_deferred can be called later to register a
Deferred as the one it is wrapping.
"""
er = EventualResult(None, None)
self.assertRaises(TimeoutError, er.wait, 0)
d = Deferred()
er._connect_deferred(d)
self.assertRaises(TimeoutError, er.wait, 0)
d.callback(123)
self.assertEqual(er.wait(0.1), 123)
def test_reactor_stop_unblocks_EventualResult(self):
"""
Any EventualResult.wait() calls still waiting when the reactor has
stopped will get a ReactorStopped exception.
"""
program = """\
import os, threading, signal, time, sys
from twisted.internet.defer import Deferred
from twisted.internet import reactor
import crochet
crochet.setup()
@crochet.run_in_reactor
def run():
reactor.callLater(0.1, reactor.stop)
return Deferred()
er = run()
try:
er.wait(timeout=10)
except crochet.ReactorStopped:
sys.exit(23)
"""
process = subprocess.Popen([sys.executable, "-c", program],
cwd=crochet_directory)
self.assertEqual(process.wait(), 23)
def test_reactor_stop_unblocks_EventualResult_in_threadpool(self):
"""
Any EventualResult.wait() calls still waiting when the reactor has
stopped will get a ReactorStopped exception, even if it is running in
Twisted's thread pool.
"""
program = """\
import os, threading, signal, time, sys
from twisted.internet.defer import Deferred
from twisted.internet import reactor
import crochet
crochet.setup()
@crochet.run_in_reactor
def run():
reactor.callLater(0.1, reactor.stop)
return Deferred()
result = [13]
def inthread():
er = run()
try:
er.wait(timeout=10)
except crochet.ReactorStopped:
result[0] = 23
reactor.callInThread(inthread)
time.sleep(1)
sys.exit(result[0])
"""
process = subprocess.Popen([sys.executable, "-c", program],
cwd=crochet_directory)
self.assertEqual(process.wait(), 23)
def test_immediate_cancel(self):
"""
Immediately cancelling the result of a @run_in_reactor function will
still cancel the Deferred.
"""
# This depends on the way the reactor runs callFromThread calls, so it
# needs a real functional test.
program = """\
import os, threading, signal, time, sys
from twisted.internet.defer import Deferred, CancelledError
import crochet
crochet.setup()
@crochet.run_in_reactor
def run():
return Deferred()
er = run()
er.cancel()
try:
er.wait(1)
except CancelledError:
sys.exit(23)
else:
sys.exit(3)
"""
process = subprocess.Popen(
[sys.executable, "-c", program],
cwd=crochet_directory, )
self.assertEqual(process.wait(), 23)
def test_noWaitingDuringImport(self):
"""
EventualResult.wait() raises an exception if called while a module is
being imported.
This prevents the imports from taking a long time, preventing other
imports from running in other threads. It also prevents deadlocks,
which can happen if the code being waited on also tries to import
something.
"""
if sys.version_info[0] > 2:
from unittest import SkipTest
raise SkipTest(
"This test is too fragile (and insufficient) on "
"Python 3 - see "
"https://github.com/itamarst/crochet/issues/43")
directory = tempfile.mktemp()
os.mkdir(directory)
sys.path.append(directory)
self.addCleanup(sys.path.remove, directory)
with open(os.path.join(directory, "shouldbeunimportable.py"),
"w") as f:
f.write(
"""\
from crochet import EventualResult
from twisted.internet.defer import Deferred
EventualResult(Deferred(), None).wait(1.0)
""")
self.assertRaises(RuntimeError, __import__, "shouldbeunimportable")
def test_waiting_during_different_thread_importing(self):
"""
EventualResult.wait() should work if called while a module is
being imported in a different thread. See
EventualResultTests.test_noWaitingDuringImport for the explanation of
what should happen if an import is happening in the current thread.
"""
test_complete = threading.Event()
lock_held = threading.Event()
er = EventualResult(succeed(123), None)
def other_thread():
imp.acquire_lock()
lock_held.set()
test_complete.wait()
imp.release_lock()
t = threading.Thread(target=other_thread)
t.start()
lock_held.wait()
# While the imp lock is held by the other thread, we can't
# allow exceptions/assertions to happen because trial will
# try to do an import causing a deadlock instead of a
# failure. We collect all assertion pairs (result, expected),
# wait for the import lock to be released, and then check our
# assertions at the end of the test.
assertions = []
# we want to run .wait while the other thread has the lock acquired
assertions.append((imp.lock_held(), True))
try:
assertions.append((er.wait(0.1), 123))
finally:
test_complete.set()
assertions.append((imp.lock_held(), True))
test_complete.set()
t.join()
[self.assertEqual(result, expected) for result, expected in assertions]
class RunInReactorTests(TestCase):
"""
Tests for the run_in_reactor decorator.
"""
def test_signature(self):
"""
The function decorated with the run_in_reactor decorator has the same
signature as the original function.
"""
c = EventLoop(lambda: FakeReactor(), lambda f, g: None)
def some_name(arg1, arg2, karg1=2, *args, **kw):
pass
decorated = c.run_in_reactor(some_name)
self.assertEqual(inspect.signature(some_name),
inspect.signature(decorated))
def test_name(self):
"""
The function decorated with run_in_reactor has the same name as the
original function.
"""
c = EventLoop(lambda: FakeReactor(), lambda f, g: None)
@c.run_in_reactor
def some_name():
pass
self.assertEqual(some_name.__name__, "some_name")
def test_run_in_reactor_thread(self):
"""
The function decorated with run_in_reactor is run in the reactor
thread.
"""
myreactor = FakeReactor()
c = EventLoop(lambda: myreactor, lambda f, g: None)
c.no_setup()
calls = []
@c.run_in_reactor
def func(a, b, c):
self.assertTrue(myreactor.in_call_from_thread)
calls.append((a, b, c))
func(1, 2, c=3)
self.assertEqual(calls, [(1, 2, 3)])
def test_method(self):
"""
The function decorated with run_in_reactor can be a method.
"""
myreactor = FakeReactor()
c = EventLoop(lambda: myreactor, lambda f, g: None)
c.no_setup()
calls = []
class C(object):
@c.run_in_reactor
def func(self, a, b, c):
calls.append((self, a, b, c))
o = C()
o.func(1, 2, c=3)
self.assertEqual(calls, [(o, 1, 2, 3)])
def test_classmethod(self):
"""
The function decorated with run_in_reactor can be a classmethod.
"""
myreactor = FakeReactor()
c = EventLoop(lambda: myreactor, lambda f, g: None)
c.no_setup()
calls = []
class C(object):
@c.run_in_reactor
@classmethod
def func(cls, a, b, c):
calls.append((cls, a, b, c))
@classmethod
@c.run_in_reactor
def func2(cls, a, b, c):
calls.append((cls, a, b, c))
C.func(1, 2, c=3)
C.func2(1, 2, c=3)
self.assertEqual(calls, [(C, 1, 2, 3), (C, 1, 2, 3)])
def test_wrap_method(self):
"""
The object decorated with run_in_reactor can be a method object.
"""
myreactor = FakeReactor()
c = EventLoop(lambda: myreactor, lambda f, g: None)
c.no_setup()
calls = []
class C(object):
def func(self, a, b, c):
calls.append((a, b, c))
f = c.run_in_reactor(C().func)
f(4, 5, c=6)
self.assertEqual(calls, [(4, 5, 6)])
def make_wrapped_function(self):
"""
Return a function wrapped with run_in_reactor that returns its first
argument.
"""
myreactor = FakeReactor()
c = EventLoop(lambda: myreactor, lambda f, g: None)
c.no_setup()
@c.run_in_reactor
def passthrough(argument):
return argument
return passthrough
def test_deferred_success_result(self):
"""
If the underlying function returns a Deferred, the wrapper returns an
EventualResult hooked up to the Deferred.
"""
passthrough = self.make_wrapped_function()
result = passthrough(succeed(123))
self.assertIsInstance(result, EventualResult)
self.assertEqual(result.wait(0.1), 123)
def test_deferred_failure_result(self):
"""
If the underlying function returns a Deferred, the wrapper returns an
EventualResult hooked up to the Deferred that can deal with failures
as well.
"""
passthrough = self.make_wrapped_function()
result = passthrough(fail(ZeroDivisionError()))
self.assertIsInstance(result, EventualResult)
self.assertRaises(ZeroDivisionError, result.wait, 0.1)
def test_regular_result(self):
"""
If the underlying function returns a non-Deferred, the wrapper returns
an EventualResult hooked up to a Deferred wrapping the result.
"""
passthrough = self.make_wrapped_function()
result = passthrough(123)
self.assertIsInstance(result, EventualResult)
self.assertEqual(result.wait(0.1), 123)
def test_exception_result(self):
"""
If the underlying function throws an exception, the wrapper returns an
EventualResult hooked up to a Deferred wrapping the exception.
"""
myreactor = FakeReactor()
c = EventLoop(lambda: myreactor, lambda f, g: None)
c.no_setup()
@c.run_in_reactor
def raiser():
1 / 0
result = raiser()
self.assertIsInstance(result, EventualResult)
self.assertRaises(ZeroDivisionError, result.wait, 0.1)
def test_registry(self):
"""
@run_in_reactor registers the EventualResult in the ResultRegistry.
"""
myreactor = FakeReactor()
c = EventLoop(lambda: myreactor, lambda f, g: None)
c.no_setup()
@c.run_in_reactor
def run():
return
result = run()
self.assertIn(result, c._registry._results)
def test_wrapped_function(self):
"""
The function wrapped by @run_in_reactor can be accessed via the
`__wrapped__` attribute.
"""
c = EventLoop(lambda: None, lambda f, g: None)
def func():
pass
wrapper = c.run_in_reactor(func)
self.assertIdentical(wrapper.__wrapped__, func)
def test_async_function(self):
"""
Async functions can be wrapped with @run_in_reactor.
"""
myreactor = FakeReactor()
c = EventLoop(lambda: myreactor, lambda f, g: None)
c.no_setup()
calls = []
@c.run_in_reactor
async def go():
self.assertTrue(myreactor.in_call_from_thread)
calls.append(1)
return 23
self.assertEqual((go().wait(0.1), go().wait(0.1)), (23, 23))
self.assertEqual(len(calls), 2)
self.assertFalse(inspect.iscoroutinefunction(go))
class WaitTests(TestCase):
"""
Tests for wait_for decorators.
"""
def setUp(self):
self.reactor = FakeReactor()
self.eventloop = EventLoop(lambda: self.reactor, lambda f, g: None)
self.eventloop.no_setup()
DECORATOR_CALL = "wait_for(timeout=5)"
def decorator(self):
return lambda func: self.eventloop.wait_for(timeout=5)(func)
def make_wrapped_function(self):
"""
Return a function wrapped with the decorator being tested that returns
its first argument, or raises it if it's an exception.
"""
decorator = self.decorator()
@decorator
def passthrough(argument):
if isinstance(argument, Exception):
raise argument
return argument
return passthrough
def test_name(self):
"""
The function decorated with the wait decorator has the same name as the
original function.
"""
decorator = self.decorator()
@decorator
def some_name(argument):
pass
self.assertEqual(some_name.__name__, "some_name")
def test_signature(self):
"""
The function decorated with the wait decorator has the same signature
as the original function.
"""
decorator = self.decorator()
def some_name(arg1, arg2, karg1=2, *args, **kw):
pass
decorated = decorator(some_name)
self.assertEqual(inspect.signature(some_name),
inspect.signature(decorated))
def test_wrapped_function(self):
"""
The function wrapped by the wait decorator can be accessed via the
`__wrapped__` attribute.
"""
decorator = self.decorator()
def func():
pass
wrapper = decorator(func)
self.assertIdentical(wrapper.__wrapped__, func)
def test_reactor_thread_disallowed(self):
"""
Functions decorated with the wait decorator cannot be called from the
reactor thread.
"""
self.patch(threadable, "isInIOThread", lambda: True)
f = self.make_wrapped_function()
self.assertRaises(RuntimeError, f, None)
def test_wait_for_reactor_thread(self):
"""
The function decorated with the wait decorator is run in the reactor
thread.
"""
in_call_from_thread = []
decorator = self.decorator()
@decorator
def func():
in_call_from_thread.append(self.reactor.in_call_from_thread)
in_call_from_thread.append(self.reactor.in_call_from_thread)
func()
in_call_from_thread.append(self.reactor.in_call_from_thread)
self.assertEqual(in_call_from_thread, [False, True, False])
def test_arguments(self):
"""
The function decorated with wait decorator gets all arguments passed
to the wrapper.
"""
calls = []
decorator = self.decorator()
@decorator
def func(a, b, c):
calls.append((a, b, c))
func(1, 2, c=3)
self.assertEqual(calls, [(1, 2, 3)])
def test_classmethod(self):
"""
The function decorated with the wait decorator can be a classmethod.
"""
calls = []
decorator = self.decorator()
class C(object):
@decorator
@classmethod
def func(cls, a, b, c):
calls.append((a, b, c))
@classmethod
@decorator
def func2(cls, a, b, c):
calls.append((a, b, c))
C.func(1, 2, c=3)
C.func2(1, 2, c=3)
self.assertEqual(calls, [(1, 2, 3), (1, 2, 3)])
def test_deferred_success_result(self):
"""
If the underlying function returns a Deferred, the wrapper returns
the Deferred's result.
"""
passthrough = self.make_wrapped_function()
result = passthrough(succeed(123))
self.assertEqual(result, 123)
def test_deferred_failure_result(self):
"""
If the underlying function returns a Deferred with an errback, the
wrapper throws an exception.
"""
passthrough = self.make_wrapped_function()
self.assertRaises(
ZeroDivisionError, passthrough, fail(ZeroDivisionError()))
def test_regular_result(self):
"""
If the underlying function returns a non-Deferred, the wrapper returns
that result.
"""
passthrough = self.make_wrapped_function()
result = passthrough(123)
self.assertEqual(result, 123)
def test_exception_result(self):
"""
If the underlying function throws an exception, the wrapper raises
that exception.
"""
raiser = self.make_wrapped_function()
self.assertRaises(ZeroDivisionError, raiser, ZeroDivisionError())
def test_control_c_is_possible(self):
"""
A call to a decorated function responds to a Ctrl-C (i.e. with a
KeyboardInterrupt) in a timely manner.
"""
if platform.type != "posix":
raise SkipTest("I don't have the energy to fight Windows semantics.")
program = """\
import os, threading, signal, time, sys
import crochet
crochet.setup()
from twisted.internet.defer import Deferred
if sys.platform.startswith('win'):
signal.signal(signal.SIGBREAK, signal.default_int_handler)
sig_int=signal.CTRL_BREAK_EVENT
sig_kill=signal.SIGTERM
else:
sig_int=signal.SIGINT
sig_kill=signal.SIGKILL
def interrupt():
time.sleep(0.1) # Make sure we've hit wait()
os.kill(os.getpid(), sig_int)
time.sleep(1)
# Still running, test shall fail...
os.kill(os.getpid(), sig_kill)
t = threading.Thread(target=interrupt, daemon=True)
t.start()
@crochet.%s
def wait():
return Deferred()
try:
wait()
except KeyboardInterrupt:
sys.exit(23)
""" % (self.DECORATOR_CALL, )
kw = {'cwd': crochet_directory}
if platform.type.startswith('win'):
kw['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
process = subprocess.Popen([sys.executable, "-c", program], **kw)
self.assertEqual(process.wait(), 23)
def test_reactor_stop_unblocks(self):
"""
Any @wait_for-decorated calls still waiting when the reactor
has stopped will get a ReactorStopped exception.
"""
program = """\
import os, threading, signal, time, sys
from twisted.internet.defer import Deferred
from twisted.internet import reactor
import crochet
crochet.setup()
@crochet.%s
def run():
reactor.callLater(0.1, reactor.stop)
return Deferred()
try:
er = run()
except crochet.ReactorStopped:
sys.exit(23)
""" % (self.DECORATOR_CALL, )
process = subprocess.Popen([sys.executable, "-c", program],
cwd=crochet_directory)
self.assertEqual(process.wait(), 23)
def test_timeoutRaises(self):
"""
If a function wrapped with wait_for hits the timeout, it raises
TimeoutError.
"""
@self.eventloop.wait_for(timeout=0.5)
def times_out():
return Deferred().addErrback(lambda f: f.trap(CancelledError))
start = time.time()
self.assertRaises(TimeoutError, times_out)
self.assertTrue(abs(time.time() - start - 0.5) < 0.1)
def test_timeoutCancels(self):
"""
If a function wrapped with wait_for hits the timeout, it cancels
the underlying Deferred.
"""
result = Deferred()
error = []
result.addErrback(error.append)
@self.eventloop.wait_for(timeout=0.0)
def times_out():
return result
self.assertRaises(TimeoutError, times_out)
self.assertIsInstance(error[0].value, CancelledError)
def test_async_function(self):
"""
Async functions can be wrapped with @wait_for.
"""
@self.eventloop.wait_for(timeout=0.1)
async def go():
self.assertTrue(self.reactor.in_call_from_thread)
return 17
self.assertEqual((go(), go()), (17, 17))
self.assertFalse(inspect.iscoroutinefunction(go))
class PublicAPITests(TestCase):
"""
Tests for the public API.
"""
def test_no_sideeffects(self):
"""
Creating an EventLoop object, as is done in crochet.__init__, does not
call any methods on the objects it is created with.
"""
c = EventLoop(
lambda: None,
lambda f, g: 1 / 0,
lambda *args: 1 / 0,
watchdog_thread=object(),
reapAllProcesses=lambda: 1 / 0)
del c
def test_eventloop_api(self):
"""
An EventLoop object configured with the real reactor and
_shutdown.register is exposed via its public methods.
"""
from twisted.python.log import startLoggingWithObserver
from crochet import _shutdown
self.assertIsInstance(_main, EventLoop)
self.assertEqual(_main.setup, setup)
self.assertEqual(_main.no_setup, no_setup)
self.assertEqual(_main.run_in_reactor, run_in_reactor)
self.assertEqual(_main.wait_for, wait_for)
self.assertIdentical(_main._atexit_register, _shutdown.register)
self.assertIdentical(
_main._startLoggingWithObserver, startLoggingWithObserver)
self.assertIdentical(_main._watchdog_thread, _shutdown._watchdog)
def test_eventloop_api_reactor(self):
"""
The publicly exposed EventLoop will, when setup, use the global
reactor.
"""
from twisted.internet import reactor
_main.no_setup()
self.assertIdentical(_main._reactor, reactor)
def test_retrieve_result(self):
"""
retrieve_result() calls retrieve() on the global ResultStore.
"""
dr = EventualResult(Deferred(), None)
uid = dr.stash()
self.assertIdentical(dr, retrieve_result(uid))
def test_reapAllProcesses(self):
"""
The EventLoop object is configured with the real reapAllProcesses on
POSIX platforms.
"""
self.assertIdentical(_main._reapAllProcesses, reapAllProcesses)
if platform.type != "posix":
test_reapAllProcesses.skip = "Only relevant on POSIX platforms"
if reapAllProcesses is None:
test_reapAllProcesses.skip = "Twisted does not yet support processes"
|
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import re
import socket
import subprocess
import pyaudio
from os.path import join, expanduser
from threading import Thread
from time import sleep
import json
import os.path
import tempfile
import psutil
from stat import S_ISREG, ST_MTIME, ST_MODE, ST_SIZE
import requests
import signal as sig
import mycroft.audio
import mycroft.configuration
from mycroft.util.format import nice_number
# Officially exported methods from this file:
# play_wav, play_mp3, play_ogg, get_cache_directory,
# resolve_resource_file, wait_while_speaking
from mycroft.util.log import LOG
from mycroft.util.parse import extract_datetime, extract_number, normalize
from mycroft.util.signal import *
def resolve_resource_file(res_name):
"""Convert a resource into an absolute filename.
Resource names are in the form: 'filename.ext'
or 'path/filename.ext'
The system will look for ~/.mycroft/res_name first, and
if not found will look at /opt/mycroft/res_name,
then finally it will look for res_name in the 'mycroft/res'
folder of the source code package.
Example:
With mycroft running as the user 'bob', if you called
resolve_resource_file('snd/beep.wav')
it would return either '/home/bob/.mycroft/snd/beep.wav' or
'/opt/mycroft/snd/beep.wav' or '.../mycroft/res/snd/beep.wav',
where the '...' is replaced by the path where the package has
been installed.
Args:
res_name (str): a resource path/name
"""
config = mycroft.configuration.Configuration.get()
# First look for fully qualified file (e.g. a user setting)
if os.path.isfile(res_name):
return res_name
# Now look for ~/.mycroft/res_name (in user folder)
filename = os.path.expanduser("~/.mycroft/" + res_name)
if os.path.isfile(filename):
return filename
# Next look for /opt/mycroft/res/res_name
data_dir = expanduser(config['data_dir'])
filename = os.path.expanduser(join(data_dir, res_name))
if os.path.isfile(filename):
return filename
# Finally look for it in the source package
filename = os.path.join(os.path.dirname(__file__), '..', 'res', res_name)
filename = os.path.abspath(os.path.normpath(filename))
if os.path.isfile(filename):
return filename
return None # Resource cannot be resolved
def play_wav(uri):
""" Play a wav-file.
This will use the application specified in the mycroft config
and play the uri passed as argument. The function will return directly
and play the file in the background.
Arguments:
uri: uri to play
Returns: subprocess.Popen object
"""
config = mycroft.configuration.Configuration.get()
play_cmd = config.get("play_wav_cmdline")
play_wav_cmd = str(play_cmd).split(" ")
for index, cmd in enumerate(play_wav_cmd):
if cmd == "%1":
play_wav_cmd[index] = (get_http(uri))
return subprocess.Popen(play_wav_cmd)
def play_mp3(uri):
""" Play a mp3-file.
This will use the application specified in the mycroft config
and play the uri passed as argument. The function will return directly
and play the file in the background.
Arguments:
uri: uri to play
Returns: subprocess.Popen object
"""
config = mycroft.configuration.Configuration.get()
play_cmd = config.get("play_mp3_cmdline")
play_mp3_cmd = str(play_cmd).split(" ")
for index, cmd in enumerate(play_mp3_cmd):
if cmd == "%1":
play_mp3_cmd[index] = (get_http(uri))
return subprocess.Popen(play_mp3_cmd)
def play_ogg(uri):
""" Play a ogg-file.
This will use the application specified in the mycroft config
and play the uri passed as argument. The function will return directly
and play the file in the background.
Arguments:
uri: uri to play
Returns: subprocess.Popen object
"""
config = mycroft.configuration.Configuration.get()
play_cmd = config.get("play_ogg_cmdline")
play_ogg_cmd = str(play_cmd).split(" ")
for index, cmd in enumerate(play_ogg_cmd):
if cmd == "%1":
play_ogg_cmd[index] = (get_http(uri))
return subprocess.Popen(play_ogg_cmd)
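# Illustrative sketch (not part of the original module): the play_* helpers
# above just split the configured command line on spaces and substitute the
# "%1" token with the (http-ified) uri, then hand the list to
# subprocess.Popen so playback runs in the background. Assuming a config
# entry such as "play_wav_cmdline": "paplay %1" (hypothetical), the
# expansion would look like:
#
#     >>> cmd = "paplay %1".split(" ")
#     >>> cmd[cmd.index("%1")] = get_http("https://example.com/beep.wav")
#     >>> cmd
#     ['paplay', 'http://example.com/beep.wav']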
def record(file_path, duration, rate, channels):
if duration > 0:
return subprocess.Popen(
["arecord", "-r", str(rate), "-c", str(channels), "-d",
str(duration), file_path])
else:
return subprocess.Popen(
["arecord", "-r", str(rate), "-c", str(channels), file_path])
def find_input_device(device_name):
""" Find audio input device by name.
Arguments:
device_name: device name or regex pattern to match
Returns: device_index (int) or None if device wasn't found
"""
LOG.info('Searching for input device: {}'.format(device_name))
LOG.debug('Devices: ')
pa = pyaudio.PyAudio()
pattern = re.compile(device_name)
for device_index in range(pa.get_device_count()):
dev = pa.get_device_info_by_index(device_index)
LOG.debug(' {}'.format(dev['name']))
if dev['maxInputChannels'] > 0 and pattern.match(dev['name']):
LOG.debug(' ^-- matched')
return device_index
return None
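# Illustrative usage (not part of the original module): find_input_device()
# compiles the given name as a regex and returns the index of the first
# input-capable PyAudio device whose name matches from the start, so both
# literal names and patterns work. Device names below are hypothetical:
#
#     find_input_device("pulse")             # literal prefix
#     find_input_device("USB.*Microphone")   # regex pattern
#
# Each call returns an int device index, or None if nothing matches.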
def get_http(uri):
return uri.replace("https://", "http://")
def remove_last_slash(url):
if url and url.endswith('/'):
url = url[:-1]
return url
def read_stripped_lines(filename):
with open(filename, 'r') as f:
return [line.strip() for line in f]
def read_dict(filename, div='='):
d = {}
with open(filename, 'r') as f:
for line in f:
(key, val) = line.split(div)
d[key.strip()] = val.strip()
return d
def connected():
""" Check connection by connecting to 8.8.8.8, if this is
blocked/fails, Microsoft NCSI is used as a backup
Returns:
True if internet connection can be detected
"""
return connected_dns() or connected_ncsi()
def connected_ncsi():
""" Check internet connection by retrieving the Microsoft NCSI endpoint.
Returns:
True if internet connection can be detected
"""
try:
r = requests.get('http://www.msftncsi.com/ncsi.txt')
if r.text == u'Microsoft NCSI':
return True
except Exception:
pass
return False
def connected_dns(host="8.8.8.8", port=53, timeout=3):
""" Check internet connection by connecting to DNS servers
Returns:
True if internet connection can be detected
"""
# Thanks to 7h3rAm on
# Host: 8.8.8.8 (google-public-dns-a.google.com)
# OpenPort: 53/tcp
# Service: domain (DNS/TCP)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
s.connect((host, port))
return True
except IOError:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
s.connect(("8.8.4.4", port))
return True
except IOError:
return False
def curate_cache(directory, min_free_percent=5.0, min_free_disk=50):
"""Clear out the directory if needed
This assumes all the files in the directory can be freely deleted.
Args:
directory (str): directory path that holds cached files
min_free_percent (float): percentage (0.0-100.0) of drive to keep free,
default is 5% if not specified.
min_free_disk (float): minimum allowed disk space in MB, default
value is 50 MB if not specified.
"""
# Simpleminded implementation -- keep a certain percentage of the
# disk available.
# TODO: Would be easy to add more options, like whitelisted files, etc.
space = psutil.disk_usage(directory)
# convert from MB to bytes
min_free_disk *= 1024 * 1024
# space.percent = space.used/space.total*100.0
percent_free = 100.0 - space.percent
if percent_free < min_free_percent and space.free < min_free_disk:
LOG.info('Low diskspace detected, cleaning cache')
# calculate how many bytes we need to delete
bytes_needed = (min_free_percent - percent_free) / 100.0 * space.total
bytes_needed = int(bytes_needed + 1.0)
# get all entries in the directory w/ stats
entries = (os.path.join(directory, fn) for fn in os.listdir(directory))
entries = ((os.stat(path), path) for path in entries)
# leave only regular files, insert modification date
entries = ((stat[ST_MTIME], stat[ST_SIZE], path)
for stat, path in entries if S_ISREG(stat[ST_MODE]))
# delete files with oldest modification date until space is freed
space_freed = 0
for moddate, fsize, path in sorted(entries):
try:
os.remove(path)
space_freed += fsize
except Exception:
pass
if space_freed > bytes_needed:
return # deleted enough!
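# Worked example (not part of the original module, numbers are illustrative):
# on a 1 GB partition that is 97% full, curate_cache(directory) computes
# percent_free = 3.0 and sees roughly 30 MB free, so both the 5% and the
# ~50 MB thresholds are violated. It then frees
#     bytes_needed = (5.0 - 3.0) / 100.0 * 1e9 ~= 20 MB
# by removing the oldest regular files in the directory until at least that
# many bytes have been reclaimed.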
def get_cache_directory(domain=None):
"""Get a directory for caching data
This directory can be used to hold temporary caches of data to
speed up performance. This directory will likely be part of a
small RAM disk and may be cleared at any time. So code that
uses these cached files must be able to fallback and regenerate
the file.
Args:
domain (str): The cache domain. Basically just a subdirectory.
Return:
str: a path to the directory where you can cache data
"""
config = mycroft.configuration.Configuration.get()
dir = config.get("cache_path")
if not dir:
# If not defined, use /tmp/mycroft/cache
dir = os.path.join(tempfile.gettempdir(), "mycroft", "cache")
return ensure_directory_exists(dir, domain)
def validate_param(value, name):
if not value:
raise ValueError("Missing or empty %s in mycroft.conf " % name)
def is_speaking():
"""Determine if Text to Speech is occurring
Returns:
bool: True while still speaking
"""
LOG.info("mycroft.utils.is_speaking() is depreciated, use "
"mycroft.audio.is_speaking() instead.")
return mycroft.audio.is_speaking()
def wait_while_speaking():
"""Pause as long as Text to Speech is still happening
Pause while Text to Speech is still happening. This always pauses
briefly to ensure that any preceding request to speak has time to
begin.
"""
LOG.info("mycroft.utils.wait_while_speaking() is depreciated, use "
"mycroft.audio.wait_while_speaking() instead.")
return mycroft.audio.wait_while_speaking()
def stop_speaking():
# TODO: Less hacky approach to this once Audio Manager is implemented
# Skills should only be able to stop speech they've initiated
LOG.info("mycroft.utils.stop_speaking() is depreciated, use "
"mycroft.audio.stop_speaking() instead.")
mycroft.audio.stop_speaking()
def get_arch():
""" Get architecture string of system. """
return os.uname()[4]
def reset_sigint_handler():
"""
Reset the sigint handler to the default. This fixes KeyboardInterrupt
not getting raised when started via start-mycroft.sh
"""
sig.signal(sig.SIGINT, sig.default_int_handler)
def create_daemon(target, args=(), kwargs=None):
"""Helper to quickly create and start a thread with daemon = True"""
t = Thread(target=target, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
def wait_for_exit_signal():
"""Blocks until KeyboardInterrupt is received"""
try:
while True:
sleep(100)
except KeyboardInterrupt:
pass
def create_echo_function(name, whitelist=None):
from mycroft.configuration import Configuration
blacklist = Configuration.get().get("ignore_logs")
def echo(message):
"""Listen for messages and echo them for logging"""
try:
js_msg = json.loads(message)
if whitelist and js_msg.get("type") not in whitelist:
return
if blacklist and js_msg.get("type") in blacklist:
return
if js_msg.get("type") == "registration":
# do not log tokens from registration messages
js_msg["data"]["token"] = None
message = json.dumps(js_msg)
except Exception:
pass
LOG(name).debug(message)
return echo
def camel_case_split(identifier: str) -> str:
"""Split camel case string"""
regex = '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
matches = re.finditer(regex, identifier)
return ' '.join([m.group(0) for m in matches])
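# Illustrative example (not part of the original module): the regex above
# splits on lower->upper boundaries while keeping acronyms together, e.g.:
#
#     >>> camel_case_split("MycroftHTTPSkill")
#     'Mycroft HTTP Skill'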
|
|
# util/langhelpers.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.
"""
import itertools
import inspect
import operator
import re
import sys
import types
import warnings
from functools import update_wrapper
from .. import exc
import hashlib
from . import compat
from . import _collections
def md5_hex(x):
if compat.py3k:
x = x.encode('utf-8')
m = hashlib.md5()
m.update(x)
return m.hexdigest()
class safe_reraise(object):
"""Reraise an exception after invoking some
handler code.
Stores the existing exception info before
invoking so that it is maintained across a potential
coroutine context switch.
e.g.::
try:
sess.commit()
except:
with safe_reraise():
sess.rollback()
"""
def __enter__(self):
self._exc_info = sys.exc_info()
def __exit__(self, type_, value, traceback):
# see #2703 for notes
if type_ is None:
exc_type, exc_value, exc_tb = self._exc_info
self._exc_info = None # remove potential circular references
compat.reraise(exc_type, exc_value, exc_tb)
else:
if not compat.py3k and self._exc_info and self._exc_info[1]:
# emulate Py3K's behavior of telling us when an exception
# occurs in an exception handler.
warn(
"An exception has occurred during handling of a "
"previous exception. The previous exception "
"is:\n %s %s\n" % (self._exc_info[0], self._exc_info[1]))
self._exc_info = None # remove potential circular references
compat.reraise(type_, value, traceback)
def decode_slice(slc):
"""decode a slice object as sent to __getitem__.
takes into account the 2.5 __index__() method, basically.
"""
ret = []
for x in slc.start, slc.stop, slc.step:
if hasattr(x, '__index__'):
x = x.__index__()
ret.append(x)
return tuple(ret)
def _unique_symbols(used, *bases):
used = set(used)
for base in bases:
pool = itertools.chain((base,),
compat.itertools_imap(lambda i: base + str(i),
range(1000)))
for sym in pool:
if sym not in used:
used.add(sym)
yield sym
break
else:
raise NameError("exhausted namespace for symbol base %s" % base)
def map_bits(fn, n):
"""Call the given function given each nonzero bit from n."""
while n:
b = n & (~n + 1)
yield fn(b)
n ^= b
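# Illustrative example (not part of the original module): b = n & (~n + 1)
# isolates the lowest set bit and n ^= b clears it, so map_bits() yields
# fn applied to each set bit of n, lowest bit first:
#
#     >>> list(map_bits(lambda b: b, 0b1011))
#     [1, 2, 8]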
def decorator(target):
"""A signature-matching decorator factory."""
def decorate(fn):
if not inspect.isfunction(fn):
raise Exception("not a decoratable function")
spec = compat.inspect_getfullargspec(fn)
names = tuple(spec[0]) + spec[1:3] + (fn.__name__,)
targ_name, fn_name = _unique_symbols(names, 'target', 'fn')
metadata = dict(target=targ_name, fn=fn_name)
metadata.update(format_argspec_plus(spec, grouped=False))
metadata['name'] = fn.__name__
code = """\
def %(name)s(%(args)s):
return %(target)s(%(fn)s, %(apply_kw)s)
""" % metadata
decorated = _exec_code_in_env(code,
{targ_name: target, fn_name: fn},
fn.__name__)
decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
decorated.__wrapped__ = fn
return update_wrapper(decorated, fn)
return update_wrapper(decorate, target)
def _exec_code_in_env(code, env, fn_name):
exec(code, env)
return env[fn_name]
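# Illustrative usage sketch (not part of the original module; the wrapper
# below is hypothetical): the @decorator factory generates a wrapper whose
# argspec matches the decorated function exactly, which keeps
# introspection-based tooling working:
#
#     @decorator
#     def verbose(fn, *args, **kw):
#         # 'fn' is the original function; args/kw are the caller's arguments
#         print("calling %s" % fn.__name__)
#         return fn(*args, **kw)
#
#     @verbose
#     def add(x, y=1):
#         return x + y
#
#     # add(2, y=3) prints "calling add" and returns 5, while introspection
#     # of add still reports the signature (x, y=1) rather than (*args, **kw).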
def public_factory(target, location):
"""Produce a wrapping function for the given cls or classmethod.
Rationale here is so that the __init__ method of the
class can serve as documentation for the function.
"""
if isinstance(target, type):
fn = target.__init__
callable_ = target
doc = "Construct a new :class:`.%s` object. \n\n"\
"This constructor is mirrored as a public API function; "\
"see :func:`~%s` "\
"for a full usage and argument description." % (
target.__name__, location, )
else:
fn = callable_ = target
doc = "This function is mirrored; see :func:`~%s` "\
"for a description of arguments." % location
location_name = location.split(".")[-1]
spec = compat.inspect_getfullargspec(fn)
del spec[0][0]
metadata = format_argspec_plus(spec, grouped=False)
metadata['name'] = location_name
code = """\
def %(name)s(%(args)s):
return cls(%(apply_kw)s)
""" % metadata
env = {'cls': callable_, 'symbol': symbol}
exec(code, env)
decorated = env[location_name]
decorated.__doc__ = fn.__doc__
decorated.__module__ = "sqlalchemy" + location.rsplit(".", 1)[0]
if compat.py2k or hasattr(fn, '__func__'):
fn.__func__.__doc__ = doc
else:
fn.__doc__ = doc
return decorated
class PluginLoader(object):
def __init__(self, group, auto_fn=None):
self.group = group
self.impls = {}
self.auto_fn = auto_fn
def load(self, name):
if name in self.impls:
return self.impls[name]()
if self.auto_fn:
loader = self.auto_fn(name)
if loader:
self.impls[name] = loader
return loader()
try:
import pkg_resources
except ImportError:
pass
else:
for impl in pkg_resources.iter_entry_points(
self.group, name):
self.impls[name] = impl.load
return impl.load()
raise exc.NoSuchModuleError(
"Can't load plugin: %s:%s" %
(self.group, name))
def register(self, name, modulepath, objname):
def load():
mod = compat.import_(modulepath)
for token in modulepath.split(".")[1:]:
mod = getattr(mod, token)
return getattr(mod, objname)
self.impls[name] = load
def get_cls_kwargs(cls, _set=None):
"""Return the full set of inherited kwargs for the given `cls`.
Probes a class's __init__ method, collecting all named arguments. If the
__init__ defines a \**kwargs catch-all, then the constructor is presumed
to pass along unrecognized keywords to its base classes, and the
collection process is repeated recursively on each of the bases.
Uses a subset of inspect.getargspec() to cut down on method overhead.
No anonymous tuple arguments please!
"""
toplevel = _set is None
if toplevel:
_set = set()
ctr = cls.__dict__.get('__init__', False)
has_init = ctr and isinstance(ctr, types.FunctionType) and \
isinstance(ctr.__code__, types.CodeType)
if has_init:
names, has_kw = inspect_func_args(ctr)
_set.update(names)
if not has_kw and not toplevel:
return None
if not has_init or has_kw:
for c in cls.__bases__:
if get_cls_kwargs(c, _set) is None:
break
_set.discard('self')
return _set
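# Illustrative example (not part of the original module; classes are
# hypothetical): get_cls_kwargs() walks up the __init__ chain whenever a
# constructor declares **kwargs:
#
#     class Base(object):
#         def __init__(self, a, b=1): ...
#
#     class Child(Base):
#         def __init__(self, c, **kw): ...
#
#     # get_cls_kwargs(Child) -> {'a', 'b', 'c'}: Child's named arguments
#     # plus Base's, because Child's **kw is presumed to be passed along.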
try:
# TODO: who doesn't have this constant?
from inspect import CO_VARKEYWORDS
def inspect_func_args(fn):
co = fn.__code__
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
has_kw = bool(co.co_flags & CO_VARKEYWORDS)
return args, has_kw
except ImportError:
def inspect_func_args(fn):
names, _, has_kw, _ = inspect.getargspec(fn)
return names, bool(has_kw)
def get_func_kwargs(func):
"""Return the set of legal kwargs for the given `func`.
Uses getargspec so is safe to call for methods, functions,
etc.
"""
return compat.inspect_getargspec(func)[0]
def get_callable_argspec(fn, no_self=False, _is_init=False):
"""Return the argument signature for any callable.
All pure-Python callables are accepted, including
functions, methods, classes, objects with __call__;
builtins and other edge cases like functools.partial() objects
raise a TypeError.
"""
if inspect.isbuiltin(fn):
raise TypeError("Can't inspect builtin: %s" % fn)
elif inspect.isfunction(fn):
if _is_init and no_self:
spec = compat.inspect_getargspec(fn)
return compat.ArgSpec(spec.args[1:], spec.varargs,
spec.keywords, spec.defaults)
else:
return compat.inspect_getargspec(fn)
elif inspect.ismethod(fn):
if no_self and (_is_init or fn.__self__):
spec = compat.inspect_getargspec(fn.__func__)
return compat.ArgSpec(spec.args[1:], spec.varargs,
spec.keywords, spec.defaults)
else:
return compat.inspect_getargspec(fn.__func__)
elif inspect.isclass(fn):
return get_callable_argspec(
fn.__init__, no_self=no_self, _is_init=True)
elif hasattr(fn, '__func__'):
return compat.inspect_getargspec(fn.__func__)
elif hasattr(fn, '__call__'):
if inspect.ismethod(fn.__call__):
return get_callable_argspec(fn.__call__, no_self=no_self)
else:
raise TypeError("Can't inspect callable: %s" % fn)
else:
raise TypeError("Can't inspect callable: %s" % fn)
def format_argspec_plus(fn, grouped=True):
"""Returns a dictionary of formatted, introspected function arguments.
An enhanced variant of inspect.formatargspec to support code generation.
fn
An inspectable callable or tuple of inspect getargspec() results.
grouped
Defaults to True; include (parens, around, argument) lists
Returns:
args
Full inspect.formatargspec for fn
self_arg
The name of the first positional argument, varargs[0], or None
if the function defines no positional arguments.
apply_pos
args, re-written in calling rather than receiving syntax. Arguments are
passed positionally.
apply_kw
Like apply_pos, except keyword-ish args are passed as keywords.
Example::
>>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
{'args': '(self, a, b, c=3, **d)',
'self_arg': 'self',
'apply_kw': '(self, a, b, c=c, **d)',
'apply_pos': '(self, a, b, c, **d)'}
"""
if compat.callable(fn):
spec = compat.inspect_getfullargspec(fn)
else:
# we accept an existing argspec...
spec = fn
args = inspect.formatargspec(*spec)
if spec[0]:
self_arg = spec[0][0]
elif spec[1]:
self_arg = '%s[0]' % spec[1]
else:
self_arg = None
if compat.py3k:
apply_pos = inspect.formatargspec(spec[0], spec[1],
spec[2], None, spec[4])
num_defaults = 0
if spec[3]:
num_defaults += len(spec[3])
if spec[4]:
num_defaults += len(spec[4])
name_args = spec[0] + spec[4]
else:
apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
num_defaults = 0
if spec[3]:
num_defaults += len(spec[3])
name_args = spec[0]
if num_defaults:
defaulted_vals = name_args[0 - num_defaults:]
else:
defaulted_vals = ()
apply_kw = inspect.formatargspec(name_args, spec[1], spec[2],
defaulted_vals,
formatvalue=lambda x: '=' + x)
if grouped:
return dict(args=args, self_arg=self_arg,
apply_pos=apply_pos, apply_kw=apply_kw)
else:
return dict(args=args[1:-1], self_arg=self_arg,
apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1])
def format_argspec_init(method, grouped=True):
"""format_argspec_plus with considerations for typical __init__ methods
Wraps format_argspec_plus with error handling strategies for typical
__init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
if method is object.__init__:
args = grouped and '(self)' or 'self'
else:
try:
return format_argspec_plus(method, grouped=grouped)
except TypeError:
args = (grouped and '(self, *args, **kwargs)'
or 'self, *args, **kwargs')
return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args)
def getargspec_init(method):
"""inspect.getargspec with considerations for typical __init__ methods
Wraps inspect.getargspec with error handling for typical __init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return compat.inspect_getargspec(method)
except TypeError:
if method is object.__init__:
return (['self'], None, None, None)
else:
return (['self'], 'args', 'kwargs', None)
def unbound_method_to_callable(func_or_cls):
"""Adjust the incoming callable such that a 'self' argument is not
required.
"""
if isinstance(func_or_cls, types.MethodType) and not func_or_cls.__self__:
return func_or_cls.__func__
else:
return func_or_cls
def generic_repr(obj, additional_kw=(), to_inspect=None, omit_kwarg=()):
"""Produce a __repr__() based on direct association of the __init__()
specification vs. same-named attributes present.
"""
if to_inspect is None:
to_inspect = [obj]
else:
to_inspect = _collections.to_list(to_inspect)
missing = object()
pos_args = []
kw_args = _collections.OrderedDict()
vargs = None
for i, insp in enumerate(to_inspect):
try:
(_args, _vargs, vkw, defaults) = \
compat.inspect_getargspec(insp.__init__)
except TypeError:
continue
else:
default_len = defaults and len(defaults) or 0
if i == 0:
if _vargs:
vargs = _vargs
if default_len:
pos_args.extend(_args[1:-default_len])
else:
pos_args.extend(_args[1:])
else:
kw_args.update([
(arg, missing) for arg in _args[1:-default_len]
])
if default_len:
kw_args.update([
(arg, default)
for arg, default
in zip(_args[-default_len:], defaults)
])
output = []
output.extend(repr(getattr(obj, arg, None)) for arg in pos_args)
if vargs is not None and hasattr(obj, vargs):
output.extend([repr(val) for val in getattr(obj, vargs)])
for arg, defval in kw_args.items():
if arg in omit_kwarg:
continue
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append('%s=%r' % (arg, val))
except Exception:
pass
if additional_kw:
for arg, defval in additional_kw:
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append('%s=%r' % (arg, val))
except Exception:
pass
return "%s(%s)" % (obj.__class__.__name__, ", ".join(output))
class portable_instancemethod(object):
"""Turn an instancemethod into a (parent, name) pair
to produce a serializable callable.
"""
__slots__ = 'target', 'name', '__weakref__'
def __getstate__(self):
return {'target': self.target, 'name': self.name}
def __setstate__(self, state):
self.target = state['target']
self.name = state['name']
def __init__(self, meth):
self.target = meth.__self__
self.name = meth.__name__
def __call__(self, *arg, **kw):
return getattr(self.target, self.name)(*arg, **kw)
def class_hierarchy(cls):
"""Return an unordered sequence of all classes related to cls.
Traverses diamond hierarchies.
Fibs slightly: subclasses of builtin types are not returned. Thus
class_hierarchy(class A(object)) returns (A, object), not A plus every
class systemwide that derives from object.
Old-style classes are discarded and hierarchies rooted on them
will not be descended.
"""
if compat.py2k:
if isinstance(cls, types.ClassType):
return list()
hier = set([cls])
process = list(cls.__mro__)
while process:
c = process.pop()
if compat.py2k:
if isinstance(c, types.ClassType):
continue
bases = (_ for _ in c.__bases__
if _ not in hier and not isinstance(_, types.ClassType))
else:
bases = (_ for _ in c.__bases__ if _ not in hier)
for b in bases:
process.append(b)
hier.add(b)
if compat.py3k:
if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'):
continue
else:
if c.__module__ == '__builtin__' or not hasattr(
c, '__subclasses__'):
continue
for s in [_ for _ in c.__subclasses__() if _ not in hier]:
process.append(s)
hier.add(s)
return list(hier)
def iterate_attributes(cls):
"""iterate all the keys and attributes associated
with a class, without using getattr().
Does not use getattr() so that class-sensitive
descriptors (i.e. property.__get__()) are not called.
"""
keys = dir(cls)
for key in keys:
for c in cls.__mro__:
if key in c.__dict__:
yield (key, c.__dict__[key])
break
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
name='self.proxy', from_instance=None):
"""Automates delegation of __specials__ for a proxying type."""
if only:
dunders = only
else:
if skip is None:
skip = ('__slots__', '__del__', '__getattribute__',
'__metaclass__', '__getstate__', '__setstate__')
dunders = [m for m in dir(from_cls)
if (m.startswith('__') and m.endswith('__') and
not hasattr(into_cls, m) and m not in skip)]
for method in dunders:
try:
fn = getattr(from_cls, method)
if not hasattr(fn, '__call__'):
continue
fn = getattr(fn, 'im_func', fn)
except AttributeError:
continue
try:
spec = compat.inspect_getargspec(fn)
fn_args = inspect.formatargspec(spec[0])
d_args = inspect.formatargspec(spec[0][1:])
except TypeError:
fn_args = '(self, *args, **kw)'
d_args = '(*args, **kw)'
py = ("def %(method)s%(fn_args)s: "
"return %(name)s.%(method)s%(d_args)s" % locals())
env = from_instance is not None and {name: from_instance} or {}
compat.exec_(py, env)
try:
env[method].__defaults__ = fn.__defaults__
except AttributeError:
pass
setattr(into_cls, method, env[method])
def methods_equivalent(meth1, meth2):
"""Return True if the two methods are the same implementation."""
return getattr(meth1, '__func__', meth1) is getattr(
meth2, '__func__', meth2)
def as_interface(obj, cls=None, methods=None, required=None):
"""Ensure basic interface compliance for an instance or dict of callables.
Checks that ``obj`` implements public methods of ``cls`` or has members
listed in ``methods``. If ``required`` is not supplied, implementing at
least one interface method is sufficient. Methods present on ``obj`` that
are not in the interface are ignored.
If ``obj`` is a dict and ``dict`` does not meet the interface
requirements, the keys of the dictionary are inspected. Keys present in
``obj`` that are not in the interface will raise TypeErrors.
Raises TypeError if ``obj`` does not meet the interface criteria.
In all passing cases, an object with callable members is returned. In the
simple case, ``obj`` is returned as-is; if dict processing kicks in then
an anonymous class is returned.
obj
A type, instance, or dictionary of callables.
cls
Optional, a type. All public methods of cls are considered the
interface. An ``obj`` instance of cls will always pass, ignoring
``required``.
methods
Optional, a sequence of method names to consider as the interface.
required
Optional, a sequence of mandatory implementations. If omitted, an
``obj`` that provides at least one interface method is considered
sufficient. As a convenience, required may be a type, in which case
all public methods of the type are required.
"""
if not cls and not methods:
raise TypeError('a class or collection of method names are required')
if isinstance(cls, type) and isinstance(obj, cls):
return obj
interface = set(methods or [m for m in dir(cls) if not m.startswith('_')])
implemented = set(dir(obj))
complies = operator.ge
if isinstance(required, type):
required = interface
elif not required:
required = set()
complies = operator.gt
else:
required = set(required)
if complies(implemented.intersection(interface), required):
return obj
# No dict duck typing here.
if not isinstance(obj, dict):
qualifier = complies is operator.gt and 'any of' or 'all of'
raise TypeError("%r does not implement %s: %s" % (
obj, qualifier, ', '.join(interface)))
class AnonymousInterface(object):
"""A callable-holding shell."""
if cls:
AnonymousInterface.__name__ = 'Anonymous' + cls.__name__
found = set()
for method, impl in dictlike_iteritems(obj):
if method not in interface:
raise TypeError("%r: unknown in this interface" % method)
if not compat.callable(impl):
raise TypeError("%r=%r is not callable" % (method, impl))
setattr(AnonymousInterface, method, staticmethod(impl))
found.add(method)
if complies(found, required):
return AnonymousInterface
raise TypeError("dictionary does not contain required keys %s" %
', '.join(required - found))
class memoized_property(object):
"""A read-only @property that is only evaluated once."""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
def _reset(self, obj):
memoized_property.reset(obj, self.__name__)
@classmethod
def reset(cls, obj, name):
obj.__dict__.pop(name, None)
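# Illustrative usage (not part of the original module; the class is
# hypothetical):
#
#     class Widget(object):
#         @memoized_property
#         def expensive(self):
#             print("computing...")
#             return 42
#
#     w = Widget()
#     w.expensive   # prints "computing..." and returns 42
#     w.expensive   # returns 42 straight from w.__dict__; fget is not re-run
#
# The computed value is stored under the same name in the instance __dict__,
# so ordinary attribute lookup shadows the (non-data) descriptor afterwards.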
def memoized_instancemethod(fn):
"""Decorate a method memoize its return value.
Best applied to no-arg methods: memoization is not sensitive to
argument values, and will always return the same value even when
called with different arguments.
"""
def oneshot(self, *args, **kw):
result = fn(self, *args, **kw)
memo = lambda *a, **kw: result
memo.__name__ = fn.__name__
memo.__doc__ = fn.__doc__
self.__dict__[fn.__name__] = memo
return result
return update_wrapper(oneshot, fn)
class group_expirable_memoized_property(object):
"""A family of @memoized_properties that can be expired in tandem."""
def __init__(self, attributes=()):
self.attributes = []
if attributes:
self.attributes.extend(attributes)
def expire_instance(self, instance):
"""Expire all memoized properties for *instance*."""
stash = instance.__dict__
for attribute in self.attributes:
stash.pop(attribute, None)
def __call__(self, fn):
self.attributes.append(fn.__name__)
return memoized_property(fn)
def method(self, fn):
self.attributes.append(fn.__name__)
return memoized_instancemethod(fn)
class MemoizedSlots(object):
"""Apply memoized items to an object using a __getattr__ scheme.
This allows the functionality of memoized_property and
memoized_instancemethod to be available to a class using __slots__.
"""
__slots__ = ()
def _fallback_getattr(self, key):
raise AttributeError(key)
def __getattr__(self, key):
if key.startswith('_memoized'):
raise AttributeError(key)
elif hasattr(self, '_memoized_attr_%s' % key):
value = getattr(self, '_memoized_attr_%s' % key)()
setattr(self, key, value)
return value
elif hasattr(self, '_memoized_method_%s' % key):
fn = getattr(self, '_memoized_method_%s' % key)
def oneshot(*args, **kw):
result = fn(*args, **kw)
memo = lambda *a, **kw: result
memo.__name__ = fn.__name__
memo.__doc__ = fn.__doc__
setattr(self, key, memo)
return result
oneshot.__doc__ = fn.__doc__
return oneshot
else:
return self._fallback_getattr(key)
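# Illustrative sketch (not part of the original module; the class is
# hypothetical): a __slots__ class opts in by defining
# _memoized_attr_<name> / _memoized_method_<name> methods:
#
#     class Point(MemoizedSlots):
#         __slots__ = ('x', 'y', 'length', 'scaled')
#
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#
#         def _memoized_attr_length(self):
#             return (self.x ** 2 + self.y ** 2) ** 0.5
#
#         def _memoized_method_scaled(self, factor):
#             return Point(self.x * factor, self.y * factor)
#
#     p = Point(3, 4)
#     p.length      # computed once (5.0), then stored in the 'length' slot
#     p.scaled(2)   # first call stores a memoizing stand-in under 'scaled'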
def dependency_for(modulename):
def decorate(obj):
# TODO: would be nice to improve on this import silliness,
# unfortunately importlib doesn't work that great either
tokens = modulename.split(".")
mod = compat.import_(
".".join(tokens[0:-1]), globals(), locals(), tokens[-1])
mod = getattr(mod, tokens[-1])
setattr(mod, obj.__name__, obj)
return obj
return decorate
class dependencies(object):
"""Apply imported dependencies as arguments to a function.
E.g.::
@util.dependencies(
"sqlalchemy.sql.widget",
"sqlalchemy.engine.default"
)
def some_func(self, widget, default, arg1, arg2, **kw):
# ...
Rationale is so that the impact of a dependency cycle can be
associated directly with the few functions that cause the cycle,
and not pollute the module-level namespace.
"""
def __init__(self, *deps):
self.import_deps = []
for dep in deps:
tokens = dep.split(".")
self.import_deps.append(
dependencies._importlater(
".".join(tokens[0:-1]),
tokens[-1]
)
)
def __call__(self, fn):
import_deps = self.import_deps
spec = compat.inspect_getfullargspec(fn)
spec_zero = list(spec[0])
hasself = spec_zero[0] in ('self', 'cls')
for i in range(len(import_deps)):
spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i
inner_spec = format_argspec_plus(spec, grouped=False)
for impname in import_deps:
del spec_zero[1 if hasself else 0]
spec[0][:] = spec_zero
outer_spec = format_argspec_plus(spec, grouped=False)
code = 'lambda %(args)s: fn(%(apply_kw)s)' % {
"args": outer_spec['args'],
"apply_kw": inner_spec['apply_kw']
}
decorated = eval(code, locals())
decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
return update_wrapper(decorated, fn)
@classmethod
def resolve_all(cls, path):
for m in list(dependencies._unresolved):
if m._full_path.startswith(path):
m._resolve()
_unresolved = set()
_by_key = {}
class _importlater(object):
_unresolved = set()
_by_key = {}
def __new__(cls, path, addtl):
key = path + "." + addtl
if key in dependencies._by_key:
return dependencies._by_key[key]
else:
dependencies._by_key[key] = imp = object.__new__(cls)
return imp
def __init__(self, path, addtl):
self._il_path = path
self._il_addtl = addtl
dependencies._unresolved.add(self)
@property
def _full_path(self):
return self._il_path + "." + self._il_addtl
@memoized_property
def module(self):
if self in dependencies._unresolved:
raise ImportError(
"importlater.resolve_all() hasn't "
"been called (this is %s %s)"
% (self._il_path, self._il_addtl))
return getattr(self._initial_import, self._il_addtl)
def _resolve(self):
dependencies._unresolved.discard(self)
self._initial_import = compat.import_(
self._il_path, globals(), locals(),
[self._il_addtl])
def __getattr__(self, key):
if key == 'module':
raise ImportError("Could not resolve module %s"
% self._full_path)
try:
attr = getattr(self.module, key)
except AttributeError:
raise AttributeError(
"Module %s has no attribute '%s'" %
(self._full_path, key)
)
self.__dict__[key] = attr
return attr
# from paste.deploy.converters
def asbool(obj):
if isinstance(obj, compat.string_types):
obj = obj.strip().lower()
if obj in ['true', 'yes', 'on', 'y', 't', '1']:
return True
elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
return False
else:
raise ValueError("String is not true/false: %r" % obj)
return bool(obj)
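# Illustrative behaviour (not part of the original module):
#
#     asbool("Yes")     # -> True   (case-insensitive, whitespace stripped)
#     asbool(" off ")   # -> False
#     asbool(0)         # -> False  (non-strings fall through to bool())
#     asbool("maybe")   # raises ValueError("String is not true/false: ...")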
def bool_or_str(*text):
"""Return a callable that will evaluate a string as
boolean, or one of a set of "alternate" string values.
"""
def bool_or_value(obj):
if obj in text:
return obj
else:
return asbool(obj)
return bool_or_value
def asint(value):
"""Coerce to integer."""
if value is None:
return value
return int(value)
def coerce_kw_type(kw, key, type_, flexi_bool=True):
"""If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
necessary. If 'flexi_bool' is True, the string '0' is considered false
when coercing to boolean.
"""
if key in kw and not isinstance(kw[key], type_) and kw[key] is not None:
if type_ is bool and flexi_bool:
kw[key] = asbool(kw[key])
else:
kw[key] = type_(kw[key])
def constructor_copy(obj, cls, *args, **kw):
"""Instantiate cls using the __dict__ of obj as constructor arguments.
Uses inspect to match the named arguments of ``cls``.
"""
names = get_cls_kwargs(cls)
kw.update(
(k, obj.__dict__[k]) for k in names.difference(kw)
if k in obj.__dict__)
return cls(*args, **kw)
def counter():
"""Return a threadsafe counter function."""
lock = compat.threading.Lock()
counter = itertools.count(1)
# avoid the 2to3 "next" transformation...
def _next():
lock.acquire()
try:
return next(counter)
finally:
lock.release()
return _next
def duck_type_collection(specimen, default=None):
"""Given an instance or class, guess if it is or is acting as one of
the basic collection types: list, set and dict. If the __emulates__
property is present, return that preferentially.
"""
if hasattr(specimen, '__emulates__'):
# canonicalize set vs sets.Set to a standard: the builtin set
if (specimen.__emulates__ is not None and
issubclass(specimen.__emulates__, set)):
return set
else:
return specimen.__emulates__
isa = isinstance(specimen, type) and issubclass or isinstance
if isa(specimen, list):
return list
elif isa(specimen, set):
return set
elif isa(specimen, dict):
return dict
if hasattr(specimen, 'append'):
return list
elif hasattr(specimen, 'add'):
return set
elif hasattr(specimen, 'set'):
return dict
else:
return default
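# A minimal usage sketch (illustrative only, never invoked by this module):
# duck_type_collection() guesses the basic collection type of an instance or
# class, preferring an explicit __emulates__ attribute when one is present.
def _duck_type_collection_sketch():
    class ListLike(object):
        def append(self, item):
            pass

    class SetEmulator(object):
        __emulates__ = set

    assert duck_type_collection([]) is list
    assert duck_type_collection(dict) is dict
    assert duck_type_collection(ListLike()) is list
    assert duck_type_collection(SetEmulator()) is set
    assert duck_type_collection(object(), default=tuple) is tuple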
def assert_arg_type(arg, argtype, name):
if isinstance(arg, argtype):
return arg
else:
if isinstance(argtype, tuple):
raise exc.ArgumentError(
"Argument '%s' is expected to be one of type %s, got '%s'" %
(name, ' or '.join("'%s'" % a for a in argtype), type(arg)))
else:
raise exc.ArgumentError(
"Argument '%s' is expected to be of type '%s', got '%s'" %
(name, argtype, type(arg)))
def dictlike_iteritems(dictlike):
"""Return a (key, value) iterator for almost any dict-like object."""
if compat.py3k:
if hasattr(dictlike, 'items'):
return list(dictlike.items())
else:
if hasattr(dictlike, 'iteritems'):
return dictlike.iteritems()
elif hasattr(dictlike, 'items'):
return iter(dictlike.items())
getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None))
if getter is None:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
if hasattr(dictlike, 'iterkeys'):
def iterator():
for key in dictlike.iterkeys():
yield key, getter(key)
return iterator()
elif hasattr(dictlike, 'keys'):
return iter((key, getter(key)) for key in dictlike.keys())
else:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
class classproperty(property):
"""A decorator that behaves like @property except that operates
on classes rather than instances.
The decorator is currently special when using the declarative
module, but note that the
:class:`~.sqlalchemy.ext.declarative.declared_attr`
decorator should be used for this purpose with declarative.
"""
def __init__(self, fget, *arg, **kw):
super(classproperty, self).__init__(fget, *arg, **kw)
self.__doc__ = fget.__doc__
def __get__(desc, self, cls):
return desc.fget(cls)
class hybridproperty(object):
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
clsval = self.func(owner)
clsval.__doc__ = self.func.__doc__
return clsval
else:
return self.func(instance)
class hybridmethod(object):
"""Decorate a function as cls- or instance- level."""
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self.func.__get__(owner, owner.__class__)
else:
return self.func.__get__(instance, owner)
class _symbol(int):
def __new__(self, name, doc=None, canonical=None):
"""Construct a new named symbol."""
assert isinstance(name, compat.string_types)
if canonical is None:
canonical = hash(name)
v = int.__new__(_symbol, canonical)
v.name = name
if doc:
v.__doc__ = doc
return v
def __reduce__(self):
return symbol, (self.name, "x", int(self))
def __str__(self):
return repr(self)
def __repr__(self):
return "symbol(%r)" % self.name
_symbol.__name__ = 'symbol'
class symbol(object):
"""A constant symbol.
>>> symbol('foo') is symbol('foo')
True
>>> symbol('foo')
symbol('foo')
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
The optional ``doc`` argument assigns to ``__doc__``. This
is strictly so that Sphinx autoattr picks up the docstring we want
(it doesn't appear to pick up the in-module docstring if the datamember
is in a different module - autoattribute also blows up completely).
If Sphinx fixes/improves this then we would no longer need
``doc`` here.
"""
symbols = {}
_lock = compat.threading.Lock()
def __new__(cls, name, doc=None, canonical=None):
cls._lock.acquire()
try:
sym = cls.symbols.get(name)
if sym is None:
cls.symbols[name] = sym = _symbol(name, doc, canonical)
return sym
finally:
symbol._lock.release()
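# A minimal usage sketch (illustrative only, never invoked by this module):
# symbol() interns named constants, so repeated calls with the same name
# return the very same _symbol instance and the repr stays readable.
def _symbol_usage_sketch():
    pending = symbol("pending")
    assert pending is symbol("pending")
    assert repr(pending) == "symbol('pending')"
    # _symbol subclasses int, so a symbol can carry an explicit integer value.
    three = symbol("three", canonical=3)
    assert int(three) == 3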
_creation_order = 1
def set_creation_order(instance):
"""Assign a '_creation_order' sequence to the given instance.
This allows multiple instances to be sorted in order of creation
(typically within a single thread; the counter is not particularly
threadsafe).
"""
global _creation_order
instance._creation_order = _creation_order
_creation_order += 1
def warn_exception(func, *args, **kwargs):
"""executes the given function, catches all exceptions and converts to
a warning.
"""
try:
return func(*args, **kwargs)
except Exception:
warn("%s('%s') ignored" % sys.exc_info()[0:2])
def ellipses_string(value, len_=25):
try:
if len(value) > len_:
return "%s..." % value[0:len_]
else:
return value
except TypeError:
return value
class _hash_limit_string(compat.text_type):
"""A string subclass that can only be hashed on a maximum amount
of unique values.
This is used for warnings so that we can send out parameterized warnings
without the __warningregistry__ of the module, or the non-overridable
"once" registry within warnings.py, overloading memory,
"""
def __new__(cls, value, num, args):
interpolated = (value % args) + \
(" (this warning may be suppressed after %d occurrences)" % num)
self = super(_hash_limit_string, cls).__new__(cls, interpolated)
self._hash = hash("%s_%d" % (value, hash(interpolated) % num))
return self
def __hash__(self):
return self._hash
def __eq__(self, other):
return hash(self) == hash(other)
def warn(msg):
"""Issue a warning.
If msg is a string, :class:`.exc.SAWarning` is used as
the category.
"""
warnings.warn(msg, exc.SAWarning, stacklevel=2)
def warn_limited(msg, args):
"""Issue a warning with a paramterized string, limiting the number
of registrations.
"""
if args:
msg = _hash_limit_string(msg, 10, args)
warnings.warn(msg, exc.SAWarning, stacklevel=2)
def only_once(fn):
"""Decorate the given function to be a no-op after it is called exactly
once."""
once = [fn]
def go(*arg, **kw):
if once:
once_fn = once.pop()
return once_fn(*arg, **kw)
return go
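# A minimal usage sketch (illustrative only, never invoked by this module):
# only_once() turns a callable into a one-shot; the first call runs, any
# later call silently returns None.
def _only_once_sketch():
    calls = []
    record = only_once(calls.append)
    record("first")
    record("second")
    assert calls == ["first"]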
_SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py')
_UNITTEST_RE = re.compile(r'unit(?:2|test2?/)')
def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
"""Chop extraneous lines off beginning and end of a traceback.
:param tb:
a list of traceback lines as returned by ``traceback.format_stack()``
:param exclude_prefix:
a regular expression object matching lines to skip at beginning of
``tb``
:param exclude_suffix:
a regular expression object matching lines to skip at end of ``tb``
"""
start = 0
end = len(tb) - 1
while start <= end and exclude_prefix.search(tb[start]):
start += 1
while start <= end and exclude_suffix.search(tb[end]):
end -= 1
return tb[start:end + 1]
NoneType = type(None)
def attrsetter(attrname):
code = \
"def set(obj, value):"\
" obj.%s = value" % attrname
env = locals().copy()
exec(code, env)
return env['set']
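# A minimal usage sketch (illustrative only, never invoked by this module):
# attrsetter() builds a two-argument setter for a fixed attribute name,
# roughly the write-side counterpart of operator.attrgetter().
def _attrsetter_sketch():
    class Record(object):
        pass

    set_name = attrsetter("name")
    rec = Record()
    set_name(rec, "widget")
    assert rec.name == "widget"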
class EnsureKWArgType(type):
"""Apply translation of functions to accept **kw arguments if they
don't already.
"""
def __init__(cls, clsname, bases, clsdict):
fn_reg = cls.ensure_kwarg
if fn_reg:
for key in clsdict:
m = re.match(fn_reg, key)
if m:
fn = clsdict[key]
spec = compat.inspect_getargspec(fn)
if not spec.keywords:
clsdict[key] = wrapped = cls._wrap_w_kw(fn)
setattr(cls, key, wrapped)
super(EnsureKWArgType, cls).__init__(clsname, bases, clsdict)
def _wrap_w_kw(self, fn):
def wrap(*arg, **kw):
return fn(*arg)
return update_wrapper(wrap, fn)
def wrap_callable(wrapper, fn):
"""Augment functools.update_wrapper() to work with objects with
a ``__call__()`` method.
:param fn:
object with __call__ method
"""
if hasattr(fn, '__name__'):
return update_wrapper(wrapper, fn)
else:
_f = wrapper
_f.__name__ = fn.__class__.__name__
_f.__module__ = fn.__module__
if hasattr(fn.__call__, '__doc__') and fn.__call__.__doc__:
_f.__doc__ = fn.__call__.__doc__
elif fn.__doc__:
_f.__doc__ = fn.__doc__
return _f
|
|
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Enum parameter type testcases.
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
Enum size = 8bits; 5 components :
- max range [-127,128]
Test cases :
------------
- Enum parameter nominal value = ENUM_NOMINAL : 5
- Enum parameter min value = ENUM_MIN : -127
- Enum parameter max value = ENUM_MAX : 128
- Enum parameter out of bound value = ENUM_OOB : 255
- Enum parameter out of size value = ENUM_OOS : 256
- Enum parameter undefined value = UNDEF
"""
import os
import commands
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type ENUM - 8 bits, 5 components
class TestCases(PfwTestCase):
def setUp(self):
self.param_name = "/Test/Test/TEST_TYPES/ENUM"
self.filesystem_name=os.environ["PFW_RESULT"] + "/ENUM"
self.pfw.sendCmd("setTuningMode", "on")
def tearDown(self):
self.pfw.sendCmd("setTuningMode", "off")
def test_Nominal_Case(self):
"""
Testing Enum parameter in nominal case
--------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- ENUM parameter in nominal case = ENUM_NOMINAL
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- ENUM parameter set to ENUM_NOMINAL
- FILESYSTEM set to 0x5
"""
log.D(self.test_Nominal_Case.__doc__)
value = "ENUM_NOMINAL"
filesystem_value="0x5"
log.I("Set parameter %s to %s"%(self.param_name,value))
out,err = self.pfw.sendCmd("setParameter",self.param_name, value)
assert err == None, log.E("setParameter %s %s : %s" % (self.param_name, value, err))
assert out == "Done", log.F("setParameter %s %s - expected : Done : %s" % (self.param_name, value,out))
log.I("Check Enum parameter state")
out, err = self.pfw.sendCmd("getParameter",self.param_name)
assert err == None, log.E("getParameter %s : %s" % (self.param_name, err))
assert out == value, log.F("getParameter %s - expected : %s , found : %s" % (self.param_name,value,out))
log.I("Check filesystem value")
assert open(self.filesystem_name).read()[:-1] == filesystem_value, log.F("FILESYSTEM : parameter update error for %s"%(self.param_name))
def test_TypeMin(self):
"""
Testing minimal value for Enum parameter
----------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- ENUM parameter in min case = ENUM_MIN
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- ENUM parameter set to ENUM_MIN
- FILESYSTEM set to 0x80
"""
log.D(self.test_TypeMin.__doc__)
value = "ENUM_MIN"
filesystem_value="0x80"
log.I("Set parameter %s to %s"%(self.param_name,value))
out,err = self.pfw.sendCmd("setParameter",self.param_name, value)
assert err == None, log.E("setParameter %s %s : %s" % (self.param_name, value, err))
assert out == "Done", log.F("setParameter %s %s - expected : Done : %s" % (self.param_name, value,out))
log.I("Check Enum parameter state")
out, err = self.pfw.sendCmd("getParameter",self.param_name)
assert err == None, log.E("getParameter %s : %s" % (self.param_name, err))
assert out == value, log.F("getParameter %s - expected : %s , found : %s" % (self.param_name,value,out))
log.I("Check filesystem value")
assert open(self.filesystem_name).read()[:-1] == filesystem_value, log.F("FILESYSTEM : parameter update error for %s"%(self.param_name))
def test_TypeMax(self):
"""
Testing maximal value for Enum parameter
----------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- ENUM parameter in max case = ENUM_MAX
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- ENUM parameter set to ENUM_MAX
- FILESYSTEM set to 0x7F
"""
log.D(self.test_TypeMax.__doc__)
value = "ENUM_MAX"
filesystem_value="0x7f"
log.I("Set parameter %s to %s"%(self.param_name,value))
out,err = self.pfw.sendCmd("setParameter",self.param_name, value)
assert err == None, log.E("setParameter %s %s : %s" % (self.param_name, value, err))
assert out == "Done", log.F("setParameter %s %s - expected : Done : %s" % (self.param_name, value,out))
log.I("Check Enum parameter state")
out, err = self.pfw.sendCmd("getParameter",self.param_name)
assert err == None, log.E("getParameter %s : %s" % (self.param_name, err))
assert out == value, log.F("getParameter %s - expected : %s , found : %s" % (self.param_name,value,out))
log.I("Check filesystem value")
assert open(self.filesystem_name).read()[:-1] == filesystem_value, log.F("FILESYSTEM : parameter update error for %s"%(self.param_name))
def test_TypeUndefined(self):
"""
Testing ENUM parameter in undefined reference case
--------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- ENUM parameter = UNDEF
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected, parameter must not change
- FILESYSTEM must not change
"""
log.D(self.test_TypeUndefined.__doc__)
value = "UNDEF"
log.I("Check parameter %s initial value"%(self.param_name))
init_parameter_value, err=self.pfw.sendCmd("getParameter",self.param_name)
init_filesystem_value=open(self.filesystem_name).read()[:-1]
log.I("Set parameter %s to %s"%(self.param_name,value))
out,err = self.pfw.sendCmd("setParameter",self.param_name, value, expectSuccess=False)
assert err == None, log.E("setParameter %s %s : %s" % (self.param_name, value, err))
assert out != "Done", log.F("Error not detected when setParameter %s %s" % (self.param_name, value))
log.I("Check Enum parameter state")
out, err = self.pfw.sendCmd("getParameter",self.param_name)
assert err == None, log.E("getParameter %s : %s" % (self.param_name, err))
assert out == init_parameter_value, log.F("getParameter %s - expected : %s , found : %s" % (self.param_name,init_parameter_value,out))
log.I("Check filesystem value")
assert open(self.filesystem_name).read()[:-1] == init_filesystem_value, log.F("FILESYSTEM : parameter update error for %s"%(self.param_name))
def test_TypeOutOfBound(self):
"""
Testing ENUM parameter in out of range case
-------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- ENUM parameter in max case = ENUM_OOB : 255
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected, parameter must not change
- FILESYSTEM must not change
"""
log.D(self.test_TypeOutOfBound.__doc__)
value = "ENUM_OOB"
log.I("Check parameter %s initial value"%(self.param_name))
init_parameter_value, err=self.pfw.sendCmd("getParameter",self.param_name)
init_filesystem_value=open(self.filesystem_name).read()[:-1]
log.I("Set parameter %s to %s"%(self.param_name,value))
out,err = self.pfw.sendCmd("setParameter",self.param_name, value, expectSuccess=False)
assert err == None, log.E("setParameter %s %s : %s" % (self.param_name, value, err))
assert out != "Done", log.F("Error not detected when setParameter %s %s" % (self.param_name, value))
log.I("Check Enum parameter state")
out, err = self.pfw.sendCmd("getParameter",self.param_name)
assert err == None, log.E("getParameter %s : %s" % (self.param_name, err))
assert out == init_parameter_value, log.F("getParameter %s - expected : %s , found : %s" % (self.param_name,init_parameter_value,out))
log.I("Check filesystem value")
assert open(self.filesystem_name).read()[:-1] == init_filesystem_value, log.F("FILESYSTEM : parameter update error for %s"%(self.param_name))
def test_TypeOutOfSize(self):
"""
Testing ENUM parameter in out of size case
------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- ENUM parameter in max case = ENUM_OOS : 256
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected, parameter must not change
- FILESYSTEM must not change
"""
log.D(self.test_TypeOutOfSize.__doc__)
value = "ENUM_OOS"
log.I("Check parameter %s initial value"%(self.param_name))
init_parameter_value, err=self.pfw.sendCmd("getParameter",self.param_name)
init_filesystem_value=open(self.filesystem_name).read()[:-1]
log.I("Set parameter %s to %s"%(self.param_name,value))
out,err = self.pfw.sendCmd("setParameter",self.param_name, value, expectSuccess=False)
assert err == None, log.E("setParameter %s %s : %s" % (self.param_name, value, err))
assert out != "Done", log.F("Error not detected when setParameter %s %s" % (self.param_name, value))
log.I("Check Enum parameter state")
out, err = self.pfw.sendCmd("getParameter",self.param_name)
assert err == None, log.E("getParameter %s : %s" % (self.param_name, err))
assert out == init_parameter_value, log.F("getParameter %s - expected : %s , found : %s" % (self.param_name,init_parameter_value,out))
log.I("Check filesystem value")
assert open(self.filesystem_name).read()[:-1] == init_filesystem_value, log.F("FILESYSTEM : parameter update error for %s"%(self.param_name))
|
|
#!/usr/bin/env python3
# This file is part of the Soletta Project
#
# Copyright (C) 2015 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import json
import sys
import traceback
from collections import OrderedDict
def merge_schema(directory, definitions, to_merge):
for schema in to_merge:
if '$ref' not in schema:
raise ValueError("no $ref in allOf")
path, link = schema['$ref'].split('#')
ref = load_json_schema(directory, path)
defnref = link.split('/')[-1]
definitions.update(ref[defnref])
def load_json_schema(directory, path, schemas={}):
if path in schemas:
return schemas[path]
data = json.load(open(os.path.join(directory, path), "r"))
if not data['$schema'].startswith("http://json-schema.org/schema"):
raise ValueError("not a JSON schema")
definitions = data.get("definitions", {})
if not definitions:
raise ValueError("empty definition block")
if 'title' not in data:
raise ValueError("JSON schema without title")
required = set(data.get('required', []))
for rt, descr in definitions.items():
if 'allOf' in descr:
merge_schema(directory, descr, descr['allOf'])
del descr['allOf']
if 'properties' in descr:
for field, props in descr['properties'].items():
doc = props.get('description', '')
props['read_only'] = doc.startswith('ReadOnly,')
props['required'] = field in required
if props['read_only']:
props['description'] = props['description'][len('ReadOnly,'):].strip()
descr['title'] = data['title']
schemas[path] = definitions
return definitions
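# A minimal usage sketch (illustrative only, never invoked by the generator;
# the schema below is hypothetical): load_json_schema() expects a top-level
# "$schema" and "title" plus a "definitions" block with "properties", and it
# annotates each property in place with read_only/required flags, treating a
# description that starts with "ReadOnly," as a read-only marker.
def _load_json_schema_sketch():
    import tempfile
    example = {
        "$schema": "http://json-schema.org/schema#",
        "title": "Example Brightness",
        "required": ["brightness"],
        "definitions": {
            "oic.r.example.brightness": {
                "properties": {
                    "brightness": {
                        "type": "integer",
                        "description": "ReadOnly, current brightness value"
                    }
                }
            }
        }
    }
    directory = tempfile.mkdtemp()
    with open(os.path.join(directory, "example.json"), "w") as f:
        json.dump(example, f)
    definitions = load_json_schema(directory, "example.json")
    props = definitions["oic.r.example.brightness"]["properties"]
    assert props["brightness"]["read_only"] is True
    assert props["brightness"]["required"] is True
    assert props["brightness"]["description"] == "current brightness value"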
JSON_TO_C = {
"string": "char *",
"integer": "int32_t",
"boolean": "bool",
"number": "double"
}
JSON_TO_C_TMP = {}
JSON_TO_C_TMP.update(JSON_TO_C)
JSON_TO_C_TMP['string'] = "const char *"
JSON_TO_C_TMP['number'] = "double"
JSON_TO_FLOW_GET_PKT = {
"string": "sol_flow_packet_get_string",
"integer": "sol_flow_packet_get_irange_value",
"boolean": "sol_flow_packet_get_boolean",
"number": "sol_flow_packet_get_drange_value"
}
JSON_TO_FLOW_SEND_PKT = {
"string": "sol_flow_send_string_packet",
"integer": "sol_flow_send_irange_value_packet",
"boolean": "sol_flow_send_boolean_packet",
"number": "sol_flow_send_drange_value_packet"
}
JSON_TO_INIT = {
"string": "NULL",
"integer": "0",
"boolean": "false",
"number": "0.0f"
}
JSON_TO_SOL_JSON = {
"string": "string",
"integer": "int",
"boolean": "boolean",
"number": "double"
}
def object_fields_common_c(state_struct_name, name, props):
fields = []
for prop_name, descr in props.items():
doc = '/* %s */' % descr.get('description', '???')
if 'enum' in descr:
var_type = 'enum %s_%s' % (state_struct_name, prop_name)
else:
var_type = JSON_TO_C[descr['type']]
fields.append("%s %s; %s" % (var_type, prop_name, doc))
return '\n'.join(fields)
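# A minimal usage sketch (illustrative only, never invoked by the generator;
# the property names are hypothetical): object_fields_common_c() maps JSON
# schema properties to C struct fields, yielding lines such as
#   int32_t brightness; /* current brightness */
#   enum oic_state_light_power power; /* power state */
def _object_fields_sketch():
    props = OrderedDict([
        ("brightness", {"type": "integer", "description": "current brightness"}),
        ("power", {"enum": ["on", "off"], "description": "power state"}),
    ])
    return object_fields_common_c("oic_state_light", "oic_state_light", props)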
def generate_object_serialize_fn_common_c(state_struct_name, name, props, client):
fmtstrings = []
for prop_name, prop_descr in props.items():
if client and prop_descr['read_only']:
continue
if 'enum' in prop_descr:
fmtstrings.append('\\"%s\\":\\"%%s\\"' % prop_name)
elif prop_descr['type'] == 'string':
fmtstrings.append('\\"%s\\":\\"%%s\\"' % prop_name)
elif prop_descr['type'] == 'boolean':
fmtstrings.append('\\"%s\\":%%s' % prop_name)
elif prop_descr['type'] == 'integer':
fmtstrings.append('\\"%s\\":%%d' % prop_name)
elif prop_descr['type'] == 'number':
fmtstrings.append('\\"%s\\":%%f' % prop_name)
else:
raise ValueError("invalid property type: %s" % prop_descr['type'])
fields = []
for prop_name, prop_descr in props.items():
if client and prop_descr['read_only']:
continue
if 'enum' in prop_descr:
fields.append('%s_%s_tbl[state->state.%s].key' % (state_struct_name, prop_name, prop_name))
elif prop_descr['type'] == 'boolean':
fields.append('(state->state.%s)?"true":"false"' % prop_name)
elif prop_descr['type'] == 'string':
fields.append('ESCAPE_STRING(state->state.%s)' % prop_name)
else:
fields.append('state->state.%s' % prop_name)
if not fields:
return ''
return '''static uint8_t *
%(struct_name)s_serialize(struct %(type)s_resource *resource, uint16_t *length)
{
struct %(struct_name)s *state = (struct %(struct_name)s *)resource;
char *payload;
int r;
r = asprintf(&payload, "{%(fmtstrings)s}", %(fields)s);
if (r < 0)
return NULL;
if (r >= 0xffff) {
free(payload);
errno = -ENOMEM;
return NULL;
}
*length = (uint16_t)r;
return (uint8_t *)payload;
}
''' % {
'type': 'client' if client else 'server',
'struct_name': name,
'fmtstrings': ','.join(fmtstrings),
'fields': ','.join(fields)
}
def get_type_from_property(prop):
if 'type' in prop:
return prop['type']
if 'enum' in prop:
return 'enum:%s' % ','.join(prop['enum'])
raise ValueError('Unknown type for property')
def object_serialize_fn_common_c(state_struct_name, name, props, client, equivalent={}):
def props_are_equivalent(p1, p2):
# Compare property types only; descriptions and other metadata are ignored
p1 = {k: get_type_from_property(v) for k, v in p1.items()}
p2 = {k: get_type_from_property(v) for k, v in p2.items()}
return p1 == p2
for item_name, item_props in equivalent.items():
if item_props[0] == client and props_are_equivalent(props, item_props[1]):
return '''static uint8_t *
%(struct_name)s_serialize(struct %(type)s_resource *resource, uint16_t *length)
{
return %(item_name)s_serialize(resource, length); /* %(item_name)s is equivalent to %(struct_name)s */
}
''' % {
'item_name': item_name,
'struct_name': name,
'type': 'client' if client else 'server'
}
equivalent[name] = (client, props)
return generate_object_serialize_fn_common_c(state_struct_name, name, props, client)
def object_serialize_fn_client_c(state_struct_name, name, props):
return object_serialize_fn_common_c(state_struct_name, name, props, True)
def object_serialize_fn_server_c(state_struct_name, name, props):
return object_serialize_fn_common_c(state_struct_name, name, props, False)
def get_field_integer_client_c(id, name, prop):
return '''if (decode_mask & (1<<%(id)d) && sol_json_token_str_eq(&key, "%(field_name)s", %(field_name_len)d)) {
int r = sol_json_token_get_int32(&value, &fields.%(field_name)s);
if (r < 0)
RETURN_ERROR(r);
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'field_name': name,
'field_name_len': len(name),
'id': id
}
def get_field_number_client_c(id, name, prop):
return '''if (decode_mask & (1<<%(id)d) && sol_json_token_str_eq(&key, "%(field_name)s", %(field_name_len)d)) {
int r = sol_json_token_get_double(&value, &fields.%(field_name)s);
if (r < 0)
RETURN_ERROR(r);
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'field_name': name,
'field_name_len': len(name),
'id': id
}
def get_field_string_client_c(id, name, prop):
return '''if (decode_mask & (1<<%(id)d) && sol_json_token_str_eq(&key, "%(field_name)s", %(field_name_len)d)) {
if (!json_token_to_string(&value, &fields.%(field_name)s))
RETURN_ERROR(-EINVAL);
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'field_name': name,
'field_name_len': len(name),
'id': id
}
def get_field_boolean_client_c(id, name, prop):
return '''if (decode_mask & (1<<%(id)d) && sol_json_token_str_eq(&key, "%(field_name)s", %(field_name_len)d)) {
if (!json_token_to_bool(&value, &fields.%(field_name)s))
RETURN_ERROR(-EINVAL);
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'field_name': name,
'field_name_len': len(name),
'id': id
}
def get_field_enum_client_c(id, struct_name, name, prop):
return '''if (decode_mask & (1<<%(id)d) && sol_json_token_str_eq(&key, "%(field_name)s", %(field_name_len)d)) {
int16_t val = sol_str_table_lookup_fallback(%(struct_name)s_%(field_name)s_tbl,
SOL_STR_SLICE_STR(value.start, value.end - value.start), -1);
if (val < 0)
RETURN_ERROR(-EINVAL);
fields.%(field_name)s = (enum %(struct_name)s_%(field_name)s)val;
decode_mask &= ~(1<<%(id)d);
continue;
}
''' % {
'struct_name': struct_name,
'field_name': name,
'field_name_len': len(name),
'id': id
}
def object_fields_deserializer(name, props):
id = 0
fields = []
for prop_name, prop in props.items():
if 'enum' in prop:
fields.append(get_field_enum_client_c(id, name, prop_name, prop))
elif prop['type'] == 'string':
fields.append(get_field_string_client_c(id, prop_name, prop))
elif prop['type'] == 'integer':
fields.append(get_field_integer_client_c(id, prop_name, prop))
elif prop['type'] == 'number':
fields.append(get_field_number_client_c(id, prop_name, prop))
elif prop['type'] == 'boolean':
fields.append(get_field_boolean_client_c(id, prop_name, prop))
else:
raise ValueError('unknown field type: %s' % prop['type'])
id += 1
return '\n'.join(fields)
def generate_object_deserialize_fn_common_c(name, props):
fields_init = []
for field_name, field_props in props.items():
if 'enum' in field_props:
fields_init.append('.%s = state->%s,' % (field_name, field_name))
elif field_props['type'] == 'string':
fields_init.append('.%s = strdup(state->%s),' % (field_name, field_name))
else:
fields_init.append('.%s = state->%s,' % (field_name, field_name))
fields_free = []
for field_name, field_props in props.items():
if 'enum' in field_props:
continue
if field_props.get('type') == 'string':
fields_free.append('free(fields.%s);' % (field_name))
update_state = []
for field_name, field_props in props.items():
if not 'enum' in field_props and field_props.get('type') == 'string':
update_state.append('free(state->%s);' % field_name)
update_state.append('state->%s = fields.%s;' % (field_name, field_name))
return '''static int
%(struct_name)s_deserialize(struct %(struct_name)s *state,
const uint8_t *payload, uint16_t payload_len, uint32_t decode_mask)
{
#define RETURN_ERROR(errcode) do { err = (errcode); goto out; } while(0)
struct sol_json_scanner scanner;
struct sol_json_token token, key, value;
enum sol_json_loop_reason reason;
int err = 0;
struct %(struct_name)s fields = {
%(fields_init)s
};
sol_json_scanner_init(&scanner, payload, payload_len);
SOL_JSON_SCANNER_OBJECT_LOOP(&scanner, &token, &key, &value, reason) {
%(deserializers)s
}
if (reason != SOL_JSON_LOOP_REASON_OK)
RETURN_ERROR(-EINVAL);
%(update_state)s
return 0;
out:
%(free_fields)s
return err;
#undef RETURN_ERROR
}
''' % {
'struct_name': name,
'fields': object_fields_common_c(name, name, props),
'fields_init': '\n'.join(fields_init),
'deserializers': object_fields_deserializer(name, props),
'free_fields': '\n'.join(fields_free),
'update_state': '\n'.join(update_state)
}
def object_deserialize_fn_common_c(name, props, equivalent={}):
def props_are_equivalent(p1, p2):
p1 = {k: get_type_from_property(v) for k, v in p1.items()}
p2 = {k: get_type_from_property(v) for k, v in p2.items()}
return p1 == p2
for item_name, item_props in equivalent.items():
if props_are_equivalent(props, item_props):
return '''static int
%(struct_name)s_deserialize(struct %(struct_name)s *state,
const uint8_t *payload, uint16_t payload_len, uint32_t decode_mask)
{
/* %(item_name)s is equivalent to %(struct_name)s */
return %(item_name)s_deserialize((struct %(item_name)s *)state, payload, payload_len, decode_mask);
}
''' % {
'item_name': item_name,
'struct_name': name
}
equivalent[name] = props
return generate_object_deserialize_fn_common_c(name, props)
def object_deserialize_fn_client_c(state_struct_name, name, props):
return '''static int
%(struct_name)s_deserialize(struct client_resource *resource, const uint8_t *payload, uint16_t payload_len)
{
struct %(struct_name)s *res = (struct %(struct_name)s *)resource;
return %(state_struct_name)s_deserialize(&res->state, payload, payload_len, ~0);
}
''' % {
'struct_name': name,
'state_struct_name': state_struct_name
}
def object_deserialize_fn_server_c(state_struct_name, name, props):
decode_mask = 0
id = 0
for field_name, field_props in props.items():
if not field_props['read_only']:
decode_mask |= 1<<id
id += 1
if not decode_mask:
return ''
return '''static int
%(struct_name)s_deserialize(struct server_resource *resource, const uint8_t *payload, uint16_t payload_len)
{
struct %(struct_name)s *res = (struct %(struct_name)s *)resource;
return %(state_struct_name)s_deserialize(&res->state, payload, payload_len, 0x%(decode_mask)x);
}
''' % {
'struct_name': name,
'state_struct_name': state_struct_name,
'decode_mask': decode_mask
}
def object_inform_flow_fn_common_c(state_struct_name, name, props, client):
send_flow_pkts = []
for field_name, field_props in props.items():
if 'enum' in field_props:
fn = 'sol_flow_send_string_packet'
val = '%(struct_name)s_%(field_name)s_tbl[state->state.%(field_name)s].key' % {
'struct_name': state_struct_name,
'field_name': field_name
}
else:
fn = JSON_TO_FLOW_SEND_PKT[field_props['type']]
val = 'state->state.%(field_name)s' % {
'field_name': field_name
}
send_flow_pkts.append('''%(flow_send_fn)s(resource->node, SOL_FLOW_NODE_TYPE_%(STRUCT_NAME)s__OUT__OUT_%(FIELD_NAME)s, %(val)s);''' % {
'flow_send_fn': fn,
'STRUCT_NAME': name.upper(),
'FIELD_NAME': field_name.upper(),
'val': val
})
return '''static void %(struct_name)s_inform_flow(struct %(type)s_resource *resource)
{
struct %(struct_name)s *state = (struct %(struct_name)s *)resource;
%(send_flow_pkts)s
}
''' % {
'type': 'client' if client else 'server',
'struct_name': name,
'send_flow_pkts': '\n'.join(send_flow_pkts)
}
def object_inform_flow_fn_client_c(state_struct_name, name, props):
return object_inform_flow_fn_common_c(state_struct_name, name, props, True)
def object_inform_flow_fn_server_c(state_struct_name, name, props):
read_only = all(field_props['read_only'] for field_name, field_props in props.items())
return '' if read_only else object_inform_flow_fn_common_c(state_struct_name, name, props, False)
def object_open_fn_client_c(state_struct_name, resource_type, name, props):
field_init = []
for field_name, field_props in props.items():
if 'enum' in field_props:
init = '(enum %s_%s)0' % (state_struct_name, field_name)
else:
init = JSON_TO_INIT[field_props.get('type', 'integer')]
field_init.append('''resource->state.%(field_name)s = %(init)s;''' % {
'field_name': field_name,
'init': init
})
no_inputs = all(field_props['read_only'] for field_name, field_props in props.items())
if no_inputs:
serialize_fn = 'NULL'
else:
serialize_fn = '%s_serialize' % name
return '''static int
%(struct_name)s_open(struct sol_flow_node *node, void *data, const struct sol_flow_node_options *options)
{
const struct sol_flow_node_type_%(struct_name)s_options *node_opts =
(const struct sol_flow_node_type_%(struct_name)s_options *)options;
static const struct client_resource_funcs funcs = {
.serialize = %(serialize_fn)s,
.deserialize = %(struct_name)s_deserialize,
.inform_flow = %(struct_name)s_inform_flow,
.found_port = SOL_FLOW_NODE_TYPE_%(STRUCT_NAME)s__OUT__FOUND
};
struct %(struct_name)s *resource = data;
int r;
r = client_resource_init(node, &resource->base, "%(resource_type)s", node_opts->hwaddr, &funcs);
if (!r) {
%(field_init)s
}
return 0;
}
''' % {
'struct_name': name,
'STRUCT_NAME': name.upper(),
'resource_type': resource_type,
'field_init': '\n'.join(field_init),
'serialize_fn': serialize_fn
}
def object_open_fn_server_c(state_struct_name, resource_type, name, props, definitions={'id':0}):
def_id = definitions['id']
definitions['id'] += 1
no_inputs = all(field_props['read_only'] for field_name, field_props in props.items())
if no_inputs:
deserialize_fn_name = 'NULL'
inform_flow_fn_name = 'NULL'
else:
deserialize_fn_name = '%s_deserialize' % name
inform_flow_fn_name = '%s_inform_flow' % name
field_init = []
for field_name, field_props in props.items():
if 'enum' in field_props:
init = '(enum %s_%s)0' % (state_struct_name, field_name)
else:
init = JSON_TO_INIT[field_props.get('type', 'integer')]
field_init.append('''resource->state.%(field_name)s = %(init)s;''' % {
'field_name': field_name,
'init': init
})
return '''static int
%(struct_name)s_open(struct sol_flow_node *node, void *data, const struct sol_flow_node_options *options)
{
static const struct sol_str_slice rt_slice = SOL_STR_SLICE_LITERAL("%(resource_type)s");
static const struct sol_str_slice def_slice = SOL_STR_SLICE_LITERAL("/etta/%(def_id)x");
static const struct server_resource_funcs funcs = {
.serialize = %(struct_name)s_serialize,
.deserialize = %(deserialize_fn_name)s,
.inform_flow = %(inform_flow_fn_name)s
};
struct %(struct_name)s *resource = data;
int r;
r = server_resource_init(&resource->base, node, rt_slice, def_slice, &funcs);
if (!r) {
%(field_init)s
}
return r;
}
''' % {
'struct_name': name,
'resource_type': resource_type,
'def_id': def_id,
'deserialize_fn_name': deserialize_fn_name,
'inform_flow_fn_name': inform_flow_fn_name,
'field_init': '\n'.join(field_init)
}
def object_close_fn_client_c(name, props):
destroy_fields = []
for field_name, field_props in props.items():
if 'enum' in field_props:
continue
if field_props.get('type') == 'string':
destroy_fields.append('free(resource->state.%s);' % field_name)
return '''static void %(struct_name)s_close(struct sol_flow_node *node, void *data)
{
struct %(struct_name)s *resource = data;
%(destroy_fields)s
client_resource_close(&resource->base);
}
''' % {
'struct_name': name,
'destroy_fields': '\n'.join(destroy_fields)
}
def object_close_fn_server_c(name, props):
destroy_fields = []
for field_name, field_props in props.items():
if 'enum' in field_props:
continue
if field_props.get('type') == 'string':
destroy_fields.append('free(resource->state.%s);' % field_name)
return '''static void %(struct_name)s_close(struct sol_flow_node *node, void *data)
{
struct %(struct_name)s *resource = data;
%(destroy_fields)s
server_resource_close(&resource->base);
}
''' % {
'struct_name': name,
'destroy_fields': '\n'.join(destroy_fields)
}
def object_setters_fn_common_c(state_struct_name, name, props, client):
fields = []
for field, descr in props.items():
if client and descr['read_only']:
continue
if 'enum' in descr:
fields.append('''static int
%(struct_name)s_set_%(field_name)s(struct sol_flow_node *node, void *data, uint16_t port,
uint16_t conn_id, const struct sol_flow_packet *packet)
{
struct %(struct_name)s *resource = data;
const char *var;
if (!sol_flow_packet_get_string(packet, &var)) {
int16_t val = sol_str_table_lookup_fallback(%(state_struct_name)s_%(field_name)s_tbl,
sol_str_slice_from_str(var), -1);
if (val >= 0) {
resource->state.%(field_name)s = (enum %(state_struct_name)s_%(field_name)s)val;
%(type)s_resource_schedule_update(&resource->base);
return 0;
}
return -ENOENT;
}
return -EINVAL;
}
''' % {
'field_name': field,
'FIELD_NAME': field.upper(),
'state_struct_name': state_struct_name,
'STATE_STRUCT_NAME': state_struct_name.upper(),
'struct_name': name,
'type': 'client' if client else 'server'
})
else:
fields.append('''static int
%(struct_name)s_set_%(field_name)s(struct sol_flow_node *node, void *data, uint16_t port,
uint16_t conn_id, const struct sol_flow_packet *packet)
{
struct %(struct_name)s *resource = data;
%(c_type_tmp)s var;
int r;
r = %(c_getter)s(packet, &var);
if (!r) {
resource->state.%(field_name)s = (%(c_type)s) var;
%(type)s_resource_schedule_update(&resource->base);
}
return r;
}
''' % {
'struct_name': name,
'field_name': field,
'c_type': JSON_TO_C[descr['type']],
'c_type_tmp': JSON_TO_C_TMP[descr['type']],
'c_getter': JSON_TO_FLOW_GET_PKT[descr['type']],
'type': 'client' if client else 'server'
})
return '\n'.join(fields)
def object_setters_fn_client_c(state_struct_name, name, props):
return object_setters_fn_common_c(state_struct_name, name, props, True)
def object_setters_fn_server_c(state_struct_name, name, props):
return object_setters_fn_common_c(state_struct_name, name, props, False)
def generate_enums_common_c(name, props):
output = []
for field, descr in props.items():
if 'enum' in descr:
if 'description' in descr:
output.append('''/* %s */''' % descr['description'])
output.append('''enum %(struct_name)s_%(field_name)s { %(items)s };''' % {
'struct_name': name,
'field_name': field,
'items': ', '.join(('%s_%s_%s' % (name, field, item)).upper() for item in descr['enum'])
})
output.append('''static const struct sol_str_table %(struct_name)s_%(field_name)s_tbl[] = {
%(items)s,
{ }
};''' % {
'struct_name': name,
'field_name': field,
'items': ',\n'.join('SOL_STR_TABLE_ITEM(\"%s\", %s_%s_%s)' % (
item, name.upper(), field.upper(), item.upper()) for item in descr['enum'])
})
return '\n'.join(output)
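# A minimal usage sketch (illustrative only, never invoked by the generator;
# names are hypothetical): for every "enum" property, generate_enums_common_c()
# emits a C enum plus a sol_str_table mapping the JSON strings to enum values,
# e.g. enum oic_state_light_power { OIC_STATE_LIGHT_POWER_ON, ... } together
# with the matching oic_state_light_power_tbl[].
def _generate_enums_sketch():
    props = {"power": {"enum": ["on", "off"], "description": "power state"}}
    return generate_enums_common_c("oic_state_light", props)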
def generate_object_client_c(resource_type, state_struct_name, name, props):
return """struct %(struct_name)s {
struct client_resource base;
struct %(state_struct_name)s state;
};
%(serialize_fn)s
%(deserialize_fn)s
%(inform_flow_fn)s
%(open_fn)s
%(close_fn)s
%(setters_fn)s
""" % {
'state_struct_name': state_struct_name,
'struct_name': name,
'serialize_fn': object_serialize_fn_client_c(state_struct_name, name, props),
'deserialize_fn': object_deserialize_fn_client_c(state_struct_name, name, props),
'inform_flow_fn': object_inform_flow_fn_client_c(state_struct_name, name, props),
'open_fn': object_open_fn_client_c(state_struct_name, resource_type, name, props),
'close_fn': object_close_fn_client_c(name, props),
'setters_fn': object_setters_fn_client_c(state_struct_name, name, props)
}
def generate_object_server_c(resource_type, state_struct_name, name, props):
return """struct %(struct_name)s {
struct server_resource base;
struct %(state_struct_name)s state;
};
%(serialize_fn)s
%(deserialize_fn)s
%(inform_flow_fn)s
%(open_fn)s
%(close_fn)s
%(setters_fn)s
""" % {
'struct_name': name,
'state_struct_name': state_struct_name,
'serialize_fn': object_serialize_fn_server_c(state_struct_name, name, props),
'deserialize_fn': object_deserialize_fn_server_c(state_struct_name, name, props),
'inform_flow_fn': object_inform_flow_fn_server_c(state_struct_name, name, props),
'open_fn': object_open_fn_server_c(state_struct_name, resource_type, name, props),
'close_fn': object_close_fn_server_c(name, props),
'setters_fn': object_setters_fn_server_c(state_struct_name, name, props)
}
def generate_object_common_c(name, props):
return """%(enums)s
struct %(struct_name)s {
%(struct_fields)s
};
%(deserialize_fn)s
""" % {
'enums': generate_enums_common_c(name, props),
'struct_name': name,
'struct_fields': object_fields_common_c(name, name, props),
'deserialize_fn': object_deserialize_fn_common_c(name, props),
}
def generate_object_json(resource_type, struct_name, node_name, title, props, server):
in_ports = []
for prop_name, prop_descr in props.items():
if not server and prop_descr['read_only']:
continue
in_ports.append({
'data_type': JSON_TO_SOL_JSON[prop_descr.get('type', 'string')],
'description': prop_descr.get('description', '???'),
'methods': {
'process': '%s_set_%s' % (struct_name, prop_name)
},
'name': 'IN_%s' % prop_name.upper()
})
if server:
out_ports = []
else:
out_ports = [{
'data_type': 'boolean',
'description': 'Outputs true if resource was found, false if not, or if unreachable',
'name': 'FOUND'
}]
for prop_name, prop_descr in props.items():
out_ports.append({
'data_type': JSON_TO_SOL_JSON[prop_descr.get('type', 'string')],
'description': prop_descr.get('description', '???'),
'name': 'OUT_%s' % prop_name.upper()
})
output = {
'methods': {
'open': '%s_open' % struct_name,
'close': '%s_close' % struct_name
},
'private_data_type': struct_name,
'name': node_name,
'url': 'http://solettaproject.org/doc/latest/components/%s.html' % node_name.replace('/', '-')
}
if server:
output.update({
'category': 'iot/server',
'description': 'OIC Server (%s)' % title
})
else:
output.update({
'category': 'iot/client',
'description': 'OIC Client (%s)' % title,
'options': {
'version': 1,
'members': [
{
'data_type': 'string',
'description': 'Hardware address of the device (MAC address, etc)',
'name': 'hwaddr'
}
]
}
})
if in_ports:
output['in_ports'] = in_ports
if out_ports:
output['out_ports'] = out_ports
return output
def generate_object(rt, title, props):
def type_value(item):
return '%s %s' % (get_type_from_property(item[1]), item[0])
resource_type = rt
if rt.startswith('oic.r.'):
rt = rt[len('oic.r.'):]
elif rt.startswith('core.'):
rt = rt[len('core.'):]
c_identifier = rt.replace(".", "_").lower()
flow_identifier = rt.replace(".", "-").lower()
client_node_name = "oic/client-%s" % flow_identifier
client_struct_name = "oic_client_%s" % c_identifier
server_node_name = "oic/server-%s" % flow_identifier
server_struct_name = "oic_server_%s" % c_identifier
state_struct_name = "oic_state_%s" % c_identifier
new_props = OrderedDict()
for k, v in sorted(props.items(), key=type_value):
new_props[k] = v
props = new_props
retval = {
'c_common': generate_object_common_c(state_struct_name, props),
'c_client': generate_object_client_c(resource_type, state_struct_name, client_struct_name, props),
'c_server': generate_object_server_c(resource_type, state_struct_name, server_struct_name, props),
'json_client': generate_object_json(resource_type, client_struct_name, client_node_name, title, props, False),
'json_server': generate_object_json(resource_type, server_struct_name, server_node_name, title, props, True)
}
return retval
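# A minimal usage sketch (illustrative only, never invoked by the generator;
# the resource type and property are hypothetical): generate_object() bundles
# the common/client/server C fragments and the client/server JSON node
# descriptions for a single resource type. Properties are expected to carry
# the read_only flag that load_json_schema() adds.
def _generate_object_sketch():
    props = {
        "brightness": {
            "type": "integer",
            "description": "current brightness",
            "read_only": False
        }
    }
    generated = generate_object("oic.r.example.brightness", "Brightness", props)
    assert sorted(generated.keys()) == [
        "c_client", "c_common", "c_server", "json_client", "json_server"
    ]
    return generated["json_client"]["name"]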
def generate_for_schema(directory, path):
j = load_json_schema(directory, path)
for rt, defn in j.items():
if not (rt.startswith("oic.r.") or rt.startswith("core.")):
raise ValueError("not an OIC resource definition")
if defn.get('type') == 'object':
yield generate_object(rt, defn['title'], defn['properties'])
def master_json_as_string(generated):
master_json = {
'$schema': 'http://solettaproject.github.io/soletta/schemas/node-type-genspec.schema',
'name': 'oic',
'meta': {
'author': 'Intel Corporation',
'license': 'BSD 3-Clause',
'version': '1'
},
'types': [t['json_server'] for t in generated] + [t['json_client'] for t in generated]
}
return json.dumps(master_json, indent=4)
def master_c_as_string(generated):
generated = list(generated)
code = '''#include <arpa/inet.h>
#include <errno.h>
#include <math.h>
#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include "oic-gen.h"
#include "sol-coap.h"
#include "sol-json.h"
#include "sol-mainloop.h"
#include "sol-missing.h"
#include "sol-oic-client.h"
#include "sol-oic-server.h"
#include "sol-str-slice.h"
#include "sol-str-table.h"
#include "sol-util.h"
#define DEFAULT_UDP_PORT 5683
#define MULTICAST_ADDRESS_IPv4 "224.0.1.187"
#define MULTICAST_ADDRESS_IPv6_LOCAL "ff02::fd"
#define MULTICAST_ADDRESS_IPv6_SITE "ff05::fd"
#define FIND_PERIOD_MS 5000
#define UPDATE_TIMEOUT_MS 50
struct client_resource;
struct server_resource;
struct client_resource_funcs {
uint8_t *(*serialize)(struct client_resource *resource, uint16_t *length);
int (*deserialize)(struct client_resource *resource, const uint8_t *payload, uint16_t payload_len);
void (*inform_flow)(struct client_resource *resource);
int found_port;
};
struct server_resource_funcs {
uint8_t *(*serialize)(struct server_resource *resource, uint16_t *length);
int (*deserialize)(struct server_resource *resource, const uint8_t *payload, uint16_t payload_len);
void (*inform_flow)(struct server_resource *resource);
};
struct client_resource {
struct sol_flow_node *node;
const struct client_resource_funcs *funcs;
struct sol_oic_resource *resource;
struct sol_timeout *find_timeout;
struct sol_timeout *update_schedule_timeout;
struct sol_oic_client client;
const char *rt;
char *hwaddr;
};
struct server_resource {
struct sol_flow_node *node;
const struct server_resource_funcs *funcs;
struct sol_coap_resource *coap;
struct sol_timeout *update_schedule_timeout;
char *endpoint;
struct sol_oic_resource_type oic;
};
static struct sol_network_link_addr multicast_ipv4, multicast_ipv6_local, multicast_ipv6_site;
static bool multicast_addresses_initialized = false;
static bool
initialize_multicast_addresses_once(void)
{
if (multicast_addresses_initialized)
return true;
multicast_ipv4 = (struct sol_network_link_addr) { .family = AF_INET, .port = DEFAULT_UDP_PORT };
if (inet_pton(AF_INET, MULTICAST_ADDRESS_IPv4, &multicast_ipv4.addr) < 0) {
SOL_WRN("Could not parse multicast IP address");
return false;
}
multicast_ipv6_local = (struct sol_network_link_addr) { .family = AF_INET6, .port = DEFAULT_UDP_PORT };
if (inet_pton(AF_INET6, MULTICAST_ADDRESS_IPv6_LOCAL, &multicast_ipv6_local.addr) < 0) {
SOL_WRN("Could not parse multicast IP address");
return false;
}
multicast_ipv6_site = (struct sol_network_link_addr) { .family = AF_INET6, .port = DEFAULT_UDP_PORT };
if (inet_pton(AF_INET6, MULTICAST_ADDRESS_IPv6_SITE, &multicast_ipv6_site.addr) < 0) {
SOL_WRN("Could not parse multicast IP address");
return false;
}
return true;
}
/* FIXME: These should go into sol-network so it's OS-agnostic. */
static bool
find_device_by_hwaddr_arp_cache(const char *hwaddr, struct sol_network_link_addr *addr)
{
static const size_t hwaddr_len = sizeof("00:00:00:00:00:00") - 1;
FILE *arpcache;
char buffer[128];
bool success = false;
arpcache = fopen("/proc/net/arp", "re");
if (!arpcache) {
SOL_WRN("Could not open arp cache file");
return false;
}
/* IP address HW type Flags HW address Mask Device */
if (!fgets(buffer, sizeof(buffer), arpcache)) {
SOL_WRN("Could not discard header line from arp cache file");
goto out;
}
/* 0000000000011111111122222222223333333333444444444455555555556666666666777777 */
/* 0123456789012345678901234567890123456789012345678901234567890123456789012345 */
/* xxx.xxx.xxx.xxx 0x0 0x0 00:00:00:00:00:00 * eth0 */
while (fgets(buffer, sizeof(buffer), arpcache)) {
buffer[58] = '\\0';
if (strncmp(&buffer[41], hwaddr, hwaddr_len))
continue;
buffer[15] = '\\0';
if (inet_pton(AF_INET, buffer, &addr->addr) < 0) {
SOL_WRN("Could not parse IP address '%%s'", buffer);
goto out;
}
SOL_INF("Found device %%s with IP address %%s", hwaddr, buffer);
success = true;
break;
}
out:
fclose(arpcache);
return success;
}
static bool
link_has_address(const struct sol_network_link *link, const struct sol_network_link_addr *addr)
{
struct sol_network_link_addr *iter;
uint16_t idx;
SOL_VECTOR_FOREACH_IDX(&link->addrs, iter, idx) {
if (sol_network_link_addr_eq(addr, iter))
return true;
}
return false;
}
static bool
has_link_with_address(const struct sol_network_link_addr *addr)
{
const struct sol_vector *links = sol_network_get_available_links();
struct sol_network_link *link;
uint16_t idx;
if (!links)
return false;
SOL_VECTOR_FOREACH_IDX(links, link, idx) {
if (link_has_address(link, addr))
return true;
}
return false;
}
static bool
find_device_by_hwaddr_ipv4(const char *hwaddr, struct sol_network_link_addr *addr)
{
if (has_link_with_address(addr))
return true;
return find_device_by_hwaddr_arp_cache(hwaddr, addr);
}
static bool
find_device_by_hwaddr_ipv6(const char *hwaddr, struct sol_network_link_addr *addr)
{
char addrstr[SOL_INET_ADDR_STRLEN] = {0};
if (!sol_network_addr_to_str(addr, addrstr, sizeof(addrstr))) {
SOL_WRN("Could not convert network address to string");
return false;
}
if (!strncmp(addrstr, "::ffff:", sizeof("::ffff:") - 1)) {
struct sol_network_link_addr tentative_addr = { .family = AF_INET };
const char *ipv4addr = addrstr + sizeof("::ffff:") - 1;
if (inet_pton(tentative_addr.family, ipv4addr, &tentative_addr.addr) < 0)
return false;
return find_device_by_hwaddr_ipv4(hwaddr, &tentative_addr);
}
/* Link local format
* MAC address: xx:xx:xx:xx:xx:xx
* IPv6 Link local address: fe80::xyxx:xxff:fexx:xxxx
* 0000000000111111111122222
* 0123456789012345678901234
*/
if (strncmp(addrstr, "fe80::", sizeof("fe80::") - 1))
goto not_link_local;
if (strncmp(&addrstr[13], "ff:fe", sizeof("ff:fe") - 1))
goto not_link_local;
/* FIXME: There's one additional check for the last byte that's missing here, but
* this is temporary until proper NDP is implemented. */
return (hwaddr[16] == addrstr[23] && hwaddr[15] == addrstr[22])
&& (hwaddr[13] == addrstr[21] && hwaddr[12] == addrstr[20])
&& (hwaddr[10] == addrstr[18] && hwaddr[9] == addrstr[17])
&& (hwaddr[7] == addrstr[11] && hwaddr[6] == addrstr[10])
&& (hwaddr[4] == addrstr[8] && hwaddr[3] == addrstr[7]);
not_link_local:
SOL_WRN("NDP not implemented and client has an IPv6 address: %%s. Ignoring.", addrstr);
return false;
}
static bool
find_device_by_hwaddr(const char *hwaddr, struct sol_network_link_addr *addr)
{
if (addr->family == AF_INET)
return find_device_by_hwaddr_ipv4(hwaddr, addr);
if (addr->family == AF_INET6)
return find_device_by_hwaddr_ipv6(hwaddr, addr);
SOL_WRN("Unknown address family: %%d", addr->family);
return false;
}
static bool
client_resource_implements_type(struct sol_oic_resource *oic_res, const char *resource_type)
{
struct sol_str_slice rt = SOL_STR_SLICE_STR(resource_type, strlen(resource_type));
struct sol_str_slice *type;
uint16_t idx;
SOL_VECTOR_FOREACH_IDX(&oic_res->types, type, idx) {
if (sol_str_slice_eq(*type, rt))
return true;
}
return false;
}
static void
state_changed(struct sol_oic_client *oic_cli, const struct sol_network_link_addr *cliaddr,
const struct sol_str_slice *href, const struct sol_str_slice *payload, void *data)
{
struct client_resource *resource = data;
int r;
if (!sol_str_slice_eq(*href, resource->resource->href)) {
SOL_WRN("Received response to href=`%%.*s`, but resource href is `%%.*s`",
SOL_STR_SLICE_PRINT(*href),
SOL_STR_SLICE_PRINT(resource->resource->href));
return;
}
if (!sol_network_link_addr_eq(cliaddr, &resource->resource->addr)) {
char resaddr[SOL_INET_ADDR_STRLEN] = {0};
char respaddr[SOL_INET_ADDR_STRLEN] = {0};
if (!sol_network_addr_to_str(&resource->resource->addr, resaddr, sizeof(resaddr))) {
SOL_WRN("Could not convert network address to string");
return;
}
if (!sol_network_addr_to_str(cliaddr, respaddr, sizeof(respaddr))) {
SOL_WRN("Could not convert network address to string");
return;
}
SOL_WRN("Expecting response from %%s, got from %%s, ignoring", resaddr, respaddr);
return;
}
r = resource->funcs->deserialize(resource, (const uint8_t *)payload->data, payload->len);
if (r >= 0)
resource->funcs->inform_flow(resource);
}
static void
found_resource(struct sol_oic_client *oic_cli, struct sol_oic_resource *oic_res, void *data)
{
struct client_resource *resource = data;
int r;
/* Some OIC device sent this node a discovery response packet but node's already set up. */
if (resource->resource)
goto out;
/* Not the droid we're looking for. */
if (!find_device_by_hwaddr(resource->hwaddr, &oic_res->addr))
goto out;
/* FIXME: Should this check move to sol-oic-client? Does it actually make sense? */
if (resource->rt && !client_resource_implements_type(oic_res, resource->rt)) {
SOL_WRN("Received resource that does not implement rt=%%s, ignoring", resource->rt);
goto out;
}
SOL_INF("Found resource matching hwaddr %%s", resource->hwaddr);
resource->resource = sol_oic_resource_ref(oic_res);
if (resource->find_timeout) {
sol_timeout_del(resource->find_timeout);
resource->find_timeout = NULL;
}
r = sol_oic_client_resource_set_observable(oic_cli, oic_res, state_changed, resource, true);
if (!r)
SOL_WRN("Could not observe resource as requested");
out:
r = sol_flow_send_boolean_packet(resource->node, resource->funcs->found_port, !!resource->resource);
if (r < 0)
SOL_WRN("Could not send flow packet, will try again");
}
static void
send_discovery_packets(struct client_resource *resource)
{
sol_oic_client_find_resource(&resource->client, &multicast_ipv4, resource->rt,
found_resource, resource);
sol_oic_client_find_resource(&resource->client, &multicast_ipv6_local, resource->rt,
found_resource, resource);
sol_oic_client_find_resource(&resource->client, &multicast_ipv6_site, resource->rt,
found_resource, resource);
}
static bool
find_timer(void *data)
{
struct client_resource *resource = data;
if (resource->resource) {
SOL_INF("Timer expired when node already configured; disabling");
resource->find_timeout = NULL;
return false;
}
send_discovery_packets(resource);
return true;
}
static char *
create_endpoint(void)
{
static int endpoint_id = 0;
char *endpoint;
if (asprintf(&endpoint, "/sol/%%x", endpoint_id) < 0)
return NULL;
endpoint_id++;
return endpoint;
}
static bool
server_resource_perform_update(void *data)
{
struct server_resource *resource = data;
uint8_t *payload;
uint16_t payload_len;
SOL_NULL_CHECK(resource->funcs->serialize, false);
payload = resource->funcs->serialize(resource, &payload_len);
if (!payload) {
SOL_WRN("Error while serializing update message");
} else {
resource->funcs->inform_flow(resource);
sol_oic_notify_observers(resource->coap, payload, payload_len);
free(payload);
}
resource->update_schedule_timeout = NULL;
return false;
}
static void
server_resource_schedule_update(struct server_resource *resource)
{
if (resource->update_schedule_timeout)
return;
resource->update_schedule_timeout = sol_timeout_add(UPDATE_TIMEOUT_MS,
server_resource_perform_update, resource);
}
static sol_coap_responsecode_t
server_handle_put(const struct sol_network_link_addr *cliaddr, const void *data,
uint8_t *payload, uint16_t *payload_len)
{
const struct server_resource *resource = data;
int r;
if (!resource->funcs->deserialize)
return SOL_COAP_RSPCODE_NOT_IMPLEMENTED;
r = resource->funcs->deserialize((struct server_resource *)resource, payload, *payload_len);
if (!r) {
server_resource_schedule_update((struct server_resource *)resource);
*payload_len = 0;
return SOL_COAP_RSPCODE_CHANGED;
}
return SOL_COAP_RSPCODE_PRECONDITION_FAILED;
}
static sol_coap_responsecode_t
server_handle_get(const struct sol_network_link_addr *cliaddr, const void *data,
uint8_t *payload, uint16_t *payload_len)
{
const struct server_resource *resource = data;
uint16_t serialized_len;
uint8_t *serialized;
if (!resource->funcs->serialize)
return SOL_COAP_RSPCODE_NOT_IMPLEMENTED;
serialized = resource->funcs->serialize((struct server_resource*)resource, &serialized_len);
if (!serialized)
return SOL_COAP_RSPCODE_INTERNAL_ERROR;
if (serialized_len > *payload_len) {
free(serialized);
return SOL_COAP_RSPCODE_INTERNAL_ERROR;
}
memcpy(payload, serialized, serialized_len);
*payload_len = serialized_len;
free(serialized);
return SOL_COAP_RSPCODE_CONTENT;
}
// log_init() implementation happens within oic-gen.c
static void log_init(void);
static int
server_resource_init(struct server_resource *resource, struct sol_flow_node *node,
struct sol_str_slice resource_type, struct sol_str_slice defn_endpoint,
const struct server_resource_funcs *funcs)
{
struct sol_oic_device_definition *def;
log_init();
if (!sol_oic_server_init(DEFAULT_UDP_PORT)) {
SOL_WRN("Could not create %%.*s server", SOL_STR_SLICE_PRINT(resource_type));
return -ENOTCONN;
}
resource->endpoint = create_endpoint();
SOL_NULL_CHECK(resource->endpoint, -ENOMEM);
resource->node = node;
resource->update_schedule_timeout = NULL;
resource->funcs = funcs;
resource->oic = (struct sol_oic_resource_type) {
.api_version = SOL_OIC_RESOURCE_TYPE_API_VERSION,
.endpoint = sol_str_slice_from_str(resource->endpoint),
.resource_type = resource_type,
.iface = SOL_STR_SLICE_LITERAL("oc.mi.def"),
.get = { .handle = server_handle_get },
.put = { .handle = server_handle_put },
};
def = sol_oic_server_register_definition(defn_endpoint, resource_type,
SOL_COAP_FLAGS_OC_CORE | SOL_COAP_FLAGS_WELL_KNOWN);
if (!def)
goto out;
resource->coap = sol_oic_device_definition_register_resource_type(def,
&resource->oic, resource, SOL_COAP_FLAGS_OC_CORE | SOL_COAP_FLAGS_OBSERVABLE);
if (!resource->coap)
goto out;
return 0;
out:
sol_oic_server_release();
free(resource->endpoint);
return -EINVAL;
}
static void
server_resource_close(struct server_resource *resource)
{
if (resource->update_schedule_timeout)
sol_timeout_del(resource->update_schedule_timeout);
free(resource->endpoint);
sol_oic_server_release();
}
static int
client_resource_init(struct sol_flow_node *node, struct client_resource *resource, const char *resource_type,
const char *hwaddr, const struct client_resource_funcs *funcs)
{
log_init();
if (!initialize_multicast_addresses_once()) {
SOL_ERR("Could not initialize multicast addresses");
return -ENOTCONN;
}
assert(resource_type);
if (!hwaddr)
return -EINVAL;
resource->client.server = sol_coap_server_new(0);
SOL_NULL_CHECK(resource->client.server, -ENOMEM);
resource->hwaddr = strdup(hwaddr);
SOL_NULL_CHECK_GOTO(resource->hwaddr, nomem);
resource->node = node;
resource->find_timeout = NULL;
resource->update_schedule_timeout = NULL;
resource->resource = NULL;
resource->funcs = funcs;
resource->rt = resource_type;
SOL_INF("Sending multicast packets to find resource with hwaddr %%s (rt=%%s)",
resource->hwaddr, resource->rt);
resource->find_timeout = sol_timeout_add(FIND_PERIOD_MS, find_timer, resource);
if (resource->find_timeout) {
/* Perform a find now instead of waiting FIND_PERIOD_MS the first time. If the
* resource is found in the mean time, the timeout will be automatically disabled. */
send_discovery_packets(resource);
return 0;
}
SOL_ERR("Could not create timeout to find resource");
free(resource->hwaddr);
nomem:
sol_coap_server_unref(resource->client.server);
return -ENOMEM;
}
static void
client_resource_close(struct client_resource *resource)
{
free(resource->hwaddr);
if (resource->find_timeout)
sol_timeout_del(resource->find_timeout);
if (resource->update_schedule_timeout)
sol_timeout_del(resource->update_schedule_timeout);
if (resource->resource) {
bool r = sol_oic_client_resource_set_observable(&resource->client, resource->resource,
NULL, NULL, false);
if (!r)
SOL_WRN("Could not unobserve resource");
sol_oic_resource_unref(resource->resource);
}
sol_coap_server_unref(resource->client.server);
}
static bool
client_resource_perform_update(void *data)
{
struct client_resource *resource = data;
uint8_t *payload;
uint16_t payload_len;
SOL_NULL_CHECK_GOTO(resource->resource, disable_timeout);
SOL_NULL_CHECK_GOTO(resource->funcs->serialize, disable_timeout);
payload = resource->funcs->serialize(resource, &payload_len);
if (!payload) {
SOL_WRN("Error while serializing update message");
} else {
int r = sol_oic_client_resource_request(&resource->client, resource->resource,
SOL_COAP_METHOD_PUT, payload, payload_len, NULL, NULL);
free(payload);
if (r < 0) {
SOL_WRN("Could not send update request to resource, will try again");
return true;
}
}
disable_timeout:
resource->update_schedule_timeout = NULL;
return false;
}
static void
client_resource_schedule_update(struct client_resource *resource)
{
if (resource->update_schedule_timeout)
return;
resource->update_schedule_timeout = sol_timeout_add(UPDATE_TIMEOUT_MS,
client_resource_perform_update, resource);
}
static const char escapable_chars[] = {'\\\\', '\\"', '/', '\\b', '\\f', '\\n', '\\r', '\\t'};
SOL_ATTR_USED static size_t
calculate_escaped_len(const char *s)
{
size_t len = 0;
for (; *s; s++) {
if (memchr(escapable_chars, *s, sizeof(escapable_chars)))
len++;
len++;
}
return len + 1;
}
SOL_ATTR_USED static char *
escape_json_string(const char *s, char *buf)
{
char *out = buf;
for (; *s; s++) {
if (memchr(escapable_chars, *s, sizeof(escapable_chars))) {
*buf++ = '\\\\';
switch (*s) {
case '"': *buf++ = '"'; break;
case '\\\\': *buf++ = '\\\\'; break;
case '/': *buf++ = '/'; break;
case '\\b': *buf++ = 'b'; break;
case '\\f': *buf++ = 'f'; break;
case '\\n': *buf++ = 'n'; break;
case '\\r': *buf++ = 'r'; break;
case '\\t': *buf++ = 't'; break;
}
} else {
*buf++ = *s;
}
}
*buf++ = '\\0';
return out;
}
#define ESCAPE_STRING(s) ({ \\
char buffer ## __COUNT__[calculate_escaped_len(s)]; \\
escape_json_string(s, buffer ## __COUNT__); \\
})
SOL_ATTR_USED static bool
json_token_to_string(struct sol_json_token *token, char **out)
{
if (sol_json_token_get_type(token) != SOL_JSON_TYPE_STRING)
return false;
free(*out);
*out = strndup(token->start, token->end - token->start);
return !!*out;
}
SOL_ATTR_USED static bool
json_token_to_bool(struct sol_json_token *token, bool *out)
{
if (sol_json_token_get_type(token) == SOL_JSON_TYPE_TRUE)
*out = true;
else if (sol_json_token_get_type(token) == SOL_JSON_TYPE_FALSE)
*out = false;
else
return false;
return true;
}
%(generated_c_common)s
%(generated_c_client)s
%(generated_c_server)s
#include "oic-gen.c"
''' % {
'generated_c_common': '\n'.join(t['c_common'] for t in generated),
'generated_c_client': '\n'.join(t['c_client'] for t in generated),
'generated_c_server': '\n'.join(t['c_server'] for t in generated),
}
return code.replace('\n\n\n', '\n')
if __name__ == '__main__':
def seems_schema(path):
return path.endswith('.json') and (path.startswith('oic.r.') or path.startswith('core.'))
generated = []
print('Generating code for schemas: ', end='')
for path in (f for f in os.listdir(sys.argv[1]) if seems_schema(f)):
print(path, end=', ')
try:
for code in generate_for_schema(sys.argv[1], path):
generated.append(code)
except KeyError as e:
if e.args[0] == 'array':
print("(arrays unsupported)", end=' ')
else:
raise e
except Exception:
print('Ignoring due to exception in generator. Traceback follows:')
traceback.print_exc(file=sys.stderr)
continue
print('\nWriting master JSON: %s' % sys.argv[2])
open(sys.argv[2], 'w+').write(master_json_as_string(generated))
print('Writing C: %s' % sys.argv[3])
open(sys.argv[3], 'w+').write(master_c_as_string(generated))
if os.path.exists('/usr/bin/indent'):
print('Indenting generated C.')
os.system("/usr/bin/indent -kr -l120 '%s'" % sys.argv[3])
print('Done.')
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Unit tests for relay pass manager."""
import numpy as np
import pytest
import tvm
from tvm import te
from tvm import relay
from tvm.relay import ExprFunctor
from tvm.relay import Function, Call
from tvm.relay import analysis
from tvm.relay import transform as _transform
from tvm.ir import instrument as _instrument
from tvm.relay.testing import run_infer_type
import tvm.testing
def get_var_func():
shape = (5, 10)
tp = relay.TensorType(shape, "float32")
x = relay.var("x", tp)
gv = relay.GlobalVar("myAbs")
func = relay.Function([x], relay.abs(x))
return gv, func
def extract_var_func(mod, name):
var = mod.get_global_var(name)
func = mod[var]
return var, func
def update_func(func):
# Double the value of Constants and vars.
class DoubleValues(ExprFunctor):
def __init__(self):
ExprFunctor.__init__(self)
def visit_constant(self, const):
return relay.add(const, const)
def visit_var(self, var):
return relay.add(var, var)
def visit_call(self, call):
new_op = self.visit(call.op)
new_args = [self.visit(arg) for arg in call.args]
return Call(new_op, new_args, call.attrs)
def visit_global_var(self, gvar):
return gvar
def visit_op(self, op):
return op
def visit_function(self, fn):
new_body = self.visit(fn.body)
return Function(list(fn.params), new_body, fn.ret_type, fn.type_params, fn.attrs)
double_value = DoubleValues()
return double_value.visit(func)
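# Illustrative effect (not part of the tests below): applying update_func to
# relay.Function([x, y], x + y) produces a function whose body is
# relay.add(relay.add(x, x), relay.add(y, y)), i.e. every var and constant is doubled.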
class OptTester:
"""A helper class for testing the pass manager."""
def __init__(self, mod):
if not isinstance(mod, tvm.IRModule):
raise TypeError("mod is expected to be the type of " "tvm.IRModule")
self.mod = mod
def analysis(self):
"""Perform analysis for the current module."""
pass
@staticmethod
def transform(node, ctx=None):
"""Perform optimization on node."""
if isinstance(node, tvm.IRModule):
# Add a function to the module and return an updated module.
gv, func = get_var_func()
mod = tvm.IRModule({gv: func})
mod.update(node)
return mod
if isinstance(node, relay.Function):
return update_func(node)
raise TypeError("Found not supported node type.")
def get_rand(shape, dtype="float32"):
return tvm.nd.array(np.random.rand(*shape).astype(dtype))
def check_func(func, ref_func):
func = run_infer_type(func)
ref_func = run_infer_type(ref_func)
assert tvm.ir.structural_equal(func, ref_func)
@tvm.testing.uses_gpu
def test_module_pass():
shape = (5, 10)
dtype = "float32"
tp = relay.TensorType(shape, dtype)
x = relay.var("x", tp)
y = relay.var("y", tp)
v_add = relay.GlobalVar("myAdd")
func = relay.Function([x, y], x + y)
mod = tvm.IRModule({v_add: func})
pass_name = "module_pass_test"
opt_level = 0
opt_tester = OptTester(mod)
pass_ctx = None
@tvm.transform.module_pass(opt_level=opt_level, name=pass_name)
def transform(expr, ctx):
return opt_tester.transform(expr, ctx)
def test_pass_registration():
mod_pass = transform
assert isinstance(mod_pass, tvm.transform.ModulePass)
pass_info = mod_pass.info
assert pass_info.name == pass_name
assert pass_info.opt_level == opt_level
def test_pass_registration_no_decorator():
def direct_transform(expr, ctx):
return opt_tester.transform(expr, ctx)
mod_pass = tvm.transform.module_pass(direct_transform, opt_level=3)
assert isinstance(mod_pass, tvm.transform.ModulePass)
pass_info = mod_pass.info
assert pass_info.name == "direct_transform"
assert pass_info.opt_level == 3
def test_pass_run():
module_pass = transform
assert pass_name in str(module_pass)
updated_mod = module_pass(mod)
assert isinstance(updated_mod, tvm.IRModule)
# Check the abs function in the updated module.
v_abs, myabs = get_var_func()
new_v_add = updated_mod.get_global_var(v_abs.name_hint)
new_abs = updated_mod[new_v_add]
check_func(new_abs, myabs)
# Check the add function in the updated module.
v_abs, myabs = get_var_func()
new_v_add = updated_mod.get_global_var(v_add.name_hint)
new_add = updated_mod[new_v_add]
check_func(new_add, func)
# Check the add function in the python transformed module.
ret = opt_tester.transform(mod, pass_ctx)
transformed_v_add = ret.get_global_var(v_add.name_hint)
transformed_add = mod[transformed_v_add]
check_func(new_add, transformed_add)
# Execute the add function.
x_nd = get_rand(shape, dtype)
y_nd = get_rand(shape, dtype)
ref_res = x_nd.numpy() + y_nd.numpy()
for target, dev in tvm.testing.enabled_targets():
res1 = relay.create_executor("graph", device=dev, target=target).evaluate(new_add)(
x_nd, y_nd
)
tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5)
res2 = relay.create_executor("debug", device=dev, target=target).evaluate(new_add)(
x_nd, y_nd
)
tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5)
test_pass_registration()
test_pass_registration_no_decorator()
test_pass_run()
def test_function_class_pass():
@relay.transform.function_pass(opt_level=1)
class TestReplaceFunc:
"""Simple test function to replace one argument to another."""
def __init__(self, new_func):
self.new_func = new_func
def transform_function(self, func, mod, ctx):
return self.new_func
x = relay.var("x", shape=(10, 20))
f1 = relay.Function([x], x)
f2 = relay.Function([x], relay.log(x))
fpass = TestReplaceFunc(f1)
assert fpass.info.opt_level == 1
assert fpass.info.name == "TestReplaceFunc"
mod = tvm.IRModule.from_expr(f2)
mod = fpass(mod)
# wrap in expr
mod2 = tvm.IRModule.from_expr(f1)
mod2 = tvm.relay.transform.InferType()(mod2)
assert tvm.ir.structural_equal(mod["main"], mod2["main"])
@tvm.testing.uses_gpu
def test_function_pass():
shape = (10,)
dtype = "float32"
tp = relay.TensorType(shape, dtype)
x = relay.var("x", tp)
v_log = relay.GlobalVar("myLog")
log = relay.Function([x], relay.log(x))
mod = tvm.IRModule({v_log: log})
pass_name = "function_pass_test"
opt_level = 1
opt_tester = OptTester(mod)
pass_ctx = None
@_transform.function_pass(opt_level=opt_level, name=pass_name)
def transform(expr, mod, ctx):
return opt_tester.transform(expr, ctx)
def get_ref_log():
ref_log = relay.Function([x], relay.log(relay.add(x, x)))
return ref_log
def test_pass_registration():
function_pass = transform
assert isinstance(function_pass, _transform.FunctionPass)
pass_info = function_pass.info
assert pass_info.name == pass_name
assert pass_info.opt_level == opt_level
def test_pass_registration_no_decorator():
def direct_transform(expr, ctx):
return opt_tester.transform(expr, ctx)
mod_pass = _transform.function_pass(direct_transform, opt_level=0)
assert isinstance(mod_pass, _transform.FunctionPass)
pass_info = mod_pass.info
assert pass_info.name == "direct_transform"
assert pass_info.opt_level == 0
def test_pass_run():
function_pass = transform
assert pass_name in str(function_pass)
updated_mod = function_pass(mod)
assert isinstance(updated_mod, tvm.IRModule)
# Check the log function in the updated module.
new_v_log = updated_mod.get_global_var(v_log.name_hint)
new_log = updated_mod[new_v_log]
check_func(new_log, get_ref_log())
# Check the log function in the python transformed function.
ret = opt_tester.transform(log, pass_ctx)
check_func(new_log, ret)
# Execute the add function.
x_nd = get_rand(shape, dtype)
ref_res = np.log(x_nd.numpy() * 2)
for target, dev in tvm.testing.enabled_targets():
res1 = relay.create_executor("graph", device=dev, target=target).evaluate(new_log)(x_nd)
tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5)
res2 = relay.create_executor("debug", device=dev, target=target).evaluate(new_log)(x_nd)
tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5)
test_pass_registration()
test_pass_registration_no_decorator()
test_pass_run()
def test_module_class_pass():
@tvm.transform.module_pass(opt_level=1)
class TestPipeline:
"""Simple test function to replace one argument to another."""
def __init__(self, new_mod, replace):
self.new_mod = new_mod
self.replace = replace
def transform_module(self, mod, ctx):
if self.replace:
return self.new_mod
return mod
x = relay.var("x", shape=(10, 20))
m1 = tvm.IRModule.from_expr(relay.Function([x], x))
m2 = tvm.IRModule.from_expr(relay.Function([x], relay.log(x)))
fpass = TestPipeline(m2, replace=True)
assert fpass.info.name == "TestPipeline"
mod3 = fpass(m1)
assert mod3.same_as(m2)
mod4 = TestPipeline(m2, replace=False)(m1)
assert mod4.same_as(m1)
def test_pass_info():
info = tvm.transform.PassInfo(opt_level=1, name="xyz")
assert info.opt_level == 1
assert info.name == "xyz"
@tvm.testing.uses_gpu
def test_sequential_pass():
shape = (10,)
dtype = "float32"
tp = relay.TensorType(shape, dtype)
x = relay.var("x", tp)
y = relay.var("y", tp)
v_sub = relay.GlobalVar("mySub")
sub = relay.Function([x, y], relay.subtract(x, y))
z = relay.var("z", tp)
v_log = relay.GlobalVar("myLog")
log = relay.Function([z], relay.log(z))
mod = tvm.IRModule({v_sub: sub, v_log: log})
def get_ref_log():
ref_log = relay.Function([x], relay.log(relay.add(x, x)))
return ref_log
def get_ref_sub():
ref_sub = relay.Function([x, y], relay.subtract(relay.add(x, x), relay.add(y, y)))
return ref_sub
def get_ref_abs():
shape = (5, 10)
tp = relay.TensorType(shape, "float32")
a = relay.var("a", tp)
ref_abs = relay.Function([a], relay.abs(relay.add(a, a)))
return ref_abs
# Register a module pass.
opt_tester = OptTester(mod)
pass_ctx = None
@tvm.transform.module_pass(opt_level=1)
def mod_transform(expr, ctx):
return opt_tester.transform(expr, ctx)
module_pass = mod_transform
# Register a function pass.
@_transform.function_pass(opt_level=1)
def func_transform(expr, mod, ctx):
return opt_tester.transform(expr, ctx)
function_pass = func_transform
def test_pass_registration():
passes = [module_pass, function_pass]
opt_level = 2
pass_name = "sequential"
sequential = tvm.transform.Sequential(passes=passes, opt_level=opt_level)
pass_info = sequential.info
assert pass_info.name == pass_name
assert pass_info.opt_level == opt_level
def test_no_pass():
passes = []
sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
ret_mod = sequential(mod)
mod_func = ret_mod[v_sub]
check_func(sub, mod_func)
def test_only_module_pass():
passes = [module_pass]
sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
with tvm.transform.PassContext(required_pass=["mod_transform"]):
ret_mod = sequential(mod)
# Check the subtract function.
sub_var, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
check_func(new_sub, sub)
# Check the abs function is added.
abs_var, abs_func = get_var_func()
abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
check_func(new_abs, abs_func)
def test_only_function_pass():
# Check the subtract function.
passes = [function_pass]
sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
with tvm.transform.PassContext(required_pass=["func_transform"]):
ret_mod = sequential(mod)
_, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
check_func(new_sub, get_ref_sub())
# Check the log function.
log_var, new_log = extract_var_func(ret_mod, v_log.name_hint)
check_func(new_log, get_ref_log())
def test_multiple_passes():
# Reset the current module since mod has been polluted by the previous
# function pass.
mod = tvm.IRModule({v_sub: sub, v_log: log})
passes = [module_pass, function_pass]
sequential = tvm.transform.Sequential(opt_level=1, passes=passes)
required = ["mod_transform", "func_transform"]
with tvm.transform.PassContext(required_pass=required):
ret_mod = sequential(mod)
# Check the abs function is added.
abs_var, abs_func = get_var_func()
abs_var, new_abs = extract_var_func(ret_mod, abs_var.name_hint)
check_func(new_abs, get_ref_abs())
# Check the subtract function is modified correctly.
_, new_sub = extract_var_func(ret_mod, v_sub.name_hint)
check_func(new_sub, get_ref_sub())
# Check the log function is modified correctly.
_, new_log = extract_var_func(ret_mod, v_log.name_hint)
check_func(new_log, get_ref_log())
# Execute the updated subtract function.
x_nd = get_rand(shape, dtype)
y_nd = get_rand(shape, dtype)
ref_res = np.subtract(x_nd.numpy() * 2, y_nd.numpy() * 2)
for target, dev in tvm.testing.enabled_targets():
res1 = relay.create_executor("graph", device=dev, target=target).evaluate(new_sub)(
x_nd, y_nd
)
tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5)
res2 = relay.create_executor("debug", device=dev, target=target).evaluate(new_sub)(
x_nd, y_nd
)
tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5)
# Execute the updated abs function.
x_nd = get_rand((5, 10), dtype)
ref_res = np.abs(x_nd.numpy() * 2)
for target, dev in tvm.testing.enabled_targets():
res1 = relay.create_executor("graph", device=dev, target=target).evaluate(new_abs)(x_nd)
tvm.testing.assert_allclose(res1.numpy(), ref_res, rtol=1e-5)
res2 = relay.create_executor("debug", device=dev, target=target).evaluate(new_abs)(x_nd)
tvm.testing.assert_allclose(res2.numpy(), ref_res, rtol=1e-5)
test_pass_registration()
test_no_pass()
test_only_module_pass()
test_only_function_pass()
test_multiple_passes()
def test_sequential_with_scoping():
shape = (1, 2, 3)
c_data = np.array(shape).astype("float32")
tp = relay.TensorType(shape, "float32")
def before():
c = relay.const(c_data)
x = relay.var("x", tp)
y = relay.add(c, c)
y = relay.multiply(y, relay.const(2, "float32"))
y = relay.add(x, y)
z = relay.add(y, c)
z1 = relay.add(y, c)
z2 = relay.add(z, z1)
return relay.Function([x], z2)
def expected():
x = relay.var("x", tp)
c_folded = (c_data + c_data) * 2
y = relay.add(x, relay.const(c_folded))
z = relay.add(y, relay.const(c_data))
z1 = relay.add(z, z)
return relay.Function([x], z1)
seq = tvm.transform.Sequential(
[
relay.transform.InferType(),
relay.transform.FoldConstant(),
relay.transform.EliminateCommonSubexpr(),
relay.transform.AlterOpLayout(),
]
)
mod = tvm.IRModule({"main": before()})
with tvm.transform.PassContext(opt_level=3):
with tvm.target.Target("llvm"):
mod = seq(mod)
zz = mod["main"]
zexpected = run_infer_type(expected())
assert tvm.ir.structural_equal(zz, zexpected)
def test_nested_sequential_with_scoping():
def before():
x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
y = relay.nn.conv2d(x, w, padding=(1, 1))
y = relay.reshape(y, newshape=(1, 16, -1))
y = relay.reshape(y, newshape=(4, 8, -1, 16))
y = relay.reverse_reshape(y, newshape=(32, 0, -1))
return tvm.IRModule.from_expr(y)
def expected():
x = relay.var("x", shape=(1, 16, 16, 16), dtype="float32")
w = relay.var("w", shape=(32, 16, 3, 3), dtype="float32")
y = relay.nn.conv2d(x, w, padding=(1, 1))
y = relay.reshape(y, newshape=(32, 16, 16))
return tvm.IRModule.from_expr(y)
z = before()
passes = [
tvm.transform.Sequential([relay.transform.SimplifyExpr()]),
]
with tvm.transform.PassContext(opt_level=1):
zz = tvm.transform.Sequential(passes)(z)
expected = relay.transform.InferType()(expected())
assert tvm.ir.structural_equal(zz, expected)
def test_print_ir(capfd):
shape = (1, 2, 3)
tp = relay.TensorType(shape, "float32")
x = relay.var("x", tp)
y = relay.add(x, x)
y = relay.multiply(y, relay.const(2, "float32"))
func = relay.Function([x], y)
seq = tvm.transform.Sequential(
[
relay.transform.InferType(),
relay.transform.FoldConstant(),
tvm.transform.PrintIR(),
relay.transform.DeadCodeElimination(),
]
)
mod = tvm.IRModule({"main": func})
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
out = capfd.readouterr().err
assert "PrintIR" in out
assert "multiply" in out
@tvm.instrument.pass_instrument
class PassCounter:
def __init__(self):
# Just setting a garbage value to test set_up callback
self.counts = 1234
def enter_pass_ctx(self):
self.counts = 0
def exit_pass_ctx(self):
self.counts = 0
def run_before_pass(self, module, info):
self.counts += 1
def get_counts(self):
return self.counts
def test_print_debug_callback():
shape = (1, 2, 3)
tp = relay.TensorType(shape, "float32")
x = relay.var("x", tp)
y = relay.add(x, x)
y = relay.multiply(y, relay.const(2, "float32"))
func = relay.Function([x], y)
seq = tvm.transform.Sequential(
[
relay.transform.InferType(),
relay.transform.FoldConstant(),
relay.transform.DeadCodeElimination(),
]
)
mod = tvm.IRModule({"main": func})
pass_counter = PassCounter()
with tvm.transform.PassContext(opt_level=3, instruments=[pass_counter]):
# Should be reset when entering pass context
assert pass_counter.get_counts() == 0
mod = seq(mod)
# TODO(@jroesch): when we remove the new fn pass behavior we need to
# change this back to match the correct behavior
assert pass_counter.get_counts() == 6
# Should be cleaned up after exiting pass context
assert pass_counter.get_counts() == 0
if __name__ == "__main__":
pytest.main()
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import os.path
import re
from json_parse import OrderedDict
class ParseException(Exception):
"""Thrown when data in the model is invalid.
"""
def __init__(self, parent, message):
hierarchy = _GetModelHierarchy(parent)
hierarchy.append(message)
Exception.__init__(
self, 'Model parse exception at:\n' + '\n'.join(hierarchy))
class Model(object):
"""Model of all namespaces that comprise an API.
Properties:
- |namespaces| a map of a namespace name to its model.Namespace
"""
def __init__(self):
self.namespaces = {}
def AddNamespace(self, json, source_file, include_compiler_options=False):
"""Add a namespace's json to the model and returns the namespace.
"""
namespace = Namespace(json,
source_file,
include_compiler_options=include_compiler_options)
self.namespaces[namespace.name] = namespace
return namespace
class Namespace(object):
"""An API namespace.
Properties:
- |name| the name of the namespace
- |unix_name| the unix_name of the namespace
- |source_file| the file that contained the namespace definition
- |source_file_dir| the directory component of |source_file|
- |source_file_filename| the filename component of |source_file|
- |platforms| if not None, the list of platforms that the namespace is
available to
- |types| a map of type names to their model.Type
- |functions| a map of function names to their model.Function
- |events| a map of event names to their model.Function
- |properties| a map of property names to their model.Property
- |compiler_options| the compiler_options dict, only present if
|include_compiler_options| is True
"""
def __init__(self, json, source_file, include_compiler_options=False):
self.name = json['namespace']
self.unix_name = UnixName(self.name)
self.source_file = source_file
self.source_file_dir, self.source_file_filename = os.path.split(source_file)
self.parent = None
self.platforms = _GetPlatforms(json)
toplevel_origin = Origin(from_client=True, from_json=True)
self.types = _GetTypes(self, json, self, toplevel_origin)
self.functions = _GetFunctions(self, json, self)
self.events = _GetEvents(self, json, self)
self.properties = _GetProperties(self, json, self, toplevel_origin)
if include_compiler_options:
self.compiler_options = json.get('compiler_options', {})
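# A minimal namespace JSON accepted by Namespace() might look like the
# following (illustrative only; the key names are those read by the
# _Get* helpers and Namespace.__init__ above):
#
#   {
#     "namespace": "experimental.example",
#     "types": [...],
#     "functions": [...],
#     "events": [...],
#     "properties": {...},
#     "platforms": ["chromeos", "win"]
#   }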
class Origin(object):
"""Stores the possible origin of model object as a pair of bools. These are:
|from_client| indicating that instances can originate from users of
generated code (for example, function results), or
|from_json| indicating that instances can originate from the JSON (for
example, function parameters)
It is possible for model objects to originate from both the client and json,
for example Types defined in the top-level schema, in which case both
|from_client| and |from_json| would be True.
"""
def __init__(self, from_client=False, from_json=False):
if not from_client and not from_json:
raise ValueError('One of from_client or from_json must be true')
self.from_client = from_client
self.from_json = from_json
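# For example, Types declared in the top-level schema are constructed with
# Origin(from_client=True, from_json=True), as done in Namespace.__init__ above.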
class Type(object):
"""A Type defined in the json.
Properties:
- |name| the type name
- |namespace| the Type's namespace
- |description| the description of the type (if provided)
- |properties| a map of property unix_names to their model.Property
- |functions| a map of function names to their model.Function
- |events| a map of event names to their model.Event
- |origin| the Origin of the type
- |property_type| the PropertyType of this Type
- |item_type| if this is an array, the type of items in the array
- |simple_name| the name of this Type without a namespace
- |additional_properties| the type of the additional properties, if any is
specified
"""
def __init__(self,
parent,
name,
json,
namespace,
origin):
self.name = name
self.namespace = namespace
self.simple_name = _StripNamespace(self.name, namespace)
self.unix_name = UnixName(self.name)
self.description = json.get('description', None)
self.origin = origin
self.parent = parent
self.instance_of = json.get('isInstanceOf', None)
# TODO(kalman): Only objects need functions/events/properties, but callers
# assume that all types have them. Fix this.
self.functions = _GetFunctions(self, json, namespace)
self.events = _GetEvents(self, json, namespace)
self.properties = _GetProperties(self, json, namespace, origin)
json_type = json.get('type', None)
if json_type == 'array':
self.property_type = PropertyType.ARRAY
self.item_type = Type(
self, '%sType' % name, json['items'], namespace, origin)
elif '$ref' in json:
self.property_type = PropertyType.REF
self.ref_type = json['$ref']
elif 'enum' in json and json_type == 'string':
self.property_type = PropertyType.ENUM
self.enum_values = [value for value in json['enum']]
elif json_type == 'any':
self.property_type = PropertyType.ANY
elif json_type == 'binary':
self.property_type = PropertyType.BINARY
elif json_type == 'boolean':
self.property_type = PropertyType.BOOLEAN
elif json_type == 'integer':
self.property_type = PropertyType.INTEGER
elif (json_type == 'double' or
json_type == 'number'):
self.property_type = PropertyType.DOUBLE
elif json_type == 'string':
self.property_type = PropertyType.STRING
elif 'choices' in json:
self.property_type = PropertyType.CHOICES
self.choices = [Type(self,
# The name of the choice type - there had better be
# either a type or a $ref specified for the choice.
json.get('type', json.get('$ref')),
json,
namespace,
origin)
for json in json['choices']]
elif json_type == 'object':
if not (
'properties' in json or
'additionalProperties' in json or
'functions' in json or
'events' in json):
raise ParseException(self, name + " has no properties or functions")
self.property_type = PropertyType.OBJECT
additional_properties_json = json.get('additionalProperties', None)
if additional_properties_json is not None:
self.additional_properties = Type(self,
'additionalProperties',
additional_properties_json,
namespace,
origin)
else:
self.additional_properties = None
elif json_type == 'function':
self.property_type = PropertyType.FUNCTION
# Sometimes we might have an unnamed function, e.g. if it's a property
# of an object. Use the name of the property in that case.
function_name = json.get('name', name)
self.function = Function(self, function_name, json, namespace, origin)
else:
raise ParseException(self, 'Unsupported JSON type %s' % json_type)
class Function(object):
"""A Function defined in the API.
Properties:
- |name| the function name
- |platforms| if not None, the list of platforms that the function is
available to
- |params| a list of parameters to the function (order matters). A separate
parameter is used for each choice of a 'choices' parameter
- |description| a description of the function (if provided)
- |callback| the callback parameter to the function. There should be exactly
one
- |optional| whether the Function is "optional"; this only makes sense to be
present when the Function is representing a callback property
- |simple_name| the name of this Function without a namespace
- |returns| the return type of the function; None if the function does not
return a value
"""
def __init__(self,
parent,
name,
json,
namespace,
origin):
self.name = name
self.simple_name = _StripNamespace(self.name, namespace)
self.platforms = _GetPlatforms(json)
self.params = []
self.description = json.get('description')
self.callback = None
self.optional = json.get('optional', False)
self.parent = parent
self.nocompile = json.get('nocompile')
options = json.get('options', {})
self.conditions = options.get('conditions', [])
self.actions = options.get('actions', [])
self.supports_listeners = options.get('supportsListeners', True)
self.supports_rules = options.get('supportsRules', False)
def GeneratePropertyFromParam(p):
return Property(self, p['name'], p, namespace, origin)
self.filters = [GeneratePropertyFromParam(filter)
for filter in json.get('filters', [])]
callback_param = None
for param in json.get('parameters', []):
if param.get('type') == 'function':
if callback_param:
# No ParseException because the webstore has this.
# Instead, pretend all intermediate callbacks are properties.
self.params.append(GeneratePropertyFromParam(callback_param))
callback_param = param
else:
self.params.append(GeneratePropertyFromParam(param))
if callback_param:
self.callback = Function(self,
callback_param['name'],
callback_param,
namespace,
Origin(from_client=True))
self.returns = None
if 'returns' in json:
self.returns = Type(self,
'%sReturnType' % name,
json['returns'],
namespace,
origin)
class Property(object):
"""A property of a type OR a parameter to a function.
Properties:
- |name| name of the property as in the json. This shouldn't change since
it is the key used to access DictionaryValues
- |unix_name| the unix_style_name of the property. Used as variable name
- |optional| a boolean representing whether the property is optional
- |description| a description of the property (if provided)
- |type_| the model.Type of this property
- |simple_name| the name of this Property without a namespace
"""
def __init__(self, parent, name, json, namespace, origin):
"""Creates a Property from JSON.
"""
self.parent = parent
self.name = name
self._unix_name = UnixName(self.name)
self._unix_name_used = False
self.origin = origin
self.simple_name = _StripNamespace(self.name, namespace)
self.description = json.get('description', None)
self.optional = json.get('optional', None)
self.instance_of = json.get('isInstanceOf', None)
# HACK: only support very specific value types.
is_allowed_value = (
'$ref' not in json and
('type' not in json or json['type'] == 'integer'
or json['type'] == 'string'))
self.value = None
if 'value' in json and is_allowed_value:
self.value = json['value']
if 'type' not in json:
# Sometimes the type of the value is left out, and we need to figure
# it out for ourselves.
if isinstance(self.value, int):
json['type'] = 'integer'
elif isinstance(self.value, basestring):
json['type'] = 'string'
else:
# TODO(kalman): support more types as necessary.
raise ParseException(
parent,
'"%s" is not a supported type for "value"' % type(self.value))
self.type_ = Type(parent, name, json, namespace, origin)
def GetUnixName(self):
"""Gets the property's unix_name. Raises AttributeError if not set.
"""
if not self._unix_name:
raise AttributeError('No unix_name set on %s' % self.name)
self._unix_name_used = True
return self._unix_name
def SetUnixName(self, unix_name):
"""Set the property's unix_name. Raises AttributeError if the unix_name has
already been used (GetUnixName has been called).
"""
if unix_name == self._unix_name:
return
if self._unix_name_used:
raise AttributeError(
'Cannot set the unix_name on %s; '
'it is already used elsewhere as %s' %
(self.name, self._unix_name))
self._unix_name = unix_name
unix_name = property(GetUnixName, SetUnixName)
class _Enum(object):
"""Superclass for enum types with a "name" field, setting up repr/eq/ne.
Enums need to do this so that equality/non-equality work over pickling.
"""
@staticmethod
def GetAll(cls):
"""Yields all _Enum objects declared in |cls|.
"""
for prop_key in dir(cls):
prop_value = getattr(cls, prop_key)
if isinstance(prop_value, _Enum):
yield prop_value
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
def __eq__(self, other):
return type(other) == type(self) and other.name == self.name
def __ne__(self, other):
return not (self == other)
class _PropertyTypeInfo(_Enum):
def __init__(self, is_fundamental, name):
_Enum.__init__(self, name)
self.is_fundamental = is_fundamental
class PropertyType(object):
"""Enum of different types of properties/parameters.
"""
INTEGER = _PropertyTypeInfo(True, "integer")
INT64 = _PropertyTypeInfo(True, "int64")
DOUBLE = _PropertyTypeInfo(True, "double")
BOOLEAN = _PropertyTypeInfo(True, "boolean")
STRING = _PropertyTypeInfo(True, "string")
ENUM = _PropertyTypeInfo(False, "enum")
ARRAY = _PropertyTypeInfo(False, "array")
REF = _PropertyTypeInfo(False, "ref")
CHOICES = _PropertyTypeInfo(False, "choices")
OBJECT = _PropertyTypeInfo(False, "object")
FUNCTION = _PropertyTypeInfo(False, "function")
BINARY = _PropertyTypeInfo(False, "binary")
ANY = _PropertyTypeInfo(False, "any")
def UnixName(name):
"""Returns the unix_style name for a given lowerCamelCase string.
"""
# First replace any lowerUpper patterns with lower_Upper.
s1 = re.sub('([a-z])([A-Z])', r'\1_\2', name)
# Now replace any ACMEWidgets patterns with ACME_Widgets
s2 = re.sub('([A-Z]+)([A-Z][a-z])', r'\1_\2', s1)
# Finally, replace any remaining periods, and make lowercase.
return s2.replace('.', '_').lower()
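# Illustrative examples:
#   UnixName('fooBar')              -> 'foo_bar'
#   UnixName('ACMEWidgets')         -> 'acme_widgets'
#   UnixName('experimental.fooBar') -> 'experimental_foo_bar'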
def _StripNamespace(name, namespace):
if name.startswith(namespace.name + '.'):
return name[len(namespace.name + '.'):]
return name
def _GetModelHierarchy(entity):
"""Returns the hierarchy of the given model entity."""
hierarchy = []
while entity is not None:
hierarchy.append(getattr(entity, 'name', repr(entity)))
if isinstance(entity, Namespace):
hierarchy.insert(0, ' in %s' % entity.source_file)
entity = getattr(entity, 'parent', None)
hierarchy.reverse()
return hierarchy
def _GetTypes(parent, json, namespace, origin):
"""Creates Type objects extracted from |json|.
"""
types = OrderedDict()
for type_json in json.get('types', []):
type_ = Type(parent, type_json['id'], type_json, namespace, origin)
types[type_.name] = type_
return types
def _GetFunctions(parent, json, namespace):
"""Creates Function objects extracted from |json|.
"""
functions = OrderedDict()
for function_json in json.get('functions', []):
function = Function(parent,
function_json['name'],
function_json,
namespace,
Origin(from_json=True))
functions[function.name] = function
return functions
def _GetEvents(parent, json, namespace):
"""Creates Function objects generated from the events in |json|.
"""
events = OrderedDict()
for event_json in json.get('events', []):
event = Function(parent,
event_json['name'],
event_json,
namespace,
Origin(from_client=True))
events[event.name] = event
return events
def _GetProperties(parent, json, namespace, origin):
"""Generates Property objects extracted from |json|.
"""
properties = OrderedDict()
for name, property_json in json.get('properties', {}).items():
properties[name] = Property(parent, name, property_json, namespace, origin)
return properties
class _PlatformInfo(_Enum):
def __init__(self, name):
_Enum.__init__(self, name)
class Platforms(object):
"""Enum of the possible platforms.
"""
CHROMEOS = _PlatformInfo("chromeos")
CHROMEOS_TOUCH = _PlatformInfo("chromeos_touch")
LINUX = _PlatformInfo("linux")
MAC = _PlatformInfo("mac")
WIN = _PlatformInfo("win")
def _GetPlatforms(json):
if 'platforms' not in json:
return None
platforms = []
for platform_name in json['platforms']:
for platform_enum in _Enum.GetAll(Platforms):
if platform_name == platform_enum.name:
platforms.append(platform_enum)
break
return platforms
|
|
import os
import mimetypes
from io import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.utils.encoding import smart_str
try:
from boto.s3.connection import S3Connection, SubdomainCallingFormat
from boto.exception import S3ResponseError
from boto.s3.key import Key
except ImportError:
raise ImproperlyConfigured("Could not load Boto's S3 bindings.\n"
"See https://github.com/boto/boto")
ACCESS_KEY_NAME = getattr(settings, 'AWS_S3_ACCESS_KEY_ID', getattr(settings, 'AWS_ACCESS_KEY_ID', None))
SECRET_KEY_NAME = getattr(settings, 'AWS_S3_SECRET_ACCESS_KEY', getattr(settings, 'AWS_SECRET_ACCESS_KEY', None))
HEADERS = getattr(settings, 'AWS_HEADERS', {})
STORAGE_BUCKET_NAME = getattr(settings, 'AWS_STORAGE_BUCKET_NAME', None)
AUTO_CREATE_BUCKET = getattr(settings, 'AWS_AUTO_CREATE_BUCKET', False)
DEFAULT_ACL = getattr(settings, 'AWS_DEFAULT_ACL', 'public-read')
BUCKET_ACL = getattr(settings, 'AWS_BUCKET_ACL', DEFAULT_ACL)
QUERYSTRING_AUTH = getattr(settings, 'AWS_QUERYSTRING_AUTH', True)
QUERYSTRING_EXPIRE = getattr(settings, 'AWS_QUERYSTRING_EXPIRE', 3600)
REDUCED_REDUNDANCY = getattr(settings, 'AWS_REDUCED_REDUNDANCY', False)
LOCATION = getattr(settings, 'AWS_LOCATION', '')
ENCRYPTION = getattr(settings, 'AWS_S3_ENCRYPTION', False)
CUSTOM_DOMAIN = getattr(settings, 'AWS_S3_CUSTOM_DOMAIN', None)
CALLING_FORMAT = getattr(settings, 'AWS_S3_CALLING_FORMAT',
SubdomainCallingFormat())
SECURE_URLS = getattr(settings, 'AWS_S3_SECURE_URLS', True)
FILE_NAME_CHARSET = getattr(settings, 'AWS_S3_FILE_NAME_CHARSET', 'utf-8')
FILE_OVERWRITE = getattr(settings, 'AWS_S3_FILE_OVERWRITE', True)
FILE_BUFFER_SIZE = getattr(settings, 'AWS_S3_FILE_BUFFER_SIZE', 5242880)
IS_GZIPPED = getattr(settings, 'AWS_IS_GZIPPED', False)
PRELOAD_METADATA = getattr(settings, 'AWS_PRELOAD_METADATA', False)
GZIP_CONTENT_TYPES = getattr(settings, 'GZIP_CONTENT_TYPES', (
'text/css',
'application/javascript',
'application/x-javascript',
))
URL_PROTOCOL = getattr(settings, 'AWS_S3_URL_PROTOCOL', 'http:')
# Backwards compatibility: the SECURE_URLS setting predates AWS_S3_URL_PROTOCOL,
# so fall back to https when it is set in order to avoid constructing
# insecure URLs.
if SECURE_URLS:
URL_PROTOCOL = 'https:'
if IS_GZIPPED:
from gzip import GzipFile
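# Example configuration in a project's settings.py (illustrative sketch only;
# the storage import path, bucket name and credentials are placeholders, while
# the AWS_* setting names are the ones read above):
#
#   DEFAULT_FILE_STORAGE = 'mysite.storage.S3BotoStorage'
#   AWS_ACCESS_KEY_ID = '...'
#   AWS_SECRET_ACCESS_KEY = '...'
#   AWS_STORAGE_BUCKET_NAME = 'my-bucket'
#   AWS_QUERYSTRING_AUTH = False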
def safe_join(base, *paths):
"""
A version of django.utils._os.safe_join for S3 paths.
Joins one or more path components to the base path component
intelligently. Returns a normalized version of the final path.
The final path must be located inside of the base path component
(otherwise a ValueError is raised).
Paths outside the base path indicate a possible security
sensitive operation.
"""
from urllib.parse import urljoin
base_path = base
base_path = base_path.rstrip('/')
paths = [p for p in paths]
final_path = base_path
for path in paths:
final_path = urljoin(final_path.rstrip('/') + "/", path.rstrip("/"))
# Ensure final_path starts with base_path and that the next character after
# the final path is '/' (or nothing, in which case final_path must be
# equal to base_path).
base_path_len = len(base_path)
if (not final_path.startswith(base_path) or
final_path[base_path_len:base_path_len + 1] not in ('', '/')):
raise ValueError('the joined path is located outside of the base path'
' component')
return final_path.lstrip('/')
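# Illustrative behaviour:
#   safe_join('media', 'images/pic.jpg')  -> 'media/images/pic.jpg'
#   safe_join('media', '../etc/passwd')   -> raises ValueError (escapes the base path)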
class S3BotoStorage(Storage):
"""
Amazon Simple Storage Service using Boto
This storage backend supports opening files in read or write
mode and supports streaming(buffering) data in chunks to S3
when writing.
"""
connection_class = S3Connection
connection_response_error = S3ResponseError
def __init__(self, bucket=STORAGE_BUCKET_NAME, access_key=None,
secret_key=None, bucket_acl=BUCKET_ACL, acl=DEFAULT_ACL,
headers=HEADERS, gzip=IS_GZIPPED,
gzip_content_types=GZIP_CONTENT_TYPES,
querystring_auth=QUERYSTRING_AUTH,
querystring_expire=QUERYSTRING_EXPIRE,
reduced_redundancy=REDUCED_REDUNDANCY,
encryption=ENCRYPTION,
custom_domain=CUSTOM_DOMAIN,
secure_urls=SECURE_URLS,
url_protocol=URL_PROTOCOL,
location=LOCATION,
file_name_charset=FILE_NAME_CHARSET,
preload_metadata=PRELOAD_METADATA,
calling_format=CALLING_FORMAT):
self.bucket_acl = bucket_acl
self.bucket_name = bucket
self.acl = acl
self.headers = headers
self.preload_metadata = preload_metadata
self.gzip = gzip
self.gzip_content_types = gzip_content_types
self.querystring_auth = querystring_auth
self.querystring_expire = querystring_expire
self.reduced_redundancy = reduced_redundancy
self.encryption = encryption
self.custom_domain = custom_domain
self.secure_urls = secure_urls
self.url_protocol = url_protocol
self.location = location or ''
self.location = self.location.lstrip('/')
self.file_name_charset = file_name_charset
self.calling_format = calling_format
self._entries = {}
if not access_key and not secret_key:
access_key, secret_key = self._get_access_keys()
self.connection = self.connection_class(access_key, secret_key,
calling_format=self.calling_format, host="s3-us-west-2.amazonaws.com")
@property
def bucket(self):
"""
Get the current bucket. If there is no current bucket object
create it.
"""
if not hasattr(self, '_bucket'):
self._bucket = self._get_or_create_bucket(self.bucket_name)
return self._bucket
@property
def entries(self):
"""
Get the locally cached files for the bucket.
"""
if self.preload_metadata and not self._entries:
self._entries = dict((self._decode_name(entry.key), entry)
for entry in self.bucket.list())
return self._entries
def _get_access_keys(self):
"""
Gets the access keys to use when accessing S3. If none
are provided to the class in the constructor or in the
settings then get them from the environment variables.
"""
access_key = ACCESS_KEY_NAME
secret_key = SECRET_KEY_NAME
if (access_key or secret_key) and (not access_key or not secret_key):
# TODO: this seems to be broken
access_key = os.environ.get(ACCESS_KEY_NAME)
secret_key = os.environ.get(SECRET_KEY_NAME)
if access_key and secret_key:
# Both were provided, so use them
return access_key, secret_key
return None, None
def _get_or_create_bucket(self, name):
"""Retrieves a bucket if it exists, otherwise creates it."""
try:
return self.connection.get_bucket(name,
validate=AUTO_CREATE_BUCKET)
except self.connection_response_error:
if AUTO_CREATE_BUCKET:
bucket = self.connection.create_bucket(name)
bucket.set_acl(self.bucket_acl)
return bucket
raise ImproperlyConfigured("Bucket specified by "
"AWS_STORAGE_BUCKET_NAME does not exist. "
"Buckets can be automatically created by setting "
"AWS_AUTO_CREATE_BUCKET=True")
def _clean_name(self, name):
"""
Cleans the name so that Windows style paths work
"""
# Useful for windows' paths
return os.path.normpath(name).replace('\\', '/')
def _normalize_name(self, name):
"""
Normalizes the name so that paths like /path/to/ignored/../something.txt
work. We check to make sure that the path pointed to is not outside
the directory specified by the LOCATION setting.
"""
try:
return safe_join(self.location, name)
except ValueError:
raise SuspiciousOperation("Attempted access to '%s' denied." %
name)
def _encode_name(self, name):
return smart_str(name, encoding=self.file_name_charset)
def _decode_name(self, name):
return name
def _compress_content(self, content):
"""Gzip a given string content."""
zbuf = StringIO()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
try:
zfile.write(content.read())
finally:
zfile.close()
content.file = zbuf
content.seek(0)
return content
def _open(self, name, mode='rb'):
name = self._normalize_name(self._clean_name(name))
f = S3BotoStorageFile(name, mode, self)
if not f.key:
raise IOError('File does not exist: %s' % name)
return f
def _save(self, name, content):
cleaned_name = self._clean_name(name)
name = self._normalize_name(cleaned_name)
headers = self.headers.copy()
content_type = getattr(content, 'content_type',
mimetypes.guess_type(name)[0] or Key.DefaultContentType)
# setting the content_type in the key object is not enough.
headers.update({'Content-Type': content_type})
if self.gzip and content_type in self.gzip_content_types:
content = self._compress_content(content)
headers.update({'Content-Encoding': 'gzip'})
content.name = cleaned_name
encoded_name = self._encode_name(name)
key = self.bucket.get_key(encoded_name)
if not key:
key = self.bucket.new_key(encoded_name)
if self.preload_metadata:
self._entries[encoded_name] = key
key.set_metadata('Content-Type', content_type)
# only pass backwards incompatible arguments if they vary from the default
kwargs = {}
if self.encryption:
kwargs['encrypt_key'] = self.encryption
key.set_contents_from_file(content, headers=headers, policy=self.acl,
reduced_redundancy=self.reduced_redundancy,
rewind=True, **kwargs)
return cleaned_name
def delete(self, name):
name = self._normalize_name(self._clean_name(name))
self.bucket.delete_key(self._encode_name(name))
def exists(self, name):
name = self._normalize_name(self._clean_name(name))
if self.entries:
return name in self.entries
k = self.bucket.new_key(self._encode_name(name))
return k.exists()
def listdir(self, name):
name = self._normalize_name(self._clean_name(name))
# for the bucket.list and logic below name needs to end in /
# But for the root path "" we leave it as an empty string
if name:
name += '/'
dirlist = self.bucket.list(self._encode_name(name))
files = []
dirs = set()
base_parts = name.split("/")[:-1]
for item in dirlist:
parts = item.name.split("/")
parts = parts[len(base_parts):]
if len(parts) == 1:
# File
files.append(parts[0])
elif len(parts) > 1:
# Directory
dirs.add(parts[0])
return list(dirs), files
def size(self, name):
name = self._normalize_name(self._clean_name(name))
if self.entries:
entry = self.entries.get(name)
if entry:
return entry.size
return 0
return self.bucket.get_key(self._encode_name(name)).size
def modified_time(self, name):
try:
from dateutil import parser, tz
except ImportError:
raise NotImplementedError()
name = self._normalize_name(self._clean_name(name))
entry = self.entries.get(name)
# only call self.bucket.get_key() if the key is not found
# in the preloaded metadata.
if entry is None:
entry = self.bucket.get_key(self._encode_name(name))
# convert to string to date
last_modified_date = parser.parse(entry.last_modified)
# if the date has no timezone, assume UTC
if last_modified_date.tzinfo == None:
last_modified_date = last_modified_date.replace(tzinfo=tz.tzutc())
# convert date to local time w/o timezone
timezone = tz.gettz(settings.TIME_ZONE)
return last_modified_date.astimezone(timezone).replace(tzinfo=None)
def url(self, name):
name = self._normalize_name(self._clean_name(name))
if self.custom_domain:
return "%s//%s/%s" % (self.url_protocol,
self.custom_domain, name)
return self.connection.generate_url(self.querystring_expire,
method='GET', bucket=self.bucket.name, key=self._encode_name(name),
query_auth=self.querystring_auth, force_http=not self.secure_urls)
def get_available_name(self, name):
""" Overwrite existing file with the same name. """
if FILE_OVERWRITE:
name = self._clean_name(name)
return name
return super(S3BotoStorage, self).get_available_name(name)
class S3BotoStorageFile(File):
"""
The default file object used by the S3BotoStorage backend.
This file implements file streaming using boto's multipart
uploading functionality. The file can be opened in read or
write mode.
This class extends Django's File class. However, the contained
data is only the data contained in the current buffer. So you
should not access the contained file object directly. You should
access the data via this class.
Warning: This file *must* be closed using the close() method in
order to properly write the file to S3. Be sure to close the file
in your application.
"""
# TODO: Read/Write (rw) mode may be a bit undefined at the moment. Needs testing.
# TODO: When Django drops support for Python 2.5, rewrite to use the
# BufferedIO streams in the Python 2.6 io module.
def __init__(self, name, mode, storage, buffer_size=FILE_BUFFER_SIZE):
self._storage = storage
self.name = name[len(self._storage.location):].lstrip('/')
self._mode = mode
self.key = storage.bucket.get_key(self._storage._encode_name(name))
if not self.key and 'w' in mode:
self.key = storage.bucket.new_key(storage._encode_name(name))
self._is_dirty = False
self._file = None
self._multipart = None
# 5 MB is the minimum part size (if there is more than one part).
# Amazon allows up to 10,000 parts. The default supports uploads
# up to roughly 50 GB. Increase the part size to accommodate
# for files larger than this.
self._write_buffer_size = buffer_size
self._write_counter = 0
@property
def size(self):
return self.key.size
def _get_file(self):
if self._file is None:
self._file = StringIO()
if 'r' in self._mode:
self._is_dirty = False
self.key.get_contents_to_file(self._file)
self._file.seek(0)
if self._storage.gzip and self.key.content_encoding == 'gzip':
self._file = GzipFile(mode=self._mode, fileobj=self._file)
return self._file
def _set_file(self, value):
self._file = value
file = property(_get_file, _set_file)
def read(self, *args, **kwargs):
if 'r' not in self._mode:
raise AttributeError("File was not opened in read mode.")
return super(S3BotoStorageFile, self).read(*args, **kwargs)
def write(self, *args, **kwargs):
if 'w' not in self._mode:
raise AttributeError("File was not opened in write mode.")
self._is_dirty = True
if self._multipart is None:
provider = self.key.bucket.connection.provider
upload_headers = {
provider.acl_header: self._storage.acl
}
upload_headers.update(self._storage.headers)
self._multipart = self._storage.bucket.initiate_multipart_upload(
self.key.name,
headers=upload_headers,
reduced_redundancy=self._storage.reduced_redundancy
)
if self._write_buffer_size <= self._buffer_file_size:
self._flush_write_buffer()
return super(S3BotoStorageFile, self).write(*args, **kwargs)
@property
def _buffer_file_size(self):
pos = self.file.tell()
self.file.seek(0, os.SEEK_END)
length = self.file.tell()
self.file.seek(pos)
return length
def _flush_write_buffer(self):
"""
Flushes the write buffer.
"""
if self._buffer_file_size:
self._write_counter += 1
self.file.seek(0)
self._multipart.upload_part_from_file(
self.file,
self._write_counter,
headers=self._storage.headers
)
self.file.close()
self._file = None
def close(self):
if self._is_dirty:
self._flush_write_buffer()
self._multipart.complete_upload()
else:
if self._multipart is not None:
self._multipart.cancel_upload()
self.key.close()
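# Typical usage through Django's storage API (illustrative sketch; assumes the
# backend above is configured as the project's default file storage):
#
#   from django.core.files.base import ContentFile
#   from django.core.files.storage import default_storage
#
#   path = default_storage.save('uploads/hello.txt', ContentFile(b'hello'))
#   with default_storage.open(path) as f:   # returns an S3BotoStorageFile
#       data = f.read()
#   default_storage.delete(path)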
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This sphinx extension adds a tools to simplify generating the API
documentation for Astropy packages and affiliated packages.
.. _automodapi:
========================
automodapi directive
========================
This directive takes a single argument that must be a module or package. It
will produce a block of documentation that includes the docstring for the
package, an :ref:`automodsumm` directive, and an :ref:`automod-diagram` if
there are any classes in the module. If only the main docstring of the
module/package is desired in the documentation, use `automodule`_ instead of
`automodapi`_.
It accepts the following options:
* ``:no-inheritance-diagram:``
If present, the inheritance diagram will not be shown even if
the module/package has classes.
* ``:skip: str``
This option results in the
specified object being skipped, that is the object will *not* be
included in the generated documentation. This option may appear
any number of times to skip multiple objects.
* ``:no-main-docstr:``
If present, the docstring for the module/package will not be generated.
The function and class tables will still be used, however.
* ``:headings: str``
Specifies the characters (in one string) used as the heading
levels used for the generated section. This must have at least 2
characters (any after 2 will be ignored). This also *must* match
the rest of the documentation on this page for sphinx to be
happy. Defaults to "-^", which matches the convention used for
Python's documentation, assuming the automodapi call is inside a
top-level section (which usually uses '=').
* ``:no-heading:``
If specified do not create a top level heading for the section.
That is, do not create a title heading with text like "packagename
Package". The actual docstring for the package/module will still be
shown, though, unless ``:no-main-docstr:`` is given.
* ``:allowed-package-names: str``
Specifies the packages that functions/classes documented here are
allowed to be from, as comma-separated list of package names. If not
given, only objects that are actually in a subpackage of the package
currently being documented are included.
This extension also adds two sphinx configuration options:
* ``automodapi_toctreedirnm``
This must be a string that specifies the name of the directory the
automodsumm generated documentation ends up in. This directory path should
be relative to the documentation root (e.g., same place as ``index.rst``).
Defaults to ``'api'``.
* ``automodapi_writereprocessed``
Should be a bool, and if `True`, will cause `automodapi`_ to write files
with any `automodapi`_ sections replaced with the content Sphinx
processes after `automodapi`_ has run. The output files are not
actually used by sphinx, so this option is only for figuring out the
cause of sphinx warnings or other debugging. Defaults to `False`.
.. _automodule: http://sphinx-doc.org/latest/ext/autodoc.html?highlight=automodule#directive-automodule
"""
# Implementation note:
# The 'automodapi' directive is not actually implemented as a docutils
# directive. Instead, this extension searches for the 'automodapi' text in
# all sphinx documents, and replaces it where necessary from a template built
# into this extension. This is necessary because automodsumm (and autosummary)
# use the "builder-inited" event, which comes before the directives are
# actually built.
import inspect
import os
import re
import sys
from .utils import find_mod_objs
automod_templ_modheader = """
{modname} {pkgormod}
{modhds}{pkgormodhds}
{automoduleline}
"""
automod_templ_classes = """
Classes
{clshds}
.. automodsumm:: {modname}
:classes-only:
{clsfuncoptions}
"""
automod_templ_funcs = """
Functions
{funchds}
.. automodsumm:: {modname}
:functions-only:
{clsfuncoptions}
"""
automod_templ_inh = """
Class Inheritance Diagram
{clsinhsechds}
.. automod-diagram:: {modname}
:private-bases:
:parts: 1
{allowedpkgnms}
"""
_automodapirex = re.compile(r'^(?:\s*\.\.\s+automodapi::\s*)([A-Za-z0-9_.]+)'
r'\s*$((?:\n\s+:[a-zA-Z_\-]+:.*$)*)',
flags=re.MULTILINE)
# the last group of the above regex is intended to go into findall with the below
_automodapiargsrex = re.compile(r':([a-zA-Z_\-]+):(.*)$', flags=re.MULTILINE)
def automodapi_replace(sourcestr, app, dotoctree=True, docname=None,
warnings=True):
"""
Replaces `sourcestr`'s entries of ".. automodapi::" with the
automodapi template form based on provided options.
This is used with the sphinx event 'source-read' to replace
`automodapi`_ entries before sphinx actually processes them, as
automodsumm needs the code to be present to generate stub
documentation.
Parameters
----------
sourcestr : str
The string with sphinx source to be checked for automodapi
replacement.
app : `sphinx.application.Application`
The sphinx application.
dotoctree : bool
If `True`, a ":toctree:" option will be added in the "..
automodsumm::" sections of the template, pointing to the
appropriate "generated" directory based on the Astropy convention
(e.g. in ``docs/api``)
docname : str
The name of the file for this `sourcestr` (if known - if not, it
can be `None`). If not provided and `dotoctree` is `True`, the
generated files may end up in the wrong place.
warnings : bool
If `False`, all warnings that would normally be issued are
silenced.
Returns
-------
newstr : str
The string with automodapi entries replaced with the correct
sphinx markup.
"""
spl = _automodapirex.split(sourcestr)
if len(spl) > 1: # automodapi is in this document
if dotoctree:
toctreestr = ':toctree: '
dirnm = app.config.automodapi_toctreedirnm
if not dirnm.endswith(os.sep):
dirnm += os.sep
if docname is not None:
toctreestr += '../' * docname.count('/') + dirnm
else:
toctreestr += dirnm
else:
toctreestr = ''
newstrs = [spl[0]]
for grp in range(len(spl) // 3):
modnm = spl[grp * 3 + 1]
# find where this is in the document for warnings
if docname is None:
location = None
else:
location = (docname, spl[0].count('\n'))
# initialize default options
toskip = []
inhdiag = maindocstr = top_head = True
hds = '-^'
allowedpkgnms = []
# look for actual options
unknownops = []
for opname, args in _automodapiargsrex.findall(spl[grp * 3 + 2]):
if opname == 'skip':
toskip.append(args.strip())
elif opname == 'no-inheritance-diagram':
inhdiag = False
elif opname == 'no-main-docstr':
maindocstr = False
elif opname == 'headings':
hds = args
elif opname == 'no-heading':
top_head = False
elif opname == 'allowed-package-names':
allowedpkgnms.append(args.strip())
else:
unknownops.append(opname)
#join all the allowedpkgnms
if len(allowedpkgnms) == 0:
allowedpkgnms = ''
onlylocals = True
else:
allowedpkgnms = ':allowed-package-names: ' + ','.join(allowedpkgnms)
onlylocals = allowedpkgnms
# get the two heading chars
if len(hds) < 2:
msg = 'Not enough headings (got {0}, need 2), using default -^'
if warnings:
app.warn(msg.format(len(hds)), location)
hds = '-^'
h1, h2 = hds.lstrip()[:2]
# tell sphinx that the remaining args are invalid.
if len(unknownops) > 0 and app is not None:
opsstrs = ','.join(unknownops)
msg = 'Found additional options ' + opsstrs + ' in automodapi.'
if warnings:
app.warn(msg, location)
ispkg, hascls, hasfuncs = _mod_info(modnm, toskip, onlylocals=onlylocals)
# add automodule directive only if no-main-docstr isn't present
if maindocstr:
automodline = '.. automodule:: {modname}'.format(modname=modnm)
else:
automodline = ''
if top_head:
newstrs.append(automod_templ_modheader.format(modname=modnm,
modhds=h1 * len(modnm),
pkgormod='Package' if ispkg else 'Module',
pkgormodhds=h1 * (8 if ispkg else 7),
automoduleline=automodline))
else:
newstrs.append(automod_templ_modheader.format(
modname='',
modhds='',
pkgormod='',
pkgormodhds='',
automoduleline=automodline))
#construct the options for the class/function sections
#start out indented at 4 spaces, but need to keep the indentation.
clsfuncoptions = []
if toctreestr:
clsfuncoptions.append(toctreestr)
if toskip:
clsfuncoptions.append(':skip: ' + ','.join(toskip))
if allowedpkgnms:
clsfuncoptions.append(allowedpkgnms)
clsfuncoptionstr = '\n '.join(clsfuncoptions)
if hasfuncs:
newstrs.append(automod_templ_funcs.format(
modname=modnm,
funchds=h2 * 9,
clsfuncoptions=clsfuncoptionstr))
if hascls:
newstrs.append(automod_templ_classes.format(
modname=modnm,
clshds=h2 * 7,
clsfuncoptions=clsfuncoptionstr))
if inhdiag and hascls:
# add inheritance diagram if any classes are in the module
newstrs.append(automod_templ_inh.format(
modname=modnm,
clsinhsechds=h2 * 25,
allowedpkgnms=allowedpkgnms))
newstrs.append(spl[grp * 3 + 3])
newsourcestr = ''.join(newstrs)
if app.config.automodapi_writereprocessed:
# sometimes they are unicode, sometimes not, depending on how
# sphinx has processed things
if isinstance(newsourcestr, unicode):
ustr = newsourcestr
else:
ustr = newsourcestr.decode(app.config.source_encoding)
if docname is None:
with open(os.path.join(app.srcdir, 'unknown.automodapi'), 'a') as f:
f.write('\n**NEW DOC**\n\n')
f.write(ustr.encode('utf8'))
else:
with open(os.path.join(app.srcdir, docname + '.automodapi'), 'w') as f:
f.write(ustr.encode('utf8'))
return newsourcestr
else:
return sourcestr
def _mod_info(modname, toskip=[], onlylocals=True):
"""
Determines if a module is a module or a package and whether or not
it has classes or functions.
"""
hascls = hasfunc = False
for localnm, fqnm, obj in zip(*find_mod_objs(modname, onlylocals=onlylocals)):
if localnm not in toskip:
hascls = hascls or inspect.isclass(obj)
hasfunc = hasfunc or inspect.isfunction(obj)
if hascls and hasfunc:
break
# find_mod_objs has already imported modname
pkg = sys.modules[modname]
ispkg = '__init__.' in os.path.split(pkg.__file__)[1]
return ispkg, hascls, hasfunc
def process_automodapi(app, docname, source):
source[0] = automodapi_replace(source[0], app, True, docname)
def setup(app):
# need automodsumm for automodapi
app.setup_extension('astropy_helpers.sphinx.ext.automodsumm')
app.connect('source-read', process_automodapi)
app.add_config_value('automodapi_toctreedirnm', 'api', True)
app.add_config_value('automodapi_writereprocessed', False, True)
|
|
"""Project loader for reading BUILD and BUILD.conf files."""
import importlib
import sys
import traceback
import cobble.env
import cobble.project
import cobble.target
def load(root, build_dir):
"""Loads a Project, given the paths to the project root and build output
directory."""
# Create a key registry initialized with keys defined internally to Cobble.
kr = cobble.env.KeyRegistry()
for k in cobble.target.KEYS: kr.define(k)
# Create working data structures.
project = cobble.project.Project(root, build_dir)
packages_to_visit = []
installed_modules = {}
# Function that will be exposed to BUILD.conf files as 'seed()'
def _build_conf_seed(*paths):
nonlocal packages_to_visit
packages_to_visit += paths
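# Example BUILD.conf usage (package idents are hypothetical):
#   seed('//lib/foo', '//apps/bar')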
# Function that will be exposed to BUILD.conf files as 'install()'
def _build_conf_install(module_name):
nonlocal kr
module = importlib.import_module(module_name)
if hasattr(module, 'KEYS'):
for k in module.KEYS:
kr.define(k)
installed_modules[module.__name__] = module
# Function that will be exposed to BUILD.conf files as 'environment()'
def _build_conf_environment(name, base = None, contents = {}):
assert name not in project.named_envs, \
"More than one environment named %r" % name
if base:
assert base in project.named_envs, \
"Base environment %r does not exist (must appear before)" \
% base
base_env = project.named_envs[base]
else:
base_env = cobble.env.Env(kr, {})
env = base_env.derive(cobble.env.prepare_delta(contents))
project.named_envs[name] = env
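# Example BUILD.conf usage (environment names and key contents are hypothetical):
#   environment('default', contents = {'cc': 'gcc'})
#   environment('debug', base = 'default', contents = {'c_flags': ['-g']})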
# Function that will be exposed to BUILD.conf files as 'define_key()'
def _build_conf_define_key(name, *, type):
if type == 'string':
key = cobble.env.overrideable_string_key(name)
elif type == 'bool':
key = cobble.env.overrideable_bool_key(name)
else:
raise Exception('Unknown key type: %r' % type)
kr.define(key)
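# Example BUILD.conf usage (key name is hypothetical):
#   define_key('enable_asserts', type = 'bool')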
# Function that will be exposed to BUILD.conf files as 'plugin_path()'
def _build_conf_plugin_path(*paths):
sys.path += [project.inpath(p) for p in paths]
# Read in BUILD.conf and eval it for its side effects
_compile_and_exec(
path = project.inpath('BUILD.conf'),
kind = 'BUILD.conf file',
globals = {
# Block access to builtins. TODO: this might be too aggressive.
'__builtins__': {},
'seed': _build_conf_seed,
'install': _build_conf_install,
'environment': _build_conf_environment,
'define_key': _build_conf_define_key,
'plugin_path': _build_conf_plugin_path,
'ROOT': project.root,
'BUILD': project.build_dir,
},
)
# Process the package worklist. We're also extending the worklist in this
# algorithm, treating it like a stack (rather than a queue). This means the
# order of package processing is a little hard to predict. Because packages
# can define keys that have effects on other packages, this should probably
# get fixed (TODO).
while packages_to_visit:
ident = packages_to_visit.pop()
# Check if we've done this one.
relpath = _get_relpath(ident)
if relpath in project.packages:
continue
package = cobble.project.Package(project, relpath)
# Prepare the global environment for eval-ing the package. We provide
# a few variables by default:
pkg_env = {
# Block access to builtins. TODO: this might be too aggressive.
'__builtins__': {},
# Easy access to the path from the build dir to the package
'PKG': package.inpath(),
# Easy access to the path from the build dir to the project
'ROOT': project.root,
# Location of the build dir
'BUILD': project.build_dir,
'define_key': _build_conf_define_key,
}
# The rest of the variables are provided by items registered in
# plugins.
for mod in installed_modules.values():
if hasattr(mod, 'package_verbs'):
for name, fn in mod.package_verbs.items():
pkg_env[name] = _wrap_verb(package, fn, packages_to_visit)
if hasattr(mod, 'global_functions'):
for name, fn in mod.global_functions.items():
pkg_env[name] = fn
# And now, the evaluation!
_compile_and_exec(
path = package.inpath('BUILD'),
kind = 'BUILD file',
globals = pkg_env,
)
# Register all plugins' ninja rules. We could probably do this earlier, but
# hey.
for mod in installed_modules.values():
if hasattr(mod, 'ninja_rules'):
project.add_ninja_rules(mod.ninja_rules)
return project
def _wrap_verb(package, verb, packages_to_visit):
"""Instruments a package-verb function 'verb' from 'package' with code to
register the resulting target and scan deps to discover new packages.
'packages_to_visit' is a reference to a (mutable) list containing relpaths
we should visit. The function returned from '_wrap_verb' will append
relpaths of deps to that list. Some of them will be redundant; the worklist
processing code is expected to deal with this.
"""
def verb_wrapper(*pos, **kw):
nonlocal packages_to_visit
tgt = verb(package, *pos, **kw)
if tgt:
package.add_target(tgt)
# TODO this is where we'd return for extend_when
packages_to_visit += tgt.deps
return verb_wrapper
def _get_relpath(ident):
"""Extracts the relative path from the project root to the directory
containing the BUILD file defining a target named by an ident."""
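# For example (hypothetical ident): _get_relpath('//lib/foo:bar') returns 'lib/foo'.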
assert ident.startswith('//'), "bogus ident got in: %r" % ident
return ident[2:].split(':')[0]
class BuildError(Exception):
"""Exception raised if processing of a BUILD/BUILD.conf file fails."""
def __init__(self, exc_info, kind, path, limit):
"""Creates a BuildError.
'exc_info' is the information on the exception as received from
'sys.exc_info()'.
'kind' is a human-readable str description of what we were processing.
'path' is a path to the file being processed.
'limit' is the depth of the traceback that is relevant to the user
error, i.e. does not include Cobble stack frames.
"""
self.exc_info = exc_info
self.kind = kind
self.path = path
self.limit = limit
def _compile_and_exec(path, kind, globals):
"""Implementation factor of BUILD and BUILD.conf evaluation. Loads the file
at 'path' and execs it in an environment of 'globals', reporting the
failure as 'kind' if it occurs."""
with open(path, 'r') as f:
try:
mod = compile(
source = f.read(),
filename = path,
mode = 'exec',
dont_inherit = 1,
)
exec(mod, globals)
except:
exc_info = sys.exc_info()
limit = len(traceback.extract_tb(exc_info[2])) - 1
raise BuildError(
exc_info = exc_info,
limit = limit,
kind = kind,
path = path) from exc_info[1]
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Strategy objects for creating ABINIT calculations."""
from __future__ import unicode_literals, division, print_function
import sys
import os
import abc
import collections
import copy
import six
import numpy as np
from six.moves import map, zip
from monty.string import is_string
from monty.json import MontyEncoder, MontyDecoder
from monty.dev import deprecated
from pymatgen.util.string_utils import str_delimited
from .abiobjects import Electrons
from .pseudos import PseudoTable, Pseudo
import logging
logger = logging.getLogger(__name__)
__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"
__email__ = "gmatteo at gmail.com"
def num_valence_electrons(pseudos, structure):
"""
Compute the number of valence electrons from
a list of pseudopotentials and the crystalline structure.
Args:
pseudos: List of strings, list of pseudos or `PseudoTable` instance.
structure: Pymatgen structure.
Raises:
ValueError if no pseudo can be found for a chemical symbol appearing in the
structure, or if the input list contains more than one pseudo for that symbol.
"""
table = PseudoTable.as_table(pseudos)
valence = 0.0
for site in structure:
entries = table.pseudos_with_symbol(site.specie.symbol)
if len(entries) != 1:
raise ValueError("Found %d entries for symbol %s" % (len(entries), site.specie.symbol))
valence += entries[0].Z_val
return valence
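# Rough usage sketch (pseudopotential filenames and structure are hypothetical):
#   nval = num_valence_electrons(["14si.pspnc"], silicon_structure)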
#class AbstractStrategy(six.with_metaclass(abc.ABCMeta, object)):
# """
# A Strategy object generates the ABINIT input file used for a particular type of calculation
# e.g. ground-state runs, structural relaxations, self-energy calculations ...
#
# A Strategy can absorb data (e.g. data produced in the previous steps of a workflow) and
# can use this piece of information to generate/optimize the input variables.
# Strategy objects must provide the method make_input that builds and returns the abinit input file.
#
# Attributes:
#
# pseudos: List of pseudopotentials.
# """
#
# #@abc.abstractproperty
# #def pseudos(self):
#
# @property
# def isnc(self):
# """True if norm-conserving calculation."""
# return all(p.isnc for p in self.pseudos)
#
# @property
# def ispaw(self):
# """True if PAW calculation."""
# return all(p.ispaw for p in self.pseudos)
#
# def num_valence_electrons(self):
# """Number of valence electrons computed from the pseudos and the structure."""
# return num_valence_electrons(self.pseudos, self.structure)
#
# @abc.abstractproperty
# def structure(self):
# """Structure object"""
#
# #def set_structure(self, structure):
# # self.structure = structure
#
# #def change_structure(self, structure):
# # self.structure = structure
#
# #def to_abivars(self):
# #def to_dict(self):
# #def from_abivars(cls, d):
# #def from_dict(self, d):
#
# def copy(self):
# """Shallow copy of self."""
# return copy.copy(self)
#
# def deepcopy(self):
# """Deep copy of self."""
# return copy.deepcopy(self)
#
# @abc.abstractmethod
# def make_input(self, *args, **kwargs):
# """Returns an Input instance."""
#
#
#class StrategyWithInput(object):
# # TODO: Find a better way to do this. I will likely need to refactor the Strategy object
# def __init__(self, abinit_input, deepcopy=True):
# if deepcopy: abinit_input = copy.deepcopy(abinit_input)
# self.abinit_input = abinit_input
#
# @property
# def pseudos(self):
# # FIXME: pseudos must be ordered but I need to define an ABC for the Strategies and Inputs.
# # Order pseudos
# return self.abinit_input.pseudos
#
# @property
# def structure(self):
# return self.abinit_input.structure
#
# @structure.setter
# def structure(self, structure):
# self.abinit_input.set_structure(structure)
#
# def add_extra_abivars(self, abivars):
# """Add variables (dict) to extra_abivars."""
# self.abinit_input.set_vars(**abivars)
#
# def remove_extra_abivars(self, keys):
# """Remove variables from extra_abivars."""
# self.abinit_input.remove_vars(keys)
#
# def make_input(self):
# return str(self.abinit_input)
#
# def deepcopy(self):
# """Deep copy of self."""
# return copy.deepcopy(self)
#
# def as_dict(self):
# d = {'abinit_input': self.abinit_input.as_dict()}
# d["@module"] = self.__class__.__module__
# d["@class"] = self.__class__.__name__
# return d
#
# @classmethod
# def from_dict(cls, d):
# abinit_input = d['abinit_input']
# modname = abinit_input["@module"]
# classname = abinit_input["@class"]
# mod = __import__(modname, globals(), locals(), [classname], 0)
# cls_ = getattr(mod, classname)
# strategy = cls(cls_.from_dict(abinit_input))
# return strategy
#
#
#class HtcStrategy(AbstractStrategy):
# """
# Attributes:
#
# accuracy: Accuracy of the calculation used to define basic parameters of the run.
# such as tolerances, basis set truncation ...
# """
# __metaclass__ = abc.ABCMeta
#
# # Mapping runlevel --> optdriver variable
# _runl2optdriver = {
# "scf": 0,
# "nscf": 0,
# "relax": 0,
# "dfpt": 1,
# "screening": 3,
# "sigma": 4,
# "bse": 99,
# }
#
# # Name of the (default) tolerance used by the runlevels.
# _runl2tolname = {
# "scf": 'tolvrs',
# "nscf": 'tolwfr',
# "dfpt": 'toldfe', # ?
# "screening": 'toldfe', # dummy
# "sigma": 'toldfe', # dummy
# "bse": 'toldfe', # ?
# "relax": 'tolrff',
# }
#
# # Tolerances for the different levels of accuracy.
# T = collections.namedtuple('Tolerance', "low normal high")
# _tolerances = {
# "toldfe": T(1.e-7, 1.e-8, 1.e-9),
# "tolvrs": T(1.e-7, 1.e-8, 1.e-9),
# "tolwfr": T(1.e-15, 1.e-17, 1.e-19),
# "tolrff": T(0.04, 0.02, 0.01)}
# del T
#
# def __repr__(self):
# return "<%s at %s, accuracy = %s>" % (self.__class__.__name__, id(self), self.accuracy)
#
# @abc.abstractproperty
# def runlevel(self):
# """String defining the Runlevel. See _runl2optdriver."""
#
# @property
# def optdriver(self):
# """The optdriver associated to the calculation."""
# return self._runl2optdriver[self.runlevel]
#
# def learn(self, **data):
# """Update the data stored in self."""
# if not hasattr(self, "_data"):
# self._data = dict(data)
# else:
# if [k in self._data for k in data].count(True) != 0:
# raise ValueError("Keys %s are already present in data" % str([k for k in data]))
# self._data.update(data)
#
# @property
# def accuracy(self):
# """Accuracy used by the strategy."""
# try:
# return self._accuracy
# except AttributeError:
# self.set_accuracy("normal")
# return self._accuracy
#
# def set_accuracy(self, accuracy):
# """Accuracy setter."""
# if hasattr(self, "_accuracy"):
# raise RuntimeError("object already has accuracy %s " % self._accuracy)
#
# assert accuracy in ["low", "normal", "high"]
# self._accuracy = accuracy
#
# @property
# def data(self):
# """Data absorbed by the strategy during the workflow."""
# try:
# return self. _data
# except AttributeError:
# return {}
#
# @property
# def ecut(self):
# """Cutoff energy in Hartree."""
# try:
# # User option.
# return self.extra_abivars["ecut"]
# except KeyError:
# # Compute ecut from the Pseudo Hints.
# hints = [p.hint_for_accuracy(self.accuracy) for p in self.pseudos]
# return max(hint.ecut for hint in hints)
#
# @property
# def pawecutdg(self):
# """Cutoff energy in Hartree for the dense grid used in PAW calculations."""
# if not self.ispaw:
# return None
#
# try:
# # User option.
# return self.extra_abivars["pawecutdg"]
# except KeyError:
# raise NotImplementedError("")
# #ratio = max(p.suggested_augratio(accuracy) for p in self.pseudos])
# #ratio = augration_high if high else augratio_norm
# #pawecutdg = ecut * ratio
#
# @property
# def tolerance(self):
# """Return a dict {varname: varvalue} with the tolerance used for the calculation."""
# # Check user options first.
# for tolname in self._tolerances:
# try:
# return {tolname: self.extra_abivars[tolname]}
# except KeyError:
# pass
#
# # Use default values depending on the runlevel and the accuracy.
# tolname = self._runl2tolname[self.runlevel]
#
# return {tolname: getattr(self._tolerances[tolname], self.accuracy)}
#
# @property
# def need_forces(self):
# """True if forces are required at each SCF step (like the stresses)."""
# return self.runlevel in ["relax",]
#
# @property
# def need_stress(self):
# """True if the computation of the stress is required."""
# # TODO: here it's easier to check if optcell != 0
# return self.runlevel in ["relax"]
#
# def add_extra_abivars(self, abivars):
# """Add variables (dict) to extra_abivars."""
# self.extra_abivars.update(abivars)
#
# def remove_extra_abivars(self, keys):
# for key in keys:
# self.extra_abivars.pop(key)
#
#
#class ScfStrategy(HtcStrategy):
# """
# Strategy for ground-state SCF calculations.
# """
# def __init__(self, structure, pseudos, ksampling, accuracy="normal", spin_mode="polarized",
# smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None, use_symmetries=True, **extra_abivars):
# """
# Args:
# structure: pymatgen structure
# pseudos: List of pseudopotentials.
# ksampling: :class:`Ksampling` object defining the sampling of the BZ.
# accuracy: Accuracy of the calculation.
# spin_mode: Spin polarization mode.
# smearing: string or :class:`Smearing` instance.
# charge: Total charge of the system. Default is 0.
# scf_algorithm: :class:`ElectronsAlgorithm` instance.
# use_symmetries: False if point group symmetries should not be used.
# extra_abivars: Extra variables that will be directly added to the input file.
# """
# super(ScfStrategy, self).__init__()
#
# self.set_accuracy(accuracy)
# self._structure = structure
#
# table = PseudoTable.as_table(pseudos)
# self.pseudos = table.pseudos_with_symbols(list(structure.composition.get_el_amt_dict().keys()))
#
# self.ksampling = ksampling
# self.use_symmetries = use_symmetries
#
# self.electrons = Electrons(spin_mode=spin_mode, smearing=smearing, algorithm=scf_algorithm,
# nband=None, fband=None, charge=charge)
#
# self.extra_abivars = extra_abivars
#
# @property
# def runlevel(self):
# return "scf"
#
# @property
# def structure(self):
# return self._structure
#
# @structure.setter
# def structure(self, structure):
# self._structure = structure
#
# def _define_extra_params(self):
# extra = dict(optdriver=self.optdriver, ecut=self.ecut, pawecutdg=self.pawecutdg)
# extra.update(self.tolerance)
# extra.update({"nsym": 1 if not self.use_symmetries else None})
# extra.update(self.extra_abivars)
# return extra
#
# @deprecated(message="Strategy objects will be removed in pmg v3.1. Use AbiInput")
# def make_input(self):
# extra = self._define_extra_params()
#
# inpw = InputWriter(self.structure, self.electrons, self.ksampling, **extra)
# return inpw.get_string()
#
# def as_dict(self):
# d = {}
# d['structure'] = self.structure.as_dict()
# d['pseudos'] = [p.as_dict() for p in self.pseudos]
# d['ksampling'] = self.ksampling.as_dict()
# d['accuracy'] = self.accuracy
# d['electrons'] = self.electrons.as_dict()
# d['charge'] = self.electrons.charge
# d['use_symmetries'] = self.use_symmetries
# d['extra_abivars'] = self.extra_abivars
# d['@module'] = self.__class__.__module__
# d['@class'] = self.__class__.__name__
#
# return d
#
# @classmethod
# def from_dict(cls, d):
# dec = MontyDecoder()
# structure = dec.process_decoded(d["structure"])
# pseudos = dec.process_decoded(d['pseudos'])
# ksampling = dec.process_decoded(d["ksampling"])
# electrons = dec.process_decoded(d["electrons"])
#
# return cls(structure=structure, pseudos=pseudos, ksampling=ksampling, accuracy=d['accuracy'],
# spin_mode=electrons.spin_mode, smearing=electrons.smearing, charge=d['charge'],
# scf_algorithm=electrons.algorithm, use_symmetries=d['use_symmetries'],
# **d['extra_abivars'])
#
#
#class NscfStrategy(HtcStrategy):
# """
# Strategy for non-self-consistent calculations.
# """
# def __init__(self, scf_strategy, ksampling, nscf_nband, nscf_algorithm=None, **extra_abivars):
# """
# Args:
# scf_strategy: :class:`ScfStrategy` used for the GS run.
# ksampling: :class:`Ksampling` object defining the sampling of the BZ.
# nscf_nband: Number of bands to compute.
# nscf_algorithm :class:`ElectronsAlgorithm` instance.
# extra_abivars: Extra ABINIT variables that will be directly added to the input file
# """
# super(NscfStrategy, self).__init__()
#
# self.set_accuracy(scf_strategy.accuracy)
# self.scf_strategy = scf_strategy
#
# self.nscf_nband = nscf_nband
# self.pseudos = scf_strategy.pseudos
# self.ksampling = ksampling
#
# if nscf_algorithm is None:
# nscf_algorithm = {"iscf": -2}
#
# # Electrons used in the GS run.
# scf_electrons = scf_strategy.electrons
#
# self.electrons = Electrons(
# spin_mode=scf_electrons.spin_mode, smearing=scf_electrons.smearing,
# algorithm=nscf_algorithm, nband=nscf_nband,
# fband=None, charge=scf_electrons.charge, comment=None)
#
# self.extra_abivars = extra_abivars
#
# @property
# def runlevel(self):
# return "nscf"
#
# @property
# def structure(self):
# return self.scf_strategy.structure
#
# @structure.setter
# def structure(self, structure):
# self.scf_strategy.structure = structure
#
# @deprecated(message="Strategy objects will be removed in pmg v3.1. Use AbiInput")
# def make_input(self):
# # Initialize the system section from structure.
# scf_strategy = self.scf_strategy
#
# extra = dict(optdriver=self.optdriver, ecut=self.ecut, pawecutdg=self.pawecutdg)
# extra.update(self.tolerance)
# extra.update(self.extra_abivars)
#
# inp = InputWriter(scf_strategy.structure, self.electrons, self.ksampling, **extra)
# return inp.get_string()
#
# def as_dict(self):
# d = {}
# d['scf_strategy'] = self.scf_strategy.as_dict()
# d['ksampling'] = self.ksampling.as_dict()
# d['nscf_nband'] = self.nscf_nband
# d['nscf_algorithm'] = self.electrons.algorithm
# d['extra_abivars'] = self.extra_abivars
# d['@module'] = self.__class__.__module__
# d['@class'] = self.__class__.__name__
#
# @classmethod
# def from_dict(cls, d):
# dec = MontyDecoder()
# scf_strategy = dec.process_decoded(d["scf_strategy"])
# ksampling = dec.process_decoded(d["ksampling"])
# nscf_nband = dec.process_decoded(d["nscf_nband"])
# nscf_algorithm = dec.process_decoded(d["nscf_algorithm"])
#
# return cls(scf_strategy=scf_strategy, ksampling=ksampling,
# nscf_nband=nscf_nband, nscf_algorithm=nscf_algorithm, **d['extra_abivars'])
#
#class RelaxStrategy(ScfStrategy):
# """Extends ScfStrategy by adding an algorithm for the structural relaxation."""
#
# def __init__(self, structure, pseudos, ksampling, relax_algo, accuracy="normal", spin_mode="polarized",
# smearing="fermi_dirac:0.1 eV", charge=0.0, scf_algorithm=None, **extra_abivars):
# """
# Args:
# structure: pymatgen structure
# pseudos: List of pseudopotentials.
# ksampling: :class:`Ksampling` object defining the sampling of the BZ.
# relax_algo: Object defining the algorithm for the structural relaxation.
# accuracy: Accuracy of the calculation.
# spin_mode: Flag defining the spin polarization. Defaults to "polarized"
# smearing: String or :class:`Smearing` instance.
# charge: Total charge of the system. Default is 0.
# scf_algorithm: :class:`ElectronsAlgorithm` instance.
# extra_abivars: Extra ABINIT variables that will be directly added to the input file
# """
# super(RelaxStrategy, self).__init__(
# structure, pseudos, ksampling,
# accuracy=accuracy, spin_mode=spin_mode, smearing=smearing,
# charge=charge, scf_algorithm=scf_algorithm, **extra_abivars)
#
# self.relax_algo = relax_algo
#
# @property
# def runlevel(self):
# return "relax"
#
# @deprecated(message="Strategy objects will be removed in pmg v3.1. Use AbiInput")
# def make_input(self):
# # extra for the GS run
# extra = self._define_extra_params()
#
# inpw = InputWriter(self.structure, self.electrons, self.ksampling, self.relax_algo, **extra)
# return inpw.get_string()
#
# def as_dict(self):
# d = super(RelaxStrategy, self).as_dict()
# d['relax_algo'] = self.relax_algo.as_dict()
# d['@module'] = self.__class__.__module__
# d['@class'] = self.__class__.__name__
#
# return d
#
# @classmethod
# def from_dict(cls, d):
# dec = MontyDecoder()
# structure = dec.process_decoded(d["structure"])
# pseudos = [Pseudo.from_file(p['filepath']) for p in d['pseudos']]
# ksampling = dec.process_decoded(d["ksampling"])
# electrons = dec.process_decoded(d["electrons"])
# relax_algo = dec.process_decoded(d["relax_algo"])
#
# return cls(structure=structure, pseudos=pseudos, ksampling=ksampling, accuracy=d['accuracy'],
# spin_mode=electrons.spin_mode, smearing=electrons.smearing, charge=d['charge'],
# scf_algorithm=electrons.algorithm, use_symmetries=d['use_symmetries'],
# relax_algo=relax_algo, **d['extra_abivars'])
#
#
#class ScreeningStrategy(HtcStrategy):
# """Strategy for Screening calculations."""
# def __init__(self, scf_strategy, nscf_strategy, screening, **extra_abivars):
# """
# Args:
# scf_strategy: :class:`ScfStrategy` used for the ground-state calculation
# nscf_strategy: :class:`NscStrategy` used for the non-self consistent calculation
# screening: :class:`Screening` instance
# extra_abivars: Extra ABINIT variables added directly to the input file
# """
# super(ScreeningStrategy, self).__init__()
#
# self.pseudos = scf_strategy.pseudos
#
# self.scf_strategy = scf_strategy
# self.nscf_strategy = nscf_strategy
#
# self.screening = screening
#
# scr_nband = screening.nband
#
# scf_electrons = scf_strategy.electrons
# nscf_electrons = nscf_strategy.electrons
#
# if scr_nband > nscf_electrons.nband:
# raise ValueError("Cannot use more that %d bands for the screening" % nscf_electrons.nband)
#
# self.ksampling = nscf_strategy.ksampling
#
# if not self.ksampling.is_homogeneous:
# raise ValueError("The k-sampling used for the NSCF run mush be homogeneous")
#
# self.electrons = Electrons(spin_mode=scf_electrons.spin_mode,
# smearing =scf_electrons.smearing,
# nband=scr_nband, charge=scf_electrons.charge, comment=None)
#
# self.extra_abivars = extra_abivars
#
# @property
# def runlevel(self):
# return "screening"
#
# @property
# def structure(self):
# return self.scf_strategy.structure
#
# @deprecated(message="Strategy objects will be removed in pmg v3.1. Use AbiInput")
# def make_input(self):
# # FIXME
# extra = dict(optdriver=self.optdriver, ecut=self.ecut, ecutwfn=self.ecut, pawecutdg=self.pawecutdg)
# extra.update(self.tolerance)
# extra.update(self.extra_abivars)
#
# return InputWriter(self.scf_strategy.structure, self.electrons, self.ksampling, self.screening,
# **extra).get_string()
#
#
#class SelfEnergyStrategy(HtcStrategy):
# """Strategy for self-energy calculations."""
# def __init__(self, scf_strategy, nscf_strategy, scr_strategy, sigma, **extra_abivars):
# """
# Args:
# scf_strategy: :class:`ScfStrategy` used for the ground-state calculation
# nscf_strategy: :class:`NscfStrategy` used for the non-self consistent calculation
# scr_strategy: :class:`ScrStrategy` used for the screening calculation
# sigma: :class:`SelfEnergy` instance.
# extra_abivars: Extra ABINIT variables added directly to the input file
# """
# # TODO Add consistency check between SCR and SIGMA strategies
# super(SelfEnergyStrategy, self).__init__()
#
# self.pseudos = scf_strategy.pseudos
#
# self.scf_strategy = scf_strategy
# self.nscf_strategy = nscf_strategy
# self.scr_strategy = scr_strategy
#
# self.sigma = sigma
#
# self.extra_abivars = extra_abivars
#
# scf_electrons = scf_strategy.electrons
# nscf_electrons = nscf_strategy.electrons
#
# if sigma.nband > nscf_electrons.nband:
# raise ValueError("Cannot use more that %d bands for the self-energy" % nscf_electrons.nband)
#
# self.ksampling = nscf_strategy.ksampling
#
# if not self.ksampling.is_homogeneous:
# raise ValueError("The k-sampling used for the NSCF run mush be homogeneous")
#
# self.electrons = Electrons(
# spin_mode=scf_electrons.spin_mode, smearing=scf_electrons.smearing,
# nband=sigma.nband, charge=scf_electrons.charge)
#
# @property
# def runlevel(self):
# return "sigma"
#
# @property
# def structure(self):
# return self.scf_strategy.structure
#
# @deprecated(message="Strategy objects will be removed in pmg v3.1. Use AbiInput")
# def make_input(self):
# # FIXME
# extra = dict(optdriver=self.optdriver, ecut=self.ecut, ecutwfn=self.ecut, pawecutdg=self.pawecutdg)
# extra.update(self.tolerance)
# extra.update(self.extra_abivars)
#
# return InputWriter(self.scf_strategy.structure, self.electrons, self.ksampling, self.sigma,
# **extra).get_string()
#
#
#class MdfBse_Strategy(HtcStrategy):
# """
# Strategy for Bethe-Salpeter calculation based on the
# model dielectric function and the scissors operator
# """
# def __init__(self, scf_strategy, nscf_strategy, exc_ham, **extra_abivars):
# """
# Args:
# scf_strategy: :class:`Strategy` used for the ground-state calculation.
# nscf_strategy: :class:`NscStrategy` used for the non-self consistent calculation.
# exc_ham: :class:`ExcitonicHamiltonian` instance.
# extra_abivars: Extra ABINIT variables added directly to the input file.
# """
# super(MdfBse_Strategy, self).__init__()
#
# self.pseudos = scf_strategy.pseudos
#
# self.scf_strategy = scf_strategy
# self.nscf_strategy = nscf_strategy
#
# self.exc_ham = exc_ham
#
# self.extra_abivars = extra_abivars
#
# scf_electrons = scf_strategy.electrons
# nscf_electrons = nscf_strategy.electrons
#
# if exc_ham.nband > nscf_electrons.nband:
# raise ValueError("Cannot use more that %d bands for the EXC hamiltonian." % nscf_electrons.nband)
#
# self.ksampling = nscf_strategy.ksampling
#
# if not self.ksampling.is_homogeneous:
# raise ValueError("The k-sampling used for the NSCF run mush be homogeneous")
#
# self.electrons = Electrons(
# spin_mode=scf_electrons.spin_mode, smearing=scf_electrons.smearing,
# nband=exc_ham.nband, charge=scf_electrons.charge)
#
# @property
# def runlevel(self):
# return "bse"
#
# @property
# def structure(self):
# return self.scf_strategy.structure
#
# @deprecated(message="Strategy objects will be removed in pmg v3.1. Use AbiInput")
# def make_input(self):
# # FIXME
# extra = dict(optdriver=self.optdriver, ecut=self.ecut, pawecutdg=self.pawecutdg, ecutwfn=self.ecut)
# #extra.update(self.tolerance)
# extra.update(self.extra_abivars)
#
# return InputWriter(self.scf_strategy.structure, self.electrons, self.ksampling, self.exc_ham,
# **extra).get_string()
#
#
#class InputWriter(object):
# """
# This object receives a list of `AbivarAble` objects, an optional
# dictionary with extra ABINIT variables and produces a (nicely formatted?) string with the input file.
# """
# MAX_SLEN = 100
#
# def __init__(self, *args, **kwargs):
# self.abiobj_dict = collections.OrderedDict()
# self.extra_abivars = collections.OrderedDict()
# for arg in args:
# if hasattr(arg, "to_abivars"):
# self.add_abiobj(arg)
# else:
# self.add_extra_abivars(arg)
#
# for k, v in kwargs.items():
# self.add_extra_abivars({k: v})
#
# def __str__(self):
# """String representation (the section of the abinit input file)."""
# return self.get_string()
#
# @property
# def abiobjects(self):
# """List of objects stored in self."""
# return self.abiobj_dict.values()
#
# def add_abiobj(self, obj):
# """Add the object obj to self."""
# if not hasattr(obj, "to_abivars"):
# raise ValueError("%s does not define the method to_abivars" % str(obj))
#
# cname = obj.__class__.__name__
# if cname in self.abiobj_dict:
# raise ValueError("%s is already stored" % cname)
# self.abiobj_dict[cname] = obj
#
# def add_extra_abivars(self, abivars):
# """Add variables (dict) to extra_abivars."""
# self.extra_abivars.update(abivars)
#
# def to_abivars(self):
# """Returns a dictionary with the abinit variables defined by the Card."""
# abivars = {}
# for obj in self.abiobjects:
# abivars.update(obj.to_abivars())
#
# abivars.update(self.extra_abivars)
# return abivars
#
# def print_abiobjects(self, stream=sys.stdout):
# lines = [str(obj) for obj in self.abiobjects]
# stream.write("\n".join(lines))
#
# @staticmethod
# def _format_kv(key, value):
# """Formatter"""
# if value is None:
# # Use ABINIT default.
# return []
#
# if isinstance(value, collections.Iterable) and not is_string(value):
# arr = np.array(value)
# if len(arr.shape) in [0,1]:
# # scalar or vector.
# token = [key, " ".join(str(i) for i in arr)]
#
# else:
# # array --> matrix
# matrix = np.reshape(arr, (-1, arr.shape[-1]))
# lines = []
# for idx, row in enumerate(matrix):
# lines.append(" ".join(str(i) for i in row))
# token = [key + "\n", "\n".join(lines)]
#
# else:
# token = [key, str(value)]
#
# return token
#
# def _cut_lines(self, lines):
# MAX_SLEN = self.MAX_SLEN
#
# new_lines = []
# for line in lines:
# if len(line) > MAX_SLEN:
# #start, stop = 0, 0
# #while True:
# # stop = start + MAX_SLEN
# # if stop > len(line): break
# # print(start, stop)
# # if stop > len(line): stop = len(line)
# # new_lines.append(line[start:stop])
# # start = stop
#
# tokens = lines.split()
# cum_nchars, start = 0, 0
# for stop, tok in enumerate(tokens):
# cum_nchars += len(tok) + 1
#
# if cum_nchars > MAX_SLEN:
# cum_nchars = 0
# new_lines.append("".join(tokens[start:stop]))
# else:
# start = stop
#
# if cum_nchars:
# new_lines.append("".join(tokens[start:stop]))
#
# else:
# new_lines.append(line)
#
# return new_lines
#
# def get_string(self, pretty=False):
# """
# Returns a string representation of self. The reason why this
# method is different from the __str__ method is to provide options for pretty printing.
#
# Args:
# pretty: Set to True for pretty aligned output.
# """
# lines = []
# app = lines.append
#
# # extra_abivars can contain variables that are already defined
# # in the object. In this case, the value in extra_abivars is used
# # TODO: Should find a more elegant way to avoid collision between objects
# # and extra_abivars
# extra_abivars = self.extra_abivars.copy()
#
# # Write the Abinit objects first.
# for obj in self.abiobjects:
# #print(obj)
# app([80*"#", ""])
# app(["#", "%s" % obj.__class__.__name__])
# app([80*"#", ""])
# for (k, v) in obj.to_abivars().items():
# v = extra_abivars.pop(k, v)
# app(self._format_kv(k, v))
#
# # Extra variables.
# if self.extra_abivars:
# app([80*"#", ""])
# app(["#", "Extra_Abivars"])
# app([80*"#", ""])
# for (k, v) in extra_abivars.items():
# app(self._format_kv(k, v))
#
# #lines = self._cut_lines(lines)
#
# if pretty:
# return str_aligned(lines, header=None)
# else:
# return str_delimited(lines, header=None, delimiter=5*" ")
|
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function
import sys
import optparse
from glue import __version__
from glue.logger import logger
try:
from glue.utils.qt.decorators import die_on_error
except ImportError:
from glue.utils.decorators import die_on_error
def parse(argv):
""" Parse argument list, check validity
:param argv: Arguments passed to program
*Returns*
A tuple of options and positional arguments
"""
usage = """usage: %prog [options] [FILE FILE...]
# start a new session
%prog
# start a new session and load a file
%prog image.fits
#start a new session with multiple files
%prog image.fits catalog.csv
#restore a saved session
%prog saved_session.glu
or
%prog -g saved_session.glu
#run a script
%prog -x script.py
#run the test suite
%prog -t
"""
parser = optparse.OptionParser(usage=usage,
version=str(__version__))
parser.add_option('-x', '--execute', action='store_true', dest='script',
help="Execute FILE as a python script", default=False)
parser.add_option('-g', action='store_true', dest='restore',
help="Restore glue session from FILE", default=False)
parser.add_option('-t', '--test', action='store_true', dest='test',
help="Run test suite", default=False)
parser.add_option('-c', '--config', type='string', dest='config',
metavar='CONFIG',
help='use CONFIG as configuration file')
parser.add_option('-v', '--verbose', action='store_true',
help="Increase the vebosity level", default=False)
parser.add_option('--no-maximized', action='store_true', dest='nomax',
help="Do not start Glue maximized", default=False)
parser.add_option('--startup', dest='startup', type='string',
help="Startup actions to carry out", default='')
parser.add_option('--auto-merge', dest='auto_merge', action='store_true',
help="Automatically merge any data passed on the command-line", default='')
err_msg = verify(parser, argv)
if err_msg:
sys.stderr.write('\n%s\n' % err_msg)
parser.print_help()
sys.exit(1)
return parser.parse_args(argv)
def verify(parser, argv):
""" Check for input errors
:param parser: OptionParser instance
:param argv: Argument list
:type argv: List of strings
*Returns*
An error message, or None
"""
opts, args = parser.parse_args(argv)
err_msg = None
if opts.script and opts.restore:
err_msg = "Cannot specify -g with -x"
elif opts.script and opts.config:
err_msg = "Cannot specify -c with -x"
elif opts.script and len(args) != 1:
err_msg = "Must provide a script\n"
elif opts.restore and len(args) != 1:
err_msg = "Must provide a .glu file\n"
return err_msg
@die_on_error("Error restoring Glue session")
def restore_session(gluefile):
"""Load a .glu file and return a DataCollection, Hub tuple"""
from glue.app.qt import GlueApplication
return GlueApplication.restore_session(gluefile)
@die_on_error("Error reading data file")
def load_data_files(datafiles):
"""Load data files and return a list of datasets"""
from glue.core.data_factories import auto_data, load_data
datasets = []
for df in datafiles:
datasets.append(load_data(df, auto_data))
return datasets
def run_tests():
from glue import test
test()
def start_glue(gluefile=None, config=None, datafiles=None, maximized=True,
startup_actions=None, auto_merge=False):
"""Run a glue session and exit
Parameters
----------
gluefile : str
An optional ``.glu`` file to restore.
config : str
An optional configuration file to use.
datafiles : list of str
An optional list of data files to load.
maximized : bool
Maximize screen on startup. Otherwise, use default size.
auto_merge : bool, optional
Whether to automatically merge data passed in `datafiles` (default is `False`)
"""
import glue
from glue.utils.qt import get_qapp
app = get_qapp()
splash = get_splash()
splash.show()
# Start off by loading plugins. We need to do this before restoring
# the session or loading the configuration since these may use existing
# plugins.
load_plugins(splash=splash)
from glue.app.qt import GlueApplication
datafiles = datafiles or []
hub = None
from qtpy.QtCore import QTimer
timer = QTimer()
timer.setInterval(1000)
timer.setSingleShot(True)
timer.timeout.connect(splash.close)
timer.start()
if gluefile is not None:
app = restore_session(gluefile)
return app.start()
if config is not None:
glue.env = glue.config.load_configuration(search_path=[config])
data_collection = glue.core.DataCollection()
hub = data_collection.hub
splash.set_progress(100)
session = glue.core.Session(data_collection=data_collection, hub=hub)
ga = GlueApplication(session=session)
if datafiles:
datasets = load_data_files(datafiles)
ga.add_datasets(data_collection, datasets, auto_merge=auto_merge)
if startup_actions is not None:
for name in startup_actions:
ga.run_startup_action(name)
return ga.start(maximized=maximized)
@die_on_error("Error running script")
def execute_script(script):
""" Run a python script and exit.
Provides a way for people with pre-installed binaries to use
the glue library
"""
with open(script) as fin:
exec(fin.read())
sys.exit(0)
def get_splash():
"""Instantiate a splash screen"""
from glue.app.qt.splash_screen import QtSplashScreen
splash = QtSplashScreen()
return splash
def main(argv=sys.argv):
opt, args = parse(argv[1:])
if opt.verbose:
logger.setLevel("INFO")
logger.info("Input arguments: %s", sys.argv)
# Global keywords for Glue startup.
kwargs = {'config': opt.config,
'maximized': not opt.nomax,
'auto_merge': opt.auto_merge}
if opt.startup:
kwargs['startup_actions'] = opt.startup.split(',')
if opt.test:
return run_tests()
elif opt.restore:
start_glue(gluefile=args[0], **kwargs)
elif opt.script:
execute_script(args[0])
else:
has_file = len(args) == 1
has_files = len(args) > 1
has_py = has_file and args[0].endswith('.py')
has_glu = has_file and args[0].endswith('.glu')
if has_py:
execute_script(args[0])
elif has_glu:
start_glue(gluefile=args[0], **kwargs)
elif has_file or has_files:
start_glue(datafiles=args, **kwargs)
else:
start_glue(**kwargs)
_loaded_plugins = set()
_installed_plugins = set()
def load_plugins(splash=None):
# Search for plugins installed via entry_points. Basically, any package can
# define plugins for glue, and needs to define an entry point using the
# following format:
#
# entry_points = """
# [glue.plugins]
# webcam_importer=glue_exp.importers.webcam:setup
# vizier_importer=glue_exp.importers.vizier:setup
# dataverse_importer=glue_exp.importers.dataverse:setup
# """
#
# where ``setup`` is a function that does whatever is needed to set up the
# plugin, such as add items to various registries.
import setuptools
logger.info("Loading external plugins using "
"setuptools=={0}".format(setuptools.__version__))
from glue._plugin_helpers import iter_plugin_entry_points, PluginConfig
config = PluginConfig.load()
n_plugins = len(list(iter_plugin_entry_points()))
for iplugin, item in enumerate(iter_plugin_entry_points()):
if item.module_name not in _installed_plugins:
_installed_plugins.add(item.name)
if item.module_name in _loaded_plugins:
logger.info("Plugin {0} already loaded".format(item.name))
continue
if not config.plugins[item.name]:
continue
try:
function = item.load()
function()
except Exception as exc:
logger.info("Loading plugin {0} failed "
"(Exception: {1})".format(item.name, exc))
else:
logger.info("Loading plugin {0} succeeded".format(item.name))
_loaded_plugins.add(item.module_name)
if splash is not None:
splash.set_progress(100. * iplugin / float(n_plugins))
try:
config.save()
except Exception as e:
logger.warn("Failed to load plugin configuration")
# Reload the settings now that we have loaded plugins, since some plugins
# may have added some settings. Note that this will not re-read settings
# that were previously read.
from glue._settings_helpers import load_settings
load_settings()
if __name__ == "__main__":
sys.exit(main(sys.argv)) # pragma: no cover
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class KeysOperations:
"""KeysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.keyvault.v2021_06_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def create_if_not_exist(
self,
resource_group_name: str,
vault_name: str,
key_name: str,
parameters: "_models.KeyCreateParameters",
**kwargs: Any
) -> "_models.Key":
"""Creates the first version of a new key if it does not exist. If it already exists, then the
existing key is returned without any write operations being performed. This API does not create
subsequent versions, and does not update existing keys.
:param resource_group_name: The name of the resource group which contains the specified key
vault.
:type resource_group_name: str
:param vault_name: The name of the key vault which contains the key to be created.
:type vault_name: str
:param key_name: The name of the key to be created.
:type key_name: str
:param parameters: The parameters used to create the specified key.
:type parameters: ~azure.mgmt.keyvault.v2021_06_01_preview.models.KeyCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Key, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.Key
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Key"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_if_not_exist.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'keyName': self._serialize.url("key_name", key_name, 'str', pattern=r'^[a-zA-Z0-9-]{1,127}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Key', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_if_not_exist.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/keys/{keyName}'} # type: ignore
async def get(
self,
resource_group_name: str,
vault_name: str,
key_name: str,
**kwargs: Any
) -> "_models.Key":
"""Gets the current version of the specified key from the specified key vault.
:param resource_group_name: The name of the resource group which contains the specified key
vault.
:type resource_group_name: str
:param vault_name: The name of the vault which contains the key to be retrieved.
:type vault_name: str
:param key_name: The name of the key to be retrieved.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Key, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.Key
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Key"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'keyName': self._serialize.url("key_name", key_name, 'str', pattern=r'^[a-zA-Z0-9-]{1,127}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Key', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/keys/{keyName}'} # type: ignore
def list(
self,
resource_group_name: str,
vault_name: str,
**kwargs: Any
) -> AsyncIterable["_models.KeyListResult"]:
"""Lists the keys in the specified key vault.
:param resource_group_name: The name of the resource group which contains the specified key
vault.
:type resource_group_name: str
:param vault_name: The name of the vault which contains the keys to be retrieved.
:type vault_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either KeyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.keyvault.v2021_06_01_preview.models.KeyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('KeyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/keys'} # type: ignore
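# The AsyncItemPaged returned above drives prepare_request/extract_data/get_next
# lazily as pages are exhausted; callers normally consume it with "async for".
# Illustrative only (client construction and auth omitted, names assumed):
#
#     async for key in client.keys.list(resource_group_name, vault_name):
#         print(key.name)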
async def get_version(
self,
resource_group_name: str,
vault_name: str,
key_name: str,
key_version: str,
**kwargs: Any
) -> "_models.Key":
"""Gets the specified version of the specified key in the specified key vault.
:param resource_group_name: The name of the resource group which contains the specified key
vault.
:type resource_group_name: str
:param vault_name: The name of the vault which contains the key version to be retrieved.
:type vault_name: str
:param key_name: The name of the key version to be retrieved.
:type key_name: str
:param key_version: The version of the key to be retrieved.
:type key_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Key, or the result of cls(response)
:rtype: ~azure.mgmt.keyvault.v2021_06_01_preview.models.Key
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Key"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_version.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'keyName': self._serialize.url("key_name", key_name, 'str', pattern=r'^[a-zA-Z0-9-]{1,127}$'),
'keyVersion': self._serialize.url("key_version", key_version, 'str', pattern=r'^[a-fA-F0-9]{32}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Key', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_version.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/keys/{keyName}/versions/{keyVersion}'} # type: ignore
def list_versions(
self,
resource_group_name: str,
vault_name: str,
key_name: str,
**kwargs: Any
) -> AsyncIterable["_models.KeyListResult"]:
"""Lists the versions of the specified key in the specified key vault.
:param resource_group_name: The name of the resource group which contains the specified key
vault.
:type resource_group_name: str
:param vault_name: The name of the vault which contains the key versions to be retrieved.
:type vault_name: str
:param key_name: The name of the key versions to be retrieved.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either KeyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.keyvault.v2021_06_01_preview.models.KeyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-06-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_versions.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'vaultName': self._serialize.url("vault_name", vault_name, 'str', pattern=r'^[a-zA-Z0-9-]{3,24}$'),
'keyName': self._serialize.url("key_name", key_name, 'str', pattern=r'^[a-zA-Z0-9-]{1,127}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('KeyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_versions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/vaults/{vaultName}/keys/{keyName}/versions'} # type: ignore
|
|
# -*- coding: utf-8 -*-
"""Tests for the artifact definitions readers."""
import io
import os
import unittest
from artifacts import definitions
from artifacts import errors
from artifacts import reader
class YamlArtifactsReaderTest(unittest.TestCase):
"""Class to test the YAML artifacts reader."""
def testReadFileObject(self):
"""Tests the ReadFileObject function."""
artifact_reader = reader.YamlArtifactsReader()
test_file = os.path.join('test_data', 'definitions.yaml')
with open(test_file, 'rb') as file_object:
artifact_definitions = list(artifact_reader.ReadFileObject(file_object))
self.assertEqual(len(artifact_definitions), 7)
# Artifact with file source type.
artifact_definition = artifact_definitions[0]
self.assertEqual(artifact_definition.name, 'SecurityEventLogEvtx')
expected_description = (
'Windows Security Event log for Vista or later systems.')
self.assertEqual(artifact_definition.description, expected_description)
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator, definitions.TYPE_INDICATOR_FILE)
expected_paths = sorted([
'%%environ_systemroot%%\\System32\\winevt\\Logs\\Security.evtx'])
self.assertEqual(sorted(source_type.paths), expected_paths)
self.assertEqual(len(artifact_definition.conditions), 1)
expected_condition = 'os_major_version >= 6'
self.assertEqual(artifact_definition.conditions[0], expected_condition)
self.assertEqual(len(artifact_definition.labels), 1)
self.assertEqual(artifact_definition.labels[0], 'Logs')
self.assertEqual(len(artifact_definition.supported_os), 1)
self.assertEqual(artifact_definition.supported_os[0], 'Windows')
self.assertEqual(len(artifact_definition.urls), 1)
expected_url = (
'http://www.forensicswiki.org/wiki/Windows_XML_Event_Log_(EVTX)')
self.assertEqual(artifact_definition.urls[0], expected_url)
# Artifact with Windows Registry key source type.
artifact_definition = artifact_definitions[1]
self.assertEqual(
artifact_definition.name, 'AllUsersProfileEnvironmentVariable')
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY)
expected_keys = sorted([
('HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion\\'
'ProfileList\\ProfilesDirectory'),
('HKEY_LOCAL_MACHINE\\Software\\Microsoft\\Windows NT\\CurrentVersion\\'
'ProfileList\\AllUsersProfile')])
self.assertEqual(sorted(source_type.keys), expected_keys)
# Artifact with Windows Registry value source type.
artifact_definition = artifact_definitions[2]
self.assertEqual(artifact_definition.name, 'CurrentControlSet')
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator,
definitions.TYPE_INDICATOR_WINDOWS_REGISTRY_VALUE)
self.assertEqual(len(source_type.key_value_pairs), 1)
key_value_pair = source_type.key_value_pairs[0]
expected_key = 'HKEY_LOCAL_MACHINE\\SYSTEM\\Select'
self.assertEqual(key_value_pair['key'], expected_key)
self.assertEqual(key_value_pair['value'], 'Current')
# Artifact with WMI query source type.
artifact_definition = artifact_definitions[3]
self.assertEqual(artifact_definition.name, 'WMIProfileUsersHomeDir')
expected_provides = sorted(['users.homedir'])
self.assertEqual(sorted(artifact_definition.provides), expected_provides)
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator, definitions.TYPE_INDICATOR_WMI_QUERY)
expected_query = (
'SELECT * FROM Win32_UserProfile WHERE SID=\'%%users.sid%%\'')
self.assertEqual(source_type.query, expected_query)
# Artifact with artifact definition source type.
artifact_definition = artifact_definitions[4]
self.assertEqual(artifact_definition.name, 'EventLogs')
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator, definitions.TYPE_INDICATOR_ARTIFACT)
# Artifact with command definition source type.
artifact_definition = artifact_definitions[5]
self.assertEqual(artifact_definition.name, 'RedhatPackagesList')
self.assertEqual(len(artifact_definition.sources), 1)
source_type = artifact_definition.sources[0]
self.assertNotEqual(source_type, None)
self.assertEqual(
source_type.type_indicator, definitions.TYPE_INDICATOR_COMMAND)
# Artifact with COMMAND definition collector definition.
artifact_definition = artifact_definitions[5]
self.assertEqual(artifact_definition.name, 'RedhatPackagesList')
self.assertEqual(len(artifact_definition.sources), 1)
collector_definition = artifact_definition.sources[0]
self.assertNotEqual(collector_definition, None)
self.assertEqual(
collector_definition.type_indicator,
definitions.TYPE_INDICATOR_COMMAND)
def testBadKey(self):
"""Tests top level keys are correct."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: BadKey
doc: bad extra key.
sources:
- type: ARTIFACT
attributes:
names:
- 'SystemEventLogEvtx'
extra_key: 'wrong'
labels: [Logs]
supported_os: [Windows]
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testMissingSources(self):
"""Tests sources is present."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: BadSources
doc: must have one sources.
labels: [Logs]
supported_os: [Windows]
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testBadSupportedOS(self):
"""Tests supported_os is checked correctly."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: BadSupportedOS
doc: supported_os should be an array of strings.
sources:
- type: ARTIFACT
attributes:
names:
- 'SystemEventLogEvtx'
labels: [Logs]
supported_os: Windows
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testBadLabels(self):
"""Tests labels is checked correctly."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: BadLabel
doc: badlabel.
sources:
- type: ARTIFACT
attributes:
names:
- 'SystemEventLogEvtx'
labels: Logs
supported_os: [Windows]
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testMissingDoc(self):
"""Tests doc is required."""
artifact_reader = reader.YamlArtifactsReader()
file_object = io.StringIO(initial_value=u"""name: NoDoc
sources:
- type: ARTIFACT
attributes:
names:
- 'SystemEventLogEvtx'
""")
with self.assertRaises(errors.FormatError):
_ = list(artifact_reader.ReadFileObject(file_object))
def testReadFile(self):
"""Tests the ReadFile function."""
artifact_reader = reader.YamlArtifactsReader()
test_file = os.path.join('test_data', 'definitions.yaml')
artifact_definitions = list(artifact_reader.ReadFile(test_file))
self.assertEqual(len(artifact_definitions), 7)
def testReadDirectory(self):
"""Tests the ReadDirectory function."""
artifact_reader = reader.YamlArtifactsReader()
artifact_definitions = list(artifact_reader.ReadDirectory('test_data'))
self.assertEqual(len(artifact_definitions), 7)
if __name__ == '__main__':
unittest.main()
|
|
###
# Copyright (c) 2002-2004, Jeremiah Fincher
# Copyright (c) 2009-2010, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import sys
import time
import supybot.conf as conf
import supybot.world as world
import supybot.ircdb as ircdb
import supybot.irclib as irclib
import supybot.utils.minisix as minisix
import supybot.ircmsgs as ircmsgs
import supybot.ircutils as ircutils
import supybot.registry as registry
import supybot.callbacks as callbacks
from supybot.i18n import PluginInternationalization, internationalizeDocstring
_ = PluginInternationalization('ChannelLogger')
if minisix.PY2:
from io import open
class FakeLog(object):
def flush(self):
return
def close(self):
return
def write(self, s):
return
class ChannelLogger(callbacks.Plugin):
"""This plugin allows the bot to log channel conversations to disk."""
noIgnore = True
def __init__(self, irc):
self.__parent = super(ChannelLogger, self)
self.__parent.__init__(irc)
self.logs = {}
self.flusher = self.flush
world.flushers.append(self.flusher)
def die(self):
for log in self._logs():
log.close()
world.flushers = [x for x in world.flushers if x is not self.flusher]
def reset(self):
for log in self._logs():
log.close()
self.logs.clear()
def _logs(self):
for logs in self.logs.values():
for log in logs.values():
yield log
def flush(self):
self.checkLogNames()
for log in self._logs():
try:
log.flush()
except ValueError as e:
if e.args[0] != 'I/O operation on a closed file':
self.log.exception('Odd exception:')
def logNameTimestamp(self, channel):
format = self.registryValue('filenameTimestamp', channel)
return time.strftime(format)
def getLogName(self, channel):
if self.registryValue('rotateLogs', channel):
return '%s.%s.log' % (channel, self.logNameTimestamp(channel))
else:
return '%s.log' % channel
def getLogDir(self, irc, channel):
logDir = conf.supybot.directories.log.dirize(self.name())
if self.registryValue('directories'):
if self.registryValue('directories.network'):
logDir = os.path.join(logDir, irc.network)
if self.registryValue('directories.channel'):
logDir = os.path.join(logDir, channel)
if self.registryValue('directories.timestamp'):
format = self.registryValue('directories.timestamp.format')
timeDir = time.strftime(format)
logDir = os.path.join(logDir, timeDir)
if not os.path.exists(logDir):
os.makedirs(logDir)
return logDir
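# With every 'directories' option enabled this yields a layout such as
# <supybot log dir>/ChannelLogger/<network>/<channel>/<timestamp>/ (illustrative);
# the file name inside that directory comes from getLogName() above.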
def checkLogNames(self):
for (irc, logs) in self.logs.items():
for (channel, log) in list(logs.items()):
if self.registryValue('rotateLogs', channel):
name = self.getLogName(channel)
if name != os.path.basename(log.name):
log.close()
del logs[channel]
def getLog(self, irc, channel):
self.checkLogNames()
try:
logs = self.logs[irc]
except KeyError:
logs = ircutils.IrcDict()
self.logs[irc] = logs
if channel in logs:
return logs[channel]
else:
try:
name = self.getLogName(channel)
logDir = self.getLogDir(irc, channel)
log = open(os.path.join(logDir, name), encoding='utf-8', mode='a')
logs[channel] = log
return log
except IOError:
self.log.exception('Error opening log:')
return FakeLog()
def timestamp(self, log):
format = conf.supybot.log.timestampFormat()
if format:
string = time.strftime(format) + ' '
if minisix.PY2:
string = string.decode('utf8', 'ignore')
log.write(string)
def normalizeChannel(self, irc, channel):
return ircutils.toLower(channel)
def doLog(self, irc, channel, s, *args):
if not self.registryValue('enable', channel):
return
s = format(s, *args)
channel = self.normalizeChannel(irc, channel)
log = self.getLog(irc, channel)
if self.registryValue('timestamp', channel):
self.timestamp(log)
if self.registryValue('stripFormatting', channel):
s = ircutils.stripFormatting(s)
if minisix.PY2:
s = s.decode('utf8', 'ignore')
log.write(s)
if self.registryValue('flushImmediately'):
log.flush()
def doPrivmsg(self, irc, msg):
(recipients, text) = msg.args
for channel in recipients.split(','):
if irc.isChannel(channel):
noLogPrefix = self.registryValue('noLogPrefix', channel)
cap = ircdb.makeChannelCapability(channel, 'logChannelMessages')
try:
logChannelMessages = ircdb.checkCapability(msg.prefix, cap,
ignoreOwner=True)
except KeyError:
logChannelMessages = True
nick = msg.nick or irc.nick
if msg.tagged('ChannelLogger__relayed'):
(nick, text) = text.split(' ', 1)
nick = nick[1:-1]
msg.args = (recipients, text)
if (noLogPrefix and text.startswith(noLogPrefix)) or \
not logChannelMessages:
text = '-= THIS MESSAGE NOT LOGGED =-'
if ircmsgs.isAction(msg):
self.doLog(irc, channel,
'* %s %s\n', nick, ircmsgs.unAction(msg))
else:
self.doLog(irc, channel, '<%s> %s\n', nick, text)
def doNotice(self, irc, msg):
(recipients, text) = msg.args
for channel in recipients.split(','):
if irc.isChannel(channel):
self.doLog(irc, channel, '-%s- %s\n', msg.nick, text)
def doNick(self, irc, msg):
oldNick = msg.nick
newNick = msg.args[0]
for channel in msg.tagged('channels'):
self.doLog(irc, channel,
'*** %s is now known as %s\n', oldNick, newNick)
def doInvite(self, irc, msg):
(target, channel) = msg.args
self.doLog(irc, channel,
'*** %s <%s> invited %s to %s\n',
msg.nick, msg.prefix, target, channel)
def doJoin(self, irc, msg):
for channel in msg.args[0].split(','):
if self.registryValue('showJoinParts', channel):
self.doLog(irc, channel,
'*** %s <%s> has joined %s\n',
msg.nick, msg.prefix, channel)
def doKick(self, irc, msg):
if len(msg.args) == 3:
(channel, target, kickmsg) = msg.args
else:
(channel, target) = msg.args
kickmsg = ''
if kickmsg:
self.doLog(irc, channel,
'*** %s was kicked by %s (%s)\n',
target, msg.nick, kickmsg)
else:
self.doLog(irc, channel,
'*** %s was kicked by %s\n', target, msg.nick)
def doPart(self, irc, msg):
if len(msg.args) > 1:
reason = " (%s)" % msg.args[1]
else:
reason = ""
for channel in msg.args[0].split(','):
if self.registryValue('showJoinParts', channel):
self.doLog(irc, channel,
'*** %s <%s> has left %s%s\n',
msg.nick, msg.prefix, channel, reason)
def doMode(self, irc, msg):
channel = msg.args[0]
if irc.isChannel(channel) and msg.args[1:]:
self.doLog(irc, channel,
'*** %s sets mode: %s %s\n',
msg.nick or msg.prefix, msg.args[1],
' '.join(msg.args[2:]))
def doTopic(self, irc, msg):
if len(msg.args) == 1:
return # It's an empty TOPIC just to get the current topic.
channel = msg.args[0]
self.doLog(irc, channel,
'*** %s changes topic to "%s"\n', msg.nick, msg.args[1])
def doQuit(self, irc, msg):
if len(msg.args) == 1:
reason = " (%s)" % msg.args[0]
else:
reason = ""
for channel in msg.tagged('channels'):
if self.registryValue('showJoinParts', channel):
self.doLog(irc, channel,
'*** %s <%s> has quit IRC%s\n',
msg.nick, msg.prefix, reason)
def outFilter(self, irc, msg):
# Gotta catch my own messages *somehow* :)
# Let's try this little trick...
if msg.command in ('PRIVMSG', 'NOTICE'):
# Other messages should be sent back to us.
m = ircmsgs.IrcMsg(msg=msg, prefix=irc.prefix)
if msg.tagged('relayedMsg'):
m.tag('ChannelLogger__relayed')
self(irc, m)
return msg
Class = ChannelLogger
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
|
import datetime
from unittest import skipIf, skipUnless
from django.db import connection
from django.db.models import Index
from django.db.models.deletion import CASCADE
from django.db.models.fields.related import ForeignKey
from django.db.models.query_utils import Q
from django.test import (
TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import override_settings
from django.utils import timezone
from .models import (
Article, ArticleTranslation, IndexedArticle2, IndexTogetherSingleList,
)
class SchemaIndexesTests(TestCase):
"""
Test index handling by the db.backends.schema infrastructure.
"""
def test_index_name_hash(self):
"""
Index names should be deterministic.
"""
editor = connection.schema_editor()
index_name = editor._create_index_name(
table_name=Article._meta.db_table,
column_names=("c1",),
suffix="123",
)
self.assertEqual(index_name, "indexes_article_c1_a52bd80b123")
def test_index_name(self):
"""
Index names on the built-in database backends::
* Are truncated as needed.
* Include all the column names.
* Include a deterministic hash.
"""
long_name = 'l%sng' % ('o' * 100)
editor = connection.schema_editor()
index_name = editor._create_index_name(
table_name=Article._meta.db_table,
column_names=('c1', 'c2', long_name),
suffix='ix',
)
expected = {
'mysql': 'indexes_article_c1_c2_looooooooooooooooooo_255179b2ix',
'oracle': 'indexes_a_c1_c2_loo_255179b2ix',
'postgresql': 'indexes_article_c1_c2_loooooooooooooooooo_255179b2ix',
'sqlite': 'indexes_article_c1_c2_l%sng_255179b2ix' % ('o' * 100),
}
if connection.vendor not in expected:
self.skipTest('This test is only supported on the built-in database backends.')
self.assertEqual(index_name, expected[connection.vendor])
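# The truncation differences above reflect each backend's identifier length
# limit (roughly 64 characters on MySQL, 63 on PostgreSQL, 30 on older Oracle;
# SQLite does not enforce one), so only the trailing hash and suffix are
# guaranteed to survive intact.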
def test_index_together(self):
editor = connection.schema_editor()
index_sql = [str(statement) for statement in editor._model_indexes_sql(Article)]
self.assertEqual(len(index_sql), 1)
# Ensure the index name is properly quoted
self.assertIn(
connection.ops.quote_name(
editor._create_index_name(Article._meta.db_table, ['headline', 'pub_date'], suffix='_idx')
),
index_sql[0]
)
def test_index_together_single_list(self):
# Test for using index_together with a single list (#22172)
index_sql = connection.schema_editor()._model_indexes_sql(IndexTogetherSingleList)
self.assertEqual(len(index_sql), 1)
def test_columns_list_sql(self):
index = Index(fields=['headline'], name='whitespace_idx')
editor = connection.schema_editor()
self.assertIn(
'(%s)' % editor.quote_name('headline'),
str(index.create_sql(Article, editor)),
)
def test_descending_columns_list_sql(self):
index = Index(fields=['-headline'], name='whitespace_idx')
editor = connection.schema_editor()
self.assertIn(
'(%s DESC)' % editor.quote_name('headline'),
str(index.create_sql(Article, editor)),
)
@skipIf(connection.vendor == 'postgresql', 'opclasses are PostgreSQL only')
class SchemaIndexesNotPostgreSQLTests(TransactionTestCase):
available_apps = ['indexes']
def test_create_index_ignores_opclasses(self):
index = Index(
name='test_ops_class',
fields=['headline'],
opclasses=['varchar_pattern_ops'],
)
with connection.schema_editor() as editor:
# This would error if opclasses weren't ignored.
editor.add_index(IndexedArticle2, index)
# The `condition` parameter is ignored by databases that don't support partial
# indexes.
@skipIfDBFeature('supports_partial_indexes')
class PartialIndexConditionIgnoredTests(TransactionTestCase):
available_apps = ['indexes']
def test_condition_ignored(self):
index = Index(
name='test_condition_ignored',
fields=['published'],
condition=Q(published=True),
)
with connection.schema_editor() as editor:
# This would error if condition weren't ignored.
editor.add_index(Article, index)
self.assertNotIn(
'WHERE %s' % editor.quote_name('published'),
str(index.create_sql(Article, editor))
)
@skipUnless(connection.vendor == 'postgresql', 'PostgreSQL tests')
class SchemaIndexesPostgreSQLTests(TransactionTestCase):
available_apps = ['indexes']
get_opclass_query = '''
SELECT opcname, c.relname FROM pg_opclass AS oc
JOIN pg_index as i on oc.oid = ANY(i.indclass)
JOIN pg_class as c on c.oid = i.indexrelid
WHERE c.relname = '%s'
'''
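# pg_index.indclass holds one operator-class OID per indexed column, so joining
# it against pg_opclass via ANY() returns the opclass name(s) recorded for the
# index whose relname is interpolated into the WHERE clause.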
def test_text_indexes(self):
"""Test creation of PostgreSQL-specific text indexes (#12234)"""
from .models import IndexedArticle
index_sql = [str(statement) for statement in connection.schema_editor()._model_indexes_sql(IndexedArticle)]
self.assertEqual(len(index_sql), 5)
self.assertIn('("headline" varchar_pattern_ops)', index_sql[1])
self.assertIn('("body" text_pattern_ops)', index_sql[3])
# unique=True and db_index=True should only create the varchar-specific
# index (#19441).
self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])
def test_virtual_relation_indexes(self):
"""Test indexes are not created for related objects"""
index_sql = connection.schema_editor()._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
def test_ops_class(self):
index = Index(
name='test_ops_class',
fields=['headline'],
opclasses=['varchar_pattern_ops'],
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % 'test_ops_class')
self.assertEqual(cursor.fetchall(), [('varchar_pattern_ops', 'test_ops_class')])
def test_ops_class_multiple_columns(self):
index = Index(
name='test_ops_class_multiple',
fields=['headline', 'body'],
opclasses=['varchar_pattern_ops', 'text_pattern_ops'],
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % 'test_ops_class_multiple')
expected_ops_classes = (
('varchar_pattern_ops', 'test_ops_class_multiple'),
('text_pattern_ops', 'test_ops_class_multiple'),
)
self.assertCountEqual(cursor.fetchall(), expected_ops_classes)
def test_ops_class_partial(self):
index = Index(
name='test_ops_class_partial',
fields=['body'],
opclasses=['text_pattern_ops'],
condition=Q(headline__contains='China'),
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % 'test_ops_class_partial')
self.assertCountEqual(cursor.fetchall(), [('text_pattern_ops', 'test_ops_class_partial')])
def test_ops_class_partial_tablespace(self):
indexname = 'test_ops_class_tblspace'
index = Index(
name=indexname,
fields=['body'],
opclasses=['text_pattern_ops'],
condition=Q(headline__contains='China'),
db_tablespace='pg_default',
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
self.assertIn('TABLESPACE "pg_default" ', str(index.create_sql(IndexedArticle2, editor)))
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % indexname)
self.assertCountEqual(cursor.fetchall(), [('text_pattern_ops', indexname)])
def test_ops_class_descending(self):
indexname = 'test_ops_class_ordered'
index = Index(
name=indexname,
fields=['-body'],
opclasses=['text_pattern_ops'],
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % indexname)
self.assertCountEqual(cursor.fetchall(), [('text_pattern_ops', indexname)])
def test_ops_class_descending_partial(self):
indexname = 'test_ops_class_ordered_partial'
index = Index(
name=indexname,
fields=['-body'],
opclasses=['text_pattern_ops'],
condition=Q(headline__contains='China'),
)
with connection.schema_editor() as editor:
editor.add_index(IndexedArticle2, index)
with editor.connection.cursor() as cursor:
cursor.execute(self.get_opclass_query % indexname)
self.assertCountEqual(cursor.fetchall(), [('text_pattern_ops', indexname)])
def test_ops_class_columns_lists_sql(self):
index = Index(
fields=['headline'],
name='whitespace_idx',
opclasses=['text_pattern_ops'],
)
with connection.schema_editor() as editor:
self.assertIn(
'(%s text_pattern_ops)' % editor.quote_name('headline'),
str(index.create_sql(Article, editor)),
)
def test_ops_class_descending_columns_list_sql(self):
index = Index(
fields=['-headline'],
name='whitespace_idx',
opclasses=['text_pattern_ops'],
)
with connection.schema_editor() as editor:
self.assertIn(
'(%s text_pattern_ops DESC)' % editor.quote_name('headline'),
str(index.create_sql(Article, editor)),
)
@skipUnless(connection.vendor == 'mysql', 'MySQL tests')
class SchemaIndexesMySQLTests(TransactionTestCase):
available_apps = ['indexes']
def test_no_index_for_foreignkey(self):
"""
MySQL on InnoDB already creates indexes automatically for foreign keys.
(#14180). An index should be created if db_constraint=False (#26171).
"""
storage = connection.introspection.get_storage_engine(
connection.cursor(), ArticleTranslation._meta.db_table
)
if storage != "InnoDB":
self.skip("This test only applies to the InnoDB storage engine")
index_sql = [str(statement) for statement in connection.schema_editor()._model_indexes_sql(ArticleTranslation)]
self.assertEqual(index_sql, [
'CREATE INDEX `indexes_articletranslation_article_no_constraint_id_d6c0806b` '
'ON `indexes_articletranslation` (`article_no_constraint_id`)'
])
# The index also shouldn't be created if the ForeignKey is added after
# the model was created.
field_created = False
try:
with connection.schema_editor() as editor:
new_field = ForeignKey(Article, CASCADE)
new_field.set_attributes_from_name('new_foreign_key')
editor.add_field(ArticleTranslation, new_field)
field_created = True
# No deferred SQL. The FK constraint is included in the
# statement to add the field.
self.assertFalse(editor.deferred_sql)
finally:
if field_created:
with connection.schema_editor() as editor:
editor.remove_field(ArticleTranslation, new_field)
@skipUnlessDBFeature('supports_partial_indexes')
# SQLite doesn't support timezone-aware datetimes when USE_TZ is False.
@override_settings(USE_TZ=True)
class PartialIndexTests(TransactionTestCase):
# Schema editor is used to create the index to test that it works.
available_apps = ['indexes']
def test_partial_index(self):
with connection.schema_editor() as editor:
index = Index(
name='recent_article_idx',
fields=['pub_date'],
condition=Q(
pub_date__gt=datetime.datetime(
year=2015, month=1, day=1,
# PostgreSQL would otherwise complain about the lookup
# being converted to a mutable function (by removing
# the timezone in the cast) which is forbidden.
tzinfo=timezone.get_current_timezone(),
),
)
)
self.assertIn(
'WHERE %s' % editor.quote_name('pub_date'),
str(index.create_sql(Article, schema_editor=editor))
)
editor.add_index(index=index, model=Article)
self.assertIn(index.name, connection.introspection.get_constraints(
cursor=connection.cursor(), table_name=Article._meta.db_table,
))
editor.remove_index(index=index, model=Article)
def test_integer_restriction_partial(self):
with connection.schema_editor() as editor:
index = Index(
name='recent_article_idx',
fields=['id'],
condition=Q(pk__gt=1),
)
self.assertIn(
'WHERE %s' % editor.quote_name('id'),
str(index.create_sql(Article, schema_editor=editor))
)
editor.add_index(index=index, model=Article)
self.assertIn(index.name, connection.introspection.get_constraints(
cursor=connection.cursor(), table_name=Article._meta.db_table,
))
editor.remove_index(index=index, model=Article)
def test_boolean_restriction_partial(self):
with connection.schema_editor() as editor:
index = Index(
name='published_index',
fields=['published'],
condition=Q(published=True),
)
self.assertIn(
'WHERE %s' % editor.quote_name('published'),
str(index.create_sql(Article, schema_editor=editor))
)
editor.add_index(index=index, model=Article)
self.assertIn(index.name, connection.introspection.get_constraints(
cursor=connection.cursor(), table_name=Article._meta.db_table,
))
editor.remove_index(index=index, model=Article)
@skipUnlessDBFeature('supports_functions_in_partial_indexes')
def test_multiple_conditions(self):
with connection.schema_editor() as editor:
index = Index(
name='recent_article_idx',
fields=['pub_date', 'headline'],
condition=(
Q(pub_date__gt=datetime.datetime(
year=2015,
month=1,
day=1,
tzinfo=timezone.get_current_timezone(),
)) & Q(headline__contains='China')
),
)
sql = str(index.create_sql(Article, schema_editor=editor))
where = sql.find('WHERE')
self.assertIn(
'WHERE (%s' % editor.quote_name('pub_date'),
sql
)
# Because each backend has different syntax for the operators,
# check ONLY the occurrence of headline in the SQL.
self.assertGreater(sql.rfind('headline'), where)
editor.add_index(index=index, model=Article)
self.assertIn(index.name, connection.introspection.get_constraints(
cursor=connection.cursor(), table_name=Article._meta.db_table,
))
editor.remove_index(index=index, model=Article)
def test_is_null_condition(self):
with connection.schema_editor() as editor:
index = Index(
name='recent_article_idx',
fields=['pub_date'],
condition=Q(pub_date__isnull=False),
)
self.assertIn(
'WHERE %s IS NOT NULL' % editor.quote_name('pub_date'),
str(index.create_sql(Article, schema_editor=editor))
)
editor.add_index(index=index, model=Article)
self.assertIn(index.name, connection.introspection.get_constraints(
cursor=connection.cursor(), table_name=Article._meta.db_table,
))
editor.remove_index(index=index, model=Article)
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from django.views import generic
from horizon.templatetags.horizon import has_permissions # noqa
class MultiTableMixin(object):
"""A generic mixin which provides methods for handling DataTables."""
data_method_pattern = "get_%s_data"
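# For a table whose _meta.name is "instances", the mixin therefore expects the
# view to define a get_instances_data() method (see get_data_methods below);
# the table name here is only an illustrative example.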
def __init__(self, *args, **kwargs):
super(MultiTableMixin, self).__init__(*args, **kwargs)
self.table_classes = getattr(self, "table_classes", [])
self._data = {}
self._tables = {}
self._data_methods = defaultdict(list)
self.get_data_methods(self.table_classes, self._data_methods)
def _get_data_dict(self):
if not self._data:
for table in self.table_classes:
data = []
name = table._meta.name
func_list = self._data_methods.get(name, [])
for func in func_list:
data.extend(func())
self._data[name] = data
return self._data
def get_data_methods(self, table_classes, methods):
for table in table_classes:
name = table._meta.name
if table._meta.mixed_data_type:
for data_type in table._meta.data_types:
func = self.check_method_exist(self.data_method_pattern,
data_type)
if func:
type_name = table._meta.data_type_name
methods[name].append(self.wrap_func(func,
type_name,
data_type))
else:
func = self.check_method_exist(self.data_method_pattern,
name)
if func:
methods[name].append(func)
def wrap_func(self, data_func, type_name, data_type):
def final_data():
data = data_func()
self.assign_type_string(data, type_name, data_type)
return data
return final_data
def check_method_exist(self, func_pattern="%s", *names):
func_name = func_pattern % names
func = getattr(self, func_name, None)
if not func or not callable(func):
cls_name = self.__class__.__name__
raise NotImplementedError("You must define a %s method "
"in %s." % (func_name, cls_name))
else:
return func
def assign_type_string(self, data, type_name, data_type):
for datum in data:
setattr(datum, type_name, data_type)
def get_tables(self):
if not self.table_classes:
raise AttributeError('You must specify one or more DataTable '
'classes for the "table_classes" attribute '
'on %s.' % self.__class__.__name__)
if not self._tables:
for table in self.table_classes:
if not has_permissions(self.request.user,
table._meta):
continue
func_name = "get_%s_table" % table._meta.name
table_func = getattr(self, func_name, None)
if table_func is None:
tbl = table(self.request, **self.kwargs)
else:
tbl = table_func(self, self.request, **self.kwargs)
self._tables[table._meta.name] = tbl
return self._tables
def get_context_data(self, **kwargs):
context = super(MultiTableMixin, self).get_context_data(**kwargs)
tables = self.get_tables()
for name, table in tables.items():
context["%s_table" % name] = table
return context
def has_prev_data(self, table):
return False
def has_more_data(self, table):
return False
def handle_table(self, table):
name = table.name
data = self._get_data_dict()
self._tables[name].data = data[table._meta.name]
self._tables[name]._meta.has_more_data = self.has_more_data(table)
self._tables[name]._meta.has_prev_data = self.has_prev_data(table)
handled = self._tables[name].maybe_handle()
return handled
class MultiTableView(MultiTableMixin, generic.TemplateView):
"""A class-based generic view to handle the display and processing of
multiple :class:`~horizon.tables.DataTable` classes in a single view.
Three steps are required to use this view: set the ``table_classes``
attribute with a tuple of the desired
:class:`~horizon.tables.DataTable` classes;
define a ``get_{{ table_name }}_data`` method for each table class
which returns a set of data for that table; and specify a template for
the ``template_name`` attribute.
"""
def construct_tables(self):
tables = self.get_tables().values()
# Early out before data is loaded
for table in tables:
preempted = table.maybe_preempt()
if preempted:
return preempted
# Load data into each table and check for action handlers
for table in tables:
handled = self.handle_table(table)
if handled:
return handled
# If we didn't already return a response, returning None continues
# with the view as normal.
return None
def get(self, request, *args, **kwargs):
handled = self.construct_tables()
if handled:
return handled
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
def post(self, request, *args, **kwargs):
# GET and POST handling are the same
return self.get(request, *args, **kwargs)
class DataTableView(MultiTableView):
"""A class-based generic view to handle basic DataTable processing.
Three steps are required to use this view: set the ``table_class``
attribute with the desired :class:`~horizon.tables.DataTable` class;
define a ``get_data`` method which returns a set of data for the
table; and specify a template for the ``template_name`` attribute.
Optionally, you can override the ``has_more_data`` method to trigger
pagination handling for APIs that support it.
"""
table_class = None
context_object_name = 'table'
def _get_data_dict(self):
if not self._data:
self._data = {self.table_class._meta.name: self.get_data()}
return self._data
def get_data(self):
raise NotImplementedError('You must define a "get_data" method on %s.'
% self.__class__.__name__)
def get_tables(self):
if not self._tables:
self._tables = {}
if has_permissions(self.request.user,
self.table_class._meta):
self._tables[self.table_class._meta.name] = self.get_table()
return self._tables
def get_table(self):
# Note: this method cannot be easily memoized, because get_context_data
# uses its cached value directly.
if not self.table_class:
raise AttributeError('You must specify a DataTable class for the '
'"table_class" attribute on %s.'
% self.__class__.__name__)
if not hasattr(self, "table"):
self.table = self.table_class(self.request, **self.kwargs)
return self.table
def get_context_data(self, **kwargs):
context = super(DataTableView, self).get_context_data(**kwargs)
if hasattr(self, "table"):
context[self.context_object_name] = self.table
return context
class MixedDataTableView(DataTableView):
"""A class-based generic view to handle DataTable with mixed data
types.
Basic usage is the same as DataTableView.
Three steps are required to use this view:
#. Set the ``table_class`` attribute with desired
:class:`~horizon.tables.DataTable` class. In the class the
``data_types`` list should have at least two elements.
#. Define a ``get_{{ data_type }}_data`` method for each data type
which returns a set of data for the table.
#. Specify a template for the ``template_name`` attribute.
"""
table_class = None
context_object_name = 'table'
def _get_data_dict(self):
if not self._data:
table = self.table_class
self._data = {table._meta.name: []}
for data_type in table.data_types:
func_name = "get_%s_data" % data_type
data_func = getattr(self, func_name, None)
if data_func is None:
cls_name = self.__class__.__name__
raise NotImplementedError("You must define a %s method "
"for %s data type in %s." %
(func_name, data_type, cls_name))
data = data_func()
self.assign_type_string(data, data_type)
self._data[table._meta.name].extend(data)
return self._data
def assign_type_string(self, data, type_string):
for datum in data:
setattr(datum, self.table_class.data_type_name,
type_string)
def get_table(self):
self.table = super(MixedDataTableView, self).get_table()
if not self.table._meta.mixed_data_type:
raise AttributeError('You must have at least two elements in '
'the data_types attribute '
'in table %s to use MixedDataTableView.'
% self.table._meta.name)
return self.table
|
|
"""The tests for hls streams."""
from datetime import timedelta
from io import BytesIO
import os
from unittest.mock import patch
import av
import pytest
from homeassistant.components.stream import create_stream
from homeassistant.components.stream.const import HLS_PROVIDER, RECORDER_PROVIDER
from homeassistant.components.stream.core import Part
from homeassistant.components.stream.fmp4utils import find_box
from homeassistant.components.stream.recorder import recorder_save_worker
from homeassistant.exceptions import HomeAssistantError
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from .common import DefaultSegment as Segment, generate_h264_video, remux_with_audio
from tests.common import async_fire_time_changed
MAX_ABORT_SEGMENTS = 20 # Abort test to avoid looping forever
async def test_record_stream(hass, hass_client, record_worker_sync, h264_video):
"""
Test record stream.
Tests full integration with the stream component, and captures the
stream worker and save worker to allow for clean shutdown of background
threads. The actual save logic is tested in test_recorder_save below.
"""
await async_setup_component(hass, "stream", {"stream": {}})
# Setup demo track
stream = create_stream(hass, h264_video, {})
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path")
# After stream decoding finishes, the record worker thread starts
segments = await record_worker_sync.get_segments()
assert len(segments) >= 1
# Verify that the save worker was invoked, then block until its
# thread completes and is shutdown completely to avoid thread leaks.
await record_worker_sync.join()
stream.stop()
async def test_record_lookback(
hass, hass_client, stream_worker_sync, record_worker_sync, h264_video
):
"""Exercise record with loopback."""
await async_setup_component(hass, "stream", {"stream": {}})
stream = create_stream(hass, h264_video, {})
# Start an HLS feed to enable lookback
stream.add_provider(HLS_PROVIDER)
stream.start()
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path", lookback=4)
# This test does not need recorder cleanup since it is not fully exercised
stream.stop()
async def test_recorder_timeout(hass, hass_client, stream_worker_sync, h264_video):
"""
Test recorder timeout.
Mocks out the cleanup to assert that it is invoked after a timeout.
This test does not start the recorder save thread.
"""
await async_setup_component(hass, "stream", {"stream": {}})
stream_worker_sync.pause()
with patch("homeassistant.components.stream.IdleTimer.fire") as mock_timeout:
# Setup demo track
stream = create_stream(hass, h264_video, {})
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path")
recorder = stream.add_provider(RECORDER_PROVIDER)
await recorder.recv()
# Wait a minute
future = dt_util.utcnow() + timedelta(minutes=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert mock_timeout.called
stream_worker_sync.resume()
stream.stop()
await hass.async_block_till_done()
await hass.async_block_till_done()
async def test_record_path_not_allowed(hass, hass_client, h264_video):
"""Test where the output path is not allowed by home assistant configuration."""
await async_setup_component(hass, "stream", {"stream": {}})
stream = create_stream(hass, h264_video, {})
with patch.object(
hass.config, "is_allowed_path", return_value=False
), pytest.raises(HomeAssistantError):
await stream.async_record("/example/path")
def add_parts_to_segment(segment, source):
"""Add relevant part data to segment for testing recorder."""
moof_locs = list(find_box(source.getbuffer(), b"moof")) + [len(source.getbuffer())]
segment.init = source.getbuffer()[: moof_locs[0]].tobytes()
segment.parts = [
Part(
duration=None,
has_keyframe=None,
data=source.getbuffer()[moof_locs[i] : moof_locs[i + 1]],
)
for i in range(len(moof_locs) - 1)
]
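# find_box() yields the byte offset of every 'moof' box in the fMP4 buffer, so
# the bytes before the first 'moof' become the init section and each
# [moof, next moof) slice becomes one Part, roughly mirroring how the stream
# worker slices real segments.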
async def test_recorder_save(tmpdir, h264_video):
"""Test recorder save."""
# Setup
filename = f"{tmpdir}/test.mp4"
# Run
segment = Segment(sequence=1)
add_parts_to_segment(segment, h264_video)
segment.duration = 4
recorder_save_worker(filename, [segment])
# Assert
assert os.path.exists(filename)
async def test_recorder_discontinuity(tmpdir, h264_video):
"""Test recorder save across a discontinuity."""
# Setup
filename = f"{tmpdir}/test.mp4"
# Run
segment_1 = Segment(sequence=1, stream_id=0)
add_parts_to_segment(segment_1, h264_video)
segment_1.duration = 4
segment_2 = Segment(sequence=2, stream_id=1)
add_parts_to_segment(segment_2, h264_video)
segment_2.duration = 4
recorder_save_worker(filename, [segment_1, segment_2])
# Assert
assert os.path.exists(filename)
async def test_recorder_no_segments(tmpdir):
"""Test recorder behavior with a stream failure which causes no segments."""
# Setup
filename = f"{tmpdir}/test.mp4"
# Run
recorder_save_worker("unused-file", [])
# Assert
assert not os.path.exists(filename)
@pytest.fixture(scope="module")
def h264_mov_video():
"""Generate a source video with no audio."""
return generate_h264_video(container_format="mov")
@pytest.mark.parametrize(
"audio_codec,expected_audio_streams",
[
("aac", 1), # aac is a valid mp4 codec
("pcm_mulaw", 0), # G.711 is not a valid mp4 codec
("empty", 0), # audio stream with no packets
(None, 0), # no audio stream
],
)
async def test_record_stream_audio(
hass,
hass_client,
stream_worker_sync,
record_worker_sync,
audio_codec,
expected_audio_streams,
h264_mov_video,
):
"""
Test treatment of different audio inputs.
Record stream output should have an audio channel when input has
a valid codec and audio packets and no audio channel otherwise.
"""
await async_setup_component(hass, "stream", {"stream": {}})
# Remux source video with new audio
source = remux_with_audio(h264_mov_video, "mov", audio_codec) # mov can store PCM
record_worker_sync.reset()
stream_worker_sync.pause()
stream = create_stream(hass, source, {})
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path")
recorder = stream.add_provider(RECORDER_PROVIDER)
while True:
await recorder.recv()
if not (segment := recorder.last_segment):
break
last_segment = segment
stream_worker_sync.resume()
result = av.open(
BytesIO(last_segment.init + last_segment.get_data()),
"r",
format="mp4",
)
assert len(result.streams.audio) == expected_audio_streams
result.close()
stream.stop()
await hass.async_block_till_done()
# Verify that the save worker was invoked, then block until its
# thread completes and is shutdown completely to avoid thread leaks.
await record_worker_sync.join()
async def test_recorder_log(hass, caplog):
"""Test starting a stream to record logs the url without username and password."""
await async_setup_component(hass, "stream", {"stream": {}})
stream = create_stream(hass, "https://abcd:[email protected]", {})
with patch.object(hass.config, "is_allowed_path", return_value=True):
await stream.async_record("/example/path")
assert "https://abcd:[email protected]" not in caplog.text
assert "https://****:****@foo.bar" in caplog.text
|
|
import sys
import threading
import warnings
from collections import Counter, OrderedDict, defaultdict
from functools import partial
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils import lru_cache
from .config import AppConfig
class Apps(object):
"""
A registry that stores the configuration of installed applications.
It also keeps track of models, e.g. to provide reverse relations.
"""
def __init__(self, installed_apps=()):
# installed_apps is set to None when creating the master registry
# because it cannot be populated at that point. Other registries must
# provide a list of installed apps and are populated immediately.
if installed_apps is None and hasattr(sys.modules[__name__], 'apps'):
raise RuntimeError("You must supply an installed_apps argument.")
# Mapping of app labels => model names => model classes. Every time a
# model is imported, ModelBase.__new__ calls apps.register_model which
# creates an entry in all_models. All imported models are registered,
# regardless of whether they're defined in an installed application
# and whether the registry has been populated. Since it isn't possible
# to reimport a module safely (it could reexecute initialization code)
# all_models is never overridden or reset.
self.all_models = defaultdict(OrderedDict)
# Mapping of labels to AppConfig instances for installed apps.
self.app_configs = OrderedDict()
# Stack of app_configs. Used to store the current state in
# set_available_apps and set_installed_apps.
self.stored_app_configs = []
# Whether the registry is populated.
self.apps_ready = self.models_ready = self.ready = False
# Lock for thread-safe population.
self._lock = threading.Lock()
# Maps ("app_label", "modelname") tuples to lists of functions to be
# called when the corresponding model is ready. Used by this class's
# `lazy_model_operation()` and `do_pending_operations()` methods.
self._pending_operations = defaultdict(list)
# Populate apps and models, unless it's the master registry.
if installed_apps is not None:
self.populate(installed_apps)
def populate(self, installed_apps=None):
"""
Loads application configurations and models.
This method imports each application module and then each model module.
It is thread safe and idempotent, but not reentrant.
"""
if self.ready:
return
# populate() might be called by two threads in parallel on servers
# that create threads before initializing the WSGI callable.
with self._lock:
if self.ready:
return
# app_config should be pristine, otherwise the code below won't
# guarantee that the order matches the order in INSTALLED_APPS.
if self.app_configs:
raise RuntimeError("populate() isn't reentrant")
# Adjust apps dependencies
installed_apps = adjust_dependencies(installed_apps)
# Load app configs and app modules.
for entry in installed_apps:
if isinstance(entry, AppConfig):
app_config = entry
else:
app_config = AppConfig.create(entry)
if app_config.label in self.app_configs:
raise ImproperlyConfigured(
"Application labels aren't unique, "
"duplicates: %s" % app_config.label)
self.app_configs[app_config.label] = app_config
# Check for duplicate app names.
counts = Counter(
app_config.name for app_config in self.app_configs.values())
duplicates = [
name for name, count in counts.most_common() if count > 1]
if duplicates:
raise ImproperlyConfigured(
"Application names aren't unique, "
"duplicates: %s" % ", ".join(duplicates))
self.apps_ready = True
# Load models.
for app_config in self.app_configs.values():
all_models = self.all_models[app_config.label]
app_config.import_models(all_models)
self.clear_cache()
self.models_ready = True
for app_config in self.get_app_configs():
app_config.ready()
self.ready = True
def check_apps_ready(self):
"""
Raises an exception if all apps haven't been imported yet.
"""
if not self.apps_ready:
raise AppRegistryNotReady("Apps aren't loaded yet.")
def check_models_ready(self):
"""
Raises an exception if all models haven't been imported yet.
"""
if not self.models_ready:
raise AppRegistryNotReady("Models aren't loaded yet.")
def get_app_configs(self):
"""
Imports applications and returns an iterable of app configs.
"""
self.check_apps_ready()
return self.app_configs.values()
def get_app_config(self, app_label):
"""
Imports applications and returns an app config for the given label.
Raises LookupError if no application exists with this label.
"""
self.check_apps_ready()
try:
return self.app_configs[app_label]
except KeyError:
message = "No installed app with label '%s'." % app_label
for app_config in self.get_app_configs():
if app_config.name == app_label:
message += " Did you mean '%s'?" % app_config.label
break
raise LookupError(message)
# This method is performance-critical at least for Django's test suite.
@lru_cache.lru_cache(maxsize=None)
def get_models(self, include_auto_created=False,
include_deferred=False, include_swapped=False):
"""
Returns a list of all installed models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models created to satisfy deferred attribute queries,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
"""
self.check_models_ready()
result = []
for app_config in self.app_configs.values():
result.extend(list(app_config.get_models(
include_auto_created, include_deferred, include_swapped)))
return result
def get_model(self, app_label, model_name=None):
"""
Returns the model matching the given app_label and model_name.
As a shortcut, this function also accepts a single argument in the
form <app_label>.<model_name>.
model_name is case-insensitive.
Raises LookupError if no application exists with this label, or no
model exists with this name in the application. Raises ValueError if
called with a single argument that doesn't contain exactly one dot.
"""
self.check_models_ready()
if model_name is None:
app_label, model_name = app_label.split('.')
return self.get_app_config(app_label).get_model(model_name.lower())
def register_model(self, app_label, model):
# Since this method is called when models are imported, it cannot
# perform imports because of the risk of import loops. It mustn't
# call get_app_config().
model_name = model._meta.model_name
app_models = self.all_models[app_label]
if model_name in app_models:
if (model.__name__ == app_models[model_name].__name__ and
model.__module__ == app_models[model_name].__module__):
warnings.warn(
"Model '%s.%s' was already registered. "
"Reloading models is not advised as it can lead to inconsistencies, "
"most notably with related models." % (app_label, model_name),
RuntimeWarning, stacklevel=2)
else:
raise RuntimeError(
"Conflicting '%s' models in application '%s': %s and %s." %
(model_name, app_label, app_models[model_name], model))
app_models[model_name] = model
self.do_pending_operations(model)
self.clear_cache()
def is_installed(self, app_name):
"""
Checks whether an application with this name exists in the registry.
app_name is the full name of the app eg. 'django.contrib.admin'.
"""
self.check_apps_ready()
return any(ac.name == app_name for ac in self.app_configs.values())
def get_containing_app_config(self, object_name):
"""
Look for an app config containing a given object.
object_name is the dotted Python path to the object.
Returns the app config for the inner application in case of nesting.
Returns None if the object isn't in any registered app config.
"""
self.check_apps_ready()
candidates = []
for app_config in self.app_configs.values():
if object_name.startswith(app_config.name):
subpath = object_name[len(app_config.name):]
if subpath == '' or subpath[0] == '.':
candidates.append(app_config)
if candidates:
return sorted(candidates, key=lambda ac: -len(ac.name))[0]
def get_registered_model(self, app_label, model_name):
"""
Similar to get_model(), but doesn't require that an app exists with
the given app_label.
It's safe to call this method at import time, even while the registry
is being populated.
"""
model = self.all_models[app_label].get(model_name.lower())
if model is None:
raise LookupError(
"Model '%s.%s' not registered." % (app_label, model_name))
return model
def set_available_apps(self, available):
"""
Restricts the set of installed apps used by get_app_config[s].
available must be an iterable of application names.
set_available_apps() must be balanced with unset_available_apps().
Primarily used for performance optimization in TransactionTestCase.
This method is safe in the sense that it doesn't trigger any imports.
"""
available = set(available)
installed = set(app_config.name for app_config in self.get_app_configs())
if not available.issubset(installed):
raise ValueError("Available apps isn't a subset of installed "
"apps, extra apps: %s" % ", ".join(available - installed))
self.stored_app_configs.append(self.app_configs)
self.app_configs = OrderedDict(
(label, app_config)
for label, app_config in self.app_configs.items()
if app_config.name in available)
self.clear_cache()
def unset_available_apps(self):
"""
Cancels a previous call to set_available_apps().
"""
self.app_configs = self.stored_app_configs.pop()
self.clear_cache()
def set_installed_apps(self, installed):
"""
Enables a different set of installed apps for get_app_config[s].
installed must be an iterable in the same format as INSTALLED_APPS.
set_installed_apps() must be balanced with unset_installed_apps(),
even if it exits with an exception.
Primarily used as a receiver of the setting_changed signal in tests.
This method may trigger new imports, which may add new models to the
registry of all imported models. They will stay in the registry even
after unset_installed_apps(). Since it isn't possible to replay
imports safely (eg. that could lead to registering listeners twice),
models are registered when they're imported and never removed.
"""
if not self.ready:
raise AppRegistryNotReady("App registry isn't ready yet.")
self.stored_app_configs.append(self.app_configs)
self.app_configs = OrderedDict()
self.apps_ready = self.models_ready = self.ready = False
self.clear_cache()
self.populate(installed)
def unset_installed_apps(self):
"""
Cancels a previous call to set_installed_apps().
"""
self.app_configs = self.stored_app_configs.pop()
self.apps_ready = self.models_ready = self.ready = True
self.clear_cache()
def clear_cache(self):
"""
Clears all internal caches, for methods that alter the app registry.
This is mostly used in tests.
"""
# Call expire cache on each model. This will purge
# the relation tree and the fields cache.
self.get_models.cache_clear()
if self.ready:
# Circumvent self.get_models() to prevent the cache from being refilled.
# In particular, this avoids caching an empty value while cloning.
for app_config in self.app_configs.values():
for model in app_config.get_models(include_auto_created=True):
model._meta._expire_cache()
def lazy_model_operation(self, function, *model_keys):
"""
Take a function and a number of ("app_label", "modelname") tuples, and
when all the corresponding models have been imported and registered,
call the function with the model classes as its arguments.
The function passed to this method must accept exactly n models as
arguments, where n=len(model_keys).
"""
# If this function depends on more than one model, we recursively turn
# it into a chain of functions that accept a single model argument and
# pass each in turn to lazy_model_operation.
model_key, more_models = model_keys[0], model_keys[1:]
if more_models:
supplied_fn = function
def function(model):
next_function = partial(supplied_fn, model)
self.lazy_model_operation(next_function, *more_models)
# If the model is already loaded, pass it to the function immediately.
# Otherwise, delay execution until the class is prepared.
try:
model_class = self.get_registered_model(*model_key)
except LookupError:
self._pending_operations[model_key].append(function)
else:
function(model_class)
def do_pending_operations(self, model):
"""
Take a newly-prepared model and pass it to each function waiting for
it. This is called at the very end of `Apps.register_model()`.
"""
key = model._meta.app_label, model._meta.model_name
for function in self._pending_operations.pop(key, []):
function(model)
def get_dependencies(app):
r = []
if isinstance(app, str):
app = AppConfig.create(app)
deps = app.dependencies
if deps:
for dep in deps:
r += get_dependencies(dep)
return r + list(app.dependencies)
return []
def adjust_dependencies(apps):
# adjust module dependency priority
apps = list(apps)
for entry in list(apps):  # iterate over a snapshot; the list is mutated below
deps = get_dependencies(entry)
if deps:
apps.remove(entry)
i = 0
for dep in deps:
if dep not in apps:
apps.append(dep)
i = len(apps) - 1
continue
i = max(i, apps.index(dep))
if i == 0:
apps.append(entry)
else:
apps.insert(i + 1, entry)
return apps
apps = Apps(installed_apps=None)
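# Illustrative sketch (hypothetical helpers, not part of the registry above):
# the currying trick used by lazy_model_operation(). A callback that needs
# several ("app_label", "modelname") keys is rewritten into a chain of
# one-argument functions, each binding one resolved value with
# functools.partial and re-registering the rest, so the registry only ever
# has to deliver a single object at a time (see do_pending_operations()).
from functools import partial
def _lazy(pending, function, *keys):
    key, more = keys[0], keys[1:]
    if more:
        supplied = function
        def function(value):
            _lazy(pending, partial(supplied, value), *more)
    pending.setdefault(key, []).append(function)
def _resolve(pending, key, value):
    # Analogue of do_pending_operations(): fire everything waiting on key.
    for fn in pending.pop(key, []):
        fn(value)
if __name__ == "__main__":
    seen, pending = [], {}
    _lazy(pending, lambda a, b: seen.append((a, b)), "auth.user", "blog.post")
    _resolve(pending, "auth.user", "<User>")
    _resolve(pending, "blog.post", "<Post>")
    assert seen == [("<User>", "<Post>")]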
|
|
from __future__ import absolute_import, division, unicode_literals
from future.builtins import int, open, str
from hashlib import md5
import os
try:
from urllib.parse import quote, unquote
except ImportError:
from urllib import quote, unquote
from django.apps import apps
from django.contrib import admin
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist
from django.core.files import File
from django.core.files.storage import default_storage
from django.urls import reverse, resolve, NoReverseMatch
from django.db.models import Model
from django.template import Node, Template, TemplateSyntaxError
from django.template.base import (TOKEN_BLOCK, TOKEN_COMMENT,
TOKEN_TEXT, TOKEN_VAR, TextNode)
from django.template.defaultfilters import escape
from django.template.loader import get_template
from django.utils import translation
from django.utils.html import strip_tags
from django.utils.text import capfirst
from django.utils.safestring import SafeText, mark_safe
from mezzanine.conf import settings
from mezzanine.core.fields import RichTextField
from mezzanine.core.forms import get_edit_form
from mezzanine.utils.cache import nevercache_token, cache_installed
from mezzanine.utils.html import decode_entities
from mezzanine.utils.importing import import_dotted_path
from mezzanine.utils.sites import current_site_id, has_site_permission
from mezzanine.utils.urls import admin_url, home_slug
from mezzanine.utils.views import is_editable
from mezzanine import template
register = template.Library()
if "compressor" in settings.INSTALLED_APPS:
@register.tag
def compress(parser, token):
"""
Shadows django-compressor's compress tag so it can be
loaded from ``mezzanine_tags``, allowing us to provide
a dummy version when django-compressor isn't installed.
"""
from compressor.templatetags.compress import compress
return compress(parser, token)
else:
@register.to_end_tag
def compress(parsed, context, token):
"""
Dummy tag for fallback when django-compressor isn't installed.
"""
return parsed
def initialize_nevercache():
if cache_installed():
@register.tag
def nevercache(parser, token):
"""
Tag for two phased rendering. Converts enclosed template
code and content into text, which gets rendered separately
in ``mezzanine.core.middleware.UpdateCacheMiddleware``.
This is to bypass caching for the enclosed code and content.
"""
text = []
end_tag = "endnevercache"
tag_mapping = {
TOKEN_TEXT: ("", ""),
TOKEN_VAR: ("{{", "}}"),
TOKEN_BLOCK: ("{%", "%}"),
TOKEN_COMMENT: ("{#", "#}"),
}
delimiter = nevercache_token()
while parser.tokens:
token = parser.next_token()
token_type = token.token_type
if token_type == TOKEN_BLOCK and token.contents == end_tag:
return TextNode(delimiter + "".join(text) + delimiter)
start, end = tag_mapping[token_type]
text.append("%s%s%s" % (start, token.contents, end))
parser.unclosed_block_tag(end_tag)
else:
@register.to_end_tag
def nevercache(parsed, context, token):
"""
Dummy fallback ``nevercache`` for when caching is not
configured.
"""
return parsed
initialize_nevercache()
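# Illustrative sketch (hypothetical token data): how the tag_mapping above
# turns already-parsed tokens back into raw template text. nevercache wraps
# that text in nevercache_token() delimiters so UpdateCacheMiddleware can
# find it inside the cached page and render it freshly on every request.
_demo_tokens = [("var", "user.username"), ("block", "csrf_token"), ("text", "!")]
_demo_mapping = {"text": ("", ""), "var": ("{{", "}}"), "block": ("{%", "%}")}
_demo_text = "".join("%s%s%s" % (_demo_mapping[kind][0], contents,
                                 _demo_mapping[kind][1])
                     for kind, contents in _demo_tokens)
assert _demo_text == "{{user.username}}{%csrf_token%}!"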
@register.simple_tag(takes_context=True)
def fields_for(context, form, template="includes/form_fields.html"):
"""
Renders fields for a form with an optional template choice.
"""
context["form_for_fields"] = form
return get_template(template).render(context.flatten())
@register.inclusion_tag("includes/form_errors.html")
def errors_for(form):
"""
Renders an alert if the form has any errors.
"""
return {"form": form}
@register.filter
def sort_by(items, attr):
"""
General sort filter - sorts by either attribute or key.
"""
def key_func(item):
try:
return getattr(item, attr)
except AttributeError:
try:
return item[attr]
except TypeError:
getattr(item, attr) # Reraise AttributeError
return sorted(items, key=key_func)
@register.filter
def is_installed(app_name):
"""
Returns ``True`` if the given app name is in the
``INSTALLED_APPS`` setting.
"""
from warnings import warn
warn("The is_installed filter is deprecated. Please use the tag "
"{% ifinstalled appname %}{% endifinstalled %}")
return app_name in settings.INSTALLED_APPS
@register.tag
def ifinstalled(parser, token):
"""
Old-style ``if`` tag that renders contents if the given app is
installed. The main use case is:
{% ifinstalled app_name %}
{% include "app_name/template.html" %}
{% endifinstalled %}
so we need to manually pull out all tokens if the app isn't
installed, since if we used a normal ``if`` tag with a False arg,
the include tag would still try to find the template to include.
"""
try:
tag, app = token.split_contents()
except ValueError:
raise TemplateSyntaxError("ifinstalled should be in the form: "
"{% ifinstalled app_name %}"
"{% endifinstalled %}")
end_tag = "end" + tag
unmatched_end_tag = 1
if app.strip("\"'") not in settings.INSTALLED_APPS:
while unmatched_end_tag:
token = parser.tokens.pop(0)
if token.token_type == TOKEN_BLOCK:
block_name = token.contents.split()[0]
if block_name == tag:
unmatched_end_tag += 1
if block_name == end_tag:
unmatched_end_tag -= 1
parser.tokens.insert(0, token)
nodelist = parser.parse((end_tag,))
parser.delete_first_token()
class IfInstalledNode(Node):
def render(self, context):
return nodelist.render(context)
return IfInstalledNode()
@register.render_tag
def set_short_url_for(context, token):
"""
Sets the ``short_url`` attribute of the given model for share
links in the template.
"""
obj = context[token.split_contents()[1]]
obj.set_short_url()
return ""
@register.simple_tag
def gravatar_url(email, size=32):
"""
Return the full URL for a Gravatar given an email hash.
"""
bits = (md5(email.lower().encode("utf-8")).hexdigest(), size)
return "//www.gravatar.com/avatar/%s?s=%s&d=identicon&r=PG" % bits
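# Illustrative sketch (hypothetical address): the Gravatar URL is built from
# the md5 hex digest of the lower-cased email, with the size passed as ``s``.
assert gravatar_url("Someone@Example.com", 64) == (
    "//www.gravatar.com/avatar/%s?s=64&d=identicon&r=PG"
    % md5(b"someone@example.com").hexdigest())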
@register.to_end_tag
def metablock(parsed):
"""
Remove HTML tags, entities and superfluous characters from
meta blocks.
"""
parsed = " ".join(parsed.replace("\n", "").split()).replace(" ,", ",")
return escape(strip_tags(decode_entities(parsed)))
@register.inclusion_tag("includes/pagination.html", takes_context=True)
def pagination_for(context, current_page, page_var="page", exclude_vars=""):
"""
Include the pagination template and the data needed to persist the
querystring in pagination links. A comma-separated string of var names
in the current querystring to exclude from the pagination links can
also be given via the ``exclude_vars`` arg.
"""
querystring = context["request"].GET.copy()
exclude_vars = [v for v in exclude_vars.split(",") if v] + [page_var]
for exclude_var in exclude_vars:
if exclude_var in querystring:
del querystring[exclude_var]
querystring = querystring.urlencode()
return {
"current_page": current_page,
"querystring": querystring,
"page_var": page_var,
}
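# Illustrative sketch (hypothetical request data): the page variable and any
# names passed via ``exclude_vars`` are removed from the persisted
# querystring, so pagination links only carry the remaining filters.
from django.http import QueryDict
_demo_qs = QueryDict("q=django&page=3&tag=cms", mutable=True)
for _var in ["tag", "page"]:
    if _var in _demo_qs:
        del _demo_qs[_var]
assert _demo_qs.urlencode() == "q=django"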
@register.inclusion_tag("includes/search_form.html", takes_context=True)
def search_form(context, search_model_names=None):
"""
Includes the search form with a list of models to use as choices
for filtering the search by. Models should be a string with models
in the format ``app_label.model_name`` separated by spaces. The
string ``all`` can also be used, in which case the models defined
by the ``SEARCH_MODEL_CHOICES`` setting will be used.
"""
template_vars = {
"request": context["request"],
}
if not search_model_names or not settings.SEARCH_MODEL_CHOICES:
search_model_names = []
elif search_model_names == "all":
search_model_names = list(settings.SEARCH_MODEL_CHOICES)
else:
search_model_names = search_model_names.split(" ")
search_model_choices = []
for model_name in search_model_names:
try:
model = apps.get_model(*model_name.split(".", 1))
except LookupError:
pass
else:
verbose_name = model._meta.verbose_name_plural.capitalize()
search_model_choices.append((verbose_name, model_name))
template_vars["search_model_choices"] = sorted(search_model_choices)
return template_vars
@register.simple_tag
def thumbnail(image_url, width, height, upscale=True, quality=95, left=.5,
top=.5, padding=False, padding_color="#fff"):
"""
Given the URL to an image, resizes the image using the given width
and height the first time it is requested, and returns the URL to the
new resized image. If width or height is zero then the original ratio
is maintained. When ``upscale`` is False, images smaller than the
given size will not be grown to fill that size. The given width and
height thus act as maximum dimensions.
"""
if not image_url:
return ""
try:
from PIL import Image, ImageFile, ImageOps
except ImportError:
return ""
image_url = unquote(str(image_url)).split("?")[0]
if image_url.startswith(settings.MEDIA_URL):
image_url = image_url.replace(settings.MEDIA_URL, "", 1)
image_dir, image_name = os.path.split(image_url)
image_prefix, image_ext = os.path.splitext(image_name)
filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext.lower(), "JPEG")
thumb_name = "%s-%sx%s" % (image_prefix, width, height)
if not upscale:
thumb_name += "-no-upscale"
if left != .5 or top != .5:
left = min(1, max(0, left))
top = min(1, max(0, top))
thumb_name = "%s-%sx%s" % (thumb_name, left, top)
thumb_name += "-padded-%s" % padding_color if padding else ""
thumb_name = "%s%s" % (thumb_name, image_ext)
# `image_name` is used here for the directory path, as each image
# requires its own sub-directory using its own name - this is so
# we can consistently delete all thumbnails for an individual
# image, which is something we do in filebrowser when a new image
# is written, allowing us to purge any previously generated
# thumbnails that may match a new image name.
thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
settings.THUMBNAILS_DIR_NAME, image_name)
if not os.path.exists(thumb_dir):
try:
os.makedirs(thumb_dir)
except OSError:
pass
thumb_path = os.path.join(thumb_dir, thumb_name)
thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
quote(image_name.encode("utf-8")),
quote(thumb_name.encode("utf-8")))
image_url_path = os.path.dirname(image_url)
if image_url_path:
thumb_url = "%s/%s" % (image_url_path, thumb_url)
try:
thumb_exists = os.path.exists(thumb_path)
except UnicodeEncodeError:
# The image was saved to a filesystem with utf-8 support, but the
# locale has since changed and the filesystem no longer supports
# utf-8.
from mezzanine.core.exceptions import FileSystemEncodingChanged
raise FileSystemEncodingChanged()
if thumb_exists:
# Thumbnail exists, don't generate it.
return thumb_url
elif not default_storage.exists(image_url):
# Requested image does not exist, just return its URL.
return image_url
f = default_storage.open(image_url)
try:
image = Image.open(f)
except:
# Invalid image format.
return image_url
image_info = image.info
# Transpose to align the image to its orientation if necessary.
# If the image is transposed, delete the exif information as
# not all browsers support the CSS image-orientation:
# - http://caniuse.com/#feat=css-image-orientation
try:
orientation = image._getexif().get(0x0112)
except:
orientation = None
if orientation:
methods = {
2: (Image.FLIP_LEFT_RIGHT,),
3: (Image.ROTATE_180,),
4: (Image.FLIP_TOP_BOTTOM,),
5: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_90),
6: (Image.ROTATE_270,),
7: (Image.FLIP_LEFT_RIGHT, Image.ROTATE_270),
8: (Image.ROTATE_90,)}.get(orientation, ())
if methods:
image_info.pop('exif', None)
for method in methods:
image = image.transpose(method)
to_width = int(width)
to_height = int(height)
from_width = image.size[0]
from_height = image.size[1]
if not upscale:
to_width = min(to_width, from_width)
to_height = min(to_height, from_height)
# Set dimensions.
if to_width == 0:
to_width = from_width * to_height // from_height
elif to_height == 0:
to_height = from_height * to_width // from_width
if image.mode not in ("P", "L", "RGBA") \
and filetype not in ("JPG", "JPEG"):
try:
image = image.convert("RGBA")
except:
return image_url
# Required for progressive jpgs.
ImageFile.MAXBLOCK = 2 * (max(image.size) ** 2)
# Padding.
if padding and to_width and to_height:
from_ratio = float(from_width) / from_height
to_ratio = float(to_width) / to_height
pad_size = None
if to_ratio < from_ratio:
pad_height = int(to_height * (float(from_width) / to_width))
pad_size = (from_width, pad_height)
pad_top = (pad_height - from_height) // 2
pad_left = 0
elif to_ratio > from_ratio:
pad_width = int(to_width * (float(from_height) / to_height))
pad_size = (pad_width, from_height)
pad_top = 0
pad_left = (pad_width - from_width) // 2
if pad_size is not None:
pad_container = Image.new("RGBA", pad_size, padding_color)
pad_container.paste(image, (pad_left, pad_top))
image = pad_container
# Make thumbnail a PNG - required if original isn't one
if filetype != "PNG":
filetype = "PNG"
thumb_path += ".png"
thumb_url += ".png"
# Create the thumbnail.
to_size = (to_width, to_height)
to_pos = (left, top)
try:
image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
image = image.save(thumb_path, filetype, quality=quality, **image_info)
# Push a remote copy of the thumbnail if MEDIA_URL is
# absolute.
if "://" in settings.MEDIA_URL:
with open(thumb_path, "rb") as f:
default_storage.save(unquote(thumb_url), File(f))
except Exception:
# If an error occurred, a corrupted image may have been saved,
# so remove it, otherwise the check for it existing will just
# return the corrupted image next time it's requested.
try:
os.remove(thumb_path)
except Exception:
pass
return image_url
return thumb_url
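# Illustrative sketch (hypothetical helper, not used by the tag above): the
# dimension rules applied inline in thumbnail() - a zero width or height
# preserves the original aspect ratio, and ``upscale=False`` clamps the
# requested size to the source size before the ratio is applied.
def _demo_target_size(from_width, from_height, width, height, upscale=True):
    to_width, to_height = int(width), int(height)
    if not upscale:
        to_width = min(to_width, from_width)
        to_height = min(to_height, from_height)
    if to_width == 0:
        to_width = from_width * to_height // from_height
    elif to_height == 0:
        to_height = from_height * to_width // from_width
    return to_width, to_height
# A 1600x900 source asked for width=800, height=0 keeps the 16:9 ratio:
assert _demo_target_size(1600, 900, 800, 0) == (800, 450)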
@register.inclusion_tag("includes/editable_loader.html", takes_context=True)
def editable_loader(context):
"""
Set up the required JS/CSS for the in-line editing toolbar and controls.
"""
user = context["request"].user
template_vars = {
"has_site_permission": has_site_permission(user),
"request": context["request"],
}
if (settings.INLINE_EDITING_ENABLED and
template_vars["has_site_permission"]):
t = get_template("includes/editable_toolbar.html")
template_vars["REDIRECT_FIELD_NAME"] = REDIRECT_FIELD_NAME
template_vars["editable_obj"] = context.get("editable_obj",
context.get("page", None))
template_vars["accounts_logout_url"] = context.get(
"accounts_logout_url", None)
template_vars["toolbar"] = t.render(template_vars)
template_vars["richtext_media"] = RichTextField().formfield(
).widget.media
return template_vars
@register.filter
def richtext_filters(content):
"""
Takes a value edited via the WYSIWYG editor, and passes it through
each of the functions specified by the RICHTEXT_FILTERS setting.
"""
for filter_name in settings.RICHTEXT_FILTERS:
filter_func = import_dotted_path(filter_name)
content = filter_func(content)
if not isinstance(content, SafeText):
# raise TypeError(
# filter_name + " must mark its return value as safe. See "
# "https://docs.djangoproject.com/en/stable/topics/security/"
# "#cross-site-scripting-xss-protection")
import warnings
warnings.warn(
filter_name + " needs to ensure that any untrusted inputs are "
"properly escaped and mark the html it returns as safe. In a "
"future release this will cause an exception. See "
"https://docs.djangoproject.com/en/stable/topics/security/"
"cross-site-scripting-xss-protection",
FutureWarning)
content = mark_safe(content)
return content
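# Illustrative sketch (hypothetical filter, not shipped with Mezzanine): a
# function referenced from the RICHTEXT_FILTERS setting receives the rich
# text content and returns it, marking the result safe once sanitised so
# the warning above isn't triggered, eg.
# RICHTEXT_FILTERS = ("myapp.filters.demo_nofollow_filter",)
def demo_nofollow_filter(content):
    from django.utils.safestring import mark_safe
    # Naive example only: add rel="nofollow" to anchors, then mark safe.
    return mark_safe(content.replace("<a ", '<a rel="nofollow" '))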
@register.to_end_tag
def editable(parsed, context, token):
"""
Add the required HTML to the parsed content for in-line editing,
such as the icon and edit form if the object is deemed to be
editable - either it has an ``editable`` method which returns
``True``, or the logged in user has change permissions for the
model.
"""
def parse_field(field):
field = field.split(".")
obj = context.get(field.pop(0), None)
attr = field.pop()
while field:
obj = getattr(obj, field.pop(0))
if callable(obj):
# Allows {% editable page.get_content_model.content %}
obj = obj()
return obj, attr
fields = [parse_field(f) for f in token.split_contents()[1:]]
if fields:
fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]]
if not parsed.strip():
try:
parsed = "".join([str(getattr(*field)) for field in fields])
except AttributeError:
pass
if settings.INLINE_EDITING_ENABLED and fields and "request" in context:
obj = fields[0][0]
if isinstance(obj, Model) and is_editable(obj, context["request"]):
field_names = ",".join([f[1] for f in fields])
context["editable_form"] = get_edit_form(obj, field_names)
context["original"] = parsed
t = get_template("includes/editable_form.html")
return t.render(context.flatten())
return parsed
@register.simple_tag
def try_url(url_name):
"""
Mimics Django's ``url`` template tag but fails silently. Used for
url names in admin templates as these won't resolve when admin
tests are running.
"""
from warnings import warn
warn("try_url is deprecated, use the url tag with the 'as' arg instead.")
try:
url = reverse(url_name)
except NoReverseMatch:
return ""
return url
def admin_app_list(request):
"""
Adapted from ``django.contrib.admin.sites.AdminSite.index``.
Returns a list of lists of models grouped and ordered according to
``mezzanine.conf.ADMIN_MENU_ORDER``. Called from the
``admin_dropdown_menu`` template tag as well as the ``app_list``
dashboard widget.
"""
app_dict = {}
# Model or view --> (group index, group title, item index, item title).
menu_order = {}
for (group_index, group) in enumerate(settings.ADMIN_MENU_ORDER):
group_title, items = group
for (item_index, item) in enumerate(items):
if isinstance(item, (tuple, list)):
item_title, item = item
else:
item_title = None
menu_order[item] = (group_index, group_title,
item_index, item_title)
# Add all registered models, using group and title from menu order.
for (model, model_admin) in admin.site._registry.items():
opts = model._meta
in_menu = not hasattr(model_admin, "in_menu") or model_admin.in_menu()
if hasattr(model_admin, "in_menu"):
import warnings
warnings.warn(
'ModelAdmin.in_menu() has been replaced with '
'ModelAdmin.has_module_permission(request). See '
'https://docs.djangoproject.com/en/stable/ref/contrib/admin/'
'#django.contrib.admin.ModelAdmin.has_module_permission.',
DeprecationWarning)
in_menu = in_menu and model_admin.has_module_permission(request)
if in_menu and request.user.has_module_perms(opts.app_label):
admin_url_name = ""
if model_admin.has_change_permission(request):
admin_url_name = "changelist"
change_url = admin_url(model, admin_url_name)
else:
change_url = None
if model_admin.has_add_permission(request):
admin_url_name = "add"
add_url = admin_url(model, admin_url_name)
else:
add_url = None
if admin_url_name:
model_label = "%s.%s" % (opts.app_label, opts.object_name)
try:
app_index, app_title, model_index, model_title = \
menu_order[model_label]
except KeyError:
app_index = None
try:
app_title = opts.app_config.verbose_name.title()
except AttributeError:
# Third party admin classes doing weird things.
# See GH #1628
app_title = ""
model_index = None
model_title = None
else:
del menu_order[model_label]
if not model_title:
model_title = capfirst(model._meta.verbose_name_plural)
if app_title not in app_dict:
app_dict[app_title] = {
"index": app_index,
"name": app_title,
"models": [],
}
app_dict[app_title]["models"].append({
"index": model_index,
"perms": model_admin.get_model_perms(request),
"name": model_title,
"object_name": opts.object_name,
"admin_url": change_url,
"add_url": add_url
})
# Menu may also contain view or url pattern names given as (title, name).
for (item_url, item) in menu_order.items():
app_index, app_title, item_index, item_title = item
try:
item_url = reverse(item_url)
except NoReverseMatch:
continue
if app_title not in app_dict:
app_dict[app_title] = {
"index": app_index,
"name": app_title,
"models": [],
}
app_dict[app_title]["models"].append({
"index": item_index,
"perms": {"custom": True},
"name": item_title,
"admin_url": item_url,
})
app_list = list(app_dict.values())
sort = lambda x: (x["index"] if x["index"] is not None else 999, x["name"])
for app in app_list:
app["models"].sort(key=sort)
app_list.sort(key=sort)
return app_list
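# Illustrative sketch (hypothetical data): items without an explicit position
# in ADMIN_MENU_ORDER get index None above, so the sort key pushes them after
# the explicitly ordered items (999) and falls back to alphabetical order.
_demo_apps = [{"index": None, "name": "Zoo"},
              {"index": 0, "name": "Pages"},
              {"index": None, "name": "Blog"}]
_demo_apps.sort(key=lambda x: (x["index"] if x["index"] is not None else 999,
                               x["name"]))
assert [x["name"] for x in _demo_apps] == ["Pages", "Blog", "Zoo"]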
@register.inclusion_tag("admin/includes/dropdown_menu.html",
takes_context=True)
def admin_dropdown_menu(context):
"""
Renders the app list for the admin dropdown menu navigation.
"""
user = context["request"].user
if user.is_staff:
context["dropdown_menu_app_list"] = admin_app_list(context["request"])
if user.is_superuser:
sites = Site.objects.all()
else:
try:
sites = user.sitepermissions.sites.all()
except ObjectDoesNotExist:
sites = Site.objects.none()
context["dropdown_menu_sites"] = list(sites)
context["dropdown_menu_selected_site_id"] = current_site_id()
return context.flatten()
@register.inclusion_tag("admin/includes/app_list.html", takes_context=True)
def app_list(context):
"""
Renders the app list for the admin dashboard widget.
"""
context["dashboard_app_list"] = admin_app_list(context["request"])
return context.flatten()
@register.inclusion_tag("admin/includes/recent_actions.html",
takes_context=True)
def recent_actions(context):
"""
Renders the recent actions list for the admin dashboard widget.
"""
return context.flatten()
@register.render_tag
def dashboard_column(context, token):
"""
Takes an index for retrieving the sequence of template tags from
``mezzanine.conf.DASHBOARD_TAGS`` to render into the admin
dashboard.
"""
column_index = int(token.split_contents()[1])
output = []
for tag in settings.DASHBOARD_TAGS[column_index]:
t = Template("{%% load %s %%}{%% %s %%}" % tuple(tag.split(".")))
output.append(t.render(context))
return "".join(output)
@register.simple_tag(takes_context=True)
def translate_url(context, language):
"""
Translates the current URL for the given language code, eg:
{% translate_url "de" %}
"""
try:
request = context["request"]
except KeyError:
return ""
view = resolve(request.path)
current_language = translation.get_language()
translation.activate(language)
if not view.namespace and view.url_name == "home":
url = home_slug()
else:
try:
url = reverse(view.func, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
try:
url_name = (view.url_name if not view.namespace
else '%s:%s' % (view.namespace, view.url_name))
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
except NoReverseMatch:
url_name = "admin:" + view.url_name
url = reverse(url_name, args=view.args, kwargs=view.kwargs)
translation.activate(current_language)
qs = context['request'].META.get("QUERY_STRING", "")
if qs:
url += "?" + qs
return url
|
|
"""Test letsencrypt.client.revoker."""
import csv
import os
import pkg_resources
import shutil
import tempfile
import unittest
import mock
from letsencrypt.client import errors
from letsencrypt.client import le_util
from letsencrypt.client.plugins.apache import configurator
from letsencrypt.client.display import util as display_util
class RevokerBase(unittest.TestCase): # pylint: disable=too-few-public-methods
"""Base Class for Revoker Tests."""
def setUp(self):
self.paths, self.certs, self.key_path = create_revoker_certs()
self.backup_dir = tempfile.mkdtemp("cert_backup")
self.mock_config = mock.MagicMock(cert_key_backup=self.backup_dir)
self.list_path = os.path.join(self.backup_dir, "LIST")
def _store_certs(self):
# pylint: disable=protected-access
from letsencrypt.client.revoker import Revoker
Revoker.store_cert_key(self.paths[0], self.key_path, self.mock_config)
Revoker.store_cert_key(self.paths[1], self.key_path, self.mock_config)
# Set metadata
for i in xrange(2):
self.certs[i].add_meta(
i, self.paths[i], self.key_path,
Revoker._get_backup(self.backup_dir, i, self.paths[i]),
Revoker._get_backup(self.backup_dir, i, self.key_path))
def _get_rows(self):
with open(self.list_path, "rb") as csvfile:
return [row for row in csv.reader(csvfile)]
def _write_rows(self, rows):
with open(self.list_path, "wb") as csvfile:
csvwriter = csv.writer(csvfile)
for row in rows:
csvwriter.writerow(row)
class RevokerTest(RevokerBase):
def setUp(self):
from letsencrypt.client.revoker import Revoker
super(RevokerTest, self).setUp()
with open(self.key_path) as key_file:
self.key = le_util.Key(self.key_path, key_file.read())
self._store_certs()
self.revoker = Revoker(
mock.MagicMock(spec=configurator.ApacheConfigurator),
self.mock_config)
def tearDown(self):
shutil.rmtree(self.backup_dir)
@mock.patch("letsencrypt.client.revoker.network."
"Network.send_and_receive_expected")
@mock.patch("letsencrypt.client.revoker.revocation")
def test_revoke_by_key_all(self, mock_display, mock_net):
mock_display().confirm_revocation.return_value = True
self.revoker.revoke_from_key(self.key)
self.assertEqual(self._get_rows(), [])
# Check to make sure backups were eliminated
for i in xrange(2):
self.assertFalse(self._backups_exist(self.certs[i].get_row()))
self.assertEqual(mock_net.call_count, 2)
@mock.patch("letsencrypt.client.revoker.Crypto.PublicKey.RSA.importKey")
def test_revoke_by_invalid_keys(self, mock_import):
mock_import.side_effect = ValueError
self.assertRaises(errors.LetsEncryptRevokerError,
self.revoker.revoke_from_key,
self.key)
mock_import.side_effect = [mock.Mock(), IndexError]
self.assertRaises(errors.LetsEncryptRevokerError,
self.revoker.revoke_from_key,
self.key)
@mock.patch("letsencrypt.client.revoker.network."
"Network.send_and_receive_expected")
@mock.patch("letsencrypt.client.revoker.revocation")
def test_revoke_by_wrong_key(self, mock_display, mock_net):
mock_display().confirm_revocation.return_value = True
key_path = pkg_resources.resource_filename(
"letsencrypt.acme.jose", os.path.join(
"testdata", "rsa256_key.pem"))
wrong_key = le_util.Key(key_path, open(key_path).read())
self.revoker.revoke_from_key(wrong_key)
# Nothing was removed
self.assertEqual(len(self._get_rows()), 2)
# No revocation went through
self.assertEqual(mock_net.call_count, 0)
@mock.patch("letsencrypt.client.revoker.network."
"Network.send_and_receive_expected")
@mock.patch("letsencrypt.client.revoker.revocation")
def test_revoke_by_cert(self, mock_display, mock_net):
mock_display().confirm_revocation.return_value = True
self.revoker.revoke_from_cert(self.paths[1])
row0 = self.certs[0].get_row()
row1 = self.certs[1].get_row()
self.assertEqual(self._get_rows(), [row0])
self.assertTrue(self._backups_exist(row0))
self.assertFalse(self._backups_exist(row1))
self.assertEqual(mock_net.call_count, 1)
@mock.patch("letsencrypt.client.revoker.network."
"Network.send_and_receive_expected")
@mock.patch("letsencrypt.client.revoker.revocation")
def test_revoke_by_cert_not_found(self, mock_display, mock_net):
mock_display().confirm_revocation.return_value = True
self.revoker.revoke_from_cert(self.paths[0])
self.revoker.revoke_from_cert(self.paths[0])
row0 = self.certs[0].get_row()
row1 = self.certs[1].get_row()
# Same check as last time... just reversed.
self.assertEqual(self._get_rows(), [row1])
self.assertTrue(self._backups_exist(row1))
self.assertFalse(self._backups_exist(row0))
self.assertEqual(mock_net.call_count, 1)
@mock.patch("letsencrypt.client.revoker.network."
"Network.send_and_receive_expected")
@mock.patch("letsencrypt.client.revoker.revocation")
def test_revoke_by_menu(self, mock_display, mock_net):
mock_display().confirm_revocation.return_value = True
mock_display.display_certs.side_effect = [
(display_util.HELP, 0),
(display_util.OK, 0),
(display_util.CANCEL, -1),
]
self.revoker.revoke_from_menu()
row0 = self.certs[0].get_row()
row1 = self.certs[1].get_row()
self.assertEqual(self._get_rows(), [row1])
self.assertFalse(self._backups_exist(row0))
self.assertTrue(self._backups_exist(row1))
self.assertEqual(mock_net.call_count, 1)
self.assertEqual(mock_display.more_info_cert.call_count, 1)
@mock.patch("letsencrypt.client.revoker.logging")
@mock.patch("letsencrypt.client.revoker.network."
"Network.send_and_receive_expected")
@mock.patch("letsencrypt.client.revoker.revocation")
def test_revoke_by_menu_delete_all(self, mock_display, mock_net, mock_log):
mock_display().confirm_revocation.return_value = True
mock_display.display_certs.return_value = (display_util.OK, 0)
self.revoker.revoke_from_menu()
self.assertEqual(self._get_rows(), [])
# Everything should be deleted...
for i in xrange(2):
self.assertFalse(self._backups_exist(self.certs[i].get_row()))
self.assertEqual(mock_net.call_count, 2)
# Info is called when there aren't any certs left...
self.assertTrue(mock_log.info.called)
@mock.patch("letsencrypt.client.revoker.revocation")
@mock.patch("letsencrypt.client.revoker.Revoker._acme_revoke")
@mock.patch("letsencrypt.client.revoker.logging")
def test_safe_revoke_acme_fail(self, mock_log, mock_revoke, mock_display):
# pylint: disable=protected-access
mock_revoke.side_effect = errors.LetsEncryptClientError
mock_display().confirm_revocation.return_value = True
self.revoker._safe_revoke(self.certs)
self.assertTrue(mock_log.error.called)
@mock.patch("letsencrypt.client.revoker.Crypto.PublicKey.RSA.importKey")
def test_acme_revoke_failure(self, mock_crypto):
# pylint: disable=protected-access
mock_crypto.side_effect = ValueError
self.assertRaises(errors.LetsEncryptClientError,
self.revoker._acme_revoke,
self.certs[0])
def test_remove_certs_from_list_bad_certs(self):
# pylint: disable=protected-access
from letsencrypt.client.revoker import Cert
new_cert = Cert(self.paths[0])
# This isn't stored in the db
new_cert.idx = 10
new_cert.backup_path = self.paths[0]
new_cert.backup_key_path = self.key_path
new_cert.orig = Cert.PathStatus("false path", "not here")
new_cert.orig_key = Cert.PathStatus("false path", "not here")
self.assertRaises(errors.LetsEncryptRevokerError,
self.revoker._remove_certs_from_list,
[new_cert])
def _backups_exist(self, row):
# pylint: disable=protected-access
cert_path, key_path = self.revoker._row_to_backup(row)
return os.path.isfile(cert_path) and os.path.isfile(key_path)
class RevokerInstallerTest(RevokerBase):
def setUp(self):
super(RevokerInstallerTest, self).setUp()
self.installs = [
["installation/path0a", "installation/path0b"],
["installation/path1"],
]
self.certs_keys = [
(self.paths[0], self.key_path, self.installs[0][0]),
(self.paths[0], self.key_path, self.installs[0][1]),
(self.paths[1], self.key_path, self.installs[1][0]),
]
self._store_certs()
def _get_revoker(self, installer):
from letsencrypt.client.revoker import Revoker
return Revoker(installer, self.mock_config)
def test_no_installer_get_installed_locations(self):
# pylint: disable=protected-access
revoker = self._get_revoker(None)
self.assertEqual(revoker._get_installed_locations(), {})
def test_get_installed_locations(self):
# pylint: disable=protected-access
mock_installer = mock.MagicMock()
mock_installer.get_all_certs_keys.return_value = self.certs_keys
revoker = self._get_revoker(mock_installer)
sha_vh = revoker._get_installed_locations()
self.assertEqual(len(sha_vh), 2)
for i, cert in enumerate(self.certs):
self.assertTrue(cert.get_fingerprint() in sha_vh)
self.assertEqual(
sha_vh[cert.get_fingerprint()], self.installs[i])
@mock.patch("letsencrypt.client.revoker.M2Crypto.X509.load_cert")
def test_get_installed_load_failure(self, mock_m2):
mock_installer = mock.MagicMock()
mock_installer.get_all_certs_keys.return_value = self.certs_keys
mock_m2.side_effect = IOError
revoker = self._get_revoker(mock_installer)
# pylint: disable=protected-access
self.assertEqual(revoker._get_installed_locations(), {})
class RevokerClassMethodsTest(RevokerBase):
def setUp(self):
super(RevokerClassMethodsTest, self).setUp()
self.mock_config = mock.MagicMock(cert_key_backup=self.backup_dir)
def tearDown(self):
shutil.rmtree(self.backup_dir)
def _call(self, cert_path, key_path):
from letsencrypt.client.revoker import Revoker
Revoker.store_cert_key(cert_path, key_path, self.mock_config)
def test_store_two(self):
from letsencrypt.client.revoker import Revoker
self._call(self.paths[0], self.key_path)
self._call(self.paths[1], self.key_path)
self.assertTrue(os.path.isfile(self.list_path))
rows = self._get_rows()
for i, row in enumerate(rows):
# pylint: disable=protected-access
self.assertTrue(os.path.isfile(
Revoker._get_backup(self.backup_dir, i, self.paths[i])))
self.assertTrue(os.path.isfile(
Revoker._get_backup(self.backup_dir, i, self.key_path)))
self.assertEqual([str(i), self.paths[i], self.key_path], row)
self.assertEqual(len(rows), 2)
def test_store_one_mixed(self):
from letsencrypt.client.revoker import Revoker
self._write_rows(
[["5", "blank", "blank"], ["18", "dc", "dc"], ["21", "b", "b"]])
self._call(self.paths[0], self.key_path)
self.assertEqual(
self._get_rows()[3], ["22", self.paths[0], self.key_path])
# pylint: disable=protected-access
self.assertTrue(os.path.isfile(
Revoker._get_backup(self.backup_dir, 22, self.paths[0])))
self.assertTrue(os.path.isfile(
Revoker._get_backup(self.backup_dir, 22, self.key_path)))
class CertTest(unittest.TestCase):
def setUp(self):
self.paths, self.certs, self.key_path = create_revoker_certs()
def test_failed_load(self):
from letsencrypt.client.revoker import Cert
self.assertRaises(errors.LetsEncryptRevokerError, Cert, self.key_path)
def test_no_row(self):
self.assertEqual(self.certs[0].get_row(), None)
def test_meta_moved_files(self):
from letsencrypt.client.revoker import Cert
fake_path = "/not/a/real/path/r72d3t6"
self.certs[0].add_meta(
0, fake_path, fake_path, self.paths[0], self.key_path)
self.assertEqual(self.certs[0].orig.status, Cert.DELETED_MSG)
self.assertEqual(self.certs[0].orig_key.status, Cert.DELETED_MSG)
def test_meta_changed_files(self):
from letsencrypt.client.revoker import Cert
self.certs[0].add_meta(
0, self.paths[1], self.paths[1], self.paths[0], self.key_path)
self.assertEqual(self.certs[0].orig.status, Cert.CHANGED_MSG)
self.assertEqual(self.certs[0].orig_key.status, Cert.CHANGED_MSG)
def test_meta_no_status(self):
self.certs[0].add_meta(
0, self.paths[0], self.key_path, self.paths[0], self.key_path)
self.assertEqual(self.certs[0].orig.status, "")
self.assertEqual(self.certs[0].orig_key.status, "")
def test_print_meta(self):
"""Just make sure there aren't any major errors."""
self.certs[0].add_meta(
0, self.paths[0], self.key_path, self.paths[0], self.key_path)
# Changed path and deleted file
self.certs[1].add_meta(
1, self.paths[0], "/not/a/path", self.paths[1], self.key_path)
self.assertTrue(self.certs[0].pretty_print())
self.assertTrue(self.certs[1].pretty_print())
def test_print_no_meta(self):
self.assertTrue(self.certs[0].pretty_print())
self.assertTrue(self.certs[1].pretty_print())
def create_revoker_certs():
"""Create a few revoker.Cert objects."""
from letsencrypt.client.revoker import Cert
base_package = "letsencrypt.client.tests"
cert0_path = pkg_resources.resource_filename(
base_package, os.path.join("testdata", "cert.pem"))
cert1_path = pkg_resources.resource_filename(
base_package, os.path.join("testdata", "cert-san.pem"))
cert0 = Cert(cert0_path)
cert1 = Cert(cert1_path)
key_path = pkg_resources.resource_filename(
base_package, os.path.join("testdata", "rsa512_key.pem"))
return [cert0_path, cert1_path], [cert0, cert1], key_path
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
|
from pymongo.collection import Collection as PymongoCollection
from pymongo.database import Database as PymongoDatabase
from pymongo.cursor import Cursor as PymongoCursor
from pymongo.common import WriteConcern
from pymongo import read_preferences as rp, helpers
from bson.binary import OLD_UUID_SUBTYPE
from bson.objectid import ObjectId
from bson.json_util import dumps, loads
from bson.son import SON
from collections import deque
import json, uuid, warnings, hashlib
class Collection(PymongoCollection):
def __init__(self, database, name, create=False, **kwargs):
super(Collection, self).__init__(database, name, create, **kwargs)
def __call__(self, *args, **kwargs):
pass
def __getattr__(self, name):
return Collection(self.__database, u"%s.%s" % (self.__name, name))
def ensure_index(self, key_or_list, cache_for=300, **kwargs):
return None
def find(self, *args, **kwargs):
return Cursor(self, *args, **kwargs)
def find_one(self, spec_or_id=None, *args, **kwargs):
try:
cursor = self.find(spec_or_id, *args, **kwargs)
for result in cursor.limit(-1):
return result
except:
return None
def insert(self, to_save, manipulate=True, *args, **kwargs):
if isinstance(to_save, dict):
to_save = [to_save]
for obj in to_save:
if manipulate:
if '_id' not in obj:
obj['_id'] = ObjectId()
ids = self.database.execute(self.name, Database._INSERT, to_save)
if len(ids) == 1:
return ids[0]
else:
return ids
def update(self, document, spec, upsert=False, manipulate=False, safe=None, multi=False, check_keys=True, **kwargs):
response = self.database.execute(self.name, Database._UPDATE, document, spec=spec)
response['updatedExisting'] = False
response['n'] = None
return response
def remove(self, spec_or_id=None, safe=None, multi=True, **kwargs):
self.database.execute(self.name, Database._DELETE, None, spec=spec_or_id)
class Cursor(PymongoCursor):
__id = None
def __init__(self, collection, query, **kwargs):
super(Cursor, self).__init__(collection, **kwargs)
self._query = query
def _refresh(self):
if len(self.__data) or self.__killed:
return len(self.__data)
try:
db = self.__collection.database
self.__data = deque(db.execute(self.__collection.name, Database._QUERY, self._query))
self.__killed = True
return len(self.__data)
except Exception as e:
return 0
def __getitem__(self, index):
self._refresh()
if index >= len(self.__data) or index < 0:
raise Exception('Invalid index')
return self.__data[index]
class Database(PymongoDatabase):
_INSERT = 0
_QUERY = 1
_UPDATE = 2
_DELETE = 3
database_path = None
def __init__(self, connection, name):
super(Database, self).__init__(connection, name)
def __call__(self, *args, **kwargs):
pass
def __getattr__(self, name):
return Collection(self, name)
def error(self):
return None
def command(self, command, value=1,
check=True, allowable_errors=[],
uuid_subtype=OLD_UUID_SUBTYPE, compile_re=True, **kwargs):
cmd = dict()
if command == 'filemd5':
cmd.update(root=kwargs['root'], filemd5=value)
return self._execute_command(cmd)
def _execute_command(self, command):
response = dict()
command = json.loads(dumps(command))
print 'DATABASE COMMAND', command
keys = command.keys()
if 'count' in keys:
result = self.collection.count()
print 'COUNT RESULT', result
# TODO finish
elif 'filemd5' in keys:
collection = '%s.chunks' % command['root']
file_id = loads(json.dumps(command['filemd5']))
chunks = list()
n = 0
chunk = self.execute(collection, Database._QUERY, { 'files_id': file_id, 'n': n })
while len(chunk) > 0:
chunks.append(json.loads(dumps(chunk)))
n += 1
chunk = self.execute(collection, Database._QUERY, { 'files_id': file_id, 'n': n })
if len(chunks) > 0:
filemd5 = hashlib.md5(dumps(chunks)).hexdigest()
response.update(md5=filemd5)
else:
raise Exception(u'No chunks found for file with id %s' % file_id)
return response
def execute(self, collection, operation, content=None, spec=None):
database = open(self.database_path, 'r')
db = json.load(database)
database.close()
response = None
new_content = None
if operation == self._INSERT:
new_content = self.connection.insert(collection, db, content)
ids = list()
for obj in content:
ids.append(obj['_id'])
response = ids
elif operation == self._QUERY:
if collection == '$cmd':
response = self._execute_command(content)
else:
response = self.connection.query(collection, db, content)
elif operation == self._UPDATE:
new_content, new_obj = self.connection.update(collection, db, content, spec)
response = new_obj
elif operation == self._DELETE:
new_content = self.connection.delete(collection, db, spec)
if new_content is not None:
database = open(self.database_path, 'w')
database.write(new_content)
database.close()
return response
class Connection:
def __init__(self):
self.slave_okay = True
self.read_preference = True
self.tag_sets = None
self.secondary_acceptable_latency_ms = 1
self.safe = True
self.uuid_subtype = OLD_UUID_SUBTYPE
self.write_concern = WriteConcern()
self.max_wire_version = 2
self.document_class = None
self.is_mongos = True
self.tz_aware = True
def __getattr__(self, name):
return Database(self, name)
def _ensure_connected(self, sync=False):
pass
def _filter(self, content, spec):
for key, value in spec.iteritems():
if isinstance(value, ObjectId):
value = json.loads(dumps(ObjectId(oid=value)))
remove = list()
for item in content:
if item[key] != value:
remove.append(item)
content = [it for it in content if it not in remove]
return content
def start_request(self):
return self
def end_request(self):
pass
def insert(self, collection, database, to_save):
if database.get(collection) is None:
database[collection] = list()
json_to_save = json.loads(dumps(to_save))
for obj in json_to_save:
exists = [item for item in database[collection] if item.get('_id') == obj['_id']]
if len(exists) == 0:
database[collection].append(obj)
elif len(exists) > 1:
raise Exception('There cannot be two elements with the same id')
else:
exists[0] = obj
return json.dumps(database, indent=4)
def query(self, collection, database, query=None):
response = list()
col = database.get(collection)
if col is not None:
if query:
subcol = list(col)
response = loads(json.dumps(self._filter(subcol, query)))
else:
response = loads(json.dumps(col))
return response
def update(self, collection, database, document, spec):
content = json.loads(dumps(document))
col = database.get(collection)
if col is not None:
for doc in col:
if doc['_id'] == content['_id']:
for key, value in spec.iteritems():
if key == '$set':
for field, fvalue in value.iteritems():
if isinstance(fvalue, ObjectId):
fvalue = json.loads(dumps(fvalue))
doc[field] = fvalue
content = doc
break
else:
raise Exception('Cannot update a document from a nonexistent collection')
return json.dumps(database, indent=4), loads(json.dumps(content))
def delete(self, collection, database, spec):
col = database.get(collection)
if col is not None:
subcol = list(col)
to_delete = self._filter(subcol, spec)
if to_delete:
col = [it for it in col if it not in to_delete]
database[collection] = col
else:
raise Exception('Cannot delete a document from a nonexistent collection')
return json.dumps(database, indent=4)
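# Illustrative sketch (hypothetical usage; Python 2, like the module above,
# since _filter() relies on dict.iteritems()): Connection's insert/query
# helpers operate on a plain dict standing in for the JSON document that
# Database.execute() loads from and rewrites to database_path.
if __name__ == "__main__":
    conn = Connection()
    db = {}
    conn.insert("users", db, [{"_id": "1", "name": "ann"},
                              {"_id": "2", "name": "bob"}])
    found = conn.query("users", db, {"name": "ann"})
    assert [doc["name"] for doc in found] == ["ann"]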
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
from dashboard.pinpoint.models.quest import run_test
_SWARMING_TASK_EXTRA_ARGS = [
'test_suite', '--story-filter', 'test',
'-v', '--upload-results',
'--output-format=chartjson', '--browser=release',
'--isolated-script-test-output=${ISOLATED_OUTDIR}/output.json',
'--isolated-script-test-chartjson-output='
'${ISOLATED_OUTDIR}/chartjson-output.json',
]
class _RunTestTest(unittest.TestCase):
def assertNewTaskHasDimensions(self, swarming_tasks_new):
body = {
'name': 'Pinpoint job on chromium-rel-mac11-pro',
'user': 'Pinpoint',
'priority': '100',
'expiration_secs': '600',
'properties': {
'inputs_ref': {'isolated': 'input isolate hash'},
'extra_args': _SWARMING_TASK_EXTRA_ARGS,
'dimensions': [
{'key': 'pool', 'value': 'Chrome-perf-pinpoint'},
{'key': 'cores', 'value': '8'},
{'key': 'gpu', 'value': '1002:6821'},
{'key': 'os', 'value': 'Mac-10.11'},
],
'execution_timeout_secs': '3600',
'io_timeout_secs': '3600',
},
'tags': [
'configuration:chromium-rel-mac11-pro',
],
}
swarming_tasks_new.assert_called_with(body)
def assertNewTaskHasBotId(self, swarming_tasks_new):
body = {
'name': 'Pinpoint job on chromium-rel-mac11-pro',
'user': 'Pinpoint',
'priority': '100',
'expiration_secs': '600',
'properties': {
'inputs_ref': {'isolated': 'input isolate hash'},
'extra_args': _SWARMING_TASK_EXTRA_ARGS,
'dimensions': [
{'key': 'pool', 'value': 'Chrome-perf-pinpoint'},
{'key': 'id', 'value': 'bot id'},
],
'execution_timeout_secs': '3600',
'io_timeout_secs': '3600',
},
'tags': [
'configuration:chromium-rel-mac11-pro',
],
}
swarming_tasks_new.assert_called_with(body)
@mock.patch('dashboard.services.swarming_service.Tasks.New')
@mock.patch('dashboard.services.swarming_service.Task.Result')
class RunTestFullTest(_RunTestTest):
def testSuccess(self, swarming_task_result, swarming_tasks_new):
# Goes through a full run of two Executions.
# Call RunTest.Start() to create an Execution.
quest = run_test.RunTest('chromium-rel-mac11-pro', 'test_suite', 'test', 1)
execution = quest.Start('input isolate hash')
swarming_task_result.assert_not_called()
swarming_tasks_new.assert_not_called()
# Call the first Poll() to start the swarming task.
swarming_tasks_new.return_value = {'task_id': 'task id'}
execution.Poll()
swarming_task_result.assert_not_called()
swarming_tasks_new.assert_called_once()
self.assertNewTaskHasDimensions(swarming_tasks_new)
self.assertFalse(execution.completed)
self.assertFalse(execution.failed)
# Call subsequent Poll()s to check the task status.
swarming_task_result.return_value = {'state': 'PENDING'}
execution.Poll()
self.assertFalse(execution.completed)
self.assertFalse(execution.failed)
swarming_task_result.return_value = {
'bot_id': 'bot id',
'exit_code': 0,
'failure': False,
'outputs_ref': {'isolated': 'output isolate hash'},
'state': 'COMPLETED',
}
execution.Poll()
self.assertTrue(execution.completed)
self.assertFalse(execution.failed)
self.assertEqual(execution.result_values, (0,))
self.assertEqual(execution.result_arguments,
{'isolate_hash': 'output isolate hash'})
# Start a second Execution to check bot_id handling. We get a bot_id from
# Swarming from the first Execution and reuse it in subsequent Executions.
execution = quest.Start('input isolate hash')
execution.Poll()
self.assertNewTaskHasBotId(swarming_tasks_new)
@mock.patch('dashboard.services.swarming_service.Tasks.New')
class SwarmingTaskStartTest(_RunTestTest):
def testPagesetRepeat(self, swarming_tasks_new):
quest = run_test.RunTest('chromium-rel-mac11-pro', 'test_suite', 'test', 10)
execution = quest.Start('input isolate hash')
execution.Poll()
new_call_body = swarming_tasks_new.call_args[0][0]
self.assertIn('--pageset-repeat', new_call_body['properties']['extra_args'])
self.assertIn('10', new_call_body['properties']['extra_args'])
@mock.patch('dashboard.services.swarming_service.Task.Result')
def testUnknownConfig(self, swarming_task_result, swarming_tasks_new):
quest = run_test.RunTest('configuration', 'test_suite', 'test', 1)
execution = quest.Start('input isolate hash')
execution.Poll()
swarming_task_result.assert_not_called()
swarming_tasks_new.assert_not_called()
self.assertTrue(execution.completed)
self.assertTrue(execution.failed)
self.assertEqual(len(execution.result_values), 1)
self.assertIsInstance(execution.result_values[0],
run_test.UnknownConfigError)
@mock.patch('dashboard.services.swarming_service.Tasks.New')
@mock.patch('dashboard.services.swarming_service.Task.Result')
class SwarmingTaskStatusTest(_RunTestTest):
def testSwarmingError(self, swarming_task_result, swarming_tasks_new):
swarming_task_result.return_value = {'state': 'BOT_DIED'}
swarming_tasks_new.return_value = {'task_id': 'task id'}
quest = run_test.RunTest('chromium-rel-mac11-pro', 'test_suite', 'test', 1)
execution = quest.Start('input isolate hash')
execution.Poll()
execution.Poll()
self.assertTrue(execution.completed)
self.assertTrue(execution.failed)
self.assertEqual(len(execution.result_values), 1)
self.assertIsInstance(execution.result_values[0],
run_test.SwarmingTaskError)
def testTestError(self, swarming_task_result, swarming_tasks_new):
swarming_task_result.return_value = {
'bot_id': 'bot id',
'exit_code': 1,
'failure': True,
'state': 'COMPLETED',
}
swarming_tasks_new.return_value = {'task_id': 'task id'}
quest = run_test.RunTest('chromium-rel-mac11-pro', 'test_suite', 'test', 1)
execution = quest.Start('isolate_hash')
execution.Poll()
execution.Poll()
self.assertTrue(execution.completed)
self.assertTrue(execution.failed)
self.assertEqual(len(execution.result_values), 1)
self.assertIsInstance(execution.result_values[0],
run_test.SwarmingTestError)
@mock.patch('dashboard.services.swarming_service.Tasks.New')
@mock.patch('dashboard.services.swarming_service.Task.Result')
class BotIdHandlingTest(_RunTestTest):
def testFirstExecutionFailedWithNoBotId(
self, swarming_task_result, swarming_tasks_new):
# If the first Execution fails before it gets a bot ID, it's likely it
# couldn't find any device to run on. Subsequent Executions probably
# wouldn't have any better luck, and failing fast is less complex than
# handling retries.
swarming_tasks_new.return_value = {'task_id': 'task id'}
swarming_task_result.return_value = {'state': 'EXPIRED'}
quest = run_test.RunTest('chromium-rel-mac11-pro', 'test_suite', 'test', 1)
execution = quest.Start('input isolate hash')
execution.Poll()
execution.Poll()
swarming_task_result.return_value = {
'bot_id': 'bot id',
'exit_code': 0,
'failure': False,
'outputs_ref': {'isolated': 'output isolate hash'},
'state': 'COMPLETED',
}
execution = quest.Start('input isolate hash')
execution.Poll()
self.assertTrue(execution.completed)
self.assertTrue(execution.failed)
self.assertEqual(len(execution.result_values), 1)
self.assertIsInstance(execution.result_values[0], run_test.RunTestError)
def testSimultaneousExecutions(self, swarming_task_result,
swarming_tasks_new):
# Executions after the first must wait for the first execution to get a bot
# ID. To preserve device affinity, they must use the same bot.
quest = run_test.RunTest('chromium-rel-mac11-pro', 'test_suite', 'test', 1)
execution_1 = quest.Start('input isolate hash')
execution_2 = quest.Start('input isolate hash')
swarming_tasks_new.return_value = {'task_id': 'task id'}
swarming_task_result.return_value = {'state': 'PENDING'}
execution_1.Poll()
execution_2.Poll()
self.assertEqual(swarming_tasks_new.call_count, 1)
swarming_task_result.return_value = {
'bot_id': 'bot id',
'exit_code': 0,
'failure': False,
'outputs_ref': {'isolated': 'output isolate hash'},
'state': 'COMPLETED',
}
execution_1.Poll()
execution_2.Poll()
self.assertEqual(swarming_tasks_new.call_count, 2)
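# NOTE: the mocked payloads above mirror the shape of the Swarming task-result
# responses these quests consume -- a 'state' (e.g. PENDING, COMPLETED,
# BOT_DIED, EXPIRED) plus, for completed runs, 'bot_id', 'exit_code',
# 'failure' and an 'outputs_ref' dict carrying the output isolate hash.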
|
|
#Embedded file name: ACEStream\Policies\RateManager.pyo
import sys
from sets import Set
from threading import RLock
from traceback import print_exc
from ACEStream.Core.simpledefs import *
DEBUG = False
class RateManager:
def __init__(self):
self.lock = RLock()
self.statusmap = {}
self.currenttotal = {}
self.dset = Set()
self.clear_downloadstates()
def add_downloadstate(self, ds):
if DEBUG:
print >> sys.stderr, 'RateManager: add_downloadstate', `(ds.get_download().get_def().get_infohash())`
self.lock.acquire()
try:
d = ds.get_download()
if d not in self.dset:
self.statusmap[ds.get_status()].append(ds)
for dir in [UPLOAD, DOWNLOAD]:
self.currenttotal[dir] += ds.get_current_speed(dir)
self.dset.add(d)
return len(self.dset)
finally:
self.lock.release()
def add_downloadstatelist(self, dslist):
for ds in dslist:
self.add_downloadstate(ds)
def adjust_speeds(self):
self.lock.acquire()
try:
self.calc_and_set_speed_limits(DOWNLOAD)
self.calc_and_set_speed_limits(UPLOAD)
self.clear_downloadstates()
finally:
self.lock.release()
def clear_downloadstates(self):
self.statusmap[DLSTATUS_ALLOCATING_DISKSPACE] = []
self.statusmap[DLSTATUS_WAITING4HASHCHECK] = []
self.statusmap[DLSTATUS_HASHCHECKING] = []
self.statusmap[DLSTATUS_DOWNLOADING] = []
self.statusmap[DLSTATUS_SEEDING] = []
self.statusmap[DLSTATUS_STOPPED] = []
self.statusmap[DLSTATUS_STOPPED_ON_ERROR] = []
self.statusmap[DLSTATUS_REPEXING] = []
for dir in [UPLOAD, DOWNLOAD]:
self.currenttotal[dir] = 0
self.dset.clear()
def calc_and_set_speed_limits(self, direct):
pass
class UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager(RateManager):
def __init__(self):
RateManager.__init__(self)
self.global_max_speed = {}
self.global_max_speed[UPLOAD] = 0.0
self.global_max_speed[DOWNLOAD] = 0.0
self.global_max_seedupload_speed = 0.0
def set_global_max_speed(self, direct, speed):
self.lock.acquire()
self.global_max_speed[direct] = speed
self.lock.release()
def set_global_max_seedupload_speed(self, speed):
self.lock.acquire()
self.global_max_seedupload_speed = speed
self.lock.release()
def calc_and_set_speed_limits(self, dir = UPLOAD):
if DEBUG:
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits', dir
if dir == UPLOAD:
workingset = self.statusmap[DLSTATUS_DOWNLOADING] + self.statusmap[DLSTATUS_SEEDING]
else:
workingset = self.statusmap[DLSTATUS_DOWNLOADING]
if DEBUG:
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits: len workingset', len(workingset)
newws = []
for ds in workingset:
if ds.get_num_peers() > 0:
newws.append(ds)
workingset = newws
if DEBUG:
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits: len active workingset', len(workingset)
if not workingset:
return
globalmaxspeed = self.get_global_max_speed(dir)
if globalmaxspeed == 0:
for ds in workingset:
d = ds.get_download()
d.set_max_speed(dir, d.get_max_desired_speed(dir))
return
if DEBUG:
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits: globalmaxspeed is', globalmaxspeed, dir
todoset = []
for ds in workingset:
d = ds.get_download()
maxdesiredspeed = d.get_max_desired_speed(dir)
if maxdesiredspeed > 0.0:
d.set_max_speed(dir, maxdesiredspeed)
else:
todoset.append(ds)
if len(todoset) > 0:
localmaxspeed = globalmaxspeed / float(len(todoset))
if DEBUG:
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits: localmaxspeed is', localmaxspeed, dir
for ds in todoset:
d = ds.get_download()
d.set_max_speed(dir, localmaxspeed)
def get_global_max_speed(self, dir = UPLOAD):
if dir == UPLOAD and len(self.statusmap[DLSTATUS_DOWNLOADING]) == 0 and len(self.statusmap[DLSTATUS_SEEDING]) > 0:
return self.global_max_seedupload_speed
else:
return self.global_max_speed[dir]
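# NOTE: illustrative sketch only (not from the original module) of the
# division rule above -- downloads without their own desired-speed cap share
# the global limit equally.
def _example_equal_division(globalmaxspeed=300.0, uncapped_downloads=3):
    # mirrors: localmaxspeed = globalmaxspeed / float(len(todoset))
    return globalmaxspeed / float(uncapped_downloads)  # 100.0 each for 3 downloads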
class UserDefinedMaxAlwaysOtherwiseDividedOnDemandRateManager(UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager):
def __init__(self):
UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager.__init__(self)
self.ROOM = 5.0
def calc_and_set_speed_limits(self, dir = UPLOAD):
if DEBUG:
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits', dir
if dir == UPLOAD:
workingset = self.statusmap[DLSTATUS_DOWNLOADING] + self.statusmap[DLSTATUS_SEEDING]
else:
workingset = self.statusmap[DLSTATUS_DOWNLOADING]
if DEBUG:
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits: len workingset', len(workingset)
newws = []
for ds in workingset:
if ds.get_num_peers() > 0:
newws.append(ds)
workingset = newws
if DEBUG:
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits: len new workingset', len(workingset)
for ds in workingset:
d = ds.get_download()
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits: working is', d.get_def().get_name()
if not workingset:
return
globalmaxspeed = self.get_global_max_speed(dir)
if globalmaxspeed == 0:
for ds in workingset:
d = ds.get_download()
d.set_max_speed(dir, d.get_max_desired_speed(dir))
return
if DEBUG:
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits: globalmaxspeed is', globalmaxspeed, dir
todoset = []
for ds in workingset:
d = ds.get_download()
maxdesiredspeed = d.get_max_desired_speed(dir)
if maxdesiredspeed > 0.0:
d.set_max_speed(dir, maxdesiredspeed)
else:
todoset.append(ds)
if len(todoset) > 0:
localmaxspeed = globalmaxspeed / float(len(todoset))
if DEBUG:
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits: localmaxspeed is', localmaxspeed, dir
downloadsatmax = False
downloadsunderutil = False
for ds in todoset:
d = ds.get_download()
currspeed = ds.get_current_speed(dir)
currmaxspeed = d.get_max_speed(dir)
newmaxspeed = currspeed + self.ROOM
if currspeed >= currmaxspeed - 3.0:
downloadsatmax = True
elif newmaxspeed < localmaxspeed:
downloadsunderutil = True
if downloadsatmax and downloadsunderutil:
totalunused = 0.0
todoset2 = []
for ds in todoset:
d = ds.get_download()
currspeed = ds.get_current_speed(dir)
newmaxspeed = currspeed + self.ROOM
if newmaxspeed < localmaxspeed:
totalunused += localmaxspeed - newmaxspeed
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits: Underutil set to', newmaxspeed
d.set_max_speed(dir, newmaxspeed)
else:
todoset2.append(ds)
if len(todoset2) > 0:
pie = float(len(todoset2)) * localmaxspeed + totalunused
piece = pie / float(len(todoset2))
                    for ds in todoset2:
d = ds.get_download()
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits: Overutil set to', piece
d.set_max_speed(dir, piece)
else:
print >> sys.stderr, 'UserDefinedMaxAlwaysOtherwiseDividedOnDemandRateManager: Internal error: No overutilizers anymore?'
else:
for ds in todoset:
d = ds.get_download()
                    print >> sys.stderr, 'RateManager: calc_and_set_speed_limits: Normal set to', localmaxspeed
d.set_max_speed(dir, localmaxspeed)
class UserDefinedMaxAlwaysOtherwiseDividedOverActiveSwarmsRateManager(UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager):
def __init__(self):
UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager.__init__(self)
self.ROOM = 5.0
def calc_and_set_speed_limits(self, dir = UPLOAD):
if DEBUG:
print >> sys.stderr, 'RateManager: calc_and_set_speed_limits', dir
if dir == UPLOAD:
workingset = self.statusmap[DLSTATUS_DOWNLOADING] + self.statusmap[DLSTATUS_SEEDING]
else:
workingset = self.statusmap[DLSTATUS_DOWNLOADING]
if DEBUG:
print >> sys.stderr, 'RateManager: set_lim: len workingset', len(workingset)
newws = []
inactiveset = []
for ds in workingset:
if ds.get_num_nonseeds() > 0:
newws.append(ds)
else:
inactiveset.append(ds)
workingset = newws
if DEBUG:
print >> sys.stderr, 'RateManager: set_lim: len new workingset', len(workingset)
for ds in workingset:
d = ds.get_download()
print >> sys.stderr, 'RateManager: set_lim: working is', d.get_def().get_name()
globalmaxspeed = self.get_global_max_speed(dir)
if DEBUG:
print >> sys.stderr, 'RateManager: set_lim: globalmaxspeed is', globalmaxspeed, dir
if globalmaxspeed == 0:
for ds in workingset:
d = ds.get_download()
d.set_max_speed(dir, d.get_max_desired_speed(dir))
for ds in inactiveset:
d = ds.get_download()
d.set_max_speed(dir, d.get_max_desired_speed(dir))
return
if DEBUG:
print >> sys.stderr, 'RateManager: set_lim: globalmaxspeed is', globalmaxspeed, dir
todoset = []
for ds in workingset:
d = ds.get_download()
maxdesiredspeed = d.get_max_desired_speed(dir)
if maxdesiredspeed > 0.0:
d.set_max_speed(dir, maxdesiredspeed)
else:
todoset.append(ds)
if len(todoset) > 0:
localmaxspeed = globalmaxspeed / float(len(todoset))
if DEBUG:
print >> sys.stderr, 'RateManager: set_lim: localmaxspeed is', localmaxspeed, dir
for ds in todoset:
d = ds.get_download()
if DEBUG:
print >> sys.stderr, 'RateManager: set_lim:', d.get_def().get_name(), 'WorkQ', localmaxspeed
d.set_max_speed(dir, localmaxspeed)
for ds in inactiveset:
d = ds.get_download()
desspeed = d.get_max_desired_speed(dir)
if desspeed == 0:
setspeed = globalmaxspeed
else:
setspeed = min(desspeed, globalmaxspeed)
if DEBUG:
print >> sys.stderr, 'RateManager: set_lim:', d.get_def().get_name(), 'InactQ', setspeed
d.set_max_speed(dir, setspeed)
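# NOTE: illustrative sketch only (not from the original module) of the
# intended polling cycle, assuming `dslist` is the list of DownloadState
# objects delivered by the session's periodic states callback.
def _example_rate_manager_cycle(dslist):
    rm = UserDefinedMaxAlwaysOtherwiseEquallyDividedRateManager()
    rm.set_global_max_speed(DOWNLOAD, 512.0)  # global caps
    rm.set_global_max_speed(UPLOAD, 64.0)
    rm.add_downloadstatelist(dslist)  # snapshot the current download states
    rm.adjust_speeds()  # recompute and apply per-download limits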
|
|
from numpy.testing import (
assert_allclose,
assert_array_equal,
)
import numpy as np
import pytest
from sklearn.datasets import make_classification
from sklearn.compose import make_column_transformer
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.svm import SVR
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import confusion_matrix
# TODO: Remove when https://github.com/numpy/numpy/issues/14397 is resolved
pytestmark = pytest.mark.filterwarnings(
"ignore:In future, it will be an error for 'np.bool_':DeprecationWarning:"
"matplotlib.*"
)
def test_confusion_matrix_display_validation(pyplot):
"""Check that we raise the proper error when validating parameters."""
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=5, random_state=0
)
regressor = SVR().fit(X, y)
y_pred_regressor = regressor.predict(X)
y_pred_classifier = SVC().fit(X, y).predict(X)
err_msg = "ConfusionMatrixDisplay.from_estimator only supports classifiers"
with pytest.raises(ValueError, match=err_msg):
ConfusionMatrixDisplay.from_estimator(regressor, X, y)
err_msg = "Mix type of y not allowed, got types"
with pytest.raises(ValueError, match=err_msg):
# Force `y_true` to be seen as a regression problem
ConfusionMatrixDisplay.from_predictions(y + 0.5, y_pred_classifier)
with pytest.raises(ValueError, match=err_msg):
ConfusionMatrixDisplay.from_predictions(y, y_pred_regressor)
err_msg = "Found input variables with inconsistent numbers of samples"
with pytest.raises(ValueError, match=err_msg):
ConfusionMatrixDisplay.from_predictions(y, y_pred_classifier[::2])
@pytest.mark.parametrize(
"constructor_name", ["from_estimator", "from_predictions"]
)
def test_confusion_matrix_display_invalid_option(pyplot, constructor_name):
"""Check the error raise if an invalid parameter value is passed."""
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=5, random_state=0
)
classifier = SVC().fit(X, y)
y_pred = classifier.predict(X)
# safe guard for the binary if/else construction
assert constructor_name in ("from_estimator", "from_predictions")
extra_params = {"normalize": "invalid"}
err_msg = r"normalize must be one of \{'true', 'pred', 'all', None\}"
with pytest.raises(ValueError, match=err_msg):
if constructor_name == "from_estimator":
ConfusionMatrixDisplay.from_estimator(
classifier, X, y, **extra_params
)
else:
ConfusionMatrixDisplay.from_predictions(
y, y_pred, **extra_params
)
@pytest.mark.parametrize(
"constructor_name", ["from_estimator", "from_predictions"]
)
@pytest.mark.parametrize("with_labels", [True, False])
@pytest.mark.parametrize("with_display_labels", [True, False])
def test_confusion_matrix_display_custom_labels(
pyplot, constructor_name, with_labels, with_display_labels
):
"""Check the resulting plot when labels are given."""
n_classes = 5
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
)
classifier = SVC().fit(X, y)
y_pred = classifier.predict(X)
# safe guard for the binary if/else construction
assert constructor_name in ("from_estimator", "from_predictions")
ax = pyplot.gca()
labels = [2, 1, 0, 3, 4] if with_labels else None
display_labels = ["b", "d", "a", "e", "f"] if with_display_labels else None
cm = confusion_matrix(y, y_pred, labels=labels)
common_kwargs = {
"ax": ax,
"display_labels": display_labels,
"labels": labels,
}
if constructor_name == "from_estimator":
disp = ConfusionMatrixDisplay.from_estimator(
classifier, X, y, **common_kwargs
)
else:
disp = ConfusionMatrixDisplay.from_predictions(
y, y_pred, **common_kwargs
)
assert_allclose(disp.confusion_matrix, cm)
if with_display_labels:
expected_display_labels = display_labels
elif with_labels:
expected_display_labels = labels
else:
expected_display_labels = list(range(n_classes))
expected_display_labels_str = [str(name)
for name in expected_display_labels]
x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()]
y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()]
assert_array_equal(disp.display_labels, expected_display_labels)
assert_array_equal(x_ticks, expected_display_labels_str)
assert_array_equal(y_ticks, expected_display_labels_str)
@pytest.mark.parametrize(
"constructor_name", ["from_estimator", "from_predictions"]
)
@pytest.mark.parametrize("normalize", ["true", "pred", "all", None])
@pytest.mark.parametrize("include_values", [True, False])
def test_confusion_matrix_display_plotting(
pyplot, constructor_name, normalize, include_values,
):
"""Check the overall plotting rendering."""
n_classes = 5
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
)
classifier = SVC().fit(X, y)
y_pred = classifier.predict(X)
# safe guard for the binary if/else construction
assert constructor_name in ("from_estimator", "from_predictions")
ax = pyplot.gca()
cmap = "plasma"
cm = confusion_matrix(y, y_pred)
common_kwargs = {
"normalize": normalize,
"cmap": cmap,
"ax": ax,
"include_values": include_values,
}
if constructor_name == "from_estimator":
disp = ConfusionMatrixDisplay.from_estimator(
classifier, X, y, **common_kwargs
)
else:
disp = ConfusionMatrixDisplay.from_predictions(
y, y_pred, **common_kwargs
)
assert disp.ax_ == ax
if normalize == "true":
cm = cm / cm.sum(axis=1, keepdims=True)
elif normalize == "pred":
cm = cm / cm.sum(axis=0, keepdims=True)
elif normalize == "all":
cm = cm / cm.sum()
assert_allclose(disp.confusion_matrix, cm)
import matplotlib as mpl
assert isinstance(disp.im_, mpl.image.AxesImage)
assert disp.im_.get_cmap().name == cmap
assert isinstance(disp.ax_, pyplot.Axes)
assert isinstance(disp.figure_, pyplot.Figure)
assert disp.ax_.get_ylabel() == "True label"
assert disp.ax_.get_xlabel() == "Predicted label"
x_ticks = [tick.get_text() for tick in disp.ax_.get_xticklabels()]
y_ticks = [tick.get_text() for tick in disp.ax_.get_yticklabels()]
expected_display_labels = list(range(n_classes))
expected_display_labels_str = [
str(name) for name in expected_display_labels
]
assert_array_equal(disp.display_labels, expected_display_labels)
assert_array_equal(x_ticks, expected_display_labels_str)
assert_array_equal(y_ticks, expected_display_labels_str)
image_data = disp.im_.get_array().data
assert_allclose(image_data, cm)
if include_values:
assert disp.text_.shape == (n_classes, n_classes)
fmt = ".2g"
expected_text = np.array([format(v, fmt) for v in cm.ravel(order="C")])
text_text = np.array(
[t.get_text() for t in disp.text_.ravel(order="C")]
)
assert_array_equal(expected_text, text_text)
else:
assert disp.text_ is None
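def test_confusion_matrix_normalize_modes_example():
    """Illustrative example (not part of the original suite): the three
    `normalize` modes checked above, worked on a hand-written 2x2 matrix."""
    cm = np.array([[3.0, 1.0], [2.0, 4.0]])
    # normalize="true": each row (true class) sums to 1
    assert_allclose(
        cm / cm.sum(axis=1, keepdims=True), [[0.75, 0.25], [1.0 / 3, 2.0 / 3]]
    )
    # normalize="pred": each column (predicted class) sums to 1
    assert_allclose(
        cm / cm.sum(axis=0, keepdims=True), [[0.6, 0.2], [0.4, 0.8]]
    )
    # normalize="all": the whole matrix sums to 1
    assert_allclose(cm / cm.sum(), cm / 10.0)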
@pytest.mark.parametrize(
"constructor_name", ["from_estimator", "from_predictions"]
)
def test_confusion_matrix_display(pyplot, constructor_name):
"""Check the behaviour of the default constructor without using the class
methods."""
n_classes = 5
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
)
classifier = SVC().fit(X, y)
y_pred = classifier.predict(X)
# safe guard for the binary if/else construction
assert constructor_name in ("from_estimator", "from_predictions")
cm = confusion_matrix(y, y_pred)
common_kwargs = {
"normalize": None,
"include_values": True,
"cmap": "viridis",
"xticks_rotation": 45.0,
}
if constructor_name == "from_estimator":
disp = ConfusionMatrixDisplay.from_estimator(
classifier, X, y, **common_kwargs
)
else:
disp = ConfusionMatrixDisplay.from_predictions(
y, y_pred, **common_kwargs
)
assert_allclose(disp.confusion_matrix, cm)
assert disp.text_.shape == (n_classes, n_classes)
rotations = [tick.get_rotation() for tick in disp.ax_.get_xticklabels()]
assert_allclose(rotations, 45.0)
image_data = disp.im_.get_array().data
assert_allclose(image_data, cm)
disp.plot(cmap="plasma")
assert disp.im_.get_cmap().name == "plasma"
disp.plot(include_values=False)
assert disp.text_ is None
disp.plot(xticks_rotation=90.0)
rotations = [tick.get_rotation() for tick in disp.ax_.get_xticklabels()]
assert_allclose(rotations, 90.0)
disp.plot(values_format="e")
expected_text = np.array([format(v, "e") for v in cm.ravel(order="C")])
text_text = np.array([t.get_text() for t in disp.text_.ravel(order="C")])
assert_array_equal(expected_text, text_text)
def test_confusion_matrix_contrast(pyplot):
"""Check that the text color is appropriate depending on background."""
cm = np.eye(2) / 2
disp = ConfusionMatrixDisplay(cm, display_labels=[0, 1])
disp.plot(cmap=pyplot.cm.gray)
# diagonal text is black
assert_allclose(disp.text_[0, 0].get_color(), [0.0, 0.0, 0.0, 1.0])
assert_allclose(disp.text_[1, 1].get_color(), [0.0, 0.0, 0.0, 1.0])
# off-diagonal text is white
assert_allclose(disp.text_[0, 1].get_color(), [1.0, 1.0, 1.0, 1.0])
assert_allclose(disp.text_[1, 0].get_color(), [1.0, 1.0, 1.0, 1.0])
disp.plot(cmap=pyplot.cm.gray_r)
# diagonal text is white
assert_allclose(disp.text_[0, 1].get_color(), [0.0, 0.0, 0.0, 1.0])
assert_allclose(disp.text_[1, 0].get_color(), [0.0, 0.0, 0.0, 1.0])
# off-diagonal text is black
assert_allclose(disp.text_[0, 0].get_color(), [1.0, 1.0, 1.0, 1.0])
assert_allclose(disp.text_[1, 1].get_color(), [1.0, 1.0, 1.0, 1.0])
# Regression test for #15920
cm = np.array([[19, 34], [32, 58]])
disp = ConfusionMatrixDisplay(cm, display_labels=[0, 1])
disp.plot(cmap=pyplot.cm.Blues)
min_color = pyplot.cm.Blues(0)
max_color = pyplot.cm.Blues(255)
assert_allclose(disp.text_[0, 0].get_color(), max_color)
assert_allclose(disp.text_[0, 1].get_color(), max_color)
assert_allclose(disp.text_[1, 0].get_color(), max_color)
assert_allclose(disp.text_[1, 1].get_color(), min_color)
@pytest.mark.parametrize(
"clf",
[
LogisticRegression(),
make_pipeline(StandardScaler(), LogisticRegression()),
make_pipeline(
make_column_transformer((StandardScaler(), [0, 1])),
LogisticRegression(),
),
],
ids=["clf", "pipeline-clf", "pipeline-column_transformer-clf"]
)
def test_confusion_matrix_pipeline(pyplot, clf):
"""Check the behaviour of the plotting with more complex pipeline."""
n_classes = 5
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
)
with pytest.raises(NotFittedError):
ConfusionMatrixDisplay.from_estimator(clf, X, y)
clf.fit(X, y)
y_pred = clf.predict(X)
disp = ConfusionMatrixDisplay.from_estimator(clf, X, y)
cm = confusion_matrix(y, y_pred)
assert_allclose(disp.confusion_matrix, cm)
assert disp.text_.shape == (n_classes, n_classes)
@pytest.mark.parametrize(
"constructor_name", ["from_estimator", "from_predictions"]
)
def test_confusion_matrix_with_unknown_labels(pyplot, constructor_name):
"""Check that when labels=None, the unique values in `y_pred` and `y_true`
will be used.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/pull/18405
"""
n_classes = 5
X, y = make_classification(
n_samples=100, n_informative=5, n_classes=n_classes, random_state=0
)
classifier = SVC().fit(X, y)
y_pred = classifier.predict(X)
    # introduce labels in `y_true` that were not seen during fitting and are
    # not present in `classifier.classes_`
y = y + 1
# safe guard for the binary if/else construction
assert constructor_name in ("from_estimator", "from_predictions")
common_kwargs = {"labels": None}
if constructor_name == "from_estimator":
disp = ConfusionMatrixDisplay.from_estimator(
classifier, X, y, **common_kwargs
)
else:
disp = ConfusionMatrixDisplay.from_predictions(
y, y_pred, **common_kwargs
)
display_labels = [tick.get_text() for tick in disp.ax_.get_xticklabels()]
expected_labels = [str(i) for i in range(n_classes + 1)]
assert_array_equal(expected_labels, display_labels)
|
|
# orm/dependency.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Relationship dependencies.
"""
from .. import sql, util, exc as sa_exc
from . import attributes, exc, sync, unitofwork, \
util as mapperutil
from .interfaces import ONETOMANY, MANYTOONE, MANYTOMANY
class DependencyProcessor(object):
def __init__(self, prop):
self.prop = prop
self.cascade = prop.cascade
self.mapper = prop.mapper
self.parent = prop.parent
self.secondary = prop.secondary
self.direction = prop.direction
self.post_update = prop.post_update
self.passive_deletes = prop.passive_deletes
self.passive_updates = prop.passive_updates
self.enable_typechecks = prop.enable_typechecks
if self.passive_deletes:
self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE
else:
self._passive_delete_flag = attributes.PASSIVE_OFF
if self.passive_updates:
self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE
else:
self._passive_update_flag = attributes.PASSIVE_OFF
self.key = prop.key
if not self.prop.synchronize_pairs:
raise sa_exc.ArgumentError(
"Can't build a DependencyProcessor for relationship %s. "
"No target attributes to populate between parent and "
"child are present" %
self.prop)
@classmethod
def from_relationship(cls, prop):
return _direction_to_processor[prop.direction](prop)
def hasparent(self, state):
"""return True if the given object instance has a parent,
according to the ``InstrumentedAttribute`` handled by this
``DependencyProcessor``.
"""
return self.parent.class_manager.get_impl(self.key).hasparent(state)
def per_property_preprocessors(self, uow):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states in
the aggregate.
"""
uow.register_preprocessor(self, True)
def per_property_flush_actions(self, uow):
after_save = unitofwork.ProcessAll(uow, self, False, True)
before_delete = unitofwork.ProcessAll(uow, self, True, True)
parent_saves = unitofwork.SaveUpdateAll(
uow,
self.parent.primary_base_mapper
)
child_saves = unitofwork.SaveUpdateAll(
uow,
self.mapper.primary_base_mapper
)
parent_deletes = unitofwork.DeleteAll(
uow,
self.parent.primary_base_mapper
)
child_deletes = unitofwork.DeleteAll(
uow,
self.mapper.primary_base_mapper
)
self.per_property_dependencies(uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete
)
def per_state_flush_actions(self, uow, states, isdelete):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states
individually. This occurs only if there are cycles
in the 'aggregated' version of events.
"""
parent_base_mapper = self.parent.primary_base_mapper
child_base_mapper = self.mapper.primary_base_mapper
child_saves = unitofwork.SaveUpdateAll(uow, child_base_mapper)
child_deletes = unitofwork.DeleteAll(uow, child_base_mapper)
# locate and disable the aggregate processors
# for this dependency
if isdelete:
before_delete = unitofwork.ProcessAll(uow, self, True, True)
before_delete.disabled = True
else:
after_save = unitofwork.ProcessAll(uow, self, False, True)
after_save.disabled = True
# check if the "child" side is part of the cycle
if child_saves not in uow.cycles:
# based on the current dependencies we use, the saves/
# deletes should always be in the 'cycles' collection
# together. if this changes, we will have to break up
# this method a bit more.
assert child_deletes not in uow.cycles
# child side is not part of the cycle, so we will link per-state
# actions to the aggregate "saves", "deletes" actions
child_actions = [
(child_saves, False), (child_deletes, True)
]
child_in_cycles = False
else:
child_in_cycles = True
# check if the "parent" side is part of the cycle
if not isdelete:
parent_saves = unitofwork.SaveUpdateAll(
uow,
self.parent.base_mapper)
parent_deletes = before_delete = None
if parent_saves in uow.cycles:
parent_in_cycles = True
else:
parent_deletes = unitofwork.DeleteAll(
uow,
self.parent.base_mapper)
parent_saves = after_save = None
if parent_deletes in uow.cycles:
parent_in_cycles = True
# now create actions /dependencies for each state.
for state in states:
# detect if there's anything changed or loaded
# by a preprocessor on this state/attribute. In the
# case of deletes we may try to load missing items here as well.
sum_ = state.manager[self.key].impl.get_all_pending(
state, state.dict,
self._passive_delete_flag
if isdelete
else attributes.PASSIVE_NO_INITIALIZE)
if not sum_:
continue
if isdelete:
before_delete = unitofwork.ProcessState(uow,
self, True, state)
if parent_in_cycles:
parent_deletes = unitofwork.DeleteState(
uow,
state,
parent_base_mapper)
else:
after_save = unitofwork.ProcessState(uow, self, False, state)
if parent_in_cycles:
parent_saves = unitofwork.SaveUpdateState(
uow,
state,
parent_base_mapper)
if child_in_cycles:
child_actions = []
for child_state, child in sum_:
if child_state not in uow.states:
child_action = (None, None)
else:
(deleted, listonly) = uow.states[child_state]
if deleted:
child_action = (
unitofwork.DeleteState(
uow, child_state,
child_base_mapper),
True)
else:
child_action = (
unitofwork.SaveUpdateState(
uow, child_state,
child_base_mapper),
False)
child_actions.append(child_action)
# establish dependencies between our possibly per-state
# parent action and our possibly per-state child action.
for child_action, childisdelete in child_actions:
self.per_state_dependencies(uow, parent_saves,
parent_deletes,
child_action,
after_save, before_delete,
isdelete, childisdelete)
def presort_deletes(self, uowcommit, states):
return False
def presort_saves(self, uowcommit, states):
return False
def process_deletes(self, uowcommit, states):
pass
def process_saves(self, uowcommit, states):
pass
def prop_has_changes(self, uowcommit, states, isdelete):
if not isdelete or self.passive_deletes:
passive = attributes.PASSIVE_NO_INITIALIZE
elif self.direction is MANYTOONE:
passive = attributes.PASSIVE_NO_FETCH_RELATED
else:
passive = attributes.PASSIVE_OFF
for s in states:
# TODO: add a high speed method
# to InstanceState which returns: attribute
# has a non-None value, or had one
history = uowcommit.get_attribute_history(
s,
self.key,
passive)
if history and not history.empty():
return True
else:
return states and \
not self.prop._is_self_referential and \
self.mapper in uowcommit.mappers
def _verify_canload(self, state):
if self.prop.uselist and state is None:
raise exc.FlushError(
"Can't flush None value found in "
"collection %s" % (self.prop, ))
elif state is not None and \
not self.mapper._canload(
state, allow_subtypes=not self.enable_typechecks):
if self.mapper._canload(state, allow_subtypes=True):
raise exc.FlushError('Attempting to flush an item of type '
'%(x)s as a member of collection '
'"%(y)s". Expected an object of type '
'%(z)s or a polymorphic subclass of '
'this type. If %(x)s is a subclass of '
'%(z)s, configure mapper "%(zm)s" to '
'load this subtype polymorphically, or '
'set enable_typechecks=False to allow '
'any subtype to be accepted for flush. '
% {
'x': state.class_,
'y': self.prop,
'z': self.mapper.class_,
'zm': self.mapper,
})
else:
raise exc.FlushError(
'Attempting to flush an item of type '
'%(x)s as a member of collection '
'"%(y)s". Expected an object of type '
'%(z)s or a polymorphic subclass of '
'this type.' % {
'x': state.class_,
'y': self.prop,
'z': self.mapper.class_,
})
def _synchronize(self, state, child, associationrow,
clearkeys, uowcommit):
raise NotImplementedError()
def _get_reversed_processed_set(self, uow):
if not self.prop._reverse_property:
return None
process_key = tuple(sorted(
[self.key] +
[p.key for p in self.prop._reverse_property]
))
return uow.memo(
('reverse_key', process_key),
set
)
def _post_update(self, state, uowcommit, related, is_m2o_delete=False):
for x in related:
if not is_m2o_delete or x is not None:
uowcommit.issue_post_update(
state,
[r for l, r in self.prop.synchronize_pairs]
)
break
def _pks_changed(self, uowcommit, state):
raise NotImplementedError()
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.prop)
class OneToManyDP(DependencyProcessor):
def per_property_dependencies(self, uow, parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
):
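        # each (a, b) pair added to uow.dependencies below is an ordering
        # edge: a is flushed/processed before b.  in the plain case the
        # parent saves run before this processor (after_save) populates the
        # child foreign keys, which in turn runs before the child saves and
        # deletes are flushed.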
if self.post_update:
child_post_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
False)
child_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
True)
uow.dependencies.update([
(child_saves, after_save),
(parent_saves, after_save),
(after_save, child_post_updates),
(before_delete, child_pre_updates),
(child_pre_updates, parent_deletes),
(child_pre_updates, child_deletes),
])
else:
uow.dependencies.update([
(parent_saves, after_save),
(after_save, child_saves),
(after_save, child_deletes),
(child_saves, parent_deletes),
(child_deletes, parent_deletes),
(before_delete, child_saves),
(before_delete, child_deletes),
])
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
isdelete, childisdelete):
if self.post_update:
child_post_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
False)
child_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
True)
# TODO: this whole block is not covered
# by any tests
if not isdelete:
if childisdelete:
uow.dependencies.update([
(child_action, after_save),
(after_save, child_post_updates),
])
else:
uow.dependencies.update([
(save_parent, after_save),
(child_action, after_save),
(after_save, child_post_updates),
])
else:
if childisdelete:
uow.dependencies.update([
(before_delete, child_pre_updates),
(child_pre_updates, delete_parent),
])
else:
uow.dependencies.update([
(before_delete, child_pre_updates),
(child_pre_updates, delete_parent),
])
elif not isdelete:
uow.dependencies.update([
(save_parent, after_save),
(after_save, child_action),
(save_parent, child_action)
])
else:
uow.dependencies.update([
(before_delete, child_action),
(child_action, delete_parent)
])
def presort_deletes(self, uowcommit, states):
        # head object is being deleted, and we manage its list of
        # child objects; the child objects have to have their
        # foreign key to the parent set to NULL
should_null_fks = not self.cascade.delete and \
not self.passive_deletes == 'all'
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if child is not None and self.hasparent(child) is False:
if self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=True)
else:
uowcommit.register_object(child)
if should_null_fks:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(
child, operation="delete", prop=self.prop)
def presort_saves(self, uowcommit, states):
children_added = uowcommit.memo(('children_added', self), set)
for state in states:
pks_changed = self._pks_changed(uowcommit, state)
if not pks_changed or self.passive_updates:
passive = attributes.PASSIVE_NO_INITIALIZE
else:
passive = attributes.PASSIVE_OFF
history = uowcommit.get_attribute_history(
state,
self.key,
passive)
if history:
for child in history.added:
if child is not None:
uowcommit.register_object(child, cancel_delete=True,
operation="add",
prop=self.prop)
children_added.update(history.added)
for child in history.deleted:
if not self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=False,
operation='delete',
prop=self.prop)
elif self.hasparent(child) is False:
uowcommit.register_object(
child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
'delete', child):
uowcommit.register_object(
st_,
isdelete=True)
if pks_changed:
if history:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(
child,
False,
self.passive_updates,
operation="pk change",
prop=self.prop)
def process_deletes(self, uowcommit, states):
        # head object is being deleted, and we manage its list of
        # child objects; the child objects have to have their foreign
        # key to the parent set to NULL.  this phase can be called
        # safely for any cascade but is unnecessary if delete cascade
        # is on.
if self.post_update or not self.passive_deletes == 'all':
children_added = uowcommit.memo(('children_added', self), set)
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if child is not None and \
self.hasparent(child) is False:
self._synchronize(
state,
child,
None, True,
uowcommit, False)
if self.post_update and child:
self._post_update(child, uowcommit, [state])
if self.post_update or not self.cascade.delete:
for child in set(history.unchanged).\
difference(children_added):
if child is not None:
self._synchronize(
state,
child,
None, True,
uowcommit, False)
if self.post_update and child:
self._post_update(child,
uowcommit,
[state])
# technically, we can even remove each child from the
# collection here too. but this would be a somewhat
# inconsistent behavior since it wouldn't happen
# if the old parent wasn't deleted but child was moved.
def process_saves(self, uowcommit, states):
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.added:
self._synchronize(state, child, None,
False, uowcommit, False)
if child is not None and self.post_update:
self._post_update(child, uowcommit, [state])
for child in history.deleted:
if not self.cascade.delete_orphan and \
not self.hasparent(child):
self._synchronize(state, child, None, True,
uowcommit, False)
if self._pks_changed(uowcommit, state):
for child in history.unchanged:
self._synchronize(state, child, None,
False, uowcommit, True)
def _synchronize(self, state, child,
associationrow, clearkeys, uowcommit,
pks_changed):
source = state
dest = child
self._verify_canload(child)
if dest is None or \
(not self.post_update and uowcommit.is_deleted(dest)):
return
if clearkeys:
sync.clear(dest, self.mapper, self.prop.synchronize_pairs)
else:
sync.populate(source, self.parent, dest, self.mapper,
self.prop.synchronize_pairs, uowcommit,
self.passive_updates and pks_changed)
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
uowcommit,
state,
self.parent,
self.prop.synchronize_pairs)
class ManyToOneDP(DependencyProcessor):
def __init__(self, prop):
DependencyProcessor.__init__(self, prop)
self.mapper._dependency_processors.append(DetectKeySwitch(prop))
def per_property_dependencies(self, uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete):
if self.post_update:
parent_post_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
False)
parent_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
True)
uow.dependencies.update([
(child_saves, after_save),
(parent_saves, after_save),
(after_save, parent_post_updates),
(after_save, parent_pre_updates),
(before_delete, parent_pre_updates),
(parent_pre_updates, child_deletes),
])
else:
uow.dependencies.update([
(child_saves, after_save),
(after_save, parent_saves),
(parent_saves, child_deletes),
(parent_deletes, child_deletes)
])
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
isdelete, childisdelete):
if self.post_update:
if not isdelete:
parent_post_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
False)
if childisdelete:
uow.dependencies.update([
(after_save, parent_post_updates),
(parent_post_updates, child_action)
])
else:
uow.dependencies.update([
(save_parent, after_save),
(child_action, after_save),
(after_save, parent_post_updates)
])
else:
parent_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
True)
uow.dependencies.update([
(before_delete, parent_pre_updates),
(parent_pre_updates, delete_parent),
(parent_pre_updates, child_action)
])
elif not isdelete:
if not childisdelete:
uow.dependencies.update([
(child_action, after_save),
(after_save, save_parent),
])
else:
uow.dependencies.update([
(after_save, save_parent),
])
else:
if childisdelete:
uow.dependencies.update([
(delete_parent, child_action)
])
def presort_deletes(self, uowcommit, states):
if self.cascade.delete or self.cascade.delete_orphan:
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
if self.cascade.delete_orphan:
todelete = history.sum()
else:
todelete = history.non_deleted()
for child in todelete:
if child is None:
continue
uowcommit.register_object(
child, isdelete=True,
operation="delete", prop=self.prop)
t = self.mapper.cascade_iterator('delete', child)
for c, m, st_, dct_ in t:
uowcommit.register_object(
st_, isdelete=True)
def presort_saves(self, uowcommit, states):
for state in states:
uowcommit.register_object(state, operation="add", prop=self.prop)
if self.cascade.delete_orphan:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(
child, isdelete=True,
operation="delete", prop=self.prop)
t = self.mapper.cascade_iterator('delete', child)
for c, m, st_, dct_ in t:
uowcommit.register_object(st_, isdelete=True)
def process_deletes(self, uowcommit, states):
if self.post_update and \
not self.cascade.delete_orphan and \
not self.passive_deletes == 'all':
# post_update means we have to update our
# row to not reference the child object
# before we can DELETE the row
for state in states:
self._synchronize(state, None, None, True, uowcommit)
if state and self.post_update:
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
self._post_update(
state, uowcommit, history.sum(),
is_m2o_delete=True)
def process_saves(self, uowcommit, states):
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
if history.added:
for child in history.added:
self._synchronize(state, child, None, False,
uowcommit, "add")
if self.post_update:
self._post_update(state, uowcommit, history.sum())
def _synchronize(self, state, child, associationrow,
clearkeys, uowcommit, operation=None):
if state is None or \
(not self.post_update and uowcommit.is_deleted(state)):
return
if operation is not None and \
child is not None and \
not uowcommit.session._contains_state(child):
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed" %
(mapperutil.state_class_str(child), operation, self.prop))
return
if clearkeys or child is None:
sync.clear(state, self.parent, self.prop.synchronize_pairs)
else:
self._verify_canload(child)
sync.populate(child, self.mapper, state,
self.parent,
self.prop.synchronize_pairs,
uowcommit,
False)
class DetectKeySwitch(DependencyProcessor):
"""For many-to-one relationships with no one-to-many backref,
searches for parents through the unit of work when a primary
key has changed and updates them.
Theoretically, this approach could be expanded to support transparent
deletion of objects referenced via many-to-one as well, although
the current attribute system doesn't do enough bookkeeping for this
to be efficient.
"""
def per_property_preprocessors(self, uow):
if self.prop._reverse_property:
if self.passive_updates:
return
else:
if False in (prop.passive_updates for
prop in self.prop._reverse_property):
return
uow.register_preprocessor(self, False)
def per_property_flush_actions(self, uow):
parent_saves = unitofwork.SaveUpdateAll(
uow,
self.parent.base_mapper)
after_save = unitofwork.ProcessAll(uow, self, False, False)
uow.dependencies.update([
(parent_saves, after_save)
])
def per_state_flush_actions(self, uow, states, isdelete):
pass
def presort_deletes(self, uowcommit, states):
pass
def presort_saves(self, uow, states):
if not self.passive_updates:
# for non-passive updates, register in the preprocess stage
# so that mapper save_obj() gets a hold of changes
self._process_key_switches(states, uow)
def prop_has_changes(self, uow, states, isdelete):
if not isdelete and self.passive_updates:
d = self._key_switchers(uow, states)
return bool(d)
return False
def process_deletes(self, uowcommit, states):
assert False
def process_saves(self, uowcommit, states):
        # for passive updates, register objects in the process stage
        # so that we avoid ManyToOneDP's registering the object without
        # the listonly flag in its own preprocess stage (which results in
        # UPDATE statements being emitted)
assert self.passive_updates
self._process_key_switches(states, uowcommit)
def _key_switchers(self, uow, states):
switched, notswitched = uow.memo(
('pk_switchers', self),
lambda: (set(), set())
)
allstates = switched.union(notswitched)
for s in states:
if s not in allstates:
if self._pks_changed(uow, s):
switched.add(s)
else:
notswitched.add(s)
return switched
def _process_key_switches(self, deplist, uowcommit):
switchers = self._key_switchers(uowcommit, deplist)
if switchers:
# if primary key values have actually changed somewhere, perform
# a linear search through the UOW in search of a parent.
for state in uowcommit.session.identity_map.all_states():
if not issubclass(state.class_, self.parent.class_):
continue
dict_ = state.dict
related = state.get_impl(self.key).get(
state, dict_, passive=self._passive_update_flag)
if related is not attributes.PASSIVE_NO_RESULT and \
related is not None:
related_state = attributes.instance_state(dict_[self.key])
if related_state in switchers:
uowcommit.register_object(state,
False,
self.passive_updates)
sync.populate(
related_state,
self.mapper, state,
self.parent, self.prop.synchronize_pairs,
uowcommit, self.passive_updates)
def _pks_changed(self, uowcommit, state):
return bool(state.key) and sync.source_modified(
uowcommit, state, self.mapper, self.prop.synchronize_pairs)
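# NOTE: illustrative sketch only (not from the original module) of the mapping
# shape DetectKeySwitch exists for -- a many-to-one relationship with no
# one-to-many backref whose parent primary key can change.  the model names
# below are assumptions for the example only.
def _example_detect_key_switch_mapping():
    from sqlalchemy import Column, ForeignKey, Integer, String
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship
    Base = declarative_base()
    class User(Base):
        __tablename__ = 'user'
        name = Column(String(50), primary_key=True)
    class Address(Base):
        __tablename__ = 'address'
        id = Column(Integer, primary_key=True)
        user_name = Column(String(50), ForeignKey('user.name'))
        # only the many-to-one side is mapped; with passive_updates=False a
        # change to User.name is propagated to referencing Address rows in
        # Python at flush time rather than via ON UPDATE CASCADE.
        user = relationship(User, passive_updates=False)
    return User, Address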
class ManyToManyDP(DependencyProcessor):
def per_property_dependencies(self, uow, parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete
):
uow.dependencies.update([
(parent_saves, after_save),
(child_saves, after_save),
(after_save, child_deletes),
# a rowswitch on the parent from deleted to saved
# can make this one occur, as the "save" may remove
# an element from the
# "deleted" list before we have a chance to
# process its child rows
(before_delete, parent_saves),
(before_delete, parent_deletes),
(before_delete, child_deletes),
(before_delete, child_saves),
])
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
isdelete, childisdelete):
if not isdelete:
if childisdelete:
uow.dependencies.update([
(save_parent, after_save),
(after_save, child_action),
])
else:
uow.dependencies.update([
(save_parent, after_save),
(child_action, after_save),
])
else:
uow.dependencies.update([
(before_delete, child_action),
(before_delete, delete_parent)
])
def presort_deletes(self, uowcommit, states):
# TODO: no tests fail if this whole
# thing is removed !!!!
if not self.passive_deletes:
# if no passive deletes, load history on
# the collection, so that prop_has_changes()
# returns True
for state in states:
uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
def presort_saves(self, uowcommit, states):
if not self.passive_updates:
# if no passive updates, load history on
# each collection where parent has changed PK,
# so that prop_has_changes() returns True
for state in states:
if self._pks_changed(uowcommit, state):
history = uowcommit.get_attribute_history(
state,
self.key,
attributes.PASSIVE_OFF)
if not self.cascade.delete_orphan:
return
# check for child items removed from the collection
# if delete_orphan check is turned on.
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(
child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
'delete',
child):
uowcommit.register_object(
st_, isdelete=True)
def process_deletes(self, uowcommit, states):
secondary_delete = []
secondary_insert = []
secondary_update = []
processed = self._get_reversed_processed_set(uowcommit)
tmp = set()
for state in states:
# this history should be cached already, as
# we loaded it in preprocess_deletes
history = uowcommit.get_attribute_history(
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.non_added():
if child is None or \
(processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(
state,
child,
associationrow,
False, uowcommit, "delete"):
continue
secondary_delete.append(associationrow)
tmp.update((c, state) for c in history.non_added())
if processed is not None:
processed.update(tmp)
self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
def process_saves(self, uowcommit, states):
secondary_delete = []
secondary_insert = []
secondary_update = []
processed = self._get_reversed_processed_set(uowcommit)
tmp = set()
for state in states:
need_cascade_pks = not self.passive_updates and \
self._pks_changed(uowcommit, state)
if need_cascade_pks:
passive = attributes.PASSIVE_OFF
else:
passive = attributes.PASSIVE_NO_INITIALIZE
history = uowcommit.get_attribute_history(state, self.key,
passive)
if history:
for child in history.added:
if (processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(state,
child,
associationrow,
False, uowcommit, "add"):
continue
secondary_insert.append(associationrow)
for child in history.deleted:
if (processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(state,
child,
associationrow,
False, uowcommit, "delete"):
continue
secondary_delete.append(associationrow)
tmp.update((c, state)
for c in history.added + history.deleted)
if need_cascade_pks:
for child in history.unchanged:
associationrow = {}
sync.update(state,
self.parent,
associationrow,
"old_",
self.prop.synchronize_pairs)
sync.update(child,
self.mapper,
associationrow,
"old_",
self.prop.secondary_synchronize_pairs)
secondary_update.append(associationrow)
if processed is not None:
processed.update(tmp)
self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
def _run_crud(self, uowcommit, secondary_insert,
secondary_update, secondary_delete):
connection = uowcommit.transaction.connection(self.mapper)
if secondary_delete:
associationrow = secondary_delete[0]
statement = self.secondary.delete(sql.and_(*[
c == sql.bindparam(c.key, type_=c.type)
for c in self.secondary.c
if c.key in associationrow
]))
result = connection.execute(statement, secondary_delete)
if result.supports_sane_multi_rowcount() and \
result.rowcount != len(secondary_delete):
raise exc.StaleDataError(
"DELETE statement on table '%s' expected to delete "
"%d row(s); Only %d were matched." %
(self.secondary.description, len(secondary_delete),
result.rowcount)
)
if secondary_update:
associationrow = secondary_update[0]
statement = self.secondary.update(sql.and_(*[
c == sql.bindparam("old_" + c.key, type_=c.type)
for c in self.secondary.c
if c.key in associationrow
]))
result = connection.execute(statement, secondary_update)
if result.supports_sane_multi_rowcount() and \
result.rowcount != len(secondary_update):
raise exc.StaleDataError(
"UPDATE statement on table '%s' expected to update "
"%d row(s); Only %d were matched." %
(self.secondary.description, len(secondary_update),
result.rowcount)
)
if secondary_insert:
statement = self.secondary.insert()
connection.execute(statement, secondary_insert)
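    # for a secondary table such as user_keyword(user_id, keyword_id) -- a
    # hypothetical example -- the construct above emits e.g.
    #   DELETE FROM user_keyword WHERE user_id = :user_id AND keyword_id = :keyword_id
    # executed once with the whole secondary_delete list as an executemany
    # parameter set; the UPDATE variant binds the previous values under the
    # "old_" prefix populated in process_saves().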
def _synchronize(self, state, child, associationrow,
clearkeys, uowcommit, operation):
# this checks for None if uselist=True
self._verify_canload(child)
# but if uselist=False we get here. If child is None,
# no association row can be generated, so return.
if child is None:
return False
if child is not None and not uowcommit.session._contains_state(child):
if not child.deleted:
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed" %
(mapperutil.state_class_str(child), operation, self.prop))
return False
sync.populate_dict(state, self.parent, associationrow,
self.prop.synchronize_pairs)
sync.populate_dict(child, self.mapper, associationrow,
self.prop.secondary_synchronize_pairs)
return True
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
uowcommit,
state,
self.parent,
self.prop.synchronize_pairs)
_direction_to_processor = {
ONETOMANY: OneToManyDP,
MANYTOONE: ManyToOneDP,
MANYTOMANY: ManyToManyDP,
}
|
|
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
General gateway tests.
@since: 0.1.0
"""
import unittest
import sys
import pyamf
from pyamf import remoting
from pyamf.remoting import gateway, amf0
class TestService(object):
def spam(self):
return 'spam'
def echo(self, x):
return x
class FaultTestCase(unittest.TestCase):
def test_create(self):
x = remoting.ErrorFault()
self.assertEquals(x.code, '')
self.assertEquals(x.details, '')
self.assertEquals(x.description, '')
x = remoting.ErrorFault(code=404, details='Not Found', description='Spam eggs')
self.assertEquals(x.code, 404)
self.assertEquals(x.details, 'Not Found')
self.assertEquals(x.description, 'Spam eggs')
def test_build(self):
fault = None
try:
raise TypeError("Unknown type")
except TypeError:
fault = amf0.build_fault(*sys.exc_info())
self.assertTrue(isinstance(fault, remoting.ErrorFault))
self.assertEquals(fault.level, 'error')
self.assertEquals(fault.code, 'TypeError')
self.assertEquals(fault.details, None)
def test_build_traceback(self):
fault = None
try:
raise TypeError("Unknown type")
except TypeError:
fault = amf0.build_fault(include_traceback=True, *sys.exc_info())
self.assertTrue(isinstance(fault, remoting.ErrorFault))
self.assertEquals(fault.level, 'error')
self.assertEquals(fault.code, 'TypeError')
self.assertTrue("\\n" not in fault.details)
def test_encode(self):
encoder = pyamf.get_encoder(pyamf.AMF0)
decoder = pyamf.get_decoder(pyamf.AMF0)
decoder.stream = encoder.stream
try:
raise TypeError("Unknown type")
except TypeError:
encoder.writeElement(amf0.build_fault(*sys.exc_info()))
buffer = encoder.stream
buffer.seek(0, 0)
fault = decoder.readElement()
old_fault = amf0.build_fault(*sys.exc_info())
self.assertEquals(fault.level, old_fault.level)
self.assertEquals(fault.type, old_fault.type)
self.assertEquals(fault.code, old_fault.code)
self.assertEquals(fault.details, old_fault.details)
self.assertEquals(fault.description, old_fault.description)
def test_explicit_code(self):
class X(Exception):
_amf_code = 'Server.UnknownResource'
try:
raise X()
except X:
fault = amf0.build_fault(*sys.exc_info())
self.assertEquals(fault.code, 'Server.UnknownResource')
class ServiceWrapperTestCase(unittest.TestCase):
def test_create(self):
x = gateway.ServiceWrapper('blah')
self.assertEquals(x.service, 'blah')
def test_create_preprocessor(self):
x = gateway.ServiceWrapper('blah', preprocessor=ord)
self.assertEquals(x.preprocessor, ord)
def test_cmp(self):
x = gateway.ServiceWrapper('blah')
y = gateway.ServiceWrapper('blah')
z = gateway.ServiceWrapper('bleh')
self.assertEquals(x, y)
self.assertNotEquals(y, z)
def test_call(self):
def add(x, y):
self.assertEquals(x, 1)
self.assertEquals(y, 2)
return x + y
x = gateway.ServiceWrapper(add)
self.assertTrue(callable(x))
self.assertEquals(x(None, [1, 2]), 3)
x = gateway.ServiceWrapper('blah')
self.assertRaises(gateway.UnknownServiceMethodError, x, None, [])
x = gateway.ServiceWrapper(TestService)
self.assertRaises(gateway.UnknownServiceMethodError, x, None, [])
self.assertEquals(x('spam', []), 'spam')
self.assertRaises(gateway.UnknownServiceMethodError, x, 'xyx', [])
self.assertRaises(gateway.InvalidServiceMethodError, x, '_private', [])
self.assertEquals(x('echo', [x]), x)
class ServiceRequestTestCase(unittest.TestCase):
def test_create(self):
sw = gateway.ServiceWrapper(TestService)
request = remoting.Envelope()
x = gateway.ServiceRequest(request, sw, None)
self.assertEquals(x.request, request)
self.assertEquals(x.service, sw)
self.assertEquals(x.method, None)
def test_call(self):
sw = gateway.ServiceWrapper(TestService)
request = remoting.Envelope()
x = gateway.ServiceRequest(request, sw, None)
self.assertRaises(gateway.UnknownServiceMethodError, x)
x = gateway.ServiceRequest(request, sw, 'spam')
self.assertEquals(x(), 'spam')
x = gateway.ServiceRequest(request, sw, 'echo')
self.assertEquals(x(x), x)
class ServiceCollectionTestCase(unittest.TestCase):
def test_contains(self):
x = gateway.ServiceCollection()
self.assertFalse(TestService in x)
self.assertFalse('spam.eggs' in x)
x['spam.eggs'] = gateway.ServiceWrapper(TestService)
self.assertTrue(TestService in x)
self.assertTrue('spam.eggs' in x)
class BaseGatewayTestCase(unittest.TestCase):
def test_create(self):
x = gateway.BaseGateway()
self.assertEquals(x.services, {})
x = gateway.BaseGateway({})
self.assertEquals(x.services, {})
x = gateway.BaseGateway({})
self.assertEquals(x.services, {})
x = gateway.BaseGateway({'x': TestService})
self.assertEquals(x.services, {'x': TestService})
x = gateway.BaseGateway({}, timezone_offset=-180)
self.assertEquals(x.timezone_offset, -180)
self.assertRaises(TypeError, gateway.BaseGateway, [])
self.assertRaises(TypeError, gateway.BaseGateway, foo='bar')
def test_add_service(self):
gw = gateway.BaseGateway()
self.assertEquals(gw.services, {})
gw.addService(TestService)
self.assertTrue(TestService in gw.services)
self.assertTrue('TestService' in gw.services)
del gw.services['TestService']
gw.addService(TestService, 'spam.eggs')
self.assertTrue(TestService in gw.services)
self.assertTrue('spam.eggs' in gw.services)
del gw.services['spam.eggs']
class SpamService(object):
def __str__(self):
return 'spam'
def __call__(*args, **kwargs):
pass
x = SpamService()
gw.addService(x)
self.assertTrue(x in gw.services)
self.assertTrue('spam' in gw.services)
del gw.services['spam']
self.assertEquals(gw.services, {})
self.assertRaises(TypeError, gw.addService, 1)
import new
temp = new.module('temp')
gw.addService(temp)
self.assertTrue(temp in gw.services)
self.assertTrue('temp' in gw.services)
del gw.services['temp']
self.assertEquals(gw.services, {})
def test_remove_service(self):
gw = gateway.BaseGateway({'test': TestService})
self.assertTrue('test' in gw.services)
wrapper = gw.services['test']
gw.removeService('test')
self.assertFalse('test' in gw.services)
self.assertFalse(TestService in gw.services)
self.assertFalse(wrapper in gw.services)
self.assertEquals(gw.services, {})
gw = gateway.BaseGateway({'test': TestService})
self.assertTrue(TestService in gw.services)
wrapper = gw.services['test']
gw.removeService(TestService)
self.assertFalse('test' in gw.services)
self.assertFalse(TestService in gw.services)
self.assertFalse(wrapper in gw.services)
self.assertEquals(gw.services, {})
gw = gateway.BaseGateway({'test': TestService})
self.assertTrue(TestService in gw.services)
wrapper = gw.services['test']
gw.removeService(wrapper)
self.assertFalse('test' in gw.services)
self.assertFalse(TestService in gw.services)
self.assertFalse(wrapper in gw.services)
self.assertEquals(gw.services, {})
self.assertRaises(NameError, gw.removeService, 'test')
self.assertRaises(NameError, gw.removeService, TestService)
self.assertRaises(NameError, gw.removeService, wrapper)
def test_service_request(self):
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
message = remoting.Request('spam', [], envelope=envelope)
self.assertRaises(gateway.UnknownServiceError, gw.getServiceRequest,
message, 'spam')
message = remoting.Request('test.spam', [], envelope=envelope)
sr = gw.getServiceRequest(message, 'test.spam')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEquals(sr.request, envelope)
self.assertEquals(sr.service, TestService)
self.assertEquals(sr.method, 'spam')
message = remoting.Request('test')
sr = gw.getServiceRequest(message, 'test')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEquals(sr.request, None)
self.assertEquals(sr.service, TestService)
self.assertEquals(sr.method, None)
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
message = remoting.Request('test')
sr = gw.getServiceRequest(message, 'test')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEquals(sr.request, None)
self.assertEquals(sr.service, TestService)
self.assertEquals(sr.method, None)
# try to access an unknown service
message = remoting.Request('spam')
self.assertRaises(gateway.UnknownServiceError, gw.getServiceRequest,
message, 'spam')
# check x.x calls
message = remoting.Request('test.test')
sr = gw.getServiceRequest(message, 'test.test')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEquals(sr.request, None)
self.assertEquals(sr.service, TestService)
self.assertEquals(sr.method, 'test')
def test_long_service_name(self):
gw = gateway.BaseGateway({'a.c.b.d': TestService})
envelope = remoting.Envelope()
message = remoting.Request('a.c.b.d', [], envelope=envelope)
sr = gw.getServiceRequest(message, 'a.c.b.d.spam')
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
self.assertEquals(sr.request, envelope)
self.assertEquals(sr.service, TestService)
self.assertEquals(sr.method, 'spam')
def test_get_response(self):
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
self.assertRaises(NotImplementedError, gw.getResponse, envelope)
def test_process_request(self):
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
request = remoting.Request('test.spam', envelope=envelope)
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Response))
self.assertEquals(response.status, remoting.STATUS_OK)
self.assertEquals(response.body, 'spam')
def test_unknown_service(self):
# Test a non-existent service call
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
request = remoting.Request('nope', envelope=envelope)
processor = gw.getProcessor(request)
response = processor(request)
self.assertFalse(gw.debug)
self.assertTrue(isinstance(response, remoting.Message))
self.assertEquals(response.status, remoting.STATUS_ERROR)
self.assertTrue(isinstance(response.body, remoting.ErrorFault))
self.assertEquals(response.body.code, 'Service.ResourceNotFound')
self.assertEquals(response.body.description, 'Unknown service nope')
self.assertEquals(response.body.details, None)
def test_debug_traceback(self):
# Test a non-existent service call
gw = gateway.BaseGateway({'test': TestService}, debug=True)
envelope = remoting.Envelope()
# Test a non-existent service call
request = remoting.Request('nope', envelope=envelope)
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Message))
self.assertEquals(response.status, remoting.STATUS_ERROR)
self.assertTrue(isinstance(response.body, remoting.ErrorFault))
self.assertEquals(response.body.code, 'Service.ResourceNotFound')
self.assertEquals(response.body.description, 'Unknown service nope')
self.assertNotEquals(response.body.details, None)
def test_malformed_credentials_header(self):
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
request = remoting.Request('test.spam', envelope=envelope)
request.headers['Credentials'] = {'spam': 'eggs'}
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Response))
self.assertEquals(response.status, remoting.STATUS_ERROR)
self.assertTrue(isinstance(response.body, remoting.ErrorFault))
self.assertEquals(response.body.code, 'KeyError')
def test_authenticate(self):
gw = gateway.BaseGateway({'test': TestService})
sr = gateway.ServiceRequest(None, gw.services['test'], None)
self.assertTrue(gw.authenticateRequest(sr, None, None))
def auth(u, p):
if u == 'spam' and p == 'eggs':
return True
return False
gw = gateway.BaseGateway({'test': TestService}, authenticator=auth)
self.assertFalse(gw.authenticateRequest(sr, None, None))
self.assertTrue(gw.authenticateRequest(sr, 'spam', 'eggs'))
class QueryBrowserTestCase(unittest.TestCase):
def test_request(self):
gw = gateway.BaseGateway()
echo = lambda x: x
gw.addService(echo, 'echo', description='This is a test')
envelope = remoting.Envelope()
request = remoting.Request('echo')
envelope['/1'] = request
request.headers['DescribeService'] = None
processor = gw.getProcessor(request)
response = processor(request)
self.assertEquals(response.status, remoting.STATUS_OK)
self.assertEquals(response.body, 'This is a test')
class AuthenticatorTestCase(unittest.TestCase):
def setUp(self):
self.called = False
def tearDown(self):
if self.called is False:
self.fail("authenticator not called")
def _auth(self, username, password):
self.called = True
if username == 'fred' and password == 'wilma':
return True
return False
def test_gateway(self):
gw = gateway.BaseGateway(authenticator=self._auth)
echo = lambda x: x
gw.addService(echo, 'echo')
envelope = remoting.Envelope()
request = remoting.Request('echo', body=['spam'])
envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
envelope['/1'] = request
processor = gw.getProcessor(request)
response = processor(request)
self.assertEquals(response.status, remoting.STATUS_OK)
self.assertEquals(response.body, 'spam')
def test_service(self):
gw = gateway.BaseGateway()
echo = lambda x: x
gw.addService(echo, 'echo', authenticator=self._auth)
envelope = remoting.Envelope()
request = remoting.Request('echo', body=['spam'])
envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
envelope['/1'] = request
processor = gw.getProcessor(request)
response = processor(request)
self.assertEquals(response.status, remoting.STATUS_OK)
self.assertEquals(response.body, 'spam')
def test_class_decorator(self):
class TestService:
def echo(self, x):
return x
TestService.echo = gateway.authenticate(TestService.echo, self._auth)
gw = gateway.BaseGateway({'test': TestService})
envelope = remoting.Envelope()
request = remoting.Request('test.echo', body=['spam'])
envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
envelope['/1'] = request
processor = gw.getProcessor(request)
response = processor(request)
self.assertEquals(response.status, remoting.STATUS_OK)
self.assertEquals(response.body, 'spam')
def test_func_decorator(self):
def echo(x):
return x
echo = gateway.authenticate(echo, self._auth)
gw = gateway.BaseGateway({'echo': echo})
envelope = remoting.Envelope()
request = remoting.Request('echo', body=['spam'])
envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
envelope['/1'] = request
processor = gw.getProcessor(request)
response = processor(request)
self.assertEquals(response.status, remoting.STATUS_OK)
self.assertEquals(response.body, 'spam')
def test_expose_request_decorator(self):
def echo(x):
return x
def exposed_auth(request, username, password):
return self._auth(username, password)
exposed_auth = gateway.expose_request(exposed_auth)
echo = gateway.authenticate(echo, exposed_auth)
gw = gateway.BaseGateway({'echo': echo})
envelope = remoting.Envelope()
request = remoting.Request('echo', body=['spam'])
envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
envelope['/1'] = request
processor = gw.getProcessor(request)
response = processor(request)
self.assertEquals(response.status, remoting.STATUS_OK)
self.assertEquals(response.body, 'spam')
def test_expose_request_keyword(self):
def echo(x):
return x
def exposed_auth(request, username, password):
return self._auth(username, password)
echo = gateway.authenticate(echo, exposed_auth, expose_request=True)
gw = gateway.BaseGateway({'echo': echo})
envelope = remoting.Envelope()
request = remoting.Request('echo', body=['spam'])
envelope.headers['Credentials'] = dict(userid='fred', password='wilma')
envelope['/1'] = request
processor = gw.getProcessor(request)
response = processor(request)
self.assertEquals(response.status, remoting.STATUS_OK)
self.assertEquals(response.body, 'spam')
class ExposeRequestTestCase(unittest.TestCase):
def test_default(self):
gw = gateway.BaseGateway()
gw.addService(lambda x: x, 'test')
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(envelope, gw.services['test'], None)
self.assertFalse(gw.mustExposeRequest(service_request))
def test_gateway(self):
gw = gateway.BaseGateway(expose_request=True)
gw.addService(lambda x: x, 'test')
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(envelope, gw.services['test'], None)
self.assertTrue(gw.mustExposeRequest(service_request))
def test_service(self):
gw = gateway.BaseGateway()
gw.addService(lambda x: x, 'test', expose_request=True)
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(envelope, gw.services['test'], None)
self.assertTrue(gw.mustExposeRequest(service_request))
def test_decorator(self):
def echo(x):
return x
gateway.expose_request(echo)
gw = gateway.BaseGateway()
gw.addService(echo, 'test')
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(envelope, gw.services['test'], None)
self.assertTrue(gw.mustExposeRequest(service_request))
class PreProcessingTestCase(unittest.TestCase):
def _preproc(self):
pass
def test_default(self):
gw = gateway.BaseGateway()
gw.addService(lambda x: x, 'test')
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(envelope, gw.services['test'], None)
self.assertEquals(gw.getPreprocessor(service_request), None)
def test_global(self):
gw = gateway.BaseGateway(preprocessor=self._preproc)
gw.addService(lambda x: x, 'test')
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(envelope, gw.services['test'], None)
self.assertEquals(gw.getPreprocessor(service_request), self._preproc)
def test_service(self):
gw = gateway.BaseGateway()
gw.addService(lambda x: x, 'test', preprocessor=self._preproc)
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(envelope, gw.services['test'], None)
self.assertEquals(gw.getPreprocessor(service_request), self._preproc)
def test_decorator(self):
def echo(x):
return x
gateway.preprocess(echo, self._preproc)
gw = gateway.BaseGateway()
gw.addService(echo, 'test')
envelope = remoting.Envelope()
request = remoting.Request('test')
envelope['/1'] = request
service_request = gateway.ServiceRequest(envelope, gw.services['test'], None)
self.assertEquals(gw.getPreprocessor(service_request), self._preproc)
def test_call(self):
def preproc(sr, *args):
self.called = True
self.assertEquals(args, tuple())
self.assertTrue(isinstance(sr, gateway.ServiceRequest))
gw = gateway.BaseGateway({'test': TestService}, preprocessor=preproc)
envelope = remoting.Envelope()
request = remoting.Request('test.spam', envelope=envelope)
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Response))
self.assertEquals(response.status, remoting.STATUS_OK)
self.assertEquals(response.body, 'spam')
self.assertTrue(self.called)
def test_fail(self):
def preproc(sr, *args):
raise IndexError
gw = gateway.BaseGateway({'test': TestService}, preprocessor=preproc)
envelope = remoting.Envelope()
request = remoting.Request('test.spam', envelope=envelope)
processor = gw.getProcessor(request)
response = processor(request)
self.assertTrue(isinstance(response, remoting.Response))
self.assertEquals(response.status, remoting.STATUS_ERROR)
def suite():
suite = unittest.TestSuite()
# basics first
suite.addTest(unittest.makeSuite(FaultTestCase))
suite.addTest(unittest.makeSuite(ServiceWrapperTestCase))
suite.addTest(unittest.makeSuite(ServiceRequestTestCase))
suite.addTest(unittest.makeSuite(ServiceCollectionTestCase))
suite.addTest(unittest.makeSuite(BaseGatewayTestCase))
suite.addTest(unittest.makeSuite(QueryBrowserTestCase))
suite.addTest(unittest.makeSuite(AuthenticatorTestCase))
suite.addTest(unittest.makeSuite(ExposeRequestTestCase))
suite.addTest(unittest.makeSuite(PreProcessingTestCase))
try:
import wsgiref
except ImportError:
wsgiref = None
if wsgiref:
from pyamf.tests.gateway import test_wsgi
suite.addTest(test_wsgi.suite())
try:
from twisted import web
except ImportError:
web = None
if web:
from pyamf.tests.gateway import test_twisted
suite.addTest(test_twisted.suite())
try:
import django
except ImportError:
django = None
if django:
from pyamf.tests.gateway import test_django
suite.addTest(test_django.suite())
try:
from google.appengine.ext import webapp
except ImportError:
try:
import dev_appserver
sys.path = dev_appserver.EXTRA_PATHS + sys.path
from google.appengine.ext import webapp
except ImportError:
webapp = None
if webapp:
from pyamf.tests.gateway import test_google
suite.addTest(test_google.suite())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for generative models used to derive intrinsic rewards.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import cv2
from dopamine.discrete_domains import atari_lib
import gin
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim
PSEUDO_COUNT_QUANTIZATION_FACTOR = 8
PSEUDO_COUNT_OBSERVATION_SHAPE = (42, 42)
NATURE_DQN_OBSERVATION_SHAPE = atari_lib.NATURE_DQN_OBSERVATION_SHAPE
@slim.add_arg_scope
def masked_conv2d(inputs, num_outputs, kernel_size,
activation_fn=tf.nn.relu,
weights_initializer=tf.initializers.glorot_normal(),
biases_initializer=tf.initializers.zeros(),
stride=(1, 1),
scope=None,
mask_type='A',
collection=None,
output_multiplier=1):
"""Creates masked convolutions used in PixelCNN.
There are two types of masked convolutions, type A and B, see Figure 1 in
https://arxiv.org/abs/1606.05328 for more details.
Args:
inputs: input image.
num_outputs: int, number of filters used in the convolution.
kernel_size: int, size of convolution kernel.
activation_fn: activation function used after the convolution.
weights_initializer: distribution used to initialize the kernel.
biases_initializer: distribution used to initialize biases.
stride: convolution stride.
scope: name of the tensorflow scope.
mask_type: type of masked convolution, must be A or B.
collection: tf variables collection.
output_multiplier: number of convolutional network stacks.
Returns:
frame post convolution.
"""
assert mask_type in ('A', 'B') and num_outputs % output_multiplier == 0
num_inputs = int(inputs.get_shape()[-1])
kernel_shape = tuple(kernel_size) + (num_inputs, num_outputs)
strides = (1,) + tuple(stride) + (1,)
biases_shape = [num_outputs]
mask_list = [np.zeros(
tuple(kernel_size) + (num_inputs, num_outputs // output_multiplier),
dtype=np.float32) for _ in range(output_multiplier)]
for i in range(output_multiplier):
# Mask type A
if kernel_shape[0] > 1:
mask_list[i][:kernel_shape[0]//2] = 1.0
if kernel_shape[1] > 1:
mask_list[i][kernel_shape[0]//2, :kernel_shape[1]//2] = 1.0
# Mask type B
if mask_type == 'B':
mask_list[i][kernel_shape[0]//2, kernel_shape[1]//2] = 1.0
mask_values = np.concatenate(mask_list, axis=3)
with tf.variable_scope(scope):
w = tf.get_variable('W', kernel_shape, trainable=True,
initializer=weights_initializer)
b = tf.get_variable('biases', biases_shape, trainable=True,
initializer=biases_initializer)
if collection is not None:
tf.add_to_collection(collection, w)
tf.add_to_collection(collection, b)
mask = tf.constant(mask_values, dtype=tf.float32)
mask.set_shape(kernel_shape)
convolution = tf.nn.conv2d(inputs, mask * w, strides, padding='SAME')
convolution_bias = tf.nn.bias_add(convolution, b)
if activation_fn is not None:
convolution_bias = activation_fn(convolution_bias)
return convolution_bias
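# --- Illustrative sketch (not part of the original module) ------------------
# A minimal example, assuming a TF1 graph is being built, of how the two mask
# types are usually combined in a PixelCNN-style stack: one type-'A'
# convolution first (the output at pixel (i, j) must not see the input value
# at (i, j)), followed by type-'B' convolutions that may reuse the features
# already computed at (i, j).  The helper name is hypothetical.
def _masked_stack_sketch(frame, hidden_units=16, num_layers=2):
  """Builds a toy stack of masked convolutions on an NHWC float32 tensor."""
  net = masked_conv2d(frame, hidden_units, [7, 7], mask_type='A',
                      activation_fn=tf.nn.relu, scope='sketch_conv_a')
  for i in range(num_layers):
    net = masked_conv2d(net, hidden_units, [3, 3], mask_type='B',
                        activation_fn=tf.nn.relu,
                        scope='sketch_conv_b_%d' % i)
  return net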
def gating_layer(x, embedding, hidden_units, scope_name=''):
"""Create the gating layer used in the PixelCNN architecture."""
with tf.variable_scope(scope_name):
out = masked_conv2d(x, 2*hidden_units, [3, 3],
mask_type='B',
activation_fn=None,
output_multiplier=2,
scope='masked_conv')
out += slim.conv2d(embedding, 2*hidden_units, [1, 1],
activation_fn=None)
out = tf.reshape(out, [-1, 2])
out = tf.tanh(out[:, 0]) + tf.sigmoid(out[:, 1])
return tf.reshape(out, x.get_shape())
@gin.configurable
class CTSIntrinsicReward(object):
"""Class used to instantiate a CTS density model used for exploration."""
def __init__(self,
reward_scale,
convolutional=False,
observation_shape=PSEUDO_COUNT_OBSERVATION_SHAPE,
quantization_factor=PSEUDO_COUNT_QUANTIZATION_FACTOR):
"""Constructor.
Args:
reward_scale: float, scale factor applied to the raw rewards.
convolutional: bool, whether to use convolutional CTS.
observation_shape: tuple, 2D dimensions of the observation predicted
by the model. Needs to be square.
quantization_factor: int, number of bits for the predicted image
Raises:
ValueError: when the `observation_shape` is not square.
"""
self._reward_scale = reward_scale
if (len(observation_shape) != 2
or observation_shape[0] != observation_shape[1]):
raise ValueError('Observation shape needs to be square')
self._observation_shape = observation_shape
# Note: `shannon` provides the CTS density model implementation; it is not
# imported in this excerpt and must be supplied by the surrounding package.
self.density_model = shannon.CTSTensorModel(
observation_shape, convolutional)
self._quantization_factor = quantization_factor
def update(self, observation):
"""Updates the density model with the given observation.
Args:
observation: Input frame.
Returns:
Update log-probability.
"""
input_tensor = self._preprocess(observation)
return self.density_model.Update(input_tensor)
def compute_intrinsic_reward(self, observation, training_steps, eval_mode):
"""Updates the model, returns the intrinsic reward.
Args:
observation: Input frame. For compatibility with other models, this
may have a batch-size of 1 as its first dimension.
training_steps: int, number of training steps.
eval_mode: bool, whether or not running eval mode.
Returns:
The corresponding intrinsic reward.
"""
del training_steps
input_tensor = self._preprocess(observation)
if not eval_mode:
log_rho_t = self.density_model.Update(input_tensor)
log_rho_tp1 = self.density_model.LogProb(input_tensor)
ipd = log_rho_tp1 - log_rho_t
else:
# Do not update the density model in evaluation mode
ipd = self.density_model.IPD(input_tensor)
# Compute the pseudo count
ipd_clipped = min(ipd, 25)
inv_pseudo_count = max(0, math.expm1(ipd_clipped))
reward = float(self._reward_scale) * math.sqrt(inv_pseudo_count)
return reward
def _preprocess(self, observation):
"""Converts the given observation into something the model can use.
Args:
observation: Input frame.
Returns:
Processed frame.
Raises:
ValueError: If observation provided is not 2D.
"""
if observation.ndim != 2:
raise ValueError('Observation needs to be 2D.')
input_tensor = cv2.resize(observation,
self._observation_shape,
interpolation=cv2.INTER_AREA)
input_tensor //= (256 // self._quantization_factor)
# Convert to signed int (this may be unpleasantly inefficient).
input_tensor = input_tensor.astype('i', copy=False)
return input_tensor
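# --- Illustrative sketch (not part of the original module) ------------------
# The bonus above follows the pseudo-count construction: with prediction gain
# PG = log rho_{t+1}(x) - log rho_t(x), the pseudo-count is roughly
# N(x) ~= 1 / (exp(PG) - 1), so the intrinsic reward is
# reward_scale / sqrt(N(x)) = reward_scale * sqrt(expm1(PG)).
# The helper below (hypothetical name) mirrors the final step of
# CTSIntrinsicReward.compute_intrinsic_reward for a given prediction gain.
def _pseudo_count_bonus_sketch(prediction_gain, reward_scale=1.0):
  clipped_gain = min(prediction_gain, 25)  # same numerical clip as above
  inv_pseudo_count = max(0, math.expm1(clipped_gain))
  return float(reward_scale) * math.sqrt(inv_pseudo_count)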
@gin.configurable
class PixelCNNIntrinsicReward(object):
"""PixelCNN class to instantiate a bonus using a PixelCNN density model."""
def __init__(self,
sess,
reward_scale,
ipd_scale,
observation_shape=NATURE_DQN_OBSERVATION_SHAPE,
resize_shape=PSEUDO_COUNT_OBSERVATION_SHAPE,
quantization_factor=PSEUDO_COUNT_QUANTIZATION_FACTOR,
tf_device='/cpu:*',
optimizer=tf.train.RMSPropOptimizer(
learning_rate=0.0001,
momentum=0.9,
epsilon=0.0001)):
self._sess = sess
self.reward_scale = reward_scale
self.ipd_scale = ipd_scale
self.observation_shape = observation_shape
self.resize_shape = resize_shape
self.quantization_factor = quantization_factor
self.optimizer = optimizer
with tf.device(tf_device), tf.name_scope('intrinsic_pixelcnn'):
observation_shape = (1,) + observation_shape + (1,)
self.obs_ph = tf.placeholder(tf.uint8, shape=observation_shape,
name='obs_ph')
self.preproccessed_obs = self._preprocess(self.obs_ph, resize_shape)
self.iter_ph = tf.placeholder(tf.uint32, shape=[], name='iter_num')
self.eval_ph = tf.placeholder(tf.bool, shape=[], name='eval_mode')
self.network = tf.make_template('PixelCNN', self._network_template)
self.ipd = tf.cond(tf.logical_not(self.eval_ph),
self.update,
self.virtual_update)
self.reward = self.ipd_to_reward(self.ipd, self.iter_ph)
def compute_intrinsic_reward(self, observation, training_steps, eval_mode):
"""Updates the model (during training), returns the intrinsic reward.
Args:
observation: Input frame. For compatibility with other models, this
may have a batch-size of 1 as its first dimension.
training_steps: Number of training steps, int.
eval_mode: bool, whether or not running eval mode.
Returns:
The corresponding intrinsic reward.
"""
observation = observation[np.newaxis, :, :, np.newaxis]
return self._sess.run(self.reward, {self.obs_ph: observation,
self.iter_ph: training_steps,
self.eval_ph: eval_mode})
def _preprocess(self, obs, obs_shape):
"""Preprocess the input."""
obs = tf.cast(obs, tf.float32)
obs = tf.image.resize_bilinear(obs, obs_shape)
denom = tf.constant(256 // self.quantization_factor, dtype=tf.float32)
return tf.floordiv(obs, denom)
@gin.configurable
def _network_template(self, obs, num_layers, hidden_units):
"""PixelCNN network architecture."""
with slim.arg_scope([slim.conv2d, masked_conv2d],
weights_initializer=tf.variance_scaling_initializer(
distribution='uniform'),
biases_initializer=tf.constant_initializer(0.0)):
net = masked_conv2d(obs, hidden_units, [7, 7], mask_type='A',
activation_fn=None, scope='masked_conv_1')
embedding = slim.model_variable(
'embedding',
shape=(1,) + self.resize_shape + (4,),
initializer=tf.variance_scaling_initializer(
distribution='uniform'))
for i in range(1, num_layers + 1):
net2 = gating_layer(net, embedding, hidden_units,
'gating_{}'.format(i))
net += masked_conv2d(net2, hidden_units, [1, 1],
mask_type='B',
activation_fn=None,
scope='masked_conv_{}'.format(i+1))
net += slim.conv2d(embedding, hidden_units, [1, 1],
activation_fn=None)
net = tf.nn.relu(net)
net = masked_conv2d(net, 64, [1, 1], scope='1x1_conv_out',
mask_type='B',
activation_fn=tf.nn.relu)
logits = masked_conv2d(net, self.quantization_factor, [1, 1],
scope='logits', mask_type='B',
activation_fn=None)
loss = tf.losses.sparse_softmax_cross_entropy(
labels=tf.cast(obs, tf.int32),
logits=logits,
reduction=tf.losses.Reduction.MEAN)
return collections.namedtuple('PixelCNN_network', ['logits', 'loss'])(
logits, loss)
def update(self):
"""Computes the log likehood difference and update the density model."""
with tf.name_scope('update'):
with tf.name_scope('pre_update'):
loss = self.network(self.preproccessed_obs).loss
train_op = self.optimizer.minimize(loss)
with tf.name_scope('post_update'), tf.control_dependencies([train_op]):
loss_post_training = self.network(self.preproccessed_obs).loss
ipd = (loss - loss_post_training) * (
self.resize_shape[0] * self.resize_shape[1])
return ipd
def virtual_update(self):
"""Computes the log likelihood difference without updating the network."""
with tf.name_scope('virtual_update'):
with tf.name_scope('pre_update'):
loss = self.network(self.preproccessed_obs).loss
grads_and_vars = self.optimizer.compute_gradients(loss)
model_vars = [gv[1] for gv in grads_and_vars]
saved_vars = [tf.Variable(v.initialized_value()) for v in model_vars]
backup_op = tf.group(*[t.assign(s)
for t, s in zip(saved_vars, model_vars)])
with tf.control_dependencies([backup_op]):
train_op = self.optimizer.apply_gradients(grads_and_vars)
with tf.control_dependencies([train_op]), tf.name_scope('post_update'):
loss_post_training = self.network(self.preproccessed_obs).loss
with tf.control_dependencies([loss_post_training]):
restore_op = tf.group(*[d.assign(s)
for d, s in zip(model_vars, saved_vars)])
with tf.control_dependencies([restore_op]):
ipd = (loss - loss_post_training) * \
self.resize_shape[0] * self.resize_shape[1]
return ipd
def ipd_to_reward(self, ipd, steps):
"""Computes the intrinsic reward from IPD."""
# Prediction gain decay
ipd = self.ipd_scale * ipd / tf.sqrt(tf.to_float(steps))
inv_pseudo_count = tf.maximum(tf.expm1(ipd), 0.0)
return self.reward_scale * tf.sqrt(inv_pseudo_count)
@gin.configurable
class RNDIntrinsicReward(object):
"""Class used to instantiate a bonus using random network distillation."""
def __init__(self,
sess,
embedding_size=512,
observation_shape=NATURE_DQN_OBSERVATION_SHAPE,
tf_device='/gpu:0',
reward_scale=1.0,
optimizer=tf.train.AdamOptimizer(
learning_rate=0.0001,
epsilon=0.00001),
summary_writer=None):
self.embedding_size = embedding_size
self.reward_scale = reward_scale
self.optimizer = optimizer
self._sess = sess
self.summary_writer = summary_writer
with tf.device(tf_device), tf.name_scope('intrinsic_rnd'):
obs_shape = (1,) + observation_shape + (1,)
self.iter_ph = tf.placeholder(tf.uint64, shape=[], name='iter_num')
self.iter = tf.cast(self.iter_ph, tf.float32)
self.obs_ph = tf.placeholder(tf.uint8, shape=obs_shape,
name='obs_ph')
self.eval_ph = tf.placeholder(tf.bool, shape=[], name='eval_mode')
self.obs = tf.cast(self.obs_ph, tf.float32)
# Variables tracking the running mean and std of observations and rewards
self.obs_mean = tf.Variable(tf.zeros(shape=obs_shape),
trainable=False,
name='obs_mean',
dtype=tf.float32)
self.obs_std = tf.Variable(tf.ones(shape=obs_shape),
trainable=False,
name='obs_std',
dtype=tf.float32)
self.reward_mean = tf.Variable(tf.zeros(shape=[]),
trainable=False,
name='reward_mean',
dtype=tf.float32)
self.reward_std = tf.Variable(tf.ones(shape=[]),
trainable=False,
name='reward_std',
dtype=tf.float32)
self.obs = self._preprocess(self.obs)
self.target_embedding = self._target_network(self.obs)
self.prediction_embedding = self._prediction_network(self.obs)
self._train_op = self._build_train_op()
def _preprocess(self, obs):
return tf.clip_by_value((obs - self.obs_mean) / self.obs_std, -5.0, 5.0)
def compute_intrinsic_reward(self, obs, training_step, eval_mode=False):
"""Computes the RND intrinsic reward."""
obs = obs[np.newaxis, :, :, np.newaxis]
to_evaluate = [self.intrinsic_reward]
if not eval_mode:
# Also update the prediction network
to_evaluate.append(self._train_op)
reward = self._sess.run(to_evaluate,
{self.obs_ph: obs,
self.iter_ph: training_step,
self.eval_ph: eval_mode})[0]
return self.reward_scale * float(reward)
def _target_network(self, obs):
"""Implements the random target network used by RND."""
with slim.arg_scope([slim.conv2d, slim.fully_connected], trainable=False,
weights_initializer=tf.orthogonal_initializer(
gain=np.sqrt(2)),
biases_initializer=tf.zeros_initializer()):
net = slim.conv2d(obs, 32, [8, 8], stride=4,
activation_fn=tf.nn.leaky_relu)
net = slim.conv2d(net, 64, [4, 4], stride=2,
activation_fn=tf.nn.leaky_relu)
net = slim.conv2d(net, 64, [3, 3], stride=1,
activation_fn=tf.nn.leaky_relu)
net = slim.flatten(net)
embedding = slim.fully_connected(net, self.embedding_size,
activation_fn=None)
return embedding
def _prediction_network(self, obs):
"""Prediction network used by RND to predict to target network output."""
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_initializer=tf.orthogonal_initializer(
gain=np.sqrt(2)),
biases_initializer=tf.zeros_initializer()):
net = slim.conv2d(obs, 32, [8, 8], stride=4,
activation_fn=tf.nn.leaky_relu)
net = slim.conv2d(net, 64, [4, 4], stride=2,
activation_fn=tf.nn.leaky_relu)
net = slim.conv2d(net, 64, [3, 3], stride=1,
activation_fn=tf.nn.leaky_relu)
net = slim.flatten(net)
net = slim.fully_connected(net, 512, activation_fn=tf.nn.relu)
net = slim.fully_connected(net, 512, activation_fn=tf.nn.relu)
embedding = slim.fully_connected(net, self.embedding_size,
activation_fn=None)
return embedding
def _update_moments(self):
"""Update the moments estimates, assumes a batch size of 1."""
def update():
"""Update moment function passed later to a tf.cond."""
moments = [
(self.obs, self.obs_mean, self.obs_std),
(self.loss, self.reward_mean, self.reward_std)
]
ops = []
for value, mean, std in moments:
delta = value - mean
assign_mean = mean.assign_add(delta / self.iter)
std_ = std * self.iter + (delta ** 2) * self.iter / (self.iter + 1)
assign_std = std.assign(std_ / (self.iter + 1))
ops.extend([assign_mean, assign_std])
return ops
return tf.cond(
tf.logical_not(self.eval_ph),
update,
# false_fn must have the same number and type of outputs.
lambda: 4 * [tf.constant(0., tf.float32)])
def _build_train_op(self):
"""Returns train op to update the prediction network."""
prediction = self.prediction_embedding
target = tf.stop_gradient(self.target_embedding)
self.loss = tf.losses.mean_squared_error(
target, prediction, reduction=tf.losses.Reduction.MEAN)
with tf.control_dependencies(self._update_moments()):
self.intrinsic_reward = (self.loss - self.reward_mean) / self.reward_std
return self.optimizer.minimize(self.loss)
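# --- Illustrative usage sketch (not part of the original module) ------------
# Assumes a plain TF1 session and a CPU device; the constants and the call
# signature come from the classes above, everything else is hypothetical.
def _rnd_usage_sketch():
  sess = tf.Session()
  bonus = RNDIntrinsicReward(sess, reward_scale=0.5, tf_device='/cpu:*')
  sess.run(tf.global_variables_initializer())
  frame = np.zeros(NATURE_DQN_OBSERVATION_SHAPE, dtype=np.uint8)
  # training_step starts at 1 because the running-moment update divides by it.
  return bonus.compute_intrinsic_reward(frame, training_step=1,
                                        eval_mode=False)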
|
|
# Copyright (c) 2010, individual contributors (see AUTHORS file)
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from dashboard.forms import LoginForm, RegistrationForm, ForgotPasswordForm
from dashboard.models import Contributor, Event
from django.contrib import auth
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from hashlib import md5
from observatory.dashboard.views import projects
from observatory.settings import RECAPTCHA_PUBLIC, RECAPTCHA_PRIVATE
from observatory.lib.recaptcha.client import captcha
# display the list of users
def people(request):
people = User.objects.all()
return render_to_response("users/people.html", {
"people": people
}, context_instance = RequestContext(request))
# displays the user's profile
def profile(request, user_id):
user = get_object_or_404(User, id = user_id)
try:
contributor = Contributor.objects.get(user = user)
except:
contributor = None
try:
is_self = user.id == request.user.id
except:
is_self = False
return render_to_response('users/profile.html', {
'user_page': user,
'contributor': contributor,
'is_self': is_self
}, context_instance = RequestContext(request))
# displays both the login and registration forms. If there is an error with the
# selected form, the user is redirected to a page with only that form.
def login_or_reg(request):
next = reverse(projects.list)
if 'next' in request.GET:
next = request.GET['next']
reg_form = RegistrationForm(auto_id = "id_login_%s")
login_form = LoginForm(auto_id = "id_login_%s")
return render_to_response('users/login-register.html', {
'next': next,
'js_page_id': 'login-register',
'reg_form': reg_form,
'login_form': login_form,
'RECAPTCHA_PUBLIC': RECAPTCHA_PUBLIC,
'RECAPTCHA_PRIVATE': RECAPTCHA_PRIVATE
}, context_instance = RequestContext(request))
# displays a registration form
def register(request):
if request.method == "POST":
class RegisterError:
pass
try:
form = RegistrationForm(request.POST)
if not form.is_valid():
error_header = "That's not quite right."
raise RegisterError()
if len(User.objects.filter(email = form.cleaned_data["email"])) != 0:
error_header = "That email is already registered."
raise RegisterError()
if form.cleaned_data['password'] != request.POST['password_confirm']:
error_header = "Your passwords didn't match."
raise RegisterError()
# validate the captcha if recaptcha is enabled
if RECAPTCHA_PUBLIC is not None:
capt = captcha.submit(request.POST["recaptcha_challenge_field"],
request.POST["recaptcha_response_field"],
RECAPTCHA_PRIVATE,
request.META["REMOTE_ADDR"])
if not capt.is_valid:
error_header = "Let's try that captcha again."
raise RegisterError()
resp = create_user(request, form)
return resp
except RegisterError:
pass
# GET
else:
error_header = None
form = RegistrationForm()
return render_to_response('users/register.html', {
'next': reverse(projects.list),
'reg_form': form,
'error_header': error_header,
'RECAPTCHA_PUBLIC': RECAPTCHA_PUBLIC,
'RECAPTCHA_PRIVATE': RECAPTCHA_PRIVATE
}, context_instance = RequestContext(request))
# creates a user, submitted from register
def create_user(request, form):
data = form.cleaned_data
# use an md5 of the email as a username
m = md5()
m.update(data["email"])
# if it's ok, register the user
user = User.objects.create_user(m.hexdigest()[0:30],
data['email'],
data['password'])
# set the user's first/last names
user.first_name = data['first_name']
user.last_name = data['last_name']
# save the user
user.save()
# search past events for the user's email
for event in Event.objects.filter(author_email__iexact = user.email,
author = None):
event.author = user
event.save()
# search past events for the user's first and last name
name = user.get_full_name()
for event in Event.objects.filter(author_name__iexact = name, author = None):
event.author = user
event.save()
# search contributors for the user's name and email
for contrib in Contributor.objects.filter(email__iexact = user.email,
user = None):
contrib.user = user
contrib.save()
for contrib in Contributor.objects.filter(name__iexact = name, user = None):
contrib.user = user
contrib.save()
# log the user in (since we can't send emails for validation AFAIK)
user = auth.authenticate(username = user.username,
password = data['password'])
auth.login(request, user)
return HttpResponseRedirect(request.POST['next'])
class LoginError:
def __init__(self, username_correct):
self.username_correct = username_correct
# allows a user to login
def login(request):
next = reverse(projects.list)
error_header = None
if request.method == 'POST':
if 'next' in request.POST:
next = request.POST['next']
login_form = LoginForm(request.POST, auto_id = "id_login_%s")
if login_form.is_valid():
try:
data = login_form.cleaned_data
# query for a user via email
try:
user = User.objects.get(email = data['email'])
except:
error_header = "{0} isn't registered.".format(data['email'])
raise LoginError(False)
# authenticate that user
user = auth.authenticate(username = user.username,
password = data['password'])
# if the password is incorrect, redirect to the login page
if user is None:
error_header = "Invalid password."
raise LoginError(True)
# otherwise, log the user in
if user.is_active:
auth.login(request, user)
return HttpResponseRedirect(next)
except LoginError as e:
pass
except:
raise
else:
login_form = LoginForm(auto_id = "id_login_%s")
return render_to_response('users/login.html', {
'next': next,
'error_header': error_header,
'login_form': login_form
}, context_instance = RequestContext(request))
# logs out a user
def logout(request):
auth.logout(request)
return HttpResponseRedirect(reverse(projects.list))
# forgot password
def forgot_password(request):
forgot_password_form = ForgotPasswordForm(request.POST, auto_id="id_%s")
if request.method == 'POST':
if forgot_password_form.is_valid():
try:
data = forgot_password_form.cleaned_data
# query for a user via email
user = User.objects.get(email = data['email'])
return render_to_response('users/forgot_password_success.html', {
})
except:
raise Exception('An error occurred')
else:
forgot_password_form = ForgotPasswordForm(auto_id="id_%s")
return render_to_response('users/forgot_password.html', {
'forgot_password_form': forgot_password_form
}, context_instance = RequestContext(request))
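# --- Illustrative sketch (not part of the original module) ------------------
# These views are normally wired up in the project's own urls.py.  A
# hypothetical mapping for an old-style (Django 1.x) URLconf could look like:
#
#   from django.conf.urls.defaults import patterns, url
#   from observatory.dashboard.views import users
#
#   urlpatterns = patterns('',
#       url(r'^people/$', users.people),
#       url(r'^people/(?P<user_id>\d+)/$', users.profile),
#       url(r'^login/$', users.login),
#       url(r'^logout/$', users.logout),
#       url(r'^register/$', users.register),
#       url(r'^forgot-password/$', users.forgot_password),
#   )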
|
|
#!/usr/bin/env python2
"""
Rules for building C/API module with f2py2e.
Here is a skeleton of a new wrapper function (13Dec2001):
wrapper_function(args)
declarations
get_python_arguments, say, `a' and `b'
get_a_from_python
if (successful) {
get_b_from_python
if (successful) {
callfortran
if (successful) {
put_a_to_python
if (successful) {
put_b_to_python
if (successful) {
buildvalue = ...
}
}
}
}
cleanup_b
}
cleanup_a
return buildvalue
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/08/30 08:58:42 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.129 $"[10:-1]
from . import __version__
f2py_version = __version__.version
import pprint
import sys
import time
import copy
from .auxfuncs import *
from . import capi_maps
from .capi_maps import *
from . import cfuncs
from . import common_rules
from . import use_rules
from . import f90mod_rules
from . import func2subr
errmess = sys.stderr.write
outmess = sys.stdout.write
show = pprint.pprint
options={}
sepdict={}
#for k in ['need_cfuncs']: sepdict[k]=','
for k in ['decl',
'frompyobj',
'cleanupfrompyobj',
'topyarr', 'method',
'pyobjfrom', 'closepyobjfrom',
'freemem',
'userincludes',
'includes0', 'includes', 'typedefs', 'typedefs_generated',
'cppmacros', 'cfuncs', 'callbacks',
'latexdoc',
'restdoc',
'routine_defs', 'externroutines',
'initf2pywraphooks',
'commonhooks', 'initcommonhooks',
'f90modhooks', 'initf90modhooks']:
sepdict[k]='\n'
#################### Rules for C/API module #################
module_rules={
'modulebody':"""\
/* File: #modulename#module.c
* This file is auto-generated with f2py (version:#f2py_version#).
* f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
* written by Pearu Peterson <[email protected]>.
* See http://cens.ioc.ee/projects/f2py2e/
* Generation date: """+time.asctime(time.localtime(time.time()))+"""
* $R"""+"""evision:$
* $D"""+"""ate:$
* Do not edit this file directly unless you know what you are doing!!!
*/
#ifdef __cplusplus
extern \"C\" {
#endif
"""+gentitle("See f2py2e/cfuncs.py: includes")+"""
#includes#
#includes0#
"""+gentitle("See f2py2e/rules.py: mod_rules['modulebody']")+"""
static PyObject *#modulename#_error;
static PyObject *#modulename#_module;
"""+gentitle("See f2py2e/cfuncs.py: typedefs")+"""
#typedefs#
"""+gentitle("See f2py2e/cfuncs.py: typedefs_generated")+"""
#typedefs_generated#
"""+gentitle("See f2py2e/cfuncs.py: cppmacros")+"""
#cppmacros#
"""+gentitle("See f2py2e/cfuncs.py: cfuncs")+"""
#cfuncs#
"""+gentitle("See f2py2e/cfuncs.py: userincludes")+"""
#userincludes#
"""+gentitle("See f2py2e/capi_rules.py: usercode")+"""
#usercode#
/* See f2py2e/rules.py */
#externroutines#
"""+gentitle("See f2py2e/capi_rules.py: usercode1")+"""
#usercode1#
"""+gentitle("See f2py2e/cb_rules.py: buildcallback")+"""
#callbacks#
"""+gentitle("See f2py2e/rules.py: buildapi")+"""
#body#
"""+gentitle("See f2py2e/f90mod_rules.py: buildhooks")+"""
#f90modhooks#
"""+gentitle("See f2py2e/rules.py: module_rules['modulebody']")+"""
"""+gentitle("See f2py2e/common_rules.py: buildhooks")+"""
#commonhooks#
"""+gentitle("See f2py2e/rules.py")+"""
static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
\t{NULL}
};
static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
\t{NULL,NULL}
};
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
\tPyModuleDef_HEAD_INIT,
\t"#modulename#",
\tNULL,
\t-1,
\tf2py_module_methods,
\tNULL,
\tNULL,
\tNULL,
\tNULL
};
#endif
#if PY_VERSION_HEX >= 0x03000000
#define RETVAL m
PyMODINIT_FUNC PyInit_#modulename#(void) {
#else
#define RETVAL
PyMODINIT_FUNC init#modulename#(void) {
#endif
\tint i;
\tPyObject *m,*d, *s;
#if PY_VERSION_HEX >= 0x03000000
\tm = #modulename#_module = PyModule_Create(&moduledef);
#else
\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods);
#endif
\tPy_TYPE(&PyFortran_Type) = &PyType_Type;
\timport_array();
\tif (PyErr_Occurred())
\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;}
\td = PyModule_GetDict(m);
\ts = PyString_FromString(\"$R"""+"""evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
#if PY_VERSION_HEX >= 0x03000000
\ts = PyUnicode_FromString(
#else
\ts = PyString_FromString(
#endif
\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\tPy_DECREF(s);
\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++)
\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i]));
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
#ifdef F2PY_REPORT_ATEXIT
\tif (! PyErr_Occurred())
\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
\treturn RETVAL;
}
#ifdef __cplusplus
}
#endif
""",
'separatorsfor':{'latexdoc':'\n\n',
'restdoc':'\n\n'},
'latexdoc':['\\section{Module \\texttt{#texmodulename#}}\n',
'#modnote#\n',
'#latexdoc#'],
'restdoc':['Module #modulename#\n'+'='*80,
'\n#restdoc#']
}
defmod_rules=[
{'body': '/*eof body*/',
'method': '/*eof method*/',
'externroutines': '/*eof externroutines*/',
'routine_defs': '/*eof routine_defs*/',
'initf90modhooks': '/*eof initf90modhooks*/',
'initf2pywraphooks': '/*eof initf2pywraphooks*/',
'initcommonhooks': '/*eof initcommonhooks*/',
'latexdoc': '',
'restdoc': '',
'modnote': {hasnote:'#note#',l_not(hasnote):''},
}
]
routine_rules={
'separatorsfor':sepdict,
'body':"""
#begintitle#
static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\";
/* #declfortranroutine# */
static PyObject *#apiname#(const PyObject *capi_self,
PyObject *capi_args,
PyObject *capi_keywds,
#functype# (*f2py_func)(#callprotoargument#)) {
\tPyObject * volatile capi_buildvalue = NULL;
\tvolatile int f2py_success = 1;
#decl#
\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL};
#usercode#
#routdebugenter#
#ifdef F2PY_REPORT_ATEXIT
f2py_start_clock();
#endif
\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\
\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL;
#frompyobj#
/*end of frompyobj*/
#ifdef F2PY_REPORT_ATEXIT
f2py_start_call_clock();
#endif
#callfortranroutine#
if (PyErr_Occurred())
f2py_success = 0;
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_call_clock();
#endif
/*end of callfortranroutine*/
\t\tif (f2py_success) {
#pyobjfrom#
/*end of pyobjfrom*/
\t\tCFUNCSMESS(\"Building return value.\\n\");
\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#);
/*closepyobjfrom*/
#closepyobjfrom#
\t\t} /*if (f2py_success) after callfortranroutine*/
/*cleanupfrompyobj*/
#cleanupfrompyobj#
\tif (capi_buildvalue == NULL) {
#routdebugfailure#
\t} else {
#routdebugleave#
\t}
\tCFUNCSMESS(\"Freeing memory.\\n\");
#freemem#
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_clock();
#endif
\treturn capi_buildvalue;
}
#endtitle#
""",
'routine_defs':'#routine_def#',
'initf2pywraphooks':'#initf2pywraphook#',
'externroutines':'#declfortranroutine#',
'doc':'#docreturn##name#(#docsignature#)',
'docshort':'#docreturn##name#(#docsignatureshort#)',
'docs':'"\t#docreturn##name#(#docsignature#)\\n"\n',
'need':['arrayobject.h', 'CFUNCSMESS', 'MINMAX'],
'cppmacros':{debugcapi:'#define DEBUGCFUNCS'},
'latexdoc':['\\subsection{Wrapper function \\texttt{#texname#}}\n',
"""
\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)}
#routnote#
#latexdocstrsigns#
"""],
'restdoc':['Wrapped function ``#name#``\n'+'-'*80,
]
}
################## Rules for C/API function ##############
rout_rules=[
{ # Init
'separatorsfor': {'callfortranroutine': '\n', 'routdebugenter': '\n', 'decl': '\n',
'routdebugleave': '\n', 'routdebugfailure': '\n',
'setjmpbuf': ' || ',
'docstrreq': '\n', 'docstropt': '\n', 'docstrout': '\n',
'docstrcbs': '\n', 'docstrsigns': '\\n"\n"',
'latexdocstrsigns': '\n',
'latexdocstrreq': '\n', 'latexdocstropt': '\n',
'latexdocstrout': '\n', 'latexdocstrcbs': '\n',
},
'kwlist': '', 'kwlistopt': '', 'callfortran': '', 'callfortranappend': '',
'docsign': '', 'docsignopt': '', 'decl': '/*decl*/',
'freemem': '/*freemem*/',
'docsignshort': '', 'docsignoptshort': '',
'docstrsigns': '', 'latexdocstrsigns': '',
'docstrreq': '\\nParameters\\n----------',
'docstropt': '\\nOther Parameters\\n----------------',
'docstrout': '\\nReturns\\n-------',
'docstrcbs': '\\nNotes\\n-----\\nCall-back functions::\\n',
'latexdocstrreq': '\\noindent Required arguments:',
'latexdocstropt': '\\noindent Optional arguments:',
'latexdocstrout': '\\noindent Return objects:',
'latexdocstrcbs': '\\noindent Call-back functions:',
'args_capi': '', 'keys_capi': '', 'functype': '',
'frompyobj': '/*frompyobj*/',
'cleanupfrompyobj': ['/*end of cleanupfrompyobj*/'], #this list will be reversed
'pyobjfrom': '/*pyobjfrom*/',
'closepyobjfrom': ['/*end of closepyobjfrom*/'], #this list will be reversed
'topyarr': '/*topyarr*/', 'routdebugleave': '/*routdebugleave*/',
'routdebugenter': '/*routdebugenter*/',
'routdebugfailure': '/*routdebugfailure*/',
'callfortranroutine': '/*callfortranroutine*/',
'argformat': '', 'keyformat': '', 'need_cfuncs': '',
'docreturn': '', 'return': '', 'returnformat': '', 'rformat': '',
'kwlistxa': '', 'keys_xa': '', 'xaformat': '', 'docsignxa': '', 'docsignxashort': '',
'initf2pywraphook': '',
'routnote': {hasnote:'--- #note#',l_not(hasnote):''},
}, {
'apiname':'f2py_rout_#modulename#_#name#',
'pyname':'#modulename#.#name#',
'decl':'',
'_check':l_not(ismoduleroutine)
}, {
'apiname':'f2py_rout_#modulename#_#f90modulename#_#name#',
'pyname':'#modulename#.#f90modulename#.#name#',
'decl':'',
'_check':ismoduleroutine
}, { # Subroutine
'functype': 'void',
'declfortranroutine': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)):'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)):'extern void #fortranname#(#callprotoargument#);',
ismoduleroutine:'',
isdummyroutine:''
},
'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)):'F_FUNC'},
'callfortranroutine': [
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe:'\t\t\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement:'''\t\t\t\t#callstatement#;
\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
{l_not(l_or(hascallstatement, isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'},
{isthreadsafe:'\t\t\tPy_END_ALLOW_THREADS'},
{hasexternals:"""\t\t}"""}
],
'_check': l_and(issubroutine, l_not(issubroutine_wrap)),
}, { # Wrapped function
'functype': 'void',
'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine: '',
},
'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)):'''
{
extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
#if PY_VERSION_HEX >= 0x03000000
PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
#else
PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
#endif
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)):['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine)):'\t(*f2py_func)(#callfortran#);'},
{hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'}
],
'_check': isfunction_wrap,
}, { # Wrapped subroutine
'functype': 'void',
'declfortranroutine': {l_not(l_or(ismoduleroutine, isdummyroutine)): 'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine: '',
},
'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)):'''
{
extern void #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
#if PY_VERSION_HEX >= 0x03000000
PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
#else
PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
#endif
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)):['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine)):'\t(*f2py_func)(#callfortran#);'},
{hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'}
],
'_check': issubroutine_wrap,
}, { # Function
'functype':'#ctype#',
'docreturn':{l_not(isintent_hide):'#rname#,'},
'docstrout':'#pydocsignout#',
'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasresultnote:'--- #resultnote#'}],
'callfortranroutine':[{l_and(debugcapi, isstringfunction):"""\
#ifdef USESCOMPAQFORTRAN
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
{l_and(debugcapi, l_not(isstringfunction)):"""\
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
],
'_check':l_and(isfunction, l_not(isfunction_wrap))
}, { # Scalar function
'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);',
isdummyroutine:''
},
'routine_def':{l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'decl':[{iscomplexfunction_warn:'\t#ctype# #name#_return_value={0,0};',
l_not(iscomplexfunction):'\t#ctype# #name#_return_value=0;'},
{iscomplexfunction:'\tPyObject *#name#_return_value_capi = Py_None;'}
],
'callfortranroutine':[
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement:'''\t#callstatement#;
/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
{l_not(l_or(hascallstatement, isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'},
{l_and(debugcapi, iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
{l_and(debugcapi, l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
'pyobjfrom':{iscomplexfunction:'\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
'need':[{l_not(isdummyroutine):'F_FUNC'},
{iscomplexfunction:'pyobj_from_#ctype#1'},
{islong_longfunction:'long_long'},
{islong_doublefunction:'long_double'}],
'returnformat':{l_not(isintent_hide):'#rformat#'},
'return':{iscomplexfunction:',#name#_return_value_capi',
l_not(l_or(iscomplexfunction, isintent_hide)):',#name#_return_value'},
'_check':l_and(isfunction, l_not(isstringfunction), l_not(isfunction_wrap))
}, { # String function # in use for --no-wrap
'declfortranroutine':'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
'routine_def':{l_not(l_or(ismoduleroutine, isintent_c)):
# '\t{\"#name#\",-1,{{-1}},0,(char *)F_FUNC(#fortranname#,#FORTRANNAME#),(void *)#apiname#,doc_#apiname#},',
'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c):
# '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(void *)#apiname#,doc_#apiname#},'
'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
},
'decl':['\t#ctype# #name#_return_value = NULL;',
'\tint #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
'\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {',
'\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
'\t\tf2py_success = 0;',
'\t} else {',
"\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';",
'\t}',
'\tif (f2py_success) {',
{hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe:'\t\tPy_BEGIN_ALLOW_THREADS'},
"""\
#ifdef USESCOMPAQFORTRAN
\t\t(*f2py_func)(#callcompaqfortran#);
#else
\t\t(*f2py_func)(#callfortran#);
#endif
""",
{isthreadsafe:'\t\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t\t}'},
{debugcapi:'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
'\t} /* if (f2py_success) after (string)malloc */',
],
'returnformat':'#rformat#',
'return':',#name#_return_value',
'freemem':'\tSTRINGFREE(#name#_return_value);',
'need':['F_FUNC', '#ctype#', 'STRINGFREE'],
'_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete
},
{ # Debugging
'routdebugenter':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
'routdebugleave':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
'routdebugfailure':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
'_check':debugcapi
}
]
################ Rules for arguments ##################
typedef_need_dict = {islong_long: 'long_long',
islong_double: 'long_double',
islong_complex: 'complex_long_double',
isunsigned_char: 'unsigned_char',
isunsigned_short: 'unsigned_short',
isunsigned: 'unsigned',
isunsigned_long_long: 'unsigned_long_long',
isunsigned_chararray: 'unsigned_char',
isunsigned_shortarray: 'unsigned_short',
isunsigned_long_longarray: 'unsigned_long_long',
issigned_long_longarray: 'long_long',
}
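# Each rule dict below is consumed by applyrules() in buildapi(): an optional
# '_check' predicate decides whether the rule applies to a given variable,
# '_depend' marks rules that are only applied in the second pass over dependent
# arguments, '_break' stops evaluation of further rules for that variable, and
# the remaining keys hold template fragments for the corresponding sections of
# the generated wrapper ('decl', 'frompyobj', 'cleanupfrompyobj', ...).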
aux_rules=[
{
'separatorsfor':sepdict
},
{ # Common
'frompyobj': ['\t/* Processing auxiliary variable #varname# */',
{debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
'need': typedef_need_dict,
},
# Scalars (not complex)
{ # Common
'decl': '\t#ctype# #varname# = 0;',
'need': {hasinitvalue:'math.h'},
'frompyobj': {hasinitvalue:'\t#varname# = #init#;'},
'_check': l_and(isscalar, l_not(iscomplex)),
},
{
'return': ',#varname#',
'docstrout': '#pydocsignout#',
'docreturn': '#outvarname#,',
'returnformat': '#varrformat#',
'_check': l_and(isscalar, l_not(iscomplex), isintent_out),
},
# Complex scalars
{ # Common
'decl':'\t#ctype# #varname#;',
'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check':iscomplex
},
# String
{ # Common
'decl':['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
],
'need':['len..'],
'_check':isstring
},
# Array
{ # Common
'decl':['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
],
'need':['len..', {hasinitvalue:'forcomb'}, {hasinitvalue:'CFUNCSMESS'}],
'_check':isarray
},
# Scalararray
{ # Common
'_check':l_and(isarray, l_not(iscomplexarray))
}, { # Not hidden
'_check':l_and(isarray, l_not(iscomplexarray), isintent_nothide)
},
# Integer*1 array
{'need':'#ctype#',
'_check':isint1array,
'_depend':''
},
# Integer*-1 array
{'need':'#ctype#',
'_check':isunsigned_chararray,
'_depend':''
},
# Integer*-2 array
{'need':'#ctype#',
'_check':isunsigned_shortarray,
'_depend':''
},
# Integer*-8 array
{'need':'#ctype#',
'_check':isunsigned_long_longarray,
'_depend':''
},
# Complexarray
{'need':'#ctype#',
'_check':iscomplexarray,
'_depend':''
},
# Stringarray
{
'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
'need':'string',
'_check':isstringarray
}
]
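# arg_rules parallels aux_rules but covers variables exposed as Python-level
# arguments; buildapi() selects aux_rules for intent(aux) variables and
# arg_rules for everything else.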
arg_rules=[
{
'separatorsfor':sepdict
},
{ # Common
'frompyobj': ['\t/* Processing variable #varname# */',
{debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
'_depend': '',
'need': typedef_need_dict,
},
# Doc signatures
{
'docstropt':{l_and(isoptional, isintent_nothide):'#pydocsign#'},
'docstrreq':{l_and(isrequired, isintent_nothide):'#pydocsign#'},
'docstrout':{isintent_out:'#pydocsignout#'},
'latexdocstropt':{l_and(isoptional, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrreq':{l_and(isrequired, isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote, isintent_hide):'--- #note#',
l_and(hasnote, isintent_nothide):'--- See above.'}]},
'depend':''
},
# Required/Optional arguments
{
'kwlist':'"#varname#",',
'docsign':'#varname#,',
'_check':l_and(isintent_nothide, l_not(isoptional))
},
{
'kwlistopt':'"#varname#",',
'docsignopt':'#varname#=#showinit#,',
'docsignoptshort':'#varname#,',
'_check':l_and(isintent_nothide, isoptional)
},
# Docstring/BuildValue
{
'docreturn':'#outvarname#,',
'returnformat':'#varrformat#',
'_check':isintent_out
},
# Externals (call-back functions)
{ # Common
'docsignxa':{isintent_nothide:'#varname#_extra_args=(),'},
'docsignxashort':{isintent_nothide:'#varname#_extra_args,'},
'docstropt':{isintent_nothide:'#varname#_extra_args : input tuple, optional\\n Default: ()'},
'docstrcbs':'#cbdocstr#',
'latexdocstrcbs':'\\item[] #cblatexdocstr#',
'latexdocstropt':{isintent_nothide:'\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
'decl':['\tPyObject *#varname#_capi = Py_None;',
'\tPyTupleObject *#varname#_xa_capi = NULL;',
'\tPyTupleObject *#varname#_args_capi = NULL;',
'\tint #varname#_nofargs_capi = 0;',
{l_not(isintent_callback):'\t#cbname#_typedef #varname#_cptr;'}
],
'kwlistxa':{isintent_nothide:'"#varname#_extra_args",'},
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'xaformat':{isintent_nothide:'O!'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'keys_xa':',&PyTuple_Type,&#varname#_xa_capi',
'setjmpbuf':'(setjmp(#cbname#_jmpbuf))',
'callfortran':{l_not(isintent_callback):'#varname#_cptr,'},
'need':['#cbname#', 'setjmp.h'],
'_check':isexternal
},
{
'frompyobj':[{l_not(isintent_callback):"""\
if(F2PyCapsule_Check(#varname#_capi)) {
#varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi);
} else {
#varname#_cptr = #cbname#;
}
"""}, {isintent_callback:"""\
if (#varname#_capi==Py_None) {
#varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
if (#varname#_capi) {
if (#varname#_xa_capi==NULL) {
if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
if (capi_tmp)
#varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
else
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
return NULL;
}
}
}
}
if (#varname#_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\");
return NULL;
}
}
"""},
## {l_not(isintent_callback):"""\
## if (#varname#_capi==Py_None) {
## printf(\"hoi\\n\");
## }
## """},
"""\
\t#varname#_nofargs_capi = #cbname#_nofargs;
\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) {
\t\tjmp_buf #varname#_jmpbuf;""",
{debugcapi:["""\
\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs);
\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""",
{l_not(isintent_callback):"""\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]},
"""\
\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\");
\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject);
\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject);
\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""",
],
'cleanupfrompyobj':
"""\
\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\");
\t\t#cbname#_capi = #varname#_capi;
\t\tPy_DECREF(#cbname#_args_capi);
\t\t#cbname#_args_capi = #varname#_args_capi;
\t\t#cbname#_nofargs = #varname#_nofargs_capi;
\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf));
\t}""",
'need':['SWAP', 'create_cb_arglist'],
'_check':isexternal,
'_depend':''
},
# Scalars (not complex)
{ # Common
'decl':'\t#ctype# #varname# = 0;',
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
'return':{isintent_out:',#varname#'},
'_check':l_and(isscalar, l_not(iscomplex))
}, {
'need': {hasinitvalue:'math.h'},
'_check': l_and(isscalar, l_not(iscomplex)),
#'_depend':''
}, { # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'pyobjfrom':{isintent_inout:"""\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\tif (f2py_success) {"""},
'closepyobjfrom':{isintent_inout:"\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'_check':l_and(isscalar, l_not(iscomplex), isintent_nothide)
}, {
'frompyobj':[
# hasinitvalue...
# if pyobj is None:
# varname = init
# else
# from_pyobj(varname)
#
# isoptional and noinitvalue...
# if pyobj is not None:
# from_pyobj(varname)
# else:
# varname is uninitialized
#
# ...
# from_pyobj(varname)
#
{hasinitvalue:'\tif (#varname#_capi == Py_None) #varname# = #init#; else',
'_depend':''},
{l_and(isoptional, l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)',
'_depend':''},
{l_not(islogical):'''\
\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
\tif (f2py_success) {'''},
{islogical:'''\
\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
\t\tf2py_success = 1;
\tif (f2py_success) {'''},
],
'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname#*/',
'need':{l_not(islogical):'#ctype#_from_pyobj'},
'_check':l_and(isscalar, l_not(iscomplex), isintent_nothide),
'_depend':''
# },{ # Hidden
# '_check':l_and(isscalar,l_not(iscomplex),isintent_hide)
}, { # Hidden
'frompyobj':{hasinitvalue:'\t#varname# = #init#;'},
'need':typedef_need_dict,
'_check':l_and(isscalar, l_not(iscomplex), isintent_hide),
'_depend':''
}, { # Common
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'_check':l_and(isscalar, l_not(iscomplex)),
'_depend':''
},
# Complex scalars
{ # Common
'decl':'\t#ctype# #varname#;',
'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'return':{isintent_out:',#varname#_capi'},
'_check':iscomplex
}, { # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'pyobjfrom':{isintent_inout:"""\
\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\t\tif (f2py_success) {"""},
'closepyobjfrom':{isintent_inout:"\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'_check':l_and(iscomplex, isintent_nothide)
}, {
'frompyobj':[{hasinitvalue:'\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
{l_and(isoptional, l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)'},
# '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\\n");'
'\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
'\n\tif (f2py_success) {'],
'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname# frompyobj*/',
'need':['#ctype#_from_pyobj'],
'_check':l_and(iscomplex, isintent_nothide),
'_depend':''
}, { # Hidden
'decl':{isintent_out:'\tPyObject *#varname#_capi = Py_None;'},
'_check':l_and(iscomplex, isintent_hide)
}, {
'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check':l_and(iscomplex, isintent_hide),
'_depend':''
}, { # Common
'pyobjfrom':{isintent_out:'\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'},
'need':['pyobj_from_#ctype#1'],
'_check':iscomplex
}, {
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'_check':iscomplex,
'_depend':''
},
# String
{ # Common
'decl':['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
'\tPyObject *#varname#_capi = Py_None;'],
'callfortran':'#varname#,',
'callfortranappend':'slen(#varname#),',
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
# 'freemem':'\tSTRINGFREE(#varname#);',
'return':{isintent_out:',#varname#'},
'need':['len..'],#'STRINGFREE'],
'_check':isstring
}, { # Common
'frompyobj':"""\
\tslen(#varname#) = #length#;
\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\");
\tif (f2py_success) {""",
'cleanupfrompyobj':"""\
\t\tSTRINGFREE(#varname#);
\t} /*if (f2py_success) of #varname#*/""",
'need':['#ctype#_from_pyobj', 'len..', 'STRINGFREE'],
'_check':isstring,
'_depend':''
}, { # Not hidden
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'pyobjfrom':{isintent_inout:'''\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#);
\tif (f2py_success) {'''},
'closepyobjfrom':{isintent_inout:'\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'_check':l_and(isstring, isintent_nothide)
}, { # Hidden
'_check':l_and(isstring, isintent_hide)
}, {
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'_check':isstring,
'_depend':''
},
# Array
{ # Common
'decl':['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
'\tPyArrayObject *capi_#varname#_tmp = NULL;',
'\tint capi_#varname#_intent = 0;',
],
'callfortran':'#varname#,',
'return':{isintent_out:',capi_#varname#_tmp'},
'need':'len..',
'_check':isarray
}, { # intent(overwrite) array
'decl': '\tint capi_overwrite_#varname# = 1;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
'docsignxa': 'overwrite_#varname#=1,',
'docsignxashort': 'overwrite_#varname#,',
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1',
'_check': l_and(isarray, isintent_overwrite),
}, {
'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_overwrite),
'_depend': '',
},
{ # intent(copy) array
'decl': '\tint capi_overwrite_#varname# = 0;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
'docsignxa': 'overwrite_#varname#=0,',
'docsignxashort': 'overwrite_#varname#,',
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0',
'_check': l_and(isarray, isintent_copy),
}, {
'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_copy),
'_depend': '',
}, {
'need':[{hasinitvalue:'forcomb'}, {hasinitvalue:'CFUNCSMESS'}],
'_check':isarray,
'_depend':''
}, { # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
# 'pyobjfrom':{isintent_inout:"""\
# /* Partly because of the following hack, intent(inout) is deprecated,
# Use intent(in,out) instead.
# \tif ((#varname#_capi != Py_None) && PyArray_Check(#varname#_capi) \\
# \t\t&& (#varname#_capi != (PyObject *)capi_#varname#_tmp)) {
# \t\tif (((PyArrayObject *)#varname#_capi)->nd != capi_#varname#_tmp->nd) {
# \t\t\tif (#varname#_capi != capi_#varname#_tmp->base)
# \t\t\t\tcopy_ND_array((PyArrayObject *)capi_#varname#_tmp->base,(PyArrayObject *)#varname#_capi);
# \t\t} else
# \t\t\tcopy_ND_array(capi_#varname#_tmp,(PyArrayObject *)#varname#_capi);
# \t}
# */
# """},
# 'need':{isintent_inout:'copy_ND_array'},
'_check':l_and(isarray, isintent_nothide)
}, {
'frompyobj':['\t#setdims#;',
'\tcapi_#varname#_intent |= #intent#;',
{isintent_hide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
{isintent_nothide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
\tif (capi_#varname#_tmp == NULL) {
\t\tif (!PyErr_Occurred())
\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
\t} else {
\t\t#varname# = (#ctype# *)(capi_#varname#_tmp->data);
""",
{hasinitvalue:[
{isintent_nothide:'\tif (#varname#_capi == Py_None) {'},
{isintent_hide:'\t{'},
{iscomplexarray:'\t\t#ctype# capi_c;'},
"""\
\t\tint *_i,capi_i=0;
\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
\t\tif (initforcomb(capi_#varname#_tmp->dimensions,capi_#varname#_tmp->nd,1)) {
\t\t\twhile ((_i = nextforcomb()))
\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
\t\t} else {
\t\t\tif (!PyErr_Occurred())
\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
\t\t\tf2py_success = 0;
\t\t}
\t}
\tif (f2py_success) {"""]},
],
'cleanupfrompyobj':[ # note that this list will be reversed
'\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
{l_not(l_or(isintent_out, isintent_hide)):"""\
\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
\t\tPy_XDECREF(capi_#varname#_tmp); }"""},
{l_and(isintent_hide, l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""},
{hasinitvalue:'\t} /*if (f2py_success) of #varname# init*/'},
],
'_check':isarray,
'_depend':''
},
# { # Hidden
# 'freemem':{l_not(isintent_out):'\tPy_XDECREF(capi_#varname#_tmp);'},
# '_check':l_and(isarray,isintent_hide)
# },
# Scalararray
{ # Common
'_check':l_and(isarray, l_not(iscomplexarray))
}, { # Not hidden
'_check':l_and(isarray, l_not(iscomplexarray), isintent_nothide)
},
# Integer*1 array
{'need':'#ctype#',
'_check':isint1array,
'_depend':''
},
# Integer*-1 array
{'need':'#ctype#',
'_check':isunsigned_chararray,
'_depend':''
},
# Integer*-2 array
{'need':'#ctype#',
'_check':isunsigned_shortarray,
'_depend':''
},
# Integer*-8 array
{'need':'#ctype#',
'_check':isunsigned_long_longarray,
'_depend':''
},
# Complexarray
{'need':'#ctype#',
'_check':iscomplexarray,
'_depend':''
},
# Stringarray
{
'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
'need':'string',
'_check':isstringarray
}
]
################# Rules for checking ###############
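# Check rules wrap the generated conversion code in CHECKSCALAR / CHECKSTRING /
# CHECKARRAY / CHECKGENERIC macro blocks; buildapi() applies them once for each
# check() clause attached to a variable.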
check_rules=[
{
'frompyobj':{debugcapi:'\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
'need':'len..'
}, {
'frompyobj':'\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj':'\t} /*CHECKSCALAR(#check#)*/',
'need':'CHECKSCALAR',
'_check':l_and(isscalar, l_not(iscomplex)),
'_break':''
}, {
'frompyobj':'\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj':'\t} /*CHECKSTRING(#check#)*/',
'need':'CHECKSTRING',
'_check':isstring,
'_break':''
}, {
'need':'CHECKARRAY',
'frompyobj':'\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj':'\t} /*CHECKARRAY(#check#)*/',
'_check':isarray,
'_break':''
}, {
'need': 'CHECKGENERIC',
'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/',
}
]
########## Applying the rules. No need to modify what follows #############
#################### Build C/API module #######################
def buildmodule(m, um):
"""
    Build the C/API extension module for `m` and return a dict of generated
    source file paths ('csrc', plus 'fsrc'/'ltx' when Fortran wrappers or
    LaTeX documentation are produced).
"""
global f2py_version, options
outmess('\tBuilding module "%s"...\n'%(m['name']))
ret = {}
mod_rules=defmod_rules[:]
vrd=modsign2map(m)
rd=dictappend({'f2py_version':f2py_version}, vrd)
funcwrappers = []
funcwrappers2 = [] # F90 codes
for n in m['interfaced']:
nb=None
for bi in m['body']:
if not bi['block']=='interface':
errmess('buildmodule: Expected interface block. Skipping.\n')
continue
for b in bi['body']:
if b['name']==n: nb=b;break
if not nb:
            errmess('buildmodule: Could not find the body of interfaced routine "%s". Skipping.\n'%(n))
continue
nb_list = [nb]
if 'entry' in nb:
for k, a in nb['entry'].items():
nb1 = copy.deepcopy(nb)
del nb1['entry']
nb1['name'] = k
nb1['args'] = a
nb_list.append(nb1)
for nb in nb_list:
api, wrap=buildapi(nb)
if wrap:
if ismoduleroutine(nb):
funcwrappers2.append(wrap)
else:
funcwrappers.append(wrap)
ar=applyrules(api, vrd)
rd=dictappend(rd, ar)
# Construct COMMON block support
cr, wrap = common_rules.buildhooks(m)
if wrap:
funcwrappers.append(wrap)
ar=applyrules(cr, vrd)
rd=dictappend(rd, ar)
# Construct F90 module support
mr, wrap = f90mod_rules.buildhooks(m)
if wrap:
funcwrappers2.append(wrap)
ar=applyrules(mr, vrd)
rd=dictappend(rd, ar)
for u in um:
ar=use_rules.buildusevars(u, m['use'][u['name']])
rd=dictappend(rd, ar)
needs=cfuncs.get_needs()
code={}
for n in needs.keys():
code[n]=[]
for k in needs[n]:
c=''
if k in cfuncs.includes0:
c=cfuncs.includes0[k]
elif k in cfuncs.includes:
c=cfuncs.includes[k]
elif k in cfuncs.userincludes:
c=cfuncs.userincludes[k]
elif k in cfuncs.typedefs:
c=cfuncs.typedefs[k]
elif k in cfuncs.typedefs_generated:
c=cfuncs.typedefs_generated[k]
elif k in cfuncs.cppmacros:
c=cfuncs.cppmacros[k]
elif k in cfuncs.cfuncs:
c=cfuncs.cfuncs[k]
elif k in cfuncs.callbacks:
c=cfuncs.callbacks[k]
elif k in cfuncs.f90modhooks:
c=cfuncs.f90modhooks[k]
elif k in cfuncs.commonhooks:
c=cfuncs.commonhooks[k]
else:
errmess('buildmodule: unknown need %s.\n'%(repr(k)));continue
code[n].append(c)
mod_rules.append(code)
for r in mod_rules:
if ('_check' in r and r['_check'](m)) or ('_check' not in r):
ar=applyrules(r, vrd, m)
rd=dictappend(rd, ar)
ar=applyrules(module_rules, rd)
fn = os.path.join(options['buildpath'], vrd['coutput'])
ret['csrc'] = fn
f=open(fn, 'w')
f.write(ar['modulebody'].replace('\t', 2*' '))
f.close()
outmess('\tWrote C/API module "%s" to file "%s"\n'%(m['name'], fn))
if options['dorestdoc']:
fn = os.path.join(options['buildpath'], vrd['modulename']+'module.rest')
f=open(fn, 'w')
f.write('.. -*- rest -*-\n')
f.write('\n'.join(ar['restdoc']))
f.close()
outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'], vrd['modulename']))
if options['dolatexdoc']:
fn = os.path.join(options['buildpath'], vrd['modulename']+'module.tex')
ret['ltx'] = fn
f=open(fn, 'w')
f.write('%% This file is auto-generated with f2py (version:%s)\n'%(f2py_version))
if 'shortlatex' not in options:
f.write('\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
f.write('\n'.join(ar['latexdoc']))
if 'shortlatex' not in options:
f.write('\\end{document}')
f.close()
outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'], vrd['modulename']))
if funcwrappers:
wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output'])
ret['fsrc'] = wn
f=open(wn, 'w')
f.write('C -*- fortran -*-\n')
f.write('C This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
f.write('C It contains Fortran 77 wrappers to fortran functions.\n')
lines = []
for l in ('\n\n'.join(funcwrappers)+'\n').split('\n'):
if l and l[0]==' ':
while len(l)>=66:
lines.append(l[:66]+'\n &')
l = l[66:]
lines.append(l+'\n')
else: lines.append(l+'\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
f.close()
outmess('\tFortran 77 wrappers are saved to "%s"\n'%(wn))
if funcwrappers2:
wn = os.path.join(options['buildpath'], '%s-f2pywrappers2.f90'%(vrd['modulename']))
ret['fsrc'] = wn
f=open(wn, 'w')
f.write('! -*- f90 -*-\n')
f.write('! This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
f.write('! It contains Fortran 90 wrappers to fortran functions.\n')
lines = []
for l in ('\n\n'.join(funcwrappers2)+'\n').split('\n'):
if len(l)>72 and l[0]==' ':
lines.append(l[:72]+'&\n &')
l = l[72:]
while len(l)>66:
lines.append(l[:66]+'&\n &')
l = l[66:]
lines.append(l+'\n')
else: lines.append(l+'\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
f.close()
outmess('\tFortran 90 wrappers are saved to "%s"\n'%(wn))
return ret
################## Build C/API function #############
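# Ordinal suffixes used in argument-position messages, keyed by the last
# decimal digit of the position (1 -> 'st', 2 -> 'nd', 3 -> 'rd', else 'th').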
stnd={1:'st',2:'nd',3:'rd',4:'th',5:'th',6:'th',7:'th',8:'th',9:'th',0:'th'}
def buildapi(rout):
rout, wrap = func2subr.assubr(rout)
args, depargs=getargs2(rout)
capi_maps.depargs=depargs
var=rout['vars']
auxvars = [a for a in var.keys() if isintent_aux(var[a])]
if ismoduleroutine(rout):
outmess('\t\t\tConstructing wrapper function "%s.%s"...\n'%(rout['modulename'], rout['name']))
else:
outmess('\t\tConstructing wrapper function "%s"...\n'%(rout['name']))
# Routine
vrd=routsign2map(rout)
rd=dictappend({}, vrd)
for r in rout_rules:
if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
ar=applyrules(r, vrd, rout)
rd=dictappend(rd, ar)
# Args
nth, nthk=0, 0
savevrd={}
for a in args:
vrd=sign2map(a, var[a])
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
if not isintent_hide(var[a]):
if not isoptional(var[a]):
nth=nth+1
vrd['nth']=repr(nth)+stnd[nth%10]+' argument'
else:
nthk=nthk+1
vrd['nth']=repr(nthk)+stnd[nthk%10]+' keyword'
else: vrd['nth']='hidden'
savevrd[a]=vrd
for r in _rules:
if '_depend' in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar=applyrules(r, vrd, var[a])
rd=dictappend(rd, ar)
if '_break' in r:
break
for a in depargs:
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
vrd=savevrd[a]
for r in _rules:
if '_depend' not in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar=applyrules(r, vrd, var[a])
rd=dictappend(rd, ar)
if '_break' in r:
break
if 'check' in var[a]:
for c in var[a]['check']:
vrd['check']=c
ar=applyrules(check_rules, vrd, var[a])
rd=dictappend(rd, ar)
if isinstance(rd['cleanupfrompyobj'], list):
rd['cleanupfrompyobj'].reverse()
if isinstance(rd['closepyobjfrom'], list):
rd['closepyobjfrom'].reverse()
rd['docsignature']=stripcomma(replace('#docsign##docsignopt##docsignxa#',
{'docsign':rd['docsign'],
'docsignopt':rd['docsignopt'],
'docsignxa':rd['docsignxa']}))
optargs=stripcomma(replace('#docsignopt##docsignxa#',
{'docsignxa':rd['docsignxashort'],
'docsignopt':rd['docsignoptshort']}
))
if optargs=='':
rd['docsignatureshort']=stripcomma(replace('#docsign#', {'docsign':rd['docsign']}))
else:
rd['docsignatureshort']=replace('#docsign#[#docsignopt#]',
{'docsign': rd['docsign'],
'docsignopt': optargs,
})
rd['latexdocsignatureshort']=rd['docsignatureshort'].replace('_', '\\_')
rd['latexdocsignatureshort']=rd['latexdocsignatureshort'].replace(',', ', ')
cfs=stripcomma(replace('#callfortran##callfortranappend#', {'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
if len(rd['callfortranappend'])>1:
rd['callcompaqfortran']=stripcomma(replace('#callfortran# 0,#callfortranappend#', {'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
else:
rd['callcompaqfortran']=cfs
rd['callfortran']=cfs
if isinstance(rd['docreturn'], list):
rd['docreturn']=stripcomma(replace('#docreturn#', {'docreturn':rd['docreturn']}))+' = '
rd['docstrsigns']=[]
rd['latexdocstrsigns']=[]
for k in ['docstrreq', 'docstropt', 'docstrout', 'docstrcbs']:
if k in rd and isinstance(rd[k], list):
rd['docstrsigns']=rd['docstrsigns']+rd[k]
k='latex'+k
if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\
['\\begin{description}']+rd[k][1:]+\
['\\end{description}']
# Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720
if rd['keyformat'] or rd['xaformat']:
argformat = rd['argformat']
if isinstance(argformat, list):
argformat.append('|')
else:
assert isinstance(argformat, str), repr((argformat, type(argformat)))
rd['argformat'] += '|'
ar=applyrules(routine_rules, rd)
if ismoduleroutine(rout):
outmess('\t\t\t %s\n'%(ar['docshort']))
else:
outmess('\t\t %s\n'%(ar['docshort']))
return ar, wrap
#################### EOF rules.py #######################
"""Tests for Plex config flow."""
import copy
from http import HTTPStatus
import ssl
from unittest.mock import patch
import plexapi.exceptions
import pytest
import requests.exceptions
from homeassistant.components.plex import config_flow
from homeassistant.components.plex.const import (
AUTOMATIC_SETUP_STRING,
CONF_IGNORE_NEW_SHARED_USERS,
CONF_IGNORE_PLEX_WEB_CLIENTS,
CONF_MONITORED_USERS,
CONF_SERVER,
CONF_SERVER_IDENTIFIER,
CONF_USE_EPISODE_ART,
DOMAIN,
MANUAL_SETUP_STRING,
PLEX_SERVER_CONFIG,
SERVERS,
)
from homeassistant.config_entries import (
SOURCE_INTEGRATION_DISCOVERY,
SOURCE_REAUTH,
SOURCE_USER,
ConfigEntryState,
)
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
CONF_SSL,
CONF_TOKEN,
CONF_URL,
CONF_VERIFY_SSL,
Platform,
)
from .const import DEFAULT_OPTIONS, MOCK_SERVERS, MOCK_TOKEN, PLEX_DIRECT_URL
from .helpers import trigger_plex_update, wait_for_debouncer
from .mock_classes import MockGDM
from tests.common import MockConfigEntry
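# Most flows below follow the same sequence: start the "user" step, patch the
# plexauth helpers so no real Plex OAuth round-trip happens, then advance the
# flow through its "external" and "external_done" states before asserting the
# resulting form, created entry, or abort.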
async def test_bad_credentials(hass, current_request_with_host):
"""Test when provided credentials are rejected."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch(
"plexapi.myplex.MyPlexAccount", side_effect=plexapi.exceptions.Unauthorized
), patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value="BAD TOKEN"
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "external"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "external_done"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"][CONF_TOKEN] == "faulty_credentials"
async def test_bad_hostname(hass, mock_plex_calls, current_request_with_host):
"""Test when an invalid address is provided."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch(
"plexapi.myplex.MyPlexResource.connect",
side_effect=requests.exceptions.ConnectionError,
), patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value=MOCK_TOKEN
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "external"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "external_done"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"][CONF_HOST] == "not_found"
async def test_unknown_exception(hass, current_request_with_host):
"""Test when an unknown exception is encountered."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch("plexapi.myplex.MyPlexAccount", side_effect=Exception), patch(
"plexauth.PlexAuth.initiate_auth"
), patch("plexauth.PlexAuth.token", return_value="MOCK_TOKEN"):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "external"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "external_done"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "abort"
assert result["reason"] == "unknown"
async def test_no_servers_found(
hass, mock_plex_calls, requests_mock, empty_payload, current_request_with_host
):
"""Test when no servers are on an account."""
requests_mock.get("https://plex.tv/api/resources", text=empty_payload)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value=MOCK_TOKEN
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "external"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "external_done"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"]["base"] == "no_servers"
async def test_single_available_server(
hass, mock_plex_calls, current_request_with_host
):
"""Test creating an entry with one server available."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value=MOCK_TOKEN
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "external"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "external_done"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "create_entry"
server_id = result["data"][CONF_SERVER_IDENTIFIER]
mock_plex_server = hass.data[DOMAIN][SERVERS][server_id]
assert result["title"] == mock_plex_server.url_in_use
assert result["data"][CONF_SERVER] == mock_plex_server.friendly_name
assert (
result["data"][CONF_SERVER_IDENTIFIER]
== mock_plex_server.machine_identifier
)
assert (
result["data"][PLEX_SERVER_CONFIG][CONF_URL] == mock_plex_server.url_in_use
)
assert result["data"][PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_multiple_servers_with_selection(
hass,
mock_plex_calls,
requests_mock,
plextv_resources_base,
current_request_with_host,
):
"""Test creating an entry with multiple servers available."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
requests_mock.get(
"https://plex.tv/api/resources",
text=plextv_resources_base.format(
first_server_enabled=1, second_server_enabled=1
),
)
with patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value=MOCK_TOKEN
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "external"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "external_done"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["step_id"] == "select_server"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_SERVER: MOCK_SERVERS[0][CONF_SERVER]},
)
assert result["type"] == "create_entry"
server_id = result["data"][CONF_SERVER_IDENTIFIER]
mock_plex_server = hass.data[DOMAIN][SERVERS][server_id]
assert result["title"] == mock_plex_server.url_in_use
assert result["data"][CONF_SERVER] == mock_plex_server.friendly_name
assert (
result["data"][CONF_SERVER_IDENTIFIER]
== mock_plex_server.machine_identifier
)
assert (
result["data"][PLEX_SERVER_CONFIG][CONF_URL] == mock_plex_server.url_in_use
)
assert result["data"][PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_only_non_present_servers(
hass,
mock_plex_calls,
requests_mock,
plextv_resources_base,
current_request_with_host,
):
"""Test creating an entry with one server available."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
requests_mock.get(
"https://plex.tv/api/resources",
text=plextv_resources_base.format(
first_server_enabled=0, second_server_enabled=0
),
)
with patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value=MOCK_TOKEN
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "external"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "external_done"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["step_id"] == "select_server"
async def test_adding_last_unconfigured_server(
hass,
mock_plex_calls,
requests_mock,
plextv_resources_base,
current_request_with_host,
):
"""Test automatically adding last unconfigured server when multiple servers on account."""
MockConfigEntry(
domain=DOMAIN,
data={
CONF_SERVER_IDENTIFIER: MOCK_SERVERS[1][CONF_SERVER_IDENTIFIER],
CONF_SERVER: MOCK_SERVERS[1][CONF_SERVER],
},
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
requests_mock.get(
"https://plex.tv/api/resources",
text=plextv_resources_base.format(
first_server_enabled=1, second_server_enabled=1
),
)
with patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value=MOCK_TOKEN
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "external"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "external_done"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "create_entry"
server_id = result["data"][CONF_SERVER_IDENTIFIER]
mock_plex_server = hass.data[DOMAIN][SERVERS][server_id]
assert result["title"] == mock_plex_server.url_in_use
assert result["data"][CONF_SERVER] == mock_plex_server.friendly_name
assert (
result["data"][CONF_SERVER_IDENTIFIER]
== mock_plex_server.machine_identifier
)
assert (
result["data"][PLEX_SERVER_CONFIG][CONF_URL] == mock_plex_server.url_in_use
)
assert result["data"][PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_all_available_servers_configured(
hass,
entry,
requests_mock,
plextv_account,
plextv_resources_base,
current_request_with_host,
):
"""Test when all available servers are already configured."""
entry.add_to_hass(hass)
MockConfigEntry(
domain=DOMAIN,
data={
CONF_SERVER_IDENTIFIER: MOCK_SERVERS[1][CONF_SERVER_IDENTIFIER],
CONF_SERVER: MOCK_SERVERS[1][CONF_SERVER],
},
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
requests_mock.get("https://plex.tv/users/account", text=plextv_account)
requests_mock.get(
"https://plex.tv/api/resources",
text=plextv_resources_base.format(
first_server_enabled=1, second_server_enabled=1
),
)
with patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value=MOCK_TOKEN
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "external"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "external_done"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "abort"
assert result["reason"] == "all_configured"
async def test_option_flow(hass, entry, mock_plex_server):
"""Test config options flow selection."""
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state is ConfigEntryState.LOADED
result = await hass.config_entries.options.async_init(
entry.entry_id, context={"source": "test"}, data=None
)
assert result["type"] == "form"
assert result["step_id"] == "plex_mp_settings"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_USE_EPISODE_ART: True,
CONF_IGNORE_NEW_SHARED_USERS: True,
CONF_MONITORED_USERS: list(mock_plex_server.accounts),
},
)
assert result["type"] == "create_entry"
assert result["data"] == {
Platform.MEDIA_PLAYER: {
CONF_USE_EPISODE_ART: True,
CONF_IGNORE_NEW_SHARED_USERS: True,
CONF_MONITORED_USERS: {
user: {"enabled": True} for user in mock_plex_server.accounts
},
CONF_IGNORE_PLEX_WEB_CLIENTS: False,
}
}
async def test_missing_option_flow(hass, entry, mock_plex_server):
"""Test config options flow selection when no options stored."""
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state is ConfigEntryState.LOADED
result = await hass.config_entries.options.async_init(
entry.entry_id, context={"source": "test"}, data=None
)
assert result["type"] == "form"
assert result["step_id"] == "plex_mp_settings"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_USE_EPISODE_ART: True,
CONF_IGNORE_NEW_SHARED_USERS: True,
CONF_MONITORED_USERS: list(mock_plex_server.accounts),
},
)
assert result["type"] == "create_entry"
assert result["data"] == {
Platform.MEDIA_PLAYER: {
CONF_USE_EPISODE_ART: True,
CONF_IGNORE_NEW_SHARED_USERS: True,
CONF_MONITORED_USERS: {
user: {"enabled": True} for user in mock_plex_server.accounts
},
CONF_IGNORE_PLEX_WEB_CLIENTS: False,
}
}
async def test_option_flow_new_users_available(hass, entry, setup_plex_server):
"""Test config options multiselect defaults when new Plex users are seen."""
OPTIONS_OWNER_ONLY = copy.deepcopy(DEFAULT_OPTIONS)
OPTIONS_OWNER_ONLY[Platform.MEDIA_PLAYER][CONF_MONITORED_USERS] = {
"User 1": {"enabled": True}
}
entry.options = OPTIONS_OWNER_ONLY
mock_plex_server = await setup_plex_server(config_entry=entry)
await hass.async_block_till_done()
server_id = mock_plex_server.machine_identifier
monitored_users = hass.data[DOMAIN][SERVERS][server_id].option_monitored_users
new_users = [x for x in mock_plex_server.accounts if x not in monitored_users]
assert len(monitored_users) == 1
assert len(new_users) == 2
result = await hass.config_entries.options.async_init(
entry.entry_id, context={"source": "test"}, data=None
)
assert result["type"] == "form"
assert result["step_id"] == "plex_mp_settings"
multiselect_defaults = result["data_schema"].schema["monitored_users"].options
assert "[Owner]" in multiselect_defaults["User 1"]
for user in new_users:
assert "[New]" in multiselect_defaults[user]
async def test_external_timed_out(hass, current_request_with_host):
"""Test when external flow times out."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value=None
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "external"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "external_done"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "abort"
assert result["reason"] == "token_request_timeout"
async def test_callback_view(hass, hass_client_no_auth, current_request_with_host):
"""Test callback view."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value=MOCK_TOKEN
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "external"
client = await hass_client_no_auth()
forward_url = f'{config_flow.AUTH_CALLBACK_PATH}?flow_id={result["flow_id"]}'
resp = await client.get(forward_url)
assert resp.status == HTTPStatus.OK
async def test_manual_config(hass, mock_plex_calls, current_request_with_host):
"""Test creating via manual configuration."""
class WrongCertValidaitionException(requests.exceptions.SSLError):
"""Mock the exception showing an unmatched error."""
def __init__(self):
self.__context__ = ssl.SSLCertVerificationError(
"some random message that doesn't match"
)
# Basic mode
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["data_schema"] is None
hass.config_entries.flow.async_abort(result["flow_id"])
# Advanced automatic
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": SOURCE_USER, "show_advanced_options": True},
)
assert result["data_schema"] is not None
assert result["type"] == "form"
assert result["step_id"] == "user_advanced"
with patch("plexauth.PlexAuth.initiate_auth"):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"setup_method": AUTOMATIC_SETUP_STRING}
)
assert result["type"] == "external"
hass.config_entries.flow.async_abort(result["flow_id"])
# Advanced manual
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": SOURCE_USER, "show_advanced_options": True},
)
assert result["data_schema"] is not None
assert result["type"] == "form"
assert result["step_id"] == "user_advanced"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"setup_method": MANUAL_SETUP_STRING}
)
assert result["type"] == "form"
assert result["step_id"] == "manual_setup"
MANUAL_SERVER = {
CONF_HOST: MOCK_SERVERS[0][CONF_HOST],
CONF_PORT: MOCK_SERVERS[0][CONF_PORT],
CONF_SSL: False,
CONF_VERIFY_SSL: True,
CONF_TOKEN: MOCK_TOKEN,
}
MANUAL_SERVER_NO_HOST_OR_TOKEN = {
CONF_PORT: MOCK_SERVERS[0][CONF_PORT],
CONF_SSL: False,
CONF_VERIFY_SSL: True,
}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MANUAL_SERVER_NO_HOST_OR_TOKEN
)
assert result["type"] == "form"
assert result["step_id"] == "manual_setup"
assert result["errors"]["base"] == "host_or_token"
with patch(
"plexapi.server.PlexServer",
side_effect=requests.exceptions.SSLError,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MANUAL_SERVER
)
assert result["type"] == "form"
assert result["step_id"] == "manual_setup"
assert result["errors"]["base"] == "ssl_error"
with patch(
"plexapi.server.PlexServer",
side_effect=WrongCertValidaitionException,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MANUAL_SERVER
)
assert result["type"] == "form"
assert result["step_id"] == "manual_setup"
assert result["errors"]["base"] == "ssl_error"
with patch(
"homeassistant.components.plex.PlexServer.connect",
side_effect=requests.exceptions.SSLError,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MANUAL_SERVER
)
assert result["type"] == "form"
assert result["step_id"] == "manual_setup"
assert result["errors"]["base"] == "ssl_error"
with patch("homeassistant.components.plex.PlexWebsocket", autospec=True), patch(
"homeassistant.components.plex.GDM", return_value=MockGDM(disabled=True)
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MANUAL_SERVER
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
server_id = result["data"][CONF_SERVER_IDENTIFIER]
mock_plex_server = hass.data[DOMAIN][SERVERS][server_id]
assert result["title"] == mock_plex_server.url_in_use
assert result["data"][CONF_SERVER] == mock_plex_server.friendly_name
assert result["data"][CONF_SERVER_IDENTIFIER] == mock_plex_server.machine_identifier
assert result["data"][PLEX_SERVER_CONFIG][CONF_URL] == mock_plex_server.url_in_use
assert result["data"][PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
async def test_manual_config_with_token(
hass, mock_plex_calls, requests_mock, empty_library, empty_payload
):
"""Test creating via manual configuration with only token."""
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": SOURCE_USER, "show_advanced_options": True},
)
assert result["type"] == "form"
assert result["step_id"] == "user_advanced"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"setup_method": MANUAL_SETUP_STRING}
)
assert result["type"] == "form"
assert result["step_id"] == "manual_setup"
with patch(
"homeassistant.components.plex.GDM", return_value=MockGDM(disabled=True)
), patch("homeassistant.components.plex.PlexWebsocket", autospec=True):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_TOKEN: MOCK_TOKEN}
)
assert result["type"] == "create_entry"
server_id = result["data"][CONF_SERVER_IDENTIFIER]
mock_plex_server = hass.data[DOMAIN][SERVERS][server_id]
mock_url = mock_plex_server.url_in_use
assert result["title"] == mock_url
assert result["data"][CONF_SERVER] == mock_plex_server.friendly_name
assert result["data"][CONF_SERVER_IDENTIFIER] == mock_plex_server.machine_identifier
assert result["data"][PLEX_SERVER_CONFIG][CONF_URL] == mock_url
assert result["data"][PLEX_SERVER_CONFIG][CONF_TOKEN] == MOCK_TOKEN
# Complete Plex integration setup before teardown
requests_mock.get(f"{mock_url}/library", text=empty_library)
requests_mock.get(f"{mock_url}/library/sections", text=empty_payload)
await hass.async_block_till_done()
async def test_setup_with_limited_credentials(hass, entry, setup_plex_server):
"""Test setup with a user with limited permissions."""
with patch(
"plexapi.server.PlexServer.systemAccounts",
side_effect=plexapi.exceptions.Unauthorized,
) as mock_accounts:
mock_plex_server = await setup_plex_server()
assert mock_accounts.called
plex_server = hass.data[DOMAIN][SERVERS][mock_plex_server.machine_identifier]
assert len(plex_server.accounts) == 0
assert plex_server.owner is None
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state is ConfigEntryState.LOADED
async def test_integration_discovery(hass):
"""Test integration self-discovery."""
mock_gdm = MockGDM()
with patch("homeassistant.components.plex.config_flow.GDM", return_value=mock_gdm):
await config_flow.async_discover(hass)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
flow = flows[0]
assert flow["handler"] == DOMAIN
assert flow["context"]["source"] == SOURCE_INTEGRATION_DISCOVERY
assert (
flow["context"]["unique_id"]
== mock_gdm.entries[0]["data"]["Resource-Identifier"]
)
assert flow["step_id"] == "user"
async def test_trigger_reauth(
hass, entry, mock_plex_server, mock_websocket, current_request_with_host
):
"""Test setup and reauthorization of a Plex token."""
assert entry.state is ConfigEntryState.LOADED
with patch(
"plexapi.server.PlexServer.clients", side_effect=plexapi.exceptions.Unauthorized
), patch("plexapi.server.PlexServer", side_effect=plexapi.exceptions.Unauthorized):
trigger_plex_update(mock_websocket)
await wait_for_debouncer(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state is not ConfigEntryState.LOADED
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
assert flows[0]["context"]["source"] == SOURCE_REAUTH
flow_id = flows[0]["flow_id"]
with patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value="BRAND_NEW_TOKEN"
):
result = await hass.config_entries.flow.async_configure(flow_id, user_input={})
assert result["type"] == "external"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "external_done"
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "abort"
assert result["reason"] == "reauth_successful"
assert result["flow_id"] == flow_id
assert len(hass.config_entries.flow.async_progress()) == 0
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state is ConfigEntryState.LOADED
assert entry.data[CONF_SERVER] == mock_plex_server.friendly_name
assert entry.data[CONF_SERVER_IDENTIFIER] == mock_plex_server.machine_identifier
assert entry.data[PLEX_SERVER_CONFIG][CONF_URL] == PLEX_DIRECT_URL
assert entry.data[PLEX_SERVER_CONFIG][CONF_TOKEN] == "BRAND_NEW_TOKEN"
async def test_client_request_missing(hass):
"""Test when client headers are not set properly."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value=None
):
with pytest.raises(RuntimeError):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
async def test_client_header_issues(hass, current_request_with_host):
"""Test when client headers are not set properly."""
class MockRequest:
headers = {}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
with patch("plexauth.PlexAuth.initiate_auth"), patch(
"plexauth.PlexAuth.token", return_value=None
), patch(
"homeassistant.components.http.current_request.get", return_value=MockRequest()
):
with pytest.raises(RuntimeError):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
"""
A VIVO harvester for the SHARE project.
This harvester makes several SPARQL queries to a VIVO SPARQL endpoint; the
information needed to access the VIVO endpoint must be provided in the local.py
file. A mapping of the SPARQL queries used to harvest documents from the VIVO
endpoint is defined in the sparql_mapping.py file.
"""
from __future__ import unicode_literals
import json
import logging
from datetime import date, timedelta
from six.moves import xrange
from SPARQLWrapper import SPARQLWrapper, JSON
from scrapi import settings
from scrapi.settings import sparql_mapping as mapping
from scrapi.base import JSONHarvester
from scrapi.linter.document import RawDocument
from scrapi.base.helpers import build_properties, datetime_formatter
logger = logging.getLogger(__name__)
def process_object_uris(pmid, doi):
ret = []
if pmid:
pmid = 'http://www.ncbi.nlm.nih.gov/pubmed/{}'.format(pmid)
ret.append(pmid)
if doi:
doi = 'https://dx.doi.org/{}'.format(doi)
ret.append(doi)
return ret
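# For illustration, process_object_uris('12345678', '10.1000/xyz') would return
# ['http://www.ncbi.nlm.nih.gov/pubmed/12345678', 'https://dx.doi.org/10.1000/xyz'];
# either identifier may be omitted by passing a falsy value.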
class VIVOHarvester(JSONHarvester):
short_name = 'vivo'
long_name = 'VIVO'
url = settings.VIVO_ACCESS['url']
base_url = settings.VIVO_ACCESS['query_endpoint']
sparql_wrapper = SPARQLWrapper(base_url)
sparql_wrapper.setReturnFormat(JSON)
sparql_wrapper.addParameter("email", settings.VIVO_ACCESS['username'])
sparql_wrapper.addParameter("password", settings.VIVO_ACCESS['password'])
sparql_wrapper.method = 'GET'
DEFAULT_ENCODING = 'UTF-8'
QUERY_TEMPLATE = """
PREFIX vivo: <http://vivoweb.org/ontology/core#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX bibo: <http://purl.org/ontology/bibo/>
PREFIX foaf: <http://xmlns.com/foaf/0.1/>
PREFIX vitro: <http://vitro.mannlib.cornell.edu/ns/vitro/0.7#>
PREFIX dc: <http://purl.org/dc/terms/>
PREFIX vcard: <http://www.w3.org/2006/vcard/ns#>
PREFIX obo: <http://purl.obolibrary.org/obo/>
SELECT {}
{{
{}
}}
"""
GET_TOTAL_QUERY = """
PREFIX vivo: <http://vivoweb.org/ontology/core#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
SELECT (COUNT(*) AS ?total)
{{
?s vivo:dateTimeValue ?dateURI .
?dateURI vivo:dateTime ?date .
FILTER (strdt(?date, xsd:date) >= "{}"^^xsd:date && strdt(?date, xsd:date) <= "{}"^^xsd:date)
}}
"""
GET_URIS_QUERY = """
PREFIX vivo: <http://vivoweb.org/ontology/core#>
PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
SELECT ?uri
{{
?uri vivo:dateTimeValue ?dateUri .
?dateUri vivo:dateTime ?date .
FILTER (strdt(?date, xsd:date) >= "{}"^^xsd:date && strdt(?date, xsd:date) <= "{}"^^xsd:date)
}} LIMIT {} OFFSET {}
"""
record_encoding = None
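    # Both date-window queries above are filled with (start_date, end_date) in
    # ISO format; GET_URIS_QUERY additionally takes LIMIT and OFFSET so that
    # get_uris() can page through results. The helpers below issue one small
    # SPARQL query per mapped field: get_string returns a single value,
    # get_array a list of values for one variable, and get_dict a list of
    # per-field dicts for multi-variable patterns.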
def get_string(self, uri, sparql_map):
variable = sparql_map['name']
pattern = sparql_map['pattern'].format(uri)
sparql_query = self.QUERY_TEMPLATE.format('?' + variable, pattern)
self.sparql_wrapper.setQuery(sparql_query)
result = self.sparql_wrapper.query()
result = result.convert()
if result['results']['bindings']:
return result['results']['bindings'][0][variable]['value']
else:
return ''
def get_array(self, uri, sparql_map):
variable = sparql_map['fields'][0]
pattern = sparql_map['pattern'].format(uri)
sparql_query = self.QUERY_TEMPLATE.format('?' + variable, pattern)
self.sparql_wrapper.setQuery(sparql_query)
results = self.sparql_wrapper.query()
results = results.convert()
return [result[variable]['value'] for result in results['results']['bindings']]
def get_dict(self, uri, sparql_map):
variables = ''
for variable in sparql_map['fields']:
variables += '?' + variable + ' '
pattern = sparql_map['pattern'].format(uri)
sparql_query = self.QUERY_TEMPLATE.format(variables, pattern)
self.sparql_wrapper.setQuery(sparql_query)
results = self.sparql_wrapper.query()
results = results.convert()
ret = []
for result in results['results']['bindings']:
item = {}
for variable in sparql_map['fields']:
item[variable] = result[variable]['value']
ret.append(item)
return ret
def get_records(self, uris, sparql_mapping):
records = []
for uri in uris:
record = {}
record['uri'] = uri
for sparql_map in sparql_mapping:
if sparql_map['type'] == 'string':
record[sparql_map['name']] = self.get_string(uri, sparql_map)
if sparql_map['type'] == 'array':
record[sparql_map['name']] = self.get_array(uri, sparql_map)
if sparql_map['type'] == 'dict':
record[sparql_map['name']] = self.get_dict(uri, sparql_map)
record['authors'] = self.complete_authors(record['authors'])
records.append(record)
return records
def get_total(self, start_date, end_date):
query_str = self.GET_TOTAL_QUERY.format(start_date.isoformat(), end_date.isoformat())
self.sparql_wrapper.setQuery(query_str)
result = self.sparql_wrapper.query()
result = result.convert()
return int(result['results']['bindings'][0]['total']['value'])
def get_uris(self, start_date, end_date, limit, offset):
query_str = self.GET_URIS_QUERY.format(start_date.isoformat(), end_date.isoformat(), limit, offset)
self.sparql_wrapper.setQuery(query_str)
results = self.sparql_wrapper.query()
results = results.convert()
return [result['uri']['value'] for result in results['results']['bindings']]
def complete_authors(self, authors):
for author in authors:
email = self.get_string(author['sameAs'], mapping.AUTHOR_MAPPING['email'])
if email:
author['email'] = email
affiliation = self.get_dict(author['sameAs'], mapping.AUTHOR_MAPPING['affiliation'])
if affiliation:
author['affiliation'] = affiliation
orcidId = self.get_string(author['sameAs'], mapping.AUTHOR_MAPPING['orcidId'])
author['sameAs'] = [author['sameAs']]
if orcidId:
author['sameAs'].append(orcidId)
return authors
@property
def schema(self):
return {
'title': ('/title', lambda x: x if x else ''),
'providerUpdatedDateTime': ('/date', datetime_formatter),
'uris': {
'canonicalUri': '/uri',
'providerUris': ('/uri', lambda x: [x]),
'objectUris': ('/pmid', '/doi', process_object_uris)
},
'contributors': '/authors',
'subjects': '/subjects',
'tags': '/keywords',
'publisher': ('/publisher', lambda x: {'name': x} if x else ''),
'otherProperties': build_properties(
('journalTitle', '/journalTitle'),
('abstract', ('/abstract', lambda x: x if x else '')),
('type', '/types'),
('ISSN', ('/issn', lambda x: x if x else '')),
('number', '/number'),
('ISBN', '/isbn'),
('startPage', '/startPage'),
('endPage', '/endPage'),
('volume', '/volume'),
)
}
def harvest(self, start_date=None, end_date=None):
start_date = start_date or date.today() - timedelta(settings.DAYS_BACK)
end_date = end_date or date.today()
total = self.get_total(start_date, end_date)
logger.info('{} documents to be harvested'.format(total))
doc_list = []
for i in xrange(0, total, 1000):
uris = self.get_uris(start_date, end_date, 1000, i)
records = self.get_records(uris, mapping.DOCUMENT_MAPPING)
logger.info('Harvested {} documents'.format(i + len(records)))
for record in records:
if 'doi' in record:
doc_id = record['doi']
else:
doc_id = record['uri']
doc_list.append(RawDocument({
'doc': json.dumps(record),
'source': self.short_name,
'docID': doc_id,
'filetype': 'json'
}))
return doc_list
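# Illustrative sketch (hypothetical patterns; the real mapping lives in
# scrapi/settings/sparql_mapping.py): each DOCUMENT_MAPPING entry tells
# get_records() which helper to call ('string', 'array' or 'dict') and how to
# build the per-document SPARQL pattern, where {} is replaced by the URI.
_EXAMPLE_DOCUMENT_MAPPING = [
    {'type': 'string', 'name': 'title',
     'pattern': '<{}> rdfs:label ?title .'},
    {'type': 'array', 'name': 'keywords', 'fields': ['keyword'],
     'pattern': '<{}> vivo:freetextKeyword ?keyword .'},
    {'type': 'dict', 'name': 'authors', 'fields': ['name', 'sameAs'],
     'pattern': ('<{}> vivo:relatedBy ?rel . ?rel vivo:relates ?sameAs . '
                 '?sameAs rdfs:label ?name .')},
]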
|
|
#!/usr/bin/env python2.7
import sys, os, argparse, subprocess, json
def main():
readLengths = ['32', '36', '40', '50', '58', '72', '76', '100']
genomes = ['hg38', 'hg19', 'mm9', 'mm10']
dataTypes = ['DNase-seq', 'ChIP-seq']
parser = argparse.ArgumentParser(description = 'Hotspot wrapper for Uniform Analysis Pipeline. Version 3')
parser.add_argument('hotspotLocation', help='The directory to the hotspot installation, for instance "/tools/hotspot/dir"')
parser.add_argument('inputBam', help='Alignment file (in BAM format) to run hotspot on')
parser.add_argument('genome', help='Which genome to use, the following are supported: ' + ','.join(genomes))
parser.add_argument('dataType', help='Which datatype to use, the following are supported: ' + ','.join(dataTypes))
parser.add_argument('readLength', help='Tag length in base pairs, the following are supported: ' + ','.join(readLengths))
parser.add_argument('tmpDir', help='Path to a temporary directory that will be created by this tool and cleaned up afterwards')
parser.add_argument('outputDir', help='Path to a directory to which the output files will be copied')
parser.add_argument('-s', '--seed', type=int, default=101)
parser.add_argument('-o', '--onlyspot', action="store_false", default=True)
parser.add_argument('-i', '--inputControl', default=None, help='Bam file, For ChIP-seq runs, an input will be required')
parser.add_argument('-c', '--checkChr', default=None, help='Tests a portion of the given chromosome (e.g. chrX)')
if len(sys.argv) < 2:
parser.print_usage()
return
args = parser.parse_args(sys.argv[1:])
# ensure all inputs are valid directories/files/arguments
if not os.path.isdir(args.hotspotLocation):
raise ValueError('hotspotLocation: %s is not a valid directory' % args.hotspotLocation)
if not os.path.isfile(args.inputBam):
raise ValueError('inputBam: %s is not a valid file' % args.inputBam)
if args.genome not in genomes:
raise ValueError('genome: ' + args.genome + ' is not a valid genome, must be one of: ' + ','.join(genomes))
if args.readLength not in readLengths:
raise ValueError('readLength: ' + args.readLength + ' is not a supported read length, must be one of: ' + ','.join(readLengths))
# checking dataType constraints
if args.dataType.lower() == 'dnase-seq':
        if args.inputControl is not None:
raise ValueError('DNase-seq does not support input controls')
elif args.dataType.lower() == 'chip-seq':
        if args.inputControl is None:
raise ValueError('ChIP-seq requires an input control')
if not os.path.isfile(args.inputControl):
raise ValueError('inputControl: %s is not a valid file' % args.inputControl)
else:
raise ValueError('unrecognized dataType %s' % args.dataType)
args.hotspotLocation = os.path.abspath(args.hotspotLocation)
args.inputBam = os.path.abspath(args.inputBam)
args.tmpDir = os.path.abspath(args.tmpDir)
args.outputDir = os.path.abspath(args.outputDir)
    if args.inputControl is not None:
args.inputControl = os.path.abspath(args.inputControl)
# make all directory names end with a slash
if not args.hotspotLocation.endswith('/'):
args.hotspotLocation += '/'
if not args.tmpDir.endswith('/'):
args.tmpDir += '/'
if not args.outputDir.endswith('/'):
args.outputDir += '/'
# create all hotspot filenames
chromInfoBed = args.hotspotLocation + 'data/' + args.genome + '.chromInfo.bed'
mappableFile = args.hotspotLocation + 'data/' + args.genome + '.K' + args.readLength + '.mappable_only.bed'
omitRegionsFile = args.hotspotLocation + 'data/Satellite.' + args.genome + '.bed'
hotspotExe = args.hotspotLocation + 'hotspot-deploy/bin/hotspot'
peakFindExe = args.hotspotLocation + 'hotspot-deploy/bin/wavePeaks'
tokenizerExe = args.hotspotLocation + 'ScriptTokenizer/src/script-tokenizer.py'
pipeDir = args.hotspotLocation + 'pipeline-scripts'
# ensure all hotspot files are in the right location
for f in chromInfoBed, mappableFile, omitRegionsFile, hotspotExe, peakFindExe, tokenizerExe, pipeDir:
if not os.path.exists(f):
raise ValueError('hotspotLocation: installation is missing ' + f)
# hotspot names its directories according to the name of the input bam, so we must capture that value as well
fileName = os.path.split(args.inputBam)[1]
runName = os.path.splitext(fileName)[0]
# mapping from files hotspot creates to what we want to name them as
    # Use hotspot v4; output list:
    # *-final/*.hot.bed          minimally thresholded hotspots (corresponds to hotspot v3 "b", broad Peak)
    # *-final/*.fdr0.01.hot.bed  FDR thresholded hotspots (corresponds to hotspot v3 "c")
    # *-final/*.fdr0.01.pks.bed  FDR thresholded peaks (corresponds to hotspot v3 "d", narrow Peak)
    # tag.density.starch in target directory, 20bp resolution, converted to bigwiggle
outputs = {
args.tmpDir + runName + '-peaks/' + runName + '.tagdensity.bed.starch': args.outputDir + 'density.bed.starch',
args.tmpDir + runName + '-final/' + runName + '.hot.bed': args.outputDir + 'broadPeaks.bed',
args.tmpDir + runName + '-final/' + runName + '.hot.pval.txt': args.outputDir + 'broadPeaks.pval',
args.tmpDir + runName + '-final/' + runName + '.fdr0.01.pks.bed': args.outputDir + 'narrowPeaks.bed',
args.tmpDir + runName + '-final/' + runName + '.fdr0.01.pks.dens.txt': args.outputDir + 'narrowPeaks.dens',
args.tmpDir + runName + '-final/' + runName + '.fdr0.01.pks.pval.txt': args.outputDir + 'narrowPeaks.pval',
args.tmpDir + runName + '-final/' + runName + '.fdr0.01.pks.zscore.txt': args.outputDir + 'narrowPeaks.zscore',
}
if not os.path.isdir(args.tmpDir):
os.makedirs(args.tmpDir)
# generate tokens.txt file
tokensName = args.tmpDir + 'tokens.txt'
with open(tokensName, 'w') as tokens:
tokens.write('[script-tokenizer]\n')
tokens.write('_TAGS_ = %s\n' % args.inputBam)
        if args.inputControl is not None:
tokens.write('_USE_INPUT_ = T\n')
tokens.write('_INPUT_TAGS_ = %s\n' % args.inputControl)
else:
tokens.write('_USE_INPUT_ = F\n')
tokens.write('_INPUT_TAGS_ =\n')
tokens.write('_GENOME_ = %s\n' % args.genome)
tokens.write('_CHROM_FILE_ = %s\n' % chromInfoBed)
tokens.write('_K_ = %s\n' % args.readLength)
tokens.write('_MAPPABLE_FILE_ = %s\n' % mappableFile)
# Duplicates ok for DNAse, but not for other datatypes
if args.dataType.lower() == 'dnase-seq':
tokens.write('_DUPOK_ = T\n')
else:
tokens.write('_DUPOK_ = F\n')
tokens.write('_FDRS_ = "0.01"\n')
tokens.write('_DENS_:\n') # If not provided, will be generated
tokens.write('_OUTDIR_ = %s\n' % args.tmpDir[:-1])
tokens.write('_RANDIR_ = %s\n' % args.tmpDir[:-1]) # Nothing overlaps
tokens.write('_OMIT_REGIONS_: %s\n' % omitRegionsFile)
        if args.checkChr is not None:
tokens.write('_CHECK_ = T\n')
tokens.write('_CHKCHR_ = %s\n' % args.checkChr)
else:
tokens.write('_CHECK_ = F\n')
tokens.write('_CHKCHR_ = chrX\n')
tokens.write('_HOTSPOT_ = %s\n' % hotspotExe)
tokens.write('_CLEAN_ = F\n') # We handle cleanup
tokens.write('_PKFIND_BIN_ = %s\n' % peakFindExe)
tokens.write('_PKFIND_SMTH_LVL_ = 3\n')
tokens.write('_SEED_=%d\n' % args.seed)
# Hotspot program parameters, should these be parameterized in the script?
tokens.write('_THRESH_ = 2\n')
tokens.write('_WIN_MIN_ = 200\n')
tokens.write('_WIN_MAX_ = 300\n')
tokens.write('_WIN_INCR_ = 50\n')
tokens.write('_BACKGRD_WIN_ = 50000\n')
tokens.write('_MERGE_DIST_ = 150\n')
tokens.write('_MINSIZE_ = 10\n')
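    # For reference (illustrative values): the generated tokens.txt is a
    # ConfigParser-style file whose values the script tokenizer substitutes
    # into the pipeline scripts, e.g.:
    #
    #   [script-tokenizer]
    #   _TAGS_ = /data/sample.bam
    #   _USE_INPUT_ = F
    #   _GENOME_ = hg19
    #   _K_ = 36
    #   _FDRS_ = "0.01"
    #   ...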
# generate runhotspot file
runhotspotName = args.tmpDir + 'runhotspot.sh'
with open(runhotspotName, 'w') as runhotspot:
runhotspot.write('#! /bin/bash -ex\n')
runhotspot.write('scriptTokBin=%s\n' % tokenizerExe)
runhotspot.write('pipeDir=%s\n' % pipeDir)
runhotspot.write('tokenFile=%s\n' % tokensName)
runhotspot.write('scripts="$pipeDir/run_badspot\n')
runhotspot.write(' $pipeDir/run_make_lib\n')
runhotspot.write(' $pipeDir/run_wavelet_peak_finding\n')
runhotspot.write(' $pipeDir/run_10kb_counts\n')
runhotspot.write(' $pipeDir/run_generate_random_lib\n')
runhotspot.write(' $pipeDir/run_pass1_hotspot\n')
runhotspot.write(' $pipeDir/run_pass1_merge_and_thresh_hotspots\n')
runhotspot.write(' $pipeDir/run_pass2_hotspot\n')
runhotspot.write(' $pipeDir/run_rescore_hotspot_passes\n')
if not args.onlyspot:
runhotspot.write(' $pipeDir/run_spot"\n')
else:
runhotspot.write(' $pipeDir/run_spot\n') ## Indeed, no need for all reads peak call
if args.onlyspot: ## only computing SPOT score, do not call narrowpeak
runhotspot.write(' $pipeDir/run_thresh_hot.R\n')
runhotspot.write(' $pipeDir/run_both-passes_merge_and_thresh_hotspots\n')
runhotspot.write(' $pipeDir/run_add_peaks_per_hotspot\n')
runhotspot.write(' $pipeDir/run_final"\n')
runhotspot.write('python2.7 $scriptTokBin --clobber --output-dir=%s $tokenFile $scripts\n' % args.tmpDir)
runhotspot.write('cd %s\n' % args.tmpDir)
runhotspot.write('retCode=0\n')
runhotspot.write('for script in $scripts\n')
runhotspot.write('do\n')
runhotspot.write(' time %s$(basename $script).tok\n' % args.tmpDir)
runhotspot.write(' retCode=$?\n')
runhotspot.write('done\n')
runhotspot.write('exit $retCode\n')
    os.chmod(runhotspotName, 0o755)  # Make this executable (the 0o prefix is octal)
retCode = subprocess.call(runhotspotName)
if retCode != 0:
print(retCode)
return retCode
if not os.path.isdir(args.outputDir):
os.makedirs(args.outputDir)
# move out all the files we want to keep
if args.onlyspot:
for hotfile, outfile in outputs.items():
print(" cp %s %s\n" % (hotfile, outfile))
os.rename(hotfile, outfile)
return 0
if __name__ == '__main__':
    sys.exit(main())
|
|
import logging
from django.dispatch import Signal
from corehq.apps.commtrack.helpers import make_supply_point
from corehq.apps.commtrack.models import Program, SupplyPointCase, Product, RequisitionCase
from corehq.apps.domain.models import Domain
from corehq.apps.locations.models import Location
from corehq.apps.users.models import CommCareUser
from custom.openlmis.api import OpenLMISEndpoint
from custom.openlmis.exceptions import BadParentException, OpenLMISAPIException
from corehq.apps.commtrack import const
from collections import defaultdict
requisition_approved = Signal(providing_args=["requisitions"])
requisition_receipt = Signal(providing_args=["requisitions"])
def _apply_updates(doc, update_dict):
# updates the doc with items from the dict
# returns whether or not any updates were made
should_save = False
for key, value in update_dict.items():
if getattr(doc, key, None) != value:
setattr(doc, key, value)
should_save = True
return should_save
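# Illustrative sketch (hypothetical doc/fields): _apply_updates() only touches
# attributes that actually changed, so callers can skip the save entirely when
# the return value is False.
#
#   changed = _apply_updates(product, {'name': 'Aspirin', 'unit': 'tablet'})
#   if changed:
#       product.save()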
def bootstrap_domain(domain):
project = Domain.get_by_name(domain)
if project.commtrack_settings and project.commtrack_settings.openlmis_config.is_configured:
endpoint = OpenLMISEndpoint.from_config(project.commtrack_settings.openlmis_config)
for f in endpoint.get_all_facilities():
try:
sync_facility_to_supply_point(domain, f)
            except OpenLMISAPIException as e:
logging.exception('Problem syncing facility %s' % f.code)
for program in endpoint.get_all_programs(include_products=True):
sync_openlmis_program(domain, program)
def get_supply_point(domain, facility_or_code):
facility_code = facility_or_code if isinstance(facility_or_code, basestring) else facility_or_code.code
return SupplyPointCase.view('hqcase/by_domain_external_id',
key=[domain, facility_code],
reduce=False,
include_docs=True,
limit=1
).first()
def sync_facility_to_supply_point(domain, facility):
supply_point = get_supply_point(domain, facility)
facility_dict = {
'domain': domain,
'location_type': facility.type,
'external_id': facility.code,
'name': facility.name,
'site_code': facility.code, # todo: do they have a human readable code?
'latitude': facility.latitude,
'longitude': facility.longitude,
}
parent_sp = None
if facility.parent_id:
parent_sp = get_supply_point(domain, facility.parent_id)
if not parent_sp:
raise BadParentException('No matching supply point with code %s found' % facility.parent_id)
if supply_point is None:
if parent_sp:
facility_dict['parent'] = parent_sp.location
facility_loc = Location(**facility_dict)
facility_loc.save()
return make_supply_point(domain, facility_loc)
else:
facility_loc = supply_point.location
if parent_sp and facility_loc.parent_id != parent_sp.location._id:
raise BadParentException('You are trying to move a location. This is currently not supported.')
should_save = _apply_updates(facility_loc, facility_dict)
if should_save:
facility_loc.save()
return supply_point
def get_product(domain, lmis_product):
return Product.get_by_code(domain, lmis_product.code)
def get_program(domain, lmis_program):
program = Program.get_by_code(domain, lmis_program.code)
return program
def sync_openlmis_program(domain, lmis_program):
program = get_program(domain, lmis_program)
if program is None:
program = Program(domain=domain)
program.name = lmis_program.name
program.code = lmis_program.code.lower()
program._doc_type_attr = "Program"
program.save()
if lmis_program.products:
for lmis_product in lmis_program.products:
sync_openlmis_product(domain, program, lmis_product)
return program
def sync_openlmis_product(domain, program, lmis_product):
product = get_product(domain, lmis_product)
product_dict = {
'domain': domain,
'name': lmis_product.name,
'code': lmis_product.code,
'unit': str(lmis_product.unit),
'description': lmis_product.description,
'category': lmis_product.category,
'program_id': program._id,
}
if product is None:
product = Product(**product_dict)
product.save()
else:
if _apply_updates(product, product_dict):
product.save()
return product
def supply_point_to_json(supply_point):
base = {
'agentCode': supply_point.location.site_code,
'agentName': supply_point.name,
'active': not supply_point.closed,
}
if len(supply_point.location.lineage) > 0:
parent_facility_code = Location.get(supply_point.location.lineage[0]).external_id
base['parentFacilityCode'] = parent_facility_code
# todo phone number
return base
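# Illustrative output sketch (hypothetical values) for supply_point_to_json():
#
#   {'agentCode': 'FAC-001',
#    'agentName': 'Clinic A',
#    'active': True,
#    'parentFacilityCode': 'DIST-01'}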
def sync_stock_data_to_openlmis(submission, openlmis_endpoint):
return openlmis_endpoint.submit_requisition(submission)
def sync_supply_point_to_openlmis(supply_point, openlmis_endpoint, create=True):
"""
https://github.com/OpenLMIS/documents/blob/master/4.1-CreateVirtualFacility%20API.md
{
"agentCode":"A2",
"agentName":"AgentVinod",
"parentFacilityCode":"F10",
"phoneNumber":"0099887766",
"active":"true"
}
"""
json_sp = supply_point_to_json(supply_point)
if create:
return openlmis_endpoint.create_virtual_facility(json_sp)
else:
return openlmis_endpoint.update_virtual_facility(supply_point.location.site_code, json_sp)
def sync_requisition_from_openlmis(domain, requisition_id, openlmis_endpoint):
cases = []
send_notification = False
lmis_requisition_details = openlmis_endpoint.get_requisition_details(requisition_id)
if lmis_requisition_details:
rec_cases = [c for c in RequisitionCase.get_by_external_id(domain, str(lmis_requisition_details.id)) if c.type == const.REQUISITION_CASE_TYPE]
if len(rec_cases) == 0:
            products = [product for product in lmis_requisition_details.products if not product.skipped]
for product in products:
pdt = Product.get_by_code(domain, product.code.lower())
if pdt:
case = lmis_requisition_details.to_requisition_case(pdt._id)
case.save()
if case.requisition_status == 'AUTHORIZED':
send_notification = True
cases.append(case)
else:
for case in rec_cases:
before_status = case.requisition_status
if _apply_updates(case, lmis_requisition_details.to_dict(case.product_id)):
after_status = case.requisition_status
case.save()
if before_status in ['INITIATED', 'SUBMITTED'] and after_status == 'AUTHORIZED':
send_notification = True
cases.append(case)
return cases, send_notification
else:
return None, False
def submit_requisition(requisition, openlmis_endpoint):
return openlmis_endpoint.submit_requisition(requisition)
def approve_requisition(requisition_cases, openlmis_endpoint):
    groups = defaultdict(list)
for case in requisition_cases:
groups[case.external_id].append(case)
for group in groups.keys():
        if group:
cases = groups.get(group)
products = []
approver = CommCareUser.get(cases[0].user_id)
for rec in cases:
product = Product.get(rec.product_id)
products.append({"productCode": product.code, "quantityApproved": rec.amount_approved})
approve_data = {
"approverName": approver.human_friendly_name,
"products": products
}
openlmis_endpoint.approve_requisition(approve_data, group)
def delivery_update(requisition_cases, openlmis_endpoint):
order_id = requisition_cases[0].get_case_property("order_id")
products = []
for rec in requisition_cases:
product = Product.get(rec.product_id)
products.append({'productCode': product.code, 'quantityReceived': rec.amount_received})
delivery_data = {'podLineItems': products}
return openlmis_endpoint.confirm_delivery(order_id, delivery_data)
|
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from PIL import Image
from six.moves import cStringIO as StringIO
__all__ = []
def resize_image(img, target_size):
"""
Resize an image so that the shorter edge has length target_size.
img: the input image to be resized.
target_size: the target resized image size.
"""
percent = (target_size / float(min(img.size[0], img.size[1])))
resized_size = int(round(img.size[0] * percent)), int(
round(img.size[1] * percent))
img = img.resize(resized_size, Image.ANTIALIAS)
return img
def flip(im):
"""
Return the flipped image.
Flip an image along the horizontal direction.
im: input image, (K x H x W) ndarrays
"""
if len(im.shape) == 3:
return im[:, :, ::-1]
else:
return im[:, ::-1]
def crop_img(im, inner_size, color=True, test=True):
"""
Return cropped image.
The size of the cropped image is inner_size * inner_size.
im: (K x H x W) ndarrays
inner_size: the cropped image size.
color: whether it is color image.
test: whether in test mode.
If False, does random cropping and flipping.
If True, crop the center of images.
"""
if color:
height, width = max(inner_size, im.shape[1]), max(inner_size,
im.shape[2])
padded_im = np.zeros((3, height, width))
        startY = (height - im.shape[1]) // 2
        startX = (width - im.shape[2]) // 2
endY, endX = startY + im.shape[1], startX + im.shape[2]
padded_im[:, startY:endY, startX:endX] = im
else:
im = im.astype('float32')
height, width = max(inner_size, im.shape[0]), max(inner_size,
im.shape[1])
padded_im = np.zeros((height, width))
        startY = (height - im.shape[0]) // 2
        startX = (width - im.shape[1]) // 2
endY, endX = startY + im.shape[0], startX + im.shape[1]
padded_im[startY:endY, startX:endX] = im
if test:
        startY = (height - inner_size) // 2
        startX = (width - inner_size) // 2
else:
startY = np.random.randint(0, height - inner_size + 1)
startX = np.random.randint(0, width - inner_size + 1)
endY, endX = startY + inner_size, startX + inner_size
if color:
pic = padded_im[:, startY:endY, startX:endX]
else:
pic = padded_im[startY:endY, startX:endX]
if (not test) and (np.random.randint(2) == 0):
pic = flip(pic)
return pic
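# Minimal usage sketch (illustrative shapes): center-crop a 3 x 256 x 256
# image down to 3 x 224 x 224 in test mode.
#
#   im = np.zeros((3, 256, 256), dtype='float32')
#   pic = crop_img(im, 224, color=True, test=True)
#   # pic.shape == (3, 224, 224)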
def decode_jpeg(jpeg_string):
np_array = np.array(Image.open(StringIO(jpeg_string)))
if len(np_array.shape) == 3:
np_array = np.transpose(np_array, (2, 0, 1))
return np_array
def preprocess_img(im, img_mean, crop_size, is_train, color=True):
"""
Does data augmentation for images.
    If is_train is False, crop the center region of the image.
    If is_train is True, randomly crop a region from the image
    and randomly flip it.
im: (K x H x W) ndarrays
"""
im = im.astype('float32')
test = not is_train
pic = crop_img(im, crop_size, color, test)
pic -= img_mean
return pic.flatten()
def load_meta(meta_path, mean_img_size, crop_size, color=True):
"""
Return the loaded meta file.
Load the meta image, which is the mean of the images in the dataset.
The mean image is subtracted from every input image so that the expected mean
of each input image is zero.
"""
mean = np.load(meta_path)['data_mean']
    border = (mean_img_size - crop_size) // 2
if color:
assert (mean_img_size * mean_img_size * 3 == mean.shape[0])
mean = mean.reshape(3, mean_img_size, mean_img_size)
mean = mean[:, border:border + crop_size, border:border +
crop_size].astype('float32')
else:
assert (mean_img_size * mean_img_size == mean.shape[0])
mean = mean.reshape(mean_img_size, mean_img_size)
mean = mean[border:border + crop_size, border:border +
crop_size].astype('float32')
return mean
def load_image(img_path, is_color=True):
"""
Load image and return.
img_path: image path.
is_color: is color image or not.
"""
img = Image.open(img_path)
img.load()
return img
def oversample(img, crop_dims):
"""
    img: iterable of (H x W x K) ndarrays
crop_dims: (height, width) tuple for the crops.
Returned data contains ten crops of input image, namely,
four corner patches and the center patch as well as their
horizontal reflections.
"""
# Dimensions and center.
im_shape = np.array(img[0].shape)
crop_dims = np.array(crop_dims)
im_center = im_shape[:2] / 2.0
# Make crop coordinates
h_indices = (0, im_shape[0] - crop_dims[0])
w_indices = (0, im_shape[1] - crop_dims[1])
crops_ix = np.empty((5, 4), dtype=int)
curr = 0
for i in h_indices:
for j in w_indices:
crops_ix[curr] = (i, j, i + crop_dims[0], j + crop_dims[1])
curr += 1
crops_ix[4] = np.tile(im_center, (1, 2)) + np.concatenate(
[-crop_dims / 2.0, crop_dims / 2.0])
crops_ix = np.tile(crops_ix, (2, 1))
# Extract crops
crops = np.empty(
(10 * len(img), crop_dims[0], crop_dims[1], im_shape[-1]),
dtype=np.float32)
ix = 0
for im in img:
for crop in crops_ix:
crops[ix] = im[crop[0]:crop[2], crop[1]:crop[3], :]
ix += 1
crops[ix - 5:ix] = crops[ix - 5:ix, :, ::-1, :] # flip for mirrors
return crops
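# Minimal usage sketch (illustrative shapes): ten-crop a single 256 x 256 RGB
# image; the result stacks the 4 corner crops, the center crop and their
# horizontal mirrors.
#
#   img = np.zeros((256, 256, 3), dtype='float32')
#   crops = oversample([img], (224, 224))
#   # crops.shape == (10, 224, 224, 3)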
class ImageTransformer:
def __init__(self,
transpose=None,
channel_swap=None,
mean=None,
is_color=True):
        self.is_color = is_color
        # Default to None so transformer() can safely test for unset options.
        self.transpose = None
        self.channel_swap = None
        self.mean = None
        self.set_transpose(transpose)
        self.set_channel_swap(channel_swap)
        self.set_mean(mean)
def set_transpose(self, order):
if order is not None:
if self.is_color:
assert 3 == len(order)
self.transpose = order
def set_channel_swap(self, order):
if order is not None:
if self.is_color:
assert 3 == len(order)
self.channel_swap = order
def set_mean(self, mean):
if mean is not None:
# mean value, may be one value per channel
if mean.ndim == 1:
mean = mean[:, np.newaxis, np.newaxis]
else:
# elementwise mean
if self.is_color:
assert len(mean.shape) == 3
self.mean = mean
def transformer(self, data):
if self.transpose is not None:
data = data.transpose(self.transpose)
if self.channel_swap is not None:
data = data[self.channel_swap, :, :]
if self.mean is not None:
data -= self.mean
return data
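# Minimal usage sketch (illustrative values): convert an H x W x K image to
# K x H x W, reorder RGB -> BGR and subtract a per-channel mean.
#
#   transformer = ImageTransformer(transpose=(2, 0, 1),
#                                  channel_swap=(2, 1, 0),
#                                  mean=np.array([103.94, 116.78, 123.68]))
#   out = transformer.transformer(img.astype('float32'))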
|
|
# -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
from hashlib import sha1
from .exceptions import TemplateNotFound
from .utils import open_if_exists, internalcode
from ._compat import string_types, iteritems
def split_template_path(template):
"""Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
for piece in template.split('/'):
if path.sep in piece \
or (path.altsep and path.altsep in piece) or \
piece == path.pardir:
raise TemplateNotFound(template)
elif piece and piece != '.':
pieces.append(piece)
return pieces
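# For example, split_template_path('users/./index.html') returns
# ['users', 'index.html'], while any path containing '..' raises
# TemplateNotFound.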
class BaseLoader(object):
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
:class:`Template` object.
A very basic example for a loader that looks up templates on the file
system could look like this::
from . import BaseLoader, TemplateNotFound
from os.path import join, exists, getmtime
class MyLoader(BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = join(self.path, template)
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
with file(path) as f:
source = f.read().decode('utf-8')
return source, path, lambda: mtime == getmtime(path)
"""
#: if set to `False` it indicates that the loader cannot provide access
#: to the source of templates.
#:
#: .. versionadded:: 2.4
has_source_access = True
def get_source(self, environment, template):
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
The source part of the returned tuple must be the source of the
        template as a unicode string or an ASCII bytestring. The filename should
be the name of the file on the filesystem if it was loaded from there,
otherwise `None`. The filename is used by python for the tracebacks
if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
reloading is enabled it's always called to check if the template
changed. No arguments are passed so the function must store the
old state somewhere (for example in a closure). If it returns `False`
the template will be reloaded.
"""
if not self.has_source_access:
raise RuntimeError('%s cannot provide access to the source' %
self.__class__.__name__)
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError('this loader cannot iterate over all templates')
@internalcode
def load(self, environment, name, globals=None):
"""Loads a template. This method looks up the template in the cache
or loads one by calling :meth:`get_source`. Subclasses should not
override this method as loaders working on collections of other
loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
will not call this method but `get_source` directly.
"""
code = None
if globals is None:
globals = {}
# first we try to get the source for this template together
# with the filename and the uptodate function.
source, filename, uptodate = self.get_source(environment, name)
# try to load the code from the bytecode cache if there is a
# bytecode cache configured.
bcc = environment.bytecode_cache
if bcc is not None:
bucket = bcc.get_bucket(environment, name, filename, source)
code = bucket.code
# if we don't have code so far (not cached, no longer up to
# date) etc. we compile the template
if code is None:
code = environment.compile(source, name, filename)
# if the bytecode cache is available and the bucket doesn't
# have a code so far, we give the bucket the new code and put
# it back to the bytecode cache.
if bcc is not None and bucket.code is None:
bucket.code = code
bcc.set_bucket(bucket)
return environment.template_class.from_code(environment, code,
globals, uptodate)
class FileSystemLoader(BaseLoader):
"""Loads templates from the file system. This loader can find templates
in folders on the file system and is the preferred way to load them.
The loader takes the path to the templates as string, or if multiple
locations are wanted a list of them which is then looked up in the
given order::
>>> loader = FileSystemLoader('/path/to/templates')
>>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
Per default the template encoding is ``'utf-8'`` which can be changed
by setting the `encoding` parameter to something else.
To follow symbolic links, set the *followlinks* parameter to ``True``::
>>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
.. versionchanged:: 2.8+
The *followlinks* parameter was added.
"""
def __init__(self, searchpath, encoding='utf-8', followlinks=False):
if isinstance(searchpath, string_types):
searchpath = [searchpath]
self.searchpath = list(searchpath)
self.encoding = encoding
self.followlinks = followlinks
def get_source(self, environment, template):
pieces = split_template_path(template)
for searchpath in self.searchpath:
filename = path.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
def list_templates(self):
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
for dirpath, dirnames, filenames in walk_dir:
for filename in filenames:
template = os.path.join(dirpath, filename) \
[len(searchpath):].strip(os.path.sep) \
.replace(os.path.sep, '/')
if template[:2] == './':
template = template[2:]
if template not in found:
found.add(template)
return sorted(found)
class PackageLoader(BaseLoader):
"""Load templates from python eggs or packages. It is constructed with
the name of the python package and the path to the templates in that
package::
loader = PackageLoader('mypackage', 'views')
If the package path is not given, ``'templates'`` is assumed.
Per default the template encoding is ``'utf-8'`` which can be changed
by setting the `encoding` parameter to something else. Due to the nature
of eggs it's only possible to reload templates if the package was loaded
from the file system and not a zip file.
"""
def __init__(self, package_name, package_path='templates',
encoding='utf-8'):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
provider = get_provider(package_name)
self.encoding = encoding
self.manager = ResourceManager()
self.filesystem_bound = isinstance(provider, DefaultProvider)
self.provider = provider
self.package_path = package_path
def get_source(self, environment, template):
pieces = split_template_path(template)
p = '/'.join((self.package_path,) + tuple(pieces))
if not self.provider.has_resource(p):
raise TemplateNotFound(template)
filename = uptodate = None
if self.filesystem_bound:
filename = self.provider.get_resource_filename(self.manager, p)
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
source = self.provider.get_resource_string(self.manager, p)
return source.decode(self.encoding), filename, uptodate
def list_templates(self):
path = self.package_path
if path[:2] == './':
path = path[2:]
elif path == '.':
path = ''
offset = len(path)
results = []
def _walk(path):
for filename in self.provider.resource_listdir(path):
fullname = path + '/' + filename
if self.provider.resource_isdir(fullname):
_walk(fullname)
else:
results.append(fullname[offset:].lstrip('/'))
_walk(path)
results.sort()
return results
class DictLoader(BaseLoader):
"""Loads a template from a python dict. It's passed a dict of unicode
strings bound to template names. This loader is useful for unittesting:
>>> loader = DictLoader({'index.html': 'source here'})
Because auto reloading is rarely useful this is disabled per default.
"""
def __init__(self, mapping):
self.mapping = mapping
def get_source(self, environment, template):
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
def list_templates(self):
return sorted(self.mapping)
class FunctionLoader(BaseLoader):
"""A loader that is passed a function which does the loading. The
function receives the name of the template and has to return either
    a unicode string with the template source, a tuple in the form ``(source,
filename, uptodatefunc)`` or `None` if the template does not exist.
>>> def load_template(name):
... if name == 'index.html':
... return '...'
...
>>> loader = FunctionLoader(load_template)
The `uptodatefunc` is a function that is called if autoreload is enabled
and has to return `True` if the template is still up to date. For more
details have a look at :meth:`BaseLoader.get_source` which has the same
return value.
"""
def __init__(self, load_func):
self.load_func = load_func
def get_source(self, environment, template):
rv = self.load_func(template)
if rv is None:
raise TemplateNotFound(template)
elif isinstance(rv, string_types):
return rv, None, None
return rv
class PrefixLoader(BaseLoader):
"""A loader that is passed a dict of loaders where each loader is bound
to a prefix. The prefix is delimited from the template by a slash per
default, which can be changed by setting the `delimiter` argument to
something else::
loader = PrefixLoader({
'app1': PackageLoader('mypackage.app1'),
'app2': PackageLoader('mypackage.app2')
})
By loading ``'app1/index.html'`` the file from the app1 package is loaded,
by loading ``'app2/index.html'`` the file from the second.
"""
def __init__(self, mapping, delimiter='/'):
self.mapping = mapping
self.delimiter = delimiter
def get_loader(self, template):
try:
prefix, name = template.split(self.delimiter, 1)
loader = self.mapping[prefix]
except (ValueError, KeyError):
raise TemplateNotFound(template)
return loader, name
def get_source(self, environment, template):
loader, name = self.get_loader(template)
try:
return loader.get_source(environment, name)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
loader, local_name = self.get_loader(name)
try:
return loader.load(environment, local_name, globals)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(name)
def list_templates(self):
result = []
for prefix, loader in iteritems(self.mapping):
for template in loader.list_templates():
result.append(prefix + self.delimiter + template)
return result
class ChoiceLoader(BaseLoader):
"""This loader works like the `PrefixLoader` just that no prefix is
specified. If a template could not be found by one loader the next one
is tried.
>>> loader = ChoiceLoader([
... FileSystemLoader('/path/to/user/templates'),
... FileSystemLoader('/path/to/system/templates')
... ])
This is useful if you want to allow users to override builtin templates
from a different location.
"""
def __init__(self, loaders):
self.loaders = loaders
def get_source(self, environment, template):
for loader in self.loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
for loader in self.loaders:
try:
return loader.load(environment, name, globals)
except TemplateNotFound:
pass
raise TemplateNotFound(name)
def list_templates(self):
found = set()
for loader in self.loaders:
found.update(loader.list_templates())
return sorted(found)
class _TemplateModule(ModuleType):
"""Like a normal module but with support for weak references"""
class ModuleLoader(BaseLoader):
"""This loader loads templates from precompiled templates.
Example usage:
>>> loader = ChoiceLoader([
... ModuleLoader('/path/to/compiled/templates'),
... FileSystemLoader('/path/to/templates')
... ])
Templates can be precompiled with :meth:`Environment.compile_templates`.
"""
has_source_access = False
def __init__(self, path):
package_name = '_jinja2_module_templates_%x' % id(self)
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
if isinstance(path, string_types):
path = [path]
else:
path = list(path)
mod.__path__ = path
sys.modules[package_name] = weakref.proxy(mod,
lambda x: sys.modules.pop(package_name, None))
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
# loader that created it goes out of business.
self.module = mod
self.package_name = package_name
@staticmethod
def get_template_key(name):
return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
@staticmethod
def get_module_filename(name):
return ModuleLoader.get_template_key(name) + '.py'
@internalcode
def load(self, environment, name, globals=None):
key = self.get_template_key(name)
module = '%s.%s' % (self.package_name, key)
mod = getattr(self.module, module, None)
if mod is None:
try:
mod = __import__(module, None, None, ['root'])
except ImportError:
raise TemplateNotFound(name)
# remove the entry from sys.modules, we only want the attribute
# on the module object we have stored on the loader.
sys.modules.pop(module, None)
return environment.template_class.from_module_dict(
environment, mod.__dict__, globals)
|
|
# Copyright (c) 2012, Cloudscaling
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import os
import re
import pep8
import six
"""
Guidelines for writing new hacking checks
- Use only for Nova specific tests. OpenStack general tests
should be submitted to the common 'hacking' module.
- Pick numbers in the range N3xx. Find the current test with
the highest allocated number and then pick the next value.
- Keep the test method code in the source file ordered based
on the N3xx value.
- List the new rule in the top level HACKING.rst file
- Add test cases for each new rule to nova/tests/unit/test_hacking.py
"""
UNDERSCORE_IMPORT_FILES = []
session_check = re.compile(r"\w*def [a-zA-Z0-9].*[(].*session.*[)]")
cfg_re = re.compile(r".*\scfg\.")
# Excludes oslo.config OptGroup objects
cfg_opt_re = re.compile(r".*[\s\[]cfg\.[a-zA-Z]*Opt\(")
rule_default_re = re.compile(r".*RuleDefault\(")
policy_enforce_re = re.compile(r".*_ENFORCER\.enforce\(")
vi_header_re = re.compile(r"^#\s+vim?:.+")
virt_file_re = re.compile(r"\./nova/(?:tests/)?virt/(\w+)/")
virt_import_re = re.compile(
r"^\s*(?:import|from) nova\.(?:tests\.)?virt\.(\w+)")
virt_config_re = re.compile(
r"CONF\.import_opt\('.*?', 'nova\.virt\.(\w+)('|.)")
asse_trueinst_re = re.compile(
r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, "
"(\w|\.|\'|\"|\[|\])+\)\)")
asse_equal_type_re = re.compile(
r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), "
"(\w|\.|\'|\"|\[|\])+\)")
asse_equal_in_end_with_true_or_false_re = re.compile(r"assertEqual\("
r"(\w|[][.'\"])+ in (\w|[][.'\", ])+, (True|False)\)")
asse_equal_in_start_with_true_or_false_re = re.compile(r"assertEqual\("
r"(True|False), (\w|[][.'\"])+ in (\w|[][.'\", ])+\)")
# NOTE(snikitin): Next two regexes weren't united to one for more readability.
# asse_true_false_with_in_or_not_in regex checks
# assertTrue/False(A in B) cases where B argument has no spaces
# asse_true_false_with_in_or_not_in_spaces regex checks cases
# where B argument has spaces and starts/ends with [, ', ".
# For example: [1, 2, 3], "some string", 'another string'.
# We have to separate these regexes to avoid false positive
# results. B argument should have spaces only if it starts
# with [, ", '. Otherwise checking of string
# "assertFalse(A in B and C in D)" will be false positives.
# In this case B argument is "B and C in D".
asse_true_false_with_in_or_not_in = re.compile(r"assert(True|False)\("
r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)")
asse_true_false_with_in_or_not_in_spaces = re.compile(r"assert(True|False)"
r"\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+"
r"[\[|'|\"](, .*)?\)")
asse_raises_regexp = re.compile(r"assertRaisesRegexp\(")
conf_attribute_set_re = re.compile(r"CONF\.[a-z0-9_.]+\s*=\s*\w")
log_translation = re.compile(
r"(.)*LOG\.(audit|error|critical)\(\s*('|\")")
log_translation_info = re.compile(
r"(.)*LOG\.(info)\(\s*(_\(|'|\")")
log_translation_exception = re.compile(
r"(.)*LOG\.(exception)\(\s*(_\(|'|\")")
log_translation_LW = re.compile(
r"(.)*LOG\.(warning|warn)\(\s*(_\(|'|\")")
translated_log = re.compile(
r"(.)*LOG\.(audit|error|info|critical|exception)"
"\(\s*_\(\s*('|\")")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
string_translation = re.compile(r"[^_]*_\(\s*('|\")")
underscore_import_check = re.compile(r"(.)*import _(.)*")
import_translation_for_log_or_exception = re.compile(
r"(.)*(from\snova.i18n\simport)\s_")
# We need this for cases where they have created their own _ function.
custom_underscore_check = re.compile(r"(.)*_\s*=\s*(.)*")
api_version_re = re.compile(r"@.*api_version")
dict_constructor_with_list_copy_re = re.compile(r".*\bdict\((\[)?(\(|\[)")
decorator_re = re.compile(r"@.*")
http_not_implemented_re = re.compile(r"raise .*HTTPNotImplemented\(")
spawn_re = re.compile(
r".*(eventlet|greenthread)\.(?P<spawn_part>spawn(_n)?)\(.*\)")
contextlib_nested = re.compile(r"^with (contextlib\.)?nested\(")
doubled_words_re = re.compile(
r"\b(then?|[iao]n|i[fst]|but|f?or|at|and|[dt]o)\s+\1\b")
log_remove_context = re.compile(
r"(.)*LOG\.(.*)\(.*(context=[_a-zA-Z0-9].*)+.*\)")
class BaseASTChecker(ast.NodeVisitor):
"""Provides a simple framework for writing AST-based checks.
Subclasses should implement visit_* methods like any other AST visitor
implementation. When they detect an error for a particular node the
method should call ``self.add_error(offending_node)``. Details about
where in the code the error occurred will be pulled from the node
object.
Subclasses should also provide a class variable named CHECK_DESC to
be used for the human readable error message.
"""
def __init__(self, tree, filename):
"""This object is created automatically by pep8.
:param tree: an AST tree
:param filename: name of the file being analyzed
(ignored by our checks)
"""
self._tree = tree
self._errors = []
def run(self):
"""Called automatically by pep8."""
self.visit(self._tree)
return self._errors
def add_error(self, node, message=None):
"""Add an error caused by a node to the list of errors for pep8."""
message = message or self.CHECK_DESC
error = (node.lineno, node.col_offset, message, self.__class__)
self._errors.append(error)
def _check_call_names(self, call_node, names):
if isinstance(call_node, ast.Call):
if isinstance(call_node.func, ast.Name):
if call_node.func.id in names:
return True
return False
def import_no_db_in_virt(logical_line, filename):
"""Check for db calls from nova/virt
As of grizzly-2 all the database calls have been removed from
nova/virt, and we want to keep it that way.
N307
"""
if "nova/virt" in filename and not filename.endswith("fake.py"):
if logical_line.startswith("from nova import db"):
yield (0, "N307: nova.db import not allowed in nova/virt/*")
def no_db_session_in_public_api(logical_line, filename):
if "db/api.py" in filename:
if session_check.match(logical_line):
yield (0, "N309: public db api methods may not accept session")
def use_timeutils_utcnow(logical_line, filename):
# tools are OK to use the standard datetime module
if "/tools/" in filename:
return
msg = "N310: timeutils.utcnow() must be used instead of datetime.%s()"
datetime_funcs = ['now', 'utcnow']
for f in datetime_funcs:
pos = logical_line.find('datetime.%s' % f)
if pos != -1:
yield (pos, msg % f)
def _get_virt_name(regex, data):
m = regex.match(data)
if m is None:
return None
driver = m.group(1)
# Ignore things we mis-detect as virt drivers in the regex
if driver in ["test_virt_drivers", "driver", "firewall",
"disk", "api", "imagecache", "cpu", "hardware",
"image"]:
return None
return driver
def import_no_virt_driver_import_deps(physical_line, filename):
"""Check virt drivers' modules aren't imported by other drivers
Modules under each virt driver's directory are
considered private to that virt driver. Other drivers
in Nova must not access those drivers. Any code that
is to be shared should be refactored into a common
module
N311
"""
thisdriver = _get_virt_name(virt_file_re, filename)
thatdriver = _get_virt_name(virt_import_re, physical_line)
if (thatdriver is not None and
thisdriver is not None and
thisdriver != thatdriver):
return (0, "N311: importing code from other virt drivers forbidden")
def import_no_virt_driver_config_deps(physical_line, filename):
"""Check virt drivers' config vars aren't used by other drivers
Modules under each virt driver's directory are
considered private to that virt driver. Other drivers
in Nova must not use their config vars. Any config vars
that are to be shared should be moved into a common module
N312
"""
thisdriver = _get_virt_name(virt_file_re, filename)
thatdriver = _get_virt_name(virt_config_re, physical_line)
if (thatdriver is not None and
thisdriver is not None and
thisdriver != thatdriver):
return (0, "N312: using config vars from other virt drivers forbidden")
def capital_cfg_help(logical_line, tokens):
msg = "N313: capitalize help string"
if cfg_re.match(logical_line):
for t in range(len(tokens)):
if tokens[t][1] == "help":
txt = tokens[t + 2][1]
if len(txt) > 1 and txt[1].islower():
yield(0, msg)
def no_vi_headers(physical_line, line_number, lines):
"""Check for vi editor configuration in source files.
By default vi modelines can only appear in the first or
last 5 lines of a source file.
N314
"""
# NOTE(gilliard): line_number is 1-indexed
if line_number <= 5 or line_number > len(lines) - 5:
if vi_header_re.match(physical_line):
return 0, "N314: Don't put vi configuration in source files"
def assert_true_instance(logical_line):
"""Check for assertTrue(isinstance(a, b)) sentences
N316
"""
if asse_trueinst_re.match(logical_line):
yield (0, "N316: assertTrue(isinstance(a, b)) sentences not allowed")
def assert_equal_type(logical_line):
"""Check for assertEqual(type(A), B) sentences
N317
"""
if asse_equal_type_re.match(logical_line):
yield (0, "N317: assertEqual(type(A), B) sentences not allowed")
def assert_equal_none(logical_line):
"""Check for assertEqual(A, None) or assertEqual(None, A) sentences
N318
"""
_start_re = re.compile(r"assertEqual\(.*?,\s+None\)$")
_end_re = re.compile(r"assertEqual\(None,")
if _start_re.search(logical_line) or _end_re.search(logical_line):
yield (0, "N318: assertEqual(A, None) or assertEqual(None, A) "
"sentences not allowed. Use assertIsNone(A) instead.")
_start_re = re.compile(r"assertIs(Not)?\(None,")
_end_re = re.compile(r"assertIs(Not)?\(.*,\s+None\)$")
if _start_re.search(logical_line) or _end_re.search(logical_line):
yield (0, "N318: assertIsNot(A, None) or assertIsNot(None, A) must "
"not be used. Use assertIsNone(A) or assertIsNotNone(A) "
"instead.")
def check_python3_xrange(logical_line):
if re.search(r"\bxrange\s*\(", logical_line):
yield(0, "N327: Do not use xrange(). 'xrange()' is not compatible "
"with Python 3. Use range() or six.moves.range() instead.")
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
* Use filename so we can start enforcing this in specific folders instead
of needing to do so all at once.
N319
"""
if logical_line.startswith("LOG.debug(_("):
yield(0, "N319 Don't translate debug level logs")
def no_import_translation_in_tests(logical_line, filename):
"""Check for 'from nova.i18n import _'
N337
"""
if 'nova/tests/' in filename:
res = import_translation_for_log_or_exception.match(logical_line)
if res:
yield(0, "N337 Don't import translation in tests")
def no_setting_conf_directly_in_tests(logical_line, filename):
"""Check for setting CONF.* attributes directly in tests
The value can leak out of tests affecting how subsequent tests run.
Using self.flags(option=value) is the preferred method to temporarily
set config options in tests.
N320
"""
if 'nova/tests/' in filename:
res = conf_attribute_set_re.match(logical_line)
if res:
yield (0, "N320: Setting CONF.* attributes directly in tests is "
"forbidden. Use self.flags(option=value) instead")
def validate_log_translations(logical_line, physical_line, filename):
# Translations are not required in the test directory
# and the Xen utilities
if ("nova/tests" in filename or
"plugins/xenserver/xenapi/etc/xapi.d" in filename):
return
if pep8.noqa(physical_line):
return
msg = "N328: LOG.info messages require translations `_LI()`!"
if log_translation_info.match(logical_line):
yield (0, msg)
msg = "N329: LOG.exception messages require translations `_LE()`!"
if log_translation_exception.match(logical_line):
yield (0, msg)
msg = "N330: LOG.warning, LOG.warn messages require translations `_LW()`!"
if log_translation_LW.match(logical_line):
yield (0, msg)
msg = "N321: Log messages require translations!"
if log_translation.match(logical_line):
yield (0, msg)
def no_mutable_default_args(logical_line):
msg = "N322: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
def check_explicit_underscore_import(logical_line, filename):
"""Check for explicit import of the _ function
We need to ensure that any files that are using the _() function
to translate logs are explicitly importing the _ function. We
can't trust unit test to catch whether the import has been
added so we need to check for it here.
"""
# Build a list of the files that have _ imported. No further
# checking needed once it is found.
if filename in UNDERSCORE_IMPORT_FILES:
pass
elif (underscore_import_check.match(logical_line) or
custom_underscore_check.match(logical_line)):
UNDERSCORE_IMPORT_FILES.append(filename)
elif (translated_log.match(logical_line) or
string_translation.match(logical_line)):
yield(0, "N323: Found use of _() without explicit import of _ !")
def use_jsonutils(logical_line, filename):
# the code below that path is not meant to be executed from neutron
# tree where jsonutils module is present, so don't enforce its usage
# for this subdirectory
if "plugins/xenserver" in filename:
return
# tools are OK to use the standard json module
if "/tools/" in filename:
return
msg = "N324: jsonutils.%(fun)s must be used instead of json.%(fun)s"
if "json." in logical_line:
json_funcs = ['dumps(', 'dump(', 'loads(', 'load(']
for f in json_funcs:
pos = logical_line.find('json.%s' % f)
if pos != -1:
yield (pos, msg % {'fun': f[:-1]})
def check_api_version_decorator(logical_line, previous_logical, blank_before,
filename):
msg = ("N332: the api_version decorator must be the first decorator"
" on a method.")
if blank_before == 0 and re.match(api_version_re, logical_line) \
and re.match(decorator_re, previous_logical):
yield(0, msg)
class CheckForStrUnicodeExc(BaseASTChecker):
"""Checks for the use of str() or unicode() on an exception.
This currently only handles the case where str() or unicode()
is used in the scope of an exception handler. If the exception
is passed into a function, returned from an assertRaises, or
used on an exception created in the same scope, this does not
catch it.
"""
CHECK_DESC = ('N325 str() and unicode() cannot be used on an '
'exception. Remove or use six.text_type()')
def __init__(self, tree, filename):
super(CheckForStrUnicodeExc, self).__init__(tree, filename)
self.name = []
self.already_checked = []
# Python 2 produces ast.TryExcept and ast.TryFinally nodes, but Python 3
# only produces ast.Try nodes.
if six.PY2:
def visit_TryExcept(self, node):
for handler in node.handlers:
if handler.name:
self.name.append(handler.name.id)
super(CheckForStrUnicodeExc, self).generic_visit(node)
self.name = self.name[:-1]
else:
super(CheckForStrUnicodeExc, self).generic_visit(node)
else:
def visit_Try(self, node):
for handler in node.handlers:
if handler.name:
self.name.append(handler.name)
super(CheckForStrUnicodeExc, self).generic_visit(node)
self.name = self.name[:-1]
else:
super(CheckForStrUnicodeExc, self).generic_visit(node)
def visit_Call(self, node):
if self._check_call_names(node, ['str', 'unicode']):
if node not in self.already_checked:
self.already_checked.append(node)
if isinstance(node.args[0], ast.Name):
if node.args[0].id in self.name:
self.add_error(node.args[0])
super(CheckForStrUnicodeExc, self).generic_visit(node)
class CheckForTransAdd(BaseASTChecker):
"""Checks for the use of concatenation on a translated string.
Translations should not be concatenated with other strings, but
should instead include the string being added to the translated
string to give the translators the most information.
"""
CHECK_DESC = ('N326 Translated messages cannot be concatenated. '
'String should be included in translated message.')
TRANS_FUNC = ['_', '_LI', '_LW', '_LE', '_LC']
def visit_BinOp(self, node):
if isinstance(node.op, ast.Add):
if self._check_call_names(node.left, self.TRANS_FUNC):
self.add_error(node.left)
elif self._check_call_names(node.right, self.TRANS_FUNC):
self.add_error(node.right)
super(CheckForTransAdd, self).generic_visit(node)
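# Illustrative example (not part of the original file): concatenating onto a
# translated string hides part of the message from translators, so the whole
# sentence should be passed through the translation function instead.
#
#     msg = _('Invalid state for instance ') + instance.uuid     # N326
#     msg = _('Invalid state for instance %s') % instance.uuid   # OK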
class _FindVariableReferences(ast.NodeVisitor):
def __init__(self):
super(_FindVariableReferences, self).__init__()
self._references = []
def visit_Name(self, node):
if isinstance(node.ctx, ast.Load):
# This means the value of a variable was loaded. For example a
# variable 'foo' was used like:
# mocked_thing.bar = foo
# foo()
            # self.assertRaises(exception, foo)
self._references.append(node.id)
super(_FindVariableReferences, self).generic_visit(node)
class CheckForUncalledTestClosure(BaseASTChecker):
"""Look for closures that are never called in tests.
A recurring pattern when using multiple mocks is to create a closure
decorated with mocks like:
def test_thing(self):
@mock.patch.object(self.compute, 'foo')
@mock.patch.object(self.compute, 'bar')
def _do_test(mock_bar, mock_foo):
# Test things
_do_test()
However it is easy to leave off the _do_test() and have the test pass
because nothing runs. This check looks for methods defined within a test
method and ensures that there is a reference to them. Only methods defined
one level deep are checked. Something like:
def test_thing(self):
class FakeThing:
def foo(self):
would not ensure that foo is referenced.
N349
"""
def __init__(self, tree, filename):
super(CheckForUncalledTestClosure, self).__init__(tree, filename)
self._filename = filename
def visit_FunctionDef(self, node):
# self._filename is 'stdin' in the unit test for this check.
        if (not os.path.basename(self._filename).startswith('test_') and
                os.path.basename(self._filename) != 'stdin'):
return
closures = []
references = []
# Walk just the direct nodes of the test method
for child_node in ast.iter_child_nodes(node):
if isinstance(child_node, ast.FunctionDef):
closures.append(child_node.name)
# Walk all nodes to find references
find_references = _FindVariableReferences()
find_references.generic_visit(node)
references = find_references._references
missed = set(closures) - set(references)
if missed:
self.add_error(node, 'N349: Test closures not called: %s'
% ','.join(missed))
def assert_true_or_false_with_in(logical_line):
"""Check for assertTrue/False(A in B), assertTrue/False(A not in B),
assertTrue/False(A in B, message) or assertTrue/False(A not in B, message)
sentences.
N334
"""
res = (asse_true_false_with_in_or_not_in.search(logical_line) or
asse_true_false_with_in_or_not_in_spaces.search(logical_line))
if res:
yield (0, "N334: Use assertIn/NotIn(A, B) rather than "
"assertTrue/False(A in/not in B) when checking collection "
"contents.")
def assert_raises_regexp(logical_line):
"""Check for usage of deprecated assertRaisesRegexp
N335
"""
res = asse_raises_regexp.search(logical_line)
if res:
yield (0, "N335: assertRaisesRegex must be used instead "
"of assertRaisesRegexp")
def dict_constructor_with_list_copy(logical_line):
msg = ("N336: Must use a dict comprehension instead of a dict constructor"
" with a sequence of key-value pairs."
)
if dict_constructor_with_list_copy_re.match(logical_line):
yield (0, msg)
def assert_equal_in(logical_line):
"""Check for assertEqual(A in B, True), assertEqual(True, A in B),
assertEqual(A in B, False) or assertEqual(False, A in B) sentences
N338
"""
res = (asse_equal_in_start_with_true_or_false_re.search(logical_line) or
asse_equal_in_end_with_true_or_false_re.search(logical_line))
if res:
yield (0, "N338: Use assertIn/NotIn(A, B) rather than "
"assertEqual(A in B, True/False) when checking collection "
"contents.")
def check_http_not_implemented(logical_line, physical_line, filename):
msg = ("N339: HTTPNotImplemented response must be implemented with"
" common raise_feature_not_supported().")
if pep8.noqa(physical_line):
return
if ("nova/api/openstack/compute" not in filename):
return
if re.match(http_not_implemented_re, logical_line):
yield(0, msg)
def check_greenthread_spawns(logical_line, physical_line, filename):
"""Check for use of greenthread.spawn(), greenthread.spawn_n(),
eventlet.spawn(), and eventlet.spawn_n()
N340
"""
msg = ("N340: Use nova.utils.%(spawn)s() rather than "
"greenthread.%(spawn)s() and eventlet.%(spawn)s()")
if "nova/utils.py" in filename or "nova/tests/" in filename:
return
match = re.match(spawn_re, logical_line)
if match:
yield (0, msg % {'spawn': match.group('spawn_part')})
def check_no_contextlib_nested(logical_line, filename):
msg = ("N341: contextlib.nested is deprecated. With Python 2.7 and later "
"the with-statement supports multiple nested objects. See https://"
"docs.python.org/2/library/contextlib.html#contextlib.nested for "
"more information. nova.test.nested() is an alternative as well.")
if contextlib_nested.match(logical_line):
yield(0, msg)
def check_config_option_in_central_place(logical_line, filename):
msg = ("N342: Config options should be in the central location "
"'/nova/conf/*'. Do not declare new config options outside "
"of that folder.")
# That's the correct location
if "nova/conf/" in filename:
return
# TODO(markus_z) This is just temporary until all config options are
# moved to the central place. To avoid that a once cleaned up place
# introduces new config options, we do a check here. This array will
# get quite huge over the time, but will be removed at the end of the
# reorganization.
# You can add the full path to a module or folder. It's just a substring
# check, which makes it flexible enough.
cleaned_up = ["nova/console/serial.py",
"nova/cmd/serialproxy.py",
]
if not any(c in filename for c in cleaned_up):
return
if cfg_opt_re.match(logical_line):
yield(0, msg)
def check_policy_registration_in_central_place(logical_line, filename):
msg = ('N350: Policy registration should be in the central location '
'"/nova/policies/*".')
# This is where registration should happen
if "nova/policies/" in filename:
return
# A couple of policy tests register rules
if "nova/tests/unit/test_policy.py" in filename:
return
if rule_default_re.match(logical_line):
yield(0, msg)
def check_policy_enforce(logical_line, filename):
"""Look for uses of nova.policy._ENFORCER.enforce()
Now that policy defaults are registered in code the _ENFORCER.authorize
method should be used. That ensures that only registered policies are used.
Uses of _ENFORCER.enforce could allow unregistered policies to be used, so
this check looks for uses of that method.
N351
"""
msg = ('N351: nova.policy._ENFORCER.enforce() should not be used. '
'Use the authorize() method instead.')
if policy_enforce_re.match(logical_line):
yield(0, msg)
def check_doubled_words(physical_line, filename):
"""Check for the common doubled-word typos
N343
"""
msg = ("N343: Doubled word '%(word)s' typo found")
match = re.search(doubled_words_re, physical_line)
if match:
return (0, msg % {'word': match.group(1)})
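# Illustrative example (not part of the original file): a doubled word in a
# comment or docstring such as "the the instance is rebooted" is reported as
# N343 on the repeated word.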
def check_python3_no_iteritems(logical_line):
msg = ("N344: Use six.iteritems() instead of dict.iteritems().")
if re.search(r".*\.iteritems\(\)", logical_line):
yield(0, msg)
def check_python3_no_iterkeys(logical_line):
msg = ("N345: Use six.iterkeys() instead of dict.iterkeys().")
if re.search(r".*\.iterkeys\(\)", logical_line):
yield(0, msg)
def check_python3_no_itervalues(logical_line):
msg = ("N346: Use six.itervalues() instead of dict.itervalues().")
if re.search(r".*\.itervalues\(\)", logical_line):
yield(0, msg)
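# Illustrative example (not part of the original file): the dict.iter*()
# methods were removed in Python 3, so code that still has to run under both
# interpreters goes through the six helpers.
#
#     for key, value in data.iteritems():      # N344
#     for key, value in six.iteritems(data):   # OK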
def no_os_popen(logical_line):
"""Disallow 'os.popen('
    Deprecated library function os.popen(). Replace it using subprocess.
https://bugs.launchpad.net/tempest/+bug/1529836
N348
"""
if 'os.popen(' in logical_line:
yield(0, 'N348 Deprecated library function os.popen(). '
'Replace it using subprocess module. ')
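# Illustrative sketch (not part of the original file) of the replacement the
# message asks for: subprocess covers the common "read a command's stdout"
# use of os.popen().
#
#     out = os.popen('uptime').read()                         # N348
#     out = subprocess.check_output(['uptime']).decode()      # preferred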
def no_log_warn(logical_line):
"""Disallow 'LOG.warn('
    Deprecated LOG.warn(); use LOG.warning() instead
https://bugs.launchpad.net/senlin/+bug/1508442
N352
"""
msg = ("N352: LOG.warn is deprecated, please use LOG.warning!")
if "LOG.warn(" in logical_line:
yield (0, msg)
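# Illustrative example (not part of the original file):
#
#     LOG.warn("Instance %s not found", uuid)      # N352
#     LOG.warning("Instance %s not found", uuid)   # OK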
def check_context_log(logical_line, physical_line, filename):
"""check whether context is being passed to the logs
Not correct: LOG.info(_LI("Rebooting instance"), context=context)
Correct: LOG.info(_LI("Rebooting instance"))
https://bugs.launchpad.net/nova/+bug/1500896
N353
"""
if "nova/tests" in filename:
return
if pep8.noqa(physical_line):
return
if log_remove_context.match(logical_line):
yield(0,
"N353: Nova is using oslo.context's RequestContext "
"which means the context object is in scope when "
"doing logging using oslo.log, so no need to pass it as"
"kwarg.")
def factory(register):
register(import_no_db_in_virt)
register(no_db_session_in_public_api)
register(use_timeutils_utcnow)
register(import_no_virt_driver_import_deps)
register(import_no_virt_driver_config_deps)
register(capital_cfg_help)
register(no_vi_headers)
register(no_import_translation_in_tests)
register(assert_true_instance)
register(assert_equal_type)
register(assert_equal_none)
register(assert_raises_regexp)
register(no_translate_debug_logs)
register(no_setting_conf_directly_in_tests)
register(validate_log_translations)
register(no_mutable_default_args)
register(check_explicit_underscore_import)
register(use_jsonutils)
register(check_api_version_decorator)
register(CheckForStrUnicodeExc)
register(CheckForTransAdd)
register(assert_true_or_false_with_in)
register(dict_constructor_with_list_copy)
register(assert_equal_in)
register(check_http_not_implemented)
register(check_no_contextlib_nested)
register(check_greenthread_spawns)
register(check_config_option_in_central_place)
register(check_policy_registration_in_central_place)
register(check_policy_enforce)
register(check_doubled_words)
register(check_python3_no_iteritems)
register(check_python3_no_iterkeys)
register(check_python3_no_itervalues)
register(check_python3_xrange)
register(no_os_popen)
register(no_log_warn)
register(CheckForUncalledTestClosure)
register(check_context_log)
|
|
# -*- coding: utf-8 -*-
import re
import sys
import unittest
from flask import Flask
from flask.ext.script._compat import StringIO, text_type
from flask.ext.script import Command, Manager, Option, prompt, prompt_bool, prompt_choices
from pytest import raises
class Catcher(object):
"""Helper decorator to test raw_input."""
## see: http://stackoverflow.com/questions/13480632/python-stringio-selectively-place-data-into-stdin
def __init__(self, handler):
self.handler = handler
self.inputs = []
def __enter__(self):
self.__stdin = sys.stdin
self.__stdout = sys.stdout
sys.stdin = self
sys.stdout = self
def __exit__(self, type, value, traceback):
sys.stdin = self.__stdin
sys.stdout = self.__stdout
def write(self, value):
self.__stdout.write(value)
result = self.handler(value)
if result is not None:
self.inputs.append(result)
def readline(self):
return self.inputs.pop()
def getvalue(self):
return self.__stdout.getvalue()
def truncate(self, pos):
return self.__stdout.truncate(pos)
def run(command_line, manager_run):
'''
Runs a manager command line, returns exit code
'''
sys.argv = command_line.split()
exit_code = None
try:
manager_run()
except SystemExit as e:
exit_code = e.code
return exit_code
class SimpleCommand(Command):
'simple command'
def run(self):
print('OK')
class NamedCommand(Command):
'named command'
def run(self):
print('OK')
class ExplicitNamedCommand(Command):
'named command'
name = 'named'
def run(self):
print('OK')
class NamespacedCommand(Command):
'namespaced command'
namespace = 'ns'
def run(self):
print('OK')
class CommandWithArgs(Command):
'command with args'
option_list = (
Option('name'),
)
def run(self, name):
print(name)
class CommandWithOptionalArg(Command):
'command with optional arg'
option_list = (
Option('-n','--name', required=False),
)
def run(self, name="NotGiven"):
print("OK name="+str(name))
class CommandWithOptions(Command):
'command with options'
option_list = (
Option('-n', '--name',
help='name to pass in',
dest='name'),
)
def run(self, name):
print(name)
class CommandWithDynamicOptions(Command):
'command with options'
def __init__(self, default_name='Joe'):
self.default_name = default_name
def get_options(self):
return (
Option('-n', '--name',
help='name to pass in',
dest='name',
default=self.default_name),
)
def run(self, name):
print(name)
class CommandWithCatchAll(Command):
'command with catch all args'
capture_all_args = True
def get_options(self):
return (Option('--foo', dest='foo',
action='store_true'),)
def run(self, remaining_args, foo):
print(remaining_args)
class EmptyContext(object):
def __enter__(self):
pass
def __exit__(self, a,b,c):
pass
class TestApp(object):
def __init__(self, verbose=False):
self.verbose = verbose
def test_request_context(self):
return EmptyContext()
def __call__(self,**kw):
if self.verbose:
print("APP "+" ".join("%s=%s" % (k,v) for k,v in kw.items()))
return self
class TestManager:
def setup(self):
self.app = TestApp()
def test_with_default_commands(self):
manager = Manager(self.app)
manager.set_defaults()
assert 'runserver' in manager._commands
assert 'shell' in manager._commands
def test_without_default_commands(self):
manager = Manager(self.app, with_default_commands=False)
manager.set_defaults()
assert 'runserver' not in manager._commands
assert 'shell' not in manager._commands
def test_add_command(self):
manager = Manager(self.app)
manager.add_command('simple', SimpleCommand())
assert isinstance(manager._commands['simple'], SimpleCommand)
def test_add_named_command(self):
manager = Manager(self.app)
manager.add_command(NamedCommand())
assert 'named' in manager._commands
assert isinstance(manager._commands['named'], NamedCommand)
def test_add_explicit_named_command(self):
manager = Manager(self.app)
manager.add_command(ExplicitNamedCommand())
name = ExplicitNamedCommand.name
assert name in manager._commands
assert isinstance(manager._commands[name], ExplicitNamedCommand)
def test_add_namespaced_command(self):
manager = Manager(self.app)
manager.add_command('one', NamespacedCommand())
manager.add_command('two', NamespacedCommand())
assert 'ns' in manager._commands
assert isinstance(manager._commands['ns'], Manager)
ns = manager._commands['ns']
assert isinstance(ns._commands['one'], NamespacedCommand)
assert isinstance(ns._commands['two'], NamespacedCommand)
def test_add_namespaced_simple_command(self):
manager = Manager(self.app)
manager.add_command('hello', SimpleCommand(), namespace='ns')
manager.add_command('world', SimpleCommand(), namespace='ns')
assert 'ns' in manager._commands
assert isinstance(manager._commands['ns'], Manager)
ns = manager._commands['ns']
assert isinstance(ns._commands['hello'], SimpleCommand)
assert isinstance(ns._commands['world'], SimpleCommand)
def test_add_command_class(self):
manager = Manager(self.app)
manager.add_command('simple', SimpleCommand)
assert isinstance(manager._commands['simple'], SimpleCommand)
def test_simple_command_decorator(self, capsys):
manager = Manager(self.app)
@manager.command
def hello():
print('hello')
assert 'hello' in manager._commands
code = run('manage.py hello', manager.run)
out, err = capsys.readouterr()
assert 'hello' in out
def test_simple_command_decorator_with_pos_arg(self, capsys):
manager = Manager(self.app)
@manager.command
def hello(name):
print('hello ' + name)
assert 'hello' in manager._commands
code = run('manage.py hello joe', manager.run)
out, err = capsys.readouterr()
assert 'hello joe' in out
def test_method_command_decorator_with_pos_arg(self, capsys):
manager = Manager(self.app)
class SomeTest(object):
def hello(self,name):
print('hello ' + name)
sometest = SomeTest()
manager.command(sometest.hello)
assert 'hello' in manager._commands
code = run('manage.py hello joe', lambda: manager.run())
out, err = capsys.readouterr()
assert 'hello joe' in out
def test_command_decorator_with_options(self, capsys):
manager = Manager(self.app)
@manager.command
def hello(name='fred'):
'Prints your name'
print('hello ' + name)
assert 'hello' in manager._commands
code = run('manage.py hello --name=joe', manager.run)
out, err = capsys.readouterr()
assert 'hello joe' in out
code = run('manage.py hello -n joe', manager.run)
out, err = capsys.readouterr()
assert 'hello joe' in out
code = run('manage.py hello -?', manager.run)
out, err = capsys.readouterr()
assert 'Prints your name' in out
code = run('manage.py hello --help', manager.run)
out, err = capsys.readouterr()
assert 'Prints your name' in out
def test_no_help(self, capsys):
"""
Tests that erasing --help really works.
"""
manager = Manager(self.app)
manager.help_args = ()
@manager.command
def hello(name='fred'):
'Prints your name'
print('hello ' + name)
assert 'hello' in manager._commands
code = run('manage.py --help hello', manager.run)
out, err = capsys.readouterr()
print(out)
assert 'too many arguments' in err
code = run('manage.py hello --help', manager.run)
out, err = capsys.readouterr()
print(out)
assert 'too many arguments' in err
def test_command_decorator_with_boolean_options(self, capsys):
manager = Manager(self.app)
@manager.command
def verify(verified=False):
'Checks if verified'
            print('VERIFIED ? ' + ('YES' if verified else 'NO'))
assert 'verify' in manager._commands
code = run('manage.py verify --verified', manager.run)
out, err = capsys.readouterr()
assert 'YES' in out
code = run('manage.py verify -v', manager.run)
out, err = capsys.readouterr()
assert 'YES' in out
code = run('manage.py verify', manager.run)
out, err = capsys.readouterr()
assert 'NO' in out
code = run('manage.py verify -?', manager.run)
out, err = capsys.readouterr()
assert 'Checks if verified' in out
def test_simple_command_decorator_with_pos_arg_and_options(self, capsys):
manager = Manager(self.app)
@manager.command
def hello(name, url=None):
if url:
assert type(url) is text_type
print('hello ' + name + ' from ' + url)
else:
assert type(name) is text_type
print('hello ' + name)
assert 'hello' in manager._commands
code = run('manage.py hello joe', manager.run)
out, err = capsys.readouterr()
assert 'hello joe' in out
code = run('manage.py hello joe --url=reddit.com', manager.run)
out, err = capsys.readouterr()
assert 'hello joe from reddit.com' in out
def test_command_decorator_with_additional_options(self, capsys):
manager = Manager(self.app)
@manager.option('-n', '--name', dest='name', help='Your name')
def hello(name):
print('hello ' + name)
assert 'hello' in manager._commands
code = run('manage.py hello --name=joe', manager.run)
out, err = capsys.readouterr()
assert 'hello joe' in out
code = run('manage.py hello -?', manager.run)
out, err = capsys.readouterr()
assert 'Your name' in out
@manager.option('-n', '--name', dest='name', help='Your name')
@manager.option('-u', '--url', dest='url', help='Your URL')
def hello_again(name, url=None):
if url:
print('hello ' + name + ' from ' + url)
else:
print('hello ' + name)
assert 'hello_again' in manager._commands
code = run('manage.py hello_again --name=joe', manager.run)
out, err = capsys.readouterr()
assert 'hello joe' in out
code = run('manage.py hello_again --name=joe --url=reddit.com', manager.run)
out, err = capsys.readouterr()
assert 'hello joe from reddit.com' in out
def test_global_option_provided_before_and_after_command(self, capsys):
manager = Manager(self.app)
manager.add_option('-c', '--config', dest='config_name', required=False, default='Development')
manager.add_command('simple', SimpleCommand())
assert isinstance(manager._commands['simple'], SimpleCommand)
code = run('manage.py -c Development simple', manager.run)
out, err = capsys.readouterr()
assert code == 0
assert 'OK' in out
code = run('manage.py simple -c Development', manager.run)
out, err = capsys.readouterr()
assert code == 2
assert 'OK' not in out
def test_global_option_value(self, capsys):
def create_app(config_name='Empty'):
print(config_name)
return self.app
manager = Manager(create_app)
manager.add_option('-c', '--config', dest='config_name', required=False, default='Development')
manager.add_command('simple', SimpleCommand())
assert isinstance(manager._commands['simple'], SimpleCommand)
code = run('manage.py simple', manager.run)
out, err = capsys.readouterr()
assert code == 0
assert 'Empty' not in out # config_name is overwritten by default option value
assert 'Development' in out
assert 'OK' in out
def test_get_usage(self):
manager = Manager(self.app)
manager.add_command('simple', SimpleCommand())
usage = manager.create_parser('manage.py').format_help()
assert 'simple command' in usage
def test_get_usage_with_specified_usage(self):
manager = Manager(self.app, usage='hello')
manager.add_command('simple', SimpleCommand())
usage = manager.create_parser('manage.py').format_help()
assert 'simple command' in usage
assert 'hello' in usage
def test_run_existing_command(self, capsys):
manager = Manager(self.app)
manager.add_command('simple', SimpleCommand())
code = run('manage.py simple', manager.run)
out, err = capsys.readouterr()
assert 'OK' in out
    def test_run_non_existent_command(self, capsys):
manager = Manager(self.app)
run('manage.py simple', manager.run)
out, err = capsys.readouterr()
assert 'invalid choice' in err
def test_run_existing(self, capsys):
manager = Manager(self.app)
manager.add_command('simple', SimpleCommand())
code = run('manage.py simple', manager.run)
out, err = capsys.readouterr()
assert 0 == code
assert 'OK' in out
def test_run_existing_bind_later(self, capsys):
manager = Manager(self.app)
code = run('manage.py simple', lambda: manager.run({'simple': SimpleCommand()}))
out, err = capsys.readouterr()
assert code == 0
assert 'OK' in out
def test_run_not_existing(self, capsys):
manager = Manager(self.app)
code = run('manage.py simple', manager.run)
out, err = capsys.readouterr()
assert code == 2
assert 'OK' not in out
def test_run_no_name(self, capsys):
manager = Manager(self.app)
manager.add_command('simple', SimpleCommand())
code = run('manage.py', manager.run)
out, err = capsys.readouterr()
assert code == 2
assert 'simple command' in out
def test_run_good_options(self, capsys):
manager = Manager(self.app)
manager.add_command('simple', CommandWithOptions())
code = run('manage.py simple --name=Joe', manager.run)
out, err = capsys.readouterr()
assert code == 0
assert 'Joe' in out
def test_run_dynamic_options(self, capsys):
manager = Manager(self.app)
manager.add_command('simple', CommandWithDynamicOptions('Fred'))
code = run('manage.py simple', manager.run)
out, err = capsys.readouterr()
assert code == 0
assert 'Fred' in out
def test_run_catch_all(self, capsys):
manager = Manager(self.app)
manager.add_command('catch', CommandWithCatchAll())
code = run('manage.py catch pos1 --foo pos2 --bar', manager.run)
out, err = capsys.readouterr()
out_list = [o.strip('u\'') for o in out.strip('[]\n').split(', ')]
assert code == 0
assert ['pos1', 'pos2', '--bar'] == out_list
def test_run_bad_options(self, capsys):
manager = Manager(self.app)
manager.add_command('simple', CommandWithOptions())
code = run('manage.py simple --foo=bar', manager.run)
assert code == 2
def test_init_with_flask_instance(self):
manager = Manager(self.app)
assert callable(manager.app)
def test_init_with_callable(self):
manager = Manager(lambda: self.app)
assert callable(manager.app)
def test_raise_index_error(self):
manager = Manager(self.app)
@manager.command
def error():
raise IndexError()
with raises(IndexError):
run('manage.py error', manager.run)
def test_run_with_default_command(self, capsys):
manager = Manager(self.app)
manager.add_command('simple', SimpleCommand())
code = run('manage.py', lambda: manager.run(default_command='simple'))
out, err = capsys.readouterr()
assert code == 0
assert 'OK' in out
def test_command_with_prompt(self, capsys):
manager = Manager(self.app)
@manager.command
def hello():
print(prompt(name='hello'))
@Catcher
def hello_john(msg):
if re.search("hello", msg):
return 'john'
with hello_john:
code = run('manage.py hello', manager.run)
out, err = capsys.readouterr()
assert 'hello: john' in out
def test_command_with_default_prompt(self, capsys):
manager = Manager(self.app)
@manager.command
def hello():
print(prompt(name='hello', default='romeo'))
@Catcher
def hello(msg):
if re.search("hello", msg):
return '\n' # just hit enter
with hello:
code = run('manage.py hello', manager.run)
out, err = capsys.readouterr()
assert 'hello [romeo]: romeo' in out
@Catcher
def hello_juliette(msg):
if re.search("hello", msg):
return 'juliette'
with hello_juliette:
code = run('manage.py hello', manager.run)
out, err = capsys.readouterr()
assert 'hello [romeo]: juliette' in out
def test_command_with_prompt_bool(self, capsys):
manager = Manager(self.app)
@manager.command
def hello():
print(prompt_bool(name='correct', default=True, yes_choices=['y'],
no_choices=['n']) and 'yes' or 'no')
@Catcher
def correct_default(msg):
if re.search("correct", msg):
return '\n' # just hit enter
@Catcher
def correct_y(msg):
if re.search("correct", msg):
return 'y'
@Catcher
def correct_n(msg):
if re.search("correct", msg):
return 'n'
with correct_default:
code = run('manage.py hello', manager.run)
out, err = capsys.readouterr()
assert 'correct [y]: yes' in out
with correct_y:
code = run('manage.py hello', manager.run)
out, err = capsys.readouterr()
assert 'correct [y]: yes' in out
with correct_n:
code = run('manage.py hello', manager.run)
out, err = capsys.readouterr()
assert 'correct [y]: no' in out
def test_command_with_prompt_choices(self, capsys):
manager = Manager(self.app)
@manager.command
def hello():
print(prompt_choices(name='hello', choices=['peter', 'john', 'sam']))
@Catcher
def hello_john(msg):
if re.search("hello", msg):
return 'john'
with hello_john:
code = run('manage.py hello', manager.run)
out, err = capsys.readouterr()
assert 'hello - (peter, john, sam): john' in out
def test_command_with_default_prompt_choices(self, capsys):
manager = Manager(self.app)
@manager.command
def hello():
print(prompt_choices(name='hello', choices=['peter', 'charlie', 'sam'], default="john"))
@Catcher
def hello_john(msg):
if re.search("hello", msg):
return '\n'
with hello_john:
code = run('manage.py hello', manager.run)
out, err = capsys.readouterr()
assert 'hello - (peter, charlie, sam) [john]: john' in out
@Catcher
def hello_charlie(msg):
if re.search("hello", msg):
return 'charlie'
with hello_charlie:
code = run('manage.py hello', manager.run)
out, err = capsys.readouterr()
assert 'hello - (peter, charlie, sam) [john]: charlie' in out
class TestSubManager:
def setup(self):
self.app = TestApp()
def test_add_submanager(self):
sub_manager = Manager()
manager = Manager(self.app)
manager.add_command('sub_manager', sub_manager)
assert isinstance(manager._commands['sub_manager'], Manager)
assert sub_manager.parent == manager
assert sub_manager.get_options() == manager.get_options()
def test_run_submanager_command(self, capsys):
sub_manager = Manager()
sub_manager.add_command('simple', SimpleCommand())
manager = Manager(self.app)
manager.add_command('sub_manager', sub_manager)
code = run('manage.py sub_manager simple', manager.run)
out, err = capsys.readouterr()
assert code == 0
assert 'OK' in out
def test_submanager_has_options(self, capsys):
sub_manager = Manager()
sub_manager.add_command('simple', SimpleCommand())
manager = Manager(self.app)
manager.add_command('sub_manager', sub_manager)
manager.add_option('-c', '--config', dest='config', required=False)
code = run('manage.py sub_manager simple', manager.run)
out, err = capsys.readouterr()
assert code == 0
assert 'OK' in out
code = run('manage.py -c Development sub_manager simple', manager.run)
out, err = capsys.readouterr()
assert code == 0
assert 'OK' in out
def test_submanager_separate_options(self, capsys):
sub_manager = Manager(TestApp(verbose=True), with_default_commands=False)
sub_manager.add_command('opt', CommandWithOptionalArg())
sub_manager.add_option('-n', '--name', dest='name_sub', required=False)
manager = Manager(TestApp(verbose=True), with_default_commands=False)
manager.add_command('sub_manager', sub_manager)
manager.add_option('-n', '--name', dest='name_main', required=False)
code = run('manage.py -n MyMainName sub_manager -n MySubName opt -n MyName', manager.run)
out, err = capsys.readouterr()
assert code == 0
assert 'APP name_main=MyMainName' in out
assert 'APP name_sub=MySubName' in out
assert 'OK name=MyName' in out
def test_manager_usage_with_submanager(self, capsys):
sub_manager = Manager(usage='Example sub-manager')
manager = Manager(self.app)
manager.add_command('sub_manager', sub_manager)
code = run('manage.py -?', manager.run)
out, err = capsys.readouterr()
assert code == 0
assert 'Example sub-manager' in out
def test_submanager_usage_and_help_and_description(self, capsys):
sub_manager = Manager(usage='sub_manager [--foo]',
help='shorter desc for submanager',
description='longer desc for submanager')
sub_manager.add_command('simple', SimpleCommand())
manager = Manager(self.app)
manager.add_command('sub_manager', sub_manager)
code = run('manage.py -?', manager.run)
out, err = capsys.readouterr()
assert code == 0
assert 'sub_manager [--foo]' not in out
assert 'shorter desc for submanager' in out
assert 'longer desc for submanager' not in out
code = run('manage.py sub_manager', manager.run)
out, err = capsys.readouterr()
assert code == 2
assert 'sub_manager [--foo]' in out
assert 'shorter desc for submanager' not in out
assert 'longer desc for submanager' in out
assert 'simple command' in out
code = run('manage.py sub_manager -?', manager.run)
out, err = capsys.readouterr()
assert code == 0
assert 'sub_manager [--foo]' in out
assert 'shorter desc for submanager' not in out
assert 'longer desc for submanager' in out
assert 'simple command' in out
code = run('manage.py sub_manager simple -?', manager.run)
out, err = capsys.readouterr()
assert code == 0
assert 'sub_manager [--foo] simple [-?]' in out
assert 'simple command' in out
def test_submanager_has_no_default_commands(self):
sub_manager = Manager()
manager = Manager()
manager.add_command('sub_manager', sub_manager)
manager.set_defaults()
assert 'runserver' not in sub_manager._commands
assert 'shell' not in sub_manager._commands
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Mix.mix_image'
db.alter_column(u'spa_mix', 'mix_image', self.gf('django.db.models.fields.files.ImageField')(max_length=1024))
# Changing field 'UserProfile.avatar_image'
db.alter_column(u'spa_userprofile', 'avatar_image', self.gf('django.db.models.fields.files.ImageField')(max_length=1024))
def backwards(self, orm):
# Changing field 'Mix.mix_image'
db.alter_column(u'spa_mix', 'mix_image', self.gf('django.db.models.fields.files.ImageField')(max_length=100))
# Changing field 'UserProfile.avatar_image'
db.alter_column(u'spa_userprofile', 'avatar_image', self.gf('django.db.models.fields.files.ImageField')(max_length=100))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa._lookup': {
'Meta': {'object_name': '_Lookup'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.activity': {
'Meta': {'object_name': 'Activity'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']", 'null': 'True', 'blank': 'True'})
},
'spa.activitydownload': {
'Meta': {'object_name': 'ActivityDownload', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_downloads'", 'to': "orm['spa.Mix']"})
},
'spa.activityfavourite': {
'Meta': {'object_name': 'ActivityFavourite', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_favourites'", 'to': "orm['spa.Mix']"})
},
'spa.activityfollow': {
'Meta': {'object_name': 'ActivityFollow', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_follow'", 'to': "orm['spa.UserProfile']"})
},
'spa.activitylike': {
'Meta': {'object_name': 'ActivityLike', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_likes'", 'to': "orm['spa.Mix']"})
},
'spa.activityplay': {
'Meta': {'object_name': 'ActivityPlay', '_ormbases': ['spa.Activity']},
u'activity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa.Activity']", 'unique': 'True', 'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_plays'", 'to': "orm['spa.Mix']"})
},
'spa.chatmessage': {
'Meta': {'object_name': 'ChatMessage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'chat_messages'", 'null': 'True', 'to': "orm['spa.UserProfile']"})
},
'spa.comment': {
'Meta': {'object_name': 'Comment'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['spa.Mix']"}),
'time_index': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
'spa.event': {
'Meta': {'object_name': 'Event'},
'attendees': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'attendees'", 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'date_created': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 8, 20, 0, 0)'}),
'event_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 8, 20, 0, 0)'}),
'event_description': ('tinymce.views.HTMLField', [], {}),
'event_recurrence': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Recurrence']"}),
'event_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.datetime(2013, 8, 20, 0, 0)'}),
'event_title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'event_venue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Venue']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'spa.genre': {
'Meta': {'object_name': 'Genre'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'spa.label': {
'Meta': {'object_name': 'Label'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'spa.mix': {
'Meta': {'object_name': 'Mix'},
'description': ('django.db.models.fields.TextField', [], {}),
'download_allowed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'favourites': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'favourites'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['spa.UserProfile']"}),
'genres': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['spa.Genre']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'likes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['spa.UserProfile']"}),
'local_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'mix_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '1024', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'uid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '38', 'blank': 'True'}),
'upload_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 8, 20, 0, 0)'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mixes'", 'to': "orm['spa.UserProfile']"}),
'waveform_generated': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'spa.notification': {
'Meta': {'object_name': 'Notification'},
'accepted_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'from_user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'notifications'", 'null': 'True', 'to': "orm['spa.UserProfile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notification_text': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'notification_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'to_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'to_notications'", 'to': "orm['spa.UserProfile']"}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'})
},
'spa.purchaselink': {
'Meta': {'object_name': 'PurchaseLink'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'track': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_link'", 'to': "orm['spa.Tracklist']"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'spa.recurrence': {
'Meta': {'object_name': 'Recurrence', '_ormbases': ['spa._Lookup']},
u'_lookup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['spa._Lookup']", 'unique': 'True', 'primary_key': 'True'})
},
'spa.release': {
'Meta': {'object_name': 'Release'},
'embed_code': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'release_artist': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'release_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 8, 20, 0, 0)'}),
'release_description': ('django.db.models.fields.TextField', [], {}),
'release_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'release_label': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.Label']"}),
'release_title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['spa.UserProfile']"})
},
'spa.releaseaudio': {
'Meta': {'object_name': 'ReleaseAudio'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'release_audio'", 'null': 'True', 'to': "orm['spa.Release']"})
},
'spa.tracklist': {
'Meta': {'object_name': 'Tracklist'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.SmallIntegerField', [], {}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tracklist'", 'to': "orm['spa.Mix']"}),
'remixer': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timeindex': ('django.db.models.fields.TimeField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'spa.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'activity_sharing': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'activity_sharing_networks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'avatar_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '1024', 'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'social'", 'max_length': '15'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '35', 'blank': 'True'}),
'following': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'followers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['spa.UserProfile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'default': 'None', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'spa.venue': {
'Meta': {'object_name': 'Venue'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'venue_address': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'venue_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'venue_name': ('django.db.models.fields.CharField', [], {'max_length': '250'})
}
}
complete_apps = ['spa']
|
|
"""Support for Tado to create a climate device for each zone."""
import logging
from typing import List, Optional
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
FAN_HIGH,
FAN_LOW,
FAN_MIDDLE,
FAN_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_HOME,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, PRECISION_TENTHS, TEMP_CELSIUS
from homeassistant.util.temperature import convert as convert_temperature
from . import DATA_TADO
_LOGGER = logging.getLogger(__name__)
CONST_MODE_SMART_SCHEDULE = "SMART_SCHEDULE" # Default mytado mode
CONST_MODE_OFF = "OFF" # Switch off heating in a zone
# When we change the temperature setting, we need an overlay mode:
# the overlay lasts until tado changes the mode back automatically
CONST_OVERLAY_TADO_MODE = "TADO_MODE"
# the user has changed the temperature or mode manually
CONST_OVERLAY_MANUAL = "MANUAL"
# the temperature will be reset after a timespan
CONST_OVERLAY_TIMER = "TIMER"
CONST_MODE_FAN_HIGH = "HIGH"
CONST_MODE_FAN_MIDDLE = "MIDDLE"
CONST_MODE_FAN_LOW = "LOW"
FAN_MAP_TADO = {"HIGH": FAN_HIGH, "MIDDLE": FAN_MIDDLE, "LOW": FAN_LOW}
HVAC_MAP_TADO_HEAT = {
"MANUAL": HVAC_MODE_HEAT,
"TIMER": HVAC_MODE_HEAT,
"TADO_MODE": HVAC_MODE_HEAT,
"SMART_SCHEDULE": HVAC_MODE_AUTO,
"OFF": HVAC_MODE_OFF,
}
HVAC_MAP_TADO_COOL = {
"MANUAL": HVAC_MODE_COOL,
"TIMER": HVAC_MODE_COOL,
"TADO_MODE": HVAC_MODE_COOL,
"SMART_SCHEDULE": HVAC_MODE_AUTO,
"OFF": HVAC_MODE_OFF,
}
HVAC_MAP_TADO_HEAT_COOL = {
"MANUAL": HVAC_MODE_HEAT_COOL,
"TIMER": HVAC_MODE_HEAT_COOL,
"TADO_MODE": HVAC_MODE_HEAT_COOL,
"SMART_SCHEDULE": HVAC_MODE_AUTO,
"OFF": HVAC_MODE_OFF,
}
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
SUPPORT_HVAC_HEAT = [HVAC_MODE_HEAT, HVAC_MODE_AUTO, HVAC_MODE_OFF]
SUPPORT_HVAC_COOL = [HVAC_MODE_COOL, HVAC_MODE_AUTO, HVAC_MODE_OFF]
SUPPORT_HVAC_HEAT_COOL = [HVAC_MODE_HEAT_COOL, HVAC_MODE_AUTO, HVAC_MODE_OFF]
SUPPORT_FAN = [FAN_HIGH, FAN_MIDDLE, FAN_LOW, FAN_OFF]
SUPPORT_PRESET = [PRESET_AWAY, PRESET_HOME]
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Tado climate platform."""
tado = hass.data[DATA_TADO]
try:
zones = tado.get_zones()
except RuntimeError:
_LOGGER.error("Unable to get zone info from mytado")
return
climate_devices = []
for zone in zones:
device = create_climate_device(tado, hass, zone, zone["name"], zone["id"])
if not device:
continue
climate_devices.append(device)
if climate_devices:
add_entities(climate_devices, True)
def create_climate_device(tado, hass, zone, name, zone_id):
"""Create a Tado climate device."""
capabilities = tado.get_capabilities(zone_id)
unit = TEMP_CELSIUS
ac_device = capabilities["type"] == "AIR_CONDITIONING"
ac_support_heat = False
if ac_device:
# Only use heat if available
        # (you don't have to set up a heat mode, but cool is required)
# Heat is preferred as it generally has a lower minimum temperature
if "HEAT" in capabilities:
temperatures = capabilities["HEAT"]["temperatures"]
ac_support_heat = True
else:
temperatures = capabilities["COOL"]["temperatures"]
elif "temperatures" in capabilities:
temperatures = capabilities["temperatures"]
else:
_LOGGER.debug("Received zone %s has no temperature; not adding", name)
return
min_temp = float(temperatures["celsius"]["min"])
max_temp = float(temperatures["celsius"]["max"])
step = temperatures["celsius"].get("step", PRECISION_TENTHS)
data_id = f"zone {name} {zone_id}"
device = TadoClimate(
tado,
name,
zone_id,
data_id,
hass.config.units.temperature(min_temp, unit),
hass.config.units.temperature(max_temp, unit),
step,
ac_device,
ac_support_heat,
)
tado.add_sensor(
data_id, {"id": zone_id, "zone": zone, "name": name, "climate": device}
)
return device
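# Illustrative sketch (inferred from the lookups above, not an official Tado
# payload): create_climate_device() expects get_capabilities() to return
# something shaped roughly like
#
#     {
#         "type": "AIR_CONDITIONING",
#         "COOL": {"temperatures": {"celsius": {"min": 18, "max": 30, "step": 1.0}}},
#         "HEAT": {"temperatures": {"celsius": {"min": 5, "max": 25, "step": 0.1}}},
#     }
#
# or, for heating-only zones, a top-level "temperatures" key with the same
# "celsius" structure.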
class TadoClimate(ClimateDevice):
"""Representation of a Tado climate device."""
def __init__(
self,
store,
zone_name,
zone_id,
data_id,
min_temp,
max_temp,
step,
ac_device,
ac_support_heat,
tolerance=0.3,
):
"""Initialize of Tado climate device."""
self._store = store
self._data_id = data_id
self.zone_name = zone_name
self.zone_id = zone_id
self._ac_device = ac_device
self._ac_support_heat = ac_support_heat
self._cooling = False
self._active = False
self._device_is_active = False
self._unit = TEMP_CELSIUS
self._cur_temp = None
self._cur_humidity = None
self._is_away = False
self._min_temp = min_temp
self._max_temp = max_temp
self._step = step
self._target_temp = None
self._tolerance = tolerance
self._current_fan = CONST_MODE_OFF
self._current_operation = CONST_MODE_SMART_SCHEDULE
self._overlay_mode = CONST_MODE_SMART_SCHEDULE
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def name(self):
"""Return the name of the device."""
return self.zone_name
@property
def current_humidity(self):
"""Return the current humidity."""
return self._cur_humidity
def set_humidity(self, humidity: int) -> None:
"""Set new target humidity."""
pass
@property
def current_temperature(self):
"""Return the sensor temperature."""
return self._cur_temp
@property
def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
if self._ac_device and self._ac_support_heat:
return HVAC_MAP_TADO_HEAT_COOL.get(self._current_operation)
if self._ac_device and not self._ac_support_heat:
return HVAC_MAP_TADO_COOL.get(self._current_operation)
return HVAC_MAP_TADO_HEAT.get(self._current_operation)
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
if self._ac_device and self._ac_support_heat:
return SUPPORT_HVAC_HEAT_COOL
if self._ac_device and not self._ac_support_heat:
return SUPPORT_HVAC_COOL
return SUPPORT_HVAC_HEAT
@property
def hvac_action(self):
"""Return the current running hvac operation if supported.
Need to be one of CURRENT_HVAC_*.
"""
if not self._device_is_active:
return CURRENT_HVAC_OFF
if self._ac_device and self._ac_support_heat and self._cooling:
if self._active:
return CURRENT_HVAC_COOL
return CURRENT_HVAC_IDLE
if self._ac_device and self._ac_support_heat and not self._cooling:
if self._active:
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
if self._ac_device and not self._ac_support_heat:
if self._active:
return CURRENT_HVAC_COOL
return CURRENT_HVAC_IDLE
if self._active:
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
@property
def fan_mode(self):
"""Return the fan setting."""
if self._ac_device:
return FAN_MAP_TADO.get(self._current_fan)
return None
@property
def fan_modes(self):
"""List of available fan modes."""
if self._ac_device:
return SUPPORT_FAN
return None
def set_fan_mode(self, fan_mode: str):
"""Turn fan on/off."""
pass
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp."""
if self._is_away:
return PRESET_AWAY
return PRESET_HOME
@property
def preset_modes(self):
"""Return a list of available preset modes."""
return SUPPORT_PRESET
def set_preset_mode(self, preset_mode):
"""Set new preset mode."""
pass
@property
def temperature_unit(self):
"""Return the unit of measurement used by the platform."""
return self._unit
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self._step
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temp
@property
def target_temperature_high(self):
"""Return the upper bound temperature we try to reach."""
return None
@property
def target_temperature_low(self):
"""Return the lower bound temperature we try to reach."""
return None
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self._current_operation = CONST_OVERLAY_TADO_MODE
self._overlay_mode = None
self._target_temp = temperature
self._control_heating()
def set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
mode = None
if hvac_mode == HVAC_MODE_OFF:
mode = CONST_MODE_OFF
elif hvac_mode == HVAC_MODE_AUTO:
mode = CONST_MODE_SMART_SCHEDULE
elif hvac_mode == HVAC_MODE_HEAT:
mode = CONST_OVERLAY_TADO_MODE
elif hvac_mode == HVAC_MODE_COOL:
mode = CONST_OVERLAY_TADO_MODE
elif hvac_mode == HVAC_MODE_HEAT_COOL:
mode = CONST_OVERLAY_TADO_MODE
self._current_operation = mode
self._overlay_mode = None
if self._target_temp is None and self._ac_device:
self._target_temp = 27
self._control_heating()
@property
def min_temp(self):
"""Return the minimum temperature."""
return convert_temperature(
self._min_temp, self._unit, self.hass.config.units.temperature_unit
)
@property
def max_temp(self):
"""Return the maximum temperature."""
return convert_temperature(
self._max_temp, self._unit, self.hass.config.units.temperature_unit
)
def update(self):
"""Update the state of this climate device."""
self._store.update()
data = self._store.get_data(self._data_id)
if data is None:
_LOGGER.debug("Received no data for zone %s", self.zone_name)
return
if "sensorDataPoints" in data:
sensor_data = data["sensorDataPoints"]
unit = TEMP_CELSIUS
if "insideTemperature" in sensor_data:
temperature = float(sensor_data["insideTemperature"]["celsius"])
self._cur_temp = self.hass.config.units.temperature(temperature, unit)
if "humidity" in sensor_data:
humidity = float(sensor_data["humidity"]["percentage"])
self._cur_humidity = humidity
# temperature setting will not exist when device is off
if (
"temperature" in data["setting"]
and data["setting"]["temperature"] is not None
):
setting = float(data["setting"]["temperature"]["celsius"])
self._target_temp = self.hass.config.units.temperature(setting, unit)
if "tadoMode" in data:
mode = data["tadoMode"]
self._is_away = mode == "AWAY"
if "setting" in data:
power = data["setting"]["power"]
if power == "OFF":
self._current_operation = CONST_MODE_OFF
self._current_fan = CONST_MODE_OFF
# There is no overlay, the mode will always be
# "SMART_SCHEDULE"
self._overlay_mode = CONST_MODE_SMART_SCHEDULE
self._device_is_active = False
else:
self._device_is_active = True
active = False
if "activityDataPoints" in data:
activity_data = data["activityDataPoints"]
if self._ac_device:
if "acPower" in activity_data and activity_data["acPower"] is not None:
if not activity_data["acPower"]["value"] == "OFF":
active = True
else:
if (
"heatingPower" in activity_data
and activity_data["heatingPower"] is not None
):
if float(activity_data["heatingPower"]["percentage"]) > 0.0:
active = True
self._active = active
overlay = False
overlay_data = None
termination = CONST_MODE_SMART_SCHEDULE
cooling = False
fan_speed = CONST_MODE_OFF
if "overlay" in data:
overlay_data = data["overlay"]
overlay = overlay_data is not None
if overlay:
termination = overlay_data["termination"]["type"]
setting = False
setting_data = None
if "setting" in overlay_data:
setting_data = overlay_data["setting"]
setting = setting_data is not None
if setting:
if "mode" in setting_data:
cooling = setting_data["mode"] == "COOL"
if "fanSpeed" in setting_data:
fan_speed = setting_data["fanSpeed"]
if self._device_is_active:
# If you set mode manually to off, there will be an overlay
# and a termination, but we want to see the mode "OFF"
self._overlay_mode = termination
self._current_operation = termination
self._cooling = cooling
self._current_fan = fan_speed
def _control_heating(self):
"""Send new target temperature to mytado."""
if None not in (self._cur_temp, self._target_temp):
_LOGGER.info(
"Obtained current (%d) and target temperature (%d). "
"Tado thermostat active",
self._cur_temp,
self._target_temp,
)
if self._current_operation == CONST_MODE_SMART_SCHEDULE:
_LOGGER.info(
"Switching mytado.com to SCHEDULE (default) for zone %s (%d)",
self.zone_name,
self.zone_id,
)
self._store.reset_zone_overlay(self.zone_id)
self._overlay_mode = self._current_operation
return
if self._current_operation == CONST_MODE_OFF:
if self._ac_device:
_LOGGER.info(
"Switching mytado.com to OFF for zone %s (%d) - AIR_CONDITIONING",
self.zone_name,
self.zone_id,
)
self._store.set_zone_off(
self.zone_id, CONST_OVERLAY_MANUAL, "AIR_CONDITIONING"
)
else:
_LOGGER.info(
"Switching mytado.com to OFF for zone %s (%d) - HEATING",
self.zone_name,
self.zone_id,
)
self._store.set_zone_off(self.zone_id, CONST_OVERLAY_MANUAL, "HEATING")
self._overlay_mode = self._current_operation
return
if self._ac_device:
_LOGGER.info(
"Switching mytado.com to %s mode for zone %s (%d). Temp (%s) - AIR_CONDITIONING",
self._current_operation,
self.zone_name,
self.zone_id,
self._target_temp,
)
self._store.set_zone_overlay(
self.zone_id,
self._current_operation,
self._target_temp,
None,
"AIR_CONDITIONING",
"COOL",
)
else:
_LOGGER.info(
"Switching mytado.com to %s mode for zone %s (%d). Temp (%s) - HEATING",
self._current_operation,
self.zone_name,
self.zone_id,
self._target_temp,
)
self._store.set_zone_overlay(
self.zone_id,
self._current_operation,
self._target_temp,
None,
"HEATING",
)
self._overlay_mode = self._current_operation
@property
def is_aux_heat(self) -> Optional[bool]:
"""Return true if aux heater.
Requires SUPPORT_AUX_HEAT.
"""
return None
def turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
pass
def turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater off."""
pass
@property
def swing_mode(self) -> Optional[str]:
"""Return the swing setting.
Requires SUPPORT_SWING_MODE.
"""
return None
@property
def swing_modes(self) -> Optional[List[str]]:
"""Return the list of available swing modes.
Requires SUPPORT_SWING_MODE.
"""
return None
def set_swing_mode(self, swing_mode: str) -> None:
"""Set new target swing operation."""
pass
|
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for interpreting typed objects in Oppia."""
__author__ = 'Sean Lip'
import copy
import os
import feconf
import schema_utils
import utils
class BaseObject(object):
"""Base object class.
This is the superclass for typed object specifications in Oppia, such as
SanitizedUrl and CoordTwoDim.
Typed objects are initialized from a raw Python object which is expected to
be derived from a JSON object. They are validated and normalized to basic
Python objects (primitive types combined via lists and dicts; no sets or
tuples).
"""
# These values should be overridden in subclasses.
description = ''
edit_html_filename = None
edit_js_filename = None
@classmethod
def normalize(cls, raw):
"""Validates and normalizes a raw Python object.
Returns:
a normalized Python object describing the Object specified by this
class.
Raises:
TypeError: if the Python object cannot be normalized.
"""
return schema_utils.normalize_against_schema(raw, cls.SCHEMA)
@classmethod
def has_editor_js_template(cls):
return cls.edit_js_filename is not None
@classmethod
def get_editor_js_template(cls):
if cls.edit_js_filename is None:
raise Exception(
'There is no editor template defined for objects of type %s' %
cls.__name__)
return utils.get_file_contents(os.path.join(
os.getcwd(), feconf.OBJECT_TEMPLATES_DIR,
'%s.js' % cls.edit_js_filename))
@classmethod
def get_editor_html_template(cls):
if cls.edit_html_filename is None:
raise Exception(
'There is no editor template defined for objects of type %s' %
cls.__name__)
return utils.get_file_contents(os.path.join(
os.getcwd(), feconf.OBJECT_TEMPLATES_DIR,
'%s.html' % cls.edit_html_filename))
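# Illustrative sketch (not part of the original module): a typical subclass
# only declares a description, optional editor templates and a SCHEMA, and
# inherits normalize(). The class name and values below are assumptions chosen
# for illustration.
#
#   class ExampleInt(BaseObject):
#       description = 'An illustrative integer object.'
#       SCHEMA = {'type': 'int'}
#
#   ExampleInt.normalize(3)  # -> 3, after validation against SCHEMA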
class Null(BaseObject):
"""Class for a null object."""
description = 'A null object.'
@classmethod
def normalize(cls, raw):
"""Validates and normalizes a raw Python object."""
return None
class Boolean(BaseObject):
"""Class for booleans."""
description = 'A boolean.'
edit_html_filename = 'boolean_editor'
edit_js_filename = 'BooleanEditor'
SCHEMA = {
'type': 'bool'
}
@classmethod
def normalize(cls, raw):
"""Validates and normalizes a raw Python object."""
if raw is None or raw == '':
raw = False
return schema_utils.normalize_against_schema(raw, cls.SCHEMA)
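# Descriptive note: None and the empty string are coerced to False before
# schema validation, so Boolean.normalize('') and Boolean.normalize(None)
# both yield False, while Boolean.normalize(True) yields True.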
class Real(BaseObject):
"""Real number class."""
description = 'A real number.'
edit_html_filename = 'real_editor'
edit_js_filename = 'RealEditor'
SCHEMA = {
'type': 'float'
}
class Int(BaseObject):
"""Integer class."""
description = 'An integer.'
edit_html_filename = 'int_editor'
edit_js_filename = 'IntEditor'
SCHEMA = {
'type': 'int'
}
class UnicodeString(BaseObject):
"""Unicode string class."""
description = 'A unicode string.'
edit_html_filename = 'unicode_string_editor'
edit_js_filename = 'UnicodeStringEditor'
SCHEMA = {
'type': 'unicode',
}
class Html(BaseObject):
"""HTML string class."""
description = 'An HTML string.'
edit_html_filename = 'html_editor'
edit_js_filename = 'HtmlEditor'
SCHEMA = {
'type': 'html',
}
class NonnegativeInt(BaseObject):
"""Nonnegative integer class."""
description = 'A non-negative integer.'
edit_html_filename = 'nonnegative_int_editor'
edit_js_filename = 'NonnegativeIntEditor'
SCHEMA = {
'type': 'int',
'validators': [{
'id': 'is_at_least',
'min_value': 0
}]
}
class CodeEvaluation(BaseObject):
"""Evaluation result of programming code."""
description = 'Code and its evaluation results.'
SCHEMA = {
'type': 'dict',
'properties': [{
'name': 'code',
'schema': UnicodeString.SCHEMA,
}, {
'name': 'output',
'schema': UnicodeString.SCHEMA,
}, {
'name': 'evaluation',
'schema': UnicodeString.SCHEMA,
}, {
'name': 'error',
'schema': UnicodeString.SCHEMA,
}]
}
class CoordTwoDim(BaseObject):
"""2D coordinate class."""
description = 'A two-dimensional coordinate (a pair of reals).'
edit_html_filename = 'coord_two_dim_editor'
edit_js_filename = 'CoordTwoDimEditor'
SCHEMA = {
'type': 'list',
'len': 2,
'items': Real.SCHEMA,
}
class ListOfUnicodeString(BaseObject):
"""List class."""
description = 'A list.'
edit_html_filename = 'list_editor'
edit_js_filename = 'ListOfUnicodeStringEditor'
SCHEMA = {
'type': 'list',
'items': UnicodeString.SCHEMA
}
class SetOfUnicodeString(BaseObject):
"""Class for sets of UnicodeStrings."""
description = 'A set (a list with unique elements) of unicode strings.'
edit_html_filename = 'list_editor'
edit_js_filename = 'SetOfUnicodeStringEditor'
SCHEMA = {
'type': 'list',
'items': UnicodeString.SCHEMA,
'validators': [{
'id': 'is_uniquified'
}]
}
class NormalizedString(BaseObject):
"""Unicode string with spaces collapsed."""
description = 'A unicode string with adjacent whitespace collapsed.'
edit_html_filename = 'unicode_string_editor'
edit_js_filename = 'NormalizedStringEditor'
SCHEMA = {
'type': 'unicode',
'post_normalizers': [{
'id': 'normalize_spaces'
}]
}
class MathLatexString(BaseObject):
"""Math LaTeX string class"""
description = 'A LaTeX string.'
edit_html_filename = 'math_latex_string_editor'
edit_js_filename = 'MathLatexStringEditor'
SCHEMA = UnicodeString.SCHEMA
class SanitizedUrl(BaseObject):
"""HTTP or HTTPS url string class."""
description = 'An HTTP or HTTPS url.'
edit_html_filename = 'unicode_string_editor'
edit_js_filename = 'SanitizedUrlEditor'
SCHEMA = {
'type': 'unicode',
'post_normalizers': [{
'id': 'sanitize_url'
}]
}
class MusicPhrase(BaseObject):
"""List of Objects that represent a musical phrase."""
description = ('A musical phrase that contains zero or more notes, rests, '
'and time signature.')
edit_html_filename = 'music_phrase_editor'
edit_js_filename = 'MusicPhraseEditor'
# The maximum number of notes allowed in a music phrase.
_MAX_NOTES_IN_PHRASE = 8
_FRACTION_PART_SCHEMA = {
'type': 'int',
'validators': [{
'id': 'is_at_least',
'min_value': 1
}]
}
SCHEMA = {
'type': 'list',
'items': {
'type': 'dict',
'properties': [{
'name': 'readableNoteName',
'schema': {
'type': 'unicode',
'choices': [
'C4', 'D4', 'E4', 'F4', 'G4', 'A4', 'B4', 'C5',
'D5', 'E5', 'F5', 'G5', 'A5'
]
}
}, {
'name': 'noteDuration',
'schema': {
'type': 'dict',
'properties': [{
'name': 'num',
'schema': _FRACTION_PART_SCHEMA
}, {
'name': 'den',
'schema': _FRACTION_PART_SCHEMA
}]
}
}],
},
'validators': [{
'id': 'has_length_at_most',
'max_value': _MAX_NOTES_IN_PHRASE,
}]
}
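# Illustrative example (values are assumptions chosen to satisfy the schema
# above): a phrase consisting of a single quarter note on middle C would be
#
#   [{'readableNoteName': 'C4', 'noteDuration': {'num': 1, 'den': 4}}]
#
# and at most _MAX_NOTES_IN_PHRASE (8) such entries are allowed per phrase.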
class Filepath(BaseObject):
"""A string representing a filepath.
The path will be prefixed with '[exploration_id]/assets'.
"""
description = 'A string that represents a filepath'
edit_html_filename = 'filepath_editor'
edit_js_filename = 'FilepathEditor'
SCHEMA = UnicodeString.SCHEMA
class CheckedProof(BaseObject):
"""A proof attempt and any errors it makes."""
description = 'A proof attempt and any errors it makes.'
@classmethod
def normalize(cls, raw):
"""Validates and normalizes a raw Python object."""
try:
assert isinstance(raw, dict)
assert isinstance(raw['assumptions_string'], basestring)
assert isinstance(raw['target_string'], basestring)
assert isinstance(raw['proof_string'], basestring)
assert raw['correct'] in [True, False]
if not raw['correct']:
assert isinstance(raw['error_category'], basestring)
assert isinstance(raw['error_code'], basestring)
assert isinstance(raw['error_message'], basestring)
assert isinstance(raw['error_line_number'], int)
return copy.deepcopy(raw)
except Exception:
raise TypeError('Cannot convert to checked proof %s' % raw)
class LogicQuestion(BaseObject):
"""A question giving a formula to prove"""
description = 'A question giving a formula to prove.'
edit_html_filename = 'logic_question_editor'
edit_js_filename = 'LogicQuestionEditor'
@classmethod
def normalize(cls, raw):
"""Validates and normalizes a raw Python object."""
def _validateExpression(expression):
assert isinstance(expression, dict)
assert isinstance(expression['top_kind_name'], basestring)
assert isinstance(expression['top_operator_name'], basestring)
_validateExpressionArray(expression['arguments'])
_validateExpressionArray(expression['dummies'])
def _validateExpressionArray(array):
assert isinstance(array, list)
for item in array:
_validateExpression(item)
try:
assert isinstance(raw, dict)
_validateExpressionArray(raw['assumptions'])
_validateExpressionArray(raw['results'])
assert isinstance(raw['default_proof_string'], basestring)
return copy.deepcopy(raw)
except Exception:
raise TypeError('Cannot convert to a logic question %s' % raw)
class LogicErrorCategory(BaseObject):
"""A string from a list of possible categories"""
description = 'One of the possible error categories of a logic proof.'
edit_html_filename = 'logic_error_category_editor'
edit_js_filename = 'LogicErrorCategoryEditor'
SCHEMA = {
'type': 'unicode',
'choices': [
'parsing', 'typing', 'line', 'layout', 'variables', 'logic',
'target', 'mistake'
]
}
class Graph(BaseObject):
"""A (mathematical) graph with edges and vertices"""
description = 'A (mathematical) graph'
edit_html_filename = 'graph_editor'
edit_js_filename = 'GraphEditor'
_VERTEX_SCHEMA = {
'type': 'dict',
'properties': [{
'name': 'x',
'schema': Real.SCHEMA
}, {
'name': 'y',
'schema': Real.SCHEMA
}, {
'name': 'label',
'schema': UnicodeString.SCHEMA
}]
}
_EDGE_SCHEMA = {
'type': 'dict',
'properties': [{
'name': 'src',
'schema': Int.SCHEMA
}, {
'name': 'dst',
'schema': Int.SCHEMA
}, {
'name': 'weight',
'schema': Int.SCHEMA
}]
}
SCHEMA = {
'type': 'dict',
'properties': [{
'name': 'vertices',
'schema': {
'type': 'list',
'items': _VERTEX_SCHEMA
}
}, {
'name': 'edges',
'schema': {
'type': 'list',
'items': _EDGE_SCHEMA
}
}, {
'name': 'isLabeled',
'schema': Boolean.SCHEMA
}, {
'name': 'isDirected',
'schema': Boolean.SCHEMA
}, {
'name': 'isWeighted',
'schema': Boolean.SCHEMA
}]
}
@classmethod
def normalize(cls, raw):
"""Validates and normalizes a raw Python object."""
"""
Checks that there are no self-loops or multiple edges.
Checks that unlabeled graphs have all labels empty.
Checks that unweighted graphs have all weights set to 1.
TODO(czx): Think about support for multigraphs?
"""
try:
raw = schema_utils.normalize_against_schema(raw, cls.SCHEMA)
if not raw['isLabeled']:
for vertex in raw['vertices']:
assert (vertex['label'] == '')
for edge in raw['edges']:
assert (edge['src'] != edge['dst'])
if not raw['isWeighted']:
assert (edge['weight'] == 1.0)
if raw['isDirected']:
edge_pairs = [
(edge['src'], edge['dst']) for edge in raw['edges']]
else:
edge_pairs = (
[(edge['src'], edge['dst']) for edge in raw['edges']] +
[(edge['dst'], edge['src']) for edge in raw['edges']]
)
assert len(set(edge_pairs)) == len(edge_pairs)
except Exception:
raise TypeError('Cannot convert to graph %s' % raw)
return raw
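# Illustrative example (assumed values): a raw graph that passes the checks in
# normalize() above -- two vertices joined by one undirected, unweighted,
# unlabeled edge:
#
#   Graph.normalize({
#       'vertices': [{'x': 0.0, 'y': 0.0, 'label': ''},
#                    {'x': 1.0, 'y': 1.0, 'label': ''}],
#       'edges': [{'src': 0, 'dst': 1, 'weight': 1}],
#       'isLabeled': False, 'isDirected': False, 'isWeighted': False,
#   })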
class NormalizedRectangle2D(BaseObject):
"""Normalized Rectangle class."""
description = (
'A rectangle normalized so that the coordinates are within the range '
'[0,1].')
SCHEMA = {
'type': 'list',
'len': 2,
'items': {
'type': 'list',
'len': 2,
'items': Real.SCHEMA
}
}
@classmethod
def normalize(cls, raw):
# Moves cur_value to the nearest available value in the range
# [min_value, max_value].
def clamp(min_value, current_value, max_value):
return min(max_value, max(min_value, current_value))
try:
raw = schema_utils.normalize_against_schema(raw, cls.SCHEMA)
raw[0][0] = clamp(0.0, raw[0][0], 1.0)
raw[0][1] = clamp(0.0, raw[0][1], 1.0)
raw[1][0] = clamp(0.0, raw[1][0], 1.0)
raw[1][1] = clamp(0.0, raw[1][1], 1.0)
except Exception:
raise TypeError('Cannot convert to Normalized Rectangle %s' % raw)
return raw
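# Illustrative example (assumed values): out-of-range coordinates are clamped
# into [0, 1], e.g.
#   NormalizedRectangle2D.normalize([[0.1, -0.2], [1.5, 0.9]])
#       -> [[0.1, 0.0], [1.0, 0.9]]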
class ImageRegion(BaseObject):
"""A region of an image, including its shape and coordinates."""
description = 'A region of an image.'
# Note: at the moment, only supports rectangular image regions.
# Coordinates are:
# [[top-left-x, top-left-y], [bottom-right-x, bottom-right-y]].
# Origin is top-left, increasing x is to the right, increasing y is down.
SCHEMA = {
'type': 'dict',
'properties': [{
'name': 'regionType',
'schema': UnicodeString.SCHEMA
}, {
'name': 'area',
'schema': NormalizedRectangle2D.SCHEMA
}]
}
class ImageWithRegions(BaseObject):
"""An image overlaid with labeled regions."""
description = 'An image overlaid with regions.'
edit_html_filename = 'image_with_regions_editor'
edit_js_filename = 'ImageWithRegionsEditor'
SCHEMA = {
'type': 'dict',
'properties': [{
'name': 'imagePath',
'schema': Filepath.SCHEMA
}, {
'name': 'labeledRegions',
'schema': {
'type': 'list',
'items': {
'type': 'dict',
'properties': [{
'name': 'label',
'schema': UnicodeString.SCHEMA
}, {
'name': 'region',
'schema': ImageRegion.SCHEMA
}]
}
}
}]
}
class ClickOnImage(BaseObject):
"""A click on an image and the clicked regions."""
description = "Position of a click and a list of regions clicked."
SCHEMA = {
'type': 'dict',
'properties': [{
'name': 'clickPosition',
'schema': {
'type': 'list',
'items': Real.SCHEMA,
'len': 2
}
}, {
'name': 'clickedRegions',
'schema': {
'type': 'list',
'items': UnicodeString.SCHEMA
}
}]
}
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard plugin for performance profiling."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import re
import threading
import six
import tensorflow.compat.v2 as tf
from werkzeug import wrappers
from tensorboard.backend.event_processing import plugin_asset_util
from tensorboard.context import RequestContext
from tensorboard.plugins import base_plugin
from tensorflow.python.profiler import profiler_client # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.profiler import profiler_v2 as profiler # pylint: disable=g-direct-tensorflow-import
from tensorboard_plugin_profile.convert import raw_to_tool_data as convert
tf.enable_v2_behavior()
logger = logging.getLogger('tensorboard')
# The prefix of routes provided by this plugin.
PLUGIN_NAME = 'profile'
INDEX_JS_ROUTE = '/index.js'
INDEX_HTML_ROUTE = '/index.html'
BUNDLE_JS_ROUTE = '/bundle.js'
STYLES_CSS_ROUTE = '/styles.css'
MATERIALICONS_WOFF2_ROUTE = '/materialicons.woff2'
TRACE_VIEWER_INDEX_HTML_ROUTE = '/trace_viewer_index.html'
TRACE_VIEWER_INDEX_JS_ROUTE = '/trace_viewer_index.js'
ZONE_JS_ROUTE = '/zone.js'
DATA_ROUTE = '/data'
TOOLS_ROUTE = '/tools'
HOSTS_ROUTE = '/hosts'
CAPTURE_ROUTE = '/capture_profile'
# Suffixes of "^, #, @" symbols represent different input data formats for the
# same tool.
# 1) '^': data generated from XPlane.
# 2) '#': data is in gzip format.
# 3) '@': data generated from proto, or tracetable for streaming trace viewer.
# 4) no suffix: data is in json format, ready to feed to frontend.
TOOLS = {
'trace_viewer': 'trace',
'trace_viewer#': 'trace.json.gz',
'trace_viewer@': 'tracetable', # streaming trace viewer
'op_profile': 'op_profile.json',
'input_pipeline_analyzer': 'input_pipeline.json',
'input_pipeline_analyzer@': 'input_pipeline.pb',
'overview_page': 'overview_page.json',
'overview_page@': 'overview_page.pb',
'memory_viewer': 'memory_viewer.json',
'pod_viewer': 'pod_viewer.json',
'tensorflow_stats': 'tensorflow_stats.pb',
'kernel_stats': 'kernel_stats.pb',
'memory_profile#': 'memory_profile.json.gz',
'xplane': 'xplane.pb',
'tf_data_bottleneck_analysis': 'tf_data_bottleneck_analysis.json',
}
ALL_HOSTS = 'ALL_HOSTS'
_EXTENSION_TO_TOOL = {extension: tool for tool, extension in TOOLS.items()}
_FILENAME_RE = re.compile(r'(?:(.*)\.)?(' +
'|'.join(TOOLS.values()).replace('.', r'\.') + r')')
# Tools that consume raw data.
RAW_DATA_TOOLS = frozenset(
tool for tool, extension in TOOLS.items()
if extension.endswith('.json') or extension.endswith('.json.gz'))
# Tools that can be generated from xplane end with ^.
XPLANE_TOOLS = [
'trace_viewer^',
'overview_page^',
'input_pipeline_analyzer^',
'tensorflow_stats^',
'kernel_stats^',
'memory_profile^',
'pod_viewer^',
'tf_data_bottleneck_analysis^',
]
# XPlane generated tools that support all host mode.
XPLANE_TOOLS_ALL_HOSTS_SUPPORTED = frozenset([
'input_pipeline_analyzer^',
'tensorflow_stats^',
'kernel_stats^',
'overview_page^',
'pod_viewer^',
'tf_data_bottleneck_analysis^',
])
# XPlane generated tools that only support all host mode.
XPLANE_TOOLS_ALL_HOSTS_ONLY = frozenset(
['overview_page^', 'pod_viewer^', 'tf_data_bottleneck_analysis^'])
def use_xplane(tool):
return tool[-1] == '^'
def make_filename(host, tool):
"""Returns the name of the file containing data for the given host and tool.
Args:
host: Name of the host that produced the profile data, e.g., 'localhost'.
tool: Name of the tool, e.g., 'trace_viewer'.
Returns:
The host name concatenated with the tool-specific extension, e.g.,
'localhost.trace'.
"""
filename = str(host) + '.' if host else ''
tool = 'xplane' if use_xplane(tool) else tool
return filename + TOOLS[tool]
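# Illustrative examples (hosts and tools are assumptions):
#   make_filename('localhost', 'trace_viewer')  -> 'localhost.trace'
#   make_filename(None, 'overview_page^')       -> 'xplane.pb'  (xplane-backed tool)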
def _parse_filename(filename):
"""Returns the host and tool encoded in a filename in the run directory.
Args:
filename: Name of a file in the run directory. The name might encode a host
and tool, e.g., 'host.tracetable', 'host.domain.op_profile.json', or just
a tool, e.g., 'trace', 'tensorflow_stats.pb'.
Returns:
A tuple (host, tool) containing the names of the host and tool, e.g.,
('localhost', 'trace_viewer'). Either of the tuple's components can be None.
"""
m = _FILENAME_RE.fullmatch(filename)
if m is None:
return filename, None
return m.group(1), _EXTENSION_TO_TOOL[m.group(2)]
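# Illustrative examples (filenames are assumptions):
#   _parse_filename('host1.trace')         -> ('host1', 'trace_viewer')
#   _parse_filename('tensorflow_stats.pb') -> (None, 'tensorflow_stats')
#   _parse_filename('not_a_profile_file')  -> ('not_a_profile_file', None)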
def _get_hosts(filenames):
"""Parses a list of filenames and returns the set of hosts.
Args:
filenames: A list of filenames (just basenames, no directory).
Returns:
A set of host names encoded in the filenames.
"""
hosts = set()
for name in filenames:
host, _ = _parse_filename(name)
if host:
hosts.add(host)
return hosts
def _get_tools(filenames):
"""Parses a list of filenames and returns the set of tools.
If xplane is present in the repository, add tools that can be generated by
xplane if we don't have a file for the tool.
Args:
filenames: A list of filenames (just basenames, no directory).
Returns:
A set of tool names encoded in the filenames.
"""
tools = set()
found = set()
has_xplane = False
for name in filenames:
_, tool = _parse_filename(name)
if tool == 'xplane':
has_xplane = True
continue
elif tool:
tools.add(tool)
if tool[-1] in ('@', '#'):
found.add(tool[:-1])
else:
found.add(tool)
if has_xplane:
for item in XPLANE_TOOLS:
if item[:-1] not in found:
tools.add(item)
return tools
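# Illustrative example (assumed filenames): ['host1.trace', 'host1.xplane.pb']
# yields {'trace_viewer'} plus every XPLANE_TOOLS entry whose base tool was not
# already found (e.g. 'overview_page^', 'tensorflow_stats^', ...), because the
# presence of an xplane file means those tools can be generated on demand.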
def get_worker_list(cluster_resolver):
"""Parses TPU workers list from the cluster resolver."""
cluster_spec = cluster_resolver.cluster_spec()
task_indices = cluster_spec.task_indices('worker')
worker_list = [
cluster_spec.task_address('worker', i).replace(':8470', ':8466')
for i in task_indices
]
return ','.join(worker_list)
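# Illustrative example (assumed cluster spec): workers at 'w0:8470' and
# 'w1:8470' yield 'w0:8466,w1:8466', i.e. the TPU port 8470 is rewritten to
# the profiler service port 8466.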
def respond(body, content_type, code=200, content_encoding=None):
"""Create a Werkzeug response, handling JSON serialization and CSP.
Args:
body: For JSON responses, a JSON-serializable object; otherwise, a raw
`bytes` string or Unicode `str` (which will be encoded as UTF-8).
content_type: Response content-type (`str`); use `application/json` to
automatically serialize structures.
code: HTTP status code (`int`).
content_encoding: Response Content-Encoding header ('str'); e.g. 'gzip'.
Returns:
A `werkzeug.wrappers.BaseResponse` object.
"""
if content_type == 'application/json' and isinstance(
body, (dict, list, set, tuple)):
body = json.dumps(body, sort_keys=True)
if not isinstance(body, bytes):
body = body.encode('utf-8')
csp_parts = {
'default-src': ["'self'"],
'script-src': [
"'self'",
"'unsafe-eval'",
"'unsafe-inline'",
'https://www.gstatic.com',
],
'object-src': ["'none'"],
'style-src': [
"'self'",
"'unsafe-inline'",
'https://www.gstatic.com',
],
'img-src': [
"'self'",
'blob:',
'data:',
],
}
csp = ';'.join((' '.join([k] + v) for (k, v) in csp_parts.items()))
headers = [
('Content-Security-Policy', csp),
('X-Content-Type-Options', 'nosniff'),
]
if content_encoding:
headers.append(('Content-Encoding', content_encoding))
return wrappers.Response(
body, content_type=content_type, status=code, headers=headers)
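# Illustrative usage (payloads are assumptions): dict/list bodies with an
# 'application/json' content type are serialized automatically, e.g.
#   respond({'result': 'ok'}, 'application/json')
# while bytes/str bodies are passed through (str is UTF-8 encoded):
#   respond(b'<html>...</html>', 'text/html', code=200)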
def _plugin_assets(logdir, runs, plugin_name):
result = {}
for run in runs:
run_path = os.path.join(logdir, run)
assets = plugin_asset_util.ListAssets(run_path, plugin_name)
result[run] = assets
return result
def filenames_to_hosts(filenames, tool):
"""Convert a list of filenames to a list of host names given a tool.
Args:
filenames: A list of filenames.
tool: A string representing the profiling tool.
Returns:
A list of hostnames.
"""
hosts = _get_hosts(filenames)
if len(hosts) > 1:
if tool in XPLANE_TOOLS_ALL_HOSTS_ONLY:
hosts = [ALL_HOSTS]
elif tool in XPLANE_TOOLS_ALL_HOSTS_SUPPORTED:
hosts.add(ALL_HOSTS)
return sorted(hosts)
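# Illustrative example (assumed filenames): for a tool that supports host
# aggregation, a synthetic ALL_HOSTS entry is added when several hosts exist:
#   filenames_to_hosts(['host1.xplane.pb', 'host2.xplane.pb'], 'tensorflow_stats^')
#       -> ['ALL_HOSTS', 'host1', 'host2']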
def get_data_content_encoding(raw_data, tool, tqx):
"""Converts raw tool proto into the correct tool data.
Args:
raw_data: bytes representing raw data from the tool.
tool: string of tool name.
tqx: Gviz output format.
Returns:
The converted data and the content encoding of the data.
"""
data, content_encoding = None, None
if tool in RAW_DATA_TOOLS:
data = raw_data
if tool[-1] == '#':
content_encoding = 'gzip'
else:
data = convert.tool_proto_to_tool_data(raw_data, tool, tqx)
return data, content_encoding
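# Descriptive note: tools in RAW_DATA_TOOLS (JSON or gzipped JSON) are passed
# through unchanged, with content_encoding set to 'gzip' for the '#' variants
# (e.g. 'memory_profile#'); proto-based tools (e.g. 'tensorflow_stats') go
# through convert.tool_proto_to_tool_data first.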
class ProfilePlugin(base_plugin.TBPlugin):
"""Profile Plugin for TensorBoard."""
plugin_name = PLUGIN_NAME
def __init__(self, context):
"""Constructs a profiler plugin for TensorBoard.
This plugin adds handlers for performance-related frontends.
Args:
context: A base_plugin.TBContext instance.
"""
self.logdir = context.logdir
self.data_provider = context.data_provider
self.stub = None
self.master_tpu_unsecure_channel = context.flags.master_tpu_unsecure_channel
# Whether the plugin is active. This is an expensive computation, so we
# compute this asynchronously and cache positive results indefinitely.
self._is_active = False
# Lock to ensure at most one thread computes _is_active at a time.
self._is_active_lock = threading.Lock()
def is_active(self):
"""Whether this plugin is active and has any profile data to show.
Detecting profile data is expensive, so this process runs asynchronously
and the value reported by this method is the cached value and may be stale.
Returns:
Whether any run has profile data.
"""
# If we are already active, we remain active and don't recompute this.
# Otherwise, try to acquire the lock without blocking; if we get it and
# we're still not active, launch a thread to check if we're active and
# release the lock once the computation is finished. Either way, this
# thread returns the current cached value to avoid blocking.
if not self._is_active and self._is_active_lock.acquire(False):
if self._is_active:
self._is_active_lock.release()
else:
def compute_is_active():
self._is_active = any(self.generate_run_to_tools())
self._is_active_lock.release()
new_thread = threading.Thread(
target=compute_is_active, name='DynamicProfilePluginIsActiveThread')
new_thread.start()
return self._is_active
def get_plugin_apps(self):
return {
INDEX_JS_ROUTE: self.static_file_route,
INDEX_HTML_ROUTE: self.static_file_route,
BUNDLE_JS_ROUTE: self.static_file_route,
STYLES_CSS_ROUTE: self.static_file_route,
MATERIALICONS_WOFF2_ROUTE: self.static_file_route,
TRACE_VIEWER_INDEX_HTML_ROUTE: self.static_file_route,
TRACE_VIEWER_INDEX_JS_ROUTE: self.static_file_route,
ZONE_JS_ROUTE: self.static_file_route,
TOOLS_ROUTE: self.tools_route,
HOSTS_ROUTE: self.hosts_route,
DATA_ROUTE: self.data_route,
CAPTURE_ROUTE: self.capture_route,
}
def frontend_metadata(self):
return base_plugin.FrontendMetadata(es_module_path='/index.js')
def _read_static_file_impl(self, filename):
"""Reads contents from a filename.
Args:
filename (str): Name of the file.
Returns:
Contents of the file.
Raises:
IOError: File could not be read or found.
"""
filepath = os.path.join(os.path.dirname(__file__), 'static', filename)
with open(filepath, 'rb') as infile:
contents = infile.read()
return contents
@wrappers.Request.application
def static_file_route(self, request):
filename = os.path.basename(request.path)
extension = os.path.splitext(filename)[1]
if extension == '.html':
mimetype = 'text/html'
elif extension == '.css':
mimetype = 'text/css'
elif extension == '.js':
mimetype = 'application/javascript'
else:
mimetype = 'application/octet-stream'
try:
contents = self._read_static_file_impl(filename)
except IOError:
return respond('404 Not Found', 'text/plain', code=404)
return respond(contents, mimetype)
@wrappers.Request.application
def tools_route(self, request):
run_to_tools = self.tools_impl(request)
return respond(run_to_tools, 'application/json')
def tools_impl(self, request):
return dict(self.generate_run_to_tools())
def host_impl(self, run, tool, request=None):
"""Returns available hosts for the run and tool in the log directory.
In the plugin log directory, each directory contains profile data for a
single run (identified by the directory name), and files in the run
directory contains data for different tools and hosts. The file that
contains profile for a specific tool "x" will have extension TOOLS["x"].
Example:
log/
run1/
plugins/
profile/
host1.trace
host2.trace
run2/
plugins/
profile/
host1.trace
host2.trace
Args:
run: the frontend run name, e.g., 'run1' or 'run2' for the example above.
tool: the requested tool, e.g., 'trace_viewer' for the example above.
request: Optional; werkzeug request used for grabbing ctx and experiment
id for other host implementations
Returns:
A list of host names, e.g. ["host1", "host2"] for the example above.
"""
run_dir = self._run_dir(run)
if not run_dir:
logger.warning('Cannot find asset directory for: %s', run)
return []
tool_pattern = make_filename('*', tool)
filenames = []
try:
filenames = tf.io.gfile.glob(os.path.join(run_dir, tool_pattern))
except tf.errors.OpError as e:
logger.warning('Cannot read asset directory: %s, OpError %s', run_dir, e)
filenames = [os.path.basename(f) for f in filenames]
return filenames_to_hosts(filenames, tool)
@wrappers.Request.application
def hosts_route(self, request):
run = request.args.get('run')
tool = request.args.get('tag')
hosts = self.host_impl(run, tool, request)
return respond(hosts, 'application/json')
def data_impl(self, request):
"""Retrieves and processes the tool data for a run and a host.
Args:
request: XMLHttpRequest
Returns:
A string that can be served to the frontend tool or None if tool,
run or host is invalid.
"""
run = request.args.get('run')
tool = request.args.get('tag')
host = request.args.get('host')
tqx = request.args.get('tqx')
run_dir = self._run_dir(run)
# Profile plugin "run" is the last component of run dir.
profile_run = os.path.basename(run_dir)
if tool not in TOOLS and not use_xplane(tool):
return None, None
self.start_grpc_stub_if_necessary()
if tool == 'trace_viewer@' and self.stub is not None:
# Streaming trace viewer needs profiler_analysis service, which is only
# supported in Cloud TPU. This code is unused when data was produced by
# open-source TensorFlow. Only import the library when needed.
# pylint: disable=g-import-not-at-top
# pylint: disable=g-direct-tensorflow-import
from tensorflow.core.profiler import profiler_analysis_pb2
# pylint: enable=g-import-not-at-top
# pylint: enable=g-direct-tensorflow-import
grpc_request = profiler_analysis_pb2.ProfileSessionDataRequest()
grpc_request.repository_root = os.path.dirname(run_dir)
grpc_request.session_id = profile_run
grpc_request.tool_name = 'trace_viewer'
# Remove the trailing dot if present
grpc_request.host_name = host.rstrip('.')
grpc_request.parameters['resolution'] = request.args.get(
'resolution', 8000)
if request.args.get('start_time_ms') is not None:
grpc_request.parameters['start_time_ms'] = request.args.get(
'start_time_ms')
if request.args.get('end_time_ms') is not None:
grpc_request.parameters['end_time_ms'] = request.args.get('end_time_ms')
grpc_response = self.stub.GetSessionToolData(grpc_request)
return grpc_response.output, None
asset_path = os.path.join(run_dir, make_filename(host, tool))
data, content_encoding = None, None
if use_xplane(tool):
if host == ALL_HOSTS:
file_pattern = make_filename('*', 'xplane')
try:
asset_paths = tf.io.gfile.glob(os.path.join(run_dir, file_pattern))
except tf.errors.OpError as e:
logger.warning('Cannot read asset directory: %s, OpError %s', run_dir,
e)
else:
asset_paths = [asset_path]
try:
data = convert.xspace_to_tool_data(asset_paths, tool, tqx)
except AttributeError:
logger.warning('XPlane converters are available after TensorFlow 2.4')
return data, content_encoding
raw_data = None
try:
with tf.io.gfile.GFile(asset_path, 'rb') as f:
raw_data = f.read()
except tf.errors.NotFoundError:
logger.warning('Asset path %s not found', asset_path)
except tf.errors.OpError as e:
logger.warning("Couldn't read asset path: %s, OpError %s", asset_path, e)
if raw_data is None:
return None, None
return get_data_content_encoding(raw_data, tool, tqx)
@wrappers.Request.application
def data_route(self, request):
# params
# request: XMLHTTPRequest.
data, content_encoding = self.data_impl(request)
if data is None:
return respond('404 Not Found', 'text/plain', code=404)
return respond(data, 'application/json', content_encoding=content_encoding)
@wrappers.Request.application
def capture_route(self, request):
return self.capture_route_impl(request)
def capture_route_impl(self, request):
"""Runs the client trace for capturing profiling information."""
service_addr = request.args.get('service_addr')
duration = int(request.args.get('duration', '1000'))
is_tpu_name = request.args.get('is_tpu_name') == 'true'
worker_list = request.args.get('worker_list')
num_tracing_attempts = int(request.args.get('num_retry', '0')) + 1
options = None
try:
options = profiler.ProfilerOptions(
host_tracer_level=int(request.args.get('host_tracer_level', '2')),
device_tracer_level=int(request.args.get('device_tracer_level', '1')),
python_tracer_level=int(request.args.get('python_tracer_level', '0')),
)
# For preserving backwards compatibility with TensorFlow 2.3 and older.
if 'delay_ms' in options._fields:
options.delay_ms = int(request.args.get('delay', '0'))
except AttributeError:
logger.warning('ProfilerOptions are available after TensorFlow 2.3')
if is_tpu_name:
try:
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
service_addr)
master_grpc_addr = tpu_cluster_resolver.get_master()
except (ImportError, RuntimeError) as err:
return respond({'error': str(err)}, 'application/json', code=200)
except (ValueError, TypeError):
return respond(
{'error': 'no TPUs with the specified names exist.'},
'application/json',
code=200,
)
if not worker_list:
worker_list = get_worker_list(tpu_cluster_resolver)
# TPU cluster resolver always returns port 8470. Replace it with 8466
# on which profiler service is running.
master_ip = master_grpc_addr.replace('grpc://', '').replace(':8470', '')
service_addr = master_ip + ':8466'
# Set the master TPU for streaming trace viewer.
self.master_tpu_unsecure_channel = master_ip
try:
if options:
profiler_client.trace(
service_addr,
self.logdir,
duration,
worker_list,
num_tracing_attempts,
options=options)
else:
profiler_client.trace(
service_addr,
self.logdir,
duration,
worker_list,
num_tracing_attempts,
)
return respond(
{'result': 'Capture profile successfully. Please refresh.'},
'application/json',
)
except tf.errors.UnavailableError:
return respond(
{'error': 'empty trace result.'},
'application/json',
code=200,
)
except Exception as e: # pylint: disable=broad-except
return respond(
{'error': str(e)},
'application/json',
code=200,
)
def start_grpc_stub_if_necessary(self):
# We will enable streaming trace viewer on two conditions:
# 1. the user specifies the master_tpu_unsecure_channel flag as the IP address
# of the "master" TPU; gRPC is then used to fetch streaming trace data.
# 2. the logdir is on Google Cloud Storage.
if self.master_tpu_unsecure_channel and self.logdir.startswith('gs://'):
if self.stub is None:
# gRPC and profiler_analysis are only needed to support streaming trace
# viewer in Cloud TPU. This code is unused when data was produced by
# open-source TensorFlow. Only import the libraries when needed.
# pylint: disable=g-import-not-at-top
import grpc
from tensorflow.python.tpu.profiler import profiler_analysis_pb2_grpc
# pylint: enable=g-import-not-at-top
# Workaround the grpc's 4MB message limitation.
gigabyte = 1024 * 1024 * 1024
options = [('grpc.max_message_length', gigabyte),
('grpc.max_send_message_length', gigabyte),
('grpc.max_receive_message_length', gigabyte)]
tpu_profiler_port = self.master_tpu_unsecure_channel + ':8466'
channel = grpc.insecure_channel(tpu_profiler_port, options)
self.stub = profiler_analysis_pb2_grpc.ProfileAnalysisStub(channel)
def _run_dir(self, run):
"""Helper that maps a frontend run name to a profile "run" directory.
The frontend run name consists of the TensorBoard run name (aka the relative
path from the logdir root to the directory containing the data) path-joined
to the Profile plugin's "run" concept (which is a subdirectory of the
plugins/profile directory representing an individual run of the tool), with
the special case that when the TensorBoard run is the logdir root (the run
named '.'), only the Profile plugin "run" name is used, for backwards
compatibility.
Args:
run: the frontend run name, as described above, e.g. train/run1.
Returns:
The resolved directory path, e.g. /logdir/train/plugins/profile/run1.
Raises:
RuntimeError: If the run directory is not found.
"""
run = run.rstrip(os.sep)
tb_run_name, profile_run_name = os.path.split(run)
if not tb_run_name:
tb_run_name = '.'
if tb_run_name == '.' and tf.io.gfile.isdir(self.logdir):
tb_run_directory = self.logdir
else:
tb_run_directory = os.path.join(self.logdir, tb_run_name)
if not tf.io.gfile.isdir(tb_run_directory):
raise RuntimeError('No matching run directory for run %s' % run)
plugin_directory = plugin_asset_util.PluginDirectory(
tb_run_directory, PLUGIN_NAME)
return os.path.join(plugin_directory, profile_run_name)
def generate_run_to_tools(self):
"""Generator for pairs of "run name" and a list of tools for that run.
The "run name" here is a "frontend run name" - see _run_dir() for the
definition of a "frontend run name" and how it maps to a directory of
profile data for a specific profile "run". The profile plugin concept of
"run" is different from the normal TensorBoard run; each run in this case
represents a single instance of profile data collection, more similar to a
"step" of data in typical TensorBoard semantics. These runs reside in
subdirectories of the plugins/profile directory within any regular
TensorBoard run directory (defined as a subdirectory of the logdir that
contains at least one tfevents file) or within the logdir root directory
itself (even if it contains no tfevents file and would thus not be
considered a normal TensorBoard run, for backwards compatibility).
Within those "profile run directories", there are files in the directory
that correspond to different profiling tools. The file that contains profile
for a specific tool "x" will have a suffix name TOOLS["x"].
Example:
logs/
plugins/
profile/
run1/
hostA.trace
train/
events.out.tfevents.foo
plugins/
profile/
run1/
hostA.trace
hostB.trace
run2/
hostA.trace
validation/
events.out.tfevents.foo
plugins/
profile/
run1/
hostA.trace
Yields:
A sequence of tuples mapping "frontend run names" to lists of tool names
available for those runs. For the above example, this would be:
("run1", ["trace_viewer"])
("train/run1", ["trace_viewer"])
("train/run2", ["trace_viewer"])
("validation/run1", ["trace_viewer"])
"""
self.start_grpc_stub_if_necessary()
# Create a background context; we may not be in a request.
ctx = RequestContext()
tb_run_names_to_dirs = {
run.run_name: os.path.join(self.logdir, run.run_name)
for run in self.data_provider.list_runs(ctx, experiment_id='')
}
plugin_assets = _plugin_assets(self.logdir, list(tb_run_names_to_dirs),
PLUGIN_NAME)
# Ensure that we also check the root logdir, even if it isn't a recognized
# TensorBoard run (i.e. has no tfevents file directly under it), to remain
# backwards compatible with previously profile plugin behavior. Note that we
# check if logdir is a directory to handle case where it's actually a
# multipart directory spec, which this plugin does not support.
if '.' not in plugin_assets and tf.io.gfile.isdir(self.logdir):
tb_run_names_to_dirs['.'] = self.logdir
plugin_assets['.'] = plugin_asset_util.ListAssets(self.logdir,
PLUGIN_NAME)
for tb_run_name, profile_runs in six.iteritems(plugin_assets):
tb_run_dir = tb_run_names_to_dirs[tb_run_name]
tb_plugin_dir = plugin_asset_util.PluginDirectory(tb_run_dir, PLUGIN_NAME)
for profile_run in profile_runs:
# Remove trailing separator; some filesystem implementations emit this.
profile_run = profile_run.rstrip(os.sep)
if tb_run_name == '.':
frontend_run = profile_run
else:
frontend_run = os.path.join(tb_run_name, profile_run)
profile_run_dir = os.path.join(tb_plugin_dir, profile_run)
if tf.io.gfile.isdir(profile_run_dir):
try:
filenames = tf.io.gfile.listdir(profile_run_dir)
except tf.errors.NotFoundError as e:
logger.warning('Cannot read asset directory: %s, NotFoundError %s',
profile_run_dir, e)
filenames = []
yield frontend_run, self._get_active_tools(
filenames) if filenames else filenames
def _get_active_tools(self, filenames):
"""Get a list of tools available given the filenames created by profiler.
Args:
filenames: List of strings that represent filenames
Returns:
A list of strings representing the available tools
"""
tools = _get_tools(filenames)
if 'trace_viewer@' in tools:
# Streaming trace viewer always overrides the normal trace viewer.
# The trailing '@' is to inform tf-profile-dashboard.html and
# tf-trace-viewer.html that the streaming trace viewer should be used.
if self.stub is None:
tools.discard('trace_viewer@')
else:
tools.discard('trace_viewer#')
tools.discard('trace_viewer')
if 'trace_viewer#' in tools:
# use compressed trace
tools.discard('trace_viewer')
# Return sorted list of tools with 'overview_page' at the front.
op = frozenset(['overview_page@', 'overview_page', 'overview_page^'])
return list(tools.intersection(op)) + sorted(tools.difference(op))
|
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Tokenizer class. """
from collections import OrderedDict
from ... import GPTNeoConfig
from ...configuration_utils import PretrainedConfig
from ...file_utils import is_sentencepiece_available, is_tokenizers_available
from ...utils import logging
from ..bart.tokenization_bart import BartTokenizer
from ..bert.tokenization_bert import BertTokenizer
from ..bert_japanese.tokenization_bert_japanese import BertJapaneseTokenizer
from ..bertweet.tokenization_bertweet import BertweetTokenizer
from ..blenderbot.tokenization_blenderbot import BlenderbotTokenizer
from ..blenderbot_small.tokenization_blenderbot_small import BlenderbotSmallTokenizer
from ..convbert.tokenization_convbert import ConvBertTokenizer
from ..ctrl.tokenization_ctrl import CTRLTokenizer
from ..deberta.tokenization_deberta import DebertaTokenizer
from ..distilbert.tokenization_distilbert import DistilBertTokenizer
from ..dpr.tokenization_dpr import DPRQuestionEncoderTokenizer
from ..electra.tokenization_electra import ElectraTokenizer
from ..flaubert.tokenization_flaubert import FlaubertTokenizer
from ..fsmt.tokenization_fsmt import FSMTTokenizer
from ..funnel.tokenization_funnel import FunnelTokenizer
from ..gpt2.tokenization_gpt2 import GPT2Tokenizer
from ..herbert.tokenization_herbert import HerbertTokenizer
from ..layoutlm.tokenization_layoutlm import LayoutLMTokenizer
from ..led.tokenization_led import LEDTokenizer
from ..longformer.tokenization_longformer import LongformerTokenizer
from ..luke.tokenization_luke import LukeTokenizer
from ..lxmert.tokenization_lxmert import LxmertTokenizer
from ..mobilebert.tokenization_mobilebert import MobileBertTokenizer
from ..mpnet.tokenization_mpnet import MPNetTokenizer
from ..openai.tokenization_openai import OpenAIGPTTokenizer
from ..phobert.tokenization_phobert import PhobertTokenizer
from ..prophetnet.tokenization_prophetnet import ProphetNetTokenizer
from ..rag.tokenization_rag import RagTokenizer
from ..retribert.tokenization_retribert import RetriBertTokenizer
from ..roberta.tokenization_roberta import RobertaTokenizer
from ..roformer.tokenization_roformer import RoFormerTokenizer
from ..squeezebert.tokenization_squeezebert import SqueezeBertTokenizer
from ..tapas.tokenization_tapas import TapasTokenizer
from ..transfo_xl.tokenization_transfo_xl import TransfoXLTokenizer
from ..wav2vec2.tokenization_wav2vec2 import Wav2Vec2CTCTokenizer
from ..xlm.tokenization_xlm import XLMTokenizer
from .configuration_auto import (
AlbertConfig,
AutoConfig,
BartConfig,
BertConfig,
BertGenerationConfig,
BigBirdConfig,
BigBirdPegasusConfig,
BlenderbotConfig,
BlenderbotSmallConfig,
CamembertConfig,
ConvBertConfig,
CTRLConfig,
DebertaConfig,
DebertaV2Config,
DistilBertConfig,
DPRConfig,
ElectraConfig,
EncoderDecoderConfig,
FlaubertConfig,
FSMTConfig,
FunnelConfig,
GPT2Config,
IBertConfig,
LayoutLMConfig,
LEDConfig,
LongformerConfig,
LukeConfig,
LxmertConfig,
M2M100Config,
MarianConfig,
MBartConfig,
MobileBertConfig,
MPNetConfig,
MT5Config,
OpenAIGPTConfig,
PegasusConfig,
ProphetNetConfig,
RagConfig,
ReformerConfig,
RetriBertConfig,
RobertaConfig,
RoFormerConfig,
Speech2TextConfig,
SqueezeBertConfig,
T5Config,
TapasConfig,
TransfoXLConfig,
Wav2Vec2Config,
XLMConfig,
XLMProphetNetConfig,
XLMRobertaConfig,
XLNetConfig,
replace_list_option_in_docstrings,
)
if is_sentencepiece_available():
from ..albert.tokenization_albert import AlbertTokenizer
from ..barthez.tokenization_barthez import BarthezTokenizer
from ..bert_generation.tokenization_bert_generation import BertGenerationTokenizer
from ..big_bird.tokenization_big_bird import BigBirdTokenizer
from ..camembert.tokenization_camembert import CamembertTokenizer
from ..cpm.tokenization_cpm import CpmTokenizer
from ..deberta_v2.tokenization_deberta_v2 import DebertaV2Tokenizer
from ..m2m_100 import M2M100Tokenizer
from ..marian.tokenization_marian import MarianTokenizer
from ..mbart.tokenization_mbart import MBartTokenizer
from ..mbart.tokenization_mbart50 import MBart50Tokenizer
from ..mt5 import MT5Tokenizer
from ..pegasus.tokenization_pegasus import PegasusTokenizer
from ..reformer.tokenization_reformer import ReformerTokenizer
from ..speech_to_text import Speech2TextTokenizer
from ..t5.tokenization_t5 import T5Tokenizer
from ..xlm_prophetnet.tokenization_xlm_prophetnet import XLMProphetNetTokenizer
from ..xlm_roberta.tokenization_xlm_roberta import XLMRobertaTokenizer
from ..xlnet.tokenization_xlnet import XLNetTokenizer
else:
AlbertTokenizer = None
BarthezTokenizer = None
BertGenerationTokenizer = None
BigBirdTokenizer = None
CamembertTokenizer = None
CpmTokenizer = None
DebertaV2Tokenizer = None
MarianTokenizer = None
MBartTokenizer = None
MBart50Tokenizer = None
MT5Tokenizer = None
PegasusTokenizer = None
ReformerTokenizer = None
T5Tokenizer = None
XLMRobertaTokenizer = None
XLNetTokenizer = None
XLMProphetNetTokenizer = None
M2M100Tokenizer = None
Speech2TextTokenizer = None
if is_tokenizers_available():
from ..albert.tokenization_albert_fast import AlbertTokenizerFast
from ..bart.tokenization_bart_fast import BartTokenizerFast
from ..barthez.tokenization_barthez_fast import BarthezTokenizerFast
from ..bert.tokenization_bert_fast import BertTokenizerFast
from ..big_bird.tokenization_big_bird_fast import BigBirdTokenizerFast
from ..camembert.tokenization_camembert_fast import CamembertTokenizerFast
from ..convbert.tokenization_convbert_fast import ConvBertTokenizerFast
from ..deberta.tokenization_deberta_fast import DebertaTokenizerFast
from ..distilbert.tokenization_distilbert_fast import DistilBertTokenizerFast
from ..dpr.tokenization_dpr_fast import DPRQuestionEncoderTokenizerFast
from ..electra.tokenization_electra_fast import ElectraTokenizerFast
from ..funnel.tokenization_funnel_fast import FunnelTokenizerFast
from ..gpt2.tokenization_gpt2_fast import GPT2TokenizerFast
from ..herbert.tokenization_herbert_fast import HerbertTokenizerFast
from ..layoutlm.tokenization_layoutlm_fast import LayoutLMTokenizerFast
from ..led.tokenization_led_fast import LEDTokenizerFast
from ..longformer.tokenization_longformer_fast import LongformerTokenizerFast
from ..lxmert.tokenization_lxmert_fast import LxmertTokenizerFast
from ..mbart.tokenization_mbart50_fast import MBart50TokenizerFast
from ..mbart.tokenization_mbart_fast import MBartTokenizerFast
from ..mobilebert.tokenization_mobilebert_fast import MobileBertTokenizerFast
from ..mpnet.tokenization_mpnet_fast import MPNetTokenizerFast
from ..mt5 import MT5TokenizerFast
from ..openai.tokenization_openai_fast import OpenAIGPTTokenizerFast
from ..pegasus.tokenization_pegasus_fast import PegasusTokenizerFast
from ..reformer.tokenization_reformer_fast import ReformerTokenizerFast
from ..retribert.tokenization_retribert_fast import RetriBertTokenizerFast
from ..roberta.tokenization_roberta_fast import RobertaTokenizerFast
from ..squeezebert.tokenization_squeezebert_fast import SqueezeBertTokenizerFast
from ..t5.tokenization_t5_fast import T5TokenizerFast
from ..xlm_roberta.tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
from ..xlnet.tokenization_xlnet_fast import XLNetTokenizerFast
else:
AlbertTokenizerFast = None
BartTokenizerFast = None
BarthezTokenizerFast = None
BertTokenizerFast = None
BigBirdTokenizerFast = None
CamembertTokenizerFast = None
ConvBertTokenizerFast = None
DebertaTokenizerFast = None
DistilBertTokenizerFast = None
DPRQuestionEncoderTokenizerFast = None
ElectraTokenizerFast = None
FunnelTokenizerFast = None
GPT2TokenizerFast = None
HerbertTokenizerFast = None
LayoutLMTokenizerFast = None
LEDTokenizerFast = None
LongformerTokenizerFast = None
LxmertTokenizerFast = None
MBartTokenizerFast = None
MBart50TokenizerFast = None
MobileBertTokenizerFast = None
MPNetTokenizerFast = None
MT5TokenizerFast = None
OpenAIGPTTokenizerFast = None
PegasusTokenizerFast = None
ReformerTokenizerFast = None
RetriBertTokenizerFast = None
RobertaTokenizerFast = None
SqueezeBertTokenizerFast = None
T5TokenizerFast = None
XLMRobertaTokenizerFast = None
XLNetTokenizerFast = None
logger = logging.get_logger(__name__)
TOKENIZER_MAPPING = OrderedDict(
[
(RetriBertConfig, (RetriBertTokenizer, RetriBertTokenizerFast)),
(RoFormerConfig, (RoFormerTokenizer, None)),
(T5Config, (T5Tokenizer, T5TokenizerFast)),
(MT5Config, (MT5Tokenizer, MT5TokenizerFast)),
(MobileBertConfig, (MobileBertTokenizer, MobileBertTokenizerFast)),
(DistilBertConfig, (DistilBertTokenizer, DistilBertTokenizerFast)),
(AlbertConfig, (AlbertTokenizer, AlbertTokenizerFast)),
(CamembertConfig, (CamembertTokenizer, CamembertTokenizerFast)),
(PegasusConfig, (PegasusTokenizer, PegasusTokenizerFast)),
(MBartConfig, (MBartTokenizer, MBartTokenizerFast)),
(XLMRobertaConfig, (XLMRobertaTokenizer, XLMRobertaTokenizerFast)),
(MarianConfig, (MarianTokenizer, None)),
(BlenderbotSmallConfig, (BlenderbotSmallTokenizer, None)),
(BlenderbotConfig, (BlenderbotTokenizer, None)),
(BartConfig, (BartTokenizer, BartTokenizerFast)),
(LongformerConfig, (LongformerTokenizer, LongformerTokenizerFast)),
(RobertaConfig, (RobertaTokenizer, RobertaTokenizerFast)),
(ReformerConfig, (ReformerTokenizer, ReformerTokenizerFast)),
(ElectraConfig, (ElectraTokenizer, ElectraTokenizerFast)),
(FunnelConfig, (FunnelTokenizer, FunnelTokenizerFast)),
(LxmertConfig, (LxmertTokenizer, LxmertTokenizerFast)),
(LayoutLMConfig, (LayoutLMTokenizer, LayoutLMTokenizerFast)),
(DPRConfig, (DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast)),
(SqueezeBertConfig, (SqueezeBertTokenizer, SqueezeBertTokenizerFast)),
(BertConfig, (BertTokenizer, BertTokenizerFast)),
(OpenAIGPTConfig, (OpenAIGPTTokenizer, OpenAIGPTTokenizerFast)),
(GPT2Config, (GPT2Tokenizer, GPT2TokenizerFast)),
(TransfoXLConfig, (TransfoXLTokenizer, None)),
(XLNetConfig, (XLNetTokenizer, XLNetTokenizerFast)),
(FlaubertConfig, (FlaubertTokenizer, None)),
(XLMConfig, (XLMTokenizer, None)),
(CTRLConfig, (CTRLTokenizer, None)),
(FSMTConfig, (FSMTTokenizer, None)),
(BertGenerationConfig, (BertGenerationTokenizer, None)),
(DebertaConfig, (DebertaTokenizer, DebertaTokenizerFast)),
(DebertaV2Config, (DebertaV2Tokenizer, None)),
(RagConfig, (RagTokenizer, None)),
(XLMProphetNetConfig, (XLMProphetNetTokenizer, None)),
(Speech2TextConfig, (Speech2TextTokenizer, None)),
(M2M100Config, (M2M100Tokenizer, None)),
(ProphetNetConfig, (ProphetNetTokenizer, None)),
(MPNetConfig, (MPNetTokenizer, MPNetTokenizerFast)),
(TapasConfig, (TapasTokenizer, None)),
(LEDConfig, (LEDTokenizer, LEDTokenizerFast)),
(ConvBertConfig, (ConvBertTokenizer, ConvBertTokenizerFast)),
(BigBirdConfig, (BigBirdTokenizer, BigBirdTokenizerFast)),
(IBertConfig, (RobertaTokenizer, RobertaTokenizerFast)),
(Wav2Vec2Config, (Wav2Vec2CTCTokenizer, None)),
(GPTNeoConfig, (GPT2Tokenizer, GPT2TokenizerFast)),
(LukeConfig, (LukeTokenizer, None)),
(BigBirdPegasusConfig, (PegasusTokenizer, PegasusTokenizerFast)),
]
)
# For tokenizers which are not directly mapped from a config
NO_CONFIG_TOKENIZER = [
BertJapaneseTokenizer,
BertweetTokenizer,
CpmTokenizer,
HerbertTokenizer,
HerbertTokenizerFast,
PhobertTokenizer,
BarthezTokenizer,
BarthezTokenizerFast,
MBart50Tokenizer,
MBart50TokenizerFast,
]
SLOW_TOKENIZER_MAPPING = {
k: (v[0] if v[0] is not None else v[1])
for k, v in TOKENIZER_MAPPING.items()
if (v[0] is not None or v[1] is not None)
}
def tokenizer_class_from_name(class_name: str):
all_tokenizer_classes = (
[v[0] for v in TOKENIZER_MAPPING.values() if v[0] is not None]
+ [v[1] for v in TOKENIZER_MAPPING.values() if v[1] is not None]
+ [v for v in NO_CONFIG_TOKENIZER if v is not None]
)
for c in all_tokenizer_classes:
if c.__name__ == class_name:
return c
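# Illustrative usage (lookups are assumptions):
#   tokenizer_class_from_name("BertTokenizer")    # -> the BertTokenizer class
#   tokenizer_class_from_name("NoSuchTokenizer")  # -> None (no match found)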
class AutoTokenizer:
r"""
This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when
created with the :meth:`AutoTokenizer.from_pretrained` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoTokenizer is designed to be instantiated "
"using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
@replace_list_option_in_docstrings(SLOW_TOKENIZER_MAPPING)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
r"""
Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary.
The tokenizer class to instantiate is selected based on the :obj:`model_type` property of the config object
(either passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's
missing, by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
Can be either:
- A string, the `model id` of a predefined tokenizer hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing vocabulary files required by the tokenizer, for instance saved
using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.,
``./my_model_directory/``.
- A path or url to a single saved vocabulary file if and only if the tokenizer only requires a
single vocabulary file (like Bert or XLNet), e.g.: ``./my_model_directory/vocab.txt``. (Not
applicable to all derived classes)
inputs (additional positional arguments, `optional`):
Will be passed along to the Tokenizer ``__init__()`` method.
config (:class:`~transformers.PreTrainedConfig`, `optional`)
The configuration object used to determine the tokenizer class to instantiate.
cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download the model weights and configuration files and override the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            revision (:obj:`str`, `optional`, defaults to :obj:`"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id. Since models
                and other artifacts on huggingface.co are stored with a git-based system, ``revision`` can be any
                identifier allowed by git.
subfolder (:obj:`str`, `optional`):
In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
facebook/rag-token-base), specify it here.
use_fast (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to try to load the fast version of the tokenizer.
kwargs (additional keyword arguments, `optional`):
Will be passed to the Tokenizer ``__init__()`` method. Can be used to set special tokens like
``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``,
``mask_token``, ``additional_special_tokens``. See parameters in the ``__init__()`` for more details.
Examples::
>>> from transformers import AutoTokenizer
>>> # Download vocabulary from huggingface.co and cache.
>>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
>>> # Download vocabulary from huggingface.co (user-uploaded) and cache.
>>> tokenizer = AutoTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
>>> # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
>>> tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/')
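            >>> # Pass ``use_fast=False`` to force the slow (pure Python) tokenizer when one is available.
            >>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased', use_fast=False)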
"""
config = kwargs.pop("config", None)
kwargs["_from_auto"] = True
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
use_fast = kwargs.pop("use_fast", True)
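        # If the config explicitly names a tokenizer class, resolve it by name (preferring the
        # "<Name>Fast" variant when ``use_fast`` is set) before falling back to TOKENIZER_MAPPING.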
if config.tokenizer_class is not None:
tokenizer_class = None
if use_fast and not config.tokenizer_class.endswith("Fast"):
tokenizer_class_candidate = f"{config.tokenizer_class}Fast"
tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
if tokenizer_class is None:
tokenizer_class_candidate = config.tokenizer_class
tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
if tokenizer_class is None:
raise ValueError(
f"Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported."
)
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        # If the model is an encoder-decoder, the encoder's tokenizer class is used by default.
if isinstance(config, EncoderDecoderConfig):
if type(config.decoder) is not type(config.encoder): # noqa: E721
logger.warning(
f"The encoder model config class: {config.encoder.__class__} is different from the decoder model "
f"config class: {config.decoder.__class}. It is not recommended to use the "
"`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder "
"specific tokenizer classes."
)
config = config.encoder
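        # Otherwise, pick the tokenizer registered for this config class, preferring the fast
        # implementation when requested and available.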
if type(config) in TOKENIZER_MAPPING.keys():
tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(config)]
if tokenizer_class_fast and (use_fast or tokenizer_class_py is None):
return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
if tokenizer_class_py is not None:
return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
raise ValueError(
"This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed "
"in order to use this tokenizer."
)
raise ValueError(
f"Unrecognized configuration class {config.__class__} to build an AutoTokenizer.\n"
f"Model type should be one of {', '.join(c.__name__ for c in TOKENIZER_MAPPING.keys())}."
)
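# Minimal usage sketch (illustrative only; assumes network access and that the named
# checkpoint exists on huggingface.co):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # fast tokenizer by default
#     encoding = tokenizer("Hello world!")
#     print(type(tokenizer).__name__, encoding["input_ids"])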