##########
# Contribution by the Center on Long-Term Risk:
# https://github.com/longtermrisk/marltoolbox
##########
import random
from ray.rllib.examples.env.matrix_sequential_social_dilemma import (
IteratedPrisonersDilemma,
IteratedChicken,
IteratedStagHunt,
IteratedBoS,
)
ENVS = [IteratedPrisonersDilemma, IteratedChicken, IteratedStagHunt, IteratedBoS]
def test_reset():
max_steps = 20
env_all = [init_env(max_steps, env_class) for env_class in ENVS]
for env in env_all:
obs = env.reset()
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=0)
def init_env(max_steps, env_class, seed=None):
config = {
"max_steps": max_steps,
}
env = env_class(config)
env.seed(seed)
return env
def check_obs(obs, env):
assert len(obs) == 2, "two players"
for key, player_obs in obs.items():
assert isinstance(player_obs, int) # .shape == (env.NUM_STATES)
assert player_obs < env.NUM_STATES
def assert_logger_buffer_size_two_players(env, n_steps):
assert len(env.cc_count) == n_steps
assert len(env.dd_count) == n_steps
assert len(env.cd_count) == n_steps
assert len(env.dc_count) == n_steps
def test_step():
max_steps = 20
env_all = [init_env(max_steps, env_class) for env_class in ENVS]
for env in env_all:
obs = env.reset()
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=0)
actions = {
policy_id: random.randint(0, env.NUM_ACTIONS - 1)
for policy_id in env.players_ids
}
obs, reward, done, info = env.step(actions)
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=1)
assert not done["__all__"]
def test_multiple_steps():
max_steps = 20
env_all = [init_env(max_steps, env_class) for env_class in ENVS]
n_steps = int(max_steps * 0.75)
for env in env_all:
obs = env.reset()
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=0)
for step_i in range(1, n_steps, 1):
actions = {
policy_id: random.randint(0, env.NUM_ACTIONS - 1)
for policy_id in env.players_ids
}
obs, reward, done, info = env.step(actions)
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=step_i)
assert not done["__all__"]
def test_multiple_episodes():
max_steps = 20
env_all = [init_env(max_steps, env_class) for env_class in ENVS]
n_steps = int(max_steps * 8.25)
for env in env_all:
obs = env.reset()
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=0)
step_i = 0
for _ in range(n_steps):
step_i += 1
actions = {
policy_id: random.randint(0, env.NUM_ACTIONS - 1)
for policy_id in env.players_ids
}
obs, reward, done, info = env.step(actions)
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=step_i)
assert not done["__all__"] or (step_i == max_steps and done["__all__"])
if done["__all__"]:
obs = env.reset()
check_obs(obs, env)
step_i = 0
def assert_info(n_steps, p_row_act, p_col_act, env, max_steps, CC, DD, CD, DC):
step_i = 0
for _ in range(n_steps):
step_i += 1
actions = {
"player_row": p_row_act[step_i - 1],
"player_col": p_col_act[step_i - 1],
}
obs, reward, done, info = env.step(actions)
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=step_i)
assert not done["__all__"] or (step_i == max_steps and done["__all__"])
if done["__all__"]:
assert info["player_row"]["CC"] == CC
assert info["player_col"]["CC"] == CC
assert info["player_row"]["DD"] == DD
assert info["player_col"]["DD"] == DD
assert info["player_row"]["CD"] == CD
assert info["player_col"]["CD"] == CD
assert info["player_row"]["DC"] == DC
assert info["player_col"]["DC"] == DC
obs = env.reset()
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=0)
step_i = 0
def test_logged_info_full_CC():
p_row_act = [0, 0, 0, 0]
p_col_act = [0, 0, 0, 0]
max_steps = 4
env_all = [init_env(max_steps, env_class) for env_class in ENVS]
n_steps = int(max_steps * 8.25)
for env in env_all:
obs = env.reset()
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=0)
assert_info(
n_steps,
p_row_act,
p_col_act,
env,
max_steps,
CC=1.0,
DD=0.0,
CD=0.0,
DC=0.0,
)
def test_logged_info_full_DD():
p_row_act = [1, 1, 1, 1]
p_col_act = [1, 1, 1, 1]
max_steps = 4
env_all = [init_env(max_steps, env_class) for env_class in ENVS]
n_steps = int(max_steps * 8.25)
for env in env_all:
obs = env.reset()
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=0)
assert_info(
n_steps,
p_row_act,
p_col_act,
env,
max_steps,
CC=0.0,
DD=1.0,
CD=0.0,
DC=0.0,
)
def test_logged_info_full_CD():
p_row_act = [0, 0, 0, 0]
p_col_act = [1, 1, 1, 1]
max_steps = 4
env_all = [init_env(max_steps, env_class) for env_class in ENVS]
n_steps = int(max_steps * 8.25)
for env in env_all:
obs = env.reset()
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=0)
assert_info(
n_steps,
p_row_act,
p_col_act,
env,
max_steps,
CC=0.0,
DD=0.0,
CD=1.0,
DC=0.0,
)
def test_logged_info_full_DC():
p_row_act = [1, 1, 1, 1]
p_col_act = [0, 0, 0, 0]
max_steps = 4
env_all = [init_env(max_steps, env_class) for env_class in ENVS]
n_steps = int(max_steps * 8.25)
for env in env_all:
obs = env.reset()
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=0)
assert_info(
n_steps,
p_row_act,
p_col_act,
env,
max_steps,
CC=0.0,
DD=0.0,
CD=0.0,
DC=1.0,
)
def test_logged_info_mix_CC_DD():
p_row_act = [0, 1, 1, 1]
p_col_act = [0, 1, 1, 1]
max_steps = 4
env_all = [init_env(max_steps, env_class) for env_class in ENVS]
n_steps = int(max_steps * 8.25)
for env in env_all:
obs = env.reset()
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=0)
assert_info(
n_steps,
p_row_act,
p_col_act,
env,
max_steps,
CC=0.25,
DD=0.75,
CD=0.0,
DC=0.0,
)
def test_logged_info_mix_CD_CD():
p_row_act = [1, 0, 1, 0]
p_col_act = [0, 1, 0, 1]
max_steps = 4
env_all = [init_env(max_steps, env_class) for env_class in ENVS]
n_steps = int(max_steps * 8.25)
for env in env_all:
obs = env.reset()
check_obs(obs, env)
assert_logger_buffer_size_two_players(env, n_steps=0)
assert_info(
n_steps,
p_row_act,
p_col_act,
env,
max_steps,
CC=0.0,
DD=0.0,
CD=0.5,
DC=0.5,
)
def test_observations_are_invariant_to_the_player_trained():
p_row_act = [0, 1, 1, 0]
p_col_act = [0, 1, 0, 1]
max_steps = 4
env_all = [init_env(max_steps, env_class) for env_class in ENVS]
n_steps = 4
for env in env_all:
_ = env.reset()
step_i = 0
for _ in range(n_steps):
step_i += 1
actions = {
"player_row": p_row_act[step_i - 1],
"player_col": p_col_act[step_i - 1],
}
obs, reward, done, info = env.step(actions)
            # assert that the observations are symmetric with respect to the players' actions
if step_i == 1:
assert obs[env.players_ids[0]] == obs[env.players_ids[1]]
elif step_i == 2:
assert obs[env.players_ids[0]] == obs[env.players_ids[1]]
elif step_i == 3:
obs_step_3 = obs
elif step_i == 4:
assert obs[env.players_ids[0]] == obs_step_3[env.players_ids[1]]
assert obs[env.players_ids[1]] == obs_step_3[env.players_ids[0]]
# Copyright 2016 OVH SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from neutron_lib import constants
from oslo_utils import uuidutils
from neutron.agent.linux import ip_lib
from neutron.common import utils as common_utils
from neutron.tests.fullstack import base
from neutron.tests.fullstack.cmd import dhcp_agent as cmd
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine
from neutron.tests.unit import testlib_api
load_tests = testlib_api.module_load_tests
class BaseDhcpAgentTest(base.BaseFullStackTestCase):
scenarios = [
(constants.AGENT_TYPE_OVS,
{'l2_agent_type': constants.AGENT_TYPE_OVS}),
(constants.AGENT_TYPE_LINUXBRIDGE,
{'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE})
]
def setUp(self):
host_descriptions = [
environment.HostDescription(
dhcp_agent=True,
l2_agent_type=self.l2_agent_type
) for _ in range(self.number_of_hosts)]
env = environment.Environment(
environment.EnvironmentDescription(
l2_pop=False,
arp_responder=False,
agent_down_time=self.agent_down_time),
host_descriptions)
super(BaseDhcpAgentTest, self).setUp(env)
self.project_id = uuidutils.generate_uuid()
self._create_network_subnet_and_vm()
def _spawn_vm(self):
host = random.choice(self.environment.hosts)
vm = self.useFixture(
machine.FakeFullstackMachine(
host,
self.network['id'],
self.project_id,
self.safe_client,
use_dhcp=True))
vm.block_until_boot()
return vm
def _create_network_subnet_and_vm(self):
self.network = self.safe_client.create_network(self.project_id)
self.subnet = self.safe_client.create_subnet(
self.project_id, self.network['id'],
cidr='10.0.0.0/24',
gateway_ip='10.0.0.1',
name='subnet-test',
enable_dhcp=True)
self.vm = self._spawn_vm()
def _wait_until_agent_down(self, agent_id):
def _agent_down():
agent = self.client.show_agent(agent_id)['agent']
return not agent.get('alive')
common_utils.wait_until_true(_agent_down)
class TestDhcpAgentNoHA(BaseDhcpAgentTest):
number_of_hosts = 1
agent_down_time = 60
def test_dhcp_assignment(self):
# First check if network was scheduled to one DHCP agent
dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])
self.assertEqual(1, len(dhcp_agents['agents']))
# And check if IP and gateway config is fine on FakeMachine
self.vm.block_until_dhcp_config_done()
def test_mtu_update(self):
# The test case needs access to devices in nested namespaces. ip_lib
# doesn't support it, and it's probably unsafe to touch the library for
# testing matters.
# TODO(jlibosva) revisit when ip_lib supports nested namespaces
if self.environment.hosts[0].dhcp_agent.namespace is not None:
self.skipTest("ip_lib doesn't support nested namespaces")
self.vm.block_until_dhcp_config_done()
namespace = cmd._get_namespace_name(
self.network['id'],
suffix=self.environment.hosts[0].dhcp_agent.get_namespace_suffix())
ip = ip_lib.IPWrapper(namespace)
devices = ip.get_devices()
self.assertEqual(1, len(devices))
dhcp_dev = devices[0]
mtu = dhcp_dev.link.mtu
self.assertEqual(1450, mtu)
mtu -= 1
self.safe_client.update_network(self.network['id'], mtu=mtu)
common_utils.wait_until_true(lambda: dhcp_dev.link.mtu == mtu)
class TestDhcpAgentHA(BaseDhcpAgentTest):
number_of_hosts = 2
agent_down_time = 10
def _wait_until_network_rescheduled(self, old_agent):
def _agent_rescheduled():
network_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
if network_agents:
return network_agents[0]['id'] != old_agent['id']
return False
common_utils.wait_until_true(_agent_rescheduled)
def _kill_dhcp_agent(self, agent):
for host in self.environment.hosts:
hostname = host.dhcp_agent.get_agent_hostname()
if hostname == agent['host']:
host.dhcp_agent.kill()
self._wait_until_agent_down(agent['id'])
break
def _add_network_to_new_agent(self):
dhcp_agents = self.client.list_agents(
agent_type=constants.AGENT_TYPE_DHCP)['agents']
dhcp_agents_ids = [agent['id'] for agent in dhcp_agents]
current_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
current_agents_ids = [agent['id'] for agent in current_agents]
new_agents_ids = list(set(dhcp_agents_ids) - set(current_agents_ids))
if new_agents_ids:
new_agent_id = random.choice(new_agents_ids)
self.client.add_network_to_dhcp_agent(
new_agent_id, {'network_id': self.network['id']})
def test_reschedule_network_on_new_agent(self):
network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
self.assertEqual(1, len(network_dhcp_agents))
self._kill_dhcp_agent(network_dhcp_agents[0])
self._wait_until_network_rescheduled(network_dhcp_agents[0])
# ensure that only one agent is handling DHCP for this network
new_network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
self.assertEqual(1, len(new_network_dhcp_agents))
# check if new vm will get IP from new DHCP agent
new_vm = self._spawn_vm()
new_vm.block_until_dhcp_config_done()
def test_multiple_agents_for_network(self):
network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
self.assertEqual(1, len(network_dhcp_agents))
self._add_network_to_new_agent()
# ensure that two agents are handling DHCP for this network
network_dhcp_agents = self.client.list_dhcp_agent_hosting_networks(
self.network['id'])['agents']
self.assertEqual(2, len(network_dhcp_agents))
self._kill_dhcp_agent(network_dhcp_agents[0])
# check if new vm will get IP from DHCP agent which is still alive
new_vm = self._spawn_vm()
new_vm.block_until_dhcp_config_done()
"""
then/verification steps
"""
from behave import *
import logging
import ast
import re
import time
from selenium.common.exceptions import NoSuchElementException
from mobilebdd.steps.input import switch_to
from selenium.webdriver.support.ui import WebDriverWait
log = logging.getLogger(u'mobilebdd')
# mapping of named regions to (axis, start, end) bounds, expressed as fractions
# of the window size, used to decide whether an element lies within a region
SearchRegions = {
u'top': (u'y', 0, 0.5),
u'bottom': (u'y', 0.5, 1),
u'right': (u'x', 0.5, 1),
u'left': (u'x', 0, 0.5),
}
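# For example, u'top' maps to (u'y', 0, 0.5): an element is considered to be in
# the top region when its normalized top and bottom edges (as computed in
# _is_element_in_region below) both fall within the upper half of the window.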
@then(u'{element} should appear')
@then(u'{element} should be visible')
@then(u'{element} should not be hidden')
def _step(context, element):
"""
:type context: HackedContext
"""
assert context.driver.simple_find(element).is_displayed(),\
'{} should be present and displayed'.format(element)
@then(u'{element} should be hidden')
@then(u'{element} should not appear')
@then(u'{element} should not be visible')
def _step(context, element):
"""
:type context: HackedContext
"""
try:
item = context.driver.simple_find(element)
assert not item.is_displayed(),\
u'{} should not be present and displayed'.format(element)
except NoSuchElementException:
pass
@then(u'{element} should be selected')
def _step(context, element):
"""
:type context: HackedContext
"""
assert context.driver.simple_find(element).is_selected(),\
u'{} should be selected'.format(element)
@then(u'{element} should not be selected')
def _step(context, element):
"""
:type context: HackedContext
"""
assert not context.driver.simple_find(element).is_selected(),\
u'{} should not be selected'.format(element)
def _is_element_in_webview_region(context, thing, location):
"""
checks if the element is in the region of the webview described by 'location'
:type context: HackedContext
:param thing: an identifier for an element; id, class name, partial link text, etc.
:param location: one of top, bottom, left, right, center, viewport
:return True/False
"""
try:
element = context.driver.simple_find_in_context(thing, u'WEBVIEW')
except Exception:
return False
# To correctly find an element within a webview region, we need the size of
# the webview as reported by script within the webview itself. This
# provides a value of the same pixel density used when finding the element,
# so that the arithmetic is correct when we compare those values later.
window_size = {
u'width': float(context.driver.execute_script(u'return document.documentElement.clientWidth')),
u'height': float(context.driver.execute_script(u'return document.documentElement.clientHeight'))
}
return _is_element_in_region(element, window_size, location)
def _is_element_in_native_region(context, thing, location):
"""
checks if the element is in the region of the app described by 'location'
:type context: HackedContext
:param thing: an identifier for an element; id, class name, partial link text, etc.
:param location: one of top, bottom, left, right, center, viewport
:return True/False
"""
try:
element = context.driver.simple_find_in_context(thing, u'NATIVE_APP')
except Exception:
return False
window_size = context.driver.get_window_size()
return _is_element_in_region(element, window_size, location)
def _is_element_in_region(element, window_size, location):
"""
checks if the element is in the region of the window described by 'location'
:param element: the app or webview element in question
:param window_size: an object with width and height properties
:param location: one of top, bottom, left, right, center, viewport
:return True/False
"""
# For starters, check if the element is even displayed.
if not element.is_displayed():
return False
element_pos = element.location
element_size = element.size
window_width = float(window_size[u'width'])
window_height = float(window_size[u'height'])
element_top = element_pos[u'y'] / window_height
element_bottom = element_top + element_size[u'height'] / window_height
    element_left = element_pos[u'x'] / window_width
element_right = element_left + element_size[u'width'] / window_width
log.info(u'element position: x {}, y {}'.format(element_pos[u'x'], element_pos[u'y']))
log.info(u'element dimensions: width {}, height {}'.format(element_size[u'width'], element_size[u'height']))
log.info(u'window dimensions: width {}, height {}'.format(window_size[u'width'], window_size[u'height']))
log.info(u'expecting to find the element at {}'.format(location))
log.info(u'element bounds: top {}, left {}, right {}, bottom {}'.format(
element_top, element_left, element_right, element_bottom
))
is_correct = True
if location == u'center':
return element_top > 0.3\
and element_bottom < 0.7\
and element_left > 0.3\
and element_right < 0.7
elif location == u'viewport':
return element_top >= 0\
and element_bottom <= 1\
and element_left >= 0\
and element_right <= 1
else:
for location_word in location.split():
if location_word not in SearchRegions:
log.error(u'unsupported location {}'.format(location))
return False
region = SearchRegions[location_word]
if region[0] == u'y':
return element_top >= region[1] and element_bottom <= region[2]
else:
return element_left >= region[1] and element_right <= region[2]
return is_correct
@then(u'{thing} should be at the {location} of the webview')
@then(u'{thing} should be in the {location} of the webview')
def _step(context, thing, location):
"""
:type context: HackedContext
:param thing: an identifier for an element; id, class name, partial link text, etc.
:param location: one of top, bottom, left, right, center, viewport
"""
is_correct = _is_element_in_webview_region(context, thing, location)
assert is_correct, u'{} was not at {}'.format(thing, location)
@then(u'{thing} should be at the {location} of the app')
@then(u'{thing} should be in the {location} of the app')
def _step(context, thing, location):
"""
:type context: HackedContext
:param thing: an identifier for an element; id, class name, partial link text, etc.
:param location: one of top, bottom, left, right, center, viewport
"""
is_correct = _is_element_in_native_region(context, thing, location)
assert is_correct, u'{} was not at {}'.format(thing, location)
@then(u'{thing} should not be at the {location} of the webview')
@then(u'{thing} should not be in the {location} of the webview')
def _step(context, thing, location):
"""
:type context: HackedContext
:param thing: an identifier for an element; id, class name, partial link text, etc.
:param location: one of top, bottom, left, right, center, viewport
"""
is_correct = _is_element_in_webview_region(context, thing, location)
assert not is_correct, u'{} was at {}'.format(thing, location)
@then(u'{thing} should not be at the {location} of the app')
@then(u'{thing} should not be in the {location} of the app')
def _step(context, thing, location):
"""
:type context: HackedContext
:param thing: an identifier for an element; id, class name, partial link text, etc.
:param location: one of top, bottom, left, right, center, viewport
"""
is_correct = _is_element_in_native_region(context, thing, location)
assert not is_correct, u'{} was at {}'.format(thing, location)
@then(u'{thing} should be inside {element} located at {location}')
def _step(context, thing, element, location):
"""
:type context: HackedContext
"""
item = context.driver.simple_find(thing)
elem = context.driver.simple_find(element)
itemCorner = item.location
elemCorner = elem.location
if location == u'top-right':
elemWidth = elem.size['width']
itemWidth = item.size['width']
elemCorner['x'] = elemCorner['x'] + elemWidth
itemCorner['x'] = itemCorner['x'] + itemWidth
elif location == u'top-center':
elemWidth = elem.size['width']
itemWidth = item.size['width']
elemCorner['x'] = elemCorner['x'] + elemWidth / 2
itemCorner['x'] = itemCorner['x'] + itemWidth / 2
elif location == u'bottom-left':
elemHeight = elem.size['height']
itemHeight = item.size['height']
elemCorner['y'] = elemCorner['y'] + elemHeight
itemCorner['y'] = itemCorner['y'] + itemHeight
elif location == u'bottom-right':
elemWidth = elem.size['width']
itemWidth = item.size['width']
elemHeight = elem.size['height']
itemHeight = item.size['height']
elemCorner['x'] = elemCorner['x'] + elemWidth
itemCorner['x'] = itemCorner['x'] + itemWidth
elemCorner['y'] = elemCorner['y'] + elemHeight
itemCorner['y'] = itemCorner['y'] + itemHeight
elif location == u'bottom-center':
elemWidth = elem.size['width']
itemWidth = item.size['width']
elemHeight = elem.size['height']
itemHeight = item.size['height']
elemCorner['x'] = elemCorner['x'] + elemWidth / 2
itemCorner['x'] = itemCorner['x'] + itemWidth / 2
elemCorner['y'] = elemCorner['y'] + elemHeight
itemCorner['y'] = itemCorner['y'] + itemHeight
elif location == u'center':
elemWidth = elem.size['width']
itemWidth = item.size['width']
elemHeight = elem.size['height']
itemHeight = item.size['height']
elemCorner['x'] = elemCorner['x'] + elemWidth / 2
itemCorner['x'] = itemCorner['x'] + itemWidth / 2
elemCorner['y'] = elemCorner['y'] + elemHeight / 2
itemCorner['y'] = itemCorner['y'] + itemHeight / 2
elif location != u'top-left':
assert False,u'{} is not a supported location'.format(location)
xDiff = itemCorner['x'] - elemCorner['x']
yDiff = itemCorner['y'] - elemCorner['y']
# There may be rounding error, if any of the dimensions were odd numbers, so verify that they match within 1 pixel
    assert abs(xDiff) <= 1 and abs(yDiff) <= 1,\
        u'{} is not in expected location inside {} at {}. Expected at [{}, {}] but was at [{}, {}]'.format(thing, element, location, elemCorner['x'], elemCorner['y'], itemCorner['x'], itemCorner['y'])
@then(u'{thing} should contain the text {text}')
def _step(context, text, thing):
"""Assert that the given text is in the element found by searching for 'thing'.
:type context: HackedContext
"""
element = context.driver.simple_find(thing)
assert element, u'could not find {}'.format(thing)
assert text in element.text, u'specified text "{}" was not present in element text: "{}"'.format(text, element.text)
@then(u'{thing} should not contain the text {text}')
def _step(context, text, thing):
"""Assert that the given text is not in the element found by searching for 'thing'.
:type context: HackedContext
"""
element = context.driver.simple_find(thing)
assert element, u'could not find {}'.format(thing)
assert text not in element.text, u'specified text "{}" was present in element text: "{}"'.format(text, element.text)
@then(u'{thing} should contain the exact text {text}')
def _step(context, text, thing):
"""Assert that the given text is equal to the text in the element found by
searching for 'thing'.
:type context: HackedContext
"""
element = context.driver.simple_find(thing)
assert element, u'could not find {}'.format(thing)
assert text == element.text, u'specified text "{}"" != element text "{}"'.format(text, element.text)
@then(u'{text} should be in the current url')
def _step(context, text):
"""
:type context: HackedContext
"""
assert text in context.driver.current_url,\
u'"{}"" was not in the current url: {}'.format(text, context.driver.current_url)
@then(u'{text} should be in the page source')
def _step(context, text):
"""
:type context: HackedContext
"""
assert text in context.driver.page_source,\
u'"{}" was not in the page source'.format(text)
@then(u'{thing} should have a {attribute} containing {text}')
@then(u'{thing} should have an {attribute} containing {text}')
def _step(context, thing, attribute, text):
"""
:type context: HackedContext
"""
element = context.driver.simple_find(thing)
assert element, u'could not find {}'.format(thing)
value = element.get_attribute(attribute)
assert value, u'element did not have an attribute named {} or it was empty'.format(attribute)
assert text in value, u'could not find the text "{}" in the "{}" attribute, real value: "{}"'.format(text, attribute, value)
def assert_text_appears_in_logs(context, text_array, log_name):
"""
Tests that a list of strings each appear in the logs a specified number of times.
Set the number of times to 0 to verify that text does not appear in the logs.
:type context: HackedContext
text_array -- A dictionary containing key/value pairs for a word and its expected frequency
log_name -- the name of the log to search for the text in (e.g. logcat)
"""
results = {}
log_system = context.driver.get_log(log_name)
for log_entry in log_system:
for text_entry in text_array:
if text_entry not in results:
results[text_entry] = 0
if text_entry in log_entry[u'message']:
results[text_entry] += 1
# If we've already exceeded the number of expected occurrences, we can fail right away.
times = int(text_array[text_entry])
if times < results[text_entry]:
assert False,\
u"Expected {} {} times in the {} logs, but the number of occurrences exceeded the expectation".format(text_entry, times, log_name)
for text_entry in text_array:
times = int(text_array[text_entry])
if times >= 0:
assert times == results[text_entry],\
u"Expected {} {} times in the {} logs, but found it {} times".format(text_entry, times, log_name, results[text_entry])
else:
assert results[text_entry] > 0,\
u"{} was not found in the {} logs".format(text_entry, log_name)
@then(u'the {log_name} logs will contain the following strings')
@then(u'the {log_name} logs will contain strings based on their frequencies listed in the following table')
def _step(context, log_name):
"""
Tests that a list of strings each appear in the logs a specified number of times.
Set the number of times to 0 to verify that text does not appear in the logs.
Currently, we must call "I clear the {log_name} logs" at the beginning of your scenario
or else this step may pick up spillover from previous scenarios
TODO: In the future, we should make this extra log clearance call unnecessary by having the
functionality included in a listener or by adding a simple way to automatically enable or
disable log clearance ahead of time.
This step expects that the user will pass in a table listing strings and frequencies.
The table should be included immediately after the step is written out. Below is an example
of what the step and table declaration might look like in practice:
...
Then the logcat logs will contain the following strings
| string | frequency |
| Hello | 1 |
| Goodbye | 0 |
:type context: HackedContext
log_name -- the name of the log to search for the text in (e.g. logcat)
"""
text_array = {}
for row in context.table:
text_array[row['string']] = row['frequency']
assert_text_appears_in_logs(context, text_array, log_name)
@then(u'the strings {text_json} should be in the {log_name} logs')
def _step(context, text_json, log_name):
"""
Tests that a list of strings each appear in the logs a specified number of times.
Set the number of times to 0 to verify that text does not appear in the logs.
Currently, we must call "I clear the {log_name} logs" at the beginning of your scenario
or else this step may pick up spillover from previous scenarios
:type context: HackedContext
text_json -- A JSON-formatted dictionary
key -- text to search the logs for
value -- number of times the text is expected to appear in the logs
Example:
{"'This' is the Expected Text":1,"More Text":0}
log_name -- the name of the log to search for the text in (e.g. logcat)
"""
assert_text_appears_in_logs(context, ast.literal_eval(text_json), log_name)
@then(u'{text} should be in the {log_name} logs')
@then(u'{text} should be in the {log_name} logs {times} time')
@then(u'{text} should be in the {log_name} logs {times} times')
@then(u'the following string should be in the {log_name} logs: {text}')
@then(u'the following string should be in the {log_name} logs {times} time: {text}')
@then(u'the following string should be in the {log_name} logs {times} times: {text}')
def _step(context, text, log_name, times = -1):
"""
Tests that a string appears in the logs a specified number of times.
Set the number of times to 0 to verify that text does not appear in the logs.
For best results, call "I clear the {log_name} logs" at the beginning of your scenario, otherwise
this step may pick up spillover from previous scenarios
:type context: HackedContext
text -- the exact text to search for in the logs
log_name -- the name of the log to search for the text in (e.g. logcat)
times -- the number of times the text is expected to appear in the logs. If not set, or set to < 0, the text will be expected at least once.
"""
times = int(times)
log_system = context.driver.get_log(log_name)
found_times = 0
for log_entry in log_system:
if text in log_entry[u'message']:
log.debug(u"String found in {}".format(log_entry[u'message']))
found_times += 1
if times >= 0:
assert found_times == times,\
u"Expected {} {} times in the {} logs, but found it {} times".format(text, times, log_name, found_times)
else:
assert found_times > 0,\
u"{} was not found in the {} logs".format(text, log_name)
@then(u'{text} should not be in the {log_name} logs')
def _step(context, text, log_name):
"""
Tests that a string does not appear in the logs.
"""
context.run_steps('Then {} should be in the {} logs 0 times'.format(text, log_name))
@step('I save the {log_name} log message containing the text {regex} as {key}')
@step('I save the {log_name} log message matching the regular expression {regex} as {key}')
def _step(context, log_name, regex, key):
'''
Retrieves a log message containing the specified text and saves it to
HackedContext's saved_data for later access. This enables steps executed
after this step to access the found log message for further processing.
:type context: HackedContext
:param log_name: the name of the log to search for the text in (e.g. logcat)
:param regex: the regular expression to match the log message to
- this can also be a string without regular expression notation as it will also match
:param key: the key to save the message to in the context's saved_data
'''
log_system = context.driver.get_log(log_name)
for log_entry in log_system:
if re.search(regex, log_entry[u'message']):
context.saved_data[key] = log_entry[u'message']
    assert context.saved_data.get(key), u"{} was not found in the {} logs".format(regex, log_name)
@step('I verify {value} is in the context\'s list at {key}')
def _step(context, value, key):
'''
Verifies that the given value is not in the list stored at the
key of the context's saved_data
:type context: HackedContext
'''
assert value in context.saved_data[key]
@step('I verify {value} is not in the context\'s list at {key}')
def _step(context, value, key):
'''
Verifies that the given value is not in the list stored at the
key of the context's saved_data
:type context: HackedContext
'''
assert value not in context.saved_data[key]
@then(u'the {log_name} logs will contain the following strings in the same entry')
@then(u'the {log_name} logs will contain the following strings in the same line')
@then(u'the {log_name} logs will contain the following strings on the same line')
def _step(context, log_name):
"""
Tests that each given substring appears in the logs in the same log entry.
Each substring must appear at least once or else the assertion will fail.
This step expects that the user will pass in a table of substrings.
The table should be included immediately after the step is written out. Below is an example
of what the step and table declaration might look like in practice:
...
Then the logcat logs will contain the following strings in the same entry
| string |
| Hello |
| Goodbye |
:type context: HackedContext
log_name -- the name of the log to search for the text in (e.g. logcat)
"""
text_list = []
for row in context.table:
text_list.append(row['string'])
results = {}
log_system = context.driver.get_log(log_name)
all_strings_in_same_entry = False
for log_entry in log_system:
strings_present_so_far = True
for text_entry in text_list:
if re.search(text_entry, log_entry[u'message']) == None:
strings_present_so_far = False
break
if strings_present_so_far == True:
all_strings_in_same_entry = True
break
assert all_strings_in_same_entry, u"The strings were not present in the same entry of the {} logs".format(log_name)
def does_text_appear_in_logs(driver, text, log_name):
log_system = driver.get_log(log_name)
for log_entry in log_system:
if text in log_entry[u'message']:
return True
return False
@step(u'I wait until {text} appears in the {log_name} logs')
@step(u'I wait until the following text appears in the {log_name} logs: {text}')
@step(u'{text} will eventually appear in the {log_name} logs')
@step(u'I wait until {text} appears in the {log_name} logs within {wait_time} seconds')
@step(u'I wait until the following text appears in the {log_name} logs within {wait_time} seconds: {text}')
@step(u'{text} will eventually appear in the {log_name} logs within {wait_time} seconds')
def _step(context, text, log_name, wait_time=20):
"""
Waits for a string to appear in the logs within some time limit. Time limit defaults to 20 seconds.
"""
def isDisplayed(driver):
return does_text_appear_in_logs(driver, text, log_name)
visible = WebDriverWait(context.driver, float(wait_time)).until(isDisplayed)
    assert visible, u'{} did not appear in the {} logs within {} seconds'.format(text, log_name, wait_time)
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run a CANINE model for TyDi QA."""
from absl import logging
from language.canine import modeling as canine_modeling
from language.canine.tydiqa import char_splitter
from language.canine.tydiqa import run_tydi_lib
from language.canine.tydiqa import tydi_modeling
import tensorflow.compat.v1 as tf
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"model_config_file", None,
"The config json file corresponding to the pre-trained CANINE model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
flags.DEFINE_string("train_records_file", None,
"Precomputed tf records for training.")
flags.DEFINE_string(
"record_count_file", None,
"File containing number of precomputed training records "
"(in terms of 'features', meaning slices of articles). "
"This is used for computing how many steps to take in "
"each fine tuning epoch.")
flags.DEFINE_integer(
"candidate_beam", None,
"How many wordpiece offset to be considered as boundary at inference time.")
flags.DEFINE_string(
"predict_file", None,
"TyDi json for predictions. E.g., dev-v1.1.jsonl.gz or test-v1.1.jsonl.gz. "
"Used only for `--do_predict`.")
flags.DEFINE_string(
"precomputed_predict_file", None,
"TyDi tf.Example records for predictions, created separately by "
"`prepare_tydi_data.py` Used only for `--do_predict`.")
flags.DEFINE_string(
"output_prediction_file", None,
"Where to print predictions in TyDi prediction format, to be passed to"
"tydi_eval.py.")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained mBERT model).")
flags.DEFINE_integer(
"max_seq_length", None,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", None,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_question_length", None,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run prediction.")
flags.DEFINE_integer("train_batch_size", None, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", None,
"Total batch size for predictions.")
flags.DEFINE_integer(
"predict_file_shard_size", None, "[Optional] If specified, the maximum "
"number of examples to put into each temporary TF example file used as "
"model input at prediction time.")
flags.DEFINE_float("learning_rate", None, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", None,
"Total number of training epochs to perform.")
flags.DEFINE_integer("max_to_predict", None,
"Maximum number of examples to predict (for debugging). "
"`None` or `0` will disable this and predict all.")
flags.DEFINE_float(
"warmup_proportion", None, "Proportion of training to perform linear "
"learning rate warmup for. E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer(
"max_answer_length", None, "An upper bound on the number of subword pieces "
"that a generated answer may contain. This is needed because the start and "
"end predictions are not conditioned on one another.")
flags.DEFINE_float(
"include_unknowns", None,
"If positive, probability of including answers of type `UNKNOWN`.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal TyDi evaluation.")
flags.DEFINE_integer(
"max_passages", None, "Maximum number of passages to consider for a single "
"article. If an article contains more than this, they will be discarded "
"during training. BERT's WordPiece vocabulary must be modified to include "
"these within the [unused*] vocab IDs.")
flags.DEFINE_integer(
"max_position", None,
"Maximum passage position for which to generate special tokens.")
flags.DEFINE_bool(
"fail_on_invalid", True,
"Stop immediately on encountering an invalid example? "
"If false, just print a warning and skip it.")
### TPU-specific flags:
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class CanineTyDiRunner(run_tydi_lib.TyDiRunner):
"""CANINE version of TyDiRunner."""
def __init__(self):
super(CanineTyDiRunner, self).__init__(
model_config_file=None,
output_dir=FLAGS.output_dir,
train_records_file=FLAGS.train_records_file,
record_count_file=FLAGS.record_count_file,
candidate_beam=FLAGS.candidate_beam,
predict_file=FLAGS.predict_file,
precomputed_predict_file=FLAGS.precomputed_predict_file,
output_prediction_file=FLAGS.output_prediction_file,
init_checkpoint=FLAGS.init_checkpoint,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_question_length=FLAGS.max_question_length,
do_train=FLAGS.do_train,
do_predict=FLAGS.do_predict,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size,
predict_file_shard_size=FLAGS.predict_file_shard_size,
learning_rate=FLAGS.learning_rate,
num_train_epochs=FLAGS.num_train_epochs,
warmup_proportion=FLAGS.warmup_proportion,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
iterations_per_loop=FLAGS.iterations_per_loop,
max_answer_length=FLAGS.max_answer_length,
include_unknowns=FLAGS.include_unknowns,
verbose_logging=FLAGS.verbose_logging,
max_passages=FLAGS.max_passages,
max_position=FLAGS.max_position,
fail_on_invalid=FLAGS.fail_on_invalid,
use_tpu=FLAGS.use_tpu,
tpu_name=FLAGS.tpu_name,
tpu_zone=FLAGS.tpu_zone,
gcp_project=FLAGS.gcp_project,
master=FLAGS.master,
num_tpu_cores=FLAGS.num_tpu_cores,
max_to_predict=FLAGS.max_to_predict)
self.model_config = canine_modeling.CanineModelConfig.from_json_file(
FLAGS.model_config_file)
def validate_flags_or_throw(self):
"""Validate the input FLAGS or throw an exception."""
if FLAGS.model_config_file is None:
raise ValueError("model_config_file is required.")
if self.output_dir is None:
raise ValueError("output_dir is required.")
if not self.do_train and not self.do_predict:
raise ValueError("At least one of `{do_train,do_predict}` must be True.")
if self.do_train:
if not self.train_records_file:
raise ValueError("If `do_train` is True, then `train_records_file` "
"must be specified.")
if not self.record_count_file:
raise ValueError("If `do_train` is True, then `record_count_file` "
"must be specified.")
if not self.train_batch_size:
raise ValueError("If `do_train` is True, then `train_batch_size` "
"must be specified.")
if not self.learning_rate:
raise ValueError("If `do_train` is True, then `learning_rate` "
"must be specified.")
if not self.num_train_epochs:
raise ValueError("If `do_train` is True, then `num_train_epochs` "
"must be specified.")
if not self.warmup_proportion:
raise ValueError("If `do_train` is True, then `warmup_proportion` "
"must be specified.")
else:
if self.train_batch_size is None:
# TPUEstimator errors if train_batch_size is not a positive integer,
# even if we're not actually training.
self.train_batch_size = 1
if self.do_predict:
if not self.predict_file:
raise ValueError("If `do_predict` is True, "
"then `predict_file` must be specified.")
if not self.max_answer_length:
raise ValueError("If `do_predict` is True, "
"then `max_answer_length` must be specified.")
if not self.candidate_beam:
raise ValueError("If `do_predict` is True, "
"then `candidate_beam` must be specified.")
if not self.predict_batch_size:
raise ValueError("If `do_predict` is True, "
"then `predict_batch_size` must be specified.")
if not self.output_prediction_file:
raise ValueError("If `do_predict` is True, "
"then `output_prediction_file` must be specified.")
if not self.precomputed_predict_file:
if not self.max_passages:
raise ValueError("If `precomputed_predict_file` is not specified, "
"then `max_passages` must be specified.")
if not self.max_position:
raise ValueError("If `precomputed_predict_file` is not specified, "
"then `max_position` must be specified.")
if not self.doc_stride:
raise ValueError("If `precomputed_predict_file` is not specified, "
"then `doc_stride` must be specified.")
if not self.max_question_length:
raise ValueError("If `precomputed_predict_file` is not specified, "
"then `max_question_length` must be specified.")
if self.max_seq_length <= self.max_question_length + 3:
raise ValueError(
f"The max_seq_length ({self.max_seq_length}) must be greater "
f"than max_question_length ({self.max_question_length}) + 3")
if not self.include_unknowns:
raise ValueError("If `precomputed_predict_file` is not specified, "
"then `include_unknowns` must be specified.")
if self.max_seq_length > self.model_config.max_positions:
raise ValueError(
f"Cannot use sequence length {self.max_seq_length} "
"because the CANINE model was only trained up to sequence length "
f"{self.model_config.max_positions}")
def get_tokenizer(self):
return char_splitter.CharacterSplitter()
def get_tydi_model_builder(self):
return tydi_modeling.CanineModelBuilder(
model_config=self.model_config)
def main(_):
logging.set_verbosity(logging.INFO)
CanineTyDiRunner().run()
if __name__ == "__main__":
tf.disable_v2_behavior()
# Required with both `do_train` and `do_predict`:
flags.mark_flag_as_required("model_config_file")
flags.mark_flag_as_required("output_dir")
flags.mark_flag_as_required("init_checkpoint")
flags.mark_flag_as_required("max_seq_length")
tf.app.run()
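# A rough sketch of a prediction-only invocation; the module path, file names,
# and flag values below are illustrative assumptions, not part of this script:
#
#   python -m language.canine.tydiqa.run_tydi \
#     --model_config_file=canine_config.json \
#     --init_checkpoint=/path/to/canine_checkpoint \
#     --output_dir=/tmp/tydiqa_out \
#     --max_seq_length=2048 \
#     --do_predict \
#     --predict_file=tydiqa-v1.0-dev.jsonl.gz \
#     --precomputed_predict_file=/path/to/dev_records.tfrecord \
#     --candidate_beam=30 \
#     --max_answer_length=100 \
#     --predict_batch_size=32 \
#     --output_prediction_file=/tmp/tydiqa_out/predictions.jsonl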
# orm/unitofwork.py
# Copyright (C) 2005, 2006, 2007, 2008 Michael Bayer [email protected]
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""The internals for the Unit Of Work system.
Includes hooks into the attributes package enabling the routing of
change events to Unit Of Work objects, as well as the flush()
mechanism which creates a dependency structure that executes change
operations.
A Unit of Work is essentially a system of maintaining a graph of
in-memory objects and their modified state. Objects are maintained as
unique against their primary key identity using an *identity map*
pattern. The Unit of Work then maintains lists of objects that are
new, dirty, or deleted and provides the capability to flush all those
changes at once.
"""
import StringIO, weakref
from sqlalchemy import util, logging, topological, exceptions
from sqlalchemy.orm import attributes, interfaces
from sqlalchemy.orm import util as mapperutil
from sqlalchemy.orm.mapper import object_mapper, _state_mapper, has_identity
# Load lazily
object_session = None
class UOWEventHandler(interfaces.AttributeExtension):
"""An event handler added to all relation attributes which handles
session cascade operations.
"""
def __init__(self, key, class_, cascade):
self.key = key
self.class_ = class_
self.cascade = cascade
def _target_mapper(self, obj):
prop = object_mapper(obj).get_property(self.key)
return prop.mapper
def append(self, obj, item, initiator):
# process "save_update" cascade rules for when an instance is appended to the list of another instance
sess = object_session(obj)
if sess:
if self.cascade.save_update and item not in sess:
sess.save_or_update(item, entity_name=self._target_mapper(obj).entity_name)
def remove(self, obj, item, initiator):
sess = object_session(obj)
if sess:
# expunge pending orphans
if self.cascade.delete_orphan and item in sess.new:
if self._target_mapper(obj)._is_orphan(item):
sess.expunge(item)
def set(self, obj, newvalue, oldvalue, initiator):
# process "save_update" cascade rules for when an instance is attached to another instance
if oldvalue is newvalue:
return
sess = object_session(obj)
if sess:
if newvalue is not None and self.cascade.save_update and newvalue not in sess:
sess.save_or_update(newvalue, entity_name=self._target_mapper(obj).entity_name)
if self.cascade.delete_orphan and oldvalue in sess.new:
sess.expunge(oldvalue)
def register_attribute(class_, key, *args, **kwargs):
"""overrides attributes.register_attribute() to add UOW event handlers
to new InstrumentedAttributes.
"""
cascade = kwargs.pop('cascade', None)
useobject = kwargs.get('useobject', False)
if useobject:
# for object-holding attributes, instrument UOWEventHandler
# to process per-attribute cascades
extension = util.to_list(kwargs.pop('extension', None) or [])
extension.insert(0, UOWEventHandler(key, class_, cascade=cascade))
kwargs['extension'] = extension
return attributes.register_attribute(class_, key, *args, **kwargs)
class UnitOfWork(object):
"""Main UOW object which stores lists of dirty/new/deleted objects.
Provides top-level *flush* functionality as well as the
default transaction boundaries involved in a write
operation.
"""
def __init__(self, session):
if session.weak_identity_map:
self.identity_map = attributes.WeakInstanceDict()
else:
self.identity_map = attributes.StrongInstanceDict()
self.new = {} # InstanceState->object, strong refs object
self.deleted = {} # same
self.logger = logging.instance_logger(self, echoflag=session.echo_uow)
def _remove_deleted(self, state):
if '_instance_key' in state.dict:
del self.identity_map[state.dict['_instance_key']]
self.deleted.pop(state, None)
self.new.pop(state, None)
def _is_valid(self, state):
if '_instance_key' in state.dict:
return state.dict['_instance_key'] in self.identity_map
else:
return state in self.new
def _register_clean(self, state):
"""register the given object as 'clean' (i.e. persistent) within this unit of work, after
a save operation has taken place."""
mapper = _state_mapper(state)
instance_key = mapper._identity_key_from_state(state)
if '_instance_key' not in state.dict:
state.dict['_instance_key'] = instance_key
elif state.dict['_instance_key'] != instance_key:
# primary key switch
del self.identity_map[state.dict['_instance_key']]
state.dict['_instance_key'] = instance_key
if hasattr(state, 'insert_order'):
delattr(state, 'insert_order')
o = state.obj()
# prevent against last minute dereferences of the object
# TODO: identify a code path where state.obj() is None
if o is not None:
self.identity_map[state.dict['_instance_key']] = o
state.commit_all()
# remove from new last, might be the last strong ref
self.new.pop(state, None)
def register_new(self, obj):
"""register the given object as 'new' (i.e. unsaved) within this unit of work."""
if hasattr(obj, '_instance_key'):
raise exceptions.InvalidRequestError("Object '%s' already has an identity - it can't be registered as new" % repr(obj))
if obj._state not in self.new:
self.new[obj._state] = obj
obj._state.insert_order = len(self.new)
def register_deleted(self, obj):
"""register the given persistent object as 'to be deleted' within this unit of work."""
self.deleted[obj._state] = obj
def locate_dirty(self):
"""return a set of all persistent instances within this unit of work which
either contain changes or are marked as deleted.
"""
# a little bit of inlining for speed
return util.IdentitySet([x for x in self.identity_map.values()
if x._state not in self.deleted
and (
x._state.modified
or (x.__class__._class_state.has_mutable_scalars and x._state.is_modified())
)
])
def flush(self, session, objects=None):
"""create a dependency tree of all pending SQL operations within this unit of work and execute."""
dirty = [x for x in self.identity_map.all_states()
if x.modified
or (x.class_._class_state.has_mutable_scalars and x.is_modified())
]
if not dirty and not self.deleted and not self.new:
return
deleted = util.Set(self.deleted)
new = util.Set(self.new)
dirty = util.Set(dirty).difference(deleted)
flush_context = UOWTransaction(self, session)
if session.extension is not None:
session.extension.before_flush(session, flush_context, objects)
# create the set of all objects we want to operate upon
if objects:
# specific list passed in
objset = util.Set([o._state for o in objects])
else:
# or just everything
objset = util.Set(self.identity_map.all_states()).union(new)
# store objects whose fate has been decided
processed = util.Set()
# put all saves/updates into the flush context. detect top-level orphans and throw them into deleted.
for state in new.union(dirty).intersection(objset).difference(deleted):
if state in processed:
continue
obj = state.obj()
is_orphan = _state_mapper(state)._is_orphan(obj)
if is_orphan and not has_identity(obj):
raise exceptions.FlushError("instance %s is an unsaved, pending instance and is an orphan (is not attached to %s)" %
(
obj,
", nor ".join(["any parent '%s' instance via that classes' '%s' attribute" % (klass.__name__, key) for (key,klass) in _state_mapper(state).delete_orphans])
))
flush_context.register_object(state, isdelete=is_orphan)
processed.add(state)
# put all remaining deletes into the flush context.
for state in deleted.intersection(objset).difference(processed):
flush_context.register_object(state, isdelete=True)
if len(flush_context.tasks) == 0:
return
session.create_transaction(autoflush=False)
flush_context.transaction = session.transaction
try:
flush_context.execute()
if session.extension is not None:
session.extension.after_flush(session, flush_context)
session.commit()
except:
session.rollback()
raise
flush_context.post_exec()
if session.extension is not None:
session.extension.after_flush_postexec(session, flush_context)
def prune_identity_map(self):
"""Removes unreferenced instances cached in a strong-referencing identity map.
Note that this method is only meaningful if "weak_identity_map"
on the parent Session is set to False and therefore this UnitOfWork's
identity map is a regular dictionary
Removes any object in the identity map that is not referenced
in user code or scheduled for a unit of work operation. Returns
the number of objects pruned.
"""
if isinstance(self.identity_map, attributes.WeakInstanceDict):
return 0
ref_count = len(self.identity_map)
dirty = self.locate_dirty()
keepers = weakref.WeakValueDictionary(self.identity_map)
self.identity_map.clear()
self.identity_map.update(keepers)
return ref_count - len(self.identity_map)
class UOWTransaction(object):
"""Handles the details of organizing and executing transaction
tasks during a UnitOfWork object's flush() operation.
The central operation is to form a graph of nodes represented by the
``UOWTask`` class, which is then traversed by a ``UOWExecutor`` object
that issues SQL and instance-synchronizing operations via the related
packages.
"""
def __init__(self, uow, session):
self.uow = uow
self.session = session
self.mapper_flush_opts = session._mapper_flush_opts
# stores tuples of mapper/dependent mapper pairs,
# representing a partial ordering fed into topological sort
self.dependencies = util.Set()
# dictionary of mappers to UOWTasks
self.tasks = {}
# dictionary used by external actors to store arbitrary state
# information.
self.attributes = {}
self.logger = logging.instance_logger(self, echoflag=session.echo_uow)
def get_attribute_history(self, state, key, passive=True):
hashkey = ("history", state, key)
# cache the objects, not the states; the strong reference here
# prevents newly loaded objects from being dereferenced during the
# flush process
if hashkey in self.attributes:
(added, unchanged, deleted, cached_passive) = self.attributes[hashkey]
# if the cached lookup was "passive" and now we want non-passive, do a non-passive
# lookup and re-cache
if cached_passive and not passive:
(added, unchanged, deleted) = attributes.get_history(state, key, passive=False)
self.attributes[hashkey] = (added, unchanged, deleted, passive)
else:
(added, unchanged, deleted) = attributes.get_history(state, key, passive=passive)
self.attributes[hashkey] = (added, unchanged, deleted, passive)
if added is None:
return (added, unchanged, deleted)
else:
return (
[getattr(c, '_state', c) for c in added],
[getattr(c, '_state', c) for c in unchanged],
[getattr(c, '_state', c) for c in deleted],
)
def register_object(self, state, isdelete = False, listonly = False, postupdate=False, post_update_cols=None, **kwargs):
# if object is not in the overall session, do nothing
if not self.uow._is_valid(state):
if self._should_log_debug:
self.logger.debug("object %s not part of session, not registering for flush" % (mapperutil.state_str(state)))
return
if self._should_log_debug:
self.logger.debug("register object for flush: %s isdelete=%s listonly=%s postupdate=%s" % (mapperutil.state_str(state), isdelete, listonly, postupdate))
mapper = _state_mapper(state)
task = self.get_task_by_mapper(mapper)
if postupdate:
task.append_postupdate(state, post_update_cols)
else:
task.append(state, listonly, isdelete=isdelete, **kwargs)
def set_row_switch(self, state):
"""mark a deleted object as a 'row switch'.
this indicates that an INSERT statement elsewhere corresponds to this DELETE;
the INSERT is converted to an UPDATE and the DELETE does not occur.
"""
mapper = _state_mapper(state)
task = self.get_task_by_mapper(mapper)
taskelement = task._objects[state]
taskelement.isdelete = "rowswitch"
def is_deleted(self, state):
"""return true if the given state is marked as deleted within this UOWTransaction."""
mapper = _state_mapper(state)
task = self.get_task_by_mapper(mapper)
return task.is_deleted(state)
def get_task_by_mapper(self, mapper, dontcreate=False):
"""return UOWTask element corresponding to the given mapper.
Will create a new UOWTask, including a UOWTask corresponding to the
"base" inherited mapper, if needed, unless the dontcreate flag is True.
"""
try:
return self.tasks[mapper]
except KeyError:
if dontcreate:
return None
base_mapper = mapper.base_mapper
if base_mapper in self.tasks:
base_task = self.tasks[base_mapper]
else:
self.tasks[base_mapper] = base_task = UOWTask(self, base_mapper)
base_mapper._register_dependencies(self)
if mapper not in self.tasks:
self.tasks[mapper] = task = UOWTask(self, mapper, base_task=base_task)
mapper._register_dependencies(self)
else:
task = self.tasks[mapper]
return task
def register_dependency(self, mapper, dependency):
"""register a dependency between two mappers.
Called by ``mapper.PropertyLoader`` to register the objects
handled by one mapper being dependent on the objects handled
by another.
"""
# correct for primary mapper
# also convert to the "base mapper", the parentmost task at the top of an inheritance chain
        # dependency sorting is done via non-inheriting mappers only; dependencies between mappers
        # in the same inheritance chain are handled at the per-object level
mapper = mapper.primary_mapper().base_mapper
dependency = dependency.primary_mapper().base_mapper
self.dependencies.add((mapper, dependency))
def register_processor(self, mapper, processor, mapperfrom):
"""register a dependency processor, corresponding to dependencies between
the two given mappers.
"""
# correct for primary mapper
mapper = mapper.primary_mapper()
mapperfrom = mapperfrom.primary_mapper()
task = self.get_task_by_mapper(mapper)
targettask = self.get_task_by_mapper(mapperfrom)
up = UOWDependencyProcessor(processor, targettask)
task.dependencies.add(up)
def execute(self):
"""Execute this UOWTransaction.
This will organize all collected UOWTasks into a dependency-sorted
list which is then traversed using the traversal scheme
encoded in the UOWExecutor class. Operations to mappers and dependency
processors are fired off in order to issue SQL to the database and
synchronize instance attributes with database values and related
foreign key values."""
# pre-execute dependency processors. this process may
# result in new tasks, objects and/or dependency processors being added,
# particularly with 'delete-orphan' cascade rules.
# keep running through the full list of tasks until all
# objects have been processed.
while True:
ret = False
for task in self.tasks.values():
for up in list(task.dependencies):
if up.preexecute(self):
ret = True
if not ret:
break
tasks = self._sort_dependencies()
if self._should_log_info:
self.logger.info("Task dump:\n" + self._dump(tasks))
UOWExecutor().execute(self, tasks)
if self._should_log_info:
self.logger.info("Execute Complete")
def _dump(self, tasks):
buf = StringIO.StringIO()
import uowdumper
uowdumper.UOWDumper(tasks, buf)
return buf.getvalue()
def post_exec(self):
"""mark processed objects as clean / deleted after a successful flush().
this method is called within the flush() method after the
execute() method has succeeded and the transaction has been committed.
"""
for task in self.tasks.values():
for elem in task.elements:
if elem.state is None:
continue
if elem.isdelete:
self.uow._remove_deleted(elem.state)
else:
self.uow._register_clean(elem.state)
def _sort_dependencies(self):
nodes = topological.sort_with_cycles(self.dependencies,
[t.mapper for t in self.tasks.values() if t.base_task is t]
)
ret = []
for item, cycles in nodes:
task = self.get_task_by_mapper(item)
if cycles:
for t in task._sort_circular_dependencies(self, [self.get_task_by_mapper(i) for i in cycles]):
ret.append(t)
else:
ret.append(task)
if self._should_log_debug:
self.logger.debug("Dependent tuples:\n" + "\n".join(["(%s->%s)" % (d[0].class_.__name__, d[1].class_.__name__) for d in self.dependencies]))
self.logger.debug("Dependency sort:\n"+ str(ret))
return ret
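# Illustrative sketch only (not part of the original module): the dependency
# set collected above holds (parent_mapper, child_mapper) tuples forming a
# partial ordering.  A plain Kahn-style topological sort over such tuples,
# assuming hashable nodes and no cycles, could look like the helper below;
# the real implementation lives in the ``topological`` module and also
# detects cycles so they can be re-sorted at the per-instance level.
def _toposort_sketch(dependency_tuples, allitems):
    """Return items ordered so that parents precede their dependents."""
    from collections import defaultdict, deque
    edges = defaultdict(set)
    indegree = dict((item, 0) for item in allitems)
    for parent, child in dependency_tuples:
        if child not in edges[parent]:
            edges[parent].add(child)
            indegree[child] = indegree.get(child, 0) + 1
            indegree.setdefault(parent, 0)
    queue = deque([item for item, count in indegree.items() if count == 0])
    ordered = []
    while queue:
        node = queue.popleft()
        ordered.append(node)
        for child in edges[node]:
            indegree[child] -= 1
            if indegree[child] == 0:
                queue.append(child)
    return ordered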
class UOWTask(object):
"""Represents all of the objects in the UOWTransaction which correspond to
a particular mapper. This is the primary class of three classes used to generate
the elements of the dependency graph.
"""
def __init__(self, uowtransaction, mapper, base_task=None):
self.uowtransaction = uowtransaction
# base_task is the UOWTask which represents the "base mapper"
# in our mapper's inheritance chain. if the mapper does not
# inherit from any other mapper, the base_task is self.
# the _inheriting_tasks dictionary is a dictionary present only
# on the "base_task"-holding UOWTask, which maps all mappers within
# an inheritance hierarchy to their corresponding UOWTask instances.
if base_task is None:
self.base_task = self
self._inheriting_tasks = {mapper:self}
else:
self.base_task = base_task
base_task._inheriting_tasks[mapper] = self
# the Mapper which this UOWTask corresponds to
self.mapper = mapper
# mapping of InstanceState -> UOWTaskElement
self._objects = {}
self.dependencies = util.Set()
self.cyclical_dependencies = util.Set()
def polymorphic_tasks(self):
"""return an iterator of UOWTask objects corresponding to the inheritance sequence
of this UOWTask's mapper.
e.g. if mapper B and mapper C inherit from mapper A, and mapper D inherits from B:
mapperA -> mapperB -> mapperD
-> mapperC
the inheritance sequence starting at mapper A is a depth-first traversal:
[mapperA, mapperB, mapperD, mapperC]
this method will therefore return
[UOWTask(mapperA), UOWTask(mapperB), UOWTask(mapperD), UOWTask(mapperC)]
The concept of "polymporphic iteration" is adapted into several property-based
iterators which return object instances, UOWTaskElements and UOWDependencyProcessors
in an order corresponding to this sequence of parent UOWTasks. This is used to issue
operations related to inheritance-chains of mappers in the proper order based on
dependencies between those mappers.
"""
for mapper in self.mapper.polymorphic_iterator():
t = self.base_task._inheriting_tasks.get(mapper, None)
if t is not None:
yield t
def is_empty(self):
"""return True if this UOWTask is 'empty', meaning it has no child items.
used only for debugging output.
"""
return not self._objects and not self.dependencies
def append(self, state, listonly=False, isdelete=False):
if state not in self._objects:
self._objects[state] = rec = UOWTaskElement(state)
else:
rec = self._objects[state]
rec.update(listonly, isdelete)
def _append_cyclical_childtask(self, task):
if "cyclical" not in self._objects:
self._objects["cyclical"] = UOWTaskElement(None)
self._objects["cyclical"].childtasks.append(task)
def append_postupdate(self, state, post_update_cols):
"""issue a 'post update' UPDATE statement via this object's mapper immediately.
this operation is used only with relations that specify the `post_update=True`
flag.
"""
        # post-updates are issued as UPDATE statements immediately (for now)
        # convert post_update_cols list to a Set so that __hash__ is used to compare columns
        # instead of __eq__
self.mapper._save_obj([state], self.uowtransaction, postupdate=True, post_update_cols=util.Set(post_update_cols))
def __contains__(self, state):
"""return True if the given object is contained within this UOWTask or inheriting tasks."""
for task in self.polymorphic_tasks():
if state in task._objects:
return True
else:
return False
def is_deleted(self, state):
"""return True if the given object is marked as to be deleted within this UOWTask."""
try:
return self._objects[state].isdelete
except KeyError:
return False
def _polymorphic_collection(callable):
"""return a property that will adapt the collection returned by the
given callable into a polymorphic traversal."""
def collection(self):
for task in self.polymorphic_tasks():
for rec in callable(task):
yield rec
return property(collection)
elements = property(lambda self:self._objects.values())
polymorphic_elements = _polymorphic_collection(lambda task:task.elements)
polymorphic_tosave_elements = property(lambda self: [rec for rec in self.polymorphic_elements
if not rec.isdelete])
polymorphic_todelete_elements = property(lambda self:[rec for rec in self.polymorphic_elements
if rec.isdelete])
polymorphic_tosave_objects = property(lambda self:[rec.state for rec in self.polymorphic_elements
if rec.state is not None and not rec.listonly and rec.isdelete is False])
polymorphic_todelete_objects = property(lambda self:[rec.state for rec in self.polymorphic_elements
if rec.state is not None and not rec.listonly and rec.isdelete is True])
polymorphic_dependencies = _polymorphic_collection(lambda task:task.dependencies)
polymorphic_cyclical_dependencies = _polymorphic_collection(lambda task:task.cyclical_dependencies)
def _sort_circular_dependencies(self, trans, cycles):
"""Create a hierarchical tree of *subtasks*
which associate specific dependency actions with individual
objects. This is used for a *cyclical* task, or a task where
elements of its object list contain dependencies on each
other.
This is not the normal case; this logic only kicks in when
something like a hierarchical tree is being represented.
"""
allobjects = []
for task in cycles:
allobjects += [e.state for e in task.polymorphic_elements]
tuples = []
cycles = util.Set(cycles)
extradeplist = []
dependencies = {}
def get_dependency_task(state, depprocessor):
try:
dp = dependencies[state]
except KeyError:
dp = dependencies.setdefault(state, {})
try:
l = dp[depprocessor]
except KeyError:
l = UOWTask(self.uowtransaction, depprocessor.targettask.mapper)
dp[depprocessor] = l
return l
def dependency_in_cycles(dep):
proctask = trans.get_task_by_mapper(dep.processor.mapper.base_mapper, True)
targettask = trans.get_task_by_mapper(dep.targettask.mapper.base_mapper, True)
return targettask in cycles and (proctask is not None and proctask in cycles)
# organize all original UOWDependencyProcessors by their target task
deps_by_targettask = {}
for task in cycles:
for dep in task.polymorphic_dependencies:
if not dependency_in_cycles(dep):
extradeplist.append(dep)
for t in dep.targettask.polymorphic_tasks():
l = deps_by_targettask.setdefault(t, [])
l.append(dep)
object_to_original_task = {}
for task in cycles:
for subtask in task.polymorphic_tasks():
for taskelement in subtask.elements:
state = taskelement.state
object_to_original_task[state] = subtask
for dep in deps_by_targettask.get(subtask, []):
# is this dependency involved in one of the cycles ?
# (don't count the DetectKeySwitch prop)
if dep.processor.no_dependencies or not dependency_in_cycles(dep):
continue
(processor, targettask) = (dep.processor, dep.targettask)
isdelete = taskelement.isdelete
# list of dependent objects from this object
(added, unchanged, deleted) = dep.get_object_dependencies(state, trans, passive=True)
if not added and not unchanged and not deleted:
continue
# the task corresponding to saving/deleting of those dependent objects
childtask = trans.get_task_by_mapper(processor.mapper)
childlist = added + unchanged + deleted
for o in childlist:
# other object is None. this can occur if the relationship is many-to-one
# or one-to-one, and None was set. the "removed" object will be picked
# up in this iteration via the deleted_items() part of the collection.
if o is None:
continue
# the other object is not in the UOWTransaction ! but if we are many-to-one,
# we need a task in order to attach dependency operations, so establish a "listonly"
# task
if o not in childtask:
childtask.append(o, listonly=True)
object_to_original_task[o] = childtask
# create a tuple representing the "parent/child"
whosdep = dep.whose_dependent_on_who(state, o)
if whosdep is not None:
# append the tuple to the partial ordering.
tuples.append(whosdep)
# create a UOWDependencyProcessor representing this pair of objects.
# append it to a UOWTask
if whosdep[0] is state:
get_dependency_task(whosdep[0], dep).append(whosdep[0], isdelete=isdelete)
else:
get_dependency_task(whosdep[0], dep).append(whosdep[1], isdelete=isdelete)
else:
# TODO: no test coverage here
get_dependency_task(state, dep).append(state, isdelete=isdelete)
head = topological.sort_as_tree(tuples, allobjects)
used_tasks = util.Set()
def make_task_tree(node, parenttask, nexttasks):
(state, cycles, children) = node
originating_task = object_to_original_task[state]
used_tasks.add(originating_task)
t = nexttasks.get(originating_task, None)
if t is None:
t = UOWTask(self.uowtransaction, originating_task.mapper)
nexttasks[originating_task] = t
parenttask._append_cyclical_childtask(t)
t.append(state, originating_task._objects[state].listonly, isdelete=originating_task._objects[state].isdelete)
if state in dependencies:
for depprocessor, deptask in dependencies[state].iteritems():
t.cyclical_dependencies.add(depprocessor.branch(deptask))
nd = {}
for n in children:
t2 = make_task_tree(n, t, nd)
return t
t = UOWTask(self.uowtransaction, self.mapper)
# stick the non-circular dependencies onto the new UOWTask
for d in extradeplist:
t.dependencies.add(d)
if head is not None:
make_task_tree(head, t, {})
ret = [t]
        # add tasks that were in the cycle, but didn't get assembled
        # into the cyclical tree, to the start of the list
for t2 in cycles:
if t2 not in used_tasks and t2 is not self:
localtask = UOWTask(self.uowtransaction, t2.mapper)
                for elem in t2.elements:
                    localtask.append(elem.state, elem.listonly, isdelete=elem.isdelete)
for dep in t2.dependencies:
localtask.dependencies.add(dep)
ret.insert(0, localtask)
return ret
def __repr__(self):
if self.mapper is not None:
if self.mapper.__class__.__name__ == 'Mapper':
name = self.mapper.class_.__name__ + "/" + self.mapper.local_table.description
else:
name = repr(self.mapper)
else:
name = '(none)'
return ("UOWTask(%s) Mapper: '%s'" % (hex(id(self)), name))
class UOWTaskElement(object):
"""An element within a UOWTask.
Corresponds to a single object instance to be saved, deleted, or
just part of the transaction as a placeholder for further
dependencies (i.e. 'listonly').
may also store additional sub-UOWTasks.
"""
def __init__(self, state):
self.state = state
self.listonly = True
self.childtasks = []
self.isdelete = False
self.__preprocessed = {}
def update(self, listonly, isdelete):
if not listonly and self.listonly:
self.listonly = False
self.__preprocessed.clear()
if isdelete and not self.isdelete:
self.isdelete = True
self.__preprocessed.clear()
def mark_preprocessed(self, processor):
"""Mark this element as *preprocessed* by a particular ``UOWDependencyProcessor``.
Preprocessing is used by dependency.py to apply
flush-time cascade rules to relations and bring all
required objects into the flush context.
        Each processor is marked as "processed" when complete; however,
        changes to the state of this UOWTaskElement will reset
        the list of completed processors, so that they
        execute again until no new objects or state changes
        are brought in.
"""
self.__preprocessed[processor] = True
def is_preprocessed(self, processor):
return self.__preprocessed.get(processor, False)
def __repr__(self):
return "UOWTaskElement/%d: %s/%d %s" % (id(self), self.state.class_.__name__, id(self.state.obj()), (self.listonly and 'listonly' or (self.isdelete and 'delete' or 'save')) )
class UOWDependencyProcessor(object):
"""In between the saving and deleting of objects, process
*dependent* data, such as filling in a foreign key on a child item
from a new primary key, or deleting association rows before a
delete. This object acts as a proxy to a DependencyProcessor.
"""
def __init__(self, processor, targettask):
self.processor = processor
self.targettask = targettask
def __repr__(self):
return "UOWDependencyProcessor(%s, %s)" % (str(self.processor), str(self.targettask))
def __str__(self):
return repr(self)
def __eq__(self, other):
return other.processor is self.processor and other.targettask is self.targettask
def __hash__(self):
return hash((self.processor, self.targettask))
def preexecute(self, trans):
"""preprocess all objects contained within this ``UOWDependencyProcessor``s target task.
This may locate additional objects which should be part of the
transaction, such as those affected deletes, orphans to be
deleted, etc.
Once an object is preprocessed, its ``UOWTaskElement`` is marked as processed. If subsequent
changes occur to the ``UOWTaskElement``, its processed flag is reset, and will require processing
again.
Return True if any objects were preprocessed, or False if no
objects were preprocessed. If True is returned, the parent ``UOWTransaction`` will
ultimately call ``preexecute()`` again on all processors until no new objects are processed.
"""
def getobj(elem):
elem.mark_preprocessed(self)
return elem.state
ret = False
elements = [getobj(elem) for elem in self.targettask.polymorphic_tosave_elements if elem.state is not None and not elem.is_preprocessed(self)]
if elements:
ret = True
self.processor.preprocess_dependencies(self.targettask, elements, trans, delete=False)
elements = [getobj(elem) for elem in self.targettask.polymorphic_todelete_elements if elem.state is not None and not elem.is_preprocessed(self)]
if elements:
ret = True
self.processor.preprocess_dependencies(self.targettask, elements, trans, delete=True)
return ret
def execute(self, trans, delete):
"""process all objects contained within this ``UOWDependencyProcessor``s target task."""
if not delete:
self.processor.process_dependencies(self.targettask, [elem.state for elem in self.targettask.polymorphic_tosave_elements if elem.state is not None], trans, delete=False)
else:
self.processor.process_dependencies(self.targettask, [elem.state for elem in self.targettask.polymorphic_todelete_elements if elem.state is not None], trans, delete=True)
def get_object_dependencies(self, state, trans, passive):
return trans.get_attribute_history(state, self.processor.key, passive=passive)
def whose_dependent_on_who(self, state1, state2):
"""establish which object is operationally dependent amongst a parent/child
using the semantics stated by the dependency processor.
This method is used to establish a partial ordering (set of dependency tuples)
when toplogically sorting on a per-instance basis.
"""
return self.processor.whose_dependent_on_who(state1, state2)
def branch(self, task):
"""create a copy of this ``UOWDependencyProcessor`` against a new ``UOWTask`` object.
this is used within the instance-level sorting operation when a single ``UOWTask``
is broken up into many individual ``UOWTask`` objects.
"""
return UOWDependencyProcessor(self.processor, task)
class UOWExecutor(object):
"""Encapsulates the execution traversal of a UOWTransaction structure."""
def execute(self, trans, tasks, isdelete=None):
if isdelete is not True:
for task in tasks:
self.execute_save_steps(trans, task)
if isdelete is not False:
for task in util.reversed(tasks):
self.execute_delete_steps(trans, task)
def save_objects(self, trans, task):
task.mapper._save_obj(task.polymorphic_tosave_objects, trans)
def delete_objects(self, trans, task):
task.mapper._delete_obj(task.polymorphic_todelete_objects, trans)
def execute_dependency(self, trans, dep, isdelete):
dep.execute(trans, isdelete)
def execute_save_steps(self, trans, task):
self.save_objects(trans, task)
self.execute_cyclical_dependencies(trans, task, False)
self.execute_per_element_childtasks(trans, task, False)
self.execute_dependencies(trans, task, False)
self.execute_dependencies(trans, task, True)
def execute_delete_steps(self, trans, task):
self.execute_cyclical_dependencies(trans, task, True)
self.execute_per_element_childtasks(trans, task, True)
self.delete_objects(trans, task)
def execute_dependencies(self, trans, task, isdelete=None):
if isdelete is not True:
for dep in task.polymorphic_dependencies:
self.execute_dependency(trans, dep, False)
if isdelete is not False:
for dep in util.reversed(list(task.polymorphic_dependencies)):
self.execute_dependency(trans, dep, True)
def execute_cyclical_dependencies(self, trans, task, isdelete):
for dep in task.polymorphic_cyclical_dependencies:
self.execute_dependency(trans, dep, isdelete)
def execute_per_element_childtasks(self, trans, task, isdelete):
for element in task.polymorphic_tosave_elements + task.polymorphic_todelete_elements:
self.execute_element_childtasks(trans, element, isdelete)
def execute_element_childtasks(self, trans, element, isdelete):
for child in element.childtasks:
self.execute(trans, [child], isdelete)
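# Note on traversal order (summary added for clarity, derived from the
# methods above): for each task, in dependency-sorted order, the "save" pass
# runs save_objects, then cyclical dependencies, then per-element child
# tasks, then dependency processors (save direction followed by delete
# direction).  The "delete" pass then walks the tasks in reverse order and
# runs cyclical dependencies, per-element child tasks, and finally
# delete_objects.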
|
|
"""Test inter-conversion of different polynomial classes.
This tests the convert and cast methods of all the polynomial classes.
"""
from __future__ import division, absolute_import, print_function
import operator as op
from numbers import Number
import pytest
import numpy as np
from numpy.polynomial import (
Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE)
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
from numpy.compat import long
#
# fixtures
#
classes = (
Polynomial, Legendre, Chebyshev, Laguerre,
Hermite, HermiteE
)
classids = tuple(cls.__name__ for cls in classes)
@pytest.fixture(params=classes, ids=classids)
def Poly(request):
return request.param
#
# helper functions
#
random = np.random.random
def assert_poly_almost_equal(p1, p2, msg=""):
try:
assert_(np.all(p1.domain == p2.domain))
assert_(np.all(p1.window == p2.window))
assert_almost_equal(p1.coef, p2.coef)
except AssertionError:
msg = "Result: %s\nTarget: %s", (p1, p2)
raise AssertionError(msg)
#
# Test conversion methods that depend on combinations of two classes.
#
Poly1 = Poly
Poly2 = Poly
def test_conversion(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
d1 = Poly1.domain + random((2,))*.25
w1 = Poly1.window + random((2,))*.25
p1 = Poly1(coef, domain=d1, window=w1)
d2 = Poly2.domain + random((2,))*.25
w2 = Poly2.window + random((2,))*.25
p2 = p1.convert(kind=Poly2, domain=d2, window=w2)
assert_almost_equal(p2.domain, d2)
assert_almost_equal(p2.window, w2)
assert_almost_equal(p2(x), p1(x))
def test_cast(Poly1, Poly2):
x = np.linspace(0, 1, 10)
coef = random((3,))
d1 = Poly1.domain + random((2,))*.25
w1 = Poly1.window + random((2,))*.25
p1 = Poly1(coef, domain=d1, window=w1)
d2 = Poly2.domain + random((2,))*.25
w2 = Poly2.window + random((2,))*.25
p2 = Poly2.cast(p1, domain=d2, window=w2)
assert_almost_equal(p2.domain, d2)
assert_almost_equal(p2.window, w2)
assert_almost_equal(p2(x), p1(x))
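# Illustrative helper, not part of the original test suite: ``convert`` is
# called on an instance while ``cast`` is a classmethod on the target class,
# but both express the same series in another basis.  The helper name is
# hypothetical.
def _example_convert_vs_cast():
    p = Polynomial([1, 2, 3])
    c1 = p.convert(kind=Chebyshev)   # instance method, target kind given
    c2 = Chebyshev.cast(p)           # classmethod on the target class
    assert_almost_equal(c1.coef, c2.coef)
    x = np.linspace(-1, 1, 7)
    assert_almost_equal(c1(x), p(x))  # same function, different basis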
#
# test methods that depend on one class
#
def test_identity(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
x = np.linspace(d[0], d[1], 11)
p = Poly.identity(domain=d, window=w)
assert_equal(p.domain, d)
assert_equal(p.window, w)
assert_almost_equal(p(x), x)
def test_basis(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly.basis(5, domain=d, window=w)
assert_equal(p.domain, d)
assert_equal(p.window, w)
assert_equal(p.coef, [0]*5 + [1])
def test_fromroots(Poly):
# check that requested roots are zeros of a polynomial
# of correct degree, domain, and window.
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
r = random((5,))
p1 = Poly.fromroots(r, domain=d, window=w)
assert_equal(p1.degree(), len(r))
assert_equal(p1.domain, d)
assert_equal(p1.window, w)
assert_almost_equal(p1(r), 0)
# check that polynomial is monic
pdom = Polynomial.domain
pwin = Polynomial.window
p2 = Polynomial.cast(p1, domain=pdom, window=pwin)
assert_almost_equal(p2.coef[-1], 1)
def test_fit(Poly):
def f(x):
return x*(x - 1)*(x - 2)
x = np.linspace(0, 3)
y = f(x)
# check default value of domain and window
p = Poly.fit(x, y, 3)
assert_almost_equal(p.domain, [0, 3])
assert_almost_equal(p(x), y)
assert_equal(p.degree(), 3)
    # check with given domain and window
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly.fit(x, y, 3, domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, d)
assert_almost_equal(p.window, w)
# check with class domain default
p = Poly.fit(x, y, 3, [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
p = Poly.fit(x, y, [0, 1, 2, 3], [])
assert_equal(p.domain, Poly.domain)
assert_equal(p.window, Poly.window)
# check that fit accepts weights.
w = np.zeros_like(x)
z = y + random(y.shape)*.25
w[::2] = 1
p1 = Poly.fit(x[::2], z[::2], 3)
p2 = Poly.fit(x, z, 3, w=w)
p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w)
assert_almost_equal(p1(x), p2(x))
assert_almost_equal(p2(x), p3(x))
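# Illustrative helper, not part of the original test suite: zero weights
# exclude points from the fit entirely, so fitting only the odd-indexed
# points should agree with a weighted fit over all points.  The data here is
# exact, so both fits recover the same cubic.  The helper name is
# hypothetical.
def _example_weighted_fit():
    x = np.linspace(0, 3, 30)
    y = x*(x - 1)*(x - 2)
    w = np.zeros_like(x)
    w[1::2] = 1
    weighted = Polynomial.fit(x, y, 3, w=w)
    subset = Polynomial.fit(x[1::2], y[1::2], 3)
    assert_almost_equal(weighted(x), subset(x))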
def test_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
assert_(p1 == p1)
assert_(not p1 == p2)
assert_(not p1 == p3)
assert_(not p1 == p4)
def test_not_equal(Poly):
p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
assert_(not p1 != p1)
assert_(p1 != p2)
assert_(p1 != p3)
assert_(p1 != p4)
def test_add(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 + p2
assert_poly_almost_equal(p2 + p1, p3)
assert_poly_almost_equal(p1 + c2, p3)
assert_poly_almost_equal(c2 + p1, p3)
assert_poly_almost_equal(p1 + tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) + p1, p3)
assert_poly_almost_equal(p1 + np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) + p1, p3)
assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.add, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.add, p1, Polynomial([0]))
def test_sub(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 - p2
assert_poly_almost_equal(p2 - p1, -p3)
assert_poly_almost_equal(p1 - c2, p3)
assert_poly_almost_equal(c2 - p1, -p3)
assert_poly_almost_equal(p1 - tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) - p1, -p3)
assert_poly_almost_equal(p1 - np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) - p1, -p3)
assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.sub, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.sub, p1, Polynomial([0]))
def test_mul(Poly):
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = p1 * p2
assert_poly_almost_equal(p2 * p1, p3)
assert_poly_almost_equal(p1 * c2, p3)
assert_poly_almost_equal(c2 * p1, p3)
assert_poly_almost_equal(p1 * tuple(c2), p3)
assert_poly_almost_equal(tuple(c2) * p1, p3)
assert_poly_almost_equal(p1 * np.array(c2), p3)
assert_poly_almost_equal(np.array(c2) * p1, p3)
assert_poly_almost_equal(p1 * 2, p1 * Poly([2]))
assert_poly_almost_equal(2 * p1, p1 * Poly([2]))
assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.mul, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.mul, p1, Polynomial([0]))
def test_floordiv(Poly):
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
assert_poly_almost_equal(p4 // p2, p1)
assert_poly_almost_equal(p4 // c2, p1)
assert_poly_almost_equal(c4 // p2, p1)
assert_poly_almost_equal(p4 // tuple(c2), p1)
assert_poly_almost_equal(tuple(c4) // p2, p1)
assert_poly_almost_equal(p4 // np.array(c2), p1)
assert_poly_almost_equal(np.array(c4) // p2, p1)
assert_poly_almost_equal(2 // p2, Poly([0]))
assert_poly_almost_equal(p2 // 2, 0.5*p2)
assert_raises(
TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(
TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.floordiv, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.floordiv, p1, Polynomial([0]))
def test_truediv(Poly):
# true division is valid only if the denominator is a Number and
# not a python bool.
p1 = Poly([1,2,3])
p2 = p1 * 5
for stype in np.ScalarType:
if not issubclass(stype, Number) or issubclass(stype, bool):
continue
s = stype(5)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for stype in (int, long, float):
s = stype(5)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for stype in [complex]:
s = stype(5, 0)
assert_poly_almost_equal(op.truediv(p2, s), p1)
assert_raises(TypeError, op.truediv, s, p2)
for s in [tuple(), list(), dict(), bool(), np.array([1])]:
assert_raises(TypeError, op.truediv, p2, s)
assert_raises(TypeError, op.truediv, s, p2)
for ptype in classes:
assert_raises(TypeError, op.truediv, p2, ptype(1))
def test_mod(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
assert_poly_almost_equal(p4 % p2, p3)
assert_poly_almost_equal(p4 % c2, p3)
assert_poly_almost_equal(c4 % p2, p3)
assert_poly_almost_equal(p4 % tuple(c2), p3)
assert_poly_almost_equal(tuple(c4) % p2, p3)
assert_poly_almost_equal(p4 % np.array(c2), p3)
assert_poly_almost_equal(np.array(c4) % p2, p3)
assert_poly_almost_equal(2 % p2, Poly([2]))
assert_poly_almost_equal(p2 % 2, Poly([0]))
assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, op.mod, p1, Chebyshev([0]))
else:
assert_raises(TypeError, op.mod, p1, Polynomial([0]))
def test_divmod(Poly):
# This checks commutation, not numerical correctness
c1 = list(random((4,)) + .5)
c2 = list(random((3,)) + .5)
c3 = list(random((2,)) + .5)
p1 = Poly(c1)
p2 = Poly(c2)
p3 = Poly(c3)
p4 = p1 * p2 + p3
c4 = list(p4.coef)
quo, rem = divmod(p4, p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, c2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(c4, p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, tuple(c2))
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(tuple(c4), p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p4, np.array(c2))
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(np.array(c4), p2)
assert_poly_almost_equal(quo, p1)
assert_poly_almost_equal(rem, p3)
quo, rem = divmod(p2, 2)
assert_poly_almost_equal(quo, 0.5*p2)
assert_poly_almost_equal(rem, Poly([0]))
quo, rem = divmod(2, p2)
assert_poly_almost_equal(quo, Poly([0]))
assert_poly_almost_equal(rem, Poly([2]))
assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1))
assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1))
if Poly is Polynomial:
assert_raises(TypeError, divmod, p1, Chebyshev([0]))
else:
assert_raises(TypeError, divmod, p1, Polynomial([0]))
def test_roots(Poly):
d = Poly.domain * 1.25 + .25
w = Poly.window
tgt = np.linspace(d[0], d[1], 5)
res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots())
assert_almost_equal(res, tgt)
# default domain and window
res = np.sort(Poly.fromroots(tgt).roots())
assert_almost_equal(res, tgt)
def test_degree(Poly):
p = Poly.basis(5)
assert_equal(p.degree(), 5)
def test_copy(Poly):
p1 = Poly.basis(5)
p2 = p1.copy()
assert_(p1 == p2)
assert_(p1 is not p2)
assert_(p1.coef is not p2.coef)
assert_(p1.domain is not p2.domain)
assert_(p1.window is not p2.window)
def test_integ(Poly):
P = Polynomial
# Check defaults
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
p1 = P.cast(p0.integ())
p2 = P.cast(p0.integ(2))
assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
# Check with k
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
p1 = P.cast(p0.integ(k=1))
p2 = P.cast(p0.integ(2, k=[1, 1]))
assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
# Check with lbnd
p0 = Poly.cast(P([1*2, 2*3, 3*4]))
p1 = P.cast(p0.integ(lbnd=1))
p2 = P.cast(p0.integ(2, lbnd=1))
assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
# Check scaling
d = 2*Poly.domain
p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d)
p1 = P.cast(p0.integ())
p2 = P.cast(p0.integ(2))
assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
def test_deriv(Poly):
# Check that the derivative is the inverse of integration. It is
    # assumed that the integration has been checked elsewhere.
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p1 = Poly([1, 2, 3], domain=d, window=w)
p2 = p1.integ(2, k=[1, 2])
p3 = p1.integ(1, k=[1])
assert_almost_equal(p2.deriv(1).coef, p3.coef)
assert_almost_equal(p2.deriv(2).coef, p1.coef)
# default domain and window
p1 = Poly([1, 2, 3])
p2 = p1.integ(2, k=[1, 2])
p3 = p1.integ(1, k=[1])
assert_almost_equal(p2.deriv(1).coef, p3.coef)
assert_almost_equal(p2.deriv(2).coef, p1.coef)
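# Illustrative helper, not part of the original test suite: differentiation
# undoes integration up to the integration constant.  The helper name is
# hypothetical.
def _example_integ_deriv_roundtrip():
    p = Polynomial([1, 2, 3])
    q = p.integ(1, k=[5])             # one antiderivative, constant term 5
    assert_almost_equal(q.coef, [5, 1, 1, 1])
    assert_almost_equal(q.deriv(1).coef, p.coef)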
def test_linspace(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
p = Poly([1, 2, 3], domain=d, window=w)
# check default domain
xtgt = np.linspace(d[0], d[1], 20)
ytgt = p(xtgt)
xres, yres = p.linspace(20)
assert_almost_equal(xres, xtgt)
assert_almost_equal(yres, ytgt)
# check specified domain
xtgt = np.linspace(0, 2, 20)
ytgt = p(xtgt)
xres, yres = p.linspace(20, domain=[0, 2])
assert_almost_equal(xres, xtgt)
assert_almost_equal(yres, ytgt)
def test_pow(Poly):
d = Poly.domain + random((2,))*.25
w = Poly.window + random((2,))*.25
tgt = Poly([1], domain=d, window=w)
tst = Poly([1, 2, 3], domain=d, window=w)
for i in range(5):
assert_poly_almost_equal(tst**i, tgt)
tgt = tgt * tst
# default domain and window
tgt = Poly([1])
tst = Poly([1, 2, 3])
for i in range(5):
assert_poly_almost_equal(tst**i, tgt)
tgt = tgt * tst
# check error for invalid powers
assert_raises(ValueError, op.pow, tgt, 1.5)
assert_raises(ValueError, op.pow, tgt, -1)
def test_call(Poly):
P = Polynomial
d = Poly.domain
x = np.linspace(d[0], d[1], 11)
# Check defaults
p = Poly.cast(P([1, 2, 3]))
tgt = 1 + x*(2 + 3*x)
res = p(x)
assert_almost_equal(res, tgt)
def test_cutdeg(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.cutdeg, .5)
assert_raises(ValueError, p.cutdeg, -1)
assert_equal(len(p.cutdeg(3)), 3)
assert_equal(len(p.cutdeg(2)), 3)
assert_equal(len(p.cutdeg(1)), 2)
assert_equal(len(p.cutdeg(0)), 1)
def test_truncate(Poly):
p = Poly([1, 2, 3])
assert_raises(ValueError, p.truncate, .5)
assert_raises(ValueError, p.truncate, 0)
assert_equal(len(p.truncate(4)), 3)
assert_equal(len(p.truncate(3)), 3)
assert_equal(len(p.truncate(2)), 2)
assert_equal(len(p.truncate(1)), 1)
def test_trim(Poly):
c = [1, 1e-6, 1e-12, 0]
p = Poly(c)
assert_equal(p.trim().coef, c[:3])
assert_equal(p.trim(1e-10).coef, c[:2])
assert_equal(p.trim(1e-5).coef, c[:1])
def test_mapparms(Poly):
# check with defaults. Should be identity.
d = Poly.domain
w = Poly.window
p = Poly([1], domain=d, window=w)
assert_almost_equal([0, 1], p.mapparms())
#
w = 2*d + 1
p = Poly([1], domain=d, window=w)
assert_almost_equal([1, 2], p.mapparms())
def test_ufunc_override(Poly):
p = Poly([1, 2, 3])
x = np.ones(3)
assert_raises(TypeError, np.add, p, x)
assert_raises(TypeError, np.add, x, p)
class TestLatexRepr(object):
"""Test the latex repr used by ipython """
def as_latex(self, obj):
# right now we ignore the formatting of scalars in our tests, since
# it makes them too verbose. Ideally, the formatting of scalars will
# be fixed such that tests below continue to pass
obj._repr_latex_scalar = lambda x: str(x)
try:
return obj._repr_latex_()
finally:
del obj._repr_latex_scalar
def test_simple_polynomial(self):
# default input
p = Polynomial([1, 2, 3])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$')
# translated input
p = Polynomial([1, 2, 3], domain=[-2, 0])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$')
# scaled input
p = Polynomial([1, 2, 3], domain=[-0.5, 0.5])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$')
# affine input
p = Polynomial([1, 2, 3], domain=[-1, 0])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$')
def test_basis_func(self):
p = Chebyshev([1, 2, 3])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$')
# affine input - check no surplus parens are added
p = Chebyshev([1, 2, 3], domain=[-1, 0])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$')
def test_multichar_basis_func(self):
p = HermiteE([1, 2, 3])
assert_equal(self.as_latex(p),
r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$')
#
# Test class method that only exists for some classes
#
class TestInterpolate(object):
def f(self, x):
return x * (x - 1) * (x - 2)
def test_raises(self):
assert_raises(ValueError, Chebyshev.interpolate, self.f, -1)
assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.)
def test_dimensions(self):
for deg in range(1, 5):
assert_(Chebyshev.interpolate(self.f, deg).degree() == deg)
def test_approximation(self):
def powx(x, p):
return x**p
x = np.linspace(0, 2, 10)
for deg in range(0, 10):
for t in range(0, deg + 1):
p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,))
assert_almost_equal(p(x), powx(x, t), decimal=12)
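# Illustrative helper, not part of the original test suite: interpolation at
# the Chebyshev points gives a near-minimax polynomial approximation on the
# requested domain.  The degree and domain below are arbitrary choices and
# the helper name is hypothetical.
def _example_interpolate_cosine():
    p = Chebyshev.interpolate(np.cos, 10, domain=[0, 2])
    x = np.linspace(0, 2, 50)
    assert_almost_equal(p(x), np.cos(x))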
|
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Filters for Google Cloud Bigtable Row classes."""
from gcloud._helpers import _microseconds_from_datetime
from gcloud._helpers import _to_bytes
from gcloud.bigtable._generated_v2 import (
data_pb2 as data_v2_pb2)
class RowFilter(object):
"""Basic filter to apply to cells in a row.
These values can be combined via :class:`RowFilterChain`,
:class:`RowFilterUnion` and :class:`ConditionalRowFilter`.
.. note::
This class is a do-nothing base class for all row filters.
"""
def __ne__(self, other):
return not self.__eq__(other)
class _BoolFilter(RowFilter):
"""Row filter that uses a boolean flag.
:type flag: bool
:param flag: An indicator if a setting is turned on or off.
"""
def __init__(self, flag):
self.flag = flag
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.flag == self.flag
class SinkFilter(_BoolFilter):
"""Advanced row filter to skip parent filters.
:type flag: bool
:param flag: ADVANCED USE ONLY. Hook for introspection into the row filter.
Outputs all cells directly to the output of the read rather
than to any parent filter. Cannot be used within the
``predicate_filter``, ``true_filter``, or ``false_filter``
of a :class:`ConditionalRowFilter`.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(sink=self.flag)
class PassAllFilter(_BoolFilter):
"""Row filter equivalent to not filtering at all.
:type flag: bool
:param flag: Matches all cells, regardless of input. Functionally
equivalent to leaving ``filter`` unset, but included for
completeness.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(pass_all_filter=self.flag)
class BlockAllFilter(_BoolFilter):
"""Row filter that doesn't match any cells.
:type flag: bool
:param flag: Does not match any cells, regardless of input. Useful for
temporarily disabling just part of a filter.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(block_all_filter=self.flag)
class _RegexFilter(RowFilter):
"""Row filter that uses a regular expression.
The ``regex`` must be valid RE2 patterns. See Google's
`RE2 reference`_ for the accepted syntax.
.. _RE2 reference: https://github.com/google/re2/wiki/Syntax
:type regex: bytes or str
:param regex: A regular expression (RE2) for some row filter.
"""
def __init__(self, regex):
self.regex = _to_bytes(regex)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.regex == self.regex
class RowKeyRegexFilter(_RegexFilter):
"""Row filter for a row key regular expression.
The ``regex`` must be valid RE2 patterns. See Google's
`RE2 reference`_ for the accepted syntax.
.. _RE2 reference: https://github.com/google/re2/wiki/Syntax
.. note::
        Special care must be taken with the expression used. Since
each of these properties can contain arbitrary bytes, the ``\\C``
escape sequence must be used if a true wildcard is desired. The ``.``
character will not match the new line character ``\\n``, which may be
present in a binary value.
:type regex: bytes
:param regex: A regular expression (RE2) to match cells from rows with row
keys that satisfy this regex. For a
``CheckAndMutateRowRequest``, this filter is unnecessary
since the row key is already specified.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(row_key_regex_filter=self.regex)
class RowSampleFilter(RowFilter):
"""Matches all cells from a row with probability p.
:type sample: float
:param sample: The probability of matching a cell (must be in the
interval ``[0, 1]``).
"""
def __init__(self, sample):
self.sample = sample
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.sample == self.sample
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(row_sample_filter=self.sample)
class FamilyNameRegexFilter(_RegexFilter):
"""Row filter for a family name regular expression.
The ``regex`` must be valid RE2 patterns. See Google's
`RE2 reference`_ for the accepted syntax.
.. _RE2 reference: https://github.com/google/re2/wiki/Syntax
:type regex: str
:param regex: A regular expression (RE2) to match cells from columns in a
given column family. For technical reasons, the regex must
not contain the ``':'`` character, even if it is not being
used as a literal.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(family_name_regex_filter=self.regex)
class ColumnQualifierRegexFilter(_RegexFilter):
"""Row filter for a column qualifier regular expression.
The ``regex`` must be valid RE2 patterns. See Google's
`RE2 reference`_ for the accepted syntax.
.. _RE2 reference: https://github.com/google/re2/wiki/Syntax
.. note::
        Special care must be taken with the expression used. Since
each of these properties can contain arbitrary bytes, the ``\\C``
escape sequence must be used if a true wildcard is desired. The ``.``
character will not match the new line character ``\\n``, which may be
present in a binary value.
:type regex: bytes
:param regex: A regular expression (RE2) to match cells from column that
match this regex (irrespective of column family).
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(column_qualifier_regex_filter=self.regex)
class TimestampRange(object):
"""Range of time with inclusive lower and exclusive upper bounds.
:type start: :class:`datetime.datetime`
:param start: (Optional) The (inclusive) lower bound of the timestamp
range. If omitted, defaults to Unix epoch.
:type end: :class:`datetime.datetime`
:param end: (Optional) The (exclusive) upper bound of the timestamp
range. If omitted, no upper bound is used.
"""
def __init__(self, start=None, end=None):
self.start = start
self.end = end
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.start == self.start and
other.end == self.end)
def __ne__(self, other):
return not self.__eq__(other)
def to_pb(self):
"""Converts the :class:`TimestampRange` to a protobuf.
:rtype: :class:`.data_v2_pb2.TimestampRange`
:returns: The converted current object.
"""
timestamp_range_kwargs = {}
if self.start is not None:
timestamp_range_kwargs['start_timestamp_micros'] = (
_microseconds_from_datetime(self.start))
if self.end is not None:
timestamp_range_kwargs['end_timestamp_micros'] = (
_microseconds_from_datetime(self.end))
return data_v2_pb2.TimestampRange(**timestamp_range_kwargs)
class TimestampRangeFilter(RowFilter):
"""Row filter that limits cells to a range of time.
:type range_: :class:`TimestampRange`
:param range_: Range of time that cells should match against.
"""
def __init__(self, range_):
self.range_ = range_
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.range_ == self.range_
def to_pb(self):
"""Converts the row filter to a protobuf.
First converts the ``range_`` on the current object to a protobuf and
then uses it in the ``timestamp_range_filter`` field.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(
timestamp_range_filter=self.range_.to_pb())
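# Illustrative sketch, not part of the original module: restrict cells to
# those written during 2015.  The dates are arbitrary examples and the
# helper name is hypothetical; note that converting to a protobuf via
# ``to_pb()`` may require timezone-aware datetimes.
def _example_timestamp_range_filter():
    import datetime
    range_2015 = TimestampRange(
        start=datetime.datetime(2015, 1, 1),
        end=datetime.datetime(2016, 1, 1))
    return TimestampRangeFilter(range_2015)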
class ColumnRangeFilter(RowFilter):
"""A row filter to restrict to a range of columns.
Both the start and end column can be included or excluded in the range.
By default, we include them both, but this can be changed with optional
flags.
:type column_family_id: str
:param column_family_id: The column family that contains the columns. Must
be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
:type start_column: bytes
:param start_column: The start of the range of columns. If no value is
                         used, the backend applies no lower bound to the
values.
:type end_column: bytes
:param end_column: The end of the range of columns. If no value is used,
the backend applies no upper bound to the values.
:type inclusive_start: bool
:param inclusive_start: Boolean indicating if the start column should be
included in the range (or excluded). Defaults
to :data:`True` if ``start_column`` is passed and
no ``inclusive_start`` was given.
:type inclusive_end: bool
:param inclusive_end: Boolean indicating if the end column should be
included in the range (or excluded). Defaults
to :data:`True` if ``end_column`` is passed and
no ``inclusive_end`` was given.
:raises: :class:`ValueError <exceptions.ValueError>` if ``inclusive_start``
is set but no ``start_column`` is given or if ``inclusive_end``
is set but no ``end_column`` is given
"""
def __init__(self, column_family_id, start_column=None, end_column=None,
inclusive_start=None, inclusive_end=None):
self.column_family_id = column_family_id
if inclusive_start is None:
inclusive_start = True
elif start_column is None:
raise ValueError('Inclusive start was specified but no '
'start column was given.')
self.start_column = start_column
self.inclusive_start = inclusive_start
if inclusive_end is None:
inclusive_end = True
elif end_column is None:
raise ValueError('Inclusive end was specified but no '
'end column was given.')
self.end_column = end_column
self.inclusive_end = inclusive_end
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.column_family_id == self.column_family_id and
other.start_column == self.start_column and
other.end_column == self.end_column and
other.inclusive_start == self.inclusive_start and
other.inclusive_end == self.inclusive_end)
def to_pb(self):
"""Converts the row filter to a protobuf.
First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it
in the ``column_range_filter`` field.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
column_range_kwargs = {'family_name': self.column_family_id}
if self.start_column is not None:
if self.inclusive_start:
key = 'start_qualifier_closed'
else:
key = 'start_qualifier_open'
column_range_kwargs[key] = _to_bytes(self.start_column)
if self.end_column is not None:
if self.inclusive_end:
key = 'end_qualifier_closed'
else:
key = 'end_qualifier_open'
column_range_kwargs[key] = _to_bytes(self.end_column)
column_range = data_v2_pb2.ColumnRange(**column_range_kwargs)
return data_v2_pb2.RowFilter(column_range_filter=column_range)
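# Illustrative sketch, not part of the original module: match columns
# ``col-a`` (inclusive) through ``col-z`` (exclusive) in a hypothetical
# column family ``cf1``.  The helper name is hypothetical.
def _example_column_range_filter():
    return ColumnRangeFilter('cf1',
                             start_column=b'col-a',
                             end_column=b'col-z',
                             inclusive_end=False)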
class ValueRegexFilter(_RegexFilter):
"""Row filter for a value regular expression.
The ``regex`` must be valid RE2 patterns. See Google's
`RE2 reference`_ for the accepted syntax.
.. _RE2 reference: https://github.com/google/re2/wiki/Syntax
.. note::
        Special care must be taken with the expression used. Since
each of these properties can contain arbitrary bytes, the ``\\C``
escape sequence must be used if a true wildcard is desired. The ``.``
character will not match the new line character ``\\n``, which may be
present in a binary value.
:type regex: bytes
:param regex: A regular expression (RE2) to match cells with values that
match this regex.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(value_regex_filter=self.regex)
class ValueRangeFilter(RowFilter):
"""A range of values to restrict to in a row filter.
Will only match cells that have values in this range.
Both the start and end value can be included or excluded in the range.
By default, we include them both, but this can be changed with optional
flags.
:type start_value: bytes
:param start_value: The start of the range of values. If no value is used,
the backend applies no lower bound to the values.
:type end_value: bytes
:param end_value: The end of the range of values. If no value is used,
the backend applies no upper bound to the values.
:type inclusive_start: bool
:param inclusive_start: Boolean indicating if the start value should be
included in the range (or excluded). Defaults
to :data:`True` if ``start_value`` is passed and
no ``inclusive_start`` was given.
:type inclusive_end: bool
:param inclusive_end: Boolean indicating if the end value should be
included in the range (or excluded). Defaults
to :data:`True` if ``end_value`` is passed and
no ``inclusive_end`` was given.
:raises: :class:`ValueError <exceptions.ValueError>` if ``inclusive_start``
is set but no ``start_value`` is given or if ``inclusive_end``
is set but no ``end_value`` is given
"""
def __init__(self, start_value=None, end_value=None,
inclusive_start=None, inclusive_end=None):
if inclusive_start is None:
inclusive_start = True
elif start_value is None:
raise ValueError('Inclusive start was specified but no '
'start value was given.')
self.start_value = start_value
self.inclusive_start = inclusive_start
if inclusive_end is None:
inclusive_end = True
elif end_value is None:
raise ValueError('Inclusive end was specified but no '
'end value was given.')
self.end_value = end_value
self.inclusive_end = inclusive_end
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.start_value == self.start_value and
other.end_value == self.end_value and
other.inclusive_start == self.inclusive_start and
other.inclusive_end == self.inclusive_end)
def to_pb(self):
"""Converts the row filter to a protobuf.
First converts to a :class:`.data_v2_pb2.ValueRange` and then uses
it to create a row filter protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
value_range_kwargs = {}
if self.start_value is not None:
if self.inclusive_start:
key = 'start_value_closed'
else:
key = 'start_value_open'
value_range_kwargs[key] = _to_bytes(self.start_value)
if self.end_value is not None:
if self.inclusive_end:
key = 'end_value_closed'
else:
key = 'end_value_open'
value_range_kwargs[key] = _to_bytes(self.end_value)
value_range = data_v2_pb2.ValueRange(**value_range_kwargs)
return data_v2_pb2.RowFilter(value_range_filter=value_range)
class _CellCountFilter(RowFilter):
"""Row filter that uses an integer count of cells.
The cell count is used as an offset or a limit for the number
of results returned.
:type num_cells: int
:param num_cells: An integer count / offset / limit.
"""
def __init__(self, num_cells):
self.num_cells = num_cells
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.num_cells == self.num_cells
class CellsRowOffsetFilter(_CellCountFilter):
"""Row filter to skip cells in a row.
:type num_cells: int
:param num_cells: Skips the first N cells of the row.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(
cells_per_row_offset_filter=self.num_cells)
class CellsRowLimitFilter(_CellCountFilter):
"""Row filter to limit cells in a row.
:type num_cells: int
:param num_cells: Matches only the first N cells of the row.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells)
class CellsColumnLimitFilter(_CellCountFilter):
"""Row filter to limit cells in a column.
:type num_cells: int
:param num_cells: Matches only the most recent N cells within each column.
This filters a (family name, column) pair, based on
timestamps of each cell.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(
cells_per_column_limit_filter=self.num_cells)
class StripValueTransformerFilter(_BoolFilter):
"""Row filter that transforms cells into empty string (0 bytes).
:type flag: bool
:param flag: If :data:`True`, replaces each cell's value with the empty
string. As the name indicates, this is more useful as a
transformer than a generic query / filter.
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(strip_value_transformer=self.flag)
class ApplyLabelFilter(RowFilter):
"""Filter to apply labels to cells.
Intended to be used as an intermediate filter on a pre-existing filtered
result set. This way if two sets are combined, the label can tell where
    the cell(s) originated. This allows the client to determine which results
were produced from which part of the filter.
.. note::
Due to a technical limitation of the backend, it is not currently
possible to apply multiple labels to a cell.
:type label: str
:param label: Label to apply to cells in the output row. Values must be
at most 15 characters long, and match the pattern
``[a-z0-9\\-]+``.
"""
def __init__(self, label):
self.label = label
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.label == self.label
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
return data_v2_pb2.RowFilter(apply_label_transformer=self.label)
class _FilterCombination(RowFilter):
"""Chain of row filters.
Sends rows through several filters in sequence. The filters are "chained"
together to process a row. After the first filter is applied, the second
is applied to the filtered output and so on for subsequent filters.
:type filters: list
:param filters: List of :class:`RowFilter`
"""
def __init__(self, filters=None):
if filters is None:
filters = []
self.filters = filters
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return other.filters == self.filters
class RowFilterChain(_FilterCombination):
"""Chain of row filters.
Sends rows through several filters in sequence. The filters are "chained"
together to process a row. After the first filter is applied, the second
is applied to the filtered output and so on for subsequent filters.
:type filters: list
:param filters: List of :class:`RowFilter`
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
chain = data_v2_pb2.RowFilter.Chain(
filters=[row_filter.to_pb() for row_filter in self.filters])
return data_v2_pb2.RowFilter(chain=chain)
class RowFilterUnion(_FilterCombination):
"""Union of row filters.
Sends rows through several filters simultaneously, then
merges / interleaves all the filtered results together.
If multiple cells are produced with the same column and timestamp,
they will all appear in the output row in an unspecified mutual order.
:type filters: list
:param filters: List of :class:`RowFilter`
"""
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
interleave = data_v2_pb2.RowFilter.Interleave(
filters=[row_filter.to_pb() for row_filter in self.filters])
return data_v2_pb2.RowFilter(interleave=interleave)
class ConditionalRowFilter(RowFilter):
"""Conditional row filter which exhibits ternary behavior.
Executes one of two filters based on another filter. If the ``base_filter``
returns any cells in the row, then ``true_filter`` is executed. If not,
then ``false_filter`` is executed.
.. note::
The ``base_filter`` does not execute atomically with the true and false
filters, which may lead to inconsistent or unexpected results.
Additionally, executing a :class:`ConditionalRowFilter` has poor
performance on the server, especially when ``false_filter`` is set.
:type base_filter: :class:`RowFilter`
:param base_filter: The filter to condition on before executing the
true/false filters.
:type true_filter: :class:`RowFilter`
:param true_filter: (Optional) The filter to execute if there are any cells
matching ``base_filter``. If not provided, no results
will be returned in the true case.
:type false_filter: :class:`RowFilter`
:param false_filter: (Optional) The filter to execute if there are no cells
matching ``base_filter``. If not provided, no results
will be returned in the false case.
"""
def __init__(self, base_filter, true_filter=None, false_filter=None):
self.base_filter = base_filter
self.true_filter = true_filter
self.false_filter = false_filter
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (other.base_filter == self.base_filter and
other.true_filter == self.true_filter and
other.false_filter == self.false_filter)
def to_pb(self):
"""Converts the row filter to a protobuf.
:rtype: :class:`.data_v2_pb2.RowFilter`
:returns: The converted current object.
"""
condition_kwargs = {'predicate_filter': self.base_filter.to_pb()}
if self.true_filter is not None:
condition_kwargs['true_filter'] = self.true_filter.to_pb()
if self.false_filter is not None:
condition_kwargs['false_filter'] = self.false_filter.to_pb()
condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs)
return data_v2_pb2.RowFilter(condition=condition)
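# Illustrative sketch (not part of the original module): one way the filter
# classes above can be composed. It uses only names defined in this module
# (and the data_v2_pb2 import it already relies on); the helper name itself
# is hypothetical.
def _example_compose_filters():
    # Keep only the newest cell per column, strip the cell values, and tag
    # the results with a label, then serialize everything to one protobuf.
    chain = RowFilterChain(filters=[
        CellsColumnLimitFilter(1),
        StripValueTransformerFilter(True),
        ApplyLabelFilter('keys-only'),
    ])
    return chain.to_pb()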
|
|
# -*- coding: utf-8 -*-
#
# SelfTest/PublicKey/test_importKey.py: Self-test for importing RSA keys
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
import unittest
import re
from Cryptodome.PublicKey import RSA
from Cryptodome.SelfTest.st_common import *
from Cryptodome.Util.py3compat import *
from Cryptodome.Util.number import inverse
from Cryptodome.Util import asn1
def der2pem(der, text='PUBLIC'):
import binascii
chunks = [ binascii.b2a_base64(der[i:i+48]) for i in range(0, len(der), 48) ]
pem = b('-----BEGIN %s KEY-----\n' % text)
pem += b('').join(chunks)
pem += b('-----END %s KEY-----' % text)
return pem
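# Illustrative sketch (not a test): der2pem() above base64-encodes the DER bytes
# in 48-byte chunks and wraps them in a PEM envelope. a2b_hex is assumed to be
# provided by the star imports above; the DER blob here is a throwaway value.
def _der2pem_example():
    der = a2b_hex('3003020101')         # tiny DER SEQUENCE, for illustration only
    return der2pem(der, text='PUBLIC')  # -----BEGIN PUBLIC KEY----- ... -----END PUBLIC KEY-----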
class ImportKeyTests(unittest.TestCase):
# 512-bit RSA key generated with openssl
rsaKeyPEM = u'''-----BEGIN RSA PRIVATE KEY-----
MIIBOwIBAAJBAL8eJ5AKoIsjURpcEoGubZMxLD7+kT+TLr7UkvEtFrRhDDKMtuII
q19FrL4pUIMymPMSLBn3hJLe30Dw48GQM4UCAwEAAQJACUSDEp8RTe32ftq8IwG8
Wojl5mAd1wFiIOrZ/Uv8b963WJOJiuQcVN29vxU5+My9GPZ7RA3hrDBEAoHUDPrI
OQIhAPIPLz4dphiD9imAkivY31Rc5AfHJiQRA7XixTcjEkojAiEAyh/pJHks/Mlr
+rdPNEpotBjfV4M4BkgGAA/ipcmaAjcCIQCHvhwwKVBLzzTscT2HeUdEeBMoiXXK
JACAr3sJQJGxIQIgarRp+m1WSKV1MciwMaTOnbU7wxFs9DP1pva76lYBzgUCIQC9
n0CnZCJ6IZYqSt0H5N7+Q+2Ro64nuwV/OSQfM6sBwQ==
-----END RSA PRIVATE KEY-----'''
# As above, but this is actually an unencrypted PKCS#8 key
rsaKeyPEM8 = u'''-----BEGIN PRIVATE KEY-----
MIIBVQIBADANBgkqhkiG9w0BAQEFAASCAT8wggE7AgEAAkEAvx4nkAqgiyNRGlwS
ga5tkzEsPv6RP5MuvtSS8S0WtGEMMoy24girX0WsvilQgzKY8xIsGfeEkt7fQPDj
wZAzhQIDAQABAkAJRIMSnxFN7fZ+2rwjAbxaiOXmYB3XAWIg6tn9S/xv3rdYk4mK
5BxU3b2/FTn4zL0Y9ntEDeGsMEQCgdQM+sg5AiEA8g8vPh2mGIP2KYCSK9jfVFzk
B8cmJBEDteLFNyMSSiMCIQDKH+kkeSz8yWv6t080Smi0GN9XgzgGSAYAD+KlyZoC
NwIhAIe+HDApUEvPNOxxPYd5R0R4EyiJdcokAICvewlAkbEhAiBqtGn6bVZIpXUx
yLAxpM6dtTvDEWz0M/Wm9rvqVgHOBQIhAL2fQKdkInohlipK3Qfk3v5D7ZGjrie7
BX85JB8zqwHB
-----END PRIVATE KEY-----'''
# The same RSA private key as in rsaKeyPEM, but now encrypted
rsaKeyEncryptedPEM=(
# PEM encryption
# With DES and passphrase 'test'
('test', u'''-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: DES-CBC,AF8F9A40BD2FA2FC
Ckl9ex1kaVEWhYC2QBmfaF+YPiR4NFkRXA7nj3dcnuFEzBnY5XULupqQpQI3qbfA
u8GYS7+b3toWWiHZivHbAAUBPDIZG9hKDyB9Sq2VMARGsX1yW1zhNvZLIiVJzUHs
C6NxQ1IJWOXzTew/xM2I26kPwHIvadq+/VaT8gLQdjdH0jOiVNaevjWnLgrn1mLP
BCNRMdcexozWtAFNNqSzfW58MJL2OdMi21ED184EFytIc1BlB+FZiGZduwKGuaKy
9bMbdb/1PSvsSzPsqW7KSSrTw6MgJAFJg6lzIYvR5F4poTVBxwBX3+EyEmShiaNY
IRX3TgQI0IjrVuLmvlZKbGWP18FXj7I7k9tSsNOOzllTTdq3ny5vgM3A+ynfAaxp
dysKznQ6P+IoqML1WxAID4aGRMWka+uArOJ148Rbj9s=
-----END RSA PRIVATE KEY-----'''),
# PKCS8 encryption
('winter', u'''-----BEGIN ENCRYPTED PRIVATE KEY-----
MIIBpjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIeZIsbW3O+JcCAggA
MBQGCCqGSIb3DQMHBAgSM2p0D8FilgSCAWBhFyP2tiGKVpGj3mO8qIBzinU60ApR
3unvP+N6j7LVgnV2lFGaXbJ6a1PbQXe+2D6DUyBLo8EMXrKKVLqOMGkFMHc0UaV6
R6MmrsRDrbOqdpTuVRW+NVd5J9kQQh4xnfU/QrcPPt7vpJvSf4GzG0n666Ki50OV
M/feuVlIiyGXY6UWdVDpcOV72cq02eNUs/1JWdh2uEBvA9fCL0c07RnMrdT+CbJQ
NjJ7f8ULtp7xvR9O3Al/yJ4Wv3i4VxF1f3MCXzhlUD4I0ONlr0kJWgeQ80q/cWhw
ntvgJwnCn2XR1h6LA8Wp+0ghDTsL2NhJpWd78zClGhyU4r3hqu1XDjoXa7YCXCix
jCV15+ViDJzlNCwg+W6lRg18sSLkCT7alviIE0U5tHc6UPbbHwT5QqAxAABaP+nZ
CGqJGyiwBzrKebjgSm/KRd4C91XqcsysyH2kKPfT51MLAoD4xelOURBP
-----END ENCRYPTED PRIVATE KEY-----'''
),
)
rsaPublicKeyPEM = u'''-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAL8eJ5AKoIsjURpcEoGubZMxLD7+kT+T
Lr7UkvEtFrRhDDKMtuIIq19FrL4pUIMymPMSLBn3hJLe30Dw48GQM4UCAwEAAQ==
-----END RSA PUBLIC KEY-----'''
# Obtained using 'ssh-keygen -i -m PKCS8 -f rsaPublicKeyPEM'
rsaPublicKeyOpenSSH = b('''ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAQQC/HieQCqCLI1EaXBKBrm2TMSw+/pE/ky6+1JLxLRa0YQwyjLbiCKtfRay+KVCDMpjzEiwZ94SS3t9A8OPBkDOF comment\n''')
# The private key, in PKCS#1 format encoded with DER
rsaKeyDER = a2b_hex(
'''3082013b020100024100bf1e27900aa08b23511a5c1281ae6d93312c3efe
913f932ebed492f12d16b4610c328cb6e208ab5f45acbe2950833298f312
2c19f78492dedf40f0e3c190338502030100010240094483129f114dedf6
7edabc2301bc5a88e5e6601dd7016220ead9fd4bfc6fdeb75893898ae41c
54ddbdbf1539f8ccbd18f67b440de1ac30440281d40cfac839022100f20f
2f3e1da61883f62980922bd8df545ce407c726241103b5e2c53723124a23
022100ca1fe924792cfcc96bfab74f344a68b418df578338064806000fe2
a5c99a023702210087be1c3029504bcf34ec713d877947447813288975ca
240080af7b094091b12102206ab469fa6d5648a57531c8b031a4ce9db53b
c3116cf433f5a6f6bbea5601ce05022100bd9f40a764227a21962a4add07
e4defe43ed91a3ae27bb057f39241f33ab01c1
'''.replace(" ",""))
# The private key, in unencrypted PKCS#8 format encoded with DER
rsaKeyDER8 = a2b_hex(
'''30820155020100300d06092a864886f70d01010105000482013f3082013
b020100024100bf1e27900aa08b23511a5c1281ae6d93312c3efe913f932
ebed492f12d16b4610c328cb6e208ab5f45acbe2950833298f3122c19f78
492dedf40f0e3c190338502030100010240094483129f114dedf67edabc2
301bc5a88e5e6601dd7016220ead9fd4bfc6fdeb75893898ae41c54ddbdb
f1539f8ccbd18f67b440de1ac30440281d40cfac839022100f20f2f3e1da
61883f62980922bd8df545ce407c726241103b5e2c53723124a23022100c
a1fe924792cfcc96bfab74f344a68b418df578338064806000fe2a5c99a0
23702210087be1c3029504bcf34ec713d877947447813288975ca240080a
f7b094091b12102206ab469fa6d5648a57531c8b031a4ce9db53bc3116cf
433f5a6f6bbea5601ce05022100bd9f40a764227a21962a4add07e4defe4
3ed91a3ae27bb057f39241f33ab01c1
'''.replace(" ",""))
rsaPublicKeyDER = a2b_hex(
'''305c300d06092a864886f70d0101010500034b003048024100bf1e27900a
a08b23511a5c1281ae6d93312c3efe913f932ebed492f12d16b4610c328c
b6e208ab5f45acbe2950833298f3122c19f78492dedf40f0e3c190338502
03010001
'''.replace(" ",""))
n = long('BF 1E 27 90 0A A0 8B 23 51 1A 5C 12 81 AE 6D 93 31 2C 3E FE 91 3F 93 2E BE D4 92 F1 2D 16 B4 61 0C 32 8C B6 E2 08 AB 5F 45 AC BE 29 50 83 32 98 F3 12 2C 19 F7 84 92 DE DF 40 F0 E3 C1 90 33 85'.replace(" ",""),16)
e = 65537L
d = long('09 44 83 12 9F 11 4D ED F6 7E DA BC 23 01 BC 5A 88 E5 E6 60 1D D7 01 62 20 EA D9 FD 4B FC 6F DE B7 58 93 89 8A E4 1C 54 DD BD BF 15 39 F8 CC BD 18 F6 7B 44 0D E1 AC 30 44 02 81 D4 0C FA C8 39'.replace(" ",""),16)
p = long('00 F2 0F 2F 3E 1D A6 18 83 F6 29 80 92 2B D8 DF 54 5C E4 07 C7 26 24 11 03 B5 E2 C5 37 23 12 4A 23'.replace(" ",""),16)
q = long('00 CA 1F E9 24 79 2C FC C9 6B FA B7 4F 34 4A 68 B4 18 DF 57 83 38 06 48 06 00 0F E2 A5 C9 9A 02 37'.replace(" ",""),16)
# This is q^{-1} mod p. fastmath and slowmath use pInv (p^{-1}
# mod q) instead!
qInv = long('00 BD 9F 40 A7 64 22 7A 21 96 2A 4A DD 07 E4 DE FE 43 ED 91 A3 AE 27 BB 05 7F 39 24 1F 33 AB 01 C1'.replace(" ",""),16)
pInv = inverse(p,q)
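# Sanity-check sketch (comment only): by construction (pInv * p) % q == 1, and if
# the hard-coded qInv above is indeed q^{-1} mod p, then (qInv * q) % p == 1 too.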
def testImportKey1(self):
"""Verify import of RSAPrivateKey DER SEQUENCE"""
key = RSA.importKey(self.rsaKeyDER)
self.failUnless(key.has_private())
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
def testImportKey2(self):
"""Verify import of SubjectPublicKeyInfo DER SEQUENCE"""
key = RSA.importKey(self.rsaPublicKeyDER)
self.failIf(key.has_private())
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
def testImportKey3unicode(self):
"""Verify import of RSAPrivateKey DER SEQUENCE, encoded with PEM as unicode"""
key = RSA.importKey(self.rsaKeyPEM)
self.assertEqual(key.has_private(),True) # assert_
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
def testImportKey3bytes(self):
"""Verify import of RSAPrivateKey DER SEQUENCE, encoded with PEM as byte string"""
key = RSA.importKey(b(self.rsaKeyPEM))
self.assertEqual(key.has_private(),True) # assert_
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
def testImportKey4unicode(self):
"""Verify import of RSAPrivateKey DER SEQUENCE, encoded with PEM as unicode"""
key = RSA.importKey(self.rsaPublicKeyPEM)
self.assertEqual(key.has_private(),False) # failIf
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
def testImportKey4bytes(self):
"""Verify import of SubjectPublicKeyInfo DER SEQUENCE, encoded with PEM as byte string"""
key = RSA.importKey(b(self.rsaPublicKeyPEM))
self.assertEqual(key.has_private(),False) # failIf
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
def testImportKey5(self):
"""Verifies that the imported key is still a valid RSA pair"""
key = RSA.importKey(self.rsaKeyPEM)
idem = key._encrypt(key._decrypt(89L))
self.assertEqual(idem, 89L)
def testImportKey6(self):
"""Verifies that the imported key is still a valid RSA pair"""
key = RSA.importKey(self.rsaKeyDER)
idem = key._encrypt(key._decrypt(65L))
self.assertEqual(idem, 65L)
def testImportKey7(self):
"""Verify import of OpenSSH public key"""
key = RSA.importKey(self.rsaPublicKeyOpenSSH)
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
def testImportKey8(self):
"""Verify import of encrypted PrivateKeyInfo DER SEQUENCE"""
for t in self.rsaKeyEncryptedPEM:
key = RSA.importKey(t[1], t[0])
self.failUnless(key.has_private())
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
def testImportKey9(self):
"""Verify import of unencrypted PrivateKeyInfo DER SEQUENCE"""
key = RSA.importKey(self.rsaKeyDER8)
self.failUnless(key.has_private())
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
def testImportKey10(self):
"""Verify import of unencrypted PrivateKeyInfo DER SEQUENCE, encoded with PEM"""
key = RSA.importKey(self.rsaKeyPEM8)
self.failUnless(key.has_private())
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
def testImportKey11(self):
"""Verify import of RSAPublicKey DER SEQUENCE"""
der = asn1.DerSequence([17, 3]).encode()
key = RSA.importKey(der)
self.assertEqual(key.n, 17)
self.assertEqual(key.e, 3)
def testImportKey12(self):
"""Verify import of RSAPublicKey DER SEQUENCE, encoded with PEM"""
der = asn1.DerSequence([17, 3]).encode()
pem = der2pem(der)
key = RSA.importKey(pem)
self.assertEqual(key.n, 17)
self.assertEqual(key.e, 3)
def test_import_key_windows_cr_lf(self):
pem_cr_lf = "\r\n".join(self.rsaKeyPEM.splitlines())
key = RSA.importKey(pem_cr_lf)
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
self.assertEqual(key.d, self.d)
self.assertEqual(key.p, self.p)
self.assertEqual(key.q, self.q)
###
def testExportKey1(self):
key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
derKey = key.exportKey("DER")
self.assertEqual(derKey, self.rsaKeyDER)
def testExportKey2(self):
key = RSA.construct([self.n, self.e])
derKey = key.exportKey("DER")
self.assertEqual(derKey, self.rsaPublicKeyDER)
def testExportKey3(self):
key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
pemKey = key.exportKey("PEM")
self.assertEqual(pemKey, b(self.rsaKeyPEM))
def testExportKey4(self):
key = RSA.construct([self.n, self.e])
pemKey = key.exportKey("PEM")
self.assertEqual(pemKey, b(self.rsaPublicKeyPEM))
def testExportKey5(self):
key = RSA.construct([self.n, self.e])
openssh_1 = key.exportKey("OpenSSH").split()
openssh_2 = self.rsaPublicKeyOpenSSH.split()
self.assertEqual(openssh_1[0], openssh_2[0])
self.assertEqual(openssh_1[1], openssh_2[1])
def testExportKey7(self):
key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
derKey = key.exportKey("DER", pkcs=8)
self.assertEqual(derKey, self.rsaKeyDER8)
def testExportKey8(self):
key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
pemKey = key.exportKey("PEM", pkcs=8)
self.assertEqual(pemKey, b(self.rsaKeyPEM8))
def testExportKey9(self):
key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
self.assertRaises(ValueError, key.exportKey, "invalid-format")
def testExportKey10(self):
# Export and re-import the encrypted key. It must match.
# PEM envelope, PKCS#1, old PEM encryption
key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
outkey = key.exportKey('PEM', 'test')
self.failUnless(tostr(outkey).find('4,ENCRYPTED')!=-1)
self.failUnless(tostr(outkey).find('BEGIN RSA PRIVATE KEY')!=-1)
inkey = RSA.importKey(outkey, 'test')
self.assertEqual(key.n, inkey.n)
self.assertEqual(key.e, inkey.e)
self.assertEqual(key.d, inkey.d)
def testExportKey11(self):
# Export and re-import the encrypted key. It must match.
# PEM envelope, PKCS#1, old PEM encryption
key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
outkey = key.exportKey('PEM', 'test', pkcs=1)
self.failUnless(tostr(outkey).find('4,ENCRYPTED')!=-1)
self.failUnless(tostr(outkey).find('BEGIN RSA PRIVATE KEY')!=-1)
inkey = RSA.importKey(outkey, 'test')
self.assertEqual(key.n, inkey.n)
self.assertEqual(key.e, inkey.e)
self.assertEqual(key.d, inkey.d)
def testExportKey12(self):
# Export and re-import the encrypted key. It must match.
# PEM envelope, PKCS#8, old PEM encryption
key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
outkey = key.exportKey('PEM', 'test', pkcs=8)
self.failUnless(tostr(outkey).find('4,ENCRYPTED')!=-1)
self.failUnless(tostr(outkey).find('BEGIN PRIVATE KEY')!=-1)
inkey = RSA.importKey(outkey, 'test')
self.assertEqual(key.n, inkey.n)
self.assertEqual(key.e, inkey.e)
self.assertEqual(key.d, inkey.d)
def testExportKey13(self):
# Export and re-import the encrypted key. It must match.
# PEM envelope, PKCS#8, PKCS#8 encryption
key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
outkey = key.exportKey('PEM', 'test', pkcs=8,
protection='PBKDF2WithHMAC-SHA1AndDES-EDE3-CBC')
self.failUnless(tostr(outkey).find('4,ENCRYPTED')==-1)
self.failUnless(tostr(outkey).find('BEGIN ENCRYPTED PRIVATE KEY')!=-1)
inkey = RSA.importKey(outkey, 'test')
self.assertEqual(key.n, inkey.n)
self.assertEqual(key.e, inkey.e)
self.assertEqual(key.d, inkey.d)
def testExportKey14(self):
# Export and re-import the encrypted key. It must match.
# DER envelope, PKCS#8, PKCS#8 encryption
key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
outkey = key.exportKey('DER', 'test', pkcs=8)
inkey = RSA.importKey(outkey, 'test')
self.assertEqual(key.n, inkey.n)
self.assertEqual(key.e, inkey.e)
self.assertEqual(key.d, inkey.d)
def testExportKey15(self):
# Verify that an error condition is detected when trying to
# use a password with DER encoding and PKCS#1.
key = RSA.construct([self.n, self.e, self.d, self.p, self.q, self.pInv])
self.assertRaises(ValueError, key.exportKey, 'DER', 'test', 1)
def test_import_key(self):
"""Verify that import_key is an alias to importKey"""
key = RSA.import_key(self.rsaPublicKeyDER)
self.failIf(key.has_private())
self.assertEqual(key.n, self.n)
self.assertEqual(key.e, self.e)
class ImportKeyFromX509Cert(unittest.TestCase):
def test_x509v1(self):
# Sample V1 certificate with a 1024 bit RSA key
x509_v1_cert = """
-----BEGIN CERTIFICATE-----
MIICOjCCAaMCAQEwDQYJKoZIhvcNAQEEBQAwfjENMAsGA1UEChMEQWNtZTELMAkG
A1UECxMCUkQxHDAaBgkqhkiG9w0BCQEWDXNwYW1AYWNtZS5vcmcxEzARBgNVBAcT
Ck1ldHJvcG9saXMxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYDVQQGEwJVUzENMAsG
A1UEAxMEdGVzdDAeFw0xNDA3MTExOTU3MjRaFw0xNzA0MDYxOTU3MjRaME0xCzAJ
BgNVBAYTAlVTMREwDwYDVQQIEwhOZXcgWW9yazENMAsGA1UEChMEQWNtZTELMAkG
A1UECxMCUkQxDzANBgNVBAMTBmxhdHZpYTCBnzANBgkqhkiG9w0BAQEFAAOBjQAw
gYkCgYEAyG+kytdRj3TFbRmHDYp3TXugVQ81chew0qeOxZWOz80IjtWpgdOaCvKW
NCuc8wUR9BWrEQW+39SaRMLiQfQtyFSQZijc3nsEBu/Lo4uWZ0W/FHDRVSvkJA/V
Ex5NL5ikI+wbUeCV5KajGNDalZ8F1pk32+CBs8h1xNx5DyxuEHUCAwEAATANBgkq
hkiG9w0BAQQFAAOBgQCVQF9Y//Q4Psy+umEM38pIlbZ2hxC5xNz/MbVPwuCkNcGn
KYNpQJP+JyVTsPpO8RLZsAQDzRueMI3S7fbbwTzAflN0z19wvblvu93xkaBytVok
9VBAH28olVhy9b1MMeg2WOt5sUEQaFNPnwwsyiY9+HsRpvpRnPSQF+kyYVsshQ==
-----END CERTIFICATE-----
""".strip()
# RSA public key as dumped by openssl
exponent = 65537
modulus_str = """
00:c8:6f:a4:ca:d7:51:8f:74:c5:6d:19:87:0d:8a:
77:4d:7b:a0:55:0f:35:72:17:b0:d2:a7:8e:c5:95:
8e:cf:cd:08:8e:d5:a9:81:d3:9a:0a:f2:96:34:2b:
9c:f3:05:11:f4:15:ab:11:05:be:df:d4:9a:44:c2:
e2:41:f4:2d:c8:54:90:66:28:dc:de:7b:04:06:ef:
cb:a3:8b:96:67:45:bf:14:70:d1:55:2b:e4:24:0f:
d5:13:1e:4d:2f:98:a4:23:ec:1b:51:e0:95:e4:a6:
a3:18:d0:da:95:9f:05:d6:99:37:db:e0:81:b3:c8:
75:c4:dc:79:0f:2c:6e:10:75
"""
modulus = int(re.sub("[^0-9a-f]","", modulus_str), 16)
key = RSA.importKey(x509_v1_cert)
self.assertEqual(key.e, exponent)
self.assertEqual(key.n, modulus)
self.failIf(key.has_private())
def test_x509v3(self):
# Sample V3 certificate with a 1024 bit RSA key
x509_v3_cert = """
-----BEGIN CERTIFICATE-----
MIIEcjCCAlqgAwIBAgIBATANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQGEwJVUzEL
MAkGA1UECAwCTUQxEjAQBgNVBAcMCUJhbHRpbW9yZTEQMA4GA1UEAwwHVGVzdCBD
QTEfMB0GCSqGSIb3DQEJARYQdGVzdEBleGFtcGxlLmNvbTAeFw0xNDA3MTIwOTM1
MTJaFw0xNzA0MDcwOTM1MTJaMEQxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJNRDES
MBAGA1UEBwwJQmFsdGltb3JlMRQwEgYDVQQDDAtUZXN0IFNlcnZlcjCBnzANBgkq
hkiG9w0BAQEFAAOBjQAwgYkCgYEA/S7GJV2OcFdyNMQ4K75KrYFtMEn3VnEFdPHa
jyS37XlMxSh0oS4GeTGVUCJInl5Cpsv8WQdh03FfeOdvzp5IZ46OcjeOPiWnmjgl
2G5j7e2bDH7RSchGV+OD6Fb1Agvuu2/9iy8fdf3rPQ/7eAddzKUrzwacVbnW+tg2
QtSXKRcCAwEAAaOB1TCB0jAdBgNVHQ4EFgQU/WwCX7FfWMIPDFfJ+I8a2COG+l8w
HwYDVR0jBBgwFoAUa0hkif3RMaraiWtsOOZZlLu9wJwwCQYDVR0TBAIwADALBgNV
HQ8EBAMCBeAwSgYDVR0RBEMwQYILZXhhbXBsZS5jb22CD3d3dy5leGFtcGxlLmNv
bYIQbWFpbC5leGFtcGxlLmNvbYIPZnRwLmV4YW1wbGUuY29tMCwGCWCGSAGG+EIB
DQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsF
AAOCAgEAvO6xfdsGbnoK4My3eJthodTAjMjPwFVY133LH04QLcCv54TxKhtUg1fi
PgdjVe1HpTytPBfXy2bSZbXAN0abZCtw1rYrnn7o1g2pN8iypVq3zVn0iMTzQzxs
zEPO3bpR/UhNSf90PmCsS5rqZpAAnXSaAy1ClwHWk/0eG2pYkhE1m1ABVMN2lsAW
e9WxGk6IFqaI9O37NYQwmEypMs4DC+ECJEvbPFiqi3n0gbXCZJJ6omDA5xJldaYK
Oa7KR3s/qjBsu9UAiWpLBuFoSTHIF2aeRKRFmUdmzwo43eVPep65pY6eQ4AdL2RF
rqEuINbGlzI5oQyYhu71IwB+iPZXaZZPlwjLgOsuad/p2hOgDb5WxUi8FnDPursQ
ujfpIpmrOP/zpvvQWnwePI3lI+5n41kTBSbefXEdv6rXpHk3QRzB90uPxnXPdxSC
16ASA8bQT5an/1AgoE3k9CrcD2K0EmgaX0YI0HUhkyzbkg34EhpWJ6vvRUbRiNRo
9cIbt/ya9Y9u0Ja8GLXv6dwX0l0IdJMkL8KifXUFAVCujp1FBrr/gdmwQn8itANy
+qbnWSxmOvtaY0zcaFAcONuHva0h51/WqXOMO1eb8PhR4HIIYU8p1oBwQp7dSni8
THDi1F+GG5PsymMDj5cWK42f+QzjVw5PrVmFqqrrEoMlx8DWh5Y=
-----END CERTIFICATE-----
""".strip()
# RSA public key as dumped by openssl
exponent = 65537
modulus_str = """
00:fd:2e:c6:25:5d:8e:70:57:72:34:c4:38:2b:be:
4a:ad:81:6d:30:49:f7:56:71:05:74:f1:da:8f:24:
b7:ed:79:4c:c5:28:74:a1:2e:06:79:31:95:50:22:
48:9e:5e:42:a6:cb:fc:59:07:61:d3:71:5f:78:e7:
6f:ce:9e:48:67:8e:8e:72:37:8e:3e:25:a7:9a:38:
25:d8:6e:63:ed:ed:9b:0c:7e:d1:49:c8:46:57:e3:
83:e8:56:f5:02:0b:ee:bb:6f:fd:8b:2f:1f:75:fd:
eb:3d:0f:fb:78:07:5d:cc:a5:2b:cf:06:9c:55:b9:
d6:fa:d8:36:42:d4:97:29:17
"""
modulus = int(re.sub("[^0-9a-f]","", modulus_str), 16)
key = RSA.importKey(x509_v3_cert)
self.assertEqual(key.e, exponent)
self.assertEqual(key.n, modulus)
self.failIf(key.has_private())
if __name__ == '__main__':
unittest.main()
def get_tests(config={}):
tests = []
tests += list_test_cases(ImportKeyTests)
tests += list_test_cases(ImportKeyFromX509Cert)
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
|
from rest_framework import generics
import django_filters.rest_framework
from qmpy.web.serializers.optimade import OptimadeStructureSerializer
from qmpy.materials.formation_energy import FormationEnergy
from qmpy.materials.entry import Composition
from qmpy.models import Formation
from qmpy.utils import query_to_Q, parse_formula_regex
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.response import Response
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework_xml.renderers import XMLRenderer
from rest_framework_yaml.renderers import YAMLRenderer
from qmpy.rester import qmpy_rester
from django.http import HttpResponse, JsonResponse
from collections import OrderedDict
from qmpy.utils import oqmd_optimade as oqop
import time
import datetime
BASE_URL = qmpy_rester.REST_OPTIMADE
class OptimadeStructureDetail(generics.RetrieveAPIView):
queryset = FormationEnergy.objects.filter(fit="standard")
serializer_class = OptimadeStructureSerializer
renderer_classes = [JSONRenderer, XMLRenderer, YAMLRenderer, BrowsableAPIRenderer]
def retrieve(self, request, *args, **kwargs):
structure_id = request.path.strip("/").split("/")[-1]
self.queryset = self.queryset.filter(id=structure_id)
instance = self.get_object()
serializer = self.get_serializer(instance)
_data = [serializer.data]
data = []
for _item in _data:
item = OrderedDict([("id", _item["id"]), ("type", _item["type"])])
del _item["id"]
del _item["type"]
item["attributes"] = _item
data.append(item)
_data = serializer.data
data = OrderedDict([("id", _data["id"]), ("type", _data["type"])])
del _data["id"]
del _data["type"]
data["attributes"] = _data
full_url = request.build_absolute_uri()
representation = full_url.replace(BASE_URL, "")
time_now = time.time()
time_stamp = datetime.datetime.fromtimestamp(time_now).strftime(
"%Y-%m-%d %H:%M:%S"
)
meta_list = [
(
"query",
{
"representation": representation,
},
),
("api_version", "1.0.0"),
("time_stamp", time_stamp),
("data_returned", 1),
("data_available", Formation.objects.filter(fit="standard").count()),
("more_data_available", False),
(
"provider",
OrderedDict(
[
("name", "OQMD"),
("description", "The Open Quantum Materials Database"),
("prefix", "oqmd"),
("homepage", "http://oqmd.org"),
]
),
),
("warnings", []),
("response_message", "OK"),
]
return Response(
OrderedDict(
[
(
"links",
OrderedDict(
[
("next", None),
("previous", None),
(
"base_url",
{
"href": BASE_URL,
"meta": {"_oqmd_version": "1.0"},
},
),
]
),
),
("resource", {}),
("data", data),
("meta", OrderedDict(meta_list)),
]
)
)
class OptimadePagination(LimitOffsetPagination):
default_limit = 50
offset_query_param = "page_offset"
limit_query_param = "page_limit"
def get_paginated_response(self, page_data):
_data = page_data["data"]
data = []
for _item in _data:
item = OrderedDict([("id", _item["id"]), ("type", _item["type"])])
del _item["id"]
del _item["type"]
item["attributes"] = _item
data.append(item)
request = page_data["request"]
full_url = request.build_absolute_uri()
representation = full_url.replace(BASE_URL, "")
time_now = time.time()
time_stamp = datetime.datetime.fromtimestamp(time_now).strftime(
"%Y-%m-%d %H:%M:%S"
)
_oqmd_final_query = (
page_data["meta"]["django_query"]
if "django_query" in page_data["meta"]
else None
)
_warnings = (
page_data["meta"]["warnings"] if "warnings" in page_data["meta"] else []
)
if (not _warnings) and (not _oqmd_final_query):
_warnings = [
{
"type": "warning",
"detail": "_oqmd_NoFilterWarning: No filters were provided in the query",
}
]
meta_list = [
(
"query",
{
"representation": representation,
"_oqmd_final_query": _oqmd_final_query,
},
),
("api_version", "1.0.0"),
("time_stamp", time_stamp),
(
"_oqmd_data_in_response",
min(
self.get_limit(request),
self.count - self.get_offset(request),
),
),
("data_returned", self.count),
("data_available", Formation.objects.filter(fit="standard").count()),
(
"more_data_available",
(self.get_next_link() != None) or (self.get_previous_link() != None),
),
(
"provider",
OrderedDict(
[
("name", "OQMD"),
("description", "The Open Quantum Materials Database"),
("prefix", "oqmd"),
("homepage", "http://oqmd.org"),
]
),
),
("warnings", _warnings),
("response_message", "OK"),
]
return Response(
OrderedDict(
[
(
"links",
OrderedDict(
[
("next", self.get_next_link()),
("previous", self.get_previous_link()),
(
"base_url",
{
"href": BASE_URL,
"meta": {"_oqmd_version": "1.0"},
},
),
]
),
),
("resource", {}),
("data", data),
("meta", OrderedDict(meta_list)),
]
)
)
class OptimadeStructureList(generics.ListAPIView):
serializer_class = OptimadeStructureSerializer
pagination_class = OptimadePagination
renderer_classes = [JSONRenderer, XMLRenderer, YAMLRenderer, BrowsableAPIRenderer]
def get_queryset(self):
fes = FormationEnergy.objects.filter(fit="standard")
fes, meta_info = self.filter(fes)
return (fes, meta_info)
def list(self, request, *args, **kwargs):
query_set, meta_info = self.get_queryset()
page = self.paginate_queryset(query_set)
serializer = self.get_serializer(page, many=True)
page_data = {
"data": serializer.data,
"request": self.request,
"meta": meta_info,
}
return self.get_paginated_response(page_data)
def filter(self, fes):
request = self.request
filters = request.GET.get("filter", False)
if not filters:
meta_data = {
"warnings": [
{
"type": "warning",
"detail": "_oqmd_NoFilterWarning: No filters were provided in the query. Returning all structures",
}
],
}
return fes, meta_data
# shortcut to get all stable phases
filters = filters.replace("stability=0", "stability<=0")
filters = filters.replace("&", " AND ")
filters = filters.replace("|", " OR ")
filters = filters.replace("~", " NOT ")
q, meta_info = query_to_Q(filters)
if not q:
return ([], meta_info)
fes = fes.filter(q)
return (fes, meta_info)
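# Example of the rewriting done in filter() above (values are illustrative): an
# incoming OPTIMADE filter string such as
#     stability=0 & natoms<10
# becomes
#     stability<=0 AND natoms<10
# before query_to_Q() turns it into a Django Q object for the queryset.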
def OptimadeInfoData(request):
data = oqop.get_optimade_data("info")
return HttpResponse(data, content_type="application/json")
def OptimadeVersionsData(request):
data = oqop.get_optimade_data("versions")
return HttpResponse(data, content_type="text/plain")
def OptimadeVersionPage(request):
versions = oqop.get_optimade_data("versions").strip().split("\n")[1:]
versions = ["v{}".format(item) for item in versions]
request_version = request.path.strip("/").split("/")[-1]
data = {"query": request.path}
if request_version in versions:
return JsonResponse(data)
else:
data["error"] = "Version not supported"
return JsonResponse({"status": "false", "message": data}, status=553)
def OptimadeLinksData(request):
data = oqop.get_optimade_data("links")
return HttpResponse(data, content_type="application/json")
def OptimadeStructuresInfoData(request):
data = oqop.get_optimade_data("info.structures")
return HttpResponse(data, content_type="application/json")
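# Illustrative client-side sketch (not part of this module): paging through
# OptimadeStructureList with the parameters defined by OptimadePagination above
# ("page_limit", "page_offset", default 50 per page). The URL and the use of the
# requests library are assumptions, not part of this code base.
def _example_paged_request(base_url):
    import requests  # assumed to be available in the client environment
    return requests.get(
        base_url + "/structures",
        params={"filter": "stability<=0", "page_limit": 10, "page_offset": 20},
    ).json()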
|
|
import operator
from copy import deepcopy
from collections import defaultdict
from itertools import chain, product
from StringIO import StringIO
from contextlib import closing
from pymaptools.io import SimplePicklableMixin
class GraphError(Exception):
"""Usually fatal"""
pass
class SkipEdge(Exception):
"""Raise to skip adding edge during misc graph operations"""
pass
def rename_store_weight(cls):
'''
Class decorator to allow Bigraph objects to use
the unsorted _store_weight() in place of
the sorted one while still allowing Bigraph descendants
to override _store_weight_sorted() on their own
'''
cls._store_weight_sorted = cls._store_weight
return cls
@rename_store_weight
class Bigraph(SimplePicklableMixin):
"""Undirected bipartite graph G = (U & V, E)
Note that U->V mapping is unnecessary if the only goal is to find bicliques.
No sorting in Bigraph -- classes that inherit from Bigraph will have to
define the _store_weight_sorted() method on their own.
Weights by default are assumed to be integers, and the default
instances serve as edge counters.
Example usage:
>>> g = Bigraph()
>>> g.add_clique(([1, 2, 3], [-1, -2, -3]))
>>> h = Bigraph(g)
>>> g.add_clique(([4], [-4, -5]))
>>> g.add_clique(([5], [-5, -6]))
>>> g.add_edge(4, -1)
>>> h.add_edge(2, 100, weight=14)
>>> h.add_edge(5, -5, weight=10)
>>> j = g & h
>>> components = j.find_connected_components()
>>> curr = components.next()
>>> (sorted(curr.U), sorted(curr.V))
([1, 2, 3], [-3, -2, -1])
>>> curr = components.next()
>>> (sorted(curr.U), sorted(curr.V))
([5], [-5])
"""
def __init__(self, base=None, weight_type=int, min_edge_weight=None):
self.weight_type = weight_type
if base is None:
# new empty object
self.U2V = defaultdict(set) # left to right mapping dict
self.V2U = defaultdict(set) # right to left mapping dict
self.edges = defaultdict(weight_type)
else:
if not isinstance(base, self.__class__):
raise TypeError("Base object has incorrect type")
if weight_type is not base.edges.default_factory:
raise ValueError("Cannot convert %s factory to %s",
base.edges.default_factory, weight_type)
if min_edge_weight is None:
# simple copy of base object
self.edges = deepcopy(base.edges)
self.U2V = defaultdict(set, deepcopy(base.U2V))
self.V2U = defaultdict(set, deepcopy(base.V2U))
else:
# filter out edges with weight below requested
self.U2V = defaultdict(set)
self.V2U = defaultdict(set)
self.edges = defaultdict(weight_type)
for edge, weight in base.edges.iteritems():
if weight >= min_edge_weight:
u, v = edge
self.add_edge(u, v, weight=deepcopy(weight))
@classmethod
def from_components(cls, components):
"""Constructs a graph from a series of components
"""
return reduce(operator.or_, components, cls())
@classmethod
def from_edgelist(cls, edgelist):
"""Construct a graph from a list of tuples or triples
Tuples represent edges u - v
Triples represent edges u - v plus weight component
"""
g = cls()
for edge in edgelist:
g.add_edge(*edge)
return g
def to_edgelist(self):
for edge, weight in self.edges.iteritems():
yield edge
def rename_nodes(self, unode_renamer=None, vnode_renamer=None):
"""Factory method that produces another graph just like current one
except with renamed nodes (can be used for reducing a graph)
"""
new_graph = self.__class__()
for (u, v), weight in self.edges.iteritems():
try:
u = unode_renamer(u)
v = vnode_renamer(v)
except SkipEdge:
continue
new_graph.add_edge(u, v, weight)
return new_graph
def get_weight(self):
return sum(self.edges.itervalues())
def get_vnode_weight(self, node):
neighbors = self.V2U[node]
node_weight = self.weight_type()
for neighbor in neighbors:
edge = self.make_edge(neighbor, node)
node_weight += self.edges[edge]
return node_weight
def get_unode_weight(self, node):
neighbors = self.U2V[node]
node_weight = self.weight_type()
for neighbor in neighbors:
edge = self.make_edge(node, neighbor)
node_weight += self.edges[edge]
return node_weight
def __and__(self, other):
'''Get intersection of edges of two graphs
This operation chooses the minimum edge weight and is commutative
'''
g = self.__class__() # create another instance of this class
dict1, dict2 = self.edges, other.edges
edge_intersection = set(dict1.keys()) & set(dict2.keys())
# compile edges into dicts and store weights
g_map_edge = g._map_edge
g_store_weight = g._store_weight
this_zero = self.weight_type()
other_zero = other.weight_type()
for e in edge_intersection:
g_map_edge(e)
val = min(dict1.get(e, this_zero), dict2.get(e, other_zero))
g_store_weight(e, val)
return g
def __or__(self, other):
'''Get union of edges of two graphs
This operation involves summing edge weights and is commutative
'''
g = self.__class__()
dict1, dict2 = self.edges, other.edges
edge_union = set(dict1.keys()) | set(dict2.keys())
# compile edges into dicts and store weights
g_map_edge = g._map_edge
g_store_weight = g._store_weight
this_zero = self.weight_type()
other_zero = other.weight_type()
for e in edge_union:
g_map_edge(e)
val = dict1.get(e, this_zero) + dict2.get(e, other_zero)
g_store_weight(e, val)
return g
def __sub__(self, other):
'''Get difference of edges of two graphs (noncommutative)
'''
g = self.__class__() # create another instance of this class
dict1, dict2 = self.edges, other.edges
edge_difference = set(dict1.keys()) - set(dict2.keys())
# hash edges into dicts and store weights
g_map_edge = g._map_edge
g_store_weight = g._store_weight
for e in edge_difference:
g_map_edge(e)
g_store_weight(e, dict1[e])
return g
def get_dot(self, name="bipartite graph", bipartite=True, unode_decorator=None,
vnode_decorator=None, edge_decorator=None):
"""Get a Graphviz representation
"""
import pygraphviz as pgv
if unode_decorator is None:
unode_decorator = lambda g, u: (u, {})
if vnode_decorator is None:
vnode_decorator = lambda g, v: (v, {})
if edge_decorator is None:
edge_decorator = lambda g, u, v, weight: ((u, v), {})
g = pgv.AGraph(name=name, size='10,8')
cluster_prefix = 'cluster_' if bipartite else ''
sU = g.subgraph(name=cluster_prefix + "U", style="dotted")
for node in self.U:
node_name, attrs = unode_decorator(self, node)
sU.add_node(node_name, **attrs)
sV = g.subgraph(name=cluster_prefix + "V", style="dotted")
for node in self.V:
node_name, attrs = vnode_decorator(self, node)
sV.add_node(node_name, **attrs)
for edge, weight in self.edges.iteritems():
unode, vnode = edge
edge, attrs = edge_decorator(self, unode, vnode, weight)
g.add_edge(*edge, **attrs)
return g
def _map_edge(self, edge):
u, v = edge
if u is None or v is None:
raise GraphError("An edge must connect two nodes")
self.U2V[u].add(v)
self.V2U[v].add(u)
def _store_weight(self, edge, weight):
self.edges[edge] += weight
def add_clique(self, clique, weight=1):
'''Adds a complete bipartite subgraph (a 2-clique)
:param clique: a clique descriptor (tuple of U and V vertices)
'''
unodes, vnodes = clique
for u, v in product(unodes, vnodes):
self.add_edge(u, v, weight=weight)
def add_edge(self, u, v, weight=1):
'''Add a single edge (plus two vertices if necessary)
This is a special case of add_clique(), only with
scalar parameters. For reading data from files or adjacency
matrices.
'''
edge = (u, v)
self._map_edge(edge)
# using "sorted" version of adding an edge -- needed
# only for subclasses which redefine this method
self._store_weight_sorted(edge, weight)
def make_edge(self, u, v):
return (u, v)
def __len__(self):
'''Number of edges in an undirected graph
'''
return len(self.edges)
def __eq__(self, other):
return self.edges == other.edges
@property
def U(self):
'''Returns a set of all "left" nodes
'''
return self.U2V.keys()
@property
def V(self):
'''A set of all "right" nodes
'''
return self.V2U.keys()
def get_density(self):
'''Return number of existing edges divided by the number of all possible edges
'''
nU, nV = len(self.U), len(self.V)
nU2V = len(self.edges)
if nU > 0 and nV > 0:
denominator = nU * nV
assert nU2V <= denominator
density = float(nU2V) / denominator
return density
else:
return None
def find_connected_components(self):
"""Return all connected components as a list of sets
"""
stack = []
# create a modifiable copy of the set of all vertices
d = (
self.U2V,
self.V2U
)
remaining = set(chain(
((0, u) for u in self.U),
((1, v) for v in self.V)
))
make_tuple_for = (
(lambda x, y: (x, y)),
(lambda x, y: (y, x))
)
while remaining:
component = self.__class__()
# pick a vertex at random and add it to the stack
stack.append(remaining.pop())
while stack:
# pick an element from the stack and add it to the current component
idx, node = stack.pop()
# expand stack to all unvisited neighbors of the element
# `~idx + 2` flips 0 to 1 and 1 to 0
neighbor_part_id = ~idx + 2
make_tuple = make_tuple_for[idx]
for neighbor in d[idx][node]:
edge = make_tuple(node, neighbor)
edge_weight = self.edges[edge]
component.add_edge(edge[0], edge[1], edge_weight)
neighbor_tuple = (neighbor_part_id, neighbor)
try:
remaining.remove(neighbor_tuple)
except KeyError:
# vertex does not exist or has already been visited;
# continue with the loop
pass
else:
stack.append(neighbor_tuple)
# stack is empty: done with one component
yield component
def find_cliques(self, L=None, P=None):
'''Find maximal bicliques (complete bipartite subgraphs)
Enumerate all maximal bicliques in an undirected bipartite graph.
Adapted from: Zhang, Y., Chesler, E. J. & Langston, M. A.
"On finding bicliques in bipartite graphs: a novel algorithm with
application to the integration of diverse biological data types."
Hawaii International Conference on System Sciences 0, 473+ (2008).
URL http://dx.doi.org/10.1109/HICSS.2008.507.
Terminology:
L - a set of vertices in U that are common neighbors of vertices in R
R - a set of vertices in V belonging to the current biclique
P - a set of vertices in V that can be added to R
Q - a set of vertices in V that have been previously added to R
'''
v2u = self.V2U
L = set(self.U) if L is None else set(L)
P = list(self.V) if P is None else list(P)
stack = [(L, set(), P, set())]
while stack:
L, R, P, Q = stack.pop()
while P:
x = P.pop()
# extend biclique
R_prime = R | {x}
L_prime = v2u[x] & L
# create new sets
P_prime = []
Q_prime = set()
# check maximality
is_maximal = True
for v in Q:
# checks whether L_prime is a subset of all adjacent nodes
# of v in Q
Nv = v2u[v] & L_prime
if len(Nv) == len(L_prime):
is_maximal = False
break
elif Nv:
# some vertices in L_prime are not adjacent to v:
# keep vertices adjacent to some vertex in L_prime
Q_prime.add(v)
if is_maximal:
for v in P:
# get the neighbors of v in L_prime
Nv = v2u[v] & L_prime
if len(Nv) == len(L_prime):
R_prime.add(v)
elif Nv:
# some vertices in L_prime are not adjacent to v:
# keep vertices adjacent to some vertex in L_prime
P_prime.append(v)
yield (L_prime, R_prime) # report maximal biclique
if P_prime:
stack.append((L_prime, R_prime, P_prime, Q_prime))
# move x to former candidate set
Q.add(x)
class Graph(Bigraph):
"""
undirected graph G = (V, E).
"""
def __init__(self, base=None, weight_type=int):
self.weight_type = weight_type
if base is None:
# creating from scratch
self.U2V = self.V2U = defaultdict(set) # single adjacency dict shared by both directions
self.edges = defaultdict(weight_type)
elif isinstance(base, self.__class__):
# deriving from class instance
self.U2V = self.V2U = deepcopy(base.V2U)
self.edges = deepcopy(base.edges)
elif issubclass(self.__class__, base.__class__):
# deriving from Bigraph instance
self.V2U = deepcopy(base.V2U)
self.V2U.update(deepcopy(base.U2V))
self.U2V = self.V2U
edge_map = defaultdict(weight_type)
for (node1, node2), weight in base.edges.iteritems():
edge_map[self.make_edge(node1, node2)] = deepcopy(weight)
self.edges = edge_map
else:
raise TypeError("Base object has incorrect type")
def rename_nodes(self, vnode_renamer=None):
"""Factory method that produces another graph just like current one
except with renamed nodes (can be used for reducing a graph)
"""
new_graph = self.__class__()
for (u, v), weight in self.edges.iteritems():
u = vnode_renamer(u)
v = vnode_renamer(v)
new_graph.add_edge(u, v, weight)
return new_graph
def _store_weight_sorted(self, edge, weight):
# Edges are undirected -- normalize the tuple order so (u, v) and (v, u) share one key
self.edges[self.make_edge(*edge)] += weight
@property
def U(self):
raise NotImplementedError("Set U is only for a bipartite graph")
def add_clique(self, clique, weight=1):
'''Adds a complete subgraph (a clique) over the given vertices
:param clique: a clique descriptor (a set of vertices)
'''
for u, v in product(clique, clique):
self.add_edge(u, v, weight=weight)
def get_density(self):
'''Return the number of existing edges divided by the number of all possible edges
'''
nV = len(self.V)
nV2V = 2 * len(self.edges)
if nV > 1:
denominator = nV * (nV - 1)
assert nV2V <= denominator
density = float(nV2V) / denominator
return density
else:
return None
def make_edge(self, u, v):
return tuple(sorted((u, v)))
def find_connected_components(self):
"""Return all connected components as a list of sets
"""
stack = []
# create a modifiable copy of the set of all vertices
remaining = set(self.V)
v2u = self.V2U
while remaining:
# pick a vertex at random and add it to the stack
component = self.__class__()
stack.append(remaining.pop())
while stack:
# pick an element from the stack and add it to the current component
v = stack.pop()
# expand stack to all unvisited neighbors of the element
for u in v2u[v]:
edge_weight = self.edges[(u, v)]
component.add_edge(u, v, edge_weight)
try:
remaining.remove(u)
except KeyError:
# vertex does not exist or has already
# been visited; continue with the loop
pass
else:
stack.append(u)
# stack is empty: done with one component
yield component
def get_dot(self, name="graph", edge_decorator=None, vnode_decorator=None, **kwargs):
import pygraphviz as pgv
if edge_decorator is None:
edge_decorator = lambda g, u, v, weight: ((u, v), {})
if vnode_decorator is None:
vnode_decorator = lambda g, v: (v, {})
g = pgv.AGraph(name=name)
for node in self.V:
node_name, attrs = vnode_decorator(self, node)
g.add_node(node_name, **attrs)
for edge, weight in self.edges.iteritems():
unode, vnode = edge
edge, attrs = edge_decorator(self, unode, vnode, weight)
g.add_edge(*edge, **attrs)
return g
def find_cliques(self, nodes=None, min_clique_size=3):
'''Return maximal cliques in a graph
Implements Bron-Kerbosch algorithm, Version 2
(implementation a modified version of http://www.kuchaev.com/files/graph.py)
'''
# subset to search
search_space = set(self.V) if nodes is None else set(nodes)
disc_num = len(search_space)
stack = [(set(), search_space, set(), None, disc_num)]
v2u = self.V2U
while stack:
(c_compsub, c_candidates, c_not, c_nd, c_disc_num) = stack.pop()
if not c_candidates and not c_not and len(c_compsub) >= min_clique_size:
yield c_compsub
continue
for u in list(c_candidates):
Nu = v2u[u] # all neighbors of node u
if c_nd is None or c_nd not in Nu:
c_candidates.remove(u)
new_compsub = set(c_compsub)
new_compsub.add(u)
new_candidates = c_candidates & Nu # candidates that are neighbors of node u
new_not = c_not & Nu # already seen neighbors of node u
if c_nd is None:
stack.append((new_compsub, new_candidates, new_not, c_nd, c_disc_num))
elif c_nd in new_not:
new_disc_num = c_disc_num - 1
if new_disc_num > 0:
stack.append((new_compsub, new_candidates, new_not, c_nd, new_disc_num))
else:
new_disc_num = disc_num
new_nd = c_nd
for cand_nd in new_not:
cand_disc_num = len(new_candidates - v2u[cand_nd])
if cand_disc_num < new_disc_num:
new_disc_num = cand_disc_num
new_nd = cand_nd
stack.append((new_compsub, new_candidates, new_not, new_nd, new_disc_num))
c_not.add(u)
# find the number of candidates that are not adjacent to u
new_disc_num = len(c_candidates - Nu)
if 0 < new_disc_num < c_disc_num:
stack.append((c_compsub, c_candidates, c_not, u, new_disc_num))
else:
stack.append((c_compsub, c_candidates, c_not, c_nd, c_disc_num))
def describe_graph(g, graph_name=None):
with closing(StringIO()) as sio:
if graph_name is not None:
print >>sio, graph_name
print >>sio, "Edges (%d):\n\t%s\n" % (len(g.edges), g.edges)
print >>sio, "V2U mapping (%d):\n\t%s\n" % (len(g.V2U), g.V2U)
print >>sio, "U2V mapping (%d):\n\t%s\n" % (len(g.U2V), g.U2V)
print >>sio, "Nodes (%d):\n\t%s\n" % (len(g.V), g.V)
print >>sio, "Connected components:"
for idx, comp in enumerate(g.find_connected_components(), start=1):
print >>sio, "\tComponent density: %.3f" % comp.get_density()
print >>sio, "\tMaximal cliques:"
for jdx, clique in enumerate(g.find_cliques(comp), start=1):
print >>sio, "\t\t%d: %s" % (jdx, str(clique))
return sio.getvalue()
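# Illustrative sketch (not part of this module): build a small bipartite graph
# from an edge list and enumerate its maximal bicliques with the public methods
# defined above. The expected result is noted in a comment rather than asserted.
def _example_bicliques():
    g = Bigraph.from_edgelist([(1, -1), (1, -2), (2, -1), (2, -2), (3, -2)])
    # find_cliques() yields (L, R) pairs; for this graph the maximal bicliques
    # should include ({1, 2}, {-1, -2}) and ({1, 2, 3}, {-2}).
    return list(g.find_cliques())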
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ExtensionsV1beta1DeploymentCondition(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, last_transition_time=None, last_update_time=None, message=None, reason=None, status=None, type=None):
"""
ExtensionsV1beta1DeploymentCondition - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'last_transition_time': 'datetime',
'last_update_time': 'datetime',
'message': 'str',
'reason': 'str',
'status': 'str',
'type': 'str'
}
self.attribute_map = {
'last_transition_time': 'lastTransitionTime',
'last_update_time': 'lastUpdateTime',
'message': 'message',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
self._last_transition_time = last_transition_time
self._last_update_time = last_update_time
self._message = message
self._reason = reason
self._status = status
self._type = type
@property
def last_transition_time(self):
"""
Gets the last_transition_time of this ExtensionsV1beta1DeploymentCondition.
Last time the condition transitioned from one status to another.
:return: The last_transition_time of this ExtensionsV1beta1DeploymentCondition.
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""
Sets the last_transition_time of this ExtensionsV1beta1DeploymentCondition.
Last time the condition transitioned from one status to another.
:param last_transition_time: The last_transition_time of this ExtensionsV1beta1DeploymentCondition.
:type: datetime
"""
self._last_transition_time = last_transition_time
@property
def last_update_time(self):
"""
Gets the last_update_time of this ExtensionsV1beta1DeploymentCondition.
The last time this condition was updated.
:return: The last_update_time of this ExtensionsV1beta1DeploymentCondition.
:rtype: datetime
"""
return self._last_update_time
@last_update_time.setter
def last_update_time(self, last_update_time):
"""
Sets the last_update_time of this ExtensionsV1beta1DeploymentCondition.
The last time this condition was updated.
:param last_update_time: The last_update_time of this ExtensionsV1beta1DeploymentCondition.
:type: datetime
"""
self._last_update_time = last_update_time
@property
def message(self):
"""
Gets the message of this ExtensionsV1beta1DeploymentCondition.
A human readable message indicating details about the transition.
:return: The message of this ExtensionsV1beta1DeploymentCondition.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this ExtensionsV1beta1DeploymentCondition.
A human readable message indicating details about the transition.
:param message: The message of this ExtensionsV1beta1DeploymentCondition.
:type: str
"""
self._message = message
@property
def reason(self):
"""
Gets the reason of this ExtensionsV1beta1DeploymentCondition.
The reason for the condition's last transition.
:return: The reason of this ExtensionsV1beta1DeploymentCondition.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""
Sets the reason of this ExtensionsV1beta1DeploymentCondition.
The reason for the condition's last transition.
:param reason: The reason of this ExtensionsV1beta1DeploymentCondition.
:type: str
"""
self._reason = reason
@property
def status(self):
"""
Gets the status of this ExtensionsV1beta1DeploymentCondition.
Status of the condition, one of True, False, Unknown.
:return: The status of this ExtensionsV1beta1DeploymentCondition.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this ExtensionsV1beta1DeploymentCondition.
Status of the condition, one of True, False, Unknown.
:param status: The status of this ExtensionsV1beta1DeploymentCondition.
:type: str
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`")
self._status = status
@property
def type(self):
"""
Gets the type of this ExtensionsV1beta1DeploymentCondition.
Type of deployment condition.
:return: The type of this ExtensionsV1beta1DeploymentCondition.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this ExtensionsV1beta1DeploymentCondition.
Type of deployment condition.
:param type: The type of this ExtensionsV1beta1DeploymentCondition.
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`")
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ExtensionsV1beta1DeploymentCondition):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
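# Illustrative sketch (not part of the generated module): construct a condition
# and convert it back to a plain dict using the helpers above. The field values
# are hypothetical.
def _example_condition():
    cond = ExtensionsV1beta1DeploymentCondition(
        type="Available",
        status="True",
        reason="MinimumReplicasAvailable",
        message="Deployment has minimum availability.",
    )
    # to_dict() walks swagger_types, so fields left unset come back as None.
    return cond.to_dict()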
|
|
#!/usr/bin/env python
"""
@package coverage_model.test.test_bricking_utils
@file coverage_model/test/test_bricking_utils.py
@author Christopher Mueller
@brief Tests for the bricking_utils module
"""
from nose.plugins.attrib import attr
import unittest
from coverage_model import CoverageModelUnitTestCase, CoverageModelIntTestCase
from coverage_model.bricking_utils import *
from coverage_model.persistence_helpers import RTreeProxy
@attr('UNIT', group='cov')
class TestBrickingUtilsUnit(CoverageModelUnitTestCase):
def test_calc_brick_origins_1d(self):
brick_sizes = (5,)
# 1d - even num of bricks
total_domain = (15,)
brick_origins = calc_brick_origins(total_domain, brick_sizes)
want = ((0,), (5,), (10,))
self.assertEqual(brick_origins, want)
# 1d - uneven num of bricks
total_domain = (13,)
brick_origins = calc_brick_origins(total_domain, brick_sizes)
want = ((0,), (5,), (10,))
self.assertEqual(brick_origins, want)
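# Worked example of the arithmetic above (comment only): with brick size 5 and a
# 13-element domain, origins are the multiples of 5 below 13 -- 0, 5 and 10 --
# i.e. ceil(13 / 5) == 3 bricks, the last of which is only partially filled.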
def test_calc_brick_origins_2d(self):
brick_sizes = (5, 5)
# 2d - even num of bricks
total_domain = (15, 10)
brick_origins = calc_brick_origins(total_domain, brick_sizes)
want = ((0, 0), (0, 5), (5, 0), (5, 5), (10, 0), (10, 5))
self.assertEqual(brick_origins, want)
# 2d - uneven num of bricks
total_domain = (13, 17)
brick_origins = calc_brick_origins(total_domain, brick_sizes)
want = ((0, 0), (0, 5), (0, 10), (0, 15), (5, 0), (5, 5), (5, 10), (5, 15), (10, 0), (10, 5), (10, 10), (10, 15))
self.assertEqual(brick_origins, want)
def test_calc_brick_origins_3d(self):
brick_sizes = (5, 5, 5)
# 3d - even num of bricks
total_domain = (10, 15, 5)
brick_origins = calc_brick_origins(total_domain, brick_sizes)
want = ((0, 0, 0), (0, 5, 0), (0, 10, 0), (5, 0, 0), (5, 5, 0), (5, 10, 0))
self.assertEqual(brick_origins, want)
# 3d - uneven num of bricks
total_domain = (13, 19, 3)
brick_origins = calc_brick_origins(total_domain, brick_sizes)
want = ((0, 0, 0), (0, 5, 0), (0, 10, 0), (0, 15, 0), (5, 0, 0), (5, 5, 0), (5, 10, 0), (5, 15, 0), (10, 0, 0), (10, 5, 0), (10, 10, 0), (10, 15, 0))
self.assertEqual(brick_origins, want)
def test_calc_brick_origins_errors(self):
brick_sizes = (5, 5)
total_domain = (10,)
# Non-iterable total_domain
self.assertRaises(ValueError, calc_brick_origins, 10, brick_sizes)
# Non-iterable brick_sizes
self.assertRaises(ValueError, calc_brick_origins, total_domain, 5)
# Incompatible total_domain & brick_sizes
self.assertRaises(ValueError, calc_brick_origins, total_domain, brick_sizes)
def test_calc_brick_and_rtree_extents_1d(self):
sizes = (5,)
origins = ((0,), (5,), (10,))
be, rte = calc_brick_and_rtree_extents(origins, sizes)
be_want = (((0, 4),), ((5, 9),), ((10, 14),))
rte_want = ((0, 0, 4, 0), (5, 0, 9, 0), (10, 0, 14, 0))
self.assertEqual(be, be_want)
self.assertEqual(rte, rte_want)
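# Note on the expected values above (comment only): each rtree extent packs the
# minimum corner followed by the maximum corner, and the 1-d case is padded with
# a dummy second dimension of 0 (the commented-out rtree tests below note that
# the rtree index needs at least two dimensions).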
def test_calc_brick_and_rtree_extents_2d(self):
sizes = (5, 5)
origins = ((0, 0), (0, 5), (5, 0), (5, 5), (10, 0), (10, 5))
be, rte = calc_brick_and_rtree_extents(origins, sizes)
be_want = (((0, 4), (0, 4)), ((0, 4), (5, 9)), ((5, 9), (0, 4)), ((5, 9), (5, 9)), ((10, 14), (0, 4)), ((10, 14), (5, 9)))
rte_want = ((0, 0, 4, 4), (0, 5, 4, 9), (5, 0, 9, 4), (5, 5, 9, 9), (10, 0, 14, 4), (10, 5, 14, 9))
self.assertEqual(be, be_want)
self.assertEqual(rte, rte_want)
def test_calc_brick_and_rtree_extents_3d(self):
sizes = (5, 5, 5)
origins = ((0, 0, 0), (0, 5, 0), (0, 10, 0), (5, 0, 0), (5, 5, 0), (5, 10, 0))
be, rte = calc_brick_and_rtree_extents(origins, sizes)
be_want = (((0, 4), (0, 4), (0, 4)), ((0, 4), (5, 9), (0, 4)), ((0, 4), (10, 14), (0, 4)), ((5, 9), (0, 4), (0, 4)), ((5, 9), (5, 9), (0, 4)), ((5, 9), (10, 14), (0, 4)))
rte_want = ((0, 0, 0, 4, 4, 4), (0, 5, 0, 4, 9, 4), (0, 10, 0, 4, 14, 4), (5, 0, 0, 9, 4, 4), (5, 5, 0, 9, 9, 4), (5, 10, 0, 9, 14, 4))
self.assertEqual(be, be_want)
self.assertEqual(rte, rte_want)
def test_calc_brick_and_rtree_extents_errors(self):
brick_sizes = (5,)
brick_origins = ((0, 0), (0, 5))
# Non-iterable brick_origins
self.assertRaises(ValueError, calc_brick_and_rtree_extents, 10, brick_sizes)
# Non-iterable brick_sizes
self.assertRaises(ValueError, calc_brick_and_rtree_extents, brick_origins, 5)
# Incompatible brick_origins & brick_sizes
self.assertRaises(ValueError, calc_brick_and_rtree_extents, brick_origins, brick_sizes)
# def test_rtree_populator_1d(self):
#
# brick_extents = (((0, 4),), ((5, 9),), ((10, 14),))
# rtree_extents = ((0, 0, 4, 0), (5, 0, 9, 0), (10, 0, 14, 0))
#
# p = index.Property()
# p.dimension = 2 # Minimum is 2 for proper functioning
# rtree = index.Index(rtree_populator(rtree_extents, brick_extents), properties=p)
#
# self.assertIsInstance(rtree, index.Index)
# self.assertEqual(rtree.get_bounds(), [0.0, 0.0, 14.0, 0.0])
# self.assertEqual(rtree.leaves(), [(0, [0, 1, 2], [0.0, 0.0, 14.0, 0.0])])
# self.assertEqual(rtree.properties.dimension, 2)
#
# def test_rtree_populator_2d(self):
#
# brick_extents = (((0, 4), (0, 4)), ((0, 4), (5, 9)), ((5, 9), (0, 4)), ((5, 9), (5, 9)), ((10, 14), (0, 4)), ((10, 14), (5, 9)))
# rtree_extents = ((0, 0, 4, 4), (0, 5, 4, 9), (5, 0, 9, 4), (5, 5, 9, 9), (10, 0, 14, 4), (10, 5, 14, 9))
#
# p = index.Property()
# p.dimension = 2
# rtree = index.Index(rtree_populator(rtree_extents, brick_extents), properties=p)
#
# self.assertIsInstance(rtree, index.Index)
# self.assertEqual(rtree.get_bounds(), [0.0, 0.0, 14.0, 9.0])
# self.assertEqual(rtree.leaves(), [(0, [0, 1, 2, 3, 4, 5], [0.0, 0.0, 14.0, 9.0])])
# self.assertEqual(rtree.properties.dimension, 2)
#
# def test_rtree_populator_3d(self):
#
# brick_extents = (((0, 4), (0, 4), (0, 4)), ((0, 4), (5, 9), (0, 4)), ((0, 4), (10, 14), (0, 4)), ((5, 9), (0, 4), (0, 4)), ((5, 9), (5, 9), (0, 4)), ((5, 9), (10, 14), (0, 4)))
# rtree_extents = ((0, 0, 0, 4, 4, 4), (0, 5, 0, 4, 9, 4), (0, 10, 0, 4, 14, 4), (5, 0, 0, 9, 4, 4), (5, 5, 0, 9, 9, 4), (5, 10, 0, 9, 14, 4))
#
# p = index.Property()
# p.dimension = 3
# rtree = index.Index(rtree_populator(rtree_extents, brick_extents), properties=p)
#
# self.assertIsInstance(rtree, index.Index)
# self.assertEqual(rtree.get_bounds(), [0.0, 0.0, 0.0, 9.0, 14.0, 4.0])
# self.assertEqual(rtree.leaves(), [(0, [0, 1, 2, 3, 4, 5], [0.0, 0.0, 0.0, 9.0, 14.0, 4.0])])
# self.assertEqual(rtree.properties.dimension, 3)
def _get_bricks_assert(self, slice_, rtree, total_domain, size, brick_list):
bricks = get_bricks_from_slice(slice_, rtree, total_domain)
self.assertEqual(len(bricks), size)
self.assertEqual(bricks, brick_list)
def _p_get_bricks_assert(self, slice_, rtree, total_domain, size, brick_list):
bricks = get_bricks_from_slice(slice_, rtree, total_domain)
# Debug variant of _get_bricks_assert: print the result instead of asserting.
print('%d bricks: %r' % (len(bricks), bricks))
def test_get_bricks_from_slice_1d(self):
total_domain = (15,)
brick_extents = (((0, 4),), ((5, 9),), ((10, 14),))
rtree_extents = ((0, 0, 4, 0), (5, 0, 9, 0), (10, 0, 14, 0))
brick_0 = (0, ((0, 4),))
brick_1 = (1, ((5, 9),))
brick_2 = (2, ((10, 14),))
from coverage_model.test.bricking_assessment_utility import BrickingAssessor
rtree = RTreeProxy()
for x in BrickingAssessor.rtree_populator(rtree_extents, brick_extents):
rtree.insert(*x)
# Try a variety of slices
self._get_bricks_assert(slice(None), rtree, total_domain, 3, [brick_0, brick_1, brick_2])
self._get_bricks_assert(slice(None, None, 3), rtree, total_domain, 3, [brick_0, brick_1, brick_2])
self._get_bricks_assert(slice(0, 3), rtree, total_domain, 1, [brick_0])
self._get_bricks_assert(slice(5, 9), rtree, total_domain, 1, [brick_1])
self._get_bricks_assert(slice(6, None), rtree, total_domain, 2, [brick_1, brick_2])
self._get_bricks_assert(slice(None, None, 10), rtree, total_domain, 3, [brick_0, brick_1, brick_2]) # three bricks, though the middle one isn't needed
self._get_bricks_assert(([1, 3],), rtree, total_domain, 1, [brick_0])
self._get_bricks_assert(([2, 4, 7],), rtree, total_domain, 2, [brick_0, brick_1])
self._get_bricks_assert(([3, 12],), rtree, total_domain, 3, [brick_0, brick_1, brick_2]) # three bricks, though the middle one isn't needed
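# (The lookup appears to query the rtree with the bounding extent of the
# requested indices, so bricks falling inside that hull are returned even
# when no requested index actually lands in them.)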
self._get_bricks_assert(1, rtree, total_domain, 1, [brick_0])
self._get_bricks_assert(6, rtree, total_domain, 1, [brick_1])
self._get_bricks_assert(13, rtree, total_domain, 1, [brick_2])
@unittest.skip('RTreeProxy does not support 2D')
def test_get_bricks_from_slice_2d(self):
total_domain = (15, 10)
brick_extents = (((0, 4), (0, 4)), ((0, 4), (5, 9)), ((5, 9), (0, 4)), ((5, 9), (5, 9)), ((10, 14), (0, 4)), ((10, 14), (5, 9)))
rtree_extents = ((0, 0, 4, 4), (0, 5, 4, 9), (5, 0, 9, 4), (5, 5, 9, 9), (10, 0, 14, 4), (10, 5, 14, 9))
brick_0 = (0, ((0, 4), (0, 4)))
brick_1 = (1, ((0, 4), (5, 9)))
brick_2 = (2, ((5, 9), (0, 4)))
brick_3 = (3, ((5, 9), (5, 9)))
brick_4 = (4, ((10, 14), (0, 4)))
brick_5 = (5, ((10, 14), (5, 9)))
from coverage_model.test.bricking_assessment_utility import BrickingAssessor
rtree = RTreeProxy()
for x in BrickingAssessor.rtree_populator(rtree_extents, brick_extents):
rtree.insert(*x)
# Get all bricks
self._get_bricks_assert((slice(None),) * 2, rtree, total_domain, 6, [brick_0, brick_1, brick_2, brick_3, brick_4, brick_5])
self._get_bricks_assert((slice(None), slice(None, 8)), rtree, total_domain, 6, [brick_0, brick_1, brick_2, brick_3, brick_4, brick_5])
self._get_bricks_assert((slice(None), slice(None, 4)), rtree, total_domain, 3, [brick_0, brick_2, brick_4])
self._get_bricks_assert((slice(7, 12), slice(5, 8)), rtree, total_domain, 2, [brick_3, brick_5])
self._get_bricks_assert((slice(2, 14, 3), slice(2, 7)), rtree, total_domain, 6, [brick_0, brick_1, brick_2, brick_3, brick_4, brick_5])
self._get_bricks_assert((slice(2, 14, 10), slice(2, 7)), rtree, total_domain, 6, [brick_0, brick_1, brick_2, brick_3, brick_4, brick_5])
self._get_bricks_assert((0, slice(2, 8, 3)), rtree, total_domain, 2, [brick_0, brick_1])
self._get_bricks_assert((6, slice(2, 7)), rtree, total_domain, 2, [brick_2, brick_3])
self._get_bricks_assert((slice(None, 12), 7), rtree, total_domain, 3, [brick_1, brick_3, brick_5])
self._get_bricks_assert((12, slice(2, None, 4)), rtree, total_domain, 2, [brick_4, brick_5])
self._get_bricks_assert(([1, 2], 9), rtree, total_domain, 1, [brick_1])
self._get_bricks_assert(([0, 14], 3), rtree, total_domain, 3, [brick_0, brick_2, brick_4])
self._get_bricks_assert((3, [1, 8]), rtree, total_domain, 2, [brick_0, brick_1])
self._get_bricks_assert(([2, 5], [1, 8]), rtree, total_domain, 4, [brick_0, brick_1, brick_2, brick_3])
self._get_bricks_assert(([6, 9], [1, 8]), rtree, total_domain, 2, [brick_2, brick_3])
self._get_bricks_assert(([2, 8, 13], [7, 8]), rtree, total_domain, 3, [brick_1, brick_3, brick_5])
@unittest.skip('RTreeProxy does not support 3D')
def test_get_bricks_from_slice_3d(self):
total_domain = (10, 15, 5)
brick_extents = (((0, 4), (0, 4), (0, 4)), ((0, 4), (5, 9), (0, 4)), ((0, 4), (10, 14), (0, 4)), ((5, 9), (0, 4), (0, 4)), ((5, 9), (5, 9), (0, 4)), ((5, 9), (10, 14), (0, 4)))
rtree_extents = ((0, 0, 0, 4, 4, 4), (0, 5, 0, 4, 9, 4), (0, 10, 0, 4, 14, 4), (5, 0, 0, 9, 4, 4), (5, 5, 0, 9, 9, 4), (5, 10, 0, 9, 14, 4))
brick_0 = (0, ((0, 4), (0, 4), (0, 4)))
brick_1 = (1, ((0, 4), (5, 9), (0, 4)))
brick_2 = (2, ((0, 4), (10, 14), (0, 4)))
brick_3 = (3, ((5, 9), (0, 4), (0, 4)))
brick_4 = (4, ((5, 9), (5, 9), (0, 4)))
brick_5 = (5, ((5, 9), (10, 14), (0, 4)))
from coverage_model.test.bricking_assessment_utility import BrickingAssessor
rtree = RTreeProxy()
for x in BrickingAssessor.rtree_populator(rtree_extents, brick_extents):
rtree.insert(*x)
# Get all bricks
self._get_bricks_assert((slice(None),) * 3, rtree, total_domain, 6, [brick_0, brick_1, brick_2, brick_3, brick_4, brick_5])
self._get_bricks_assert((0, 0, 0), rtree, total_domain, 1, [brick_0])
self._get_bricks_assert((8, 5, 2), rtree, total_domain, 1, [brick_4])
self._get_bricks_assert((4, 12, 1), rtree, total_domain, 1, [brick_2])
self._get_bricks_assert((9, 13, [0, 2]), rtree, total_domain, 1, [brick_5])
self._get_bricks_assert((8, [3, 5, 12], 0), rtree, total_domain, 3, [brick_3, brick_4, brick_5])
self._get_bricks_assert(([5, 9], 10, 0), rtree, total_domain, 1, [brick_5])
self._get_bricks_assert(([5, 9], [4, 12, 13], 0), rtree, total_domain, 3, [brick_3, brick_4, brick_5])
self._get_bricks_assert(([2, 4], [2, 11], [1, 3, 4]), rtree, total_domain, 3, [brick_0, brick_1, brick_2])
self._get_bricks_assert(([2, 3, 9], 12, [1, 3, 4]), rtree, total_domain, 2, [brick_2, brick_5])
self._get_bricks_assert((slice(None), 12, [1, 3, 4]), rtree, total_domain, 2, [brick_2, brick_5])
self._get_bricks_assert((slice(1, 7), 3, [1, 3, 4]), rtree, total_domain, 2, [brick_0, brick_3])
self._get_bricks_assert((slice(3, 4), 7, [1, 3, 4]), rtree, total_domain, 1, [brick_1])
self._get_bricks_assert((slice(2, 8, 7), [1, 6, 12], 4), rtree, total_domain, 6, [brick_0, brick_1, brick_2, brick_3, brick_4, brick_5])
self._get_bricks_assert((slice(2, 4, 7), slice(None), 2), rtree, total_domain, 3, [brick_0, brick_1, brick_2])
self._get_bricks_assert((slice(None, 4), slice(9, None, 2), slice(None)), rtree, total_domain, 2, [brick_1, brick_2])
self._get_bricks_assert((slice(None, 6, 4), slice(12, None, 2), slice(3, None)), rtree, total_domain, 2, [brick_2, brick_5])
self._get_bricks_assert((slice(None, 8), slice(6, 13, 4), slice(None, None, 3)), rtree, total_domain, 4, [brick_1, brick_2, brick_4, brick_5])
def _run_test_slices(self, ba, sl_list, val_arr, verbose):
for sl in sl_list:
ba.reset_bricks()
vals = val_arr[sl]
ba.put_values_to_bricks(sl, vals)
vo = ba.get_values_from_bricks(sl)
self.assertTrue(np.array_equal(vals, vo) or np.array_equal(vals.squeeze(), vo))
def test_set_get_slice_1d(self):
from coverage_model.test.bricking_assessment_utility import test_1d
test_1d(self._run_test_slices, None, persist=False, verbose=False, dtype='float32')
@unittest.skip('RTreeProxy does not support 2D')
def test_set_get_slice_2d(self):
from coverage_model.test.bricking_assessment_utility import test_2d
test_2d(self._run_test_slices, None, persist=False, verbose=False, dtype='float32')
@unittest.skip('RTreeProxy does not support 3D')
def test_set_get_slice_3d(self):
from coverage_model.test.bricking_assessment_utility import test_3d
test_3d(self._run_test_slices, None, persist=False, verbose=False, dtype='float32')
@attr('INT', group='cov')
class TestBrickingUtilsInt(CoverageModelIntTestCase):
def _run_test_slices(self, ba, sl_list, val_arr, verbose):
for sl in sl_list:
ba.reset_bricks()
vals = val_arr[sl]
ba.put_values_to_bricks(sl, vals)
vo = ba.get_values_from_bricks(sl)
self.assertTrue(np.array_equal(vals, vo) or np.array_equal(vals.squeeze(), vo))
def test_set_get_slice_1d(self):
from coverage_model.test.bricking_assessment_utility import test_1d
test_1d(self._run_test_slices, self.working_dir, persist=True, verbose=False, dtype='float32')
@unittest.skip('RTreeProxy does not support 2D')
def test_set_get_slice_2d(self):
from coverage_model.test.bricking_assessment_utility import test_2d
test_2d(self._run_test_slices, self.working_dir, persist=True, verbose=False, dtype='float32')
@unittest.skip('RTreeProxy does not support 3D')
def test_set_get_slice_3d(self):
from coverage_model.test.bricking_assessment_utility import test_3d
test_3d(self._run_test_slices, self.working_dir, persist=True, verbose=False, dtype='float32')
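# ---------------------------------------------------------------------------
# Reference sketches for the helpers exercised above. These are illustrative
# only: they assume the simplest behaviour consistent with the expected
# values in the tests and are NOT the coverage_model implementations of
# calc_brick_origins / calc_brick_and_rtree_extents.
# ---------------------------------------------------------------------------
import itertools


def _reference_calc_brick_origins(total_domain, brick_sizes):
    """Tile each dimension from 0 up to the domain size in brick-sized steps."""
    if not hasattr(total_domain, '__iter__') or not hasattr(brick_sizes, '__iter__'):
        raise ValueError('total_domain and brick_sizes must both be iterable')
    if len(total_domain) != len(brick_sizes):
        raise ValueError('total_domain and brick_sizes must have the same rank')
    per_dim = [range(0, int(d), int(b)) for d, b in zip(total_domain, brick_sizes)]
    return tuple(itertools.product(*per_dim))


def _reference_calc_brick_and_rtree_extents(brick_origins, brick_sizes):
    """Per-brick (min, max) extents plus interleaved rtree bounds.

    The rtree bounds use the (min_d0, ..., min_dN, max_d0, ..., max_dN)
    convention; 1-D extents are padded with a zero second dimension, which
    matches the 4-tuples expected by test_calc_brick_and_rtree_extents_1d.
    """
    brick_extents = tuple(
        tuple((o, o + s - 1) for o, s in zip(origin, brick_sizes))
        for origin in brick_origins)
    rtree_extents = []
    for extents in brick_extents:
        mins = [lo for lo, _ in extents]
        maxs = [hi for _, hi in extents]
        if len(extents) == 1:  # pad 1-D bounds to the 2-D minimum rtree rank
            mins.append(0)
            maxs.append(0)
        rtree_extents.append(tuple(mins + maxs))
    return brick_extents, tuple(rtree_extents)


# e.g. _reference_calc_brick_origins((13, 19, 3), (5, 5, 5)) reproduces the
# twelve origins asserted for the uneven 3-D case above.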
|
|
from __future__ import absolute_import, unicode_literals
from django.core.exceptions import FieldError
from django.db.models import F
from django.test import TestCase
from django.utils import six
from .models import Company, Employee
class ExpressionsTests(TestCase):
def test_filter(self):
Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
)
Company.objects.create(
name="Foobar Ltd.", num_employees=3, num_chairs=4,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer")
)
Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Max", lastname="Mustermann")
)
company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by(
"name", "num_employees", "num_chairs"
)
# We can filter for companies where the number of employees is greater
# than the number of chairs.
self.assertQuerysetEqual(
company_query.filter(num_employees__gt=F("num_chairs")), [
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{
"num_chairs": 1,
"name": "Test GmbH",
"num_employees": 32
},
],
lambda o: o
)
# We can set one field to have the value of another field
# Make sure we have enough chairs
company_query.update(num_chairs=F("num_employees"))
self.assertQuerysetEqual(
company_query, [
{
"num_chairs": 2300,
"name": "Example Inc.",
"num_employees": 2300
},
{
"num_chairs": 3,
"name": "Foobar Ltd.",
"num_employees": 3
},
{
"num_chairs": 32,
"name": "Test GmbH",
"num_employees": 32
}
],
lambda o: o
)
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
company_query.update(num_chairs=F("num_employees")+2)
self.assertQuerysetEqual(
company_query, [
{
'num_chairs': 2302,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 5,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 34,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
# Law of order of operations is followed
company_query.update(
num_chairs=F('num_employees') + 2 * F('num_employees')
)
self.assertQuerysetEqual(
company_query, [
{
'num_chairs': 6900,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 9,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 96,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
# Law of order of operations can be overridden by parentheses
company_query.update(
num_chairs=((F('num_employees') + 2) * F('num_employees'))
)
self.assertQuerysetEqual(
company_query, [
{
'num_chairs': 5294600,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 15,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 1088,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
# The value of a foreign key can be copied over to another foreign key.
self.assertEqual(
Company.objects.update(point_of_contact=F('ceo')),
3
)
self.assertQuerysetEqual(
Company.objects.all(), [
"Joe Smith",
"Frank Meyer",
"Max Mustermann",
],
lambda c: six.text_type(c.point_of_contact),
ordered=False
)
c = Company.objects.all()[0]
c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
c.save()
# F Expressions can also span joins
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")), [
"Foobar Ltd.",
"Test GmbH",
],
lambda c: c.name,
ordered=False
)
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name="foo")
self.assertEqual(
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).get().name,
"foo",
)
self.assertRaises(FieldError,
lambda: Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).update(name=F('point_of_contact__lastname'))
)
# F expressions can be used to update attributes on single objects
test_gmbh = Company.objects.get(name="Test GmbH")
self.assertEqual(test_gmbh.num_employees, 32)
test_gmbh.num_employees = F("num_employees") + 4
test_gmbh.save()
test_gmbh = Company.objects.get(pk=test_gmbh.pk)
self.assertEqual(test_gmbh.num_employees, 36)
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh.point_of_contact = None
test_gmbh.save()
self.assertTrue(test_gmbh.point_of_contact is None)
def test():
test_gmbh.point_of_contact = F("ceo")
self.assertRaises(ValueError, test)
test_gmbh.point_of_contact = test_gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__last_name")
self.assertRaises(FieldError, test_gmbh.save)
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5,
ceo=test_gmbh.ceo
)
acme.num_employees = F("num_employees") + 16
self.assertRaises(TypeError, acme.save)
def test_ticket_18375_join_reuse(self):
# Test that reverse multijoin F() references and the lookup target
# the same join. Pre #18375 the F() join was generated first, and the
# lookup couldn't reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering(self):
# This query was dict-randomization dependent: if the "gte=1" lookup was
# seen first, the F() reused the join generated by that lookup; if the
# F() was seen first, it generated a join the other lookups could not
# reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'),
company_ceo_set__num_chairs__gte=1)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering_2(self):
# Another case similar to the one above: the same join appears in two
# filter kwargs, once in the lhs lookup and once inside F(). Before
# #18375 the number of joins generated was random when dict
# randomization was enabled, i.e. the generated query depended on
# which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk'),
pk=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_chained_filters(self):
# Test that F() expressions do not reuse joins from previous filter.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk')
).filter(
company_ceo_set__num_employees=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
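# For reference, the models these tests assume look roughly like the sketch
# below (field names inferred from the queries above; related_name values and
# field options are assumptions, not the exact tests/expressions/models.py):
#
# from django.db import models
#
# class Employee(models.Model):
#     firstname = models.CharField(max_length=50)
#     lastname = models.CharField(max_length=50)
#
#     def __str__(self):
#         # six.text_type(company.point_of_contact) above expects "Joe Smith"
#         return "%s %s" % (self.firstname, self.lastname)
#
# class Company(models.Model):
#     name = models.CharField(max_length=100)
#     num_employees = models.PositiveIntegerField()
#     num_chairs = models.PositiveIntegerField()
#     ceo = models.ForeignKey(Employee, related_name='company_ceo_set')
#     point_of_contact = models.ForeignKey(
#         Employee, related_name='company_point_of_contact_set', null=True)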
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse # noqa
from django import http
from django.test.utils import override_settings # noqa
from mox import IsA # noqa
from horizon import exceptions
from horizon.workflows import views
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
from openstack_dashboard.dashboards.admin.projects import workflows
INDEX_URL = reverse('horizon:admin:projects:index')
USER_ROLE_PREFIX = workflows.PROJECT_USER_MEMBER_SLUG + "_role_"
GROUP_ROLE_PREFIX = workflows.PROJECT_GROUP_MEMBER_SLUG + "_role_"
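# The membership workflow steps name their role-assignment form fields
# "<step slug>_role_<role id>"; these prefixes are used below to build and
# inspect that POST data for user and group assignments.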
class TenantsViewTests(test.BaseAdminViewTests):
@test.create_stubs({api.keystone: ('tenant_list',)})
def test_index(self):
api.keystone.tenant_list(IsA(http.HttpRequest),
domain=None,
paginate=True) \
.AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/projects/index.html')
self.assertItemsEqual(res.context['table'].data, self.tenants.list())
@test.create_stubs({api.keystone: ('tenant_list', )})
def test_index_with_domain_context(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
domain_tenants = [tenant for tenant in self.tenants.list()
if tenant.domain_id == domain.id]
api.keystone.tenant_list(IsA(http.HttpRequest),
domain=domain.id) \
.AndReturn(domain_tenants)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'admin/projects/index.html')
self.assertItemsEqual(res.context['table'].data, domain_tenants)
self.assertContains(res, "<em>test_domain:</em>")
class CreateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_project_info(self, project):
domain = self._get_default_domain()
project_info = {"name": project.name,
"description": project.description,
"enabled": project.enabled,
"domain": domain.id}
return project_info
def _get_workflow_fields(self, project):
domain = self._get_default_domain()
project_info = {"domain_id": domain.id,
"domain_name": domain.name,
"name": project.name,
"description": project.description,
"enabled": project.enabled}
return project_info
def _get_quota_info(self, quota):
cinder_quota = self.cinder_quotas.first()
neutron_quota = self.neutron_quotas.first()
quota_data = {}
for field in quotas.NOVA_QUOTA_FIELDS:
quota_data[field] = int(quota.get(field).limit)
for field in quotas.CINDER_QUOTA_FIELDS:
quota_data[field] = int(cinder_quota.get(field).limit)
for field in quotas.NEUTRON_QUOTA_FIELDS:
quota_data[field] = int(neutron_quota.get(field).limit)
return quota_data
def _get_workflow_data(self, project, quota):
project_info = self._get_workflow_fields(project)
quota_data = self._get_quota_info(quota)
project_info.update(quota_data)
return project_info
def _get_default_domain(self):
default_domain = self.domain
domain = {"id": self.request.session.get('domain_context',
default_domain.id),
"name": self.request.session.get('domain_context_name',
default_domain.name)}
return api.base.APIDictWrapper(domain)
def _get_all_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
@test.create_stubs({api.keystone: ('get_default_domain',
'get_default_role',
'user_list',
'group_list',
'role_list'),
quotas: ('get_default_quota_data',)})
def test_add_project_get(self):
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
api.keystone.role_list(IsA(http.HttpRequest)).AndReturn(roles)
self.mox.ReplayAll()
url = reverse('horizon:admin:projects:create')
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res, '<input type="hidden" name="subnet" '
'id="id_subnet" />', html=True)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.CreateProject.name)
step = workflow.get_step("createprojectinfoaction")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['injected_files'],
quota.get('injected_files').limit)
self.assertQuerysetEqual(workflow.steps,
['<CreateProjectInfo: createprojectinfoaction>',
'<UpdateProjectMembers: update_members>',
'<UpdateProjectGroups: update_group_members>',
'<UpdateProjectQuota: update_quotas>'])
def test_add_project_get_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_get()
@test.create_stubs({api.keystone: ('get_default_role',
'user_list',
'group_list',
'role_list',
'domain_get'),
api.neutron: ('is_extension_supported',
'tenant_quota_get'),
quotas: ('get_default_quota_data',)})
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_get_with_neutron(self):
quota = self.quotas.first()
neutron_quotas = self.neutron_quotas.first()
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndReturn(quota)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(neutron_quotas)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.roles.first())
api.keystone.user_list(IsA(http.HttpRequest), domain=None) \
.AndReturn(self.users.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
api.keystone.group_list(IsA(http.HttpRequest), domain=None) \
.AndReturn(self.groups.list())
api.keystone.role_list(IsA(http.HttpRequest)) \
.AndReturn(self.roles.list())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:admin:projects:create'))
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res, '<input name="subnet" id="id_subnet" '
'value="10" type="text" />', html=True)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.CreateProject.name)
step = workflow.get_step("createprojectinfoaction")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['subnet'],
neutron_quotas.get('subnet').limit)
@test.create_stubs({api.keystone: ('get_default_role',
'add_tenant_user_role',
'tenant_create',
'user_list',
'group_list',
'role_list',
'domain_get'),
quotas: ('get_default_quota_data',),
api.cinder: ('tenant_quota_update',),
api.nova: ('tenant_quota_update',)})
def test_add_project_post(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id)
for role in roles:
if GROUP_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
for group_id in ulist:
api.keystone.add_group_role(IsA(http.HttpRequest),
role=role.id,
group=group_id,
project=self.tenant.id)
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, quota_data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:admin:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_post_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_post()
@test.create_stubs({api.neutron: ('is_extension_supported',
'tenant_quota_update')})
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_add_project_post_with_neutron(self):
quota_data = self.neutron_quotas.first()
neutron_updated_quota = dict([(key, quota_data.get(key).limit)
for key in quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.tenant_quota_update(IsA(http.HttpRequest),
self.tenant.id,
**neutron_updated_quota)
self.test_add_project_post()
@test.create_stubs({api.keystone: ('user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',)})
def test_add_project_quota_defaults_error(self):
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_default_quota_data(IsA(http.HttpRequest)) \
.AndRaise(self.exceptions.nova)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
self.mox.ReplayAll()
url = reverse('horizon:admin:projects:create')
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
self.assertContains(res, "Unable to retrieve default quota values")
def test_add_project_quota_defaults_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_defaults_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',)})
def test_add_project_tenant_create_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
workflow_data = self._get_workflow_data(project, quota)
url = reverse('horizon:admin:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_tenant_create_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_tenant_create_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role',
'add_tenant_user_role'),
quotas: ('get_default_quota_data',),
api.nova: ('tenant_quota_update',)})
def test_add_project_quota_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id)
for role in roles:
if GROUP_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[GROUP_ROLE_PREFIX + role.id]
for group_id in ulist:
api.keystone.add_group_role(IsA(http.HttpRequest),
role=role.id,
group=group_id,
project=self.tenant.id)
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:admin:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_quota_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_quota_update_error()
@test.create_stubs({api.keystone: ('tenant_create',
'user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role',
'add_tenant_user_role'),
quotas: ('get_default_quota_data',),
api.cinder: ('tenant_quota_update',),
api.nova: ('tenant_quota_update',)})
def test_add_project_user_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
# handle
project_details = self._get_project_info(project)
quota_data = self._get_quota_info(quota)
api.keystone.tenant_create(IsA(http.HttpRequest), **project_details) \
.AndReturn(project)
workflow_data = {}
for role in roles:
if USER_ROLE_PREFIX + role.id in workflow_data:
ulist = workflow_data[USER_ROLE_PREFIX + role.id]
for user_id in ulist:
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user=user_id,
role=role.id) \
.AndRaise(self.exceptions.keystone)
break
break
nova_updated_quota = dict([(key, quota_data[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, quota_data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
workflow_data.update(self._get_workflow_data(project, quota))
url = reverse('horizon:admin:projects:create')
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_add_project_user_update_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_user_update_error()
@test.create_stubs({api.keystone: ('user_list',
'role_list',
'group_list',
'get_default_domain',
'get_default_role'),
quotas: ('get_default_quota_data',)})
def test_add_project_missing_field_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
default_domain = self._get_default_domain()
domain_id = default_domain.id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# init
api.keystone.get_default_domain(IsA(http.HttpRequest)) \
.AndReturn(default_domain)
quotas.get_default_quota_data(IsA(http.HttpRequest)).AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
self.mox.ReplayAll()
workflow_data = self._get_workflow_data(project, quota)
workflow_data["name"] = ""
url = reverse('horizon:admin:projects:create')
res = self.client.post(url, workflow_data)
self.assertContains(res, "field is required")
def test_add_project_missing_field_error_domain(self):
domain = self.domains.get(id="1")
self.setSessionValues(domain_context=domain.id,
domain_context_name=domain.name)
self.test_add_project_missing_field_error()
class UpdateProjectWorkflowTests(test.BaseAdminViewTests):
def _get_quota_info(self, quota):
cinder_quota = self.cinder_quotas.first()
neutron_quota = self.neutron_quotas.first()
quota_data = {}
for field in quotas.NOVA_QUOTA_FIELDS:
quota_data[field] = int(quota.get(field).limit)
for field in quotas.CINDER_QUOTA_FIELDS:
quota_data[field] = int(cinder_quota.get(field).limit)
for field in quotas.NEUTRON_QUOTA_FIELDS:
quota_data[field] = int(neutron_quota.get(field).limit)
return quota_data
def _get_all_users(self, domain_id):
if not domain_id:
users = self.users.list()
else:
users = [user for user in self.users.list()
if user.domain_id == domain_id]
return users
def _get_all_groups(self, domain_id):
if not domain_id:
groups = self.groups.list()
else:
groups = [group for group in self.groups.list()
if group.domain_id == domain_id]
return groups
def _get_proj_users(self, project_id):
return [user for user in self.users.list()
if user.project_id == project_id]
def _get_proj_groups(self, project_id):
return [group for group in self.groups.list()
if group.project_id == project_id]
@test.create_stubs({api.keystone: ('get_default_role',
'roles_for_user',
'tenant_get',
'domain_get',
'user_list',
'roles_for_group',
'group_list',
'role_list'),
quotas: ('get_tenant_quota_data',)})
def test_update_project_get(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
api.keystone.tenant_get(IsA(http.HttpRequest),
self.tenant.id, admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
for user in users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
for group in groups:
api.keystone.roles_for_group(IsA(http.HttpRequest),
group=group.id,
project=self.tenant.id) \
.AndReturn(roles)
self.mox.ReplayAll()
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
self.assertEqual(res.context['workflow'].name,
workflows.UpdateProject.name)
step = workflow.get_step("update_info")
self.assertEqual(step.action.initial['ram'], quota.get('ram').limit)
self.assertEqual(step.action.initial['injected_files'],
quota.get('injected_files').limit)
self.assertEqual(step.action.initial['name'], project.name)
self.assertEqual(step.action.initial['description'],
project.description)
self.assertQuerysetEqual(workflow.steps,
['<UpdateProjectInfo: update_info>',
'<UpdateProjectMembers: update_members>',
'<UpdateProjectGroups: update_group_members>',
'<UpdateProjectQuota: update_quotas>'])
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list'),
api.nova: ('tenant_quota_update',),
api.cinder: ('tenant_quota_update',),
quotas: ('get_tenant_quota_data',)})
def test_update_project_save(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
proj_users = self._get_proj_users(project.id)
groups = self._get_all_groups(domain_id)
proj_groups = self._get_proj_groups(project.id)
roles = self.roles.list()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest),
self.tenant.id, admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
for user in users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
for group in groups:
api.keystone.roles_for_group(IsA(http.HttpRequest),
group=group.id,
project=self.tenant.id) \
.AndReturn(roles)
workflow_data[USER_ROLE_PREFIX + "1"] = ['3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['2'] # member role
# Group assignment form data
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['2'] # member role
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndReturn(project)
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id).AndReturn(proj_users)
# admin user - try to remove all roles on current project, warning
api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
self.tenant.id) \
.AndReturn(roles)
# member user 1 - has role 1, will remove it
api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
self.tenant.id) \
.AndReturn((roles[0],))
# remove role 1
api.keystone.remove_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='2',
role='1')
# add role 2
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='2',
role='2')
# member user 3 - has role 2
api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
self.tenant.id) \
.AndReturn((roles[1],))
# remove role 2
api.keystone.remove_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='3',
role='2')
# add role 1
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='3',
role='1')
# Group assignments
api.keystone.group_list(IsA(http.HttpRequest),
domain=domain_id,
project=self.tenant.id).AndReturn(proj_groups)
# admin group - try to remove all roles on current project
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='1',
project=self.tenant.id) \
.AndReturn(roles)
for role in roles:
api.keystone.remove_group_role(IsA(http.HttpRequest),
role=role.id,
group='1',
project=self.tenant.id)
# member group 1 - has role 1, will remove it
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='2',
project=self.tenant.id) \
.AndReturn((roles[0],))
# remove role 1
api.keystone.remove_group_role(IsA(http.HttpRequest),
role='1',
group='2',
project=self.tenant.id)
# add role 2
api.keystone.add_group_role(IsA(http.HttpRequest),
role='2',
group='2',
project=self.tenant.id)
# member group 3 - has role 2
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='3',
project=self.tenant.id) \
.AndReturn((roles[1],))
# remove role 2
api.keystone.remove_group_role(IsA(http.HttpRequest),
role='2',
group='3',
project=self.tenant.id)
# add role 1
api.keystone.add_group_role(IsA(http.HttpRequest),
role='1',
group='3',
project=self.tenant.id)
nova_updated_quota = dict([(key, updated_quota[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota)
cinder_updated_quota = dict([(key, updated_quota[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
api.cinder.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**cinder_updated_quota)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=0, warning=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.neutron: ('is_extension_supported',
'tenant_quota_get',
'tenant_quota_update')})
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_update_project_save_with_neutron(self):
quota_data = self.neutron_quotas.first()
neutron_updated_quota = dict([(key, quota_data.get(key).limit)
for key in quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.MultipleTimes().AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota_data)
api.neutron.tenant_quota_update(IsA(http.HttpRequest),
self.tenant.id,
**neutron_updated_quota)
self.test_update_project_save()
@test.create_stubs({api.keystone: ('tenant_get',)})
def test_update_project_get_error(self):
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list'),
quotas: ('get_tenant_quota_data',),
api.nova: ('tenant_quota_update',)})
def test_update_project_tenant_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
for user in users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
role_ids = [role.id for role in roles]
if role_ids:
workflow_data.setdefault(USER_ROLE_PREFIX + role_ids[0], []) \
.append(user.id)
for group in groups:
api.keystone.roles_for_group(IsA(http.HttpRequest),
group=group.id,
project=self.tenant.id) \
.AndReturn(roles)
role_ids = [role.id for role in roles]
if role_ids:
workflow_data.setdefault(GROUP_ROLE_PREFIX + role_ids[0], []) \
.append(group.id)
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list'),
quotas: ('get_tenant_quota_data',),
api.nova: ('tenant_quota_update',)})
def test_update_project_quota_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
proj_users = self._get_proj_users(project.id)
groups = self._get_all_groups(domain_id)
proj_groups = self._get_proj_groups(project.id)
roles = self.roles.list()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
for user in users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
for group in groups:
api.keystone.roles_for_group(IsA(http.HttpRequest),
group=group.id,
project=self.tenant.id) \
.AndReturn(roles)
workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
# Group role assignment data
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota[0].limit = 444
quota[1].limit = -1
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndReturn(project)
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id).AndReturn(proj_users)
# admin user - try to remove all roles on current project, warning
api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
self.tenant.id) \
.AndReturn(roles)
# member user 1 - has role 1, will remove it
api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
self.tenant.id) \
.AndReturn((roles[1],))
# member user 3 - has role 2
api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
self.tenant.id) \
.AndReturn((roles[0],))
# add role 2
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='3',
role='2')
# Group assignment
api.keystone.group_list(IsA(http.HttpRequest),
domain=domain_id,
project=self.tenant.id).AndReturn(proj_groups)
# admin group 1 - try to remove all roles on current project
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='1',
project=self.tenant.id) \
.AndReturn(roles)
# member group 1 - has no change
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='2',
project=self.tenant.id) \
.AndReturn((roles[1],))
# member group 3 - has role 1
api.keystone.roles_for_group(IsA(http.HttpRequest),
group='3',
project=self.tenant.id) \
.AndReturn((roles[0],))
# add role 2
api.keystone.add_group_role(IsA(http.HttpRequest),
role='2',
group='3',
project=self.tenant.id)
nova_updated_quota = dict([(key, updated_quota[key]) for key in
quotas.NOVA_QUOTA_FIELDS])
api.nova.tenant_quota_update(IsA(http.HttpRequest),
project.id,
**nova_updated_quota) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=1, warning=0)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('tenant_get',
'domain_get',
'tenant_update',
'get_default_role',
'roles_for_user',
'remove_tenant_user_role',
'add_tenant_user_role',
'user_list',
'roles_for_group',
'remove_group_role',
'add_group_role',
'group_list',
'role_list'),
quotas: ('get_tenant_quota_data',)})
def test_update_project_member_update_error(self):
project = self.tenants.first()
quota = self.quotas.first()
default_role = self.roles.first()
domain_id = project.domain_id
users = self._get_all_users(domain_id)
proj_users = self._get_proj_users(project.id)
groups = self._get_all_groups(domain_id)
roles = self.roles.list()
# get/init
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(default_role)
api.keystone.user_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(users)
api.keystone.role_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(roles)
api.keystone.group_list(IsA(http.HttpRequest), domain=domain_id) \
.AndReturn(groups)
workflow_data = {}
for user in users:
api.keystone.roles_for_user(IsA(http.HttpRequest),
user.id,
self.tenant.id).AndReturn(roles)
for group in groups:
api.keystone.roles_for_group(IsA(http.HttpRequest),
group=group.id,
project=self.tenant.id) \
.AndReturn(roles)
workflow_data[USER_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[USER_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
workflow_data[GROUP_ROLE_PREFIX + "1"] = ['1', '3'] # admin role
workflow_data[GROUP_ROLE_PREFIX + "2"] = ['1', '2', '3'] # member role
# update some fields
project._info["domain_id"] = domain_id
project._info["name"] = "updated name"
project._info["description"] = "updated description"
quota.metadata_items = 444
quota.volumes = 444
updated_project = {"name": project._info["name"],
"description": project._info["description"],
"enabled": project.enabled}
updated_quota = self._get_quota_info(quota)
# handle
api.keystone.tenant_update(IsA(http.HttpRequest),
project.id,
**updated_project) \
.AndReturn(project)
api.keystone.user_list(IsA(http.HttpRequest),
project=self.tenant.id).AndReturn(proj_users)
# admin user - try to remove all roles on current project, warning
api.keystone.roles_for_user(IsA(http.HttpRequest), '1',
self.tenant.id).AndReturn(roles)
# member user 1 - has role 1, will remove it
api.keystone.roles_for_user(IsA(http.HttpRequest), '2',
self.tenant.id).AndReturn((roles[1],))
# member user 3 - has role 2
api.keystone.roles_for_user(IsA(http.HttpRequest), '3',
self.tenant.id).AndReturn((roles[0],))
# add role 2
api.keystone.add_tenant_user_role(IsA(http.HttpRequest),
project=self.tenant.id,
user='3',
role='2')\
.AndRaise(self.exceptions.keystone)
self.mox.ReplayAll()
# submit form data
project_data = {"domain_id": project._info["domain_id"],
"name": project._info["name"],
"id": project.id,
"description": project._info["description"],
"enabled": project.enabled}
workflow_data.update(project_data)
workflow_data.update(updated_quota)
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
res = self.client.post(url, workflow_data)
self.assertNoFormErrors(res)
self.assertMessageCount(error=1, warning=0)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.keystone: ('get_default_role',
'tenant_get',
'domain_get'),
quotas: ('get_tenant_quota_data',)})
def test_update_project_when_default_role_does_not_exist(self):
project = self.tenants.first()
domain_id = project.domain_id
quota = self.quotas.first()
api.keystone.get_default_role(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(None) # Default role doesn't exist
api.keystone.tenant_get(IsA(http.HttpRequest), self.tenant.id,
admin=True) \
.AndReturn(project)
api.keystone.domain_get(IsA(http.HttpRequest), domain_id) \
.AndReturn(self.domain)
quotas.get_tenant_quota_data(IsA(http.HttpRequest),
tenant_id=self.tenant.id) \
.AndReturn(quota)
self.mox.ReplayAll()
url = reverse('horizon:admin:projects:update',
args=[self.tenant.id])
try:
# Avoid the log message in the test output when the workflow's
# step action cannot be instantiated
logging.disable(logging.ERROR)
with self.assertRaises(exceptions.NotFound):
self.client.get(url)
finally:
logging.disable(logging.NOTSET)
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient import exceptions as keystone_exceptions
import mock
from oslo_config import cfg
from rally import consts
from rally import exceptions
from rally import objects
from rally import osclients
from tests.unit import fakes
from tests.unit import test
class CachedTestCase(test.TestCase):
def test_cached(self):
foo_client = mock.Mock(
__name__="foo_client",
side_effect=lambda ins, *args, **kw: (args, kw))
ins = mock.Mock(cache={})
cached = osclients.cached(foo_client)
self.assertEqual(((), {}), cached(ins))
self.assertEqual({"foo_client": ((), {})}, ins.cache)
self.assertEqual((("foo",), {"bar": "spam"}),
cached(ins, "foo", bar="spam"))
self.assertEqual(
{"foo_client": ((), {}),
"foo_client('foo',){'bar': 'spam'}": (("foo",),
{"bar": "spam"})},
ins.cache)
ins.cache["foo_client('foo',){'bar': 'spam'}"] = "foo_cached"
self.assertEqual(
"foo_cached", cached(ins, "foo", bar="spam"))
class TestCreateKeystoneClient(test.TestCase):
def setUp(self):
super(TestCreateKeystoneClient, self).setUp()
self.kwargs = {"auth_url": "http://auth_url", "username": "user",
"password": "password", "tenant_name": "tenant",
"https_insecure": False, "https_cacert": None}
def test_create_keystone_client_v2(self):
mock_keystone = mock.MagicMock()
fake_keystoneclient = mock.MagicMock()
mock_keystone.v2_0.client.Client.return_value = fake_keystoneclient
mock_discover = mock.MagicMock(
version_data=mock.MagicMock(return_value=[{"version": [2]}]))
mock_keystone.discover.Discover.return_value = mock_discover
with mock.patch.dict("sys.modules",
{"keystoneclient": mock_keystone,
"keystoneclient.v2_0": mock_keystone.v2_0}):
client = osclients.create_keystone_client(self.kwargs)
mock_discover.version_data.assert_called_once_with()
self.assertEqual(fake_keystoneclient, client)
mock_keystone.v2_0.client.Client.assert_called_once_with(
**self.kwargs)
def test_create_keystone_client_v3(self):
mock_keystone = mock.MagicMock()
fake_keystoneclient = mock.MagicMock()
mock_keystone.v3.client.Client.return_value = fake_keystoneclient
mock_discover = mock.MagicMock(
version_data=mock.MagicMock(return_value=[{"version": [3]}]))
mock_keystone.discover.Discover.return_value = mock_discover
with mock.patch.dict("sys.modules",
{"keystoneclient": mock_keystone,
"keystoneclient.v3": mock_keystone.v3}):
client = osclients.create_keystone_client(self.kwargs)
mock_discover.version_data.assert_called_once_with()
self.assertEqual(fake_keystoneclient, client)
mock_keystone.v3.client.Client.assert_called_once_with(
**self.kwargs)
def test_create_keystone_client_version_not_found(self):
mock_keystone = mock.MagicMock()
mock_discover = mock.MagicMock(
version_data=mock.MagicMock(return_value=[{"version": [100500]}]))
mock_keystone.discover.Discover.return_value = mock_discover
with mock.patch.dict("sys.modules", {"keystoneclient": mock_keystone}):
self.assertRaises(exceptions.RallyException,
osclients.create_keystone_client, self.kwargs)
mock_discover.version_data.assert_called_once_with()
class OSClientsTestCase(test.TestCase):
def setUp(self):
super(OSClientsTestCase, self).setUp()
self.endpoint = objects.Endpoint("http://auth_url", "use", "pass",
"tenant")
self.clients = osclients.Clients(self.endpoint)
self.fake_keystone = fakes.FakeKeystoneClient()
self.fake_keystone.auth_token = mock.MagicMock()
self.service_catalog = self.fake_keystone.service_catalog
self.service_catalog.url_for = mock.MagicMock()
keystone_patcher = mock.patch("rally.osclients.create_keystone_client")
self.mock_create_keystone_client = keystone_patcher.start()
self.addCleanup(keystone_patcher.stop)
self.mock_create_keystone_client.return_value = self.fake_keystone
def tearDown(self):
super(OSClientsTestCase, self).tearDown()
def test_create_from_env(self):
with mock.patch.dict("os.environ",
{"OS_AUTH_URL": "foo_auth_url",
"OS_USERNAME": "foo_username",
"OS_PASSWORD": "foo_password",
"OS_TENANT_NAME": "foo_tenant_name",
"OS_REGION_NAME": "foo_region_name"}):
clients = osclients.Clients.create_from_env()
self.assertEqual("foo_auth_url", clients.endpoint.auth_url)
self.assertEqual("foo_username", clients.endpoint.username)
self.assertEqual("foo_password", clients.endpoint.password)
self.assertEqual("foo_tenant_name", clients.endpoint.tenant_name)
self.assertEqual("foo_region_name", clients.endpoint.region_name)
def test_keystone(self):
self.assertNotIn("keystone", self.clients.cache)
client = self.clients.keystone()
self.assertEqual(client, self.fake_keystone)
endpoint = {"timeout": cfg.CONF.openstack_client_http_timeout,
"insecure": False, "cacert": None}
kwargs = self.endpoint.to_dict()
kwargs.update(endpoint.items())
self.mock_create_keystone_client.assert_called_once_with(kwargs)
self.assertEqual(self.fake_keystone, self.clients.cache["keystone"])
@mock.patch("rally.osclients.Clients.keystone")
def test_verified_keystone_user_not_admin(self, mock_clients_keystone):
mock_clients_keystone.return_value = fakes.FakeKeystoneClient()
mock_clients_keystone.return_value.auth_ref.role_names = ["notadmin"]
self.assertRaises(exceptions.InvalidAdminException,
self.clients.verified_keystone)
@mock.patch("rally.osclients.Clients.keystone")
def test_verified_keystone_unauthorized(self, mock_clients_keystone):
mock_clients_keystone.return_value = fakes.FakeKeystoneClient()
mock_clients_keystone.side_effect = keystone_exceptions.Unauthorized
self.assertRaises(exceptions.InvalidEndpointsException,
self.clients.verified_keystone)
@mock.patch("rally.osclients.Clients.keystone")
def test_verified_keystone_unreachable(self, mock_clients_keystone):
mock_clients_keystone.return_value = fakes.FakeKeystoneClient()
mock_clients_keystone.side_effect = (
keystone_exceptions.AuthorizationFailure
)
self.assertRaises(exceptions.HostUnreachableException,
self.clients.verified_keystone)
def test_nova(self):
fake_nova = fakes.FakeNovaClient()
mock_nova = mock.MagicMock()
mock_nova.client.Client.return_value = fake_nova
self.assertNotIn("nova", self.clients.cache)
with mock.patch.dict("sys.modules", {"novaclient": mock_nova}):
client = self.clients.nova()
self.assertEqual(fake_nova, client)
self.service_catalog.url_for.assert_called_once_with(
service_type="compute",
endpoint_type=consts.EndpointType.PUBLIC,
region_name=self.endpoint.region_name)
mock_nova.client.Client.assert_called_once_with(
"2",
auth_token=self.fake_keystone.auth_token,
http_log_debug=False,
timeout=cfg.CONF.openstack_client_http_timeout,
insecure=False, cacert=None,
username=self.endpoint.username,
api_key=self.endpoint.password,
project_id=self.endpoint.tenant_name,
auth_url=self.endpoint.auth_url)
client.set_management_url.assert_called_once_with(
self.service_catalog.url_for.return_value)
self.assertEqual(fake_nova, self.clients.cache["nova"])
def test_neutron(self):
fake_neutron = fakes.FakeNeutronClient()
mock_neutron = mock.MagicMock()
mock_neutron.client.Client.return_value = fake_neutron
self.assertNotIn("neutron", self.clients.cache)
with mock.patch.dict("sys.modules", {"neutronclient.neutron":
mock_neutron}):
client = self.clients.neutron()
self.assertEqual(fake_neutron, client)
kw = {
"token": self.fake_keystone.auth_token,
"endpoint_url": self.service_catalog.url_for.return_value,
"timeout": cfg.CONF.openstack_client_http_timeout,
"insecure": self.endpoint.insecure,
"ca_cert": self.endpoint.cacert,
"username": self.endpoint.username,
"password": self.endpoint.password,
"tenant_name": self.endpoint.tenant_name,
"auth_url": self.endpoint.auth_url
}
self.service_catalog.url_for.assert_called_once_with(
service_type="network",
endpoint_type=consts.EndpointType.PUBLIC,
region_name=self.endpoint.region_name)
mock_neutron.client.Client.assert_called_once_with("2.0", **kw)
self.assertEqual(fake_neutron, self.clients.cache["neutron"])
def test_glance(self):
fake_glance = fakes.FakeGlanceClient()
mock_glance = mock.MagicMock()
mock_glance.Client = mock.MagicMock(return_value=fake_glance)
with mock.patch.dict("sys.modules", {"glanceclient": mock_glance}):
self.assertNotIn("glance", self.clients.cache)
client = self.clients.glance()
self.assertEqual(fake_glance, client)
kw = {"endpoint": self.service_catalog.url_for.return_value,
"token": self.fake_keystone.auth_token,
"timeout": cfg.CONF.openstack_client_http_timeout,
"insecure": False, "cacert": None}
self.service_catalog.url_for.assert_called_once_with(
service_type="image",
endpoint_type=consts.EndpointType.PUBLIC,
region_name=self.endpoint.region_name)
mock_glance.Client.assert_called_once_with("1", **kw)
self.assertEqual(fake_glance, self.clients.cache["glance"])
def test_cinder(self):
fake_cinder = mock.MagicMock(client=fakes.FakeCinderClient())
mock_cinder = mock.MagicMock()
mock_cinder.client.Client.return_value = fake_cinder
self.assertNotIn("cinder", self.clients.cache)
with mock.patch.dict("sys.modules", {"cinderclient": mock_cinder}):
client = self.clients.cinder()
self.assertEqual(fake_cinder, client)
self.service_catalog.url_for.assert_called_once_with(
service_type="volume",
endpoint_type=consts.EndpointType.PUBLIC,
region_name=self.endpoint.region_name)
mock_cinder.client.Client.assert_called_once_with(
"1",
http_log_debug=False,
timeout=cfg.CONF.openstack_client_http_timeout,
insecure=False, cacert=None,
username=self.endpoint.username,
api_key=self.endpoint.password,
project_id=self.endpoint.tenant_name,
auth_url=self.endpoint.auth_url)
self.assertEqual(fake_cinder.client.management_url,
self.service_catalog.url_for.return_value)
self.assertEqual(fake_cinder.client.auth_token,
self.fake_keystone.auth_token)
self.assertEqual(fake_cinder, self.clients.cache["cinder"])
def test_manila(self):
mock_manila = mock.MagicMock()
self.assertNotIn("manila", self.clients.cache)
with mock.patch.dict("sys.modules", {"manilaclient": mock_manila}):
client = self.clients.manila()
self.assertEqual(mock_manila.client.Client.return_value, client)
self.service_catalog.url_for.assert_called_once_with(
service_type="share",
endpoint_type=consts.EndpointType.PUBLIC,
region_name=self.endpoint.region_name)
mock_manila.client.Client.assert_called_once_with(
"1",
http_log_debug=False,
timeout=cfg.CONF.openstack_client_http_timeout,
insecure=False, cacert=None,
username=self.endpoint.username,
api_key=self.endpoint.password,
region_name=self.endpoint.region_name,
project_name=self.endpoint.tenant_name,
auth_url=self.endpoint.auth_url)
self.assertEqual(
mock_manila.client.Client.return_value.client.management_url,
self.service_catalog.url_for.return_value)
self.assertEqual(
mock_manila.client.Client.return_value.client.auth_token,
self.fake_keystone.auth_token)
self.assertEqual(
mock_manila.client.Client.return_value,
self.clients.cache["manila"])
def test_ceilometer(self):
fake_ceilometer = fakes.FakeCeilometerClient()
mock_ceilometer = mock.MagicMock()
mock_ceilometer.client.get_client = mock.MagicMock(
return_value=fake_ceilometer)
self.assertNotIn("ceilometer", self.clients.cache)
with mock.patch.dict("sys.modules",
{"ceilometerclient": mock_ceilometer}):
client = self.clients.ceilometer()
self.assertEqual(fake_ceilometer, client)
self.service_catalog.url_for.assert_called_once_with(
service_type="metering",
endpoint_type=consts.EndpointType.PUBLIC,
region_name=self.endpoint.region_name)
kw = {"os_endpoint": self.service_catalog.url_for.return_value,
"token": self.fake_keystone.auth_token,
"timeout": cfg.CONF.openstack_client_http_timeout,
"insecure": False, "cacert": None,
"username": self.endpoint.username,
"password": self.endpoint.password,
"tenant_name": self.endpoint.tenant_name,
"auth_url": self.endpoint.auth_url
}
mock_ceilometer.client.get_client.assert_called_once_with("2",
**kw)
self.assertEqual(fake_ceilometer,
self.clients.cache["ceilometer"])
def test_ironic(self):
fake_ironic = fakes.FakeIronicClient()
mock_ironic = mock.MagicMock()
mock_ironic.client.get_client = mock.MagicMock(
return_value=fake_ironic)
self.assertNotIn("ironic", self.clients.cache)
with mock.patch.dict("sys.modules", {"ironicclient": mock_ironic}):
client = self.clients.ironic()
self.assertEqual(fake_ironic, client)
self.service_catalog.url_for.assert_called_once_with(
service_type="baremetal",
endpoint_type=consts.EndpointType.PUBLIC,
region_name=self.endpoint.region_name)
kw = {
"os_auth_token": self.fake_keystone.auth_token,
"ironic_url": self.service_catalog.url_for.return_value,
"timeout": cfg.CONF.openstack_client_http_timeout,
"insecure": self.endpoint.insecure,
"cacert": self.endpoint.cacert
}
mock_ironic.client.get_client.assert_called_once_with("1", **kw)
self.assertEqual(fake_ironic, self.clients.cache["ironic"])
def test_sahara(self):
fake_sahara = fakes.FakeSaharaClient()
mock_sahara = mock.MagicMock()
mock_sahara.client.Client = mock.MagicMock(return_value=fake_sahara)
self.assertNotIn("sahara", self.clients.cache)
with mock.patch.dict("sys.modules", {"saharaclient": mock_sahara}):
client = self.clients.sahara()
self.assertEqual(fake_sahara, client)
kw = {
"username": self.endpoint.username,
"api_key": self.endpoint.password,
"project_name": self.endpoint.tenant_name,
"auth_url": self.endpoint.auth_url
}
mock_sahara.client.Client.assert_called_once_with("1.1", **kw)
self.assertEqual(fake_sahara, self.clients.cache["sahara"])
def test_zaqar(self):
fake_zaqar = fakes.FakeZaqarClient()
mock_zaqar = mock.MagicMock()
mock_zaqar.client.Client = mock.MagicMock(return_value=fake_zaqar)
self.assertNotIn("zaqar", self.clients.cache)
with mock.patch.dict("sys.modules", {"zaqarclient.queues":
mock_zaqar}):
client = self.clients.zaqar()
self.assertEqual(fake_zaqar, client)
self.service_catalog.url_for.assert_called_once_with(
service_type="messaging",
endpoint_type=consts.EndpointType.PUBLIC,
region_name=self.endpoint.region_name)
fake_zaqar_url = self.service_catalog.url_for.return_value
conf = {"auth_opts": {"backend": "keystone", "options": {
"os_username": self.endpoint.username,
"os_password": self.endpoint.password,
"os_project_name": self.endpoint.tenant_name,
"os_project_id": self.fake_keystone.auth_tenant_id,
"os_auth_url": self.endpoint.auth_url,
"insecure": self.endpoint.insecure,
}}}
mock_zaqar.client.Client.assert_called_once_with(
url=fake_zaqar_url, version=1.1, conf=conf)
self.assertEqual(fake_zaqar, self.clients.cache["zaqar"])
def test_trove(self):
fake_trove = fakes.FakeTroveClient()
mock_trove = mock.MagicMock()
mock_trove.client.Client = mock.MagicMock(return_value=fake_trove)
self.assertNotIn("trove", self.clients.cache)
with mock.patch.dict("sys.modules", {"troveclient": mock_trove}):
client = self.clients.trove()
self.assertEqual(fake_trove, client)
kw = {
"username": self.endpoint.username,
"api_key": self.endpoint.password,
"project_id": self.endpoint.tenant_name,
"auth_url": self.endpoint.auth_url,
"region_name": self.endpoint.region_name,
"timeout": cfg.CONF.openstack_client_http_timeout,
"insecure": self.endpoint.insecure,
"cacert": self.endpoint.cacert
}
mock_trove.client.Client.assert_called_once_with("1.0", **kw)
self.assertEqual(fake_trove, self.clients.cache["trove"])
def test_mistral(self):
fake_mistral = fakes.FakeMistralClient()
mock_mistral = mock.Mock()
mock_mistral.client.client.return_value = fake_mistral
self.assertNotIn("mistral", self.clients.cache)
with mock.patch.dict(
"sys.modules", {"mistralclient": mock_mistral,
"mistralclient.api": mock_mistral}):
client = self.clients.mistral()
self.assertEqual(fake_mistral, client)
self.service_catalog.url_for.assert_called_once_with(
service_type="workflowv2",
endpoint_type=consts.EndpointType.PUBLIC,
region_name=self.endpoint.region_name
)
fake_mistral_url = self.service_catalog.url_for.return_value
mock_mistral.client.client.assert_called_once_with(
mistral_url=fake_mistral_url,
service_type="workflowv2",
auth_token=self.fake_keystone.auth_token
)
self.assertEqual(fake_mistral, self.clients.cache["mistral"])
def test_swift(self):
fake_swift = fakes.FakeSwiftClient()
mock_swift = mock.MagicMock()
mock_swift.client.Connection = mock.MagicMock(return_value=fake_swift)
self.assertNotIn("swift", self.clients.cache)
with mock.patch.dict("sys.modules", {"swiftclient": mock_swift}):
client = self.clients.swift()
self.assertEqual(client, fake_swift)
self.service_catalog.url_for.assert_called_once_with(
service_type="object-store",
endpoint_type=consts.EndpointType.PUBLIC,
region_name=self.endpoint.region_name)
kw = {"retries": 1,
"preauthurl": self.service_catalog.url_for.return_value,
"preauthtoken": self.fake_keystone.auth_token,
"insecure": False,
"cacert": None,
"user": self.endpoint.username,
"key": self.endpoint.password,
"tenant_name": self.endpoint.tenant_name,
"authurl": self.endpoint.auth_url
}
mock_swift.client.Connection.assert_called_once_with(**kw)
self.assertEqual(self.clients.cache["swift"], fake_swift)
def test_ec2(self):
mock_boto = mock.Mock()
self.service_catalog.url_for.return_value = "http://fake.to:1/fake"
self.fake_keystone.ec2 = mock.Mock()
self.fake_keystone.ec2.create.return_value = mock.Mock(
access="fake_access", secret="fake_secret")
fake_ec2 = fakes.FakeEC2Client()
mock_boto.connect_ec2_endpoint.return_value = fake_ec2
self.assertNotIn("ec2", self.clients.cache)
with mock.patch.dict("sys.modules", {"boto": mock_boto}):
client = self.clients.ec2()
self.assertEqual(fake_ec2, client)
self.service_catalog.url_for.assert_called_once_with(
service_type="ec2",
endpoint_type=consts.EndpointType.PUBLIC,
region_name=self.endpoint.region_name)
kw = {
"url": "http://fake.to:1/fake",
"aws_access_key_id": "fake_access",
"aws_secret_access_key": "fake_secret",
"is_secure": self.endpoint.insecure,
}
mock_boto.connect_ec2_endpoint.assert_called_once_with(**kw)
self.assertEqual(fake_ec2, self.clients.cache["ec2"])
@mock.patch("rally.osclients.Clients.keystone")
def test_services(self, mock_clients_keystone):
available_services = {consts.ServiceType.IDENTITY: {},
consts.ServiceType.COMPUTE: {},
"unknown_service": {}}
mock_clients_keystone.return_value = mock.Mock(
service_catalog=mock.Mock(
get_endpoints=lambda: available_services))
clients = osclients.Clients(self.endpoint)
self.assertEqual(
{consts.ServiceType.IDENTITY: consts.Service.KEYSTONE,
consts.ServiceType.COMPUTE: consts.Service.NOVA},
clients.services())
def test_murano(self):
fake_murano = fakes.FakeMuranoClient()
mock_murano = mock.Mock()
mock_murano.client.Client.return_value = fake_murano
self.assertNotIn("murano", self.clients.cache)
with mock.patch.dict("sys.modules", {"muranoclient": mock_murano}):
client = self.clients.murano()
self.assertEqual(fake_murano, client)
self.service_catalog.url_for.assert_called_once_with(
service_type="application_catalog",
endpoint_type=consts.EndpointType.PUBLIC,
region_name=self.endpoint.region_name
)
kw = {"endpoint": self.service_catalog.url_for.return_value,
"token": self.fake_keystone.auth_token}
mock_murano.client.Client.assert_called_once_with("1", **kw)
self.assertEqual(fake_murano, self.clients.cache["murano"])
@mock.patch("rally.osclients.cached")
def test_register(self, mock_cached):
client_func = mock.Mock(return_value="foo_client")
cached_client_func = mock.Mock(return_value="cached_foo_client")
mock_cached.return_value = cached_client_func
clients = osclients.Clients(mock.Mock())
self.assertFalse(hasattr(clients, "foo"))
func = osclients.Clients.register("foo")(client_func)
mock_cached.assert_called_once_with(client_func)
self.assertEqual("cached_foo_client", clients.foo())
self.assertEqual(client_func, func)
self.assertEqual(cached_client_func, clients.foo)
# Call second time with same name
self.assertRaises(ValueError,
osclients.Clients.register("foo"), client_func)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Apply graph_transforms tool to MetaGraphDefs.
@@meta_graph_transform
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re as _re
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2 as _meta_graph_pb2
from tensorflow.python.client import session as _session
from tensorflow.python.framework import graph_util as _graph_util
from tensorflow.python.framework import importer as _importer
from tensorflow.python.framework import ops as _ops
from tensorflow.python.platform import tf_logging as _logging
from tensorflow.python.saved_model import constants as _saved_model_constants
from tensorflow.python.training import saver as _saver_lib
from tensorflow.python.util import compat as _compat
from tensorflow.tools import graph_transforms as _graph_transforms
_FREEZE_GRAPH_TRANSFORM = 'freeze_graph'
_SPARSIFY_GATHER_TRANSFORM = 'sparsify_gather'
def _op_name(tensor_name):
"""Get the op name from a tensor name."""
# control dependency inputs start with ^
if tensor_name[0] == '^':
tensor_name = tensor_name[1:]
if ':' in tensor_name:
op_name, _ = tensor_name.split(':')
return op_name
return tensor_name
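# Illustrative examples (not part of the original module):
#   _op_name('^table_init') == 'table_init'
#   _op_name('logits:0') == 'logits'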
def _get_shared_init_op(initializer_names):
"""Obtain the shared init op name, if it exists.
Args:
initializer_names: Dictionary of the "infrastructural" nodes (initializers,
save and restore ops, etc.). The keys in this dictionary
indicate the collection where these nodes were obtained from.
Returns:
    A string indicating the shared init op name, or None if none exists.
"""
return_value = initializer_names.get(_saved_model_constants.MAIN_OP_KEY, None)
if not return_value:
return_value = initializer_names.get(
_saved_model_constants.LEGACY_INIT_OP_KEY, None)
return str(return_value[0]) if return_value else None
def _gtt_transforms(graph_def, input_names, output_names, initializer_names,
transforms):
"""Pass through gtt transforms, applying them to the graph_def.
Args:
graph_def: A GraphDef proto to be transformed.
input_names: Names of input nodes.
output_names: Names of output nodes.
initializer_names: Dictionary of the "infrastructural" nodes (initializers,
save and restore ops, etc.) that should be retained even if they are not
transitively reachable from output nodes. The keys in this dictionary
indicate the collection where these nodes were obtained from.
transforms: A list of strings naming the graph transforms to be applied in
order.
Returns:
The transformed GraphDef.
"""
if not transforms:
transformed_graph_def = _graph_pb2.GraphDef()
transformed_graph_def.CopyFrom(graph_def)
return transformed_graph_def
initializer_names_flat = sorted(
[k for l in initializer_names.values() for k in l])
all_output_names = output_names + initializer_names_flat
return _graph_transforms.TransformGraph(graph_def, input_names,
all_output_names, transforms)
def _freeze_transform(graph_def, output_names, initializer_names, saver_def,
checkpoint_path):
"""Handle the freeze transform.
Determine which initializer nodes should be retained by the freeze transform.
Retain those nodes and return an updated dictionary containing them.
Args:
graph_def: A GraphDef proto to be transformed.
output_names: Names of output nodes.
initializer_names: Dictionary of the "infrastructural" nodes (initializers,
save and restore ops, etc.). The keys in this dictionary
indicate the collection where these nodes were obtained from.
saver_def: A SaverDef proto used for restoring a checkpoint during freezing,
if needed (default None).
checkpoint_path: A path to a checkpoint to restore during freezing,
if needed (default None).
Returns:
A tuple containing the GraphDef and a Dict of pruned initializer nodes.
"""
table_initializers = initializer_names.get(_ops.GraphKeys.TABLE_INITIALIZERS,
[])
shared_init_op = _get_shared_init_op(initializer_names)
graph_def = _freeze_graph_with_def_protos(graph_def, output_names,
table_initializers, shared_init_op,
saver_def, checkpoint_path)
pruned_initializer_names = {}
# Freeze graph prunes all initializers and shared init nodes that are not
# explicitly maintained. Create new initializer_names dictionary to reflect
# this.
if table_initializers:
pruned_initializer_names[_ops.GraphKeys.TABLE_INITIALIZERS] = (
table_initializers)
if _saved_model_constants.LEGACY_INIT_OP_KEY in initializer_names:
pruned_initializer_names[_saved_model_constants.LEGACY_INIT_OP_KEY] = (
initializer_names[_saved_model_constants.LEGACY_INIT_OP_KEY])
if _saved_model_constants.MAIN_OP_KEY in initializer_names:
pruned_initializer_names[_saved_model_constants.MAIN_OP_KEY] = (
initializer_names[_saved_model_constants.MAIN_OP_KEY])
return (graph_def, pruned_initializer_names)
def _clean_save_and_restore(graph_def, op, removed_op_names):
"""Clean the specified save and restore op.
Updates the dtypes attribute of the save / restore op and the associated name
and shape tensors to remove entries for variables that have been removed.
Args:
graph_def: A GraphDef proto to be transformed.
op: The save or restore op to update.
removed_op_names: List of op names that have been removed.
"""
name = op.name + '/tensor_names'
shape = op.name + '/shape_and_slices'
name_op = _find_op(graph_def, name)
shape_op = _find_op(graph_def, shape)
name_op_value_tensor = name_op.attr['value'].tensor
shape_op_value_tensor = shape_op.attr['value'].tensor
names = []
shapes = []
dtypes = []
for index, value in enumerate(name_op_value_tensor.string_val):
if not _is_removed(_compat.as_str(value), removed_op_names):
names.append(value)
shapes.append(shape_op_value_tensor.string_val[index])
dtypes.append(op.attr['dtypes'].list.type[index])
name_op_value_tensor.string_val[:] = names
name_op_value_tensor.tensor_shape.dim[0].size = len(names)
shape_op_value_tensor.string_val[:] = shapes
shape_op_value_tensor.tensor_shape.dim[0].size = len(shapes)
op.attr['dtypes'].list.type[:] = dtypes
if not name_op.attr['_output_shapes'].list.shape:
name_op.attr['_output_shapes'].list.shape.add()
name_op.attr['_output_shapes'].list.shape[0].dim.add()
name_op.attr['_output_shapes'].list.shape[0].dim[0].size = len(names)
if not shape_op.attr['_output_shapes'].list.shape:
shape_op.attr['_output_shapes'].list.shape.add()
shape_op.attr['_output_shapes'].list.shape[0].dim.add()
shape_op.attr['_output_shapes'].list.shape[0].dim[0].size = len(shapes)
def _sparsify_gather_transform(graph_def, input_names, output_names,
initializer_names, checkpoint_path):
"""Handle the sparsify gather transform.
Provides the transform the checkpoint and keeps track of the newly created
initializer nodes.
Args:
graph_def: A GraphDef proto to be transformed.
input_names: Names of input nodes.
output_names: Names of output nodes.
initializer_names: Dictionary of the "infrastructural" nodes (initializers,
save and restore ops, etc.). The keys in this dictionary
indicate the collection where these nodes were obtained from.
checkpoint_path: A path to a checkpoint.
Returns:
A tuple containing the GraphDef and a Dict of updated initializer nodes.
Raises:
ValueError: if the restore_op_name does not have the expected format.
"""
# Ensure that sparsify_shared_init_op is unique.
  sparsify_shared_init_op = 'sparsify_gather_init_op'
while _find_op(graph_def, sparsify_shared_init_op):
sparsify_shared_init_op += '_1'
input_flag = ''
if checkpoint_path:
input_flag = 'input_checkpoint="%s", ' % checkpoint_path
sparsify_cmd = [
'sparsify_gather(%sgroup_init_node="%s")' % (input_flag,
sparsify_shared_init_op)
]
starting_op_names = [node.name for node in graph_def.node]
graph_def = _gtt_transforms(graph_def, input_names, output_names,
initializer_names, sparsify_cmd)
ending_op_names = [node.name for node in graph_def.node]
removed_op_names = list(set(starting_op_names) - set(ending_op_names))
removed_op_names.sort()
for op_index, op_name in enumerate(removed_op_names):
op_name_parts = op_name.rsplit('/', 1)
# Remove part to get the checkpoint names used by the saver.
if len(op_name_parts) == 2 and op_name_parts[1].startswith('part_'):
removed_op_names[op_index] = op_name_parts[0]
else:
removed_op_names[op_index] = op_name
# Obtain newly created table inits from gtt sparsify transform.
added_table_inits = []
for index, node in enumerate(graph_def.node):
if node.name == sparsify_shared_init_op:
added_table_inits = [n.lstrip('^') for n in node.input]
table_initializers = initializer_names.get(
_ops.GraphKeys.TABLE_INITIALIZERS, [])
table_initializers.extend(added_table_inits)
initializer_names[_ops.GraphKeys.TABLE_INITIALIZERS] = table_initializers
del graph_def.node[index]
break
# Add inits to existing shared init op.
node = _find_op(graph_def, _get_shared_init_op(initializer_names))
for init in added_table_inits:
node.input.append('^' + init)
# Update saver.
for node in graph_def.node:
if node.name.endswith('SaveV2'):
_clean_save_and_restore(graph_def, node, removed_op_names)
return (graph_def, initializer_names)
def _do_transforms(graph_def,
input_names,
output_names,
initializer_names,
transforms,
saver_def=None,
checkpoint_path=None):
"""Apply requested transforms to a GraphDef, including freezing.
Args:
graph_def: A GraphDef proto to be transformed.
input_names: Names of input nodes.
output_names: Names of output nodes.
initializer_names: Dictionary of the "infrastructural" nodes (initializers,
save and restore ops, etc.) that should be retained even if they are not
transitively reachable from output nodes. The keys in this dictionary
indicate the collection where these nodes were obtained from.
transforms: A list of strings naming the graph transforms to be applied in
order. These transform names are exactly those supported by the Graph
Transform Tool, with the addition of the 'freeze_graph' and
'sparsify_gather' transforms.
saver_def: A SaverDef proto used for restoring a checkpoint during freezing,
if needed (default None).
checkpoint_path: A path to a checkpoint to restore during freezing,
if needed (default None).
Returns:
A tuple containing the GraphDef and a Dict of updated initializer nodes.
"""
transformed_graph_def = _graph_pb2.GraphDef()
transformed_graph_def.CopyFrom(graph_def)
transformed_initializer_names = initializer_names.copy()
if not transforms:
return transformed_graph_def, transformed_initializer_names
current_gtt_transforms = []
for t in transforms:
if t == _FREEZE_GRAPH_TRANSFORM:
transformed_graph_def = _gtt_transforms(
transformed_graph_def, input_names, output_names,
transformed_initializer_names, current_gtt_transforms)
output_node_names = [_op_name(x) for x in output_names]
transformed_graph_def, transformed_initializer_names = _freeze_transform(
transformed_graph_def, output_node_names,
transformed_initializer_names, saver_def, checkpoint_path)
current_gtt_transforms = []
elif t == _SPARSIFY_GATHER_TRANSFORM:
transformed_graph_def = _gtt_transforms(
transformed_graph_def, input_names, output_names,
transformed_initializer_names, current_gtt_transforms)
transformed_graph_def, transformed_initializer_names = (
_sparsify_gather_transform(
transformed_graph_def, input_names, output_names,
transformed_initializer_names, checkpoint_path))
current_gtt_transforms = []
else:
current_gtt_transforms.append(t)
transformed_graph_def = _gtt_transforms(
transformed_graph_def, input_names, output_names,
transformed_initializer_names, current_gtt_transforms)
return transformed_graph_def, transformed_initializer_names
def _connect_to_shared_init_op(graph_def, shared_init_op_name,
nodes_to_connect):
"""Creates a new shared init node that is connected to via control deps.
Args:
graph_def: The GraphDef proto to add the shared init node to.
shared_init_op_name: A string specifying the name of the shared init node to
create.
nodes_to_connect: A list of strings specifying the names of nodes to connect
to the shared node via control dependencies.
"""
if nodes_to_connect:
init_op = graph_def.node.add()
init_op.name = shared_init_op_name
init_op.op = 'NoOp'
init_op.input.extend(['^' + i for i in nodes_to_connect])
# forked and modified from freeze_graph.py
def _freeze_graph_with_def_protos(input_graph_def, output_node_names,
initializer_names, shared_init_op_name,
input_saver_def, input_checkpoint):
"""Converts all variables in a graph and checkpoint into constants.
During this process, we need to retain certain initializer nodes (e.g. table
initializer nodes). Instead of determining which dependencies
of the shared initializer node (e.g. group_deps) to keep, we
reconstruct the connections between the individual initializer nodes and
the shared node after freezing the graph.
Args:
input_graph_def: A GraphDef proto to be frozen.
output_node_names: Names of output nodes.
initializer_names: Names of initializer nodes to keep.
shared_init_op_name: The name of the shared initializer node to connect the
nodes in initializer names to.
input_saver_def: A SaverDef proto used for restoring a checkpoint.
input_checkpoint: A path to a checkpoint to restore.
Returns:
A frozen GraphDef.
"""
with _ops.Graph().as_default():
_ = _importer.import_graph_def(input_graph_def, name='')
with _session.Session() as sess:
saver = _saver_lib.Saver(saver_def=input_saver_def)
saver.restore(sess, input_checkpoint)
output_graph_def = _graph_util.convert_variables_to_constants(
sess, input_graph_def, output_node_names + initializer_names)
_connect_to_shared_init_op(output_graph_def, shared_init_op_name,
initializer_names)
return output_graph_def
def _find_all_mandatory_retain_ops(base_meta_graph_def):
"""Identify all infrastructural Ops, to ensure that they are retained.
We need to retain infrastructural Ops (init and saver stuff), in addition
to the desired outputs.
For now we retain *all* save and restore ops, variable initializers,
table initializers, and main init ops.
This means that strip_unused_nodes will not remove unused variables.
Args:
base_meta_graph_def: a GraphDef proto in which to identify nodes to retain.
Returns:
A dictionary corresponding to the nodes associated with each collection
that are to be retained.
"""
# TODO(b/63447631): implement variable stripping.
initializer_names = {}
# Primary SaverDef and SAVERS collection
saver_defs = []
if base_meta_graph_def.HasField('saver_def'):
saver_defs.append(base_meta_graph_def.saver_def)
saver_defs.extend(_get_all_protos_from_collection(
base_meta_graph_def, _ops.GraphKeys.SAVERS))
for saver_def in saver_defs:
savers = initializer_names.get(_ops.GraphKeys.SAVERS, [])
savers.extend([
saver_def.filename_tensor_name, saver_def.save_tensor_name,
saver_def.restore_op_name
])
initializer_names[_ops.GraphKeys.SAVERS] = savers
# Variable initializers
variable_collections = [
_ops.GraphKeys.GLOBAL_VARIABLES,
_ops.GraphKeys.TRAINABLE_VARIABLES,
_ops.GraphKeys.MOVING_AVERAGE_VARIABLES,
_ops.GraphKeys.LOCAL_VARIABLES,
_ops.GraphKeys.MODEL_VARIABLES]
for var_coll in variable_collections:
variables = _get_all_protos_from_collection(base_meta_graph_def, var_coll)
var_init_names = [v.initializer_name for v in variables]
if var_init_names:
# Sanity check to ensure we don't overwrite dictionary entries.
assert var_coll not in initializer_names
initializer_names[var_coll] = var_init_names
# Table initializers
op_names = _get_all_node_names_from_collection(
base_meta_graph_def, _ops.GraphKeys.TABLE_INITIALIZERS)
if op_names:
# Sanity check to ensure we don't overwrite dictionary entries.
assert _ops.GraphKeys.TABLE_INITIALIZERS not in initializer_names
table_initializers = [t for t in op_names]
initializer_names[_ops.GraphKeys.TABLE_INITIALIZERS] = table_initializers
# Various init ops
various_init_op_collections = [_saved_model_constants.LEGACY_INIT_OP_KEY,
_saved_model_constants.MAIN_OP_KEY,
_ops.GraphKeys.INIT_OP,
_ops.GraphKeys.LOCAL_INIT_OP,
_ops.GraphKeys.READY_OP,
_ops.GraphKeys.READY_FOR_LOCAL_INIT_OP]
for op_coll in various_init_op_collections:
op_name = _get_single_node_name_from_collection(
base_meta_graph_def, op_coll)
if op_name:
# Sanity check to ensure we don't overwrite dictionary entries.
assert op_coll not in initializer_names
initializer_names[op_coll] = [op_name]
return initializer_names
def _add_pruned_collection(base_meta_graph_def, meta_graph_def,
collection_name, removed_op_names):
"""Copy collection to the transformed MetaGraphDef, omitting removed items."""
base_collection = base_meta_graph_def.collection_def[collection_name]
collection = meta_graph_def.collection_def[collection_name]
if base_collection.HasField('any_list'):
for any_value in base_collection.any_list.value:
# just search the serialized proto as a string
if not _is_removed_mentioned(any_value.value, removed_op_names):
copied_any = collection.any_list.value.add()
copied_any.CopyFrom(any_value)
elif base_collection.HasField('bytes_list'):
collection.bytes_list.value[:] = [
s for s in base_collection.bytes_list.value
if not _is_removed_mentioned(s, removed_op_names)]
_logging.info(
'In collection %s, nodes excluded are: %s', collection_name,
sorted([
s for s in base_collection.bytes_list.value
if _is_removed_mentioned(s, removed_op_names)
]))
elif base_collection.HasField('node_list'):
collection.node_list.value[:] = [
s for s in base_collection.node_list.value
if not _is_removed(s, removed_op_names)]
else:
collection.CopyFrom(base_collection)
def _add_pruned_saver(base_meta_graph_def, meta_graph_def, removed_op_names):
"""Copy the Saver into the transformed MetaGraphDef, if valid.
Currently this copies the Saver as is, after verifying that none of the
referenced Save & Restore ops were removed. A future version will modify
the Save and Restore ops themselves as needed to account for removed
Variables.
Args:
base_meta_graph_def: The untransformed MetaGraphDef.
meta_graph_def: The transformed MetaGraphDef being built.
removed_op_names: An iterable of names of ops that were removed.
"""
# Note this does surgery on meta_graph_def.graph_def too, so that should have
# been copied already.
if base_meta_graph_def.HasField('saver_def'):
filename_tensor_name = base_meta_graph_def.saver_def.filename_tensor_name
save_tensor_name = base_meta_graph_def.saver_def.save_tensor_name
restore_op_name = base_meta_graph_def.saver_def.restore_op_name
_check_tensor_not_removed(filename_tensor_name, removed_op_names)
_check_tensor_not_removed(save_tensor_name, removed_op_names)
_check_tensor_not_removed(restore_op_name, removed_op_names)
# TODO(b/63447631): Once we strip unused variables, remove references to
# them from save and restore ops. Retain those ops only if they also refer
# to retained Variables. See if we can use _clean_save_and_restore() for
# this.
# saver_name, restore_all = restore_op_name.rsplit('/', 1)
# if restore_all != 'restore_all':
# raise ValueError(
# 'SaverDef restore_op_name did not have expected form */restore_all')
# save_tensor_names_op_name = '{}/SaveV2/tensor_names'.format(saver_name)
# restore_tensor_names_op_name = (
# '{}/RestoreV2/tensor_names'.format(saver_name))
# save_tensor_names_op = _find_op(meta_graph_def.graph_def,
# save_tensor_names_op_name)
# save_tensor_names_value_tensor = save_tensor_names_op.attr['value'].tensor
# save_tensor_names_value_tensor.string_val[:] = [
# s for s in save_tensor_names_value_tensor.string_val
# if not _is_removed(s, removed_op_names)]
# restore_tensor_names_op = _find_op(
# meta_graph_def.graph_def, restore_tensor_names_op_name)
# restore_tensor_names_value_tensor = (
# restore_tensor_names_op.attr['value'].tensor)
# restore_tensor_names_value_tensor.string_val[:] = [
# s for s in restore_tensor_names_value_tensor.string_val
# if not _is_removed(s, removed_op_names)]
# if (save_tensor_names_value_tensor.string_val
# or restore_tensor_names_value_tensor.string_val):
meta_graph_def.saver_def.CopyFrom(base_meta_graph_def.saver_def)
def _find_op(graph_def, op_name):
"""Fetch a node from a GraphDef proto by name."""
for node_def in graph_def.node:
if node_def.name == op_name:
return node_def
return None
def _add_pruned_signature(base_meta_graph_def, meta_graph_def,
signature_name, removed_op_names):
"""Copy the named signature into the transformed MetaGraphDef, if valid.
If any input or output mentioned in the signature was removed by the graph
transform, the signature is silently omitted from the transformed
MetaGraphDef.
Args:
base_meta_graph_def: The untransformed MetaGraphDef.
meta_graph_def: The transformed MetaGraphDef being built.
signature_name: The name of the signature to copy.
removed_op_names: An iterable of names of ops that were removed.
"""
try:
base_signature = base_meta_graph_def.signature_def[signature_name]
for key in base_signature.inputs:
_check_tensor_not_removed(base_signature.inputs[key].name,
removed_op_names)
for key in base_signature.outputs:
_check_tensor_not_removed(base_signature.outputs[key].name,
removed_op_names)
meta_graph_def.signature_def[signature_name].CopyFrom(base_signature)
except ValueError:
# exclude any signature that mentions a removed node
pass
def _get_single_node_name_from_collection(meta_graph_def, collection_key):
"""Obtain a node name that is the single element of a collection."""
if collection_key not in meta_graph_def.collection_def:
return None
collection = meta_graph_def.collection_def[collection_key]
if not collection.node_list.value:
raise ValueError(
'Collection {} is present but type is not node_list.'.format(
collection_key))
if len(collection.node_list.value) != 1:
raise ValueError(
        'Collection {} has {} elements; expected exactly one.'.format(
            collection_key, len(collection.node_list.value)))
return collection.node_list.value[0]
def _get_all_node_names_from_collection(meta_graph_def, collection_key):
"""Obtain node names from a collection."""
if collection_key not in meta_graph_def.collection_def:
return None
collection = meta_graph_def.collection_def[collection_key]
if not collection.node_list.value:
raise ValueError(
'Collection {} is present but type is not node_list.'.format(
collection_key))
return collection.node_list.value
def _get_all_protos_from_collection(meta_graph_def, collection_key):
"""Obtain node names from a collection."""
if collection_key not in meta_graph_def.collection_def:
return []
collection = meta_graph_def.collection_def[collection_key]
if not collection.bytes_list.value:
raise ValueError(
'Collection {} is present but type is not bytes_list.'.format(
collection_key))
proto_type = _ops.get_collection_proto_type(collection_key)
result = []
for value in collection.bytes_list.value:
proto = proto_type()
proto.ParseFromString(value)
result.append(proto)
return result
def _is_removed(tensor_name, removed_op_names):
"""Determine whether the named tensor is an output of a removed op."""
for removed_op_name in removed_op_names:
if tensor_name.split(':')[0] == removed_op_name:
return True
return False
def _is_removed_mentioned(s, removed_op_names):
"""Determine whether any removed op is mentioned in the given object.
This relies on the string representation of the object. This is used for
proto messages that may mention ops by name in nested fields. The string
representation of the proto includes those field values, so this string
search approach is sufficient.
Args:
s: an object to search for removed op names.
removed_op_names: An iterable of names of ops that were removed.
Returns:
True if any removed op is mentioned in the given object, False otherwise.
"""
# A common approach taken by some of the transforms in gtt is to add new nodes
# that have the same prefix as the node they are removing. For example, if
# the original node name was /foo, they may remove that node and add in
# /foo/bar. This regex ensures that we handle these two nodes
# as separate entities. It matches on nodes having names in the form of
# '/foo/bar_x' as well as nodes having names in the form of 'foo.'
s_names = _re.findall(r'((?:[\/]?[a-zA-Z0-9\_]*)*)', _compat.as_str_any(s))
for removed_op_name in removed_op_names:
for s_name in s_names:
if s_name.endswith(removed_op_name):
return True
return False
def _check_tensor_not_removed(tensor_name, removed_op_names):
"""Verify that the named tensor was not removed.
Args:
tensor_name: the name of a tensor to check.
removed_op_names: An iterable of names of ops that were removed.
Raises:
ValueError: if the tensor was removed.
"""
if not tensor_name:
raise ValueError('Tensor name should not be empty')
if _is_removed(tensor_name, removed_op_names):
raise ValueError(
'Expected Tensor, but it was removed: {}'.format(tensor_name))
def _add_new_inits_to_collection(meta_graph_def, updated_initializer_names):
"""Add new inits to collection.
Args:
meta_graph_def: The MetaGraphDef protocol buffer to update.
updated_initializer_names: Dictionary of the updated "infrastructural" nodes
(initializers, save and restore ops, etc.). The keys in this dictionary
indicate the collection where these nodes were obtained from.
Raises:
ValueError: if the tensor was removed.
"""
# TODO(dzats): Extend this to support all collections.
if _ops.GraphKeys.TABLE_INITIALIZERS in updated_initializer_names:
orig_table_inits = _get_all_node_names_from_collection(
meta_graph_def, _ops.GraphKeys.TABLE_INITIALIZERS)
orig_table_inits = orig_table_inits if orig_table_inits else []
updated_table_inits = updated_initializer_names[
_ops.GraphKeys.TABLE_INITIALIZERS]
new_table_inits = list(set(updated_table_inits) - set(orig_table_inits))
new_table_inits.sort()
meta_graph_def.collection_def[
_ops.GraphKeys.TABLE_INITIALIZERS].node_list.value.extend(
new_table_inits)
def meta_graph_transform(
base_meta_graph_def, input_names, output_names, transforms, tags,
checkpoint_path=None):
"""Apply the Graph Transform tool to a MetaGraphDef.
Args:
base_meta_graph_def: A MetaGraphDef protocol buffer to transform.
input_names: Names of input nodes.
output_names: Names of output nodes.
transforms: A list of strings naming the graph transforms to be applied in
order. These transform names are exactly those supported by the Graph
Transform Tool, with the addition of the 'freeze_graph' and
'sparsify_gather' transforms.
tags: A list of tags with which to annotate the transformed MetaGraphDef.
checkpoint_path: A path to a checkpoint to restore during freezing,
if needed (default None).
Returns:
A new transformed MetaGraphDef protocol buffer.
"""
meta_graph_def = _meta_graph_pb2.MetaGraphDef()
initializer_names = _find_all_mandatory_retain_ops(base_meta_graph_def)
transformed_graph_def, updated_initializer_names = _do_transforms(
base_meta_graph_def.graph_def, input_names, output_names,
initializer_names, transforms, base_meta_graph_def.saver_def,
checkpoint_path)
meta_graph_def.graph_def.CopyFrom(transformed_graph_def)
meta_graph_def.meta_info_def.CopyFrom(base_meta_graph_def.meta_info_def)
meta_graph_def.meta_info_def.ClearField('tags')
for tag in tags:
meta_graph_def.meta_info_def.tags.append(tag)
base_op_names = [_compat.as_str(node.name)
for node in base_meta_graph_def.graph_def.node]
retained_op_names = [_compat.as_str(node.name)
for node in meta_graph_def.graph_def.node]
removed_op_names = set(base_op_names) - set(retained_op_names)
_logging.info('Node names in base graph: %s', sorted(base_op_names))
_logging.info('Node names retained: %s', sorted(retained_op_names))
_logging.info('Node names removed: %s', sorted(removed_op_names))
# Copy saver, excluding any pruned nodes if graph was not frozen.
# TODO(b/63447631): Revisit this once the problem is addressed. Currently
# _add_pruned_saver assumes that the save and restore nodes have not been
# removed but freeze_graph (correctly) removes them.
if _FREEZE_GRAPH_TRANSFORM not in transforms:
_add_pruned_saver(base_meta_graph_def, meta_graph_def, removed_op_names)
# Copy collections, excluding any pruned nodes
for collection_name in base_meta_graph_def.collection_def:
_add_pruned_collection(
base_meta_graph_def, meta_graph_def, collection_name,
removed_op_names)
# Append newly added initializers to collection.
_add_new_inits_to_collection(meta_graph_def, updated_initializer_names)
# Copy signature_defs, excluding any pruned nodes
for signature_name in base_meta_graph_def.signature_def:
_add_pruned_signature(
base_meta_graph_def, meta_graph_def, signature_name,
removed_op_names)
return meta_graph_def
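# Illustrative usage sketch (not part of this module): applying a Graph
# Transform Tool pass plus freezing to a MetaGraphDef obtained elsewhere. The
# node names, tag, transform list and checkpoint path below are assumptions for
# demonstration only.
def _example_meta_graph_transform(base_meta_graph_def):  # pragma: no cover
  return meta_graph_transform(
      base_meta_graph_def,
      input_names=['input:0'],
      output_names=['output:0'],
      transforms=['strip_unused_nodes', _FREEZE_GRAPH_TRANSFORM],
      tags=['serve'],
      checkpoint_path='/tmp/model.ckpt')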
|
|
"""Support for Google Assistant Smart Home API."""
from asyncio import gather
from collections.abc import Mapping
from itertools import product
import logging
from homeassistant.util.decorator import Registry
from homeassistant.core import callback
from homeassistant.const import (
CLOUD_NEVER_EXPOSED_ENTITIES, CONF_NAME, STATE_UNAVAILABLE,
ATTR_SUPPORTED_FEATURES, ATTR_ENTITY_ID,
)
from homeassistant.components import (
climate,
cover,
fan,
group,
input_boolean,
light,
lock,
media_player,
scene,
script,
switch,
vacuum,
)
from . import trait
from .const import (
TYPE_LIGHT, TYPE_LOCK, TYPE_SCENE, TYPE_SWITCH, TYPE_VACUUM,
TYPE_THERMOSTAT, TYPE_FAN,
CONF_ALIASES, CONF_ROOM_HINT,
ERR_FUNCTION_NOT_SUPPORTED, ERR_PROTOCOL_ERROR, ERR_DEVICE_OFFLINE,
ERR_UNKNOWN_ERROR,
EVENT_COMMAND_RECEIVED, EVENT_SYNC_RECEIVED, EVENT_QUERY_RECEIVED
)
from .helpers import SmartHomeError
HANDLERS = Registry()
_LOGGER = logging.getLogger(__name__)
DOMAIN_TO_GOOGLE_TYPES = {
climate.DOMAIN: TYPE_THERMOSTAT,
cover.DOMAIN: TYPE_SWITCH,
fan.DOMAIN: TYPE_FAN,
group.DOMAIN: TYPE_SWITCH,
input_boolean.DOMAIN: TYPE_SWITCH,
light.DOMAIN: TYPE_LIGHT,
lock.DOMAIN: TYPE_LOCK,
media_player.DOMAIN: TYPE_SWITCH,
scene.DOMAIN: TYPE_SCENE,
script.DOMAIN: TYPE_SCENE,
switch.DOMAIN: TYPE_SWITCH,
vacuum.DOMAIN: TYPE_VACUUM,
}
def deep_update(target, source):
"""Update a nested dictionary with another nested dictionary."""
for key, value in source.items():
if isinstance(value, Mapping):
target[key] = deep_update(target.get(key, {}), value)
else:
target[key] = value
return target
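# A tiny worked example (illustrative values only): deep_update merges nested
# mappings in place instead of replacing whole sub-dicts, which is how trait
# query_attributes() results are layered in query_serialize() below.
def _deep_update_example():
    """Show that nested keys are merged rather than overwritten."""
    attrs = {'color': {'spectrumRGB': 0}, 'on': True}
    deep_update(attrs, {'color': {'temperature': 2700}})
    # attrs is now {'color': {'spectrumRGB': 0, 'temperature': 2700}, 'on': True}
    return attrs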
class _GoogleEntity:
"""Adaptation of Entity expressed in Google's terms."""
def __init__(self, hass, config, state):
self.hass = hass
self.config = config
self.state = state
@property
def entity_id(self):
"""Return entity ID."""
return self.state.entity_id
@callback
def traits(self):
"""Return traits for entity."""
state = self.state
domain = state.domain
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
return [Trait(self.hass, state, self.config) for Trait in trait.TRAITS
if Trait.supported(domain, features)]
async def sync_serialize(self):
"""Serialize entity for a SYNC response.
https://developers.google.com/actions/smarthome/create-app#actiondevicessync
"""
state = self.state
# When a state is unavailable, the attributes that describe
# capabilities will be stripped. For example, a light entity will miss
# the min/max mireds. Therefore they will be excluded from a sync.
if state.state == STATE_UNAVAILABLE:
return None
entity_config = self.config.entity_config.get(state.entity_id, {})
name = (entity_config.get(CONF_NAME) or state.name).strip()
# If an empty string
if not name:
return None
traits = self.traits()
# Found no supported traits for this entity
if not traits:
return None
device = {
'id': state.entity_id,
'name': {
'name': name
},
'attributes': {},
'traits': [trait.name for trait in traits],
'willReportState': False,
'type': DOMAIN_TO_GOOGLE_TYPES[state.domain],
}
# use aliases
aliases = entity_config.get(CONF_ALIASES)
if aliases:
device['name']['nicknames'] = aliases
for trt in traits:
device['attributes'].update(trt.sync_attributes())
room = entity_config.get(CONF_ROOM_HINT)
if room:
device['roomHint'] = room
            return device
dev_reg, ent_reg, area_reg = await gather(
self.hass.helpers.device_registry.async_get_registry(),
self.hass.helpers.entity_registry.async_get_registry(),
self.hass.helpers.area_registry.async_get_registry(),
)
entity_entry = ent_reg.async_get(state.entity_id)
if not (entity_entry and entity_entry.device_id):
return device
device_entry = dev_reg.devices.get(entity_entry.device_id)
if not (device_entry and device_entry.area_id):
return device
area_entry = area_reg.areas.get(device_entry.area_id)
if area_entry and area_entry.name:
device['roomHint'] = area_entry.name
return device
@callback
def query_serialize(self):
"""Serialize entity for a QUERY response.
https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
"""
state = self.state
if state.state == STATE_UNAVAILABLE:
return {'online': False}
attrs = {'online': True}
for trt in self.traits():
deep_update(attrs, trt.query_attributes())
return attrs
async def execute(self, command, params):
"""Execute a command.
https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
"""
executed = False
for trt in self.traits():
if trt.can_execute(command, params):
await trt.execute(command, params)
executed = True
break
if not executed:
raise SmartHomeError(
ERR_FUNCTION_NOT_SUPPORTED,
'Unable to execute {} for {}'.format(command,
self.state.entity_id))
@callback
def async_update(self):
"""Update the entity with latest info from Home Assistant."""
self.state = self.hass.states.get(self.entity_id)
async def async_handle_message(hass, config, message):
"""Handle incoming API messages."""
response = await _process(hass, config, message)
if response and 'errorCode' in response['payload']:
_LOGGER.error('Error handling message %s: %s',
message, response['payload'])
return response
async def _process(hass, config, message):
"""Process a message."""
request_id = message.get('requestId') # type: str
inputs = message.get('inputs') # type: list
if len(inputs) != 1:
return {
'requestId': request_id,
'payload': {'errorCode': ERR_PROTOCOL_ERROR}
}
handler = HANDLERS.get(inputs[0].get('intent'))
if handler is None:
return {
'requestId': request_id,
'payload': {'errorCode': ERR_PROTOCOL_ERROR}
}
try:
result = await handler(hass, config, request_id,
inputs[0].get('payload'))
except SmartHomeError as err:
return {
'requestId': request_id,
'payload': {'errorCode': err.code}
}
except Exception: # pylint: disable=broad-except
_LOGGER.exception('Unexpected error')
return {
'requestId': request_id,
'payload': {'errorCode': ERR_UNKNOWN_ERROR}
}
if result is None:
return None
return {'requestId': request_id, 'payload': result}
@HANDLERS.register('action.devices.SYNC')
async def async_devices_sync(hass, config, request_id, payload):
"""Handle action.devices.SYNC request.
https://developers.google.com/actions/smarthome/create-app#actiondevicessync
"""
hass.bus.async_fire(EVENT_SYNC_RECEIVED, {
'request_id': request_id
})
devices = []
for state in hass.states.async_all():
if state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
continue
if not config.should_expose(state):
continue
entity = _GoogleEntity(hass, config, state)
serialized = await entity.sync_serialize()
if serialized is None:
_LOGGER.debug("No mapping for %s domain", entity.state)
continue
devices.append(serialized)
response = {
'agentUserId': config.agent_user_id,
'devices': devices,
}
return response
@HANDLERS.register('action.devices.QUERY')
async def async_devices_query(hass, config, request_id, payload):
"""Handle action.devices.QUERY request.
https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
"""
devices = {}
for device in payload.get('devices', []):
devid = device['id']
state = hass.states.get(devid)
hass.bus.async_fire(EVENT_QUERY_RECEIVED, {
'request_id': request_id,
ATTR_ENTITY_ID: devid,
})
if not state:
# If we can't find a state, the device is offline
devices[devid] = {'online': False}
continue
devices[devid] = _GoogleEntity(hass, config, state).query_serialize()
return {'devices': devices}
@HANDLERS.register('action.devices.EXECUTE')
async def handle_devices_execute(hass, config, request_id, payload):
"""Handle action.devices.EXECUTE request.
https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
"""
entities = {}
results = {}
for command in payload['commands']:
for device, execution in product(command['devices'],
command['execution']):
entity_id = device['id']
hass.bus.async_fire(EVENT_COMMAND_RECEIVED, {
'request_id': request_id,
ATTR_ENTITY_ID: entity_id,
'execution': execution
})
# Happens if error occurred. Skip entity for further processing
if entity_id in results:
continue
if entity_id not in entities:
state = hass.states.get(entity_id)
if state is None:
results[entity_id] = {
'ids': [entity_id],
'status': 'ERROR',
'errorCode': ERR_DEVICE_OFFLINE
}
continue
entities[entity_id] = _GoogleEntity(hass, config, state)
try:
await entities[entity_id].execute(execution['command'],
execution.get('params', {}))
except SmartHomeError as err:
results[entity_id] = {
'ids': [entity_id],
'status': 'ERROR',
'errorCode': err.code
}
final_results = list(results.values())
for entity in entities.values():
if entity.entity_id in results:
continue
entity.async_update()
final_results.append({
'ids': [entity.entity_id],
'status': 'SUCCESS',
'states': entity.query_serialize(),
})
return {'commands': final_results}
@HANDLERS.register('action.devices.DISCONNECT')
async def async_devices_disconnect(hass, config, request_id, payload):
"""Handle action.devices.DISCONNECT request.
https://developers.google.com/actions/smarthome/create#actiondevicesdisconnect
"""
return None
def turned_off_response(message):
"""Return a device turned off response."""
return {
'requestId': message.get('requestId'),
'payload': {'errorCode': 'deviceTurnedOff'}
}
|
|
"""
Various searching, selecting and finding algorithms
"""
##### STDLIB
import sys
##### 3RD PARTY
##### PROJECT
import adsl.common
##### INIT AND DECLARATIONS
if sys.version_info.major >= 3:
    xrange = range
    # Python 3 removed the built-in cmp(); provide an equivalent so the
    # default cmp_func used by binsearch() keeps working.
    def cmp(a, b):
        return (a > b) - (a < b)
##### CLASSES AND FUNCTIONS
def binsearch(array, elem, left=None, right=None, cmp_func=cmp):
"""Classic binary search algorithm.
Args:
array (sequence): the sequence of elements that we are searching
elem (object): the element that we are searching for
left (int): the lower bound index of the sub-sequence to
search for the element. Default is None, in which case it will
start at position 0.
right (int): the upper bound index of the sub-sequence to
search for the element. Default is None, in which case it will
start at len(array) - 1.
cmp_func (function): function to compare two arbitrary
elements. Must conform to the "negative for e1 < e2, 0 for e1 ==
e2, positive for e1 > e2" comparison conventions. Default is the
        built-in Python 'cmp' function (shimmed above for Python 3).
Returns:
int: If the element is found in the sequence, the first
position that it was found at. Else, None.
"""
res = None
if left is None:
left = 0
if right is None:
right = len(array) - 1
while left <= right:
pivot = int((left+right)/2.0)
pval = array[pivot]
if cmp_func(elem, pval) == 0:
# This is a position of the element in the array
res = pivot
break
elif cmp_func(elem, pval) < 0:
# The element must be in the lower half of the range if it
# exists
right = pivot - 1
else:
# The element must be in the upper half of the range if it
# exists
left = pivot + 1
return res
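# Worked example for binsearch (illustrative only): in the sorted list
# [2, 3, 5, 7, 11], searching for 7 narrows [0, 4] to [3, 4] and returns
# index 3, while searching for 4 exhausts the range and returns None.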
def binsearch_bounds(array, elem):
"""Find the (lower, upper) bounds of some element in the array.
Args:
        array (list): a Python list of elements
elem (object): the element to search for
Returns:
tuple(int, int): the (lower, upper) integer bounds (0-index)
where elem is found in the array. If elem is not found, return
None.
Todo:
Support comparison function for element.
"""
pos = binsearch(array, elem)
if pos is None:
return None
lb = pos
left = 0
right = pos - 1
    while left <= right:
        pivot = int((left+right)/2.0)
        pval = array[pivot]
        if elem == pval:
            if pivot < lb:
                lb = pivot
            # Any earlier occurrence must be to the left of this match.
            right = pivot - 1
        elif elem < pval:
            right = pivot - 1
        else:
            left = pivot + 1
ub = pos
left = pos + 1
right = len(array) - 1
while left <= right:
pivot = int((left+right)/2.0)
pval = array[pivot]
if elem == pval and (ub is None or pivot > ub):
ub = pivot
elif elem < pval:
right = pivot - 1
else:
left = pivot + 1
return (lb, ub)
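# Worked example for binsearch_bounds (illustrative only): in the sorted
# list [1, 2, 2, 2, 3], binsearch first finds 2 at index 2, and the two
# bounding searches widen that hit to the tuple (1, 3).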
def quickselect(array, K):
"""
Find the K-th most element in the sequence.
If we take an unordered sequence of elements and sorted them,
which element would occupy position K (ie position 0, or 5, or
19)?
quickselect answers the above question in expected linear
time. This is less than the usual N*lg(N) time we get for
comparison-based sorting algorithms.
quickselect works by repeatedly partioning sequences to establish
'some element occupies the K+X or X-Y position'. Since we know the
fixed position of one element, we can use that to determine which
sub-range must contain the K-th element (if any).
NOTE: this is essentially a neat combination of ideas from binary
search and quicksort. It is a destructive search in that it
partially sorts the sequence.
"""
res = None
left = 0
right = len(array) - 1
while left <= right:
pivot = adsl.common.mo3(left, right)
pivot = adsl.common.partition(array, left, right, pivot)
# The pivot is now a fixed position of some element. We KNOW
# that all elements <= array[pivot] are located in the lower
# half, and all elements > array[pivot] are located in the
# upper.
if K == pivot:
res = array[pivot]
break
elif K < pivot:
# The K-th element must be in the lower range relative to pivot
right = pivot - 1
else:
# The K-th element must be in the upper range relative to pivot
left = pivot + 1
return res
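# Minimal usage sketch for quickselect (illustrative only; it relies on the
# median-of-three and partition helpers in adsl.common and partially
# reorders the list in place):
#
#     data = [9, 1, 8, 2, 7, 3]
#     third_smallest = quickselect(data, 2)   # -> 3 for this input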
def hash_substr(string, i, j, prime=31):
"""
Map a sub-sequence of characters to an integer. This works by
mapping each individual character to an integer via ord(), then
multiplying it by a prime number P raised to some power.
Ex:
Lets hash the string "cat"
ord('t') = 116
ord('a') = 97
ord('c') = 99
Let P = prime = 31. Then hash_substr("cat") is:
ord('c')*pow(P, 2) + ord('a')*pow(P, 1) + ord('t')*pow(P, 0)
Args:
string (string): the sequence of characters containing some subsequence we want to hash
i (int): the starting index of the subsequence to hash
j (int): the ending index of the subsequence to hash
prime (int): Optional. The prime number to multiply each ord(character) by
Returns:
(int) The integer representation of the character subsequence
starting at string[i] and ending at string[j]
"""
if i < 0:
raise IndexError("i must be >= 0, is {}".format(i))
if j >= len(string):
raise IndexError("j must be < len(string), is {}".format(j))
if i > j:
raise IndexError("i must be <= j. (i = {}, j = {})".format(i, j))
    res = 0
    mult = 1
    # Walk from the end of the subsequence (lowest power of the prime)
    # back to its start (highest power), honouring the i..j bounds.
    k = j
    while k >= i:
        res += (mult * ord(string[k]))
        mult *= prime
        k -= 1
    return res
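# Worked example matching the docstring above: with the default prime 31,
# hash_substr("cat", 0, 2) evaluates to
#     ord('c')*31**2 + ord('a')*31 + ord('t')
#   = 99*961 + 97*31 + 116
#   = 98262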
def hash_str(word, N, prime=31):
return hash_substr(word, 0, N-1, prime=prime)
def find_all_N(string, words, N, res_list=None, P=31):
"""
Find all words of some fixed length N using Rabin-Karp.
"""
# NOTE: let's be thankful that ord() takes into account Unicode:
# https://docs.python.org/2/library/functions.html#ord
for word in words:
if len(word) != N:
raise ValueError("{} with length = {} is not of required length = {}".format(word,
len(word),
N))
if res_list is None:
res = []
else:
res = res_list
M = len(string)
# Table of hashes to words
table = {hash_str(word, N): word for word in words}
max_pow = pow(P, N-1)
rhash = None
ln = M - N + 1
# Unroll the loop once so that we don't check conditionals inside
# of the loop and compute the initial rolling hash
i = 0
rhash = hash_substr(string, i, i+N-1)
if rhash in table:
word = table[rhash]
match = True
j = 0
while j < N:
if string[j+i] != word[j]:
match = False
break
j += 1
if match:
# (word, start, end)
res.append((word, i, i+N))
i = 1
while i < ln:
# Rolling hash function of Rabin-Karp. This is based on the
# observation that H(i+1) can be computed from H(i) in
# constant time.
# Starting term of the previous hash value.
# t1 = ord(string[i-1]) * max_pow
# Ending term of the current hash value. It is multiplied
# by P^0, which is always 1. So just omit for brevity.
# t2 = ord(string[i+N-1])
rhash = ((rhash - (ord(string[i-1]) * max_pow)) * P) + ord(string[i+N-1])
if rhash in table:
word = table[rhash]
# We have a collision via hashing. By the Pigeonhole
# principle, if we map a set of cardinality M to a set of
# cardinality N and M > N, then there must exist at least
# one bucket containing at least two elements. In other
# words, two different strings can map to the same integer
# via hashing. So compare the substring char-by-char to
# make sure we have a match.
match = True
            # Compare the candidate char-by-char; N is typically small
j = 0
while j < N:
if string[j+i] != word[j]:
match = False
break
j += 1
if match:
# (word, start, end)
res.append((word, i, i+N))
i += 1
return res
def find_all(string, words):
"""
Find all matching words in some string by bucket-sorting them by
size and running all strings of the same length through
Rabin-Karp.
Let:
M = len(string)
N = the longest length of any word in words
K = the total number of different word lengths
The expected/best-case running time of Rabin-Karp is O(M+N). We
call it at most K times. This gives us an expected running time of
O(K*(M+N)).
We can usually treat K as a constant. This reduces the expected
running time back down to O(C*(M+N)) = O(M+N). For example, for
    the English dictionary located at /usr/share/dict/words, K = 23.
"""
res = []
# Do a bucket sort of words by their length.
table = {}
for word in words:
ln = len(word)
if ln not in table:
table[ln] = []
table[ln].append(word)
# Now use find_all_N with the same result list
for N in table:
# These are all the words of length N
words_N = table[N]
find_all_N(string, words_N, N, res_list=res)
return res
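if __name__ == "__main__":
    # Minimal demo of the Rabin-Karp based search above (illustrative only).
    # Each hit is a (word, start, end) tuple with an exclusive end index,
    # matching the tuples appended in find_all_N.
    text = "the cat sat on the mat"
    hits = find_all(text, ["cat", "mat", "the"])
    for word, start, end in sorted(hits, key=lambda hit: hit[1]):
        assert text[start:end] == word
        print(word, start, end)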
|
|
# mssql/pyodbc.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
Connecting to PyODBC
--------------------
The URL here is to be translated to PyODBC connection strings, as
detailed in `ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_.
DSN Connections
^^^^^^^^^^^^^^^
A DSN-based connection is **preferred** overall when using ODBC. A
basic DSN-based connection looks like::
engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
The above URL will pass the following connection string to PyODBC::
dsn=mydsn;UID=user;PWD=pass
If the username and password are omitted, the DSN form will also add
the ``Trusted_Connection=yes`` directive to the ODBC string.
Hostname Connections
^^^^^^^^^^^^^^^^^^^^
Hostname-based connections are **not preferred**, however are supported.
The ODBC driver name must be explicitly specified::
engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=SQL+Server+Native+Client+10.0")
.. versionchanged:: 1.0.0 Hostname-based PyODBC connections now require the
SQL Server driver name specified explicitly. SQLAlchemy cannot
choose an optimal default here as it varies based on platform
and installed drivers.
Other keywords interpreted by the Pyodbc dialect to be passed to
``pyodbc.connect()`` in both the DSN and hostname cases include:
``odbc_autotranslate``, ``ansi``, ``unicode_results``, ``autocommit``.
Pass through exact Pyodbc string
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A PyODBC connection string can also be sent exactly as specified in
`ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_
into the driver using the parameter ``odbc_connect``. The delimiters must be URL escaped, however,
as illustrated below using ``urllib.quote_plus``::
import urllib
params = urllib.quote_plus("DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password")
engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
Unicode Binds
-------------
The current state of PyODBC on a unix backend with FreeTDS and/or
EasySoft is poor regarding unicode; different OS platforms and versions of
UnixODBC versus IODBC versus FreeTDS/EasySoft versus PyODBC itself
dramatically alter how strings are received. The PyODBC dialect attempts to
use all the information it knows to determine whether or not a Python unicode
literal can be passed directly to the PyODBC driver or not; while SQLAlchemy
can encode these to bytestrings first, some users have reported that PyODBC
mis-handles bytestrings for certain encodings and requires a Python unicode
object, while the author has observed widespread cases where a Python unicode
is completely misinterpreted by PyODBC, particularly when dealing with
the information schema tables used in table reflection, and the value
must first be encoded to a bytestring.
It is for this reason that whether or not unicode literals for bound
parameters be sent to PyODBC can be controlled using the
``supports_unicode_binds`` parameter to ``create_engine()``. When
left at its default of ``None``, the PyODBC dialect will use its
best guess as to whether or not the driver deals with unicode literals
well. When ``False``, unicode literals will be encoded first, and when
``True`` unicode literals will be passed straight through. This is an interim
flag that hopefully should not be needed when the unicode situation stabilizes
for unix + PyODBC.
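A hypothetical engine that forces unicode binds straight through might look
like the following (shown purely as an illustration of the flag; whether it
is appropriate depends on the driver and platform in use)::
    engine = create_engine(
        "mssql+pyodbc://scott:tiger@some_dsn",
        supports_unicode_binds=True)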
.. versionadded:: 0.7.7
``supports_unicode_binds`` parameter to ``create_engine()``\ .
"""
from .base import MSExecutionContext, MSDialect, VARBINARY
from ...connectors.pyodbc import PyODBCConnector
from ... import types as sqltypes, util
import decimal
class _ms_numeric_pyodbc(object):
"""Turns Decimals with adjusted() < 0 or > 7 into strings.
The routines here are needed for older pyodbc versions
as well as current mxODBC versions.
"""
def bind_processor(self, dialect):
super_process = super(_ms_numeric_pyodbc, self).\
bind_processor(dialect)
if not dialect._need_decimal_fix:
return super_process
def process(value):
if self.asdecimal and \
isinstance(value, decimal.Decimal):
adjusted = value.adjusted()
if adjusted < 0:
return self._small_dec_to_string(value)
elif adjusted > 7:
return self._large_dec_to_string(value)
if super_process:
return super_process(value)
else:
return value
return process
# these routines needed for older versions of pyodbc.
# as of 2.1.8 this logic is integrated.
def _small_dec_to_string(self, value):
return "%s0.%s%s" % (
(value < 0 and '-' or ''),
'0' * (abs(value.adjusted()) - 1),
"".join([str(nint) for nint in value.as_tuple()[1]]))
def _large_dec_to_string(self, value):
_int = value.as_tuple()[1]
if 'E' in str(value):
result = "%s%s%s" % (
(value < 0 and '-' or ''),
"".join([str(s) for s in _int]),
"0" * (value.adjusted() - (len(_int) - 1)))
else:
if (len(_int) - 1) > value.adjusted():
result = "%s%s.%s" % (
(value < 0 and '-' or ''),
"".join(
[str(s) for s in _int][0:value.adjusted() + 1]),
"".join(
[str(s) for s in _int][value.adjusted() + 1:]))
else:
result = "%s%s" % (
(value < 0 and '-' or ''),
"".join(
[str(s) for s in _int][0:value.adjusted() + 1]))
return result
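    # Worked examples of the conversions above (illustrative only):
    #   Decimal('0.0001')     has adjusted() == -4 -> _small_dec_to_string -> '0.0001'
    #   Decimal('1234567890') has adjusted() == 9  -> _large_dec_to_string -> '1234567890'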
class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
pass
class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
pass
class _VARBINARY_pyodbc(VARBINARY):
def bind_processor(self, dialect):
if dialect.dbapi is None:
return None
DBAPIBinary = dialect.dbapi.Binary
def process(value):
if value is not None:
return DBAPIBinary(value)
else:
# pyodbc-specific
return dialect.dbapi.BinaryNull
return process
class MSExecutionContext_pyodbc(MSExecutionContext):
_embedded_scope_identity = False
def pre_exec(self):
"""where appropriate, issue "select scope_identity()" in the same
statement.
Background on why "scope_identity()" is preferable to "@@identity":
http://msdn.microsoft.com/en-us/library/ms190315.aspx
Background on why we attempt to embed "scope_identity()" into the same
statement as the INSERT:
http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?
"""
super(MSExecutionContext_pyodbc, self).pre_exec()
# don't embed the scope_identity select into an
# "INSERT .. DEFAULT VALUES"
if self._select_lastrowid and \
self.dialect.use_scope_identity and \
len(self.parameters[0]):
self._embedded_scope_identity = True
self.statement += "; select scope_identity()"
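            # The resulting statement then looks like (illustrative):
            #   INSERT INTO t (x) VALUES (?); select scope_identity()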
def post_exec(self):
if self._embedded_scope_identity:
# Fetch the last inserted id from the manipulated statement
# We may have to skip over a number of result sets with
# no data (due to triggers, etc.)
while True:
try:
# fetchall() ensures the cursor is consumed
# without closing it (FreeTDS particularly)
row = self.cursor.fetchall()[0]
break
except self.dialect.dbapi.Error as e:
# no way around this - nextset() consumes the previous set
# so we need to just keep flipping
self.cursor.nextset()
self._lastrowid = int(row[0])
else:
super(MSExecutionContext_pyodbc, self).post_exec()
class MSDialect_pyodbc(PyODBCConnector, MSDialect):
execution_ctx_cls = MSExecutionContext_pyodbc
colspecs = util.update_copy(
MSDialect.colspecs,
{
sqltypes.Numeric: _MSNumeric_pyodbc,
sqltypes.Float: _MSFloat_pyodbc,
VARBINARY: _VARBINARY_pyodbc,
sqltypes.LargeBinary: _VARBINARY_pyodbc,
}
)
def __init__(self, description_encoding=None, **params):
if 'description_encoding' in params:
self.description_encoding = params.pop('description_encoding')
super(MSDialect_pyodbc, self).__init__(**params)
self.use_scope_identity = self.use_scope_identity and \
self.dbapi and \
hasattr(self.dbapi.Cursor, 'nextset')
self._need_decimal_fix = self.dbapi and \
self._dbapi_version() < (2, 1, 8)
dialect = MSDialect_pyodbc
|
|
# Released under The MIT License (MIT)
# http://opensource.org/licenses/MIT
# Copyright (c) 2013-2015 SCoT Development Team
"""
Summary
-------
Object oriented API to SCoT.
Extended Summary
----------------
The object oriented API provides the `Workspace` class, which offers high-level functionality and serves as an
example of how to use the low-level API.
"""
import numpy as np
from . import backend as global_backend
from .varica import mvarica, cspvarica
from .plainica import plainica
from .datatools import dot_special, atleast_3d
from .connectivity import Connectivity
from .connectivity_statistics import surrogate_connectivity, bootstrap_connectivity, test_bootstrap_difference
from .connectivity_statistics import significance_fdr
class Workspace(object):
"""SCoT Workspace
This class provides high-level functionality for source identification, connectivity estimation, and visualization.
Parameters
----------
var : {:class:`~scot.var.VARBase`-like object, dict}
Vector autoregressive model (VAR) object that is used for model fitting.
This can also be a dictionary that is passed as `**kwargs` to backend['var']() in order to
construct a new VAR model object.
locations : array_like, optional
3D Electrode locations. Each row holds the x, y, and z coordinates of an electrode.
reducedim : {int, float, 'no_pca'}, optional
        A number less than 1 is interpreted as the fraction of variance that should remain in the data. All
components that describe in total less than `1-reducedim` of the variance are removed by the PCA step.
An integer number of 1 or greater is interpreted as the number of components to keep after applying the PCA.
If set to 'no_pca' the PCA step is skipped.
nfft : int, optional
Number of frequency bins for connectivity estimation.
backend : dict-like, optional
Specify backend to use. When set to None the backend configured in config.backend is used.
Attributes
----------
`unmixing_` : array
Estimated unmixing matrix.
`mixing_` : array
Estimated mixing matrix.
`plot_diagonal` : str
Configures what is plotted in the diagonal subplots.
**'topo'** (default) plots topoplots on the diagonal,
**'S'** plots the spectral density of each component, and
**'fill'** plots connectivity on the diagonal.
`plot_outside_topo` : bool
Whether to place topoplots in the left column and top row.
`plot_f_range` : (int, int)
Lower and upper frequency limits for plotting. Defaults to [0, fs/2].
"""
def __init__(self, var, locations=None, reducedim=None, nfft=512, fs=2, backend=None):
self.data_ = None
self.cl_ = None
self.fs_ = fs
self.time_offset_ = 0
self.unmixing_ = None
self.mixing_ = None
self.premixing_ = None
self.activations_ = None
self.connectivity_ = None
self.locations_ = locations
self.reducedim_ = reducedim
self.nfft_ = nfft
self.backend_ = backend if backend is not None else global_backend
self.trial_mask_ = []
self.topo_ = None
self.mixmaps_ = []
self.unmixmaps_ = []
self.var_multiclass_ = None
self.var_model_ = None
self.var_cov_ = None
self.plot_diagonal = 'topo'
self.plot_outside_topo = False
self.plot_f_range = [0, fs/2]
self.topo_clipping = "electrodes"
self._plotting = None
try:
self.var_ = self.backend_['var'](**var)
except TypeError:
self.var_ = var
def __str__(self):
if self.data_ is not None:
datastr = '%d trials, %d channels, %d samples' % self.data_.shape
else:
datastr = 'None'
if self.cl_ is not None:
clstr = str(np.unique(self.cl_))
else:
clstr = 'None'
if self.unmixing_ is not None:
sourcestr = str(self.unmixing_.shape[1])
else:
sourcestr = 'None'
if self.var_ is None:
varstr = 'None'
else:
varstr = str(self.var_)
s = 'Workspace:\n'
s += ' Data : ' + datastr + '\n'
s += ' Classes : ' + clstr + '\n'
s += ' Sources : ' + sourcestr + '\n'
s += ' VAR models: ' + varstr + '\n'
return s
def set_locations(self, locations):
""" Set sensor locations.
Parameters
----------
locations : array_like
3D Electrode locations. Each row holds the x, y, and z coordinates of an electrode.
Returns
-------
self : Workspace
The Workspace object.
"""
self.locations_ = locations
return self
def set_premixing(self, premixing):
""" Set premixing matrix.
The premixing matrix maps data to physical channels. If the data is actual channel data,
the premixing matrix can be set to identity. Use this functionality if the data was pre-
transformed with e.g. PCA.
Parameters
----------
premixing : array_like, shape = [n_signals, n_channels]
Matrix that maps data signals to physical channels.
Returns
-------
self : Workspace
The Workspace object.
"""
self.premixing_ = premixing
return self
def set_data(self, data, cl=None, time_offset=0):
""" Assign data to the workspace.
This function assigns a new data set to the workspace. Doing so invalidates currently fitted VAR models,
connectivity estimates, and activations.
Parameters
----------
data : array-like, shape = [n_trials, n_channels, n_samples] or [n_channels, n_samples]
EEG data set
cl : list of valid dict keys
Class labels associated with each trial.
time_offset : float, optional
Trial starting time; used for labelling the x-axis of time/frequency plots.
Returns
-------
self : Workspace
The Workspace object.
"""
self.data_ = atleast_3d(data)
self.cl_ = np.asarray(cl if cl is not None else [None]*self.data_.shape[0])
self.time_offset_ = time_offset
self.var_model_ = None
self.var_cov_ = None
self.connectivity_ = None
self.trial_mask_ = np.ones(self.cl_.size, dtype=bool)
if self.unmixing_ is not None:
self.activations_ = dot_special(self.unmixing_.T, self.data_)
return self
def set_used_labels(self, labels):
""" Specify which trials to use in subsequent analysis steps.
This function masks trials based on their class labels.
Parameters
----------
labels : list of class labels
Marks all trials that have a label that is in the `labels` list for further processing.
Returns
-------
self : Workspace
The Workspace object.
"""
mask = np.zeros(self.cl_.size, dtype=bool)
for l in labels:
mask = np.logical_or(mask, self.cl_ == l)
self.trial_mask_ = mask
return self
def do_mvarica(self, varfit='ensemble', random_state=None):
""" Perform MVARICA
Perform MVARICA source decomposition and VAR model fitting.
Parameters
----------
varfit : string
Determines how to calculate the residuals for source decomposition.
'ensemble' (default) fits one model to the whole data set,
'class' fits a different model for each class, and
'trial' fits a different model for each individual trial.
Returns
-------
self : Workspace
The Workspace object.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain data.
See Also
--------
:func:`mvarica` : MVARICA implementation
"""
if self.data_ is None:
raise RuntimeError("MVARICA requires data to be set")
result = mvarica(x=self.data_[self.trial_mask_, :, :],
cl=self.cl_[self.trial_mask_], var=self.var_,
reducedim=self.reducedim_, backend=self.backend_,
varfit=varfit, random_state=random_state)
self.mixing_ = result.mixing
self.unmixing_ = result.unmixing
self.var_ = result.b
self.connectivity_ = Connectivity(result.b.coef, result.b.rescov,
self.nfft_)
self.activations_ = dot_special(self.unmixing_.T, self.data_)
self.mixmaps_ = []
self.unmixmaps_ = []
return self
def do_cspvarica(self, varfit='ensemble', random_state=None):
""" Perform CSPVARICA
Perform CSPVARICA source decomposition and VAR model fitting.
Parameters
----------
varfit : string
Determines how to calculate the residuals for source decomposition.
'ensemble' (default) fits one model to the whole data set,
'class' fits a different model for each class, and
'trial' fits a different model for each individual trial.
Returns
-------
self : Workspace
The Workspace object.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain data.
See Also
--------
:func:`cspvarica` : CSPVARICA implementation
"""
if self.data_ is None:
raise RuntimeError("CSPVARICA requires data to be set")
try:
sorted(self.cl_)
for c in self.cl_:
assert(c is not None)
except (TypeError, AssertionError):
raise RuntimeError("CSPVARICA requires orderable and hashable class labels that are not None")
result = cspvarica(x=self.data_, var=self.var_, cl=self.cl_,
reducedim=self.reducedim_, backend=self.backend_,
varfit=varfit, random_state=random_state)
self.mixing_ = result.mixing
self.unmixing_ = result.unmixing
self.var_ = result.b
self.connectivity_ = Connectivity(self.var_.coef, self.var_.rescov, self.nfft_)
self.activations_ = dot_special(self.unmixing_.T, self.data_)
self.mixmaps_ = []
self.unmixmaps_ = []
return self
def do_ica(self, random_state=None):
""" Perform ICA
Perform plain ICA source decomposition.
Returns
-------
self : Workspace
The Workspace object.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain data.
"""
if self.data_ is None:
raise RuntimeError("ICA requires data to be set")
result = plainica(x=self.data_[self.trial_mask_, :, :], reducedim=self.reducedim_, backend=self.backend_, random_state=random_state)
self.mixing_ = result.mixing
self.unmixing_ = result.unmixing
self.activations_ = dot_special(self.unmixing_.T, self.data_)
self.var_model_ = None
self.var_cov_ = None
self.connectivity_ = None
self.mixmaps_ = []
self.unmixmaps_ = []
return self
def remove_sources(self, sources):
""" Remove sources from the decomposition.
This function removes sources from the decomposition. Doing so invalidates currently fitted VAR models and
connectivity estimates.
Parameters
----------
sources : {slice, int, array of ints}
Indices of components to remove.
Returns
-------
self : Workspace
The Workspace object.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain a source decomposition.
"""
if self.unmixing_ is None or self.mixing_ is None:
raise RuntimeError("No sources available (run do_mvarica first)")
self.mixing_ = np.delete(self.mixing_, sources, 0)
self.unmixing_ = np.delete(self.unmixing_, sources, 1)
if self.activations_ is not None:
self.activations_ = np.delete(self.activations_, sources, 1)
self.var_model_ = None
self.var_cov_ = None
self.connectivity_ = None
self.mixmaps_ = []
self.unmixmaps_ = []
return self
def keep_sources(self, keep):
"""Keep only the specified sources in the decomposition.
"""
if self.unmixing_ is None or self.mixing_ is None:
raise RuntimeError("No sources available (run do_mvarica first)")
n_sources = self.mixing_.shape[0]
self.remove_sources(np.setdiff1d(np.arange(n_sources), np.array(keep)))
return self
def fit_var(self):
""" Fit a VAR model to the source activations.
Returns
-------
self : Workspace
The Workspace object.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain source activations.
"""
if self.activations_ is None:
raise RuntimeError("VAR fitting requires source activations (run do_mvarica first)")
self.var_.fit(data=self.activations_[self.trial_mask_, :, :])
self.connectivity_ = Connectivity(self.var_.coef, self.var_.rescov, self.nfft_)
return self
def optimize_var(self):
""" Optimize the VAR model's hyperparameters (such as regularization).
Returns
-------
self : Workspace
The Workspace object.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain source activations.
"""
if self.activations_ is None:
raise RuntimeError("VAR fitting requires source activations (run do_mvarica first)")
self.var_.optimize(self.activations_[self.trial_mask_, :, :])
return self
def get_connectivity(self, measure_name, plot=False):
""" Calculate spectral connectivity measure.
Parameters
----------
measure_name : str
Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures.
plot : {False, None, Figure object}, optional
Whether and where to plot the connectivity. If set to **False**, nothing is plotted. Otherwise set to the
Figure object. If set to **None**, a new figure is created.
Returns
-------
measure : array, shape = [n_channels, n_channels, nfft]
Values of the connectivity measure.
fig : Figure object
Instance of the figure in which was plotted. This is only returned if `plot` is not **False**.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain a fitted VAR model.
"""
if self.connectivity_ is None:
raise RuntimeError("Connectivity requires a VAR model (run do_mvarica or fit_var first)")
cm = getattr(self.connectivity_, measure_name)()
cm = np.abs(cm) if np.any(np.iscomplex(cm)) else cm
if plot is None or plot:
fig = plot
if self.plot_diagonal == 'fill':
diagonal = 0
elif self.plot_diagonal == 'S':
diagonal = -1
sm = np.abs(self.connectivity_.S())
sm /= np.max(sm) # scale to 1 since components are scaled arbitrarily anyway
fig = self.plotting.plot_connectivity_spectrum(sm, fs=self.fs_, freq_range=self.plot_f_range,
diagonal=1, border=self.plot_outside_topo, fig=fig)
else:
diagonal = -1
fig = self.plotting.plot_connectivity_spectrum(cm, fs=self.fs_, freq_range=self.plot_f_range,
diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
return cm, fig
return cm
def get_surrogate_connectivity(self, measure_name, repeats=100, plot=False, random_state=None):
""" Calculate spectral connectivity measure under the assumption of no actual connectivity.
Repeatedly samples connectivity from phase-randomized data. This provides estimates of the connectivity
distribution if there was no causal structure in the data.
Parameters
----------
measure_name : str
Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures.
repeats : int, optional
How many surrogate samples to take.
Returns
-------
measure : array, shape = [`repeats`, n_channels, n_channels, nfft]
Values of the connectivity measure for each surrogate.
See Also
--------
:func:`scot.connectivity_statistics.surrogate_connectivity` : Calculates surrogate connectivity
"""
cs = surrogate_connectivity(measure_name, self.activations_[self.trial_mask_, :, :],
self.var_, self.nfft_, repeats, random_state=random_state)
if plot is None or plot:
fig = plot
if self.plot_diagonal == 'fill':
diagonal = 0
elif self.plot_diagonal == 'S':
diagonal = -1
sb = self.get_surrogate_connectivity('absS', repeats)
sb /= np.max(sb) # scale to 1 since components are scaled arbitrarily anyway
su = np.percentile(sb, 95, axis=0)
fig = self.plotting.plot_connectivity_spectrum([su], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=1, border=self.plot_outside_topo, fig=fig)
else:
diagonal = -1
cu = np.percentile(cs, 95, axis=0)
fig = self.plotting.plot_connectivity_spectrum([cu], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
return cs, fig
return cs
def get_bootstrap_connectivity(self, measure_names, repeats=100, num_samples=None, plot=False, random_state=None):
""" Calculate bootstrap estimates of spectral connectivity measures.
Bootstrapping is performed on trial level.
Parameters
----------
measure_names : {str, list of str}
Name(s) of the connectivity measure(s) to calculate. See :class:`Connectivity` for supported measures.
repeats : int, optional
How many bootstrap estimates to take.
num_samples : int, optional
How many samples to take for each bootstrap estimates. Defaults to the same number of trials as present in
the data.
Returns
-------
measure : array, shape = [`repeats`, n_channels, n_channels, nfft]
Values of the connectivity measure for each bootstrap estimate. If `measure_names` is a list of strings a
dictionary is returned, where each key is the name of the measure, and the corresponding values are
ndarrays of shape [`repeats`, n_channels, n_channels, nfft].
See Also
--------
:func:`scot.connectivity_statistics.bootstrap_connectivity` : Calculates bootstrap connectivity
"""
if num_samples is None:
num_samples = np.sum(self.trial_mask_)
cb = bootstrap_connectivity(measure_names, self.activations_[self.trial_mask_, :, :],
self.var_, self.nfft_, repeats, num_samples, random_state=random_state)
if plot is None or plot:
fig = plot
if self.plot_diagonal == 'fill':
diagonal = 0
elif self.plot_diagonal == 'S':
diagonal = -1
sb = self.get_bootstrap_connectivity('absS', repeats, num_samples)
sb /= np.max(sb) # scale to 1 since components are scaled arbitrarily anyway
sm = np.median(sb, axis=0)
sl = np.percentile(sb, 2.5, axis=0)
su = np.percentile(sb, 97.5, axis=0)
fig = self.plotting.plot_connectivity_spectrum([sm, sl, su], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=1, border=self.plot_outside_topo, fig=fig)
else:
diagonal = -1
cm = np.median(cb, axis=0)
cl = np.percentile(cb, 2.5, axis=0)
cu = np.percentile(cb, 97.5, axis=0)
fig = self.plotting.plot_connectivity_spectrum([cm, cl, cu], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
return cb, fig
return cb
def get_tf_connectivity(self, measure_name, winlen, winstep, plot=False, baseline=None, crange='default'):
""" Calculate estimate of time-varying connectivity.
Connectivity is estimated in a sliding window approach on the current data set. The window is stepped
`n_steps` = (`n_samples` - `winlen`) // `winstep` times.
Parameters
----------
measure_name : str
Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures.
winlen : int
Length of the sliding window (in samples).
winstep : int
Step size for sliding window (in samples).
plot : {False, None, Figure object}, optional
Whether and where to plot the connectivity. If set to **False**, nothing is plotted. Otherwise set to the
Figure object. If set to **None**, a new figure is created.
baseline : [int, int] or None
Start and end of the baseline period in samples. The baseline is subtracted from the connectivity. It is
computed as the average of all windows that contain start or end, or fall between start and end.
If set to None no baseline is subtracted.
Returns
-------
result : array, shape = [n_channels, n_channels, nfft, n_steps]
Values of the connectivity measure.
fig : Figure object, optional
Instance of the figure in which was plotted. This is only returned if `plot` is not **False**.
Raises
------
RuntimeError
If the :class:`Workspace` instance does not contain a fitted VAR model.
"""
if self.activations_ is None:
raise RuntimeError("Time/Frequency Connectivity requires activations (call set_data after do_mvarica)")
_, m, n = self.activations_.shape
steps = list(range(0, n - winlen, winstep))
nstep = len(steps)
result = np.zeros((m, m, self.nfft_, nstep), np.complex64)
for i, j in enumerate(steps):
win = np.arange(winlen) + j
data = self.activations_[:, :, win]
data = data[self.trial_mask_, :, :]
self.var_.fit(data)
con = Connectivity(self.var_.coef, self.var_.rescov, self.nfft_)
result[:, :, :, i] = getattr(con, measure_name)()
if baseline:
inref = np.zeros(nstep, bool)
for i, j in enumerate(steps):
a, b = j, j + winlen - 1
inref[i] = b >= baseline[0] and a <= baseline[1]
if np.any(inref):
ref = np.mean(result[:, :, :, inref], axis=3, keepdims=True)
result -= ref
if plot is None or plot:
fig = plot
t0 = 0.5 * winlen / self.fs_ + self.time_offset_
t1 = self.data_.shape[2] / self.fs_ - 0.5 * winlen / self.fs_ + self.time_offset_
if self.plot_diagonal == 'fill':
diagonal = 0
elif self.plot_diagonal == 'S':
diagonal = -1
s = np.abs(self.get_tf_connectivity('S', winlen, winstep))
if crange == 'default':
crange = [np.min(s), np.max(s)]
fig = self.plotting.plot_connectivity_timespectrum(s, fs=self.fs_, crange=[np.min(s), np.max(s)],
freq_range=self.plot_f_range, time_range=[t0, t1],
diagonal=1, border=self.plot_outside_topo, fig=fig)
else:
diagonal = -1
tfc = self._clean_measure(measure_name, result)
if crange == 'default':
if diagonal == -1:
for m in range(tfc.shape[0]):
tfc[m, m, :, :] = 0
crange = [np.min(tfc), np.max(tfc)]
fig = self.plotting.plot_connectivity_timespectrum(tfc, fs=self.fs_, crange=crange,
freq_range=self.plot_f_range, time_range=[t0, t1],
diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
return result, fig
return result
def compare_conditions(self, labels1, labels2, measure_name, alpha=0.01, repeats=100, num_samples=None, plot=False, random_state=None):
""" Test for significant difference in connectivity of two sets of class labels.
Connectivity estimates are obtained by bootstrapping. Correction for multiple testing is performed by
controlling the false discovery rate (FDR).
Parameters
----------
labels1, labels2 : list of class labels
The two sets of class labels to compare. Each set may contain more than one label.
measure_name : str
Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures.
alpha : float, optional
Maximum allowed FDR. The ratio of falsely detected significant differences is guaranteed to be less than
`alpha`.
repeats : int, optional
How many bootstrap estimates to take.
num_samples : int, optional
How many samples to take for each bootstrap estimates. Defaults to the same number of trials as present in
the data.
plot : {False, None, Figure object}, optional
Whether and where to plot the connectivity. If set to **False**, nothing is plotted. Otherwise set to the
Figure object. If set to **None**, a new figure is created.
Returns
-------
p : array, shape = [n_channels, n_channels, nfft]
Uncorrected p-values.
s : array, dtype=bool, shape = [n_channels, n_channels, nfft]
FDR corrected significance. True means the difference is significant in this location.
fig : Figure object, optional
Instance of the figure in which was plotted. This is only returned if `plot` is not **False**.
"""
self.set_used_labels(labels1)
ca = self.get_bootstrap_connectivity(measure_name, repeats, num_samples, random_state=random_state)
self.set_used_labels(labels2)
cb = self.get_bootstrap_connectivity(measure_name, repeats, num_samples, random_state=random_state)
p = test_bootstrap_difference(ca, cb)
s = significance_fdr(p, alpha)
if plot is None or plot:
fig = plot
if self.plot_diagonal == 'topo':
diagonal = -1
elif self.plot_diagonal == 'fill':
diagonal = 0
            elif self.plot_diagonal == 'S':
diagonal = -1
self.set_used_labels(labels1)
sa = self.get_bootstrap_connectivity('absS', repeats, num_samples)
sm = np.median(sa, axis=0)
sl = np.percentile(sa, 2.5, axis=0)
su = np.percentile(sa, 97.5, axis=0)
fig = self.plotting.plot_connectivity_spectrum([sm, sl, su], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=1, border=self.plot_outside_topo, fig=fig)
self.set_used_labels(labels2)
sb = self.get_bootstrap_connectivity('absS', repeats, num_samples)
sm = np.median(sb, axis=0)
sl = np.percentile(sb, 2.5, axis=0)
su = np.percentile(sb, 97.5, axis=0)
fig = self.plotting.plot_connectivity_spectrum([sm, sl, su], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=1, border=self.plot_outside_topo, fig=fig)
p_s = test_bootstrap_difference(ca, cb)
s_s = significance_fdr(p_s, alpha)
self.plotting.plot_connectivity_significance(s_s, fs=self.fs_, freq_range=self.plot_f_range,
diagonal=1, border=self.plot_outside_topo, fig=fig)
else:
diagonal = -1
cm = np.median(ca, axis=0)
cl = np.percentile(ca, 2.5, axis=0)
cu = np.percentile(ca, 97.5, axis=0)
fig = self.plotting.plot_connectivity_spectrum([cm, cl, cu], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
cm = np.median(cb, axis=0)
cl = np.percentile(cb, 2.5, axis=0)
cu = np.percentile(cb, 97.5, axis=0)
fig = self.plotting.plot_connectivity_spectrum([cm, cl, cu], fs=self.fs_, freq_range=self.plot_f_range,
diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
self.plotting.plot_connectivity_significance(s, fs=self.fs_, freq_range=self.plot_f_range,
diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
return p, s, fig
return p, s
def show_plots(self):
"""Show current plots.
        This is only a convenience wrapper around :func:`matplotlib.pyplot.show`.
"""
self.plotting.show_plots()
def plot_source_topos(self, common_scale=None):
""" Plot topography of the Source decomposition.
Parameters
----------
common_scale : float, optional
            If set to None, each topoplot's color axis is scaled individually. Otherwise specifies the percentile
            (1-99) of values across all plots; that percentile is used as the maximum of the color scale.
"""
if self.unmixing_ is None and self.mixing_ is None:
raise RuntimeError("No sources available (run do_mvarica first)")
self._prepare_plots(True, True)
self.plotting.plot_sources(self.topo_, self.mixmaps_, self.unmixmaps_, common_scale)
def plot_connectivity_topos(self, fig=None):
""" Plot scalp projections of the sources.
This function only plots the topos. Use in combination with connectivity plotting.
Parameters
----------
fig : {None, Figure object}, optional
            Where to plot the topos. If set to **None**, a new figure is created. Otherwise plot into the provided
figure object.
Returns
-------
fig : Figure object
Instance of the figure in which was plotted.
"""
self._prepare_plots(True, False)
if self.plot_outside_topo:
fig = self.plotting.plot_connectivity_topos('outside', self.topo_, self.mixmaps_, fig)
elif self.plot_diagonal == 'topo':
fig = self.plotting.plot_connectivity_topos('diagonal', self.topo_, self.mixmaps_, fig)
return fig
def plot_connectivity_surrogate(self, measure_name, repeats=100, fig=None):
""" Plot spectral connectivity measure under the assumption of no actual connectivity.
Repeatedly samples connectivity from phase-randomized data. This provides estimates of the connectivity
distribution if there was no causal structure in the data.
Parameters
----------
measure_name : str
Name of the connectivity measure to calculate. See :class:`Connectivity` for supported measures.
repeats : int, optional
How many surrogate samples to take.
fig : {None, Figure object}, optional
            Where to plot the topos. If set to **None**, a new figure is created. Otherwise plot into the provided
figure object.
Returns
-------
fig : Figure object
Instance of the figure in which was plotted.
"""
cb = self.get_surrogate_connectivity(measure_name, repeats)
self._prepare_plots(True, False)
cu = np.percentile(cb, 95, axis=0)
fig = self.plotting.plot_connectivity_spectrum([cu], self.fs_, freq_range=self.plot_f_range, fig=fig)
return fig
@property
def plotting(self):
if not self._plotting:
from . import plotting
self._plotting = plotting
return self._plotting
def _prepare_plots(self, mixing=False, unmixing=False):
if self.locations_ is None:
raise RuntimeError("Need sensor locations for plotting")
if self.topo_ is None:
from scot.eegtopo.topoplot import Topoplot
self.topo_ = Topoplot(clipping=self.topo_clipping)
self.topo_.set_locations(self.locations_)
if mixing and not self.mixmaps_:
premix = self.premixing_ if self.premixing_ is not None else np.eye(self.mixing_.shape[1])
self.mixmaps_ = self.plotting.prepare_topoplots(self.topo_, np.dot(self.mixing_, premix))
#self.mixmaps_ = self.plotting.prepare_topoplots(self.topo_, self.mixing_)
if unmixing and not self.unmixmaps_:
preinv = np.linalg.pinv(self.premixing_) if self.premixing_ is not None else np.eye(self.unmixing_.shape[0])
self.unmixmaps_ = self.plotting.prepare_topoplots(self.topo_, np.dot(preinv, self.unmixing_).T)
#self.unmixmaps_ = self.plotting.prepare_topoplots(self.topo_, self.unmixing_.transpose())
@staticmethod
def _clean_measure(measure, a):
if measure in ['a', 'H', 'COH', 'pCOH']:
return np.abs(a)
elif measure in ['S', 'g']:
return np.log(np.abs(a))
else:
return np.real(a)
|
|
from random import shuffle
from random import randint
import matplotlib.pyplot as plt
class Session(object):
def __init__(self, n, capacity, course):
self.n = n
self.capacity = capacity
self.enrolled = 0
self.students = []
self.course = course
def add(self, student):
        if (student in self.students
                or self.enrolled == self.capacity
                or student.courses[(self.n + 1) % 2] == self.course):
            return False
        self.students.append(student)
        student.courses[self.n] = self.course
        self.enrolled += 1
        return self.course
def remove(self, student):
if student not in self.students:
return False
else:
self.students.remove(student)
student.courses[self.n] = 0
self.enrolled -= 1
return self.course
def __repr__(self):
return self.students.__repr__()
class Course(object):
def __init__(self, course_name, capacity):
self.course_name = course_name
self.capacity = capacity
self.sessions = [Session(0, capacity, self), Session(1, capacity, self)]
def add_student(self, student, session):
return self.sessions[session].add(student)
def remove_student(self, student, session):
return self.sessions[session].remove(student)
def __repr__(self):
return self.course_name + ' ' + ' '.join([s.__repr__() for s in self.sessions])
def __str__(self):
return self.course_name
class Student(object):
def __init__(self, name):
self.name = name
self.preferences = ["" for p in range(3)]
self.broad_preferences = []
self.happiness = 0
self.courses = ["" for c in range(2)]
def add_preference(self, course, rank):
self.preferences[rank] = course
def adjusted_preferences(self, courses):
shuffle(courses)
for course in courses:
if course not in self.preferences:
self.broad_preferences.append(course)
for course in self.preferences:
self.broad_preferences.insert(0, course)
def enroll(self, course, session):
return course.add_student(self, session)
def un_enroll(self, session):
return self.courses[session].remove_student(self, session)
def compute_happiness(self):
self.happiness = 0
for c in self.courses:
if c in self.preferences:
self.happiness += 3 - self.preferences.index(c)
return self.happiness
def __repr__(self):
return self.name + ' ' + ' '.join([c.__str__() for c in self.courses])
class LocalSearchSolver(object):
def __init__(self):
self.N = 0
self.K = 0
self.courses = []
self.students = []
self.course_dict = {}
self.student_dict = {}
self.happiness = 0
self.w_1 = 1
self.w_2 = 1.2
self.points = []
def construct_from_file(self):
with open("studenti.txt", "r") as f:
for student in f.read().split("\n"):
self.add_student(student)
with open("corsi.txt", "r") as f:
for corsi in f.read().split("\n"):
info = corsi.split(" ")
self.add_course(info[0], int(info[1]))
with open("preferenze.txt", "r") as f:
for preferenza, studente in zip(f, self.students):
info = preferenza.split(" ")
for corso, i in zip(info, range(3)):
corso = corso.replace("\n", "")
studente.add_preference(self.course_dict[corso], 2-i)
for student in self.students:
student.adjusted_preferences(self.courses)
return True
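    # Expected input layout for construct_from_file (inferred from the
    # parsing above; file names and separators are exactly as used there):
    #   studenti.txt    one student name per line
    #   corsi.txt       one "<course_name> <capacity>" pair per line
    #   preferenze.txt  three space-separated course names per line, one
    #                   line per student, in the same order as studenti.txt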
def add_student(self, name):
self.N += 1
self.students.append(Student(name))
self.student_dict[name] = self.students[-1]
def add_course(self, course_name, capacity):
self.K += 1
self.courses.append(Course(course_name, capacity))
self.course_dict[course_name] = self.courses[-1]
def heuristic(self):
students_happiness = [student.compute_happiness() for student in self.students]
total_happiness = self.w_1 * float(sum(students_happiness))/float(self.N)
equality = self.w_2 * min(students_happiness)
unhappy_students = students_happiness.count(0)
self.happiness = self.w_1 * total_happiness + self.w_2 * equality - 0.05 * unhappy_students
return self.happiness
def print_equality(self):
students_happiness = [student.compute_happiness() for student in self.students]
equality = self.w_2 * min(students_happiness)
print(equality)
def print_total(self):
students_happiness = [student.compute_happiness() for student in self.students]
total_happiness = self.w_1 * float(sum(students_happiness))/float(self.N)
print(total_happiness)
def count_0s(self):
students_happiness = [student.compute_happiness() for student in self.students]
print(students_happiness.count(0))
def plot_histo(self):
students_happiness = [student.compute_happiness() for student in self.students]
plt.hist(students_happiness, bins=[0, 1, 2, 3, 4, 5])
plt.show()
def greedy_assign(self):
shuffle(self.students)
for student in self.students:
i = 0
while not student.enroll(student.broad_preferences[i], 0):
i += 1
for student in self.students[::-1]:
i = 0
while not student.enroll(student.broad_preferences[i], 1):
i += 1
def swap(self, student_a, session_a, student_b, session_b):
course_a = student_a.un_enroll(session_a)
course_b = student_b.un_enroll(session_b)
student_a.enroll(course_b, session_a)
student_b.enroll(course_a, session_b)
# update
return True
def local_search(self, iterations = 500000):
current_happiness = self.heuristic()
starting_happiness = current_happiness
step = 0
self.plot_histo()
print(self.count_0s())
for i in range(iterations):
session_a = randint(0, 1)
session_b = session_a
student_a = self.students[randint(0, len(self.students) - 1)]
student_b = self.students[randint(0, len(self.students) - 1)]
if student_a.courses[(session_a + 1) % 2] != student_b.courses[session_b] and student_b.courses[(session_b + 1) % 2] != student_a.courses[session_a] and student_a != student_b:
self.swap(student_a, session_a, student_b, session_b)
if self.heuristic() >= current_happiness:
current_happiness = self.happiness
if step % 20 == 0:
self.points.append(current_happiness)
step = 0
step += 1
else:
self.swap(student_a, session_a, student_b, session_b)
print(self.count_0s())
print(starting_happiness, current_happiness)
self.print_equality()
self.print_total()
self.plot_histo()
plt.plot(self.points)
plt.ylabel('Happiness')
plt.show()
LSS = LocalSearchSolver()
LSS.construct_from_file()
LSS.greedy_assign()
LSS.local_search()
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import fixtures
import jsonschema
from oslo_serialization import jsonutils as json
import six
from tempest.lib.common import http
from tempest.lib.common import rest_client
from tempest.lib import exceptions
from tempest.tests import base
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib import fake_http
import tempest.tests.utils as utils
class BaseRestClientTestClass(base.TestCase):
url = 'fake_endpoint'
def setUp(self):
super(BaseRestClientTestClass, self).setUp()
self.fake_auth_provider = fake_auth_provider.FakeAuthProvider()
self.rest_client = rest_client.RestClient(
self.fake_auth_provider, None, None)
self.patchobject(http.ClosingHttp, 'request', self.fake_http.request)
self.useFixture(fixtures.MockPatchObject(self.rest_client,
'_log_request'))
class TestRestClientHTTPMethods(BaseRestClientTestClass):
def setUp(self):
self.fake_http = fake_http.fake_httplib2()
super(TestRestClientHTTPMethods, self).setUp()
self.useFixture(fixtures.MockPatchObject(self.rest_client,
'_error_checker'))
def test_post(self):
__, return_dict = self.rest_client.post(self.url, {}, {})
self.assertEqual('POST', return_dict['method'])
def test_get(self):
__, return_dict = self.rest_client.get(self.url)
self.assertEqual('GET', return_dict['method'])
def test_delete(self):
__, return_dict = self.rest_client.delete(self.url)
self.assertEqual('DELETE', return_dict['method'])
def test_patch(self):
__, return_dict = self.rest_client.patch(self.url, {}, {})
self.assertEqual('PATCH', return_dict['method'])
def test_put(self):
__, return_dict = self.rest_client.put(self.url, {}, {})
self.assertEqual('PUT', return_dict['method'])
def test_head(self):
self.useFixture(fixtures.MockPatchObject(self.rest_client,
'response_checker'))
__, return_dict = self.rest_client.head(self.url)
self.assertEqual('HEAD', return_dict['method'])
def test_copy(self):
__, return_dict = self.rest_client.copy(self.url)
self.assertEqual('COPY', return_dict['method'])
class TestRestClientNotFoundHandling(BaseRestClientTestClass):
def setUp(self):
self.fake_http = fake_http.fake_httplib2(404)
super(TestRestClientNotFoundHandling, self).setUp()
def test_post(self):
self.assertRaises(exceptions.NotFound, self.rest_client.post,
self.url, {}, {})
class TestRestClientHeadersJSON(TestRestClientHTTPMethods):
def _verify_headers(self, resp):
resp = dict((k.lower(), v) for k, v in six.iteritems(resp))
self.assertEqual(self.header_value, resp['accept'])
self.assertEqual(self.header_value, resp['content-type'])
def setUp(self):
super(TestRestClientHeadersJSON, self).setUp()
self.header_value = 'application/json'
def test_post(self):
resp, __ = self.rest_client.post(self.url, {})
self._verify_headers(resp)
def test_get(self):
resp, __ = self.rest_client.get(self.url)
self._verify_headers(resp)
def test_delete(self):
resp, __ = self.rest_client.delete(self.url)
self._verify_headers(resp)
def test_patch(self):
resp, __ = self.rest_client.patch(self.url, {})
self._verify_headers(resp)
def test_put(self):
resp, __ = self.rest_client.put(self.url, {})
self._verify_headers(resp)
def test_head(self):
self.useFixture(fixtures.MockPatchObject(self.rest_client,
'response_checker'))
resp, __ = self.rest_client.head(self.url)
self._verify_headers(resp)
def test_copy(self):
resp, __ = self.rest_client.copy(self.url)
self._verify_headers(resp)
class TestRestClientUpdateHeaders(BaseRestClientTestClass):
def setUp(self):
self.fake_http = fake_http.fake_httplib2()
super(TestRestClientUpdateHeaders, self).setUp()
self.useFixture(fixtures.MockPatchObject(self.rest_client,
'_error_checker'))
self.headers = {'X-Configuration-Session': 'session_id'}
def test_post_update_headers(self):
__, return_dict = self.rest_client.post(self.url, {},
extra_headers=True,
headers=self.headers)
self.assertDictContainsSubset(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
'Accept': 'application/json'},
return_dict['headers']
)
def test_get_update_headers(self):
__, return_dict = self.rest_client.get(self.url,
extra_headers=True,
headers=self.headers)
self.assertDictContainsSubset(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
'Accept': 'application/json'},
return_dict['headers']
)
def test_delete_update_headers(self):
__, return_dict = self.rest_client.delete(self.url,
extra_headers=True,
headers=self.headers)
self.assertDictContainsSubset(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
'Accept': 'application/json'},
return_dict['headers']
)
def test_patch_update_headers(self):
__, return_dict = self.rest_client.patch(self.url, {},
extra_headers=True,
headers=self.headers)
self.assertDictContainsSubset(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
'Accept': 'application/json'},
return_dict['headers']
)
def test_put_update_headers(self):
__, return_dict = self.rest_client.put(self.url, {},
extra_headers=True,
headers=self.headers)
self.assertDictContainsSubset(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
'Accept': 'application/json'},
return_dict['headers']
)
def test_head_update_headers(self):
self.useFixture(fixtures.MockPatchObject(self.rest_client,
'response_checker'))
__, return_dict = self.rest_client.head(self.url,
extra_headers=True,
headers=self.headers)
self.assertDictContainsSubset(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
'Accept': 'application/json'},
return_dict['headers']
)
def test_copy_update_headers(self):
__, return_dict = self.rest_client.copy(self.url,
extra_headers=True,
headers=self.headers)
self.assertDictContainsSubset(
{'X-Configuration-Session': 'session_id',
'Content-Type': 'application/json',
'Accept': 'application/json'},
return_dict['headers']
)
class TestRestClientParseRespJSON(BaseRestClientTestClass):
TYPE = "json"
keys = ["fake_key1", "fake_key2"]
values = ["fake_value1", "fake_value2"]
item_expected = dict((key, value) for (key, value) in zip(keys, values))
list_expected = {"body_list": [
{keys[0]: values[0]},
{keys[1]: values[1]},
]}
dict_expected = {"body_dict": {
keys[0]: values[0],
keys[1]: values[1],
}}
null_dict = {}
def setUp(self):
self.fake_http = fake_http.fake_httplib2()
super(TestRestClientParseRespJSON, self).setUp()
self.rest_client.TYPE = self.TYPE
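# _parse_resp unwraps a body that has a single top-level key whose value is a
# list or dict and returns that value; any other body is returned unchanged.
# The tests below cover each of those cases.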
def test_parse_resp_body_item(self):
body = self.rest_client._parse_resp(json.dumps(self.item_expected))
self.assertEqual(self.item_expected, body)
def test_parse_resp_body_list(self):
body = self.rest_client._parse_resp(json.dumps(self.list_expected))
self.assertEqual(self.list_expected["body_list"], body)
def test_parse_resp_body_dict(self):
body = self.rest_client._parse_resp(json.dumps(self.dict_expected))
self.assertEqual(self.dict_expected["body_dict"], body)
def test_parse_resp_two_top_keys(self):
dict_two_keys = self.dict_expected.copy()
dict_two_keys.update({"second_key": ""})
body = self.rest_client._parse_resp(json.dumps(dict_two_keys))
self.assertEqual(dict_two_keys, body)
def test_parse_resp_one_top_key_without_list_or_dict(self):
data = {"one_top_key": "not_list_or_dict_value"}
body = self.rest_client._parse_resp(json.dumps(data))
self.assertEqual(data, body)
def test_parse_nullable_dict(self):
body = self.rest_client._parse_resp(json.dumps(self.null_dict))
self.assertEqual(self.null_dict, body)
def test_parse_empty_list(self):
empty_list = []
body = self.rest_client._parse_resp(json.dumps(empty_list))
self.assertEqual(empty_list, body)
class TestRestClientErrorCheckerJSON(base.TestCase):
c_type = "application/json"
def set_data(self, r_code, enc=None, r_body=None, absolute_limit=True):
if enc is None:
enc = self.c_type
resp_dict = {'status': r_code, 'content-type': enc}
resp_body = {'resp_body': 'fake_resp_body'}
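# A 'retry-after' header plus an 'overLimit' body marks the 413 response as a
# rate limit (RateLimitExceeded) rather than an absolute limit (OverLimit).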
if absolute_limit is False:
resp_dict.update({'retry-after': 120})
resp_body.update({'overLimit': {'message': 'fake_message'}})
resp = fake_http.fake_http_response(headers=resp_dict,
status=int(r_code),
body=json.dumps(resp_body))
data = {
"resp": resp,
"resp_body": json.dumps(resp_body)
}
if r_body is not None:
data.update({"resp_body": r_body})
return data
def setUp(self):
super(TestRestClientErrorCheckerJSON, self).setUp()
self.rest_client = rest_client.RestClient(
fake_auth_provider.FakeAuthProvider(), None, None)
def test_response_less_than_400(self):
self.rest_client._error_checker(**self.set_data("399"))
def _test_error_checker(self, exception_type, data):
e = self.assertRaises(exception_type,
self.rest_client._error_checker,
**data)
self.assertEqual(e.resp, data['resp'])
self.assertTrue(hasattr(e, 'resp_body'))
return e
def test_response_400(self):
self._test_error_checker(exceptions.BadRequest, self.set_data("400"))
def test_response_401(self):
self._test_error_checker(exceptions.Unauthorized, self.set_data("401"))
def test_response_403(self):
self._test_error_checker(exceptions.Forbidden, self.set_data("403"))
def test_response_404(self):
self._test_error_checker(exceptions.NotFound, self.set_data("404"))
def test_response_409(self):
self._test_error_checker(exceptions.Conflict, self.set_data("409"))
def test_response_410(self):
self._test_error_checker(exceptions.Gone, self.set_data("410"))
def test_response_412(self):
self._test_error_checker(exceptions.PreconditionFailed,
self.set_data("412"))
def test_response_413(self):
self._test_error_checker(exceptions.OverLimit, self.set_data("413"))
def test_response_413_without_absolute_limit(self):
self._test_error_checker(exceptions.RateLimitExceeded,
self.set_data("413", absolute_limit=False))
def test_response_415(self):
self._test_error_checker(exceptions.InvalidContentType,
self.set_data("415"))
def test_response_422(self):
self._test_error_checker(exceptions.UnprocessableEntity,
self.set_data("422"))
def test_response_500_with_text(self):
# _parse_resp is expected to return 'str'
self._test_error_checker(exceptions.ServerFault, self.set_data("500"))
def test_response_501_with_text(self):
self._test_error_checker(exceptions.NotImplemented,
self.set_data("501"))
def test_response_400_with_dict(self):
r_body = '{"resp_body": {"err": "fake_resp_body"}}'
e = self._test_error_checker(exceptions.BadRequest,
self.set_data("400", r_body=r_body))
if self.c_type == 'application/json':
expected = {"err": "fake_resp_body"}
else:
expected = r_body
self.assertEqual(expected, e.resp_body)
def test_response_401_with_dict(self):
r_body = '{"resp_body": {"err": "fake_resp_body"}}'
e = self._test_error_checker(exceptions.Unauthorized,
self.set_data("401", r_body=r_body))
if self.c_type == 'application/json':
expected = {"err": "fake_resp_body"}
else:
expected = r_body
self.assertEqual(expected, e.resp_body)
def test_response_403_with_dict(self):
r_body = '{"resp_body": {"err": "fake_resp_body"}}'
e = self._test_error_checker(exceptions.Forbidden,
self.set_data("403", r_body=r_body))
if self.c_type == 'application/json':
expected = {"err": "fake_resp_body"}
else:
expected = r_body
self.assertEqual(expected, e.resp_body)
def test_response_404_with_dict(self):
r_body = '{"resp_body": {"err": "fake_resp_body"}}'
e = self._test_error_checker(exceptions.NotFound,
self.set_data("404", r_body=r_body))
if self.c_type == 'application/json':
expected = {"err": "fake_resp_body"}
else:
expected = r_body
self.assertEqual(expected, e.resp_body)
def test_response_404_with_invalid_dict(self):
r_body = '{"foo": "bar"]'
e = self._test_error_checker(exceptions.NotFound,
self.set_data("404", r_body=r_body))
expected = r_body
self.assertEqual(expected, e.resp_body)
def test_response_410_with_dict(self):
r_body = '{"resp_body": {"err": "fake_resp_body"}}'
e = self._test_error_checker(exceptions.Gone,
self.set_data("410", r_body=r_body))
if self.c_type == 'application/json':
expected = {"err": "fake_resp_body"}
else:
expected = r_body
self.assertEqual(expected, e.resp_body)
def test_response_410_with_invalid_dict(self):
r_body = '{"foo": "bar"]'
e = self._test_error_checker(exceptions.Gone,
self.set_data("410", r_body=r_body))
expected = r_body
self.assertEqual(expected, e.resp_body)
def test_response_409_with_dict(self):
r_body = '{"resp_body": {"err": "fake_resp_body"}}'
e = self._test_error_checker(exceptions.Conflict,
self.set_data("409", r_body=r_body))
if self.c_type == 'application/json':
expected = {"err": "fake_resp_body"}
else:
expected = r_body
self.assertEqual(expected, e.resp_body)
def test_response_500_with_dict(self):
r_body = '{"resp_body": {"err": "fake_resp_body"}}'
e = self._test_error_checker(exceptions.ServerFault,
self.set_data("500", r_body=r_body))
if self.c_type == 'application/json':
expected = {"err": "fake_resp_body"}
else:
expected = r_body
self.assertEqual(expected, e.resp_body)
def test_response_501_with_dict(self):
r_body = '{"resp_body": {"err": "fake_resp_body"}}'
self._test_error_checker(exceptions.NotImplemented,
self.set_data("501", r_body=r_body))
def test_response_bigger_than_400(self):
# Any response code greater than 400 that is not in
# (401, 403, 404, 409, 412, 413, 422, 500, 501) is unexpected.
self._test_error_checker(exceptions.UnexpectedResponseCode,
self.set_data("402"))
class TestRestClientErrorCheckerTEXT(TestRestClientErrorCheckerJSON):
c_type = "text/plain"
def test_fake_content_type(self):
# This test only needs to run in one of the subclasses.
# Any response code greater than 400 returned with an unknown
# content type raises UnexpectedContentType.
self._test_error_checker(exceptions.UnexpectedContentType,
self.set_data("405", enc="fake_enc"))
def test_response_413_without_absolute_limit(self):
# Skip this test because rest_client cannot get overLimit message
# from text body.
pass
class TestRestClientUtils(BaseRestClientTestClass):
def _is_resource_deleted(self, resource_id):
if not isinstance(self.retry_pass, int):
return False
if self.retry_count >= self.retry_pass:
return True
self.retry_count = self.retry_count + 1
return False
def setUp(self):
self.fake_http = fake_http.fake_httplib2()
super(TestRestClientUtils, self).setUp()
self.retry_count = 0
self.retry_pass = None
self.original_deleted_method = self.rest_client.is_resource_deleted
self.rest_client.is_resource_deleted = self._is_resource_deleted
def test_wait_for_resource_deletion(self):
self.retry_pass = 2
# Ensure timeout long enough for loop execution to hit retry count
self.rest_client.build_timeout = 500
sleep_mock = self.patch('time.sleep')
self.rest_client.wait_for_resource_deletion('1234')
self.assertEqual(len(sleep_mock.mock_calls), 2)
def test_wait_for_resource_deletion_not_deleted(self):
self.patch('time.sleep')
# Set timeout to be very quick to force exception faster
timeout = 1
self.rest_client.build_timeout = timeout
time_mock = self.patch('time.time')
time_mock.side_effect = utils.generate_timeout_series(timeout)
self.assertRaises(exceptions.TimeoutException,
self.rest_client.wait_for_resource_deletion,
'1234')
# time.time() should be called twice, first to start the timer
# and then to compute the timedelta
self.assertEqual(2, time_mock.call_count)
def test_wait_for_deletion_with_unimplemented_deleted_method(self):
self.rest_client.is_resource_deleted = self.original_deleted_method
self.assertRaises(NotImplementedError,
self.rest_client.wait_for_resource_deletion,
'1234')
def test_get_versions(self):
self.rest_client._parse_resp = lambda x: [{'id': 'v1'}, {'id': 'v2'}]
actual_resp, actual_versions = self.rest_client.get_versions()
self.assertEqual(['v1', 'v2'], list(actual_versions))
def test__str__(self):
def get_token():
return "deadbeef"
self.fake_auth_provider.get_token = get_token
self.assertIsNotNone(str(self.rest_client))
class TestRateLimiting(BaseRestClientTestClass):
def setUp(self):
self.fake_http = fake_http.fake_httplib2()
super(TestRateLimiting, self).setUp()
def test__get_retry_after_delay_with_integer(self):
resp = {'retry-after': '123'}
self.assertEqual(123, self.rest_client._get_retry_after_delay(resp))
def test__get_retry_after_delay_with_http_date(self):
resp = {
'date': 'Mon, 4 Apr 2016 21:56:23 GMT',
'retry-after': 'Mon, 4 Apr 2016 21:58:26 GMT',
}
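# 21:58:26 minus 21:56:23 is 123 seconds.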
self.assertEqual(123, self.rest_client._get_retry_after_delay(resp))
def test__get_retry_after_delay_of_zero_with_integer(self):
resp = {'retry-after': '0'}
self.assertEqual(1, self.rest_client._get_retry_after_delay(resp))
def test__get_retry_after_delay_of_zero_with_http_date(self):
resp = {
'date': 'Mon, 4 Apr 2016 21:56:23 GMT',
'retry-after': 'Mon, 4 Apr 2016 21:56:23 GMT',
}
self.assertEqual(1, self.rest_client._get_retry_after_delay(resp))
def test__get_retry_after_delay_with_missing_date_header(self):
resp = {
'retry-after': 'Mon, 4 Apr 2016 21:58:26 GMT',
}
self.assertRaises(ValueError, self.rest_client._get_retry_after_delay,
resp)
def test__get_retry_after_delay_with_invalid_http_date(self):
resp = {
'retry-after': 'Mon, 4 AAA 2016 21:58:26 GMT',
'date': 'Mon, 4 Apr 2016 21:56:23 GMT',
}
self.assertRaises(ValueError, self.rest_client._get_retry_after_delay,
resp)
def test__get_retry_after_delay_with_missing_retry_after_header(self):
self.assertRaises(ValueError, self.rest_client._get_retry_after_delay,
{})
def test_is_absolute_limit_gives_false_with_retry_after(self):
resp = {'retry-after': 123}
# is_absolute_limit() requires the overLimit body to be unwrapped
resp_body = self.rest_client._parse_resp("""{
"overLimit": {
"message": ""
}
}""")
self.assertFalse(self.rest_client.is_absolute_limit(resp, resp_body))
class TestProperties(BaseRestClientTestClass):
def setUp(self):
self.fake_http = fake_http.fake_httplib2()
super(TestProperties, self).setUp()
creds_dict = {
'username': 'test-user',
'user_id': 'test-user_id',
'tenant_name': 'test-tenant_name',
'tenant_id': 'test-tenant_id',
'password': 'test-password'
}
self.rest_client = rest_client.RestClient(
fake_auth_provider.FakeAuthProvider(creds_dict=creds_dict),
None, None)
def test_properties(self):
self.assertEqual('test-user', self.rest_client.user)
self.assertEqual('test-user_id', self.rest_client.user_id)
self.assertEqual('test-tenant_name', self.rest_client.tenant_name)
self.assertEqual('test-tenant_id', self.rest_client.tenant_id)
self.assertEqual('test-password', self.rest_client.password)
self.rest_client.api_version = 'v1'
expected = {'api_version': 'v1',
'endpoint_type': 'publicURL',
'region': None,
'name': None,
'service': None,
'skip_path': True}
self.rest_client.skip_path()
self.assertEqual(expected, self.rest_client.filters)
self.rest_client.reset_path()
self.rest_client.api_version = 'v1'
expected = {'api_version': 'v1',
'endpoint_type': 'publicURL',
'region': None,
'name': None,
'service': None}
self.assertEqual(expected, self.rest_client.filters)
class TestExpectedSuccess(BaseRestClientTestClass):
def setUp(self):
self.fake_http = fake_http.fake_httplib2()
super(TestExpectedSuccess, self).setUp()
def test_expected_succes_int_match(self):
expected_code = 202
read_code = 202
resp = self.rest_client.expected_success(expected_code, read_code)
# Assert None resp on success
self.assertFalse(resp)
def test_expected_succes_int_no_match(self):
expected_code = 204
read_code = 202
self.assertRaises(exceptions.InvalidHttpSuccessCode,
self.rest_client.expected_success,
expected_code, read_code)
def test_expected_succes_list_match(self):
expected_code = [202, 204]
read_code = 202
resp = self.rest_client.expected_success(expected_code, read_code)
# Assert None resp on success
self.assertFalse(resp)
def test_expected_succes_list_no_match(self):
expected_code = [202, 204]
read_code = 200
self.assertRaises(exceptions.InvalidHttpSuccessCode,
self.rest_client.expected_success,
expected_code, read_code)
def test_non_success_expected_int(self):
expected_code = 404
read_code = 202
self.assertRaises(AssertionError, self.rest_client.expected_success,
expected_code, read_code)
def test_non_success_expected_list(self):
expected_code = [404, 202]
read_code = 202
self.assertRaises(AssertionError, self.rest_client.expected_success,
expected_code, read_code)
def test_non_success_read_code_as_string(self):
expected_code = 202
read_code = '202'
self.assertRaises(TypeError, self.rest_client.expected_success,
expected_code, read_code)
def test_non_success_read_code_as_list(self):
expected_code = 202
read_code = [202]
self.assertRaises(TypeError, self.rest_client.expected_success,
expected_code, read_code)
def test_non_success_expected_code_as_non_int(self):
expected_code = ['201', 202]
read_code = 202
self.assertRaises(AssertionError, self.rest_client.expected_success,
expected_code, read_code)
class TestResponseBody(base.TestCase):
def test_str(self):
response = {'status': 200}
body = {'key1': 'value1'}
actual = rest_client.ResponseBody(response, body)
self.assertEqual("response: %s\nBody: %s" % (response, body),
str(actual))
class TestResponseBodyData(base.TestCase):
def test_str(self):
response = {'status': 200}
data = 'data1'
actual = rest_client.ResponseBodyData(response, data)
self.assertEqual("response: %s\nBody: %s" % (response, data),
str(actual))
class TestResponseBodyList(base.TestCase):
def test_str(self):
response = {'status': 200}
body = ['value1', 'value2', 'value3']
actual = rest_client.ResponseBodyList(response, body)
self.assertEqual("response: %s\nBody: %s" % (response, body),
str(actual))
class TestJSONSchemaValidationBase(base.TestCase):
class Response(dict):
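# Minimal stub that exposes dict keys as attributes so it can stand in for
# the real HTTP response object passed to validate_response().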
def __getattr__(self, attr):
return self[attr]
def __setattr__(self, attr, value):
self[attr] = value
def setUp(self):
super(TestJSONSchemaValidationBase, self).setUp()
self.fake_auth_provider = fake_auth_provider.FakeAuthProvider()
self.rest_client = rest_client.RestClient(
self.fake_auth_provider, None, None)
def _test_validate_pass(self, schema, resp_body, status=200):
resp = self.Response()
resp.status = status
self.rest_client.validate_response(schema, resp, resp_body)
def _test_validate_fail(self, schema, resp_body, status=200,
error_msg="HTTP response body is invalid"):
resp = self.Response()
resp.status = status
ex = self.assertRaises(exceptions.InvalidHTTPResponseBody,
self.rest_client.validate_response,
schema, resp, resp_body)
self.assertIn(error_msg, ex._error_string)
class TestRestClientJSONSchemaValidation(TestJSONSchemaValidationBase):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {
'type': 'integer',
},
},
'required': ['foo']
}
}
def test_validate_pass_with_http_success_code(self):
body = {'foo': 12}
self._test_validate_pass(self.schema, body, status=200)
def test_validate_pass_with_http_redirect_code(self):
body = {'foo': 12}
schema = copy.deepcopy(self.schema)
schema['status_code'] = 300
self._test_validate_pass(schema, body, status=300)
def test_validate_not_http_success_code(self):
schema = {
'status_code': [200]
}
body = {}
self._test_validate_pass(schema, body, status=400)
def test_validate_multiple_allowed_type(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {
'type': ['integer', 'string'],
},
},
'required': ['foo']
}
}
body = {'foo': 12}
self._test_validate_pass(schema, body)
body = {'foo': '12'}
self._test_validate_pass(schema, body)
def test_validate_enable_additional_property_pass(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {'type': 'integer'}
},
'additionalProperties': True,
'required': ['foo']
}
}
body = {'foo': 12, 'foo2': 'foo2value'}
self._test_validate_pass(schema, body)
def test_validate_disable_additional_property_pass(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {'type': 'integer'}
},
'additionalProperties': False,
'required': ['foo']
}
}
body = {'foo': 12}
self._test_validate_pass(schema, body)
def test_validate_disable_additional_property_fail(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {'type': 'integer'}
},
'additionalProperties': False,
'required': ['foo']
}
}
body = {'foo': 12, 'foo2': 'foo2value'}
self._test_validate_fail(schema, body)
def test_validate_wrong_status_code(self):
schema = {
'status_code': [202]
}
body = {}
resp = self.Response()
resp.status = 200
ex = self.assertRaises(exceptions.InvalidHttpSuccessCode,
self.rest_client.validate_response,
schema, resp, body)
self.assertIn("Unexpected http success status code", ex._error_string)
def test_validate_wrong_attribute_type(self):
body = {'foo': 1.2}
self._test_validate_fail(self.schema, body)
def test_validate_unexpected_response_body(self):
schema = {
'status_code': [200],
}
body = {'foo': 12}
self._test_validate_fail(
schema, body,
error_msg="HTTP response body should not exist")
def test_validate_missing_response_body(self):
body = {}
self._test_validate_fail(self.schema, body)
def test_validate_missing_required_attribute(self):
body = {'notfoo': 12}
self._test_validate_fail(self.schema, body)
def test_validate_response_body_not_list(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'list_items': {
'type': 'array',
'items': {'foo': {'type': 'integer'}}
}
},
'required': ['list_items'],
}
}
body = {'foo': 12}
self._test_validate_fail(schema, body)
def test_validate_response_body_list_pass(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'list_items': {
'type': 'array',
'items': {'foo': {'type': 'integer'}}
}
},
'required': ['list_items'],
}
}
body = {'list_items': [{'foo': 12}, {'foo': 10}]}
self._test_validate_pass(schema, body)
class TestRestClientJSONHeaderSchemaValidation(TestJSONSchemaValidationBase):
schema = {
'status_code': [200],
'response_header': {
'type': 'object',
'properties': {
'foo': {'type': 'integer'}
},
'required': ['foo']
}
}
def test_validate_header_schema_pass(self):
resp_body = {}
resp = self.Response()
resp.status = 200
resp.foo = 12
self.rest_client.validate_response(self.schema, resp, resp_body)
def test_validate_header_schema_fail(self):
resp_body = {}
resp = self.Response()
resp.status = 200
resp.foo = 1.2
ex = self.assertRaises(exceptions.InvalidHTTPResponseHeader,
self.rest_client.validate_response,
self.schema, resp, resp_body)
self.assertIn("HTTP response header is invalid", ex._error_string)
class TestRestClientJSONSchemaFormatValidation(TestJSONSchemaValidationBase):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'format': 'email'
}
},
'required': ['foo']
}
}
def test_validate_format_pass(self):
body = {'foo': '[email protected]'}
self._test_validate_pass(self.schema, body)
def test_validate_format_fail(self):
body = {'foo': 'wrong_email'}
self._test_validate_fail(self.schema, body)
def test_validate_formats_in_oneOf_pass(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'oneOf': [
{'format': 'ipv4'},
{'format': 'ipv6'}
]
}
},
'required': ['foo']
}
}
body = {'foo': '10.0.0.0'}
self._test_validate_pass(schema, body)
body = {'foo': 'FE80:0000:0000:0000:0202:B3FF:FE1E:8329'}
self._test_validate_pass(schema, body)
def test_validate_formats_in_oneOf_fail_both_match(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'oneOf': [
{'format': 'ipv4'},
{'format': 'ipv4'}
]
}
},
'required': ['foo']
}
}
body = {'foo': '10.0.0.0'}
self._test_validate_fail(schema, body)
def test_validate_formats_in_oneOf_fail_no_match(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'oneOf': [
{'format': 'ipv4'},
{'format': 'ipv6'}
]
}
},
'required': ['foo']
}
}
body = {'foo': 'wrong_ip_format'}
self._test_validate_fail(schema, body)
def test_validate_formats_in_anyOf_pass(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'anyOf': [
{'format': 'ipv4'},
{'format': 'ipv6'}
]
}
},
'required': ['foo']
}
}
body = {'foo': '10.0.0.0'}
self._test_validate_pass(schema, body)
body = {'foo': 'FE80:0000:0000:0000:0202:B3FF:FE1E:8329'}
self._test_validate_pass(schema, body)
def test_validate_formats_in_anyOf_pass_both_match(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'anyOf': [
{'format': 'ipv4'},
{'format': 'ipv4'}
]
}
},
'required': ['foo']
}
}
body = {'foo': '10.0.0.0'}
self._test_validate_pass(schema, body)
def test_validate_formats_in_anyOf_fail_no_match(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'anyOf': [
{'format': 'ipv4'},
{'format': 'ipv6'}
]
}
},
'required': ['foo']
}
}
body = {'foo': 'wrong_ip_format'}
self._test_validate_fail(schema, body)
def test_validate_formats_pass_for_unknow_format(self):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {
'type': 'string',
'format': 'UNKNOWN'
}
},
'required': ['foo']
}
}
body = {'foo': '[email protected]'}
self._test_validate_pass(schema, body)
class TestRestClientJSONSchemaValidatorVersion(TestJSONSchemaValidationBase):
schema = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'foo': {'type': 'string'}
}
}
}
def test_current_json_schema_validator_version(self):
with fixtures.MockPatchObject(jsonschema.Draft4Validator,
"check_schema") as chk_schema:
body = {'foo': 'test'}
self._test_validate_pass(self.schema, body)
chk_schema.mock.assert_called_once_with(
self.schema['response_body'])
|
|
"""
homeassistant.components.media_player.kodi
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides an interface to the XBMC/Kodi JSON-RPC API
Configuration:
To use Kodi you will need to add something like the following to
your configuration.yaml file.
media_player:
platform: kodi
name: Kodi
url: http://192.168.0.123/jsonrpc
user: kodi
password: my_secure_password
Variables:
name
*Optional
The name of the device.
url
*Required
The URL of the XBMC/Kodi JSON-RPC API. Example: http://192.168.0.123/jsonrpc
user
*Optional
The XBMC/Kodi HTTP username.
password
*Optional
The XBMC/Kodi HTTP password.
"""
import urllib
import logging
from homeassistant.components.media_player import (
MediaPlayerDevice, SUPPORT_PAUSE, SUPPORT_SEEK, SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_MUTE, SUPPORT_PREVIOUS_TRACK, SUPPORT_NEXT_TRACK)
from homeassistant.const import (
STATE_IDLE, STATE_PLAYING, STATE_PAUSED, STATE_OFF)
try:
import jsonrpc_requests
except ImportError:
jsonrpc_requests = None
_LOGGER = logging.getLogger(__name__)
REQUIREMENTS = ['jsonrpc-requests==0.1']
SUPPORT_KODI = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the kodi platform. """
global jsonrpc_requests # pylint: disable=invalid-name
if jsonrpc_requests is None:
import jsonrpc_requests as jsonrpc_requests_
jsonrpc_requests = jsonrpc_requests_
add_devices([
KodiDevice(
config.get('name', 'Kodi'),
config.get('url'),
auth=(
config.get('user', ''),
config.get('password', ''))),
])
def _get_image_url(kodi_url):
""" Helper function that parses the thumbnail URLs used by Kodi. """
url_components = urllib.parse.urlparse(kodi_url)
if url_components.scheme == 'image':
return urllib.parse.unquote(url_components.netloc)
class KodiDevice(MediaPlayerDevice):
""" Represents a XBMC/Kodi device. """
# pylint: disable=too-many-public-methods
def __init__(self, name, url, auth=None):
self._name = name
self._url = url
self._server = jsonrpc_requests.Server(url, auth=auth)
self._players = None
self._properties = None
self._item = None
self._app_properties = None
self.update()
@property
def name(self):
""" Returns the name of the device. """
return self._name
def _get_players(self):
""" Returns the active player objects or None """
try:
return self._server.Player.GetActivePlayers()
except jsonrpc_requests.jsonrpc.TransportError:
_LOGGER.exception('Unable to fetch kodi data')
return None
@property
def state(self):
""" Returns the state of the device. """
if self._players is None:
return STATE_OFF
if len(self._players) == 0:
return STATE_IDLE
if self._properties['speed'] == 0:
return STATE_PAUSED
else:
return STATE_PLAYING
def update(self):
""" Retrieve latest state. """
self._players = self._get_players()
if self._players is not None and len(self._players) > 0:
player_id = self._players[0]['playerid']
assert isinstance(player_id, int)
self._properties = self._server.Player.GetProperties(
player_id,
['time', 'totaltime', 'speed']
)
self._item = self._server.Player.GetItem(
player_id,
['title', 'file', 'uniqueid', 'thumbnail', 'artist']
)['item']
self._app_properties = self._server.Application.GetProperties(
['volume', 'muted']
)
else:
self._properties = None
self._item = None
self._app_properties = None
@property
def volume_level(self):
""" Volume level of the media player (0..1). """
if self._app_properties is not None:
return self._app_properties['volume'] / 100.0
@property
def is_volume_muted(self):
""" Boolean if volume is currently muted. """
if self._app_properties is not None:
return self._app_properties['muted']
@property
def media_content_id(self):
""" Content ID of current playing media. """
if self._item is not None:
return self._item['uniqueid']
@property
def media_content_type(self):
""" Content type of current playing media. """
if self._players is not None and len(self._players) > 0:
return self._players[0]['type']
@property
def media_duration(self):
""" Duration of current playing media in seconds. """
if self._properties is not None:
total_time = self._properties['totaltime']
return (
total_time['hours'] * 3600 +
total_time['minutes'] * 60 +
total_time['seconds'])
@property
def media_image_url(self):
""" Image url of current playing media. """
if self._item is not None:
return _get_image_url(self._item['thumbnail'])
@property
def media_title(self):
""" Title of current playing media. """
# find a string we can use as a title
if self._item is not None:
return self._item.get(
'title',
self._item.get(
'label',
self._item.get(
'file',
'unknown')))
@property
def supported_media_commands(self):
""" Flags of media commands that are supported. """
return SUPPORT_KODI
def turn_off(self):
""" turn_off media player. """
self._server.System.Shutdown()
self.update_ha_state()
def volume_up(self):
""" volume_up media player. """
assert self._server.Input.ExecuteAction('volumeup') == 'OK'
self.update_ha_state()
def volume_down(self):
""" volume_down media player. """
assert self._server.Input.ExecuteAction('volumedown') == 'OK'
self.update_ha_state()
def set_volume_level(self, volume):
""" set volume level, range 0..1. """
self._server.Application.SetVolume(int(volume * 100))
self.update_ha_state()
def mute_volume(self, mute):
""" mute (true) or unmute (false) media player. """
self._server.Application.SetMute(mute)
self.update_ha_state()
def _set_play_state(self, state):
""" Helper method for play/pause/toggle. """
players = self._get_players()
if len(players) != 0:
self._server.Player.PlayPause(players[0]['playerid'], state)
self.update_ha_state()
def media_play_pause(self):
""" media_play_pause media player. """
self._set_play_state('toggle')
def media_play(self):
""" media_play media player. """
self._set_play_state(True)
def media_pause(self):
""" media_pause media player. """
self._set_play_state(False)
def _goto(self, direction):
""" Helper method used for previous/next track. """
players = self._get_players()
if len(players) != 0:
self._server.Player.GoTo(players[0]['playerid'], direction)
self.update_ha_state()
def media_next_track(self):
""" Send next track command. """
self._goto('next')
def media_previous_track(self):
""" Send next track command. """
# first seek to position 0, Kodi seems to go to the beginning
# of the current track current track is not at the beginning
self.media_seek(0)
self._goto('previous')
def media_seek(self, position):
""" Send seek command. """
players = self._get_players()
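# Decompose the position (seconds, possibly fractional) into the
# hours/minutes/seconds/milliseconds mapping expected by Player.Seek.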
time = {}
time['milliseconds'] = int((position % 1) * 1000)
position = int(position)
time['seconds'] = int(position % 60)
position /= 60
time['minutes'] = int(position % 60)
position /= 60
time['hours'] = int(position)
if len(players) != 0:
self._server.Player.Seek(players[0]['playerid'], time)
self.update_ha_state()
def turn_on(self):
""" turn the media player on. """
raise NotImplementedError()
def play_youtube(self, media_id):
""" Plays a YouTube media. """
raise NotImplementedError()
|
|
#!/usr/bin/python
import unittest
import tempfile
import os
import unittest_base
import sys
import shutil
import xenapi_mock
from config import CONFIG
from autocertkit import utils
from datetime import datetime
K = 1024
M = K * 1024
G = M * 1024
class ExpressionMatchingTests(unittest_base.DevTestCase):
"""Tests for checking that the expr_eval function
works appropriately with XenServer version numbers"""
def _exp_true(self, expr, val):
res = utils.eval_expr(expr, val)
self.assertEqual(res, True, "Expected %s %s to be True!" % (val, expr))
def _exp_false(self, expr, val):
res = utils.eval_expr(expr, val)
self.assertEqual(
res, False, "Expected %s %s to be False!" % (val, expr))
def testGreaterThanTrue(self):
self._exp_true('> 5.6', '6.0.0')
self._exp_true('> 5.6', '5.6 FP1')
self._exp_true('> 5.6 FP1', '5.6 SP2')
self._exp_true('> 5.6 SP2', '6.0.0')
def testGreaterThanFalse(self):
self._exp_false('> 6.0.0', '5.6 SP2')
self._exp_false('> 6.0.0', '5.6 FP1')
self._exp_false('> 6.0.0', '5.6')
self._exp_false('> 5.6 SP2', '5.6 FP1')
class IPv4AddrTests(unittest.TestCase):
def test_check_ip_format(self):
utils.IPv4Addr.check_ip_format('192.168.0.1')
self.assertRaises(
Exception, lambda: utils.IPv4Addr.check_ip_format('192.168.0.256'))
self.assertRaises(
Exception, lambda: utils.IPv4Addr.check_ip_format('192.168.1'))
self.assertRaises(
Exception, lambda: utils.IPv4Addr.check_ip_format('192.168.0.0.1'))
self.assertRaises(
Exception, lambda: utils.IPv4Addr.check_ip_format('192.168.0.01'))
def test_check_netwrok_mask(self):
utils.IPv4Addr.check_netwrok_mask('255.255.255.0')
utils.IPv4Addr.check_netwrok_mask('255.255.0.0')
utils.IPv4Addr.check_netwrok_mask('255.0.0.0')
utils.IPv4Addr.check_netwrok_mask('255.255.240.0')
self.assertRaises(
Exception, lambda: utils.IPv4Addr.check_netwrok_mask('255.255.255.255'))
self.assertRaises(
Exception, lambda: utils.IPv4Addr.check_netwrok_mask('0.0.0.0'))
def test_check_special_ip(self):
utils.IPv4Addr.check_special_ip('192.168.0.1', '255.255.255.0')
self.assertRaises(Exception, lambda: utils.IPv4Addr.check_special_ip(
'192.168.0.0', '255.255.255.0'))
self.assertRaises(Exception, lambda: utils.IPv4Addr.check_special_ip(
'192.168.0.255', '255.255.255.0'))
def test_split(self):
subnet, host = utils.IPv4Addr.split('192.168.0.1', '255.255.255.0')
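# With a /24 netmask the network portion is 192.168.0.0 (as an integer)
# and the host portion is 1.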
self.assertEqual(subnet, (192 << 24) + (168 << 16) + (0 << 8))
self.assertEqual(host, 1)
def test_aton(self):
n_ip = utils.IPv4Addr.aton('192.168.0.1')
self.assertEqual(n_ip, (192 << 24) + (168 << 16) + (0 << 8) + 1)
self.assertRaises(
Exception, lambda: utils.IPv4Addr.aton('192.168.0.256'))
def test_ntoa(self):
ip = utils.IPv4Addr.ntoa((192 << 24) + (168 << 16) + (0 << 8) + 1)
self.assertEqual(ip, '192.168.0.1')
self.assertRaises(Exception, lambda: utils.IPv4Addr.ntoa(0x100000000))
def test_validate_netmask(self):
utils.IPv4Addr.validate_netmask('255.255.255.0')
def test_validate_ip(self):
utils.IPv4Addr.validate_ip('192.168.255.1', '255.255.255.0')
def test_in_same_subnet(self):
utils.IPv4Addr.in_same_subnet(
'192.168.255.1', '192.168.255.254', '255.255.255.0')
def test_validate(self):
ip = utils.IPv4Addr('192.168.0.10', '255.255.255.0', '192.168.0.1')
ip.validate()
ip = utils.IPv4Addr('192.16.254.10', '255.240.0.0', '192.16.0.1')
ip.validate()
def test_get_subnet_host(self):
ip = utils.IPv4Addr('192.168.0.2', '255.255.255.0', '192.168.0.1')
subnet, host = ip.get_subnet_host()
self.assertEqual(subnet, (192 << 24) + (168 << 16) + (0 << 8))
self.assertEqual(host, 2)
class StaticIPManagerTests(unittest.TestCase):
def setUp(self):
self.conf = {'ip_start': '192.168.0.2',
'ip_end': '192.168.0.5',
'netmask': '255.255.255.0',
'gw': '192.168.0.1'}
self.sm = utils.StaticIPManager(self.conf)
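# The range above yields a pool of four addresses: 192.168.0.2 - 192.168.0.5.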
def tearDown(self):
self.sm.release_all()
def test_get_ip(self):
ip = self.sm.get_ip()
self.assertEqual(ip.addr, '192.168.0.2')
self.assertEqual(ip.netmask, '255.255.255.0')
self.assertEqual(ip.gateway, '192.168.0.1')
ip = self.sm.get_ip()
self.assertEqual(ip.addr, '192.168.0.3')
ip = self.sm.get_ip()
self.assertEqual(ip.addr, '192.168.0.4')
ip = self.sm.get_ip()
self.assertEqual(ip.addr, '192.168.0.5')
self.assertRaises(Exception, lambda: self.sm.get_ip())
self.sm.release_all()
def test_return_ip(self):
free1 = self.sm.available_ips()
ip = self.sm.get_ip()
free2 = self.sm.available_ips()
self.assertEqual(free1 - 1, free2)
self.sm.return_ip(ip)
free3 = self.sm.available_ips()
self.assertEqual(free1, free3)
self.assertRaises(Exception, lambda: self.sm.return_ip(ip))
self.sm.release_all()
class ValueInRangeFunctions(unittest.TestCase):
def test_simple(self):
# Assert True
self.assertTrue(utils.value_in_range(5 * G, 4 * G, 8 * G))
self.assertTrue(utils.value_in_range(3 * G, 0, 4 * G))
self.assertTrue(utils.value_in_range(4 * G, 0, 4 * G))
self.assertTrue(utils.value_in_range(3 * G, 3 * G, 4 * G))
# Assert False
self.assertFalse(utils.value_in_range(4, 5, 500))
self.assertFalse(utils.value_in_range(4 * G + 1, 0, 4 * G))
self.assertFalse(utils.value_in_range(-1, 0, 4 * G))
def test_wrap(self):
self.assertTrue(utils.wrapped_value_in_range(8, 5, 15, 10))
self.assertTrue(utils.wrapped_value_in_range(5 * K, 3 * G, 5 * G))
self.assertTrue(utils.wrapped_value_in_range(3 * G, 2 * G, 4 * G))
self.assertFalse(utils.wrapped_value_in_range(1 * G, 2 * G, 4 * G))
self.assertFalse(utils.wrapped_value_in_range(2 * G, 3 * G, 5 * G))
self.assertTrue(utils.wrapped_value_in_range(3965952210,
8248029658,
9067544228))
class ValidatePingResponses(unittest.TestCase):
def test_valid_ping_responses(self):
response = "20 packets transmitted, 19 received, 5% packet loss, time 19008ms"
self.assertTrue(utils.valid_ping_response(response, max_loss=20))
def test_invalid_ping_responses(self):
response = "20 packets transmitted, 19 received, 5% packet loss, time 19008ms"
self.assertFalse(utils.valid_ping_response(response, max_loss=0))
def test_valid_equal_ping_responses(self):
response = "20 packets transmitted, 19 received, 5% packet loss, time 19008ms"
self.assertTrue(utils.valid_ping_response(response, max_loss=5))
class RebootFlagTimestamps(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
if os.path.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
def test_set_flag(self):
flag = "%s/test_set_flag" % self.tmpdir
utils.set_reboot_flag(flag_loc=flag)
self.assertTrue(os.path.exists(flag))
def test_read_flag(self):
flag = "%s/test_read_flag" % self.tmpdir
ts = datetime.now()
utils.set_reboot_flag(flag_loc=flag)
fts = utils.get_reboot_flag_timestamp(flag)
fmt_str = "%Y-%m-%d %H:%M:%S"
self.assertEqual(fts.strftime(fmt_str), ts.strftime(fmt_str))
class HostLibMethodsTests(unittest.TestCase):
"""
Host related functions unit tests.
"""
def setUp(self):
self.session = xenapi_mock.Session()
self.__enable_all_hosts()
def __enable_all_hosts(self):
for host in self.session.hosts:
host.enabled = True
host.metrics.live = True
def test_wait_for_hosts(self):
utils.wait_for_hosts(self.session)
self.session.hosts[0].enabled = False
self.assertRaises(Exception,
lambda: utils.wait_for_hosts(self.session, timeout=1))
self.session.hosts[0].enabled = True
self.session.hosts[1].metrics.live = False
self.assertRaises(Exception,
lambda: utils.wait_for_hosts(self.session, timeout=1))
self.__enable_all_hosts()
class PoolLibMethodsTests(unittest.TestCase):
"""
Pool related functions unit tests.
"""
def setUp(self):
self.session = xenapi_mock.Session()
def test_get_pool_master(self):
self.assertTrue(utils.get_pool_master(self.session) ==
self.session.hosts[0].opaque)
def test_get_pool_slaves(self):
self.assertTrue(utils.get_pool_slaves(self.session) ==
[host.opaque for host in self.session.hosts[1:]])
class NetworkLibMethodsTests(unittest.TestCase):
"""
Host related functions unit tests.
"""
def setUp(self):
self.session = xenapi_mock.Session()
def test_device_linkstate(self):
utils.set_nic_device_status(self.session, 'eth0', 'down')
utils.set_nic_device_status(self.session, 'eth1', 'up')
self.assertRaises(Exception, lambda: utils.set_nic_device_status(
self.session, 'invalidEth', 'up'))
class SimpleMethodsTests(unittest.TestCase):
"""
Simple methods in utils module test
"""
def setUp(self):
self.session = xenapi_mock.Session()
def test_kis_64_bit(self):
self.assertTrue(utils.is_64_bit("x86_64"))
self.assertFalse(utils.is_64_bit("i386"))
self.assertFalse(utils.is_64_bit("i686"))
def test_logging_methods(self):
utils.init_ack_logging(self.session)
def test_get_xenserver_version(self):
self.session.hosts[0].xs_software_version = {
'product_version': '7.0.93'}
self.assertEqual(utils.get_xenserver_version(self.session), "7.0.93")
def test_get_xcp_version(self):
self.session.hosts[0].xs_software_version = {
'platform_version': '2.1.4'}
self.assertEqual(utils.get_xcp_version(self.session), "2.1.4")
def test_get_ack_version(self):
self.assertEqual(utils.get_ack_version(self.session), "1.2.3")
self.assertEqual(utils.get_ack_version(
self.session, self.session.hosts[1].opaque), "1.2.3")
self.session.hosts[1].setAckVersion(None)
self.assertEqual(utils.get_ack_version(
self.session, self.session.hosts[0].opaque), "1.2.3")
self.assertEqual(utils.get_ack_version(
self.session, self.session.hosts[1].opaque), None)
self.session.fail_plugin = True
self.assertEqual(utils.get_ack_version(self.session), None)
def test_get_system_info(self):
self.session.hosts[0].dmidecode = ""
self.assertDictEqual(utils.get_system_info(self.session), {})
self.session.hosts[0].dmidecode = CONFIG["host"]["dmidecode"][0]
self.assertDictEqual(utils.get_system_info(self.session), CONFIG[
"expected"]["get_system_info"][0])
if __name__ == '__main__':
unittest.main()
|
|
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import math
import os
import random
import numpy as np
import PIL.Image
import PIL.ImagePalette
from digits.utils import image, subclass, override, constants
from digits.utils.constants import COLOR_PALETTE_ATTRIBUTE
from ..interface import DataIngestionInterface
from .forms import DatasetForm
TEMPLATE = "template.html"
@subclass
class DataIngestion(DataIngestionInterface):
"""
A data ingestion extension for an image segmentation dataset
"""
def __init__(self, **kwargs):
"""
the parent __init__ method automatically populates this
instance with attributes from the form
"""
super(DataIngestion, self).__init__(**kwargs)
self.random_indices = None
if not 'seed' in self.userdata:
# choose random seed and add to userdata so it gets persisted
self.userdata['seed'] = random.randint(0, 1000)
random.seed(self.userdata['seed'])
if self.userdata['colormap_method'] == "label":
# open first image in label folder to retrieve palette
# all label images must use the same palette - this is enforced
# during dataset creation
filename = self.make_image_list(self.label_folder)[0]
image = self.load_label(filename)
self.userdata[COLOR_PALETTE_ATTRIBUTE] = image.getpalette()
else:
# read colormap from file
with open(self.colormap_text_file) as f:
palette = []
lines = f.read().splitlines()
for line in lines:
for val in line.split():
palette.append(int(val))
# fill rest with zeros
palette = palette + [0] * (256*3 - len(palette))
self.userdata[COLOR_PALETTE_ATTRIBUTE] = palette
self.palette_img = PIL.Image.new("P", (1,1))
self.palette_img.putpalette(palette)
# get labels if those were provided
if self.class_labels_file:
with open(self.class_labels_file) as f:
self.userdata['class_labels'] = f.read().splitlines()
@override
def encode_entry(self, entry):
"""
Return numpy.ndarray
"""
feature_image_file = entry[0]
label_image_file = entry[1]
# feature image
feature_image = self.encode_PIL_Image(
image.load_image(feature_image_file),
self.channel_conversion)
# label image
label_image = self.load_label(label_image_file)
if label_image.getpalette() != self.userdata[COLOR_PALETTE_ATTRIBUTE]:
raise ValueError("All label images must use the same palette")
label_image = self.encode_PIL_Image(label_image)
return feature_image, label_image
def encode_PIL_Image(self, image, channel_conversion='none'):
if channel_conversion != 'none':
if image.mode != channel_conversion:
# convert to different image mode if necessary
image = image.convert(channel_conversion)
# convert to numpy array
image = np.array(image)
# add channel axis if input is grayscale image
if image.ndim == 2:
image = image[..., np.newaxis]
elif image.ndim != 3:
raise ValueError("Unhandled number of channels: %d" % image.ndim)
# transpose to CHW
image = image.transpose(2, 0, 1)
return image
@staticmethod
@override
def get_category():
return "Images"
@staticmethod
@override
def get_id():
return "image-segmentation"
@staticmethod
@override
def get_dataset_form():
return DatasetForm()
@staticmethod
@override
def get_dataset_template(form):
"""
parameters:
- form: form returned by get_dataset_form(). This may be populated
with values if the job was cloned
returns:
- (template, context) tuple
- template is a Jinja template to use for rendering dataset creation
options
- context is a dictionary of context variables to use for rendering
the form
"""
extension_dir = os.path.dirname(os.path.abspath(__file__))
template = open(os.path.join(extension_dir, TEMPLATE), "r").read()
context = {'form': form}
return (template, context)
@staticmethod
@override
def get_title():
return "Segmentation"
@override
def itemize_entries(self, stage):
if stage == constants.TEST_DB:
# don't return anything for the test stage
return []
if stage == constants.TRAIN_DB or (not self.has_val_folder):
feature_image_list = self.make_image_list(self.feature_folder)
label_image_list = self.make_image_list(self.label_folder)
else:
# separate validation images
feature_image_list = self.make_image_list(self.validation_feature_folder)
label_image_list = self.make_image_list(self.validation_label_folder)
# make sure filenames match
if len(feature_image_list) != len(label_image_list):
raise ValueError(
"Expect same number of images in feature and label folders (%d!=%d)"
% (len(feature_image_list), len(label_image_list)))
for idx in range(len(feature_image_list)):
feature_name = os.path.splitext(
os.path.split(feature_image_list[idx])[1])[0]
label_name = os.path.splitext(
os.path.split(label_image_list[idx])[1])[0]
if feature_name != label_name:
raise ValueError("No corresponding feature/label pair found for (%s,%s)"
% (feature_name, label_name) )
# split lists if there is no val folder
if not self.has_val_folder:
feature_image_list = self.split_image_list(feature_image_list, stage)
label_image_list = self.split_image_list(label_image_list, stage)
return zip(
feature_image_list,
label_image_list)
def load_label(self, filename):
"""
Load a label image
"""
image = PIL.Image.open(filename)
if self.userdata['colormap_method'] == "label":
if image.mode not in ['P', 'L', '1']:
raise ValueError("Labels are expected to be single-channel (paletted or "
" grayscale) images - %s mode is '%s'"
% (filename, image.mode))
else:
if image.mode not in ['RGB']:
raise ValueError("Labels are expected to be RGB images - %s mode is '%s'"
% (filename, image.mode))
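# Map the RGB label image onto the user-supplied palette so every label image
# ends up in the same index space.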
image = image.quantize(palette=self.palette_img)
return image
def make_image_list(self, folder):
image_files = []
for dirpath, dirnames, filenames in os.walk(folder, followlinks=True):
for filename in filenames:
if filename.lower().endswith(image.SUPPORTED_EXTENSIONS):
image_files.append('%s' % os.path.join(folder, filename))
if len(image_files) == 0:
raise ValueError("Unable to find supported images in %s" % folder)
return sorted(image_files)
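# split_image_list shuffles the file list once (caching the permutation so the
# feature and label lists get the same ordering) and then carves off the first
# folder_pct_val percent as the validation split.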
def split_image_list(self, filelist, stage):
if self.random_indices is None:
self.random_indices = range(len(filelist))
random.shuffle(self.random_indices)
elif len(filelist) != len(self.random_indices):
raise ValueError(
"Expect same number of images in folders (%d!=%d)"
% (len(filelist), len(self.random_indices)))
filelist = [filelist[idx] for idx in self.random_indices]
pct_val = int(self.folder_pct_val)
n_val_entries = int(math.floor(len(filelist) * pct_val / 100))
if stage == constants.VAL_DB:
return filelist[:n_val_entries]
elif stage == constants.TRAIN_DB:
return filelist[n_val_entries:]
else:
raise ValueError("Unknown stage: %s" % stage)
|
|
# Copyright (C) 2015 YouCompleteMe contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from ycm.test_utils import MockVimModule, ExtendedMock
MockVimModule()
import contextlib
import os
from ycm.tests.server_test import Server_test
from ycmd.responses import ( BuildDiagnosticData, Diagnostic, Location, Range,
UnknownExtraConf, ServerError )
from mock import call, MagicMock, patch
from nose.tools import eq_, ok_
def PresentDialog_Confirm_Call( message ):
"""Return a mock.call object for a call to vimsupport.PresentDialog, as called
by vimsupport.Confirm with the supplied confirmation message."""
return call( message, [ 'Ok', 'Cancel' ] )
def PlaceSign_Call( sign_id, line_num, buffer_num, is_error ):
sign_name = 'YcmError' if is_error else 'YcmWarning'
return call( 'sign place {0} line={1} name={2} buffer={3}'
.format( sign_id, line_num, sign_name, buffer_num ) )
def UnplaceSign_Call( sign_id, buffer_num ):
return call( 'try | exec "sign unplace {0} buffer={1}" |'
' catch /E158/ | endtry'.format( sign_id, buffer_num ) )
@contextlib.contextmanager
def MockArbitraryBuffer( filetype, native_available = True ):
"""Used via the with statement, set up mocked versions of the vim module such
that a single buffer is open with an arbitrary name and arbirary contents. Its
filetype is set to the supplied filetype"""
with patch( 'vim.current' ) as vim_current:
def VimEval( value ):
"""Local mock of the vim.eval() function, used to ensure we get the
correct behaviour."""
if value == '&omnifunc':
# The omnicompleter is not required here
return ''
if value == 'getbufvar(0, "&mod")':
# Ensure that we actually send the event to the server
return 1
if value == 'getbufvar(0, "&ft")' or value == '&filetype':
return filetype
if value.startswith( 'bufnr(' ):
return 0
if value.startswith( 'bufwinnr(' ):
return 0
raise ValueError( 'Unexpected evaluation' )
# Arbitrary, but valid, cursor position
vim_current.window.cursor = ( 1, 2 )
# Arbitrary, but valid, single buffer open
current_buffer = MagicMock()
current_buffer.number = 0
current_buffer.filename = os.path.realpath( 'TEST_BUFFER' )
current_buffer.name = 'TEST_BUFFER'
current_buffer.window = 0
# The rest just mocks up the Vim module so that our single arbitrary buffer
# makes sense to the vimsupport module.
with patch( 'vim.buffers', [ current_buffer ] ):
with patch( 'vim.current.buffer', current_buffer ):
with patch( 'vim.eval', side_effect=VimEval ):
yield
@contextlib.contextmanager
def MockEventNotification( response_method, native_filetype_completer = True ):
"""Mock out the EventNotification client request object, replacing the
Response handler's JsonFromFuture with the supplied |response_method|.
Additionally mock out YouCompleteMe's FiletypeCompleterExistsForFiletype
method to return the supplied |native_filetype_completer| parameter, rather
than querying the server"""
# We don't want the event to actually be sent to the server, just have it
# return success
with patch( 'ycm.client.base_request.BaseRequest.PostDataToHandlerAsync',
return_value = MagicMock( return_value=True ) ):
# We set up a fake Response (as called by EventNotification.Response)
# which calls the supplied callback method. Generally this callback just
# raises an appropriate exception, otherwise it would have to return a mock
# future object.
#
# Note: JsonFromFuture is actually part of ycm.client.base_request, but we
# must patch where an object is looked up, not where it is defined.
# See https://docs.python.org/dev/library/unittest.mock.html#where-to-patch
# for details.
with patch( 'ycm.client.event_notification.JsonFromFuture',
side_effect = response_method ):
# Filetype available information comes from the server, so rather than
# relying on that request, we mock out the check. The caller decides if
# filetype completion is available
with patch(
'ycm.youcompleteme.YouCompleteMe.FiletypeCompleterExistsForFiletype',
return_value = native_filetype_completer ):
yield
class EventNotification_test( Server_test ):
@patch( 'ycm.vimsupport.PostVimMessage', new_callable = ExtendedMock )
def FileReadyToParse_NonDiagnostic_Error_test( self, post_vim_message ):
# This test validates the behaviour of YouCompleteMe.HandleFileParseRequest
# in combination with YouCompleteMe.OnFileReadyToParse when the completer
# raises an exception while handling the FileReadyToParse event notification
ERROR_TEXT = 'Some completer response text'
def ErrorResponse( *args ):
raise ServerError( ERROR_TEXT )
with MockArbitraryBuffer( 'javascript' ):
with MockEventNotification( ErrorResponse ):
self._server_state.OnFileReadyToParse()
assert self._server_state.FileParseRequestReady()
self._server_state.HandleFileParseRequest()
# The first call raises a warning
post_vim_message.assert_has_exact_calls( [
call( ERROR_TEXT, truncate = False )
] )
# Subsequent calls don't re-raise the warning
self._server_state.HandleFileParseRequest()
post_vim_message.assert_has_exact_calls( [
call( ERROR_TEXT, truncate = False )
] )
# But it does if a subsequent event raises again
self._server_state.OnFileReadyToParse()
assert self._server_state.FileParseRequestReady()
self._server_state.HandleFileParseRequest()
post_vim_message.assert_has_exact_calls( [
call( ERROR_TEXT, truncate = False ),
call( ERROR_TEXT, truncate = False )
] )
@patch( 'vim.command' )
def FileReadyToParse_NonDiagnostic_Error_NonNative_test( self, vim_command ):
with MockArbitraryBuffer( 'javascript' ):
with MockEventNotification( None, False ):
self._server_state.OnFileReadyToParse()
self._server_state.HandleFileParseRequest()
vim_command.assert_not_called()
@patch( 'ycm.client.event_notification._LoadExtraConfFile',
new_callable = ExtendedMock )
@patch( 'ycm.client.event_notification._IgnoreExtraConfFile',
new_callable = ExtendedMock )
def FileReadyToParse_NonDiagnostic_ConfirmExtraConf_test(
self,
ignore_extra_conf,
load_extra_conf,
*args ):
# This test validates the behaviour of YouCompleteMe.HandleFileParseRequest
# in combination with YouCompleteMe.OnFileReadyToParse when the completer
# raises the (special) UnknownExtraConf exception
FILE_NAME = 'a_file'
MESSAGE = ( 'Found ' + FILE_NAME + '. Load? \n\n(Question can be '
'turned off with options, see YCM docs)' )
def UnknownExtraConfResponse( *args ):
raise UnknownExtraConf( FILE_NAME )
with MockArbitraryBuffer( 'javascript' ):
with MockEventNotification( UnknownExtraConfResponse ):
# When the user accepts the extra conf, we load it
with patch( 'ycm.vimsupport.PresentDialog',
return_value = 0,
new_callable = ExtendedMock ) as present_dialog:
self._server_state.OnFileReadyToParse()
assert self._server_state.FileParseRequestReady()
self._server_state.HandleFileParseRequest()
present_dialog.assert_has_exact_calls( [
PresentDialog_Confirm_Call( MESSAGE ),
] )
load_extra_conf.assert_has_exact_calls( [
call( FILE_NAME ),
] )
# Subsequent calls don't re-raise the warning
self._server_state.HandleFileParseRequest()
present_dialog.assert_has_exact_calls( [
PresentDialog_Confirm_Call( MESSAGE )
] )
load_extra_conf.assert_has_exact_calls( [
call( FILE_NAME ),
] )
# But it does if a subsequent event raises again
self._server_state.OnFileReadyToParse()
assert self._server_state.FileParseRequestReady()
self._server_state.HandleFileParseRequest()
present_dialog.assert_has_exact_calls( [
PresentDialog_Confirm_Call( MESSAGE ),
PresentDialog_Confirm_Call( MESSAGE ),
] )
load_extra_conf.assert_has_exact_calls( [
call( FILE_NAME ),
call( FILE_NAME ),
] )
# When the user rejects the extra conf, we ignore it
with patch( 'ycm.vimsupport.PresentDialog',
return_value = 1,
new_callable = ExtendedMock ) as present_dialog:
self._server_state.OnFileReadyToParse()
assert self._server_state.FileParseRequestReady()
self._server_state.HandleFileParseRequest()
present_dialog.assert_has_exact_calls( [
PresentDialog_Confirm_Call( MESSAGE ),
] )
ignore_extra_conf.assert_has_exact_calls( [
call( FILE_NAME ),
] )
# Subsequent calls don't re-raise the warning
self._server_state.HandleFileParseRequest()
present_dialog.assert_has_exact_calls( [
PresentDialog_Confirm_Call( MESSAGE )
] )
ignore_extra_conf.assert_has_exact_calls( [
call( FILE_NAME ),
] )
# But it does if a subsequent event raises again
self._server_state.OnFileReadyToParse()
assert self._server_state.FileParseRequestReady()
self._server_state.HandleFileParseRequest()
present_dialog.assert_has_exact_calls( [
PresentDialog_Confirm_Call( MESSAGE ),
PresentDialog_Confirm_Call( MESSAGE ),
] )
ignore_extra_conf.assert_has_exact_calls( [
call( FILE_NAME ),
call( FILE_NAME ),
] )
def FileReadyToParse_Diagnostic_Error_Native_test( self ):
self._Check_FileReadyToParse_Diagnostic_Error()
self._Check_FileReadyToParse_Diagnostic_Warning()
self._Check_FileReadyToParse_Diagnostic_Clean()
@patch( 'vim.command' )
def _Check_FileReadyToParse_Diagnostic_Error( self, vim_command ):
# Tests Vim sign placement and error/warning count python API
# when one error is returned.
def DiagnosticResponse( *args ):
start = Location( 1, 2, 'TEST_BUFFER' )
end = Location( 1, 4, 'TEST_BUFFER' )
extent = Range( start, end )
diagnostic = Diagnostic( [], start, extent, 'expected ;', 'ERROR' )
return [ BuildDiagnosticData( diagnostic ) ]
with MockArbitraryBuffer( 'cpp' ):
with MockEventNotification( DiagnosticResponse ):
self._server_state.OnFileReadyToParse()
ok_( self._server_state.FileParseRequestReady() )
self._server_state.HandleFileParseRequest()
vim_command.assert_has_calls( [
PlaceSign_Call( 1, 1, 0, True )
] )
eq_( self._server_state.GetErrorCount(), 1 )
eq_( self._server_state.GetWarningCount(), 0 )
        # Subsequent calls to HandleFileParseRequest shouldn't disturb the
        # existing diagnostics when there is no new parse request.
vim_command.reset_mock()
ok_( not self._server_state.FileParseRequestReady() )
self._server_state.HandleFileParseRequest()
vim_command.assert_not_called()
eq_( self._server_state.GetErrorCount(), 1 )
eq_( self._server_state.GetWarningCount(), 0 )
@patch( 'vim.command' )
def _Check_FileReadyToParse_Diagnostic_Warning( self, vim_command ):
# Tests Vim sign placement/unplacement and error/warning count python API
# when one warning is returned.
# Should be called after _Check_FileReadyToParse_Diagnostic_Error
def DiagnosticResponse( *args ):
start = Location( 2, 2, 'TEST_BUFFER' )
end = Location( 2, 4, 'TEST_BUFFER' )
extent = Range( start, end )
diagnostic = Diagnostic( [], start, extent, 'cast', 'WARNING' )
return [ BuildDiagnosticData( diagnostic ) ]
with MockArbitraryBuffer( 'cpp' ):
with MockEventNotification( DiagnosticResponse ):
self._server_state.OnFileReadyToParse()
ok_( self._server_state.FileParseRequestReady() )
self._server_state.HandleFileParseRequest()
vim_command.assert_has_calls( [
PlaceSign_Call( 2, 2, 0, False ),
UnplaceSign_Call( 1, 0 )
] )
eq_( self._server_state.GetErrorCount(), 0 )
eq_( self._server_state.GetWarningCount(), 1 )
        # Subsequent calls to HandleFileParseRequest shouldn't disturb the
        # existing diagnostics when there is no new parse request.
vim_command.reset_mock()
ok_( not self._server_state.FileParseRequestReady() )
self._server_state.HandleFileParseRequest()
vim_command.assert_not_called()
eq_( self._server_state.GetErrorCount(), 0 )
eq_( self._server_state.GetWarningCount(), 1 )
@patch( 'vim.command' )
def _Check_FileReadyToParse_Diagnostic_Clean( self, vim_command ):
# Tests Vim sign unplacement and error/warning count python API
# when there are no errors/warnings left.
# Should be called after _Check_FileReadyToParse_Diagnostic_Warning
with MockArbitraryBuffer( 'cpp' ):
with MockEventNotification( MagicMock( return_value = [] ) ):
self._server_state.OnFileReadyToParse()
self._server_state.HandleFileParseRequest()
vim_command.assert_has_calls( [
UnplaceSign_Call( 2, 0 )
] )
eq_( self._server_state.GetErrorCount(), 0 )
eq_( self._server_state.GetWarningCount(), 0 )
|
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import hashlib
import json
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import time
import uuid
from stat import ST_MODE
import grpc
from apache_beam.portability.api.beam_artifact_api_pb2 import GetManifestResponse, ArtifactChunk
from apache_beam.portability.api.beam_artifact_api_pb2_grpc import (
ArtifactRetrievalServiceServicer, add_ArtifactRetrievalServiceServicer_to_server)
from apache_beam.portability.api.beam_provision_api_pb2 import (ProvisionInfo,
GetProvisionInfoResponse)
from apache_beam.portability.api.beam_provision_api_pb2_grpc import (
ProvisionServiceServicer, add_ProvisionServiceServicer_to_server)
from concurrent import futures
from google.protobuf import json_format
from pyflink.fn_execution.boot import (PYTHON_REQUIREMENTS_FILE,
PYTHON_REQUIREMENTS_CACHE,
PYTHON_REQUIREMENTS_INSTALL_DIR)
from pyflink.fn_execution.tests.process_mode_test_data import (manifest, file_data,
test_provision_info_json)
from pyflink.java_gateway import get_gateway
from pyflink.testing.test_case_utils import PyFlinkTestCase
class PythonBootTests(PyFlinkTestCase):
def setUp(self):
manifest_response = json_format.Parse(manifest, GetManifestResponse())
artifact_chunks = dict()
for file_name in file_data:
artifact_chunks[file_name] = json_format.Parse(file_data[file_name], ArtifactChunk())
provision_info = json_format.Parse(test_provision_info_json, ProvisionInfo())
response = GetProvisionInfoResponse(info=provision_info)
def get_unused_port():
sock = socket.socket()
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
return port
class ArtifactService(ArtifactRetrievalServiceServicer):
def GetManifest(self, request, context):
return manifest_response
def GetArtifact(self, request, context):
yield artifact_chunks[request.name]
def start_test_artifact_server():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
add_ArtifactRetrievalServiceServicer_to_server(ArtifactService(), server)
port = get_unused_port()
server.add_insecure_port('[::]:' + str(port))
server.start()
return server, port
class ProvisionService(ProvisionServiceServicer):
def GetProvisionInfo(self, request, context):
return response
def start_test_provision_server():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
add_ProvisionServiceServicer_to_server(ProvisionService(), server)
port = get_unused_port()
server.add_insecure_port('[::]:' + str(port))
server.start()
return server, port
self.artifact_server, self.artifact_port = start_test_artifact_server()
self.provision_server, self.provision_port = start_test_provision_server()
self.env = dict(os.environ)
self.env["python"] = sys.executable
self.env["FLINK_BOOT_TESTING"] = "1"
self.env["BOOT_LOG_DIR"] = os.path.join(self.env["FLINK_HOME"], "log")
self.tmp_dir = tempfile.mkdtemp(str(time.time()), dir=self.tempdir)
        # Assume that this file is in the flink-python source code directory.
flink_python_source_root = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
self.runner_path = os.path.join(
flink_python_source_root, "src", "main", "resources", "pyflink-udf-runner.sh")
def run_boot_py(self):
args = [self.runner_path, "--id", "1",
"--logging_endpoint", "localhost:0000",
"--artifact_endpoint", "localhost:%d" % self.artifact_port,
"--provision_endpoint", "localhost:%d" % self.provision_port,
"--control_endpoint", "localhost:0000",
"--semi_persist_dir", self.tmp_dir]
return subprocess.call(args, stdout=sys.stdout, stderr=sys.stderr, env=self.env)
def check_downloaded_files(self, staged_dir, manifest):
expected_files_info = json.loads(manifest)["manifest"]["artifact"]
files = os.listdir(staged_dir)
self.assertEqual(len(expected_files_info), len(files))
checked = 0
for file_name in files:
for file_info in expected_files_info:
if file_name == file_info["name"]:
self.assertEqual(
oct(os.stat(os.path.join(staged_dir, file_name))[ST_MODE])[-3:],
str(file_info["permissions"]))
with open(os.path.join(staged_dir, file_name), "rb") as f:
sha256obj = hashlib.sha256()
sha256obj.update(f.read())
hash_value = sha256obj.hexdigest()
self.assertEqual(hash_value, file_info["sha256"])
checked += 1
break
self.assertEqual(checked, len(files))
def check_installed_files(self, prefix_dir, package_list):
from distutils.dist import Distribution
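        # Use a throw-away distutils 'install' command object to resolve the
        # platform-specific purelib/platlib directories under the given prefix,
        # without actually installing anything.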
install_obj = Distribution().get_command_obj('install', create=True)
install_obj.prefix = prefix_dir
install_obj.finalize_options()
installed_dir = [install_obj.install_purelib]
if install_obj.install_purelib != install_obj.install_platlib:
installed_dir.append(install_obj.install_platlib)
for package_name in package_list:
self.assertTrue(any([os.path.exists(os.path.join(package_dir, package_name))
for package_dir in installed_dir]))
def test_python_boot(self):
exit_code = self.run_boot_py()
self.assertTrue(exit_code == 0, "the boot.py exited with non-zero code.")
self.check_downloaded_files(os.path.join(self.tmp_dir, "staged"), manifest)
def test_param_validation(self):
args = [self.runner_path]
exit_message = subprocess.check_output(args, env=self.env).decode("utf-8")
self.assertIn("No id provided.", exit_message)
args = [self.runner_path, "--id", "1"]
exit_message = subprocess.check_output(args, env=self.env).decode("utf-8")
self.assertIn("No logging endpoint provided.", exit_message)
args = [self.runner_path, "--id", "1", "--logging_endpoint", "localhost:0000"]
exit_message = subprocess.check_output(args, env=self.env).decode("utf-8")
self.assertIn("No artifact endpoint provided.", exit_message)
args = [self.runner_path, "--id", "1",
"--logging_endpoint", "localhost:0000",
"--artifact_endpoint", "localhost:%d" % self.artifact_port]
exit_message = subprocess.check_output(args, env=self.env).decode("utf-8")
self.assertIn("No provision endpoint provided.", exit_message)
args = [self.runner_path, "--id", "1",
"--logging_endpoint", "localhost:0000",
"--artifact_endpoint", "localhost:%d" % self.artifact_port,
"--provision_endpoint", "localhost:%d" % self.provision_port]
exit_message = subprocess.check_output(args, env=self.env).decode("utf-8")
self.assertIn("No control endpoint provided.", exit_message)
def test_constant_consistency(self):
JProcessPythonEnvironmentManager = \
get_gateway().jvm.org.apache.flink.python.env.ProcessPythonEnvironmentManager
self.assertEqual(PYTHON_REQUIREMENTS_FILE,
JProcessPythonEnvironmentManager.PYTHON_REQUIREMENTS_FILE)
self.assertEqual(PYTHON_REQUIREMENTS_CACHE,
JProcessPythonEnvironmentManager.PYTHON_REQUIREMENTS_CACHE)
self.assertEqual(PYTHON_REQUIREMENTS_INSTALL_DIR,
JProcessPythonEnvironmentManager.PYTHON_REQUIREMENTS_INSTALL_DIR)
def test_set_working_directory(self):
JProcessPythonEnvironmentManager = \
get_gateway().jvm.org.apache.flink.python.env.ProcessPythonEnvironmentManager
pyflink_dir = os.path.join(self.tmp_dir, "pyflink")
os.mkdir(pyflink_dir)
# just create an empty file
open(os.path.join(pyflink_dir, "__init__.py"), 'a').close()
fn_execution_dir = os.path.join(pyflink_dir, "fn_execution")
os.mkdir(fn_execution_dir)
open(os.path.join(fn_execution_dir, "__init__.py"), 'a').close()
with open(os.path.join(fn_execution_dir, "boot.py"), "w") as f:
f.write("import os\nimport sys\nsys.stdout.write(os.getcwd())")
        # Test that the name of the working directory variable used by the UDF
        # runner is consistent with ProcessPythonEnvironmentManager.
self.env[JProcessPythonEnvironmentManager.PYTHON_WORKING_DIR] = self.tmp_dir
self.env["python"] = sys.executable
args = [self.runner_path]
process_cwd = subprocess.check_output(args, env=self.env).decode("utf-8")
self.assertEqual(os.path.realpath(self.tmp_dir),
process_cwd,
"setting working directory variable is not work!")
def test_install_requirements_without_cached_dir(self):
requirements_txt_path = os.path.join(self.tmp_dir, "requirements_txt_" + str(uuid.uuid4()))
with open(requirements_txt_path, 'w') as f:
f.write("#test line continuation\ncloudpickle\\\n==1.2.2\npy4j==0.10.8.1")
self.env[PYTHON_REQUIREMENTS_FILE] = requirements_txt_path
requirements_target_dir_path = \
os.path.join(self.tmp_dir, "requirements_target_dir_" + str(uuid.uuid4()))
self.env[PYTHON_REQUIREMENTS_INSTALL_DIR] = requirements_target_dir_path
exit_code = self.run_boot_py()
self.assertTrue(exit_code == 0, "the boot.py exited with non-zero code.")
self.check_installed_files(requirements_target_dir_path, ["cloudpickle", "py4j"])
def test_install_requirements_with_cached_dir(self):
requirements_txt_path = os.path.join(self.tmp_dir, "requirements_txt_" + str(uuid.uuid4()))
with open(requirements_txt_path, 'w') as f:
f.write("python-package1==0.0.0")
self.env[PYTHON_REQUIREMENTS_FILE] = requirements_txt_path
self.env[PYTHON_REQUIREMENTS_CACHE] = os.path.join(self.tmp_dir, "staged")
requirements_target_dir_path = \
os.path.join(self.tmp_dir, "requirements_target_dir_" + str(uuid.uuid4()))
self.env[PYTHON_REQUIREMENTS_INSTALL_DIR] = requirements_target_dir_path
exit_code = self.run_boot_py()
self.assertTrue(exit_code == 0, "the boot.py exited with non-zero code.")
self.check_installed_files(requirements_target_dir_path, ["python_package1"])
def tearDown(self):
self.artifact_server.stop(0)
self.provision_server.stop(0)
try:
if self.tmp_dir is not None:
shutil.rmtree(self.tmp_dir)
        except Exception:
pass
|
|
import sys
import numpy as np
import matplotlib.pyplot as plt
dimensions=[2,3,4,5,6,7,8,9,10]
print("Gaussian dimensional analysis \n")
#Decision Tree
p_1_dt = []
p_2_dt = []
p_3_dt = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../dt_gauss/"+str(dim)+'Dgauss_dt_p_values_1_2_3_std_dev.txt')
p_1_dt.append(temp1), p_2_dt.append(temp2), p_3_dt.append(temp3)
print("Decision tree : ", p_1_dt,p_2_dt,p_3_dt)
p_1_bdt = []
p_2_bdt = []
p_3_bdt = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../bdt_gauss/"+str(dim)+'Dgauss_bdt_p_values_1_2_3_std_dev.txt')
p_1_bdt.append(temp1), p_2_bdt.append(temp2), p_3_bdt.append(temp3)
print("Boosted decision tree : ", p_1_bdt,p_2_bdt,p_3_bdt)
p_1_bdt_AD = []
p_2_bdt_AD = []
p_3_bdt_AD = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../bdt_gauss/"+str(dim)+'Dgauss_bdt_AD_p_values_1_2_3_std_dev.txt')
p_1_bdt_AD.append(temp1), p_2_bdt_AD.append(temp2), p_3_bdt_AD.append(temp3)
print("Boosted decision tree Anderson Darling : ", p_1_bdt_AD,p_2_bdt_AD,p_3_bdt_AD)
p_1_svm = []
p_2_svm = []
p_3_svm = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../svm_gauss/"+str(dim)+'Dgauss_svm_p_values_1_2_3_std_dev.txt')
p_1_svm.append(temp1), p_2_svm.append(temp2), p_3_svm.append(temp3)
print("Support vector machine : ", p_1_svm,p_2_svm,p_3_svm)
p_1_nn_6_200 = []
p_2_nn_6_200 = []
p_3_nn_6_200 = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../nn_gauss/"+str(dim)+'Dgauss_nn_p_values_1_2_3_std_dev.txt')
p_1_nn_6_200.append(temp1), p_2_nn_6_200.append(temp2), p_3_nn_6_200.append(temp3)
print("Neural Network 6 layers 200 neurons : ", p_1_nn_6_200,p_2_nn_6_200,p_3_nn_6_200)
p_1_nn_4_100 = []
p_2_nn_4_100 = []
p_3_nn_4_100 = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../nn_gauss/"+str(dim)+'Dgauss_nn_4layers_100neurons_p_values_1_2_3_std_dev.txt')
p_1_nn_4_100.append(temp1), p_2_nn_4_100.append(temp2), p_3_nn_4_100.append(temp3)
print("Neural Network 4 layers 100 neurons : ", p_1_nn_4_100,p_2_nn_4_100,p_3_nn_4_100)
# Using the old architecture
dim= np.array([10,2,3,4,5,6,7,8,9])
p = dim.argsort()
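# The rows of the file correspond to the dimensions listed above (10, 2, 3, ..., 9);
# argsort gives the permutation that reorders them into ascending dimension order.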
p_nn_4_100_total = np.loadtxt('gaussian_dimensionality_analysis_nn')
p_1_nn_4_100_old = p_nn_4_100_total[p,0].tolist()
p_2_nn_4_100_old = p_nn_4_100_total[p,1].tolist()
p_3_nn_4_100_old = p_nn_4_100_total[p,2].tolist()
print("Neural Network 4 layers 100 neurons old architecture : ", p_1_nn_4_100_old,p_2_nn_4_100_old,p_3_nn_4_100_old)
p_1_miranda_2bins = []
p_2_miranda_2bins = []
p_3_miranda_2bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gauss/"+str(dim)+'Dgauss_miranda_2bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_2bins.append(temp1), p_2_miranda_2bins.append(temp2), p_3_miranda_2bins.append(temp3)
print("Miranda 2 bins: ", p_1_miranda_2bins,p_2_miranda_2bins,p_3_miranda_2bins)
p_1_miranda_3bins = []
p_2_miranda_3bins = []
p_3_miranda_3bins = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gauss/"+str(dim)+'Dgauss_miranda_3bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_3bins.append(temp1), p_2_miranda_3bins.append(temp2), p_3_miranda_3bins.append(temp3)
print("Miranda 3 bins: ", p_1_miranda_3bins,p_2_miranda_3bins,p_3_miranda_3bins)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(dimensions,p_2_dt,label=r"dt 2$\sigma$",color='black')
ax.plot(dimensions,p_2_bdt,label=r"bdt 2$\sigma$",color='darkorange')
#ax.plot(dimensions,p_2_bdt_AD,label=r"bdt AD 2$\sigma$",color='saddlebrown')
ax.plot(dimensions,p_2_svm,label=r"svm 2$\sigma$",color='lawngreen')
ax.plot(dimensions,p_2_nn_6_200,label=r"nn 6l 200n 2$\sigma$",color='blue')
ax.plot(dimensions,p_2_nn_4_100,label=r"nn 4l 100n 2$\sigma$",color='blueviolet')
ax.plot(dimensions,p_2_nn_4_100_old,label=r"nn 4l 100n old 2$\sigma$",color='cyan')
ax.plot(dimensions,p_2_miranda_2bins,label=r"Miranda 2bins 2$\sigma$",color='red')
ax.plot(dimensions,p_2_miranda_3bins,label=r"Miranda 3bins 2$\sigma$",color='darkred')
plt.ylim([0,100])
ax.set_xlabel("Number of dimensions")
ax.set_ylabel("Number of samples")
ax.set_title("Dimensionality analysis")
ax.legend(loc='upper right')
fig_name="dimensionality_analysis"
fig.savefig(fig_name)
fig.savefig("../dt_gauss/"+fig_name)
fig.savefig("../bdt_gauss/"+fig_name)
fig.savefig("../svm_gauss/"+fig_name)
fig.savefig("../nn_gauss/"+fig_name)
fig.savefig("../miranda_gauss/"+fig_name)
print("Saved the figure as" , fig_name+".png")
############################################################################################################################
############################################################################################################################
####################################################### Double Gauss #######################################################
############################################################################################################################
############################################################################################################################
print("\n\nDouble gaussian dimensional analysis distance_to_original 0.1\n")
p_1_miranda_2bins_double = []
p_2_miranda_2bins_double = []
p_3_miranda_2bins_double = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gauss/"+str(dim)+'Dgauss_double_miranda_2bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_2bins_double.append(temp1), p_2_miranda_2bins_double.append(temp2), p_3_miranda_2bins_double.append(temp3)
print("Miranda 2 bins_double: ", p_1_miranda_2bins_double,p_2_miranda_2bins_double,p_3_miranda_2bins_double)
p_1_miranda_3bins_double = []
p_2_miranda_3bins_double = []
p_3_miranda_3bins_double = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gauss/"+str(dim)+'Dgauss_double_miranda_3bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_3bins_double.append(temp1), p_2_miranda_3bins_double.append(temp2), p_3_miranda_3bins_double.append(temp3)
print("Miranda 3 bins: ", p_1_miranda_3bins,p_2_miranda_3bins,p_3_miranda_3bins)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(dimensions,p_2_miranda_2bins_double,label=r"Miranda 2bins 2$\sigma$",color='red')
ax.plot(dimensions,p_2_miranda_3bins_double,label=r"Miranda 3bins 2$\sigma$",color='darkred')
plt.ylim([0,100])
ax.set_xlabel("Number of dimensions")
ax.set_ylabel("Number of samples")
ax.set_title("Dimensionality analysis double gaussian dist 0.1")
ax.legend(loc='upper right')
fig_name="dimensionality_analysis_double"
fig.savefig(fig_name)
fig.savefig("../dt_gauss/"+fig_name)
fig.savefig("../bdt_gauss/"+fig_name)
fig.savefig("../svm_gauss/"+fig_name)
fig.savefig("../nn_gauss/"+fig_name)
fig.savefig("../miranda_gauss/"+fig_name)
print("Saved the figure as" , fig_name+".png")
############################################################################################################################
print("\n\nDouble gaussian dimensional analysis distance_to_original 0.02\n")
p_1_miranda_2bins_double_dist02 = []
p_2_miranda_2bins_double_dist02 = []
p_3_miranda_2bins_double_dist02 = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gauss/"+str(dim)+'Dgauss_double_dist02_miranda_2bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_2bins_double_dist02.append(temp1), p_2_miranda_2bins_double_dist02.append(temp2), p_3_miranda_2bins_double_dist02.append(temp3)
print("Miranda 2 bins_double_dist02: ", p_1_miranda_2bins_double_dist02,p_2_miranda_2bins_double_dist02,p_3_miranda_2bins_double_dist02)
p_1_miranda_3bins_double_dist02 = []
p_2_miranda_3bins_double_dist02 = []
p_3_miranda_3bins_double_dist02 = []
for dim in range(2,11):
temp1,temp2,temp3 = np.loadtxt("../miranda_gauss/"+str(dim)+'Dgauss_double_dist02_miranda_3bins_p_values_1_2_3_std_dev.txt')
p_1_miranda_3bins_double_dist02.append(temp1), p_2_miranda_3bins_double_dist02.append(temp2), p_3_miranda_3bins_double_dist02.append(temp3)
print("Miranda 2 bins_double_dist02: ", p_1_miranda_2bins_double_dist02,p_2_miranda_2bins_double_dist02,p_3_miranda_2bins_double_dist02)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(dimensions,p_2_miranda_2bins_double_dist02,label=r"Miranda 2bins 2$\sigma$",color='red')
ax.plot(dimensions,p_2_miranda_3bins_double_dist02,label=r"Miranda 3bins 2$\sigma$",color='darkred')
plt.ylim([0,100])
ax.set_xlabel("Number of dimensions")
ax.set_ylabel("Number of samples")
ax.set_title("Dimensionality analysis double gaussian dist 0.02")
ax.legend(loc='upper right')
fig_name="dimensionality_analysis_double_dist02"
fig.savefig(fig_name)
fig.savefig("../dt_gauss/"+fig_name)
fig.savefig("../bdt_gauss/"+fig_name)
fig.savefig("../svm_gauss/"+fig_name)
fig.savefig("../nn_gauss/"+fig_name)
fig.savefig("../miranda_gauss/"+fig_name)
print("Saved the figure as" , fig_name+".png")
|
|
import os
from os import path
import cherrypy
from cherrypy.lib import auth_basic
import subprocess
import signal
import cgi
import tempfile
import json
import schedule
import time
import threading
def validate_password(realm, username, password):
with open('users.json') as users_file:
users = json.load(users_file)
return username in users and users[username] == password
class myFieldStorage(cgi.FieldStorage):
def make_file(self, binary = None):
return tempfile.NamedTemporaryFile()
def noBodyProcess():
"""Sets cherrypy.request.process_request_body = False, giving
us direct control of the file upload destination. By default
cherrypy loads it to memory, we are directing it to disk."""
cherrypy.request.process_request_body = False
cherrypy.tools.noBodyProcess = cherrypy.Tool("before_request_body", noBodyProcess)
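# The tool registered above runs at the 'before_request_body' hook point, so the
# raw request body is left unread on cherrypy.request.rfile and the handler (see
# PlayerApp.upload below) can parse it itself with cgi.FieldStorage, spooling
# uploads to temporary files instead of memory.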
class PlayerApp(object):
player = 0
schedule_thread = threading.Event()
def load_schedule(self):
with open('schedule.json') as j:
sch = json.load(j)
for time in sch["play"]:
schedule.every().day.at(time).do(self.play)
print(time)
for time in sch["stop"]:
schedule.every().day.at(time).do(self.stop_mplayer)
def run_schedule(self, cease_continuous_run, interval=1):
class ScheduleThread(threading.Thread):
name = 'schedule'
@classmethod
def run(cls):
while not cease_continuous_run.is_set():
schedule.run_pending()
time.sleep(interval)
continuous_thread = ScheduleThread()
continuous_thread.start()
def __init__(self):
cherrypy.engine.subscribe('stop', self.cherrypy_stopping)
cherrypy.engine.subscribe('start', self.cherrypy_starting)
def cherrypy_starting(self):
self.schedule_thread.set()
self.load_schedule()
def cherrypy_stopping(self):
if self.schedule_thread != 0:
self.schedule_thread.set()
@cherrypy.expose
def index(self):
        return open('index.html')
@cherrypy.expose
def play(self):
if self.player == 0:
path = os.getcwd() + '/music/'
files = os.listdir(path)
i = 0
for f in files:
files[i] = path + f
i = i + 1
self.player = subprocess.Popen(["mplayer", "-loop", "0", "-shuffle", "-quiet"] + files,
stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
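            # mplayer is left running with a pipe to stdin; the handlers below
            # (next/prev/volup/voldown) drive it by writing its single-key
            # keyboard controls ('>', '<', '*', '/') to that pipe.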
@cherrypy.expose
def splay(self):
if self.schedule_thread.is_set():
self.schedule_thread.clear()
self.run_schedule(self.schedule_thread)
@cherrypy.expose
def next(self):
if self.player != 0:
self.player.stdin.write(">")
@cherrypy.expose
def prev(self):
if self.player != 0:
self.player.stdin.write("<")
@cherrypy.expose
def volup(self):
if self.player != 0:
self.player.stdin.write("*")
@cherrypy.expose
def voldown(self):
if self.player != 0:
self.player.stdin.write("/")
def stop_mplayer(self):
if self.player != 0:
self.player.terminate()
self.player = 0
@cherrypy.expose
def stop(self, all = False):
self.stop_mplayer()
self.schedule_thread.set()
@cherrypy.expose
@cherrypy.tools.json_out()
def ls(self):
path = os.getcwd() + '/music/'
return os.listdir(path)
@cherrypy.expose
def rm(self, file):
f = os.getcwd() + '/music/' + file
os.remove(f)
@cherrypy.expose
def rmall(self):
path = os.getcwd() + '/music/'
files = os.listdir(path)
for f in files:
os.remove(path + f)
@cherrypy.expose
def state(self):
if not self.schedule_thread.is_set():
return 'schedule'
if self.player == 0:
return 'stopped'
else:
return 'playing'
@cherrypy.expose
@cherrypy.tools.noBodyProcess()
def upload(self, theFile = None):
lcHDRS = {}
for key, val in cherrypy.request.headers.iteritems():
lcHDRS[key.lower()] = val
formFields = myFieldStorage(fp = cherrypy.request.rfile,
headers = lcHDRS,
environ = {'REQUEST_METHOD': 'POST'},
keep_blank_values = True)
theFiles = formFields['theFiles']
if not hasattr(theFiles, '__getslice__'): #if only one file is selected theFiles is not an array
theFiles = [theFiles]
for theFile in theFiles:
if theFile.filename == "record.wav":
if os.path.isfile('./rec/record.wav'):
os.remove('./rec/record.wav')
os.link(theFile.file.name, './rec/' + theFile.filename)
path = os.getcwd()
subprocess.call(["mplayer", "-quiet", './rec/announcement.mp3', './rec/record.wav'])
os.remove('./rec/record.wav')
else:
os.link(theFile.file.name, './music/' + theFile.filename)
if __name__ == '__main__':
musicdir = os.getcwd() + '/music'
if not os.path.exists(musicdir):
os.makedirs(musicdir)
conf = {
'/': {
'tools.sessions.on': True,
'tools.staticdir.root': os.path.abspath(os.getcwd()),
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'localhost',
'tools.auth_basic.checkpassword': validate_password
},
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "./public"
},
'/music': {
'tools.staticdir.on': True,
'tools.staticdir.dir': "./music"
}
}
webapp = PlayerApp()
cherrypy.config.update("server.conf")
cherrypy.quickstart(webapp, '/', conf)
|
|
"""
Summary:
Contains the Conduit unit type classes.
This holds all of the data read in from the conduit units in the dat file.
Can be called to load in the data and read and update the contents
held in the object.
Author:
Duncan Runnacles
Copyright:
Duncan Runnacles 2020
TODO:
Updates:
"""
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
"""logging references with a __name__ set to this module."""
from ship.fmp.datunits.isisunit import AUnit
from ship.fmp.headdata import HeadDataItem
from ship.datastructures import DATA_TYPES as dt
from ship.fmp.datunits import ROW_DATA_TYPES as rdt
from ship.datastructures import dataobject as do
from ship.datastructures.rowdatacollection import RowDataCollection
class ConduitUnit(AUnit):
'''Class for dealing with conduit type units in the .dat file.'''
# Class constants
UNIT_TYPE = 'conduit'
UNIT_CATEGORY = 'conduit'
FILE_KEY = 'CONDUIT'
FILE_KEY2 = None
def __init__(self):
'''Constructor.
'''
super(ConduitUnit, self).__init__()
self._unit_type = ConduitUnit.UNIT_TYPE
self._unit_category = ConduitUnit.UNIT_CATEGORY
def icLabels(self):
"""Overriddes superclass method."""
return [self._name, self._name_ds]
def linkLabels(self):
"""Overriddes superclass method."""
return {'name': self.name, 'name_ds': self.name_ds}
class RectangularConduitUnit(ConduitUnit):
# Class constants
UNIT_TYPE = 'conduit_rectangular'
UNIT_CATEGORY = 'conduit'
FILE_KEY = 'CONDUIT'
FILE_KEY2 = 'RECTANGULAR'
def __init__(self, **kwargs):
'''Constructor.
'''
super(RectangularConduitUnit, self).__init__(**kwargs)
self._unit_type = RectangularConduitUnit.UNIT_TYPE
self._unit_category = RectangularConduitUnit.UNIT_CATEGORY
self.head_data = {
'comment': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
'distance': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
'roughness_type': HeadDataItem('MANNING', '', 4, 0, dtype=dt.CONSTANT, choices=('MANNING', 'COLEBROOK-WHITE')),
'invert': HeadDataItem(0.000, '{:>10}', 5, 0, dtype=dt.FLOAT, dps=3),
'width': HeadDataItem(0.000, '{:>10}', 5, 1, dtype=dt.FLOAT, dps=3),
'height': HeadDataItem(0.000, '{:>10}', 5, 2, dtype=dt.FLOAT, dps=3),
'bottom_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 3, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', ''), allow_blank=True),
'bottom_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 4, dtype=dt.FLOAT, dps=3, allow_blank=True),
'bottom_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 5, dtype=dt.FLOAT, dps=3, allow_blank=True),
'top_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 6, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', ''), allow_blank=True),
'top_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 7, dtype=dt.FLOAT, dps=3, allow_blank=True),
'top_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 8, dtype=dt.FLOAT, dps=3, allow_blank=True),
'roughness_invert': HeadDataItem(0.000, '{:>10}', 6, 0, dtype=dt.FLOAT, dps=5),
'roughness_walls': HeadDataItem(0.000, '{:>10}', 6, 1, dtype=dt.FLOAT, dps=5),
'roughness_soffit': HeadDataItem(0.000, '{:>10}', 6, 2, dtype=dt.FLOAT, dps=5),
}
def readUnitData(self, unit_data, file_line):
'''Reads the given data into the object.
See Also:
isisunit.
Args:
unit_data (list): The raw file data to be processed.
'''
self.head_data['comment'].value = unit_data[file_line][8:].strip()
self._name = unit_data[file_line + 2][:12].strip()
self._name_ds = unit_data[file_line + 2][12:].strip()
self.head_data['distance'].value = unit_data[file_line + 3][:10].strip()
self.head_data['roughness_type'].value = unit_data[file_line + 4][:15].strip()
self.head_data['invert'].value = unit_data[file_line + 5][:10].strip()
self.head_data['width'].value = unit_data[file_line + 5][10:20].strip()
self.head_data['height'].value = unit_data[file_line + 5][20:30].strip()
self.head_data['bottom_slot_status'].value = unit_data[file_line + 5][30:40].strip()
self.head_data['bottom_slot_distance'].value = unit_data[file_line + 5][40:50].strip()
self.head_data['bottom_slot_depth'].value = unit_data[file_line + 5][50:60].strip()
self.head_data['top_slot_status'].value = unit_data[file_line + 5][60:70].strip()
self.head_data['top_slot_distance'].value = unit_data[file_line + 5][70:80].strip()
self.head_data['top_slot_depth'].value = unit_data[file_line + 5][80:].strip()
self.head_data['roughness_invert'].value = unit_data[file_line + 6][:10].strip()
self.head_data['roughness_walls'].value = unit_data[file_line + 6][10:20].strip()
self.head_data['roughness_soffit'].value = unit_data[file_line + 6][20:].strip()
return file_line + 6
def getData(self):
'''Returns the formatted data for this unit.
See Also:
isisunit.
Returns:
List of strings formatted for writing to the new dat file.
'''
out = []
out.append('CONDUIT ' + self.head_data['comment'].value)
out.append('\nRECTANGULAR')
out.append('\n' + '{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds))
key_order = ['distance', 'roughness_type', 'invert', 'width', 'height',
'bottom_slot_status', 'bottom_slot_distance', 'bottom_slot_depth',
'top_slot_status', 'top_slot_distance', 'top_slot_depth',
'roughness_invert', 'roughness_walls', 'roughness_soffit']
for k in key_order:
out.append(self.head_data[k].format(True))
out_data = ''.join(out).split('\n')
return out_data
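# A minimal, illustrative round-trip sketch for the class above (an assumption:
# a default-constructed unit is valid and the underscored name attributes can be
# set directly, as readUnitData does). The helper is never called at import time;
# the values are placeholders, not taken from any real .dat file.
def _rectangular_conduit_example():
    unit = RectangularConduitUnit()
    unit._name = 'COND_US'
    unit._name_ds = 'COND_DS'
    unit.head_data['width'].value = 1.200
    unit.head_data['height'].value = 0.900
    # Returns the CONDUIT block as it would be written back to a .dat file
    return unit.getData()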
class CircularConduitUnit(ConduitUnit):
# Class constants
UNIT_TYPE = 'conduit_circular'
UNIT_CATEGORY = 'conduit'
FILE_KEY = 'CONDUIT'
FILE_KEY2 = 'CIRCULAR'
def __init__(self, **kwargs):
'''Constructor.
'''
super(CircularConduitUnit, self).__init__(**kwargs)
self._unit_type = CircularConduitUnit.UNIT_TYPE
self._unit_category = CircularConduitUnit.UNIT_CATEGORY
self.head_data = {
'comment': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
'distance': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
'roughness_type': HeadDataItem('MANNING', '', 4, 0, dtype=dt.CONSTANT, choices=('MANNING', 'COLEBROOK-WHITE')),
'invert': HeadDataItem(0.000, '{:>10}', 5, 0, dtype=dt.FLOAT, dps=3),
'diameter': HeadDataItem(0.000, '{:>10}', 5, 1, dtype=dt.FLOAT, dps=3),
'bottom_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 2, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', '')),
'bottom_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 3, dtype=dt.FLOAT, dps=3, allow_blank=True),
'bottom_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 4, dtype=dt.FLOAT, dps=3, allow_blank=True),
'top_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 5, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', '')),
'top_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 6, dtype=dt.FLOAT, dps=3, allow_blank=True),
'top_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 7, dtype=dt.FLOAT, dps=3, allow_blank=True),
'roughness_below_axis': HeadDataItem(0.000, '{:>10}', 6, 0, dtype=dt.FLOAT, dps=5),
'roughness_above_axis': HeadDataItem(0.000, '{:>10}', 6, 1, dtype=dt.FLOAT, dps=5),
}
def readUnitData(self, unit_data, file_line):
'''Reads the given data into the object.
See Also:
isisunit.
Args:
unit_data (list): The raw file data to be processed.
'''
self.head_data['comment'].value = unit_data[file_line][8:].strip()
self._name = unit_data[file_line + 2][:12].strip()
self._name_ds = unit_data[file_line + 2][12:].strip()
self.head_data['distance'].value = unit_data[file_line + 3][:10].strip()
self.head_data['roughness_type'].value = unit_data[file_line + 4][:15].strip()
self.head_data['invert'].value = unit_data[file_line + 5][:10].strip()
self.head_data['diameter'].value = unit_data[file_line + 5][10:20].strip()
self.head_data['bottom_slot_status'].value = unit_data[file_line + 5][20:30].strip()
self.head_data['bottom_slot_distance'].value = unit_data[file_line + 5][30:40].strip()
self.head_data['bottom_slot_depth'].value = unit_data[file_line + 5][40:50].strip()
self.head_data['top_slot_status'].value = unit_data[file_line + 5][50:60].strip()
self.head_data['top_slot_distance'].value = unit_data[file_line + 5][60:70].strip()
self.head_data['top_slot_depth'].value = unit_data[file_line + 5][70:].strip()
self.head_data['roughness_below_axis'].value = unit_data[file_line + 6][:10].strip()
self.head_data['roughness_above_axis'].value = unit_data[file_line + 6][10:20].strip()
return file_line + 6
def getData(self):
'''Returns the formatted data for this unit.
See Also:
isisunit.
Returns:
List of strings formatted for writing to the new dat file.
'''
out = []
out.append('CONDUIT ' + self.head_data['comment'].value)
out.append('\nCIRCULAR')
out.append('\n' + '{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds))
key_order = ['distance', 'roughness_type', 'invert', 'diameter',
'bottom_slot_status', 'bottom_slot_distance', 'bottom_slot_depth',
'top_slot_status', 'top_slot_distance', 'top_slot_depth',
'roughness_below_axis', 'roughness_above_axis']
for k in key_order:
out.append(self.head_data[k].format(True))
out_data = ''.join(out).split('\n')
return out_data
class FullarchConduitUnit(ConduitUnit):
# Class constants
UNIT_TYPE = 'conduit_fullarch'
UNIT_CATEGORY = 'conduit'
FILE_KEY = 'CONDUIT'
FILE_KEY2 = 'FULLARCH'
def __init__(self, **kwargs):
'''Constructor.
'''
super(FullarchConduitUnit, self).__init__(**kwargs)
self._unit_type = FullarchConduitUnit.UNIT_TYPE
self._unit_category = FullarchConduitUnit.UNIT_CATEGORY
self.head_data = {
'comment': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
'distance': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
'roughness_type': HeadDataItem('MANNING', '', 4, 0, dtype=dt.CONSTANT, choices=('MANNING', 'COLEBROOK-WHITE')),
'invert': HeadDataItem(0.000, '{:>10}', 5, 0, dtype=dt.FLOAT, dps=3),
'width': HeadDataItem(0.000, '{:>10}', 5, 1, dtype=dt.FLOAT, dps=3),
'height': HeadDataItem(0.000, '{:>10}', 5, 2, dtype=dt.FLOAT, dps=3),
'bottom_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 3, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', '')),
'bottom_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 4, dtype=dt.FLOAT, dps=3, allow_blank=True),
'bottom_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 5, dtype=dt.FLOAT, dps=3, allow_blank=True),
'top_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 6, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', '')),
'top_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 7, dtype=dt.FLOAT, dps=3, allow_blank=True),
'top_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 8, dtype=dt.FLOAT, dps=3, allow_blank=True),
'roughness_below_axis': HeadDataItem(0.000, '{:>10}', 6, 0, dtype=dt.FLOAT, dps=5),
'roughness_above_axis': HeadDataItem(0.000, '{:>10}', 6, 1, dtype=dt.FLOAT, dps=5),
}
def readUnitData(self, unit_data, file_line):
'''Reads the given data into the object.
See Also:
isisunit.
Args:
unit_data (list): The raw file data to be processed.
'''
self.head_data['comment'].value = unit_data[file_line][8:].strip()
self._name = unit_data[file_line + 2][:12].strip()
self._name_ds = unit_data[file_line + 2][12:].strip()
self.head_data['distance'].value = unit_data[file_line + 3][:10].strip()
self.head_data['roughness_type'].value = unit_data[file_line + 4][:15].strip()
self.head_data['invert'].value = unit_data[file_line + 5][:10].strip()
self.head_data['width'].value = unit_data[file_line + 5][10:20].strip()
self.head_data['height'].value = unit_data[file_line + 5][20:30].strip()
self.head_data['bottom_slot_status'].value = unit_data[file_line + 5][30:40].strip()
self.head_data['bottom_slot_distance'].value = unit_data[file_line + 5][40:50].strip()
self.head_data['bottom_slot_depth'].value = unit_data[file_line + 5][50:60].strip()
self.head_data['top_slot_status'].value = unit_data[file_line + 5][60:70].strip()
self.head_data['top_slot_distance'].value = unit_data[file_line + 5][70:80].strip()
self.head_data['top_slot_depth'].value = unit_data[file_line + 5][80:].strip()
self.head_data['roughness_below_axis'].value = unit_data[file_line + 6][:10].strip()
self.head_data['roughness_above_axis'].value = unit_data[file_line + 6][10:20].strip()
return file_line + 6
def getData(self):
'''Returns the formatted data for this unit.
See Also:
isisunit.
Returns:
List of strings formatted for writing to the new dat file.
'''
out = []
out.append('CONDUIT ' + self.head_data['comment'].value)
out.append('\nFULLARCH')
out.append('\n' + '{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds))
key_order = ['distance', 'roughness_type', 'invert', 'width', 'height',
'bottom_slot_status', 'bottom_slot_distance', 'bottom_slot_depth',
'top_slot_status', 'top_slot_distance', 'top_slot_depth',
'roughness_below_axis', 'roughness_above_axis']
for k in key_order:
out.append(self.head_data[k].format(True))
out_data = ''.join(out).split('\n')
return out_data
class SprungarchConduitUnit(ConduitUnit):
# Class constants
UNIT_TYPE = 'conduit_sprungarch'
UNIT_CATEGORY = 'conduit'
FILE_KEY = 'CONDUIT'
FILE_KEY2 = 'SPRUNGARCH'
def __init__(self, **kwargs):
'''Constructor.
'''
super(SprungarchConduitUnit, self).__init__(**kwargs)
self._unit_type = SprungarchConduitUnit.UNIT_TYPE
self._unit_category = SprungarchConduitUnit.UNIT_CATEGORY
self.head_data = {
'comment': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
'distance': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
'roughness_type': HeadDataItem('MANNING', '', 4, 0, dtype=dt.CONSTANT, choices=('MANNING', 'COLEBROOK-WHITE')),
'invert': HeadDataItem(0.000, '{:>10}', 5, 0, dtype=dt.FLOAT, dps=3),
'width': HeadDataItem(0.000, '{:>10}', 5, 1, dtype=dt.FLOAT, dps=3),
'springing_height': HeadDataItem(0.000, '{:>10}', 5, 2, dtype=dt.FLOAT, dps=3),
'crown_height': HeadDataItem(0.000, '{:>10}', 5, 3, dtype=dt.FLOAT, dps=3),
'bottom_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 4, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', '')),
'bottom_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 5, dtype=dt.FLOAT, dps=3, allow_blank=True),
'bottom_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 6, dtype=dt.FLOAT, dps=3, allow_blank=True),
'top_slot_status': HeadDataItem('GLOBAL', '{:>10}', 5, 7, dtype=dt.CONSTANT, choices=('ON', 'OFF', 'GLOBAL', '')),
'top_slot_distance': HeadDataItem(0.000, '{:>10}', 5, 8, dtype=dt.FLOAT, dps=3, allow_blank=True),
'top_slot_depth': HeadDataItem(0.000, '{:>10}', 5, 9, dtype=dt.FLOAT, dps=3, allow_blank=True),
'roughness_invert': HeadDataItem(0.000, '{:>10}', 6, 0, dtype=dt.FLOAT, dps=5),
'roughness_walls': HeadDataItem(0.000, '{:>10}', 6, 1, dtype=dt.FLOAT, dps=5),
'roughness_soffit': HeadDataItem(0.000, '{:>10}', 6, 2, dtype=dt.FLOAT, dps=5),
}
def readUnitData(self, unit_data, file_line):
'''Reads the given data into the object.
See Also:
isisunit.
Args:
unit_data (list): The raw file data to be processed.
'''
self.head_data['comment'].value = unit_data[file_line][8:].strip()
self._name = unit_data[file_line + 2][:12].strip()
self._name_ds = unit_data[file_line + 2][12:].strip()
self.head_data['distance'].value = unit_data[file_line + 3][:10].strip()
self.head_data['roughness_type'].value = unit_data[file_line + 4][:15].strip()
self.head_data['invert'].value = unit_data[file_line + 5][:10].strip()
self.head_data['width'].value = unit_data[file_line + 5][10:20].strip()
self.head_data['springing_height'].value = unit_data[file_line + 5][20:30].strip()
self.head_data['crown_height'].value = unit_data[file_line + 5][30:40].strip()
self.head_data['bottom_slot_status'].value = unit_data[file_line + 5][40:50].strip()
self.head_data['bottom_slot_distance'].value = unit_data[file_line + 5][50:60].strip()
self.head_data['bottom_slot_depth'].value = unit_data[file_line + 5][60:70].strip()
self.head_data['top_slot_status'].value = unit_data[file_line + 5][70:80].strip()
self.head_data['top_slot_distance'].value = unit_data[file_line + 5][80:90].strip()
self.head_data['top_slot_depth'].value = unit_data[file_line + 5][90:].strip()
self.head_data['roughness_invert'].value = unit_data[file_line + 6][:10].strip()
self.head_data['roughness_walls'].value = unit_data[file_line + 6][10:20].strip()
self.head_data['roughness_soffit'].value = unit_data[file_line + 6][20:30].strip()
return file_line + 6
def getData(self):
'''Returns the formatted data for this unit.
See Also:
isisunit.
Returns:
List of strings formatted for writing to the new dat file.
'''
out = []
out.append('CONDUIT ' + self.head_data['comment'].value)
out.append('\nSPRUNGARCH')
out.append('\n' + '{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds))
key_order = ['distance', 'roughness_type', 'invert', 'width', 'springing_height',
'crown_height', 'bottom_slot_status', 'bottom_slot_distance',
'bottom_slot_depth', 'top_slot_status', 'top_slot_distance',
'top_slot_depth', 'roughness_invert', 'roughness_walls',
'roughness_soffit']
for k in key_order:
out.append(self.head_data[k].format(True))
out_data = ''.join(out).split('\n')
return out_data
class RowDataConduitType(ConduitUnit):
def __init__(self, **kwargs):
'''Constructor.
'''
super(RowDataConduitType, self).__init__(**kwargs)
self._setup_headdata()
dobjs = [
# update_callback is called every time a value is added or updated
do.FloatData(rdt.CHAINAGE, format_str='{:>10}', no_of_dps=3),
do.FloatData(rdt.ELEVATION, format_str='{:>10}', no_of_dps=3),
            # Note: roughness must be Colebrook-White for symmetrical conduits
do.FloatData(rdt.ROUGHNESS, format_str='{:>10}', default=0.039, no_of_dps=5),
]
self.row_data['main'] = RowDataCollection.bulkInitCollection(dobjs)
self.row_data['main'].setDummyRow({rdt.CHAINAGE: 0, rdt.ELEVATION: 0, rdt.ROUGHNESS: 0})
def _setup_headdata(self):
pass
def readUnitData(self, unit_data, file_line):
'''Reads the given data into the object.
See Also:
isisunit.
Args:
unit_data (list): The raw file data to be processed.
'''
file_line = self._readHeadData(unit_data, file_line)
file_line = self._readRowData(unit_data, file_line)
return file_line - 1
def _readRowData(self, unit_data, file_line):
"""Reads the units rows into the row collection.
This is all the geometry data that occurs after the no of rows variable in
the River Units of the dat file.
Args:
unit_data (list): the data pertaining to this unit.
"""
end_line = int(unit_data[file_line].strip())
file_line += 1
try:
# Load the geometry data
for i in range(file_line, end_line + file_line):
chain = unit_data[i][0:10].strip()
elev = unit_data[i][10:20].strip()
rough = unit_data[i][20:30].strip()
self.row_data['main'].addRow(
{rdt.CHAINAGE: chain, rdt.ELEVATION: elev, rdt.ROUGHNESS: rough},
# We don't need to make backup copies here. If it fails the
# load fails anyway and this will just really slow us down
no_copy=True
)
except NotImplementedError:
            logger.error('Unable to read Unit Data(dataRowObject creation) - NotImplementedError')
raise
return end_line + file_line
def getData(self):
"""Retrieve the data in this unit.
The String[] returned is formatted for printing in the fashion
of the .dat file.
Return:
            List of strings formatted for writing to .dat file.
"""
row_count = self.row_data['main'].numberOfRows()
out_data = self._getHeadData()
out_data.append('{:>10}'.format(row_count))
out_data.extend(self._getRowData(row_count))
return out_data
def _getRowData(self, row_count):
"""Returns the row data in this class.
For all the rows in the river geometry section get the data from
the rowdatacollection class.
Returns:
list = containing the formatted unit rows.
"""
out_data = []
for i in range(0, row_count):
out_data.append(self.row_data['main'].getPrintableRow(i))
return out_data
class SymmetricalConduitUnit(RowDataConduitType):
# Class constants
UNIT_TYPE = 'conduit_symmetrical'
UNIT_CATEGORY = 'conduit'
FILE_KEY = 'CONDUIT'
FILE_KEY2 = 'SECTION'
def __init__(self, **kwargs):
'''Constructor.
'''
super(SymmetricalConduitUnit, self).__init__(**kwargs)
def _setup_headdata(self):
self._unit_type = SymmetricalConduitUnit.UNIT_TYPE
self._unit_category = SymmetricalConduitUnit.UNIT_CATEGORY
self.head_data = {
'comment': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
'distance': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
}
def _readHeadData(self, unit_data, file_line):
"""Format the header data for writing to file.
Args:
unit_data (list): containing the data to read.
"""
self.head_data['comment'].value = unit_data[file_line + 0][8:].strip()
self._name = unit_data[file_line + 2][:12].strip()
self._name_ds = unit_data[file_line + 2][12:24].strip()
self.head_data['distance'].value = unit_data[file_line + 3][:10].strip()
return file_line + 4
def _getHeadData(self):
"""Get the header data formatted for printing out to file.
Returns:
List of strings - The formatted header list.
"""
out = []
out.append('CONDUIT ' + self.head_data['comment'].value)
out.append('SECTION')
out.append('{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds))
out.append('{:<10}'.format(self.head_data['distance'].format()))
return out
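# A similar hedged sketch for the row-data based units: geometry rows are added
# through the RowDataCollection API used in _readRowData above. Placeholder
# values only; the helper is not called anywhere.
def _symmetrical_conduit_example():
    unit = SymmetricalConduitUnit()
    unit._name = 'SYM_US'
    unit._name_ds = 'SYM_DS'
    for chainage, elevation in [(0.0, 10.0), (1.5, 9.2), (3.0, 10.0)]:
        unit.row_data['main'].addRow(
            {rdt.CHAINAGE: chainage, rdt.ELEVATION: elevation, rdt.ROUGHNESS: 0.039}
        )
    return unit.getData()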
class AsymmetricalConduitUnit(RowDataConduitType):
# Class constants
UNIT_TYPE = 'conduit_asymmetrical'
UNIT_CATEGORY = 'conduit'
FILE_KEY = 'CONDUIT'
FILE_KEY2 = 'ASYMMETRIC'
def __init__(self, **kwargs):
'''Constructor.
'''
super(AsymmetricalConduitUnit, self).__init__(**kwargs)
def _setup_headdata(self):
self._unit_type = AsymmetricalConduitUnit.UNIT_TYPE
self._unit_category = AsymmetricalConduitUnit.UNIT_CATEGORY
self.head_data = {
'comment': HeadDataItem('', '', 0, 0, dtype=dt.STRING),
'distance': HeadDataItem(0.000, '{:>10}', 3, 0, dtype=dt.FLOAT, dps=3),
'roughness_type': HeadDataItem('DARCY', '', 4, 0, dtype=dt.CONSTANT, choices=('MANNING', 'DARCY')),
}
def _readHeadData(self, unit_data, file_line):
"""Format the header data for writing to file.
Args:
unit_data (list): containing the data to read.
"""
self.head_data['comment'].value = unit_data[file_line + 0][8:].strip()
self._name = unit_data[file_line + 2][:12].strip()
self._name_ds = unit_data[file_line + 2][12:24].strip()
self.head_data['distance'].value = unit_data[file_line + 3][:10].strip()
self.head_data['roughness_type'].value = unit_data[file_line + 3][10:20].strip()
return file_line + 4
def _getHeadData(self):
"""Get the header data formatted for printing out to file.
Returns:
List of strings - The formatted header list.
"""
out = []
out.append('CONDUIT ' + self.head_data['comment'].value)
out.append('ASYMMETRIC')
out.append('{:<12}'.format(self._name) + '{:<12}'.format(self._name_ds))
out.append(
'{:<10}'.format(self.head_data['distance'].format()) +
'{:>10}'.format(self.head_data['roughness_type'].format())
)
return out
|
|
"""
CUDA driver bridge implementation
NOTE:
The new driver implementation uses a "trashing service" that helps prevent
crashing the system (particularly OSX) when the CUDA context is corrupted at
resource deallocation. The old approach tied resource management directly
into the object destructor; thus, once the CUDA context was corrupted,
subsequent deallocation could corrupt it further and cause the system to
freeze in some cases.
"""
from __future__ import absolute_import, print_function, division
import sys
import os
import ctypes
import weakref
import functools
import copy
import warnings
from ctypes import (c_int, byref, c_size_t, c_char, c_char_p, addressof,
c_void_p, c_float)
import contextlib
import numpy as np
from collections import namedtuple
from numba import utils, servicelib, mviewbuf
from .error import CudaSupportError, CudaDriverError
from .drvapi import API_PROTOTYPES
from .drvapi import cu_occupancy_b2d_size
from . import enums, drvapi
from numba import config
from numba.utils import longint as long
VERBOSE_JIT_LOG = int(os.environ.get('NUMBAPRO_VERBOSE_CU_JIT_LOG', 1))
MIN_REQUIRED_CC = (2, 0)
class DeadMemoryError(RuntimeError):
pass
class LinkerError(RuntimeError):
pass
class CudaAPIError(CudaDriverError):
def __init__(self, code, msg):
self.code = code
self.msg = msg
super(CudaAPIError, self).__init__(code, msg)
def __str__(self):
return "[%s] %s" % (self.code, self.msg)
def find_driver():
envpath = os.environ.get('NUMBAPRO_CUDA_DRIVER', None)
if envpath == '0':
# Force fail
_raise_driver_not_found()
# Determine DLL type
if sys.platform == 'win32':
dlloader = ctypes.WinDLL
dldir = ['\\windows\\system32']
dlname = 'nvcuda.dll'
elif sys.platform == 'darwin':
dlloader = ctypes.CDLL
dldir = ['/usr/local/cuda/lib']
dlname = 'libcuda.dylib'
else:
# Assume to be *nix like
dlloader = ctypes.CDLL
dldir = ['/usr/lib', '/usr/lib64']
dlname = 'libcuda.so'
if envpath is not None:
try:
envpath = os.path.abspath(envpath)
except ValueError:
raise ValueError("NUMBAPRO_CUDA_DRIVER %s is not a valid path" %
envpath)
if not os.path.isfile(envpath):
raise ValueError("NUMBAPRO_CUDA_DRIVER %s is not a valid file "
"path. Note it must be a filepath of the .so/"
".dll/.dylib or the driver" % envpath)
candidates = [envpath]
else:
# First search for the name in the default library path.
# If that is not found, try the specific path.
candidates = [dlname] + [os.path.join(x, dlname) for x in dldir]
# Load the driver; Collect driver error information
path_not_exist = []
driver_load_error = []
for path in candidates:
try:
dll = dlloader(path)
except OSError as e:
# Problem opening the DLL
path_not_exist.append(not os.path.isfile(path))
driver_load_error.append(e)
else:
return dll
# Problem loading driver
if all(path_not_exist):
_raise_driver_not_found()
else:
errmsg = '\n'.join(str(e) for e in driver_load_error)
_raise_driver_error(errmsg)
DRIVER_NOT_FOUND_MSG = """
CUDA driver library cannot be found.
If you are sure that a CUDA driver is installed,
try setting environment variable NUMBAPRO_CUDA_DRIVER
with the file path of the CUDA driver shared library.
"""
DRIVER_LOAD_ERROR_MSG = """
Possible CUDA driver libraries are found but error occurred during load:
%s
"""
def _raise_driver_not_found():
raise CudaSupportError(DRIVER_NOT_FOUND_MSG)
def _raise_driver_error(e):
raise CudaSupportError(DRIVER_LOAD_ERROR_MSG % e)
def _build_reverse_error_map():
prefix = 'CUDA_ERROR'
map = utils.UniqueDict()
for name in dir(enums):
if name.startswith(prefix):
code = getattr(enums, name)
map[code] = name
return map
def _getpid():
return os.getpid()
ERROR_MAP = _build_reverse_error_map()
MISSING_FUNCTION_ERRMSG = """driver missing function: %s.
Requires CUDA 7.5 or above.
"""
class Driver(object):
"""
Driver API functions are lazily bound.
"""
_singleton = None
def __new__(cls):
obj = cls._singleton
if obj is not None:
return obj
else:
obj = object.__new__(cls)
cls._singleton = obj
return obj
def __init__(self):
self.devices = utils.UniqueDict()
self.is_initialized = False
self.initialization_error = None
self.pid = None
try:
if config.DISABLE_CUDA:
raise CudaSupportError("CUDA disabled by user")
self.lib = find_driver()
except CudaSupportError as e:
self.is_initialized = True
self.initialization_error = e
def initialize(self):
self.is_initialized = True
try:
self.cuInit(0)
except CudaAPIError as e:
self.initialization_error = e
raise CudaSupportError("Error at driver init: \n%s:" % e)
else:
self.pid = _getpid()
@property
def is_available(self):
if not self.is_initialized:
self.initialize()
return self.initialization_error is None
def __getattr__(self, fname):
# First request of a driver API function
try:
proto = API_PROTOTYPES[fname]
except KeyError:
raise AttributeError(fname)
restype = proto[0]
argtypes = proto[1:]
# Initialize driver
if not self.is_initialized:
self.initialize()
if self.initialization_error is not None:
raise CudaSupportError("Error at driver init: \n%s:" %
self.initialization_error)
# Find function in driver library
libfn = self._find_api(fname)
libfn.restype = restype
libfn.argtypes = argtypes
@functools.wraps(libfn)
def safe_cuda_api_call(*args):
retcode = libfn(*args)
self._check_error(fname, retcode)
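        # Cache the bound wrapper on the instance so subsequent lookups of this
        # API function bypass __getattr__ entirely.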
setattr(self, fname, safe_cuda_api_call)
return safe_cuda_api_call
def _find_api(self, fname):
# Try version 2
try:
return getattr(self.lib, fname + "_v2")
except AttributeError:
pass
# Try regular
try:
return getattr(self.lib, fname)
except AttributeError:
pass
# Not found.
        # Delay the missing-function error until the function is actually used
def absent_function(*args, **kws):
raise CudaDriverError(MISSING_FUNCTION_ERRMSG % fname)
setattr(self, fname, absent_function)
return absent_function
def _check_error(self, fname, retcode):
if retcode != enums.CUDA_SUCCESS:
errname = ERROR_MAP.get(retcode, "UNKNOWN_CUDA_ERROR")
msg = "Call to %s results in %s" % (fname, errname)
raise CudaAPIError(retcode, msg)
def get_device(self, devnum=0):
dev = self.devices.get(devnum)
if dev is None:
dev = Device(devnum)
self.devices[devnum] = dev
return weakref.proxy(dev)
def get_device_count(self):
count = c_int()
self.cuDeviceGetCount(byref(count))
return count.value
def list_devices(self):
"""Returns a list of active devices
"""
return list(self.devices.values())
def reset(self):
"""Reset all devices
"""
for dev in self.devices.values():
dev.reset()
def get_context(self):
"""Get current active context in CUDA driver runtime.
Note: Lowlevel calls that returns the handle.
"""
# Detect forking
if self.is_initialized and _getpid() != self.pid:
raise CudaDriverError("CUDA initialized before forking")
handle = drvapi.cu_context(0)
driver.cuCtxGetCurrent(byref(handle))
if not handle.value:
return None
return handle
driver = Driver()
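# Module-level handle; Driver.__new__ above makes the class a singleton, so any
# further Driver() calls return this same object.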
class TrashService(servicelib.Service):
"""
We need this to enqueue things to be removed. There are times when you
want to disable deallocation because that would break asynchronous work
queues.
"""
CLEAN_LIMIT = 20
def add_trash(self, item):
self.trash.append(item)
def process(self, _arg):
self.trash = []
yield
while True:
count = 0
# Clean the trash
assert self.CLEAN_LIMIT > count
while self.trash and count < self.CLEAN_LIMIT:
cb = self.trash.pop()
# Invoke callback
cb()
count += 1
yield
def clear(self):
while self.trash:
cb = self.trash.pop()
cb()
@contextlib.contextmanager
def defer_cleanup(self):
orig = self.enabled
self.enabled = False
yield
self.enabled = orig
self.service()
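# How the service above is used in this module (a summary, not new behaviour):
# resource finalizers (see _context_finalizer below) push deallocation callbacks
# onto a device's trash service via add_trash() instead of freeing resources
# immediately; Device.reset()/create_context() drain the queue via clear() and
# service(), and defer_cleanup() temporarily disables draining so asynchronous
# work queues are not broken mid-flight.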
def _build_reverse_device_attrs():
prefix = "CU_DEVICE_ATTRIBUTE_"
map = utils.UniqueDict()
for name in dir(enums):
if name.startswith(prefix):
map[name[len(prefix):]] = getattr(enums, name)
return map
DEVICE_ATTRIBUTES = _build_reverse_device_attrs()
class Device(object):
"""
The device object owns the CUDA contexts. This is owned by the driver
object. User should not construct devices directly.
"""
def __init__(self, devnum):
got_devnum = c_int()
driver.cuDeviceGet(byref(got_devnum), devnum)
assert devnum == got_devnum.value, "Driver returned another device"
self.id = got_devnum.value
self.trashing = trashing = TrashService("cuda.device%d.trash" % self.id)
self.attributes = {}
# Read compute capability
cc_major = c_int()
cc_minor = c_int()
driver.cuDeviceComputeCapability(byref(cc_major), byref(cc_minor),
self.id)
self.compute_capability = (cc_major.value, cc_minor.value)
# Read name
bufsz = 128
buf = (c_char * bufsz)()
driver.cuDeviceGetName(buf, bufsz, self.id)
self.name = buf.value
def finalizer():
trashing.clear()
utils.finalize(self, finalizer)
@property
def COMPUTE_CAPABILITY(self):
"""
For backward compatibility
"""
warnings.warn("Deprecated attribute 'COMPUTE_CAPABILITY'; use lower "
"case version", DeprecationWarning)
return self.compute_capability
def __repr__(self):
return "<CUDA device %d '%s'>" % (self.id, self.name)
def __getattr__(self, attr):
"""Read attributes lazily
"""
try:
code = DEVICE_ATTRIBUTES[attr]
except KeyError:
raise AttributeError(attr)
value = c_int()
driver.cuDeviceGetAttribute(byref(value), code, self.id)
setattr(self, attr, value.value)
return value.value
def __hash__(self):
return hash(self.id)
def __eq__(self, other):
if isinstance(other, Device):
return self.id == other.id
return False
def __ne__(self, other):
return not (self == other)
def create_context(self):
"""Create a CUDA context.
"""
met_requirement_for_device(self)
flags = 0
if self.CAN_MAP_HOST_MEMORY:
flags |= enums.CU_CTX_MAP_HOST
# Clean up any trash
self.trashing.service()
# Create new context
handle = drvapi.cu_context()
driver.cuCtxCreate(byref(handle), flags, self.id)
ctx = Context(weakref.proxy(self), handle,
_context_finalizer(self.trashing, handle))
return ctx
def reset(self):
self.trashing.clear()
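# Illustrative sketch (not called anywhere): device attributes are resolved
# lazily through ``Device.__getattr__`` using the ``DEVICE_ATTRIBUTES`` map,
# so any ``CU_DEVICE_ATTRIBUTE_*`` name (minus the prefix) can be read as a
# plain attribute; ``MAX_THREADS_PER_BLOCK`` is one such attribute.
def _example_describe_device(devnum=0):
    """Return (name, compute capability, max threads per block) of a device."""
    dev = driver.get_device(devnum)
    return dev.name, dev.compute_capability, dev.MAX_THREADS_PER_BLOCK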
def _context_finalizer(trashing, ctxhandle):
def core():
trashing.add_trash(lambda: driver.cuCtxDestroy(ctxhandle))
return core
def met_requirement_for_device(device):
if device.compute_capability < MIN_REQUIRED_CC:
raise CudaSupportError("%s has compute capability < %s" %
(device, MIN_REQUIRED_CC))
class Context(object):
"""
This object wraps a CUDA Context resource.
Contexts should not be constructed directly by user code.
"""
def __init__(self, device, handle, finalizer=None):
self.device = device
self.handle = handle
self.external_finalizer = finalizer
self.trashing = TrashService("cuda.device%d.context%x.trash" %
(self.device.id, self.handle.value))
self.allocations = utils.UniqueDict()
self.modules = utils.UniqueDict()
utils.finalize(self, self._make_finalizer())
# For storing context specific data
self.extras = {}
def _make_finalizer(self):
"""
Make a finalizer function that doesn't keep a reference to this object.
"""
allocations = self.allocations
modules = self.modules
trashing = self.trashing
external_finalizer = self.external_finalizer
def finalize():
allocations.clear()
modules.clear()
trashing.clear()
if external_finalizer is not None:
external_finalizer()
return finalize
def reset(self):
"""
Clean up all owned resources in this context.
"""
# Free owned resources
self.allocations.clear()
self.modules.clear()
# Clear trash
self.trashing.clear()
def get_memory_info(self):
"""Returns (free, total) memory in bytes in the context.
"""
free = c_size_t()
total = c_size_t()
driver.cuMemGetInfo(byref(free), byref(total))
return free.value, total.value
def get_active_blocks_per_multiprocessor(self, func, blocksize, memsize, flags=None):
"""Return occupancy of a function.
:param func: kernel for which occupancy is calculated
:param blocksize: block size the kernel is intended to be launched with
:param memsize: per-block dynamic shared memory usage intended, in bytes"""
retval = c_int()
if not flags:
driver.cuOccupancyMaxActiveBlocksPerMultiprocessor(byref(retval), func.handle, blocksize, memsize)
else:
driver.cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(byref(retval), func.handle, blocksize, memsize, flags)
return retval.value
def get_max_potential_block_size(self, func, b2d_func, memsize, blocksizelimit, flags=None):
"""Suggest a launch configuration with reasonable occupancy.
:param func: kernel for which occupancy is calculated
:param b2d_func: function that calculates how much per-block dynamic shared memory 'func'
uses based on the block size.
:param memsize: per-block dynamic shared memory usage intended, in bytes
:param blocksizelimit: maximum block size the kernel is designed to handle"""
gridsize = c_int()
blocksize = c_int()
b2d_cb = cu_occupancy_b2d_size(b2d_func)
if not flags:
driver.cuOccupancyMaxPotentialBlockSize(byref(gridsize), byref(blocksize),
func.handle,
b2d_cb,
memsize, blocksizelimit)
else:
driver.cuOccupancyMaxPotentialBlockSizeWithFlags(byref(gridsize), byref(blocksize),
func.handle, b2d_cb,
memsize, blocksizelimit, flags)
return (gridsize.value, blocksize.value)
def push(self):
"""
Pushes this context on the current CPU Thread.
"""
driver.cuCtxPushCurrent(self.handle)
def pop(self):
"""
Pops this context off the current CPU thread. Note that this context must
be at the top of the context stack, otherwise an error will occur.
"""
popped = drvapi.cu_context()
driver.cuCtxPopCurrent(byref(popped))
assert popped.value == self.handle.value
def memalloc(self, bytesize):
self.trashing.service()
ptr = drvapi.cu_device_ptr()
driver.cuMemAlloc(byref(ptr), bytesize)
_memory_finalizer = _make_mem_finalizer(driver.cuMemFree)
mem = MemoryPointer(weakref.proxy(self), ptr, bytesize,
_memory_finalizer(self, ptr))
self.allocations[ptr.value] = mem
return mem.own()
def memhostalloc(self, bytesize, mapped=False, portable=False, wc=False):
self.trashing.service()
pointer = c_void_p()
flags = 0
if mapped:
flags |= enums.CU_MEMHOSTALLOC_DEVICEMAP
if portable:
flags |= enums.CU_MEMHOSTALLOC_PORTABLE
if wc:
flags |= enums.CU_MEMHOSTALLOC_WRITECOMBINED
driver.cuMemHostAlloc(byref(pointer), bytesize, flags)
owner = None
if mapped:
_hostalloc_finalizer = _make_mem_finalizer(driver.cuMemFreeHost)
finalizer = _hostalloc_finalizer(self, pointer)
mem = MappedMemory(weakref.proxy(self), owner, pointer,
bytesize, finalizer=finalizer)
self.allocations[mem.handle.value] = mem
return mem.own()
else:
finalizer = _pinnedalloc_finalizer(self.trashing, pointer)
mem = PinnedMemory(weakref.proxy(self), owner, pointer, bytesize,
finalizer=finalizer)
return mem
def mempin(self, owner, pointer, size, mapped=False):
self.trashing.service()
if isinstance(pointer, (int, long)):
pointer = c_void_p(pointer)
if mapped and not self.device.CAN_MAP_HOST_MEMORY:
raise CudaDriverError("%s cannot map host memory" % self.device)
# possible flags are "portable" (between context)
# and "device-map" (map host memory to device thus no need
# for memory transfer).
flags = 0
if mapped:
flags |= enums.CU_MEMHOSTREGISTER_DEVICEMAP
driver.cuMemHostRegister(pointer, size, flags)
if mapped:
_mapped_finalizer = _make_mem_finalizer(driver.cuMemHostUnregister)
finalizer = _mapped_finalizer(self, pointer)
mem = MappedMemory(weakref.proxy(self), owner, pointer, size,
finalizer=finalizer)
self.allocations[mem.handle.value] = mem
return mem.own()
else:
mem = PinnedMemory(weakref.proxy(self), owner, pointer, size,
finalizer=_pinned_finalizer(self.trashing,
pointer))
return mem
def memunpin(self, pointer):
raise NotImplementedError
def create_module_ptx(self, ptx):
if isinstance(ptx, str):
ptx = ptx.encode('utf8')
image = c_char_p(ptx)
return self.create_module_image(image)
def create_module_image(self, image):
self.trashing.service()
module = load_module_image(self, image)
self.modules[module.handle.value] = module
return weakref.proxy(module)
def unload_module(self, module):
del self.modules[module.handle.value]
self.trashing.service()
def create_stream(self):
self.trashing.service()
handle = drvapi.cu_stream()
driver.cuStreamCreate(byref(handle), 0)
return Stream(weakref.proxy(self), handle,
_stream_finalizer(self.trashing, handle))
def create_event(self, timing=True):
self.trashing.service()
handle = drvapi.cu_event()
flags = 0
if not timing:
flags |= enums.CU_EVENT_DISABLE_TIMING
driver.cuEventCreate(byref(handle), flags)
return Event(weakref.proxy(self), handle,
finalizer=_event_finalizer(self.trashing, handle))
def synchronize(self):
driver.cuCtxSynchronize()
def __repr__(self):
return "<CUDA context %s of device %d>" % (self.handle, self.device.id)
def __eq__(self, other):
if isinstance(other, Context):
return self.handle == other.handle
else:
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
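# Illustrative sketch (not called anywhere): the occupancy helpers above take
# a block-size-to-dynamic-shared-memory callback; for kernels that use no
# dynamic shared memory a constant-zero callback is enough.  ``kernel`` is
# assumed to be a Function obtained from a loaded Module.
def _example_pick_launch_config(ctx, kernel, max_block_size=1024):
    """Suggest (gridsize, blocksize) for ``kernel`` on context ``ctx``."""
    return ctx.get_max_potential_block_size(
        kernel, lambda blocksize: 0, memsize=0, blocksizelimit=max_block_size)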
def load_module_image(context, image):
"""
image must be a pointer
"""
    logsz = int(os.environ.get('NUMBAPRO_CUDA_LOG_SIZE', 1024))
jitinfo = (c_char * logsz)()
jiterrors = (c_char * logsz)()
options = {
enums.CU_JIT_INFO_LOG_BUFFER: addressof(jitinfo),
enums.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: c_void_p(logsz),
enums.CU_JIT_ERROR_LOG_BUFFER: addressof(jiterrors),
enums.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: c_void_p(logsz),
enums.CU_JIT_LOG_VERBOSE: c_void_p(VERBOSE_JIT_LOG),
}
option_keys = (drvapi.cu_jit_option * len(options))(*options.keys())
option_vals = (c_void_p * len(options))(*options.values())
handle = drvapi.cu_module()
try:
driver.cuModuleLoadDataEx(byref(handle), image, len(options),
option_keys, option_vals)
except CudaAPIError as e:
msg = "cuModuleLoadDataEx error:\n%s" % jiterrors.value.decode("utf8")
raise CudaAPIError(e.code, msg)
info_log = jitinfo.value
return Module(weakref.proxy(context), handle, info_log,
_module_finalizer(context, handle))
def _make_mem_finalizer(dtor):
def mem_finalize(context, handle):
trashing = context.trashing
allocations = context.allocations
def core():
def cleanup():
if allocations:
del allocations[handle.value]
dtor(handle)
trashing.add_trash(cleanup)
return core
return mem_finalize
def _pinnedalloc_finalizer(trashing, handle):
def core():
trashing.add_trash(lambda: driver.cuMemFreeHost(handle))
return core
def _pinned_finalizer(trashing, handle):
def core():
trashing.add_trash(lambda: driver.cuMemHostUnregister(handle))
return core
def _event_finalizer(trashing, handle):
def core():
trashing.add_trash(lambda: driver.cuEventDestroy(handle))
return core
def _stream_finalizer(trashing, handle):
def core():
trashing.add_trash(lambda: driver.cuStreamDestroy(handle))
return core
def _module_finalizer(context, handle):
trashing = context.trashing
modules = context.modules
def core():
def cleanup():
            # All modules are owned by their parent Context.
            # A Module is either released by a call to
            # Context.unload_module, which clears the handle (pointer) mapping
            # (checked by the assertion below), or by Context.reset().
            # Both release the sole reference to the Module and trigger the
            # finalizer for the Module instance. The actual call to
            # cuModuleUnload is deferred to the trashing service to avoid
            # further corruption of the CUDA context if a fatal error has
            # occurred in the CUDA driver.
assert handle.value not in modules
driver.cuModuleUnload(handle)
trashing.add_trash(cleanup)
return core
class MemoryPointer(object):
__cuda_memory__ = True
def __init__(self, context, pointer, size, finalizer=None, owner=None):
self.context = context
self.device_pointer = pointer
self.size = size
self._cuda_memsize_ = size
self.is_managed = finalizer is not None
self.refct = 0
self.handle = self.device_pointer
self._owner = owner
if finalizer is not None:
self._finalizer = utils.finalize(self, finalizer)
@property
def owner(self):
return self if self._owner is None else self._owner
def own(self):
return OwnedPointer(weakref.proxy(self))
def free(self):
"""
Forces the device memory to the trash.
"""
if self.is_managed:
if not self._finalizer.alive:
raise RuntimeError("Freeing dead memory")
self._finalizer()
assert not self._finalizer.alive
def memset(self, byte, count=None, stream=0):
count = self.size if count is None else count
if stream:
driver.cuMemsetD8Async(self.device_pointer, byte, count,
stream.handle)
else:
driver.cuMemsetD8(self.device_pointer, byte, count)
def view(self, start, stop=None):
base = self.device_pointer.value + start
if stop is None:
size = self.size - start
else:
size = stop - start
assert size > 0, "zero or negative memory size"
pointer = drvapi.cu_device_ptr(base)
view = MemoryPointer(self.context, pointer, size, owner=self.owner)
return OwnedPointer(weakref.proxy(self.owner), view)
@property
def device_ctypes_pointer(self):
return self.device_pointer
class MappedMemory(MemoryPointer):
__cuda_memory__ = True
def __init__(self, context, owner, hostpointer, size,
finalizer=None):
self.owned = owner
self.host_pointer = hostpointer
devptr = drvapi.cu_device_ptr()
driver.cuMemHostGetDevicePointer(byref(devptr), hostpointer, 0)
self.device_pointer = devptr
super(MappedMemory, self).__init__(context, devptr, size,
finalizer=finalizer)
self.handle = self.host_pointer
# For buffer interface
self._buflen_ = self.size
self._bufptr_ = self.host_pointer.value
def own(self):
return MappedOwnedPointer(weakref.proxy(self))
class PinnedMemory(mviewbuf.MemAlloc):
def __init__(self, context, owner, pointer, size, finalizer=None):
self.context = context
self.owned = owner
self.size = size
self.host_pointer = pointer
self.is_managed = finalizer is not None
self.handle = self.host_pointer
# For buffer interface
self._buflen_ = self.size
self._bufptr_ = self.host_pointer.value
if finalizer is not None:
utils.finalize(self, finalizer)
def own(self):
return self
class OwnedPointer(object):
def __init__(self, memptr, view=None):
self._mem = memptr
if view is None:
self._view = self._mem
else:
assert not view.is_managed
self._view = view
mem = self._mem
def deref():
mem.refct -= 1
assert mem.refct >= 0
if mem.refct == 0:
mem.free()
self._mem.refct += 1
utils.finalize(self, deref)
def __getattr__(self, fname):
"""Proxy MemoryPointer methods
"""
return getattr(self._view, fname)
class MappedOwnedPointer(OwnedPointer, mviewbuf.MemAlloc):
pass
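# Illustrative sketch (not called anywhere): ``Context.memalloc`` returns an
# ``OwnedPointer`` proxy, and ``view`` produces another proxy that keeps the
# parent allocation alive through the reference counting implemented above.
def _example_split_allocation(ctx, nbytes=256):
    """Allocate ``nbytes`` on the device and return (whole, second_half)."""
    whole = ctx.memalloc(nbytes)
    second_half = whole.view(nbytes // 2)
    return whole, second_half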
class Stream(object):
def __init__(self, context, handle, finalizer):
self.context = context
self.handle = handle
if finalizer is not None:
utils.finalize(self, finalizer)
def __int__(self):
return self.handle.value
def __repr__(self):
return "<CUDA stream %d on %s>" % (self.handle.value, self.context)
def synchronize(self):
'''
Wait for all commands in this stream to execute. This will commit any
pending memory transfers.
'''
driver.cuStreamSynchronize(self.handle)
@contextlib.contextmanager
def auto_synchronize(self):
'''
A context manager that waits for all commands in this stream to execute
and commits any pending memory transfers upon exiting the context.
'''
yield self
self.synchronize()
class Event(object):
def __init__(self, context, handle, finalizer=None):
self.context = context
self.handle = handle
if finalizer is not None:
utils.finalize(self, finalizer)
def query(self):
"""
Returns True if all work before the most recent record has completed;
otherwise, returns False.
"""
try:
driver.cuEventQuery(self.handle)
except CudaAPIError as e:
if e.code == enums.CUDA_ERROR_NOT_READY:
return False
else:
raise
else:
return True
def record(self, stream=0):
"""
Set the record point of the event to the current point in the given
stream.
The event will be considered to have occurred when all work that was
queued in the stream at the time of the call to ``record()`` has been
completed.
"""
hstream = stream.handle if stream else 0
driver.cuEventRecord(self.handle, hstream)
def synchronize(self):
"""
Synchronize the host thread for the completion of the event.
"""
driver.cuEventSynchronize(self.handle)
def wait(self, stream=0):
"""
        All future work submitted to the stream will wait until the event
        completes.
"""
hstream = stream.handle if stream else 0
flags = 0
driver.cuStreamWaitEvent(hstream, self.handle, flags)
def elapsed_time(self, evtend):
return event_elapsed_time(self, evtend)
def event_elapsed_time(evtstart, evtend):
'''
Compute the elapsed time between two events in milliseconds.
'''
msec = c_float()
driver.cuEventElapsedTime(byref(msec), evtstart.handle, evtend.handle)
return msec.value
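# Illustrative sketch (not called anywhere): events created through
# ``Context.create_event`` can bracket work on a stream and be passed to
# ``event_elapsed_time`` to measure it in milliseconds.  ``submit_work`` is a
# hypothetical callable that enqueues kernels/copies on ``stream``.
def _example_time_stream_work(ctx, stream, submit_work):
    """Return the elapsed GPU time, in ms, of the work queued by ``submit_work``."""
    start = ctx.create_event(timing=True)
    end = ctx.create_event(timing=True)
    start.record(stream)
    submit_work()
    end.record(stream)
    end.synchronize()
    return event_elapsed_time(start, end)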
class Module(object):
def __init__(self, context, handle, info_log, finalizer=None):
self.context = context
self.handle = handle
self.info_log = info_log
if finalizer is not None:
self._finalizer = utils.finalize(self, finalizer)
def unload(self):
self.context.unload_module(self)
def get_function(self, name):
handle = drvapi.cu_function()
driver.cuModuleGetFunction(byref(handle), self.handle,
name.encode('utf8'))
return Function(weakref.proxy(self), handle, name)
def get_global_symbol(self, name):
ptr = drvapi.cu_device_ptr()
size = drvapi.c_size_t()
driver.cuModuleGetGlobal(byref(ptr), byref(size), self.handle,
name.encode('utf8'))
return MemoryPointer(self.context, ptr, size), size.value
FuncAttr = namedtuple("FuncAttr", ["regs", "shared", "local", "const",
"maxthreads"])
class Function(object):
griddim = 1, 1, 1
blockdim = 1, 1, 1
stream = 0
sharedmem = 0
def __init__(self, module, handle, name):
self.module = module
self.handle = handle
self.name = name
self.attrs = self._read_func_attr_all()
def __repr__(self):
return "<CUDA function %s>" % self.name
def cache_config(self, prefer_equal=False, prefer_cache=False,
prefer_shared=False):
prefer_equal = prefer_equal or (prefer_cache and prefer_shared)
if prefer_equal:
flag = enums.CU_FUNC_CACHE_PREFER_EQUAL
elif prefer_cache:
flag = enums.CU_FUNC_CACHE_PREFER_L1
elif prefer_shared:
flag = enums.CU_FUNC_CACHE_PREFER_SHARED
else:
flag = enums.CU_FUNC_CACHE_PREFER_NONE
driver.cuFuncSetCacheConfig(self.handle, flag)
def configure(self, griddim, blockdim, sharedmem=0, stream=0):
while len(griddim) < 3:
griddim += (1,)
while len(blockdim) < 3:
blockdim += (1,)
inst = copy.copy(self) # shallow clone the object
inst.griddim = griddim
inst.blockdim = blockdim
inst.sharedmem = sharedmem
if stream:
inst.stream = stream
else:
inst.stream = 0
return inst
def __call__(self, *args):
'''
        *args -- Must be either ctypes objects or DevicePointer instances.
'''
if self.stream:
streamhandle = self.stream.handle
else:
streamhandle = None
launch_kernel(self.handle, self.griddim, self.blockdim,
self.sharedmem, streamhandle, args)
@property
def device(self):
return self.module.context.device
def _read_func_attr(self, attrid):
"""
Read CUfunction attributes
"""
retval = c_int()
driver.cuFuncGetAttribute(byref(retval), attrid, self.handle)
return retval.value
def _read_func_attr_all(self):
nregs = self._read_func_attr(enums.CU_FUNC_ATTRIBUTE_NUM_REGS)
cmem = self._read_func_attr(enums.CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES)
lmem = self._read_func_attr(enums.CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES)
smem = self._read_func_attr(enums.CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES)
maxtpb = self._read_func_attr(
enums.CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK)
return FuncAttr(regs=nregs, const=cmem, local=lmem, shared=smem,
maxthreads=maxtpb)
def launch_kernel(cufunc_handle, griddim, blockdim, sharedmem, hstream, args):
gx, gy, gz = griddim
bx, by, bz = blockdim
param_vals = []
for arg in args:
if is_device_memory(arg):
param_vals.append(addressof(device_ctypes_pointer(arg)))
else:
param_vals.append(addressof(arg))
params = (c_void_p * len(param_vals))(*param_vals)
driver.cuLaunchKernel(cufunc_handle,
gx, gy, gz,
bx, by, bz,
sharedmem,
hstream,
params,
None)
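# Illustrative sketch (not called anywhere): ``launch_kernel`` is normally
# driven through ``Function.configure`` and ``Function.__call__``; arguments
# must be ctypes objects or device memory objects recognized by
# ``is_device_memory``.  ``devary`` stands for a hypothetical device allocation.
def _example_configured_launch(func, devary, n):
    """Launch ``func`` over ``n`` elements with a fixed 128-thread block."""
    nblocks = (n + 127) // 128
    configured = func.configure((nblocks, 1, 1), (128, 1, 1))
    configured(devary, c_int(n))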
FILE_EXTENSION_MAP = {
'o': enums.CU_JIT_INPUT_OBJECT,
'ptx': enums.CU_JIT_INPUT_PTX,
'a': enums.CU_JIT_INPUT_LIBRARY,
'cubin': enums.CU_JIT_INPUT_CUBIN,
    'fatbin': enums.CU_JIT_INPUT_FATBINARY,
}
class Linker(object):
def __init__(self):
logsz = int(os.environ.get('NUMBAPRO_CUDA_LOG_SIZE', 1024))
linkerinfo = (c_char * logsz)()
linkererrors = (c_char * logsz)()
options = {
enums.CU_JIT_INFO_LOG_BUFFER: addressof(linkerinfo),
enums.CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES: c_void_p(logsz),
enums.CU_JIT_ERROR_LOG_BUFFER: addressof(linkererrors),
enums.CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES: c_void_p(logsz),
enums.CU_JIT_LOG_VERBOSE: c_void_p(1),
}
raw_keys = list(options.keys()) + [enums.CU_JIT_TARGET_FROM_CUCONTEXT]
raw_values = list(options.values())
del options
option_keys = (drvapi.cu_jit_option * len(raw_keys))(*raw_keys)
option_vals = (c_void_p * len(raw_values))(*raw_values)
self.handle = handle = drvapi.cu_link_state()
driver.cuLinkCreate(len(raw_keys), option_keys, option_vals,
byref(self.handle))
utils.finalize(self, driver.cuLinkDestroy, handle)
self.linker_info_buf = linkerinfo
self.linker_errors_buf = linkererrors
self._keep_alive = [linkerinfo, linkererrors, option_keys, option_vals]
@property
def info_log(self):
return self.linker_info_buf.value.decode('utf8')
@property
def error_log(self):
return self.linker_errors_buf.value.decode('utf8')
def add_ptx(self, ptx, name='<cudapy-ptx>'):
ptxbuf = c_char_p(ptx)
namebuf = c_char_p(name.encode('utf8'))
self._keep_alive += [ptxbuf, namebuf]
try:
driver.cuLinkAddData(self.handle, enums.CU_JIT_INPUT_PTX,
ptxbuf, len(ptx), namebuf, 0, None, None)
except CudaAPIError as e:
raise LinkerError("%s\n%s" % (e, self.error_log))
def add_file(self, path, kind):
pathbuf = c_char_p(path.encode("utf8"))
self._keep_alive.append(pathbuf)
try:
driver.cuLinkAddFile(self.handle, kind, pathbuf, 0, None, None)
except CudaAPIError as e:
raise LinkerError("%s\n%s" % (e, self.error_log))
def add_file_guess_ext(self, path):
ext = path.rsplit('.', 1)[1]
kind = FILE_EXTENSION_MAP[ext]
self.add_file(path, kind)
def complete(self):
'''
Returns (cubin, size)
        cubin is a pointer to an internal buffer of cubin data owned
        by the linker; thus, it should be loaded before the linker
        is destroyed.
'''
cubin = c_void_p(0)
size = c_size_t(0)
try:
driver.cuLinkComplete(self.handle, byref(cubin), byref(size))
except CudaAPIError as e:
raise LinkerError("%s\n%s" % (e, self.error_log))
size = size.value
assert size > 0, 'linker returned a zero sized cubin'
del self._keep_alive[:]
return cubin, size
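# Illustrative sketch (not called anywhere): the typical Linker workflow is to
# add PTX (or files), complete the link, and immediately load the resulting
# cubin into a context, since the buffer is owned by the linker object.
def _example_link_and_load(ctx, ptx_bytes):
    """Link ``ptx_bytes`` and return the loaded Module for context ``ctx``."""
    linker = Linker()
    linker.add_ptx(ptx_bytes)
    cubin, size = linker.complete()
    # Load before ``linker`` goes out of scope and its finalizer runs.
    module = ctx.create_module_image(cubin)
    return module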
# -----------------------------------------------------------------------------
def _device_pointer_attr(devmem, attr, odata):
    """Query attribute on the device pointer
    """
    # The wrapped driver call raises CudaAPIError on failure, so no explicit
    # return-code check is needed here.
    driver.cuPointerGetAttribute(byref(odata), attr,
                                 device_ctypes_pointer(devmem))
def device_pointer_type(devmem):
"""Query the device pointer type: host, device, array, unified?
"""
ptrtype = c_int(0)
_device_pointer_attr(devmem, enums.CU_POINTER_ATTRIBUTE_MEMORY_TYPE,
ptrtype)
map = {
enums.CU_MEMORYTYPE_HOST: 'host',
enums.CU_MEMORYTYPE_DEVICE: 'device',
enums.CU_MEMORYTYPE_ARRAY: 'array',
enums.CU_MEMORYTYPE_UNIFIED: 'unified',
}
return map[ptrtype.value]
def device_extents(devmem):
"""Find the extents (half open begin and end pointer) of the underlying
device memory allocation.
NOTE: it always returns the extents of the allocation but the extents
of the device memory view that can be a subsection of the entire allocation.
"""
s = drvapi.cu_device_ptr()
n = c_size_t()
devptr = device_ctypes_pointer(devmem)
driver.cuMemGetAddressRange(byref(s), byref(n), devptr)
s, n = s.value, n.value
return s, s + n
def device_memory_size(devmem):
"""Check the memory size of the device memory.
The result is cached in the device memory object.
It may query the driver for the memory size of the device memory allocation.
"""
sz = getattr(devmem, '_cuda_memsize_', None)
if sz is None:
s, e = device_extents(devmem)
sz = e - s
devmem._cuda_memsize_ = sz
assert sz > 0, "zero length array"
return sz
def host_pointer(obj):
"""
    NOTE: The underlying data pointer from the host data buffer is used and
    it should not be changed until the operation, which may be asynchronous,
    completes.
"""
if isinstance(obj, (int, long)):
return obj
forcewritable = isinstance(obj, np.void)
return mviewbuf.memoryview_get_buffer(obj, forcewritable)
def host_memory_extents(obj):
"Returns (start, end) the start and end pointer of the array (half open)."
return mviewbuf.memoryview_get_extents(obj)
def memory_size_from_info(shape, strides, itemsize):
"""et the byte size of a contiguous memory buffer given the shape, strides
and itemsize.
"""
assert len(shape) == len(strides), "# dim mismatch"
ndim = len(shape)
s, e = mviewbuf.memoryview_get_extents_info(shape, strides, ndim, itemsize)
return e - s
def host_memory_size(obj):
"Get the size of the memory"
s, e = host_memory_extents(obj)
    assert e >= s, "memory extent of negative size"
return e - s
def device_pointer(obj):
"Get the device pointer as an integer"
return device_ctypes_pointer(obj).value
def device_ctypes_pointer(obj):
"Get the ctypes object for the device pointer"
if obj is None:
return c_void_p(0)
require_device_memory(obj)
return obj.device_ctypes_pointer
def is_device_memory(obj):
"""All CUDA memory object is recognized as an instance with the attribute
"__cuda_memory__" defined and its value evaluated to True.
All CUDA memory object should also define an attribute named
"device_pointer" which value is an int(or long) object carrying the pointer
value of the device memory address. This is not tested in this method.
"""
return getattr(obj, '__cuda_memory__', False)
def require_device_memory(obj):
"""A sentry for methods that accept CUDA memory object.
"""
if not is_device_memory(obj):
raise Exception("Not a CUDA memory object.")
def device_memory_depends(devmem, *objs):
    """Add dependencies to the device memory.
    Mainly used for creating structures that point to other device memory,
    so that the referenced objects are not garbage collected and released.
    """
    depset = getattr(devmem, "_depends_", [])
    depset.extend(objs)
    # Attach the list back onto the object so the dependencies are kept alive
    # even when the attribute did not exist before this call.
    devmem._depends_ = depset
def host_to_device(dst, src, size, stream=0):
"""
    NOTE: The underlying data pointer from the host data buffer is used and
    it should not be changed until the operation, which may be asynchronous,
    completes.
"""
varargs = []
if stream:
assert isinstance(stream, Stream)
fn = driver.cuMemcpyHtoDAsync
varargs.append(stream.handle)
else:
fn = driver.cuMemcpyHtoD
fn(device_pointer(dst), host_pointer(src), size, *varargs)
def device_to_host(dst, src, size, stream=0):
"""
    NOTE: The underlying data pointer from the host data buffer is used and
    it should not be changed until the operation, which may be asynchronous,
    completes.
"""
varargs = []
if stream:
assert isinstance(stream, Stream)
fn = driver.cuMemcpyDtoHAsync
varargs.append(stream.handle)
else:
fn = driver.cuMemcpyDtoH
fn(host_pointer(dst), device_pointer(src), size, *varargs)
def device_to_device(dst, src, size, stream=0):
"""
    NOTE: The underlying data pointer from the host data buffer is used and
    it should not be changed until the operation, which may be asynchronous,
    completes.
"""
varargs = []
if stream:
assert isinstance(stream, Stream)
fn = driver.cuMemcpyDtoDAsync
varargs.append(stream.handle)
else:
fn = driver.cuMemcpyDtoD
fn(device_pointer(dst), device_pointer(src), size, *varargs)
def device_memset(dst, val, size, stream=0):
"""Memset on the device.
If stream is not zero, asynchronous mode is used.
dst: device memory
val: byte value to be written
size: number of byte to be written
stream: a CUDA stream
"""
varargs = []
if stream:
assert isinstance(stream, Stream)
fn = driver.cuMemsetD8Async
varargs.append(stream.handle)
else:
fn = driver.cuMemsetD8
fn(device_pointer(dst), val, size, *varargs)
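# Illustrative sketch (not called anywhere): the copy helpers above work on
# any host buffer accepted by ``host_pointer`` (e.g. a contiguous numpy array)
# and any device memory object accepted by ``device_pointer``.
def _example_roundtrip(ctx, ary):
    """Copy a contiguous numpy array to the device and back, returning the copy."""
    nbytes = ary.size * ary.dtype.itemsize
    dmem = ctx.memalloc(nbytes)
    host_to_device(dmem, ary, nbytes)
    out = np.empty_like(ary)
    device_to_host(out, dmem, nbytes)
    return out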
def profile_start():
'''
Enable profile collection in the current context.
'''
driver.cuProfilerStart()
def profile_stop():
'''
Disable profile collection in the current context.
'''
driver.cuProfilerStop()
@contextlib.contextmanager
def profiling():
"""
Context manager that enables profiling on entry and disables profiling on
exit.
"""
profile_start()
yield
profile_stop()
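# Illustrative sketch (not called anywhere): profiler data collection can be
# limited to a region of interest by wrapping it in the ``profiling`` context
# manager defined above.
def _example_profile_region(run):
    """Collect profiler data only while ``run()`` executes."""
    with profiling():
        run()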
|
|
import logging
import os
import socket
import mock
from time import sleep
from unittest import TestCase
import zmq
from zmcat import ZMCat, tool
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
TYPES = ("pub", "sub", "push", "pull")
class GetOutOfLoopException(Exception):
pass
class FakeArgs:
type = None
key = "ZMCAT"
uri = "ipc:///dev/null"
bind = False
def port_open(port):
"""
Check to see if a port is open by connecting a vanilla socket to it.
"""
log.debug("Creating vanilla socket")
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
log.debug("Connecting vanilla socket to port %d" % port)
result = sock.connect_ex(("127.0.0.1", port))
log.debug("Result (0 connected, >0 did not): %d" % result)
sock.close()
return result == 0
def get_random_bound_zmq_socket(typ):
"""
Find a high port not in use and bind to it.
"""
zmcat = ZMCat()
zmq_sock = None
port = 49152
while not zmq_sock:
if port >= 65536:
raise ValueError("No more ports left to try!")
try:
zmq_sock = zmcat._get_bound_socket(
typ, "tcp://127.0.0.1:%d" % port)
log.debug("Socket bound to port %d" % port)
except zmq.ZMQError as e:
if e.errno == zmq.EADDRINUSE:
port += 1
            else:
                # zmq_sock is still None here (the bind failed), so there is
                # nothing to close before re-raising.
                raise
return zmq_sock, port
class ZMCatToolTestCase(TestCase):
def truncated_file(self, path):
"""
        Truncate the file at `path` and return the path to the now-empty file.
"""
with open(path, "w"):
pass
return path
def test_get_socket(self):
"""
_get_socket() should provide a ZeroMQ socket of the desired type.
"""
zmcat = ZMCat()
for typ in (zmq.PUSH, zmq.PULL, zmq.PUB, zmq.SUB):
socket = zmcat._get_socket(typ)
self.assertEqual(
socket.TYPE, typ, "Socket type should be what we asked for")
def test_get_bound_socket(self):
"""
_get_bound_socket() should provide a ZeroMQ socket bound to interface.
"""
zmq_sock, port = get_random_bound_zmq_socket(zmq.PUB)
self.assertTrue(zmq_sock, "Socket must be able to bind to a port")
try:
self.assertTrue(
port_open(port),
"Port should be open and accepting conections")
finally:
zmq_sock.close()
def test_get_connected_socket(self):
"""
_get_connected_socket() should provide a connected ZeroMQ socket.
"""
zmcat = ZMCat()
uri = "ipc:///tmp/test-get-connected-socket"
bound_sock = zmcat._get_bound_socket(zmq.PUB, uri)
connected_sock = zmcat._get_connected_socket(zmq.SUB, uri)
msg = u"Remember, Sully, when I promised to kill you last? I lied."
prefix = u"ARNIE"
msg = u"%s%s" % (prefix, msg)
try:
connected_sock.setsockopt_string(zmq.SUBSCRIBE, prefix)
sleep(0.1)
bound_sock.send_unicode(msg)
sleep(0.1)
self.assertEqual(
connected_sock.recv(zmq.NOBLOCK),
msg.encode())
finally:
bound_sock.close()
connected_sock.close()
def test_pub(self):
"""
pub() should set up a PUB socket and send its input through it.
"""
prefix = u"ARNIE"
msg = u"Who is your daddy and what does he do?"
# Mock inputf to return without standard input
with mock.patch("zmcat.tool.inputf", side_effect=[msg]):
zmcat = ZMCat(key=prefix)
uri = "ipc:///tmp/test-pub"
sub_sock = zmcat._get_connected_socket(zmq.SUB, uri)
sub_sock.setsockopt_string(zmq.SUBSCRIBE, prefix)
try:
zmcat.pub(uri)
except StopIteration:
pass
sleep(0.1)
exp_msg = (u"%s%s" % (prefix, msg)).encode()
self.assertEqual(sub_sock.recv(zmq.NOBLOCK), exp_msg)
def test_sub(self):
"""
sub() should set up a SUB socket and send its messages to output.
"""
output_file = "/tmp/test-sub.output"
def save_and_raise(msg):
"""
Save the msg to a file and raise an exception to get out of the
while True loop in sub().
"""
with open(output_file, "w") as f:
f.write(msg)
raise GetOutOfLoopException()
zmcat = ZMCat(output=save_and_raise)
uri = "ipc:///tmp/test-sub"
msg = u"Stop whining!"
try:
# Mock the reception of a packet from ZMQ
with mock.patch("zmq.sugar.socket.Socket.recv", return_value=msg):
try:
zmcat.sub(uri)
except GetOutOfLoopException:
pass
with open(output_file) as f:
self.assertEqual(f.read(), msg)
finally:
try:
os.unlink(output_file)
except OSError:
pass # Oh well
def test_push_connected(self):
"""
push() should set up a PUSH socket and send its input through it.
"""
msg = u"I'm a cop, you idiot!"
with mock.patch("zmcat.tool.inputf", side_effect=[msg]):
zmcat = ZMCat()
uri = "ipc:///tmp/test-push"
pull_sock = zmcat._get_bound_socket(zmq.PULL, uri)
try:
zmcat.push(uri, bind=False)
except StopIteration:
pass
sleep(0.1)
self.assertEqual(pull_sock.recv(zmq.NOBLOCK), msg.encode())
def test_push_bound(self):
"""
push() should set up a PUSH socket and send its input through it.
"""
msg = u"I'm a cop, you idiot!"
with mock.patch("zmcat.tool.inputf", side_effect=[msg]):
zmcat = ZMCat()
uri = "ipc:///tmp/test-push"
pull_sock = zmcat._get_connected_socket(zmq.PULL, uri)
try:
zmcat.push(uri, bind=True)
except StopIteration:
pass
sleep(0.1)
self.assertEqual(pull_sock.recv(zmq.NOBLOCK), msg.encode())
def test_pull_connected(self):
"""
pull() should set up a PULL socket and print its messages to output.
"""
output_file = "/tmp/test-sub.output"
def save_and_raise(msg):
"""
Save the msg to a file and raise an exception to get out of the
while True loop in pull().
"""
with open(output_file, "w") as f:
f.write(msg)
raise GetOutOfLoopException()
zmcat = ZMCat(output=save_and_raise)
uri = "ipc:///tmp/test-pull"
msg = u"You son of a bitch. How are you?"
try:
with mock.patch("zmq.sugar.socket.Socket.recv", return_value=msg):
try:
zmcat.pull(uri, bind=False)
except GetOutOfLoopException:
pass
with open(output_file) as f:
self.assertEqual(f.read(), msg)
finally:
try:
os.unlink(output_file)
except OSError:
pass # Oh well
def test_pull_bound(self):
"""
pull() should set up a PULL socket and print its messages to output.
"""
output_file = "/tmp/test-sub.output"
def save_and_raise(msg):
"""
Save the msg to a file and raise an exception to get out of the
while True loop in pull().
"""
with open(output_file, "w") as f:
f.write(msg)
raise GetOutOfLoopException()
zmcat = ZMCat(output=save_and_raise)
uri = "ipc:///tmp/test-pull"
msg = u"You son of a bitch. How are you?"
try:
with mock.patch("zmq.sugar.socket.Socket.recv", return_value=msg):
try:
zmcat.pull(uri, bind=True)
except GetOutOfLoopException:
pass
with open(output_file) as f:
self.assertEqual(f.read(), msg)
finally:
try:
os.unlink(output_file)
except OSError:
pass # Oh well
def test_req(self):
"""
req() should set up a REQ socket and push its input through it. It
should wait for a response and send it to output.
"""
output_file = self.truncated_file("/tmp/test-req.output")
req = u"Milk is for babies"
rep = u"Real men drink beer!"
uri = "ipc:///tmp/test-req"
def check_input(msg):
"""
Make sure `msg` is what we expect.
"""
self.assertEqual(msg, req)
zmcat = ZMCat(
input=lambda: req,
output=lambda msg: open(output_file, "w").write(msg)
)
try:
with mock.patch("zmq.sugar.socket.Socket.recv", return_value=rep):
with mock.patch(
"zmq.sugar.socket.Socket.send_unicode",
side_effect=check_input):
zmcat.req(uri)
with open(output_file) as f:
self.assertEqual(f.read(), rep)
finally:
try:
os.unlink(output_file)
except OSError:
pass # Oh well
def test_rep(self):
"""
rep() should echo and output whatever is REQ'd to it.
"""
output_file = self.truncated_file("/tmp/test-rep.output")
def save_and_raise(msg):
"""
Save the msg to a file and raise an exception to get out of the
while True loop in rep().
"""
with open(output_file, "w") as f:
f.write(msg)
raise GetOutOfLoopException()
zmcat = ZMCat(output=save_and_raise)
uri = "ipc:///tmp/test-rep"
msg = "Echo!"
try:
with mock.patch("zmq.sugar.socket.Socket.recv", return_value=msg):
with mock.patch("zmq.sugar.socket.Socket.send") as mock_send:
try:
zmcat.rep(uri)
except GetOutOfLoopException:
pass
self.assertTrue(mock_send.called)
self.assertEqual(mock_send.call_args[0][0], msg)
with open(output_file) as f:
self.assertEqual(f.read(), msg)
finally:
try:
os.unlink(output_file)
except OSError:
pass # Oh well
def test_main_calls_correct_function(self):
"""
main() should call the correct function when given a type
"""
fake_args = FakeArgs()
fake_function = mock.Mock()
for t in TYPES:
fake_args.type = t
with mock.patch(
"argparse.ArgumentParser.parse_args",
return_value=fake_args):
with mock.patch("zmcat.tool.ZMCat.%s" % t, fake_function):
tool.main()
self.assertTrue(fake_function.called)
def test_main_handles_eof_error(self):
"""
main() should handle EOFError exception from the function it calls
"""
fake_args = FakeArgs()
fake_args.type = "pub"
with mock.patch(
"argparse.ArgumentParser.parse_args", return_value=fake_args):
with mock.patch("zmcat.tool.ZMCat.pub", side_effect=EOFError):
try:
tool.main()
except EOFError:
self.fail("Should catch EOFError and return")
def test_main_handles_keyboard_interrupt(self):
"""
        main() should handle KeyboardInterrupt from the function it calls
"""
fake_args = FakeArgs()
fake_args.type = "pub"
with mock.patch(
"argparse.ArgumentParser.parse_args", return_value=fake_args):
with mock.patch(
"zmcat.tool.ZMCat.pub", side_effect=KeyboardInterrupt):
try:
tool.main()
except KeyboardInterrupt:
self.fail("Should catch KeyboardInterrupt and return")
|
|
"""
The utils module handles helper routines for equilibrium calculation.
"""
import warnings
import pycalphad.variables as v
from pycalphad.core.halton import halton
from pycalphad.core.constants import MIN_SITE_FRACTION
from sympy.utilities.lambdify import lambdify
from sympy import Symbol
import numpy as np
import operator
import functools
import itertools
import collections
from collections.abc import Iterable, Mapping
def point_sample(comp_count, pdof=10):
"""
Sample 'pdof * (sum(comp_count) - len(comp_count))' points in
composition space for the sublattice configuration specified
by 'comp_count'. Points are sampled quasi-randomly from a Halton sequence.
A Halton sequence is like a uniform random distribution, but the
result will always be the same for a given 'comp_count' and 'pdof'.
Note: For systems with only one component, only one point will be
returned, regardless of 'pdof'. This is because the degrees of freedom
are zero for that case.
Parameters
----------
comp_count : list
Number of components in each sublattice.
pdof : int
Number of points to sample per degree of freedom.
Returns
-------
ndarray of generated points satisfying the mass balance.
Examples
--------
>>> comps = [8,1] # 8 components in sublattice 1; only 1 in sublattice 2
    >>> pts = point_sample(comps, pdof=20) # 7 d.o.f, returns a 140x9 ndarray
"""
# Generate Halton sequence with appropriate dimensions and size
pts = halton(sum(comp_count),
pdof * (sum(comp_count) - len(comp_count)), scramble=True)
# Convert low-discrepancy sequence to normalized exponential
# This will be uniformly distributed over the simplices
pts = -np.log(pts)
cur_idx = 0
for ctx in comp_count:
end_idx = cur_idx + ctx
pts[:, cur_idx:end_idx] /= pts[:, cur_idx:end_idx].sum(axis=1)[:, None]
cur_idx = end_idx
if len(pts) == 0:
pts = np.atleast_2d([1] * len(comp_count))
return pts
def make_callable(model, variables, mode=None):
"""
Take a SymPy object and create a callable function.
Parameters
----------
model, SymPy object
Abstract representation of function
variables, list
Input variables, ordered in the way the return function will expect
mode, ['numpy', 'numba', 'sympy'], optional
Method to use when 'compiling' the function. SymPy mode is
slow and should only be used for debugging. If Numba is installed,
it can offer speed-ups when calling the energy function many
times on multi-core CPUs.
Returns
-------
Function that takes arguments in the same order as 'variables'
and returns the energy according to 'model'.
Examples
--------
None yet.
"""
energy = None
if mode is None:
mode = 'numpy'
if mode == 'sympy':
energy = lambda *vs: model.subs(zip(variables, vs)).evalf()
else:
energy = lambdify(tuple(variables), model, dummify=True,
modules=mode)
return energy
def sizeof_fmt(num, suffix='B'):
"""
Human-readable string for a number of bytes.
http://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
"""
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1000.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1000.0
return "%.1f%s%s" % (num, 'Y', suffix)
def unpack_condition(tup):
"""
Convert a condition to a list of values.
Notes
-----
Rules for keys of conditions dicts:
(1) If it's numeric, treat as a point value
(2) If it's a tuple with one element, treat as a point value
(3) If it's a tuple with two elements, treat as lower/upper limits and guess a step size.
(4) If it's a tuple with three elements, treat as lower/upper/step
(5) If it's a list, ndarray or other non-tuple ordered iterable, use those values directly.
"""
if isinstance(tup, tuple):
if len(tup) == 1:
return [float(tup[0])]
elif len(tup) == 2:
return np.arange(tup[0], tup[1], dtype=np.float_)
elif len(tup) == 3:
return np.arange(tup[0], tup[1], tup[2], dtype=np.float_)
else:
raise ValueError('Condition tuple is length {}'.format(len(tup)))
elif isinstance(tup, Iterable):
return [float(x) for x in tup]
else:
return [float(tup)]
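# Illustrative sketch (not part of the original module, never called here):
# the three most common condition forms described in the docstring above,
# with the values they unpack to.
def _example_unpack_conditions():
    """Show how point, ranged, and list-valued conditions are unpacked."""
    point = unpack_condition(300)               # -> [300.0]
    ranged = unpack_condition((300, 1000, 10))  # -> np.arange(300, 1000, 10)
    listed = unpack_condition([1, 2, 3])        # -> [1.0, 2.0, 3.0]
    return point, ranged, listed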
def unpack_phases(phases):
"Convert a phases list/dict into a sorted list."
active_phases = None
if isinstance(phases, (list, tuple, set)):
active_phases = sorted(phases)
elif isinstance(phases, dict):
active_phases = sorted(phases.keys())
elif type(phases) is str:
active_phases = [phases]
return active_phases
def generate_dof(phase, active_comps):
"""
Accept a Phase object and a set() of the active components.
Return a tuple of variable names and the sublattice degrees of freedom.
"""
msg = "generate_dof is deprecated and will be removed in a future version "
msg += "of pycalphad. The correct way to determine the degrees of freedom "
msg += "of a particular 'active' phase is to use Model.constituents."
warnings.warn(msg, FutureWarning)
variables = []
sublattice_dof = []
for idx, sublattice in enumerate(phase.constituents):
dof = 0
for component in sorted(set(sublattice).intersection(active_comps)):
variables.append(v.SiteFraction(phase.name.upper(), idx, component))
dof += 1
sublattice_dof.append(dof)
return variables, sublattice_dof
def endmember_matrix(dof, vacancy_indices=None):
"""
Accept the number of components in each sublattice.
Return a matrix corresponding to the compositions of all endmembers.
Parameters
----------
dof : list of int
Number of components in each sublattice.
vacancy_indices, list of list of int, optional
If vacancies are present in every sublattice, specify their indices
in each sublattice to ensure the "pure vacancy" endmembers are excluded.
Examples
--------
Sublattice configuration like: `(AL, NI, VA):(AL, NI, VA):(VA)`
>>> endmember_matrix([3,3,1], vacancy_indices=[[2], [2], [0]])
"""
total_endmembers = functools.reduce(operator.mul, dof, 1)
res_matrix = np.empty((total_endmembers, sum(dof)), dtype=np.float_)
dof_arrays = [np.eye(d).tolist() for d in dof]
row_idx = 0
for row in itertools.product(*dof_arrays):
res_matrix[row_idx, :] = np.concatenate(row, axis=0)
row_idx += 1
if vacancy_indices is not None and len(vacancy_indices) == len(dof):
dof_adj = np.array([sum(dof[0:i]) for i in range(len(dof))])
for vacancy_em in itertools.product(*vacancy_indices):
indices = np.array(vacancy_em) + dof_adj
row_idx_to_delete = np.where(np.all(res_matrix[:, indices] == 1,
axis=1))
res_matrix = np.delete(res_matrix, (row_idx_to_delete), axis=0)
# Adjust site fractions to the numerical limit
cur_idx = 0
res_matrix[res_matrix == 0] = MIN_SITE_FRACTION
for ctx in dof:
end_idx = cur_idx + ctx
res_matrix[:, cur_idx:end_idx] /= res_matrix[:, cur_idx:end_idx].sum(axis=1)[:, None]
cur_idx = end_idx
return res_matrix
def unpack_kwarg(kwarg_obj, default_arg=None):
"""
Keyword arguments in pycalphad can be passed as a constant value, a
dict of phase names and values, or a list containing both of these. If
the latter, then the dict is checked first; if the phase of interest is not
there, then the constant value is used.
This function is a way to construct defaultdicts out of keyword arguments.
Parameters
----------
kwarg_obj : dict, iterable, or None
Argument to unpack into a defaultdict
default_arg : object
Default value to use if iterable isn't specified
Returns
-------
defaultdict for the keyword argument of interest
Examples
--------
    >>> test_func = lambda **kwargs: print(unpack_kwarg(kwargs.get('opt')))
>>> test_func(opt=100)
>>> test_func(opt={'FCC_A1': 50, 'BCC_B2': 10})
>>> test_func(opt=[{'FCC_A1': 30}, 200])
>>> test_func()
    >>> test_func2 = lambda **kwargs: print(unpack_kwarg(kwargs.get('opt'), default_arg=1))
>>> test_func2()
"""
new_dict = collections.defaultdict(lambda: default_arg)
if isinstance(kwarg_obj, Mapping):
new_dict.update(kwarg_obj)
# kwarg_obj is a list containing a dict and a default
# For now at least, we don't treat ndarrays the same as other iterables
# ndarrays are assumed to be numeric arrays containing "default values", so don't match here
elif isinstance(kwarg_obj, Iterable) and not isinstance(kwarg_obj, np.ndarray):
for element in kwarg_obj:
if isinstance(element, Mapping):
new_dict.update(element)
else:
# element=element syntax to silence var-from-loop warning
new_dict = collections.defaultdict(
lambda element=element: element, new_dict)
elif kwarg_obj is None:
pass
else:
new_dict = collections.defaultdict(lambda: kwarg_obj)
return new_dict
def unpack_components(dbf, comps):
"""
Parameters
----------
dbf : Database
Thermodynamic database containing elements and species.
comps : list
Names of components to consider in the calculation.
Returns
-------
set
Set of Species objects
"""
# Constrain possible components to those within phase's d.o.f
# Assume for the moment that comps contains a list of pure element strings
# We want to add all the species which can be created by a combination of
# the user-specified pure elements
species_dict = {s.name: s for s in dbf.species}
possible_comps = {v.Species(species_dict.get(x, x)) for x in comps}
desired_active_pure_elements = [list(x.constituents.keys()) for x in possible_comps]
# Flatten nested list
desired_active_pure_elements = [el.upper() for constituents in desired_active_pure_elements for el in constituents]
eligible_species_from_database = {x for x in dbf.species if
set(x.constituents.keys()).issubset(desired_active_pure_elements)}
return eligible_species_from_database
def get_pure_elements(dbf, comps):
"""
Return a list of pure elements in the system.
Parameters
----------
dbf : Database
A Database object
comps : list
A list of component names (species and pure elements)
Returns
-------
list
A list of pure elements in the Database
"""
comps = sorted(unpack_components(dbf, comps))
components = [x for x in comps]
desired_active_pure_elements = [list(x.constituents.keys()) for x in components]
desired_active_pure_elements = [el.upper() for constituents in desired_active_pure_elements for el in constituents]
pure_elements = sorted(set([x for x in desired_active_pure_elements if x != 'VA']))
return pure_elements
def filter_phases(dbf, comps, candidate_phases=None):
"""Return phases that are valid for equilibrium calculations for the given database and components
Filters out phases that
* Have no active components in any sublattice of a phase
* Are disordered phases in an order-disorder model
Parameters
----------
dbf : Database
Thermodynamic database containing the relevant parameters.
comps : list of v.Species
Species to consider in the calculation.
candidate_phases : list
Names of phases to consider in the calculation, if not passed all phases from DBF will be considered
Returns
-------
list
Sorted list of phases that are valid for the Database and components
"""
# TODO: filter phases that can not charge balance
def all_sublattices_active(comps, phase):
active_sublattices = [len(set(comps).intersection(subl)) > 0 for
subl in phase.constituents]
return all(active_sublattices)
    if candidate_phases is None:
candidate_phases = dbf.phases.keys()
else:
candidate_phases = set(candidate_phases).intersection(dbf.phases.keys())
disordered_phases = [dbf.phases[phase].model_hints.get('disordered_phase') for phase in candidate_phases]
phases = [phase for phase in candidate_phases if
all_sublattices_active(comps, dbf.phases[phase]) and
(phase not in disordered_phases or (phase in disordered_phases and
dbf.phases[phase].model_hints.get('ordered_phase') not in candidate_phases))]
return sorted(phases)
def extract_parameters(parameters):
"""
Extract symbols and values from parameters.
Parameters
----------
parameters : dict
Dictionary of parameters
Returns
-------
tuple
Tuple of parameter symbols (list) and parameter values (parameter_array_length, # parameters)
"""
parameter_array_lengths = set(np.atleast_1d(val).size for val in parameters.values())
if len(parameter_array_lengths) > 1:
raise ValueError('parameters kwarg does not contain arrays of equal length')
if len(parameters) > 0:
param_symbols, param_values = zip(*[(wrap_symbol(key), val) for key, val in sorted(parameters.items(),
key=operator.itemgetter(0))])
param_values = np.atleast_2d(np.ascontiguousarray(np.asarray(param_values, dtype=np.float64).T))
else:
param_symbols = []
param_values = np.empty(0)
return param_symbols, param_values
def instantiate_models(dbf, comps, phases, model=None, parameters=None, symbols_only=True):
"""
Parameters
----------
dbf : Database
Database used to construct the Model instances.
comps : Iterable
Names of components to consider in the calculation.
phases : Iterable
Names of phases to consider in the calculation.
    model : Model class, a dict of phase names to Model, or an Iterable of both
Model class to use for each phase.
parameters : dict, optional
Maps SymPy Symbol to numbers, for overriding the values of parameters in
the Database.
symbols_only : bool
If True, symbols will be extracted from the parameters dict and used to
construct the Model instances.
Returns
-------
dict
Dictionary of Model instances corresponding to the passed phases.
"""
from pycalphad import Model # avoid cyclic imports
parameters = parameters if parameters is not None else {}
if symbols_only:
parameters, _ = extract_parameters(parameters)
if isinstance(model, Model): # Check that this instance is compatible with phases
if len(phases) > 1:
raise ValueError("Cannot instantiate models for multiple phases ({}) using a Model instance ({}, phase: {})".format(phases, model, model.phase_name))
else:
            if phases[0] != model.phase_name:
                raise ValueError("Cannot instantiate models because the desired {} phase does not match the Model instance's {} phase ({}).".format(phases[0], model.phase_name, model))
models_defaultdict = unpack_kwarg(model, Model)
models_dict = {}
for name in phases:
mod = models_defaultdict[name]
if isinstance(mod, type):
models_dict[name] = mod(dbf, comps, name, parameters=parameters)
else:
models_dict[name] = mod
return models_dict
def get_state_variables(models=None, conds=None):
"""
    Return a set of StateVariables defined in Model instances and/or conditions.
Parameters
----------
models : dict, optional
Dictionary mapping phase names to instances of Model objects
conds : Iterable[v.StateVariable]
An iterable of StateVariables or a dictionary mapping pycalphad StateVariables to values
Returns
-------
set
        State variables that are defined in the models and/or conditions.
Examples
--------
>>> from pycalphad import variables as v
>>> from pycalphad.core.utils import get_state_variables
>>> get_state_variables(conds={v.P: 101325, v.N: 1, v.X('AL'): 0.2}) == {v.P, v.N, v.T}
True
"""
state_vars = set()
if models is not None:
for mod in models.values():
state_vars.update(mod.state_variables)
if conds is not None:
for c in conds:
# StateVariable instances are ok (e.g. P, T, N, V, S),
# however, subclasses (X, Y, MU, NP) are not ok.
if type(c) is v.StateVariable:
state_vars.add(c)
return state_vars
def wrap_symbol(obj):
if isinstance(obj, Symbol):
return obj
else:
return Symbol(obj)
def wrap_symbol_symengine(obj):
from symengine import Symbol, sympify
from sympy import Symbol as Symbol_sympy
if isinstance(obj, Symbol):
return obj
elif isinstance(obj, Symbol_sympy):
return sympify(obj)
else:
return Symbol(obj)
|
|
# -*- coding: utf8 -*-
# ...
try:
from graphviz import Digraph as Digraph_graphviz
GRAPH=True
except ImportError:
GRAPH=False
# ...
# ...
class Node:
    def __init__(self, key, attributs=None):
        if key is None:
            raise ValueError("key is None.")
        self._ID = id(key)
        # print "*** node created with id:", self.ID, attributs
        self._key = key
        # Use a fresh dict per node; a mutable default argument would be
        # shared by every node created without explicit attributs.
        self._dict_attributs = attributs if attributs is not None else {}
        self._connectedTo = {}
@property
def ID(self):
return self._ID
@property
def key(self):
return self._key
@property
def label(self):
try:
value = self.attributs["label"]
        except KeyError:
            value = None
        if value is None:
            self.init_label()
            value = self.attributs.get("label")
        return value
@property
def color(self):
try:
value = self.attributs["color"]
        except KeyError:
            value = None
        if value is None:
            self.init_color()
            value = self.attributs.get("color")
        return value
@property
def connectedTo(self):
return self._connectedTo
@property
def attributs(self):
return self._dict_attributs
def set_attribut(self, key, value):
# print "++++++ set_attribut with :", key, value
self._dict_attributs[key] = value
def set_label(self, value):
self.set_attribut("label", value)
def set_color(self, value):
self.set_attribut("color", value)
def init_label(self):
try:
value = self.attributs["label"]
        except KeyError:
value = None
if value is None:
if self.key.label is not None:
self.set_label(self.key.label)
else:
self.set_label(self.key.name)
def init_color(self):
try:
value = self.attributs["color"]
        except KeyError:
value = None
if value is None:
try:
self.set_color(self.key.color)
            except AttributeError:
self.set_color("black")
def update_attributs(self):
self.init_color()
self.init_label()
def addNeighbor(self, vertex, attributs={}):
"""
"""
if vertex not in self.connectedTo:
self._connectedTo[vertex] = attributs
def __str__(self):
# return str(self.ID) + ' connectedTo: ' + str([x.ID for x in self.connectedTo])
return ""
def getConnections(self):
return self.connectedTo.keys()
def getWeight(self, vertex):
# return self.connectedTo[str(vertex)]
return self.connectedTo[vertex]
# ...
# ...
class Graph:
def __init__(self, name="", comment=""):
self._nodes = {}
self._numVertices = 0
self._name = name
self._comment = comment
self._list_subgraph = []
self._node_attributs = None
@property
def nodes(self):
return self._nodes
@property
def name(self):
return self._name
@property
def comment(self):
return self._comment
@property
def numVertices(self):
return self._numVertices
@property
def subgraphs(self):
return self._list_subgraph
@property
def node_attributs(self):
return self._node_attributs
def set_node_attributs(self, attributs):
self._node_attributs = attributs
def node(self, *args, **kwargs):
self._numVertices = self._numVertices + 1
newNode = Node(*args, **kwargs)
key = args[0]
self._nodes[key] = newNode
return newNode
def getNode(self,n):
if n in self.nodes:
return self.nodes[n]
else:
return None
def __contains__(self,n):
return n in self.nodes
def edge(self, vertex_f, vertex_t, \
attributs={}):
for vertex in [vertex_f, vertex_t]:
if vertex not in self.nodes:
# print "---- create new vertex :", vertex, vertex.label
v_attributs = {}
try:
v_attributs["label"] = vertex.label
                except AttributeError:
v_attributs["label"] = "" # vertex
try:
if vertex.keyword == "module":
v_attributs["shape"] = "box"
v_attributs["style"] = "filled"
                except AttributeError:
pass
self.node(vertex, attributs=v_attributs)
self._nodes[vertex_f].addNeighbor(self.nodes[vertex_t], \
attributs=attributs)
def getVertices(self):
return self.nodes.keys()
def add_subgraph(self, sub):
self._list_subgraph.append(sub)
def __iter__(self):
return iter(self.nodes.values())
# ...
# ...
class Digraph(Graph):
def __init__(self, *args, **kwargs):
Graph.__init__(self, *args, **kwargs)
def to_graphviz(self):
dot = None
if GRAPH:
dot = Digraph_graphviz(self.name, comment=self.comment, filename='cluster.gv')
# ... insert subgraphs
list_colors = ["gray", "yellow", "white", "blue", "green"]
i = 0
for sub in self.subgraphs:
label = '"process ' + str(i) + '"'
_sub = sub.to_graphviz()
_sub.body.append('style=filled')
_sub.body.append('color='+list_colors[i% len(list_colors)])
_sub.body.append('label = ' + label)
# _sub.node_attr.update(style='filled', color=list_colors[i])
dot.subgraph(_sub)
i += 1
# ...
# ... insert nodes
for node in self:
node.update_attributs()
dot.node(str(node.ID), **node.attributs)
# ...
# ... insert edges
for node in self:
for key, values in node.connectedTo.items():
son = key
attr = values
dot.edge(str(node.ID), str(son.ID), **attr)
# ...
# dot.node('start', shape='Mdiamond')
# dot.node('end', shape='Msquare')
else:
print ("graphviz is not available on this machine.")
return dot
def render(self, filename=None, view=False):
dot = self.to_graphviz()
if dot is not None:
dot.render(filename, view=view)
else:
print ("rendering is provided only by graphviz and is not available on this machine.")
# ...
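# Illustrative sketch (not part of the original module, never called here):
# build a tiny two-node digraph and convert it to graphviz.  ``_Key`` is a
# hypothetical stand-in for the objects callers normally use as node keys
# (anything exposing ``label`` and ``name`` attributes).
def _example_two_node_digraph():
    class _Key(object):
        def __init__(self, name):
            self.name = name
            self.label = name
    a, b = _Key("a"), _Key("b")
    g = Digraph(name="example", comment="two-node digraph")
    g.edge(a, b, attributs={"label": "a -> b"})
    # Returns a graphviz.Digraph when graphviz is installed, otherwise None.
    return g.to_graphviz()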
|
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers
"""
from __future__ import print_function
import errno
import os
import socket
import ssl
import sys
import time
import eventlet.wsgi
from oslo_config import cfg
import oslo_i18n
from oslo_log import log as logging
from oslo_log import loggers
from oslo_serialization import jsonutils
from oslo_utils import excutils
import routes.middleware
import six
import webob.dec
import webob.exc
from neutron.common import exceptions as exception
from neutron import context
from neutron.db import api
from neutron.i18n import _LE, _LI
from neutron.openstack.common import service as common_service
from neutron.openstack.common import systemd
socket_opts = [
cfg.IntOpt('backlog',
default=4096,
help=_("Number of backlog requests to configure "
"the socket with")),
cfg.IntOpt('tcp_keepidle',
default=600,
help=_("Sets the value of TCP_KEEPIDLE in seconds for each "
"server socket. Not supported on OS X.")),
cfg.IntOpt('retry_until_window',
default=30,
help=_("Number of seconds to keep retrying to listen")),
cfg.IntOpt('max_header_line',
default=16384,
help=_("Max header line to accommodate large tokens")),
cfg.BoolOpt('use_ssl',
default=False,
help=_('Enable SSL on the API server')),
cfg.StrOpt('ssl_ca_file',
help=_("CA certificate file to use to verify "
"connecting clients")),
cfg.StrOpt('ssl_cert_file',
help=_("Certificate file to use when starting "
"the server securely")),
cfg.StrOpt('ssl_key_file',
help=_("Private key file to use when starting "
"the server securely")),
cfg.BoolOpt('wsgi_keep_alive',
default=True,
help=_("Determines if connections are allowed to be held "
"open by clients after a request is fulfilled. A value "
"of False will ensure that the socket connection will "
"be explicitly closed once a response has been sent to "
"the client.")),
cfg.IntOpt('client_socket_timeout', default=900,
help=_("Timeout for client connections socket operations. "
"If an incoming connection is idle for this number of "
"seconds it will be closed. A value of '0' means "
"wait forever.")),
]
CONF = cfg.CONF
CONF.register_opts(socket_opts)
LOG = logging.getLogger(__name__)
class WorkerService(object):
"""Wraps a worker to be handled by ProcessLauncher"""
def __init__(self, service, application):
self._service = service
self._application = application
self._server = None
def start(self):
if CONF.use_ssl:
self._service._socket = self._service.wrap_ssl(
self._service._socket)
self._server = self._service.pool.spawn(self._service._run,
self._application,
self._service._socket)
def wait(self):
if isinstance(self._server, eventlet.greenthread.GreenThread):
self._server.wait()
def stop(self):
if isinstance(self._server, eventlet.greenthread.GreenThread):
self._server.kill()
self._server = None
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, name, num_threads=1000):
# Raise the default from 8192 to accommodate large tokens
eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
self.num_threads = num_threads
# Pool for a greenthread in which wsgi server will be running
self.pool = eventlet.GreenPool(1)
self.name = name
self._server = None
# A value of 0 is converted to None because None is what causes the
# wsgi server to wait forever.
self.client_socket_timeout = CONF.client_socket_timeout or None
if CONF.use_ssl:
self._check_ssl_settings()
def _get_socket(self, host, port, backlog):
bind_addr = (host, port)
# TODO(dims): eventlet's green dns/socket module does not actually
# support IPv6 in getaddrinfo(). We need to get around this in the
# future or monitor upstream for a fix
try:
info = socket.getaddrinfo(bind_addr[0],
bind_addr[1],
socket.AF_UNSPEC,
socket.SOCK_STREAM)[0]
family = info[0]
bind_addr = info[-1]
except Exception:
LOG.exception(_LE("Unable to listen on %(host)s:%(port)s"),
{'host': host, 'port': port})
sys.exit(1)
sock = None
retry_until = time.time() + CONF.retry_until_window
while not sock and time.time() < retry_until:
try:
sock = eventlet.listen(bind_addr,
backlog=backlog,
family=family)
except socket.error as err:
with excutils.save_and_reraise_exception() as ctxt:
if err.errno == errno.EADDRINUSE:
ctxt.reraise = False
eventlet.sleep(0.1)
if not sock:
raise RuntimeError(_("Could not bind to %(host)s:%(port)s "
"after trying for %(time)d seconds") %
{'host': host,
'port': port,
'time': CONF.retry_until_window})
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# sockets can hang around forever without keepalive
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
sock.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE,
CONF.tcp_keepidle)
return sock
@staticmethod
def _check_ssl_settings():
if not os.path.exists(CONF.ssl_cert_file):
raise RuntimeError(_("Unable to find ssl_cert_file "
": %s") % CONF.ssl_cert_file)
# ssl_key_file is optional because the key may be embedded in the
# certificate file
if CONF.ssl_key_file and not os.path.exists(CONF.ssl_key_file):
raise RuntimeError(_("Unable to find "
"ssl_key_file : %s") % CONF.ssl_key_file)
# ssl_ca_file is optional
if CONF.ssl_ca_file and not os.path.exists(CONF.ssl_ca_file):
raise RuntimeError(_("Unable to find ssl_ca_file "
": %s") % CONF.ssl_ca_file)
@staticmethod
def wrap_ssl(sock):
ssl_kwargs = {'server_side': True,
'certfile': CONF.ssl_cert_file,
'keyfile': CONF.ssl_key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl_ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl_ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
def start(self, application, port, host='0.0.0.0', workers=0):
"""Run a WSGI server with the given application."""
self._host = host
self._port = port
backlog = CONF.backlog
self._socket = self._get_socket(self._host,
self._port,
backlog=backlog)
self._launch(application, workers)
def _launch(self, application, workers=0):
service = WorkerService(self, application)
if workers < 1:
# The API service should run in the current process.
self._server = service
service.start()
systemd.notify_once()
else:
# dispose the whole pool before os.fork, otherwise there will
# be shared DB connections in child processes which may cause
# DB errors.
api.dispose()
# The API service runs in a number of child processes.
# Minimize the cost of checking for child exit by extending the
# wait interval past the default of 0.01s.
self._server = common_service.ProcessLauncher(wait_interval=1.0)
self._server.launch_service(service, workers=workers)
@property
def host(self):
return self._socket.getsockname()[0] if self._socket else self._host
@property
def port(self):
return self._socket.getsockname()[1] if self._socket else self._port
def stop(self):
self._server.stop()
def wait(self):
"""Wait until all servers have completed running."""
try:
self._server.wait()
except KeyboardInterrupt:
pass
def _run(self, application, socket):
"""Start a WSGI server in a new green thread."""
eventlet.wsgi.server(socket, application,
max_size=self.num_threads,
log=loggers.WritableLogger(LOG),
keepalive=CONF.wsgi_keep_alive,
socket_timeout=self.client_socket_timeout)
class Middleware(object):
"""Base WSGI middleware wrapper.
These classes require an application to be initialized that will be called
next. By default the middleware will simply call its wrapped app, or you
can override __call__ to customize its behavior.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = nova.api.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import nova.api.analytics
analytics.Analytics(app_from_paste, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
return cls(app, **local_config)
return _factory
def __init__(self, application):
self.application = application
def process_request(self, req):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, response):
"""Do whatever you'd like to the response."""
return response
@webob.dec.wsgify
def __call__(self, req):
response = self.process_request(req)
if response:
return response
response = req.get_response(self.application)
return self.process_response(response)
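# Illustrative sketch (not part of neutron): a Middleware subclass that uses the
# process_response() hook documented above to stamp every response with a header.
# The class name and header value are hypothetical.
class ExampleHeaderMiddleware(Middleware):
    """Hypothetical middleware that appends an X-Example header to responses."""
    def process_response(self, response):
        response.headers['X-Example'] = 'on'
        return response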
class Request(webob.Request):
def best_match_content_type(self):
"""Determine the most acceptable content-type.
Based on:
1) URI extension (.json)
2) Content-type header
3) Accept* headers
"""
# First lookup http request path
parts = self.path.rsplit('.', 1)
if len(parts) > 1:
_format = parts[1]
if _format in ['json']:
return 'application/{0}'.format(_format)
        # Then look up the Content-Type header
type_from_header = self.get_content_type()
if type_from_header:
return type_from_header
ctypes = ['application/json']
        # Finally search the Accept-* headers
bm = self.accept.best_match(ctypes)
return bm or 'application/json'
def get_content_type(self):
        # Must be a tuple: a bare string would turn the membership test below
        # into a substring check.
        allowed_types = ("application/json",)
if "Content-Type" not in self.headers:
LOG.debug("Missing Content-Type")
return None
_type = self.content_type
if _type in allowed_types:
return _type
return None
def best_match_language(self):
"""Determines best available locale from the Accept-Language header.
:returns: the best language match or None if the 'Accept-Language'
header was not available in the request.
"""
if not self.accept_language:
return None
all_languages = oslo_i18n.get_available_languages('neutron')
return self.accept_language.best_match(all_languages)
@property
def context(self):
if 'neutron.context' not in self.environ:
self.environ['neutron.context'] = context.get_admin_context()
return self.environ['neutron.context']
class ActionDispatcher(object):
"""Maps method name to local methods through action name."""
def dispatch(self, *args, **kwargs):
"""Find and call local method."""
action = kwargs.pop('action', 'default')
action_method = getattr(self, str(action), self.default)
return action_method(*args, **kwargs)
def default(self, data):
raise NotImplementedError()
class DictSerializer(ActionDispatcher):
"""Default request body serialization."""
def serialize(self, data, action='default'):
return self.dispatch(data, action=action)
def default(self, data):
return ""
class JSONDictSerializer(DictSerializer):
"""Default JSON request body serialization."""
def default(self, data):
def sanitizer(obj):
            return six.text_type(obj)
return jsonutils.dumps(data, default=sanitizer)
class ResponseHeaderSerializer(ActionDispatcher):
"""Default response headers serialization."""
def serialize(self, response, data, action):
self.dispatch(response, data, action=action)
def default(self, response, data):
response.status_int = 200
class ResponseSerializer(object):
"""Encode the necessary pieces into a response object."""
def __init__(self, body_serializers=None, headers_serializer=None):
self.body_serializers = {
'application/json': JSONDictSerializer(),
}
self.body_serializers.update(body_serializers or {})
self.headers_serializer = (headers_serializer or
ResponseHeaderSerializer())
def serialize(self, response_data, content_type, action='default'):
"""Serialize a dict into a string and wrap in a wsgi.Request object.
:param response_data: dict produced by the Controller
:param content_type: expected mimetype of serialized response body
"""
response = webob.Response()
self.serialize_headers(response, response_data, action)
self.serialize_body(response, response_data, content_type, action)
return response
def serialize_headers(self, response, data, action):
self.headers_serializer.serialize(response, data, action)
def serialize_body(self, response, data, content_type, action):
response.headers['Content-Type'] = content_type
if data is not None:
serializer = self.get_body_serializer(content_type)
response.body = serializer.serialize(data, action)
def get_body_serializer(self, content_type):
try:
return self.body_serializers[content_type]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
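# Minimal usage sketch (hypothetical payload): the default ResponseSerializer
# turns a dict into a JSON-encoded webob.Response with a 200 status.
#
#     serializer = ResponseSerializer()
#     response = serializer.serialize({'network': {'id': 'abc'}}, 'application/json')
#     assert response.headers['Content-Type'] == 'application/json'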
class TextDeserializer(ActionDispatcher):
"""Default request body deserialization."""
def deserialize(self, datastring, action='default'):
return self.dispatch(datastring, action=action)
def default(self, datastring):
return {}
class JSONDeserializer(TextDeserializer):
def _from_json(self, datastring):
try:
return jsonutils.loads(datastring)
except ValueError:
msg = _("Cannot understand JSON")
raise exception.MalformedRequestBody(reason=msg)
def default(self, datastring):
return {'body': self._from_json(datastring)}
class RequestHeadersDeserializer(ActionDispatcher):
"""Default request headers deserializer."""
def deserialize(self, request, action):
return self.dispatch(request, action=action)
def default(self, request):
return {}
class RequestDeserializer(object):
"""Break up a Request object into more useful pieces."""
def __init__(self, body_deserializers=None, headers_deserializer=None):
self.body_deserializers = {
'application/json': JSONDeserializer(),
}
self.body_deserializers.update(body_deserializers or {})
self.headers_deserializer = (headers_deserializer or
RequestHeadersDeserializer())
def deserialize(self, request):
"""Extract necessary pieces of the request.
:param request: Request object
        :returns: tuple of the expected controller action name, a dictionary of
                  keyword arguments to pass to the controller, and the expected
                  content type of the response
"""
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
action_args.update(self.deserialize_headers(request, action))
action_args.update(self.deserialize_body(request, action))
accept = self.get_expected_content_type(request)
return (action, action_args, accept)
def deserialize_headers(self, request, action):
return self.headers_deserializer.deserialize(request, action)
def deserialize_body(self, request, action):
try:
content_type = request.best_match_content_type()
except exception.InvalidContentType:
LOG.debug("Unrecognized Content-Type provided in request")
return {}
if content_type is None:
LOG.debug("No Content-Type provided in request")
return {}
if not len(request.body) > 0:
LOG.debug("Empty body provided in request")
return {}
try:
deserializer = self.get_body_deserializer(content_type)
except exception.InvalidContentType:
with excutils.save_and_reraise_exception():
LOG.debug("Unable to deserialize body as provided "
"Content-Type")
return deserializer.deserialize(request.body, action)
def get_body_deserializer(self, content_type):
try:
return self.body_deserializers[content_type]
except (KeyError, TypeError):
raise exception.InvalidContentType(content_type=content_type)
def get_expected_content_type(self, request):
return request.best_match_content_type()
def get_action_args(self, request_environment):
"""Parse dictionary created by routes library."""
try:
args = request_environment['wsgiorg.routing_args'][1].copy()
except Exception:
return {}
try:
del args['controller']
except KeyError:
pass
try:
del args['format']
except KeyError:
pass
return args
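# Minimal usage sketch (hypothetical request): deserializing a JSON body with the
# default RequestDeserializer; the action name 'create' is assumed.
#
#     req = Request.blank('/v2.0/networks', method='POST',
#                         body=b'{"network": {"name": "net1"}}',
#                         headers={'Content-Type': 'application/json'})
#     body = RequestDeserializer().deserialize_body(req, 'create')
#     # body == {'body': {'network': {'name': 'net1'}}}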
class Application(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [app:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[app:wadl]
latest_version = 1.3
paste.app_factory = nova.api.fancy_api:Wadl.factory
which would result in a call to the `Wadl` class as
import neutron.api.fancy_api
fancy_api.Wadl(latest_version='1.3')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
return cls(**local_config)
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
# Any of the following objects work as responses:
# Option 1: simple string
res = 'message\n'
# Option 2: a nicely formatted HTTP exception page
res = exc.HTTPForbidden(explanation='Nice try')
# Option 3: a webob Response object (in case you need to play with
# headers, or you want to be treated like an iterable, or or or)
res = Response();
res.app_iter = open('somefile')
# Option 4: any wsgi app to be run next
res = self.application
# Option 5: you can get a Response object for a wsgi app, too, to
# play with headers etc
res = req.get_response(self.application)
# You can then just return your response...
return res
# ... or set req.response and return None.
req.response = res
See the end of http://pythonpaste.org/webob/modules/dec.html
for more info.
"""
raise NotImplementedError(_('You must implement __call__'))
class Debug(Middleware):
"""Middleware for debugging.
Helper class that can be inserted into any WSGI application chain
to get information about the request and response.
"""
@webob.dec.wsgify
def __call__(self, req):
print(("*" * 40) + " REQUEST ENVIRON")
for key, value in req.environ.items():
print(key, "=", value)
print()
resp = req.get_response(self.application)
print(("*" * 40) + " RESPONSE HEADERS")
for (key, value) in six.iteritems(resp.headers):
print(key, "=", value)
print()
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Print contents of a wrapper string iterator when iterated."""
print(("*" * 40) + " BODY")
for part in app_iter:
sys.stdout.write(part)
sys.stdout.flush()
yield part
print()
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be a wsgi.Controller, who will route
the request to the action method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, "/svrlist", controller=sc, action="list")
# Actions are all implicitly defined
mapper.resource("network", "networks", controller=nc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify(RequestClass=Request)
def _dispatch(req):
"""Dispatch a Request.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
language = req.best_match_language()
msg = _('The resource could not be found.')
msg = oslo_i18n.translate(msg, language)
return webob.exc.HTTPNotFound(explanation=msg)
app = match['controller']
return app
class Resource(Application):
"""WSGI app that handles (de)serialization and controller dispatch.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon its controller. All
controller action methods must accept a 'req' argument, which is the
incoming wsgi.Request. If the operation is a PUT or POST, the controller
method must also accept a 'body' argument (the deserialized request body).
They may raise a webob.exc exception or return a dict, which will be
    serialized according to the requested content type.
"""
def __init__(self, controller, fault_body_function,
deserializer=None, serializer=None):
"""Object initialization.
        :param controller: object that implements methods created by routes lib
        :param deserializer: object that can deserialize a webob request
                             into necessary pieces
        :param serializer: object that can serialize the output of a
                           controller into a webob response
:param fault_body_function: a function that will build the response
body for HTTP errors raised by operations
on this resource object
"""
self.controller = controller
self.deserializer = deserializer or RequestDeserializer()
self.serializer = serializer or ResponseSerializer()
self._fault_body_function = fault_body_function
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.info(_LI("%(method)s %(url)s"),
{"method": request.method, "url": request.url})
try:
action, args, accept = self.deserializer.deserialize(request)
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
LOG.exception(_LE("InvalidContentType: %s"), msg)
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
LOG.exception(_LE("MalformedRequestBody: %s"), msg)
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
try:
action_result = self.dispatch(request, action, args)
except webob.exc.HTTPException as ex:
LOG.info(_LI("HTTP exception thrown: %s"), ex)
action_result = Fault(ex, self._fault_body_function)
except Exception:
LOG.exception(_LE("Internal error"))
# Do not include the traceback to avoid returning it to clients.
action_result = Fault(webob.exc.HTTPServerError(),
self._fault_body_function)
if isinstance(action_result, dict) or action_result is None:
response = self.serializer.serialize(action_result,
accept,
action=action)
else:
response = action_result
try:
LOG.info(_LI("%(url)s returned with HTTP %(status)d"),
dict(url=request.url, status=response.status_int))
except AttributeError as e:
LOG.info(_LI("%(url)s returned a fault: %(exception)s"),
dict(url=request.url, exception=e))
return response
def dispatch(self, request, action, action_args):
"""Find action-spefic method on controller and call it."""
controller_method = getattr(self.controller, action)
try:
#NOTE(salvatore-orlando): the controller method must have
# an argument whose name is 'request'
return controller_method(request=request, **action_args)
except TypeError as exc:
LOG.exception(exc)
return Fault(webob.exc.HTTPBadRequest())
def _default_body_function(wrapped_exc):
code = wrapped_exc.status_int
fault_data = {
'Error': {
'code': code,
'message': wrapped_exc.explanation}}
# 'code' is an attribute on the fault tag itself
metadata = {'attributes': {'Error': 'code'}}
return fault_data, metadata
class Fault(webob.exc.HTTPException):
"""Generates an HTTP response from a webob HTTP exception."""
def __init__(self, exception, body_function=None):
"""Creates a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
self.status_int = self.wrapped_exc.status_int
self._body_function = body_function or _default_body_function
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
fault_data, metadata = self._body_function(self.wrapped_exc)
content_type = req.best_match_content_type()
serializer = {
'application/json': JSONDictSerializer(),
}[content_type]
self.wrapped_exc.body = serializer.serialize(fault_data)
self.wrapped_exc.content_type = content_type
return self.wrapped_exc
# NOTE(salvatore-orlando): this class will go once the
# extension API framework is updated
class Controller(object):
"""WSGI app that dispatched to methods.
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon itself. All action methods
must, in addition to their normal parameters, accept a 'req' argument
which is the incoming wsgi.Request. They raise a webob.exc exception,
or return a dict which will be serialized by requested content type.
"""
@webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""Call the method specified in req.environ by RoutesMiddleware."""
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict['action']
method = getattr(self, action)
del arg_dict['controller']
del arg_dict['action']
if 'format' in arg_dict:
del arg_dict['format']
arg_dict['request'] = req
result = method(**arg_dict)
if isinstance(result, dict) or result is None:
if result is None:
status = 204
content_type = ''
body = None
else:
status = 200
content_type = req.best_match_content_type()
body = self._serialize(result, content_type)
response = webob.Response(status=status,
content_type=content_type,
body=body)
LOG.debug("%(url)s returned with HTTP %(status)d",
dict(url=req.url, status=response.status_int))
return response
else:
return result
def _serialize(self, data, content_type):
"""Serialize the given dict to the provided content_type.
Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.
"""
_metadata = getattr(type(self), '_serialization_metadata', {})
serializer = Serializer(_metadata)
try:
return serializer.serialize(data, content_type)
except exception.InvalidContentType:
msg = _('The requested content type %s is invalid.') % content_type
raise webob.exc.HTTPNotAcceptable(msg)
def _deserialize(self, data, content_type):
"""Deserialize the request body to the specefied content type.
Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.
"""
_metadata = getattr(type(self), '_serialization_metadata', {})
serializer = Serializer(_metadata)
return serializer.deserialize(data, content_type)['body']
# NOTE(salvatore-orlando): this class will go once the
# extension API framework is updated
class Serializer(object):
"""Serializes and deserializes dictionaries to certain MIME types."""
def __init__(self, metadata=None):
"""Create a serializer based on the given WSGI environment.
'metadata' is an optional dict mapping MIME types to information
needed to serialize a dictionary to that type.
"""
self.metadata = metadata or {}
def _get_serialize_handler(self, content_type):
handlers = {
'application/json': JSONDictSerializer(),
}
try:
return handlers[content_type]
except Exception:
raise exception.InvalidContentType(content_type=content_type)
def serialize(self, data, content_type):
"""Serialize a dictionary into the specified content type."""
return self._get_serialize_handler(content_type).serialize(data)
def deserialize(self, datastring, content_type):
"""Deserialize a string to a dictionary.
The string must be in the format of a supported MIME type.
"""
try:
return self.get_deserialize_handler(content_type).deserialize(
datastring)
except Exception:
raise webob.exc.HTTPBadRequest(_("Could not deserialize data"))
def get_deserialize_handler(self, content_type):
handlers = {
'application/json': JSONDeserializer(),
}
try:
return handlers[content_type]
except Exception:
raise exception.InvalidContentType(content_type=content_type)
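# Minimal usage sketch (hypothetical payload): round-trip a dict through the
# legacy Serializer.
#
#     s = Serializer()
#     body = s.serialize({'status': 'ACTIVE'}, 'application/json')
#     data = s.deserialize(body, 'application/json')['body']
#     # data == {'status': 'ACTIVE'}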
|
|
from __future__ import unicode_literals
import errno
import io
import hashlib
import json
import os.path
import re
import types
import sys
import youtube_dl.extractor
from youtube_dl import YoutubeDL
from youtube_dl.compat import (
compat_os_name,
compat_str,
)
from youtube_dl.utils import (
preferredencoding,
write_string,
)
def get_params(override=None):
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"parameters.json")
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
parameters = json.load(pf)
if override:
parameters.update(override)
return parameters
def try_rm(filename):
""" Remove a file if it exists """
try:
os.remove(filename)
except OSError as ose:
if ose.errno != errno.ENOENT:
raise
def report_warning(message):
    '''
    Print the message to stderr; it will be prefixed with 'WARNING:'.
    If stderr is a tty the 'WARNING:' will be colored.
    '''
if sys.stderr.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = 'WARNING:'
output = '%s %s\n' % (_msg_header, message)
if 'b' in getattr(sys.stderr, 'mode', '') or sys.version_info[0] < 3:
output = output.encode(preferredencoding())
sys.stderr.write(output)
class FakeYDL(YoutubeDL):
def __init__(self, override=None):
        # Different instances of the downloader can't share the same dictionary:
        # some tests set the "sublang" parameter, which would break the md5 checks.
params = get_params(override=override)
super(FakeYDL, self).__init__(params, auto_init=False)
self.result = []
def to_screen(self, s, skip_eol=None):
print(s)
def trouble(self, s, tb=None):
raise Exception(s)
def download(self, x):
self.result.append(x)
def expect_warning(self, regex):
# Silence an expected warning matching a regex
old_report_warning = self.report_warning
def report_warning(self, message):
if re.match(regex, message):
return
old_report_warning(message)
self.report_warning = types.MethodType(report_warning, self)
def gettestcases(include_onlymatching=False):
for ie in youtube_dl.extractor.gen_extractors():
for tc in ie.get_testcases(include_onlymatching):
yield tc
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
def expect_value(self, got, expected, field):
if isinstance(expected, compat_str) and expected.startswith('re:'):
match_str = expected[len('re:'):]
match_rex = re.compile(match_str)
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, field))
self.assertTrue(
match_rex.match(got),
'field %s (value: %r) should match %r' % (field, got, match_str))
elif isinstance(expected, compat_str) and expected.startswith('startswith:'):
start_str = expected[len('startswith:'):]
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, field))
self.assertTrue(
got.startswith(start_str),
'field %s (value: %r) should start with %r' % (field, got, start_str))
elif isinstance(expected, compat_str) and expected.startswith('contains:'):
contains_str = expected[len('contains:'):]
self.assertTrue(
isinstance(got, compat_str),
'Expected a %s object, but got %s for field %s' % (
compat_str.__name__, type(got).__name__, field))
self.assertTrue(
contains_str in got,
'field %s (value: %r) should contain %r' % (field, got, contains_str))
elif isinstance(expected, type):
self.assertTrue(
isinstance(got, expected),
'Expected type %r for field %s, but got value %r of type %r' % (expected, field, got, type(got)))
elif isinstance(expected, dict) and isinstance(got, dict):
expect_dict(self, got, expected)
elif isinstance(expected, list) and isinstance(got, list):
self.assertEqual(
len(expected), len(got),
'Expect a list of length %d, but got a list of length %d for field %s' % (
len(expected), len(got), field))
for index, (item_got, item_expected) in enumerate(zip(got, expected)):
type_got = type(item_got)
type_expected = type(item_expected)
self.assertEqual(
type_expected, type_got,
'Type mismatch for list item at index %d for field %s, expected %r, got %r' % (
index, field, type_expected, type_got))
expect_value(self, item_got, item_expected, field)
else:
if isinstance(expected, compat_str) and expected.startswith('md5:'):
got = 'md5:' + md5(got)
elif isinstance(expected, compat_str) and expected.startswith('mincount:'):
self.assertTrue(
isinstance(got, (list, dict)),
'Expected field %s to be a list or a dict, but it is of type %s' % (
field, type(got).__name__))
expected_num = int(expected.partition(':')[2])
assertGreaterEqual(
self, len(got), expected_num,
'Expected %d items in field %s, but only got %d' % (expected_num, field, len(got)))
return
self.assertEqual(
expected, got,
'Invalid value for field %s, expected %r, got %r' % (field, expected, got))
def expect_dict(self, got_dict, expected_dict):
for info_field, expected in expected_dict.items():
got = got_dict.get(info_field)
expect_value(self, got, expected, info_field)
def expect_info_dict(self, got_dict, expected_dict):
expect_dict(self, got_dict, expected_dict)
# Check for the presence of mandatory fields
if got_dict.get('_type') not in ('playlist', 'multi_video'):
for key in ('id', 'url', 'title', 'ext'):
self.assertTrue(got_dict.get(key), 'Missing mandatory field %s' % key)
# Check for mandatory fields that are automatically set by YoutubeDL
for key in ['webpage_url', 'extractor', 'extractor_key']:
self.assertTrue(got_dict.get(key), 'Missing field: %s' % key)
# Are checkable fields missing from the test case definition?
test_info_dict = dict((key, value if not isinstance(value, compat_str) or len(value) < 250 else 'md5:' + md5(value))
for key, value in got_dict.items()
if value and key in ('id', 'title', 'description', 'uploader', 'upload_date', 'timestamp', 'uploader_id', 'location', 'age_limit'))
missing_keys = set(test_info_dict.keys()) - set(expected_dict.keys())
if missing_keys:
def _repr(v):
if isinstance(v, compat_str):
return "'%s'" % v.replace('\\', '\\\\').replace("'", "\\'").replace('\n', '\\n')
else:
return repr(v)
info_dict_str = ''
if len(missing_keys) != len(expected_dict):
info_dict_str += ''.join(
' %s: %s,\n' % (_repr(k), _repr(v))
for k, v in test_info_dict.items() if k not in missing_keys)
if info_dict_str:
info_dict_str += '\n'
info_dict_str += ''.join(
' %s: %s,\n' % (_repr(k), _repr(test_info_dict[k]))
for k in missing_keys)
write_string(
'\n\'info_dict\': {\n' + info_dict_str + '},\n', out=sys.stderr)
self.assertFalse(
missing_keys,
'Missing keys in test definition: %s' % (
', '.join(sorted(missing_keys))))
def assertRegexpMatches(self, text, regexp, msg=None):
if hasattr(self, 'assertRegexp'):
return self.assertRegexp(text, regexp, msg)
else:
m = re.match(regexp, text)
if not m:
note = 'Regexp didn\'t match: %r not found' % (regexp)
if len(text) < 1000:
note += ' in %r' % text
if msg is None:
msg = note
else:
msg = note + ', ' + msg
self.assertTrue(m, msg)
def assertGreaterEqual(self, got, expected, msg=None):
if not (got >= expected):
if msg is None:
msg = '%r not greater than or equal to %r' % (got, expected)
self.assertTrue(got >= expected, msg)
def expect_warnings(ydl, warnings_re):
real_warning = ydl.report_warning
def _report_warning(w):
if not any(re.search(w_re, w) for w_re in warnings_re):
real_warning(w)
ydl.report_warning = _report_warning
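# Illustrative sketch (not part of the original helpers): expect_dict() and the
# other expect_* helpers take a unittest.TestCase instance as their first
# argument; the test class below is hypothetical.
import unittest
class _ExampleExpectDictTest(unittest.TestCase):
    def test_expect_dict(self):
        got = {'id': '42', 'title': 'clip', 'duration': 10}
        expected = {'id': '42', 'title': 're:^c', 'duration': int}
        expect_dict(self, got, expected)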
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Non-differentiable utility functions."""
import collections
from concurrent import futures
import contextlib
import functools
import time
from typing import List, Union
import jax
from jax import tree_util
import jax.numpy as jnp
import numpy as np
from scipy import interpolate
from scipy.spatial import transform as scipy_transform
def clip_gradients(grad, grad_max_val=0.0, grad_max_norm=0.0, eps=1e-7):
"""Gradient clipping."""
# Clip the gradient by value.
if grad_max_val > 0:
clip_fn = lambda z: jnp.clip(z, -grad_max_val, grad_max_val)
grad = jax.tree_util.tree_map(clip_fn, grad)
# Clip the (possibly value-clipped) gradient by norm.
if grad_max_norm > 0:
grad_norm = safe_sqrt(
jax.tree_util.tree_reduce(
lambda x, y: x + jnp.sum(y**2), grad, initializer=0))
mult = jnp.minimum(1, grad_max_norm / (eps + grad_norm))
grad = jax.tree_util.tree_map(lambda z: mult * z, grad)
return grad
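# Minimal usage sketch (hypothetical gradients): clip a gradient pytree by value
# and then by global norm before the optimizer update.
#
#     grads = {'w': jnp.array([3.0, -5.0]), 'b': jnp.array([0.5])}
#     grads = clip_gradients(grads, grad_max_val=1.0, grad_max_norm=1.0)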
def matmul(a, b):
"""jnp.matmul defaults to bfloat16, but this helper function doesn't."""
return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
# pylint: disable=unused-argument
@functools.partial(jax.custom_jvp, nondiff_argnums=(1, 2, 3))
def safe_norm(x, axis=-1, keepdims=False, tol=1e-9):
"""Calculates a np.linalg.norm(d) that's safe for gradients at d=0.
  These gymnastics are to avoid a poorly defined gradient for np.linalg.norm(0);
  see https://github.com/google/jax/issues/3058 for details.
Args:
x: A np.array
axis: The axis along which to compute the norm
keepdims: if True don't squeeze the axis.
tol: the absolute threshold within which to zero out the gradient.
Returns:
Equivalent to np.linalg.norm(d)
"""
return jnp.linalg.norm(x, axis=axis, keepdims=keepdims)
@safe_norm.defjvp
def _safe_norm_jvp(axis, keepdims, tol, primals, tangents):
"""Custom JVP rule for safe_norm."""
x, = primals
x_dot, = tangents
safe_tol = max(tol, 1e-30)
y = safe_norm(x, tol=safe_tol, axis=axis, keepdims=True)
y_safe = jnp.maximum(y, tol) # Prevent divide by zero.
y_dot = jnp.where(y > safe_tol, x_dot * x / y_safe, jnp.zeros_like(x))
y_dot = jnp.sum(y_dot, axis=axis, keepdims=True)
# Squeeze the axis if `keepdims` is True.
if not keepdims:
y = jnp.squeeze(y, axis=axis)
y_dot = jnp.squeeze(y_dot, axis=axis)
return y, y_dot
def jacobian_to_curl(jacobian):
"""Computes the curl from the Jacobian."""
dfx_dy = jacobian[..., 0, 1]
dfx_dz = jacobian[..., 0, 2]
dfy_dx = jacobian[..., 1, 0]
dfy_dz = jacobian[..., 1, 2]
dfz_dx = jacobian[..., 2, 0]
dfz_dy = jacobian[..., 2, 1]
return jnp.stack([
dfz_dy - dfy_dz,
dfx_dz - dfz_dx,
dfy_dx - dfx_dy,
], axis=-1)
def jacobian_to_div(jacobian):
"""Computes the divergence from the Jacobian."""
  # If F : x -> x + f(x) then dF/dx = I + df/dx, so subtract 1 for each of the
  # three diagonal entries of the Jacobian (hence the -3.0).
return jnp.trace(jacobian, axis1=-2, axis2=-1) - 3.0
def compute_psnr(mse):
"""Compute psnr value given mse (we assume the maximum pixel value is 1).
Args:
mse: float, mean square error of pixels.
Returns:
psnr: float, the psnr value.
"""
return -10. * jnp.log(mse) / jnp.log(10.)
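# Worked example: an MSE of 0.01 gives compute_psnr(0.01) = -10 * log10(0.01)
# = 20 dB (with the maximum pixel value assumed to be 1).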
@jax.jit
def robust_whiten(x):
median = jnp.nanmedian(x)
mad = jnp.nanmean(jnp.abs(x - median))
return (x - median) / mad
def interpolate_codes(codes: Union[np.ndarray, List[np.ndarray]],
num_samples: int,
method='spline',
bc_type='natural'):
"""Interpolates latent codes.
Args:
codes: the codes to interpolate.
num_samples: the number of samples to interpolate to.
method: which method to use for interpolation.
bc_type: interpolation type for spline interpolation.
Returns:
(np.ndarray): the interpolated codes.
"""
if isinstance(codes, list):
codes = np.array(codes)
t = np.arange(len(codes))
xs = np.linspace(0, len(codes) - 1, num_samples)
if method == 'spline':
cs = interpolate.CubicSpline(t, codes, bc_type=bc_type)
return cs(xs).astype(np.float32)
  elif method in {'linear', 'cubic', 'quadratic', 'slinear'}:
    interp = interpolate.interp1d(t, codes, kind=method, axis=0)
    return interp(xs).astype(np.float32)
raise ValueError(f'Unknown method {method!r}')
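# Minimal usage sketch (hypothetical codes): upsample 4 latent codes of
# dimension 8 to 60 in-between samples with the default natural cubic spline.
#
#     codes = np.random.RandomState(0).randn(4, 8).astype(np.float32)
#     dense = interpolate_codes(codes, num_samples=60)
#     # dense.shape == (60, 8)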
def interpolate_cameras(cameras, num_samples: int):
"""Interpolates the cameras to the number of output samples.
Uses a spherical linear interpolation (Slerp) to interpolate the camera
orientations and a cubic spline to interpolate the camera positions.
Args:
cameras: the input cameras to interpolate.
num_samples: the number of output cameras.
Returns:
(List[vision_sfm.Camera]): a list of interpolated cameras.
"""
rotations = []
positions = []
for camera in cameras:
rotations.append(camera.orientation)
positions.append(camera.position)
in_times = np.linspace(0, 1, len(rotations))
slerp = scipy_transform.Slerp(
in_times, scipy_transform.Rotation.from_dcm(rotations))
spline = interpolate.CubicSpline(in_times, positions)
out_times = np.linspace(0, 1, num_samples)
out_rots = slerp(out_times).as_dcm()
out_positions = spline(out_times)
ref_camera = cameras[0]
out_cameras = []
for out_rot, out_pos in zip(out_rots, out_positions):
out_camera = ref_camera.copy()
out_camera.orientation = out_rot
out_camera.position = out_pos
out_cameras.append(out_camera)
return out_cameras
def safe_sqrt(x, eps=1e-7):
safe_x = jnp.where(x == 0, jnp.ones_like(x) * eps, x)
return jnp.sqrt(safe_x)
@jax.jit
def general_loss_with_squared_residual(x_sq, alpha, scale):
r"""Implements the general form of the loss.
This implements the rho(x, \alpha, c) function described in "A General and
Adaptive Robust Loss Function", Jonathan T. Barron,
https://arxiv.org/abs/1701.03077.
Args:
x_sq: The residual for which the loss is being computed. x can have any
shape, and alpha and scale will be broadcasted to match x's shape if
necessary.
alpha: The shape parameter of the loss (\alpha in the paper), where more
negative values produce a loss with more robust behavior (outliers "cost"
less), and more positive values produce a loss with less robust behavior
(outliers are penalized more heavily). Alpha can be any value in
[-infinity, infinity], but the gradient of the loss with respect to alpha
is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth
interpolation between several discrete robust losses:
alpha=-Infinity: Welsch/Leclerc Loss.
alpha=-2: Geman-McClure loss.
      alpha=0: Cauchy/Lorentzian loss.
alpha=1: Charbonnier/pseudo-Huber loss.
alpha=2: L2 loss.
scale: The scale parameter of the loss. When |x| < scale, the loss is an
L2-like quadratic bowl, and when |x| > scale the loss function takes on a
different shape according to alpha.
Returns:
The losses for each element of x, in the same shape as x.
"""
eps = jnp.finfo(jnp.float32).eps
# `scale` must be > 0.
scale = jnp.maximum(eps, scale)
# The loss when alpha == 2. This will get reused repeatedly.
loss_two = 0.5 * x_sq / (scale**2)
# "Safe" versions of log1p and expm1 that will not NaN-out.
log1p_safe = lambda x: jnp.log1p(jnp.minimum(x, 3e37))
expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 87.5))
  # The loss when not in one of the special cases.
# Clamp |alpha| to be >= machine epsilon so that it's safe to divide by.
a = jnp.where(alpha >= 0, jnp.ones_like(alpha),
-jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha))
# Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by.
b = jnp.maximum(eps, jnp.abs(alpha - 2))
loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * alpha) - 1)
# Select which of the cases of the loss to return as a function of alpha.
return scale * jnp.where(
alpha == -jnp.inf, -expm1_safe(-loss_two),
jnp.where(
alpha == 0, log1p_safe(loss_two),
jnp.where(alpha == 2, loss_two,
jnp.where(alpha == jnp.inf, expm1_safe(loss_two),
loss_ow))))
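# Minimal usage sketch (hypothetical residuals): the same squared residuals under
# a few settings of alpha; alpha=2 recovers the scaled L2 loss and alpha=0 the
# Cauchy/Lorentzian loss.
#
#     x_sq = jnp.array([0.0, 0.25, 1.0, 4.0])
#     l2 = general_loss_with_squared_residual(x_sq, alpha=2.0, scale=1.0)
#     cauchy = general_loss_with_squared_residual(x_sq, alpha=0.0, scale=1.0)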
def points_bound(points):
"""Computes the min and max dims of the points."""
min_dim = np.min(points, axis=0)
max_dim = np.max(points, axis=0)
return np.stack((min_dim, max_dim), axis=1)
def points_centroid(points):
"""Computes the centroid of the points from the bounding box."""
return points_bound(points).mean(axis=1)
def points_bounding_size(points):
"""Computes the bounding size of the points from the bounding box."""
bounds = points_bound(points)
return np.linalg.norm(bounds[:, 1] - bounds[:, 0])
def shard(xs, device_count=None):
"""Split data into shards for multiple devices along the first dimension."""
if device_count is None:
device_count = jax.local_device_count()
return jax.tree_map(lambda x: x.reshape((device_count, -1) + x.shape[1:]), xs)
def to_device(xs):
"""Transfer data to devices (GPU/TPU)."""
return jax.tree_map(jnp.array, xs)
def unshard(x, padding=0):
"""Collect the sharded tensor to the shape before sharding."""
if padding > 0:
return x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))[:-padding]
else:
return x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:]))
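# Minimal round-trip sketch (hypothetical batch; assumes the batch size is
# divisible by jax.local_device_count()):
#
#     batch = {'rays': np.zeros((8, 3))}
#     sharded = shard(batch)                  # leading dim -> (device_count, ...)
#     restored = unshard(sharded['rays'])     # back to shape (8, 3)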
def normalize(x):
"""Normalization helper function."""
return x / np.linalg.norm(x)
def parallel_map(f, iterable, max_threads=None, show_pbar=False, **kwargs):
"""Parallel version of map()."""
with futures.ThreadPoolExecutor(max_threads) as executor:
if show_pbar:
# pylint: disable=g-import-not-at-top
import tqdm
results = tqdm.tqdm(
executor.map(f, iterable, **kwargs), total=len(iterable))
else:
results = executor.map(f, iterable, **kwargs)
return list(results)
def parallel_tree_map(f, tree, **kwargs):
"""Parallel version of jax.tree_map."""
leaves, treedef = jax.tree_flatten(tree)
results = parallel_map(f, leaves, **kwargs)
return jax.tree_unflatten(treedef, results)
def strided_subset(sequence, count):
"""Returns a strided subset of a list."""
if count:
stride = max(1, len(sequence) // count)
return sequence[::stride]
return sequence
def tree_collate(list_of_pytrees):
"""Collates a list of pytrees with the same structure."""
return tree_util.tree_multimap(lambda *x: np.stack(x), *list_of_pytrees)
@contextlib.contextmanager
def print_time(name):
"""Records the time elapsed."""
start = time.time()
yield
elapsed = time.time() - start
print(f'[{name}] time elapsed: {elapsed:.04f}')
class ValueMeter:
"""Tracks the average of a value."""
def __init__(self):
self._values = []
def reset(self):
"""Resets the meter."""
self._values.clear()
def update(self, value):
"""Adds a value to the meter."""
self._values.append(value)
def reduce(self, reduction='mean'):
"""Reduces the tracked values."""
if reduction == 'mean':
return np.mean(self._values)
elif reduction == 'std':
return np.std(self._values)
elif reduction == 'last':
return self._values[-1]
else:
raise ValueError(f'Unknown reduction {reduction}')
class TimeTracker:
"""Tracks the average time elapsed over multiple steps."""
def __init__(self):
self._meters = collections.defaultdict(ValueMeter)
self._marked_time = collections.defaultdict(float)
@contextlib.contextmanager
def record_time(self, key: str):
"""Records the time elapsed."""
start = time.time()
yield
elapsed = time.time() - start
self.update(key, elapsed)
def update(self, key, value):
"""Updates the time value for a given key."""
self._meters[key].update(value)
def tic(self, *args):
"""Marks the starting time of an event."""
for key in args:
self._marked_time[key] = time.time()
def toc(self, *args):
"""Records the time elapsed based on the previous call to `tic`."""
for key in args:
self.update(key, time.time() - self._marked_time[key])
del self._marked_time[key]
def reset(self):
"""Resets all time meters."""
for meter in self._meters.values():
meter.reset()
def summary(self, reduction='mean'):
"""Returns a dictionary of reduced times."""
time_dict = {k: v.reduce(reduction) for k, v in self._meters.items()}
if 'total' not in time_dict:
time_dict['total'] = sum(time_dict.values())
time_dict['steps_per_sec'] = 1.0 / time_dict['total']
return time_dict
def summary_str(self, reduction='mean'):
"""Returns a string of reduced times."""
strings = [f'{k}={v:.04f}' for k, v in self.summary(reduction).items()]
return ', '.join(strings)
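# Minimal usage sketch: time two phases of a training loop and report averages.
#
#     tracker = TimeTracker()
#     for batch in batches:                   # `batches` is hypothetical
#         with tracker.record_time('data'):
#             batch = to_device(batch)
#         with tracker.record_time('step'):
#             train_step(batch)               # `train_step` is hypothetical
#     print(tracker.summary_str())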
|
|
from __future__ import print_function, division
from collections import MutableMapping, defaultdict
from sympy.core import (Add, Mul, Pow, Integer, Number, NumberSymbol,)
from sympy.core.numbers import ImaginaryUnit
from sympy.core.sympify import _sympify
from sympy.core.rules import Transform
from sympy.core.logic import fuzzy_or, fuzzy_and
from sympy.matrices.expressions import MatMul
from sympy.functions.elementary.complexes import Abs
from sympy.assumptions.ask import Q
from sympy.assumptions.assume import Predicate, AppliedPredicate
from sympy.logic.boolalg import (Equivalent, Implies, And, Or,
BooleanFunction, Not)
# APIs here may be subject to change
# XXX: Better name?
class UnevaluatedOnFree(BooleanFunction):
"""
Represents a Boolean function that remains unevaluated on free predicates
This is intended to be a superclass of other classes, which define the
behavior on singly applied predicates.
A free predicate is a predicate that is not applied, or a combination
thereof. For example, Q.zero or Or(Q.positive, Q.negative).
A singly applied predicate is a free predicate applied everywhere to a
single expression. For instance, Q.zero(x) and Or(Q.positive(x*y),
Q.negative(x*y)) are singly applied, but Or(Q.positive(x), Q.negative(y))
and Or(Q.positive, Q.negative(y)) are not.
The boolean literals True and False are considered to be both free and
singly applied.
This class raises ValueError unless the input is a free predicate or a
singly applied predicate.
On a free predicate, this class remains unevaluated. On a singly applied
predicate, the method apply() is called and returned, or the original
expression returned if apply() returns None. When apply() is called,
self.expr is set to the unique expression that the predicates are applied
at. self.pred is set to the free form of the predicate.
The typical usage is to create this class with free predicates and
evaluate it using .rcall().
"""
def __new__(cls, arg):
# Mostly type checking here
arg = _sympify(arg)
predicates = arg.atoms(Predicate)
applied_predicates = arg.atoms(AppliedPredicate)
if predicates and applied_predicates:
raise ValueError("arg must be either completely free or singly applied")
if not applied_predicates:
obj = BooleanFunction.__new__(cls, arg)
obj.pred = arg
obj.expr = None
return obj
predicate_args = set([pred.args[0] for pred in applied_predicates])
if len(predicate_args) > 1:
raise ValueError("The AppliedPredicates in arg must be applied to a single expression.")
obj = BooleanFunction.__new__(cls, arg)
obj.expr = predicate_args.pop()
obj.pred = arg.xreplace(Transform(lambda e: e.func, lambda e:
isinstance(e, AppliedPredicate)))
applied = obj.apply()
if applied is None:
return obj
return applied
def apply(self):
return
class AllArgs(UnevaluatedOnFree):
"""
Class representing vectorizing a predicate over all the .args of an
expression
See the docstring of UnevaluatedOnFree for more information on this
class.
The typical usage is to evaluate predicates with expressions using .rcall().
Example
=======
>>> from sympy.assumptions.sathandlers import AllArgs
>>> from sympy import symbols, Q
>>> x, y = symbols('x y')
>>> a = AllArgs(Q.positive | Q.negative)
>>> a
AllArgs(Or(Q.negative, Q.positive))
>>> a.rcall(x*y)
And(Or(Q.negative(x), Q.positive(x)), Or(Q.negative(y), Q.positive(y)))
"""
def apply(self):
return And(*[self.pred.rcall(arg) for arg in self.expr.args])
class AnyArgs(UnevaluatedOnFree):
"""
Class representing vectorizing a predicate over any of the .args of an
expression.
See the docstring of UnevaluatedOnFree for more information on this
class.
The typical usage is to evaluate predicates with expressions using .rcall().
Example
=======
>>> from sympy.assumptions.sathandlers import AnyArgs
>>> from sympy import symbols, Q
>>> x, y = symbols('x y')
>>> a = AnyArgs(Q.positive & Q.negative)
>>> a
AnyArgs(And(Q.negative, Q.positive))
>>> a.rcall(x*y)
Or(And(Q.negative(x), Q.positive(x)), And(Q.negative(y), Q.positive(y)))
"""
def apply(self):
return Or(*[self.pred.rcall(arg) for arg in self.expr.args])
class ExactlyOneArg(UnevaluatedOnFree):
"""
Class representing a predicate holding on exactly one of the .args of an
expression.
See the docstring of UnevaluatedOnFree for more information on this
class.
The typical usage is to evaluate predicate with expressions using
.rcall().
Example
=======
>>> from sympy.assumptions.sathandlers import ExactlyOneArg
>>> from sympy import symbols, Q
>>> x, y = symbols('x y')
>>> a = ExactlyOneArg(Q.positive)
>>> a
ExactlyOneArg(Q.positive)
>>> a.rcall(x*y)
Or(And(Not(Q.positive(x)), Q.positive(y)), And(Not(Q.positive(y)), Q.positive(x)))
"""
def apply(self):
expr = self.expr
pred = self.pred
pred_args = [pred.rcall(arg) for arg in expr.args]
# Technically this is xor, but if one term in the disjunction is true,
# it is not possible for the remainder to be true, so regular or is
# fine in this case.
return Or(*[And(pred_args[i], *map(Not, pred_args[:i] +
pred_args[i+1:])) for i in range(len(pred_args))])
        # Note: this is the equivalent cnf form. The above is more efficient
        # as the first argument of an implication, since p >> q is the same as
        # q | ~p, so the ~ will convert the Or to an And, and one just needs
        # to distribute the q across it to get to cnf.
# return And(*[Or(*map(Not, c)) for c in combinations(pred_args, 2)]) & Or(*pred_args)
def _old_assump_replacer(obj):
# Things to be careful of:
# - real means real or infinite in the old assumptions.
# - nonzero does not imply real in the old assumptions.
# - finite means finite and not zero in the old assumptions.
if not isinstance(obj, AppliedPredicate):
return obj
e = obj.args[0]
ret = None
if obj.func == Q.positive:
ret = fuzzy_and([e.is_finite, e.is_positive])
if obj.func == Q.zero:
ret = e.is_zero
if obj.func == Q.negative:
ret = fuzzy_and([e.is_finite, e.is_negative])
if obj.func == Q.nonpositive:
ret = fuzzy_and([e.is_finite, e.is_nonpositive])
if obj.func == Q.nonzero:
ret = fuzzy_and([e.is_real, e.is_finite, e.is_nonzero])
if obj.func == Q.nonnegative:
ret = fuzzy_and([fuzzy_or([e.is_zero, e.is_finite]),
e.is_nonnegative])
if obj.func == Q.rational:
ret = e.is_rational
if obj.func == Q.irrational:
ret = e.is_irrational
if obj.func == Q.even:
ret = e.is_even
if obj.func == Q.odd:
ret = e.is_odd
if obj.func == Q.integer:
ret = e.is_integer
if obj.func == Q.imaginary:
ret = e.is_imaginary
if obj.func == Q.commutative:
ret = e.is_commutative
if ret is None:
return obj
return ret
def evaluate_old_assump(pred):
"""
    Replace applied predicates in an expression with their values from the old
    assumptions system (like Q.negative(-1) => True). Useful because some direct
    computations for numeric objects are defined most conveniently in the old
    assumptions.
"""
return pred.xreplace(Transform(_old_assump_replacer))
class CheckOldAssump(UnevaluatedOnFree):
def apply(self):
return Equivalent(self.args[0], evaluate_old_assump(self.args[0]))
class CheckIsPrime(UnevaluatedOnFree):
def apply(self):
from sympy import isprime
return Equivalent(self.args[0], isprime(self.expr))
class CustomLambda(object):
"""
Interface to lambda with rcall
Workaround until we get a better way to represent certain facts.
"""
def __init__(self, lamda):
self.lamda = lamda
def rcall(self, *args):
return self.lamda(*args)
class ClassFactRegistry(MutableMapping):
"""
Register handlers against classes
``registry[C] = handler`` registers ``handler`` for class
``C``. ``registry[C]`` returns a set of handlers for class ``C``, or any
of its superclasses.
"""
def __init__(self, d=None):
d = d or {}
self.d = defaultdict(frozenset, d)
super(ClassFactRegistry, self).__init__()
def __setitem__(self, key, item):
self.d[key] = frozenset(item)
def __getitem__(self, key):
ret = self.d[key]
for k in self.d:
if issubclass(key, k):
ret |= self.d[k]
return ret
def __delitem__(self, key):
del self.d[key]
def __iter__(self):
return self.d.__iter__()
def __len__(self):
return len(self.d)
def __repr__(self):
return repr(self.d)
fact_registry = ClassFactRegistry()
def register_fact(klass, fact, registry=fact_registry):
registry[klass] |= set([fact])
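# Hypothetical sketch: downstream code can extend the registry the same way the
# entries below do, e.g.
#     register_fact(Add, Implies(AllArgs(Q.nonnegative), Q.nonnegative))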
for klass, fact in [
(Mul, Equivalent(Q.zero, AnyArgs(Q.zero))),
(MatMul, Implies(AllArgs(Q.square), Equivalent(Q.invertible, AllArgs(Q.invertible)))),
(Add, Implies(AllArgs(Q.positive), Q.positive)),
(Mul, Implies(AllArgs(Q.positive), Q.positive)),
(Mul, Implies(AllArgs(Q.commutative), Q.commutative)),
(Mul, Implies(AllArgs(Q.real), Q.commutative)),
# This one can still be made easier to read. I think we need basic pattern
# matching, so that we can just write Equivalent(Q.zero(x**y), Q.zero(x) & Q.positive(y))
(Pow, CustomLambda(lambda power: Equivalent(Q.zero(power), Q.zero(power.base) & Q.positive(power.exp)))),
(Integer, CheckIsPrime(Q.prime)),
# Implicitly assumes Mul has more than one arg
# Would be AllArgs(Q.prime | Q.composite) except 1 is composite
(Mul, Implies(AllArgs(Q.prime), ~Q.prime)),
# More advanced prime assumptions will require inequalities, as 1 provides
# a corner case.
(Mul, Implies(AllArgs(Q.imaginary | Q.real), Implies(ExactlyOneArg(Q.imaginary), Q.imaginary))),
(Mul, Implies(AllArgs(Q.real), Q.real)),
(Add, Implies(AllArgs(Q.real), Q.real)),
    # General case: an odd number of imaginary args implies the Mul is imaginary (to be implemented).
(Mul, Implies(AllArgs(Q.real), Implies(ExactlyOneArg(Q.irrational),
Q.irrational))),
(Add, Implies(AllArgs(Q.real), Implies(ExactlyOneArg(Q.irrational),
Q.irrational))),
(Mul, Implies(AllArgs(Q.rational), Q.rational)),
(Add, Implies(AllArgs(Q.rational), Q.rational)),
(Abs, Q.nonnegative),
(Abs, Equivalent(AllArgs(~Q.zero), ~Q.zero)),
# Including the integer qualification means we don't need to add any facts
# for odd, since the assumptions already know that every integer is
# exactly one of even or odd.
(Mul, Implies(AllArgs(Q.integer), Equivalent(AnyArgs(Q.even), Q.even))),
(Abs, Implies(AllArgs(Q.even), Q.even)),
(Abs, Implies(AllArgs(Q.odd), Q.odd)),
(Add, Implies(AllArgs(Q.integer), Q.integer)),
(Add, Implies(ExactlyOneArg(~Q.integer), ~Q.integer)),
(Mul, Implies(AllArgs(Q.integer), Q.integer)),
(Mul, Implies(ExactlyOneArg(~Q.rational), ~Q.integer)),
(Abs, Implies(AllArgs(Q.integer), Q.integer)),
(Number, CheckOldAssump(Q.negative)),
(Number, CheckOldAssump(Q.zero)),
(Number, CheckOldAssump(Q.positive)),
(Number, CheckOldAssump(Q.nonnegative)),
(Number, CheckOldAssump(Q.nonzero)),
(Number, CheckOldAssump(Q.nonpositive)),
(Number, CheckOldAssump(Q.rational)),
(Number, CheckOldAssump(Q.irrational)),
(Number, CheckOldAssump(Q.even)),
(Number, CheckOldAssump(Q.odd)),
(Number, CheckOldAssump(Q.integer)),
(Number, CheckOldAssump(Q.imaginary)),
# For some reason NumberSymbol does not subclass Number
(NumberSymbol, CheckOldAssump(Q.negative)),
(NumberSymbol, CheckOldAssump(Q.zero)),
(NumberSymbol, CheckOldAssump(Q.positive)),
(NumberSymbol, CheckOldAssump(Q.nonnegative)),
(NumberSymbol, CheckOldAssump(Q.nonzero)),
(NumberSymbol, CheckOldAssump(Q.nonpositive)),
(NumberSymbol, CheckOldAssump(Q.rational)),
(NumberSymbol, CheckOldAssump(Q.irrational)),
(NumberSymbol, CheckOldAssump(Q.imaginary)),
(ImaginaryUnit, CheckOldAssump(Q.negative)),
(ImaginaryUnit, CheckOldAssump(Q.zero)),
(ImaginaryUnit, CheckOldAssump(Q.positive)),
(ImaginaryUnit, CheckOldAssump(Q.nonnegative)),
(ImaginaryUnit, CheckOldAssump(Q.nonzero)),
(ImaginaryUnit, CheckOldAssump(Q.nonpositive)),
(ImaginaryUnit, CheckOldAssump(Q.rational)),
(ImaginaryUnit, CheckOldAssump(Q.irrational)),
(ImaginaryUnit, CheckOldAssump(Q.imaginary))
]:
register_fact(klass, fact)
|
|
import math
import sortedcontainers
class UnorderableElements(TypeError):
pass
class UnhashableType(TypeError):
pass
class Histogram(sortedcontainers.SortedDict):
@classmethod
def from_dict(cls, in_dict, *args, **kwargs):
self = cls(*args, **kwargs)
for key, value in in_dict.items():
self[key] = value
return self
def __init__(self, data=(), **kwargs):
if "key" in kwargs:
super(Histogram, self).__init__(kwargs["key"])
else:
super(Histogram, self).__init__()
for datum in data:
self[datum] += 1
def __getitem__(self, key):
try:
result = super(Histogram, self).__getitem__(key)
except KeyError:
result = 0
except TypeError as error:
if "unorderable" in str(error):
raise UnorderableElements(error)
if "unhashable" in str(error):
msg = "Can't make histogram of unhashable type ({})".format(
type(key)
)
raise UnhashableType(msg)
raise error
return result
def __setitem__(self, key, value):
try:
result = super(Histogram, self).__setitem__(key, value)
except TypeError as error:
if "unorderable" in str(error):
raise UnorderableElements(error)
if "not supported between instances of" in str(error):
raise UnorderableElements(error)
if "unhashable" in str(error):
msg = "Can't make histogram of unhashable type ({})".format(
type(key)
)
raise UnhashableType(msg)
raise error
return result
def total(self):
"""Sum of values."""
return sum(self.values())
def _prepare_for_stats(self):
"""Removes None values and calculates total."""
clean = self._discard_value(None)
total = float(clean.total())
return clean, total
def mean(self):
"""Mean of the distribution."""
clean, total = self._prepare_for_stats()
if not total:
return None
weighted_sum = sum(key * value for key, value in clean.items())
return weighted_sum / total
def variance(self):
"""Variance of the distribution."""
clean, total = self._prepare_for_stats()
if not total:
return None
mean = self.mean()
weighted_central_moment = sum(
count * (value - mean) ** 2 for value, count in clean.items()
)
return weighted_central_moment / total
def standard_deviation(self):
"""Standard deviation of the distribution."""
clean, total = self._prepare_for_stats()
if not total:
return None
return math.sqrt(clean.variance())
def normalized(self):
"""Return a normalized version of the histogram where the values sum
to one.
"""
total = self.total()
result = Histogram()
for value, count in self.items():
try:
result[value] = count / float(total)
except UnorderableElements:
result = Histogram.from_dict(dict(result), key=hash)
result[value] = count / float(total)
return result
def _discard_value(self, value):
if value not in self:
return self
else:
return self.__class__.from_dict(
{k: v for k, v in self.items() if k is not value}
)
def max(self, include_zero=False):
"""Maximum observed value with non-zero count."""
for key, value in reversed(self.items()):
if value > 0 or include_zero:
return key
def min(self, include_zero=False):
"""Minimum observed value with non-zero count."""
for key, value in self.items():
if value > 0 or include_zero:
return key
def _quantile_function(self, alpha=0.5, smallest_count=None):
"""Return a function that returns the quantile values for this
histogram.
"""
clean, total = self._prepare_for_stats()
if not total:
return lambda q: None
smallest_observed_count = min(clean.values())
if smallest_count is None:
smallest_count = smallest_observed_count
else:
smallest_count = min(smallest_count, smallest_observed_count)
beta = alpha * smallest_count
debug_plot = []
cumulative_sum = 0.0
inverse = sortedcontainers.SortedDict()
for value, count in clean.items():
debug_plot.append((cumulative_sum / total, value))
inverse[(cumulative_sum + beta) / total] = value
cumulative_sum += count
inverse[(cumulative_sum - beta) / total] = value
debug_plot.append((cumulative_sum / total, value))
        # get maximum and minimum q values
q_min = inverse.keys()[0]
q_max = inverse.keys()[-1]
        # this stuff is helpful for debugging -- keep it in here
# for i, j in debug_plot:
# print i, j
# print ''
# for i, j in inverse.items():
# print i, j
# print ''
def function(q):
if q < 0.0 or q > 1.0:
msg = "invalid quantile {}, need `0 <= q <= 1`".format(q)
raise ValueError(msg)
elif q < q_min:
q = q_min
elif q > q_max:
q = q_max
            # if beta is positive, interpolate linearly between the points of
            # the inverse CDF; otherwise use the step-function quantile
if beta > 0:
if q in inverse:
result = inverse[q]
else:
previous_index = inverse.bisect_left(q) - 1
x1 = inverse.keys()[previous_index]
x2 = inverse.keys()[previous_index + 1]
y1 = inverse[x1]
y2 = inverse[x2]
result = (y2 - y1) * (q - x1) / float(x2 - x1) + y1
else:
if q in inverse:
previous_index = inverse.bisect_left(q) - 1
x1 = inverse.keys()[previous_index]
x2 = inverse.keys()[previous_index + 1]
y1 = inverse[x1]
y2 = inverse[x2]
result = 0.5 * (y1 + y2)
else:
previous_index = inverse.bisect_left(q) - 1
x1 = inverse.keys()[previous_index]
result = inverse[x1]
return float(result)
return function
def median(self, alpha=0.5, smallest_count=None):
return self.quantile(0.5, alpha=alpha, smallest_count=smallest_count)
def quantiles(self, q_list, alpha=0.5, smallest_count=None):
f = self._quantile_function(alpha=alpha, smallest_count=smallest_count)
return [f(q) for q in q_list]
def quantile(self, q, alpha=0.5, smallest_count=None):
return self.quantiles(
[q], alpha=alpha, smallest_count=smallest_count,
)[0]
def add(self, other):
result = Histogram()
for key, value in self.items():
result[key] += value
for key, value in other.items():
result[key] += value
return result
__add__ = add
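
# Illustrative usage sketch (not part of the original module): counting hashable
# values and reading simple statistics back out.
def _example_histogram_usage():
    h = Histogram([1, 2, 2, 3])
    assert h[2] == 2 and h[5] == 0   # missing keys count as zero
    assert h.total() == 4
    assert h.mean() == 2.0           # (1 + 2*2 + 3) / 4
    return h.normalized()            # values sum to one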
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import inspect
import math
import time
import warnings
from django import template
from django.core.cache import cache
from django.template.base import get_library, InvalidTemplateLibrary, TemplateSyntaxError, TOKEN_BLOCK
from django.template.defaulttags import LoadNode, CommentNode, IfNode
from django.template.smartif import Literal
from django.utils.safestring import mark_safe
from django.utils.encoding import smart_text
from django.utils.html import escape
from django.utils.translation import ugettext as _
from django.utils import dateformat
from django.utils.timezone import timedelta
from django.utils.timezone import now as tznow
try:
import pytils
pytils_enabled = True
except ImportError:
pytils_enabled = False
from pybb.models import TopicReadTracker, ForumReadTracker, PollAnswerUser, Topic, Post
from pybb.permissions import perms
from pybb import defaults, util
register = template.Library()
#noinspection PyUnusedLocal
@register.tag
def pybb_time(parser, token):
try:
tag, context_time = token.split_contents()
except ValueError:
raise template.TemplateSyntaxError('pybb_time requires single argument')
else:
return PybbTimeNode(context_time)
class PybbTimeNode(template.Node):
def __init__(self, time):
#noinspection PyRedeclaration
self.time = template.Variable(time)
def render(self, context):
context_time = self.time.resolve(context)
delta = tznow() - context_time
today = tznow().replace(hour=0, minute=0, second=0)
yesterday = today - timedelta(days=1)
tomorrow = today + timedelta(days=1)
if delta.days == 0:
if delta.seconds < 60:
if context['LANGUAGE_CODE'].startswith('ru') and pytils_enabled:
msg = _('seconds ago,seconds ago,seconds ago')
msg = pytils.numeral.choose_plural(delta.seconds, msg)
else:
msg = _('seconds ago')
return '%d %s' % (delta.seconds, msg)
elif delta.seconds < 3600:
minutes = int(delta.seconds / 60)
if context['LANGUAGE_CODE'].startswith('ru') and pytils_enabled:
msg = _('minutes ago,minutes ago,minutes ago')
msg = pytils.numeral.choose_plural(minutes, msg)
else:
msg = _('minutes ago')
return '%d %s' % (minutes, msg)
if context['user'].is_authenticated():
if time.daylight:
tz1 = time.altzone
else:
tz1 = time.timezone
tz = tz1 + util.get_pybb_profile(context['user']).time_zone * 60 * 60
context_time = context_time + timedelta(seconds=tz)
if today < context_time < tomorrow:
return _('today, %s') % context_time.strftime('%I:%M %p')
elif yesterday < context_time < today:
return _('yesterday, %s') % context_time.strftime('%I:%M %p')
else:
return dateformat.format(context_time, 'd M, Y H:i')
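
# Illustrative template usage (the {% load %} module name is assumed, not taken
# from the original file):
#
#     {% load pybb_tags %}
#     {% pybb_time post.created %}
#
# The tag resolves its single argument against the context and renders a
# humanized time string ("N seconds ago", "today, 09:15 AM", or a full date).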
@register.simple_tag
def pybb_link(object, anchor=''):
"""
    Return an <a> tag linking to the object.
"""
url = hasattr(object, 'get_absolute_url') and object.get_absolute_url() or None
#noinspection PyRedeclaration
anchor = anchor or smart_text(object)
return mark_safe('<a href="%s">%s</a>' % (url, escape(anchor)))
@register.filter
def pybb_topic_moderated_by(topic, user):
"""
Check if user is moderator of topic's forum.
"""
warnings.warn("pybb_topic_moderated_by filter is deprecated and will be removed in later releases. "
"Use pybb_may_moderate_topic(user, topic) filter instead",
DeprecationWarning)
return perms.may_moderate_topic(user, topic)
@register.filter
def pybb_editable_by(post, user):
"""
Check if the post could be edited by the user.
"""
warnings.warn("pybb_editable_by filter is deprecated and will be removed in later releases. "
"Use pybb_may_edit_post(user, post) filter instead",
DeprecationWarning)
return perms.may_edit_post(user, post)
@register.filter
def pybb_posted_by(post, user):
"""
    Check if the post was written by the user.
"""
return post.user == user
@register.filter
def pybb_is_topic_unread(topic, user):
if not user.is_authenticated():
return False
last_topic_update = topic.updated or topic.created
unread = not ForumReadTracker.objects.filter(
forum=topic.forum,
user=user.id,
time_stamp__gte=last_topic_update).exists()
unread &= not TopicReadTracker.objects.filter(
topic=topic,
user=user.id,
time_stamp__gte=last_topic_update).exists()
return unread
@register.filter
def pybb_topic_unread(topics, user):
"""
Mark all topics in queryset/list with .unread for target user
"""
topic_list = list(topics)
if user.is_authenticated():
for topic in topic_list:
topic.unread = True
forums_ids = [f.forum_id for f in topic_list]
forum_marks = dict([(m.forum_id, m.time_stamp)
for m
in ForumReadTracker.objects.filter(user=user, forum__in=forums_ids)])
if len(forum_marks):
for topic in topic_list:
topic_updated = topic.updated or topic.created
if topic.forum.id in forum_marks and topic_updated <= forum_marks[topic.forum.id]:
topic.unread = False
qs = TopicReadTracker.objects.filter(user=user, topic__in=topic_list).select_related('topic')
topic_marks = list(qs)
topic_dict = dict(((topic.id, topic) for topic in topic_list))
for mark in topic_marks:
if topic_dict[mark.topic.id].updated <= mark.time_stamp:
topic_dict[mark.topic.id].unread = False
return topic_list
@register.filter
def pybb_forum_unread(forums, user):
"""
Check if forum has unread messages.
"""
forum_list = list(forums)
if user.is_authenticated():
for forum in forum_list:
forum.unread = forum.topic_count > 0
forum_marks = ForumReadTracker.objects.filter(
user=user,
forum__in=forum_list
).select_related('forum')
forum_dict = dict(((forum.id, forum) for forum in forum_list))
for mark in forum_marks:
curr_forum = forum_dict[mark.forum.id]
if (curr_forum.updated is None) or (curr_forum.updated <= mark.time_stamp):
if not any((f.unread for f in pybb_forum_unread(curr_forum.child_forums.all(), user))):
forum_dict[mark.forum.id].unread = False
return forum_list
@register.filter
def pybb_topic_inline_pagination(topic):
page_count = int(math.ceil(topic.post_count / float(defaults.PYBB_TOPIC_PAGE_SIZE)))
if page_count <= 5:
return range(1, page_count+1)
return range(1, 5) + ['...', page_count]
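# Illustrative sketch (not part of the original file): an 8-page topic yields
# [1, 2, 3, 4, '...', 8]; topics with 5 pages or fewer get the full range
# [1, ..., page_count].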
@register.filter
def pybb_topic_poll_not_voted(topic, user):
return not PollAnswerUser.objects.filter(poll_answer__topic=topic, user=user).exists()
@register.filter
def endswith(str, substr):
return str.endswith(substr)
@register.assignment_tag
def pybb_get_profile(*args, **kwargs):
try:
return util.get_pybb_profile(kwargs.get('user') or args[0])
except:
return util.get_pybb_profile_model().objects.none()
@register.assignment_tag(takes_context=True)
def pybb_get_latest_topics(context, cnt=5, user=None):
qs = Topic.objects.all().order_by('-updated', '-created', '-id')
if not user:
user = context['user']
qs = perms.filter_topics(user, qs)
return qs[:cnt]
@register.assignment_tag(takes_context=True)
def pybb_get_latest_posts(context, cnt=5, user=None):
qs = Post.objects.all().order_by('-created', '-id')
if not user:
user = context['user']
qs = perms.filter_posts(user, qs)
return qs[:cnt]
def load_perms_filters():
def partial(func_name, perms_obj):
def newfunc(user, obj):
return getattr(perms_obj, func_name)(user, obj)
return newfunc
def partial_no_param(func_name, perms_obj):
def newfunc(user):
return getattr(perms_obj, func_name)(user)
return newfunc
for method in inspect.getmembers(perms):
if inspect.ismethod(method[1]) and inspect.getargspec(method[1]).args[0] == 'self' and\
(method[0].startswith('may') or method[0].startswith('filter')):
if len(inspect.getargspec(method[1]).args) == 3:
register.filter('%s%s' % ('pybb_', method[0]), partial(method[0], perms))
elif len(inspect.getargspec(method[1]).args) == 2: # only user should be passed to permission method
register.filter('%s%s' % ('pybb_', method[0]), partial_no_param(method[0], perms))
load_perms_filters()
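# Illustrative sketch (not part of the original file): load_perms_filters()
# exposes each perms.may_*/filter_* method as a template filter prefixed with
# "pybb_", so perms.may_edit_post(user, post) becomes usable in templates as
# {{ user|pybb_may_edit_post:post }}.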
# next two tags copied from https://bitbucket.org/jaap3/django-friendly-tag-loader
@register.tag
def friendly_load(parser, token):
"""
    Tries to load a custom template tag set. Non-existing tag libraries
    are ignored.
    This means that, if used in conjunction with ``if_has_tag``, you can try to
load the comments template tag library to enable comments even if the
comments framework is not installed.
For example::
{% load friendly_loader %}
{% friendly_load comments webdesign %}
{% if_has_tag render_comment_list %}
{% render_comment_list for obj %}
{% else %}
{% if_has_tag lorem %}
{% lorem %}
{% endif_has_tag %}
{% endif_has_tag %}
"""
bits = token.contents.split()
for taglib in bits[1:]:
try:
lib = get_library(taglib)
parser.add_library(lib)
except InvalidTemplateLibrary:
pass
return LoadNode()
@register.tag
def if_has_tag(parser, token):
"""
The logic for both ``{% if_has_tag %}`` and ``{% if not_has_tag %}``.
Checks if all the given tags exist (or not exist if ``negate`` is ``True``)
and then only parses the branch that will not error due to non-existing
tags.
This means that the following is essentially the same as a
``{% comment %}`` tag::
{% if_has_tag non_existing_tag %}
{% non_existing_tag %}
{% endif_has_tag %}
    Another example is checking a built-in tag. This will always render the
current year and never FAIL::
{% if_has_tag now %}
{% now \"Y\" %}
{% else %}
FAIL
{% endif_has_tag %}
"""
bits = list(token.split_contents())
if len(bits) < 2:
        raise TemplateSyntaxError("%r takes at least one argument" % bits[0])
end_tag = 'end%s' % bits[0]
has_tag = all([tag in parser.tags for tag in bits[1:]])
nodelist_true = nodelist_false = CommentNode()
if has_tag:
nodelist_true = parser.parse(('else', end_tag))
token = parser.next_token()
if token.contents == 'else':
parser.skip_past(end_tag)
else:
while parser.tokens:
token = parser.next_token()
if token.token_type == TOKEN_BLOCK and token.contents == end_tag:
try:
return IfNode([(Literal(has_tag), nodelist_true),
(None, nodelist_false)])
except TypeError: # < 1.4
return IfNode(Literal(has_tag), nodelist_true, nodelist_false)
elif token.token_type == TOKEN_BLOCK and token.contents == 'else':
break
nodelist_false = parser.parse((end_tag,))
token = parser.next_token()
try:
return IfNode([(Literal(has_tag), nodelist_true),
(None, nodelist_false)])
except TypeError: # < 1.4
return IfNode(Literal(has_tag), nodelist_true, nodelist_false)
@register.filter
def pybbm_calc_topic_views(topic):
cache_key = util.build_cache_key('anonymous_topic_views', topic_id=topic.id)
return topic.views + cache.get(cache_key, 0)
|
|
##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
from IECore import *
import IECoreRI
import os
class InstancingTest( unittest.TestCase ) :
def test( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/instancing.rib" )
r.instanceBegin( "myObject", {} )
r.geometry( "teapot", {}, {} )
r.instanceEnd()
r.worldBegin()
r.instance( "myObject" )
r.worldEnd()
rib = "".join( open( "test/IECoreRI/output/instancing.rib" ).readlines() )
self.assert_( "ObjectBegin" in rib )
self.assert_( "ObjectEnd" in rib )
self.assert_( "ObjectInstance" in rib )
def test2( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/instancing.rib" )
r.instanceBegin( "myObject", {} )
r.geometry( "teapot", {}, {} )
r.instanceEnd()
r.worldBegin()
r.instance( "myObject" )
r.worldEnd()
rib = "".join( open( "test/IECoreRI/output/instancing.rib" ).readlines() )
self.assert_( "ObjectBegin" in rib )
self.assert_( "ObjectEnd" in rib )
self.assert_( "ObjectInstance" in rib )
def testInstancingAPI( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/instancing3.rib" )
r.instanceBegin( "myObject", {} )
r.geometry( "teapot", {}, {} )
r.instanceEnd()
r.worldBegin()
r.instance( "myObject" )
r.worldEnd()
rib = "".join( open( "test/IECoreRI/output/instancing3.rib" ).readlines() )
self.assert_( "ObjectBegin" in rib )
self.assert_( "ObjectEnd" in rib )
self.assert_( "ObjectInstance" in rib )
def testInstancingAcrossProcedurals( self ) :
class InstanceInheritingProcedural( Renderer.Procedural ) :
def __init__( self, root = True ) :
Renderer.Procedural.__init__( self )
self.__root = root
def bound( self ) :
return Box3f( V3f( -1 ), V3f( 1 ) )
def render( self, renderer ) :
if self.__root :
renderer.instanceBegin( "myLovelySphere", {} )
renderer.sphere( 1, -1, 1, 360, {} )
renderer.instanceEnd()
renderer.procedural( InstanceInheritingProcedural( False ) )
else :
renderer.instance( "myLovelySphere" )
def hash( self ) :
h = MurmurHash()
return h
# test writing a rib
r = IECoreRI.Renderer( "test/IECoreRI/output/instancing.rib" )
with WorldBlock( r ) :
r.procedural( InstanceInheritingProcedural() )
rib = "".join( open( "test/IECoreRI/output/instancing.rib" ).readlines() )
self.failUnless( "ObjectInstance \"myLovelySphere\"" in rib )
# test doing an actual render
r = IECoreRI.Renderer( "" )
r.display( "test", "ie", "rgba",
{
"driverType" : StringData( "ImageDisplayDriver" ),
"handle" : StringData( "myLovelySphere" ),
"quantize" : FloatVectorData( [ 0, 0, 0, 0 ] ),
}
)
with WorldBlock( r ) :
r.concatTransform( M44f.createTranslated( V3f( 0, 0, -10 ) ) )
r.procedural( InstanceInheritingProcedural() )
image = ImageDisplayDriver.removeStoredImage( "myLovelySphere" )
e = ImagePrimitiveEvaluator( image )
r = e.createResult()
e.pointAtUV( V2f( 0.5 ), r )
self.failUnless( r.floatPrimVar( image["R"] ) > 0.95 )
def testInstancingWithThreadedProcedurals( self ) :
class InstanceMakingProcedural( Renderer.Procedural ) :
def __init__( self, instanceName ) :
Renderer.Procedural.__init__( self )
self.__instanceName = instanceName
def bound( self ) :
return Box3f( V3f( -10 ), V3f( 10 ) )
def render( self, renderer ) :
renderer.instanceBegin( self.__instanceName, {} )
renderer.sphere( 1, -1, 1, 360, {} )
renderer.instanceEnd()
renderer.instance( self.__instanceName )
def hash( self ) :
h = MurmurHash()
return h
initThreads()
r = IECoreRI.Renderer( "" )
with WorldBlock( r ) :
r.concatTransform( M44f.createTranslated( V3f( 0, 0, -20 ) ) )
for i in range( 0, 100 ) :
r.procedural( InstanceMakingProcedural( "instance%d" % i ) )
def testProceduralLevelInstancing( self ) :
if IECoreRI.withRiProceduralV():
class InstanceTestProcedural( Renderer.Procedural ) :
renderCount = 0
def __init__( self, instanceHash ) :
Renderer.Procedural.__init__( self )
self.__instanceHash = instanceHash
def bound( self ) :
return Box3f( V3f( -10 ), V3f( 10 ) )
def render( self, renderer ) :
InstanceTestProcedural.renderCount = InstanceTestProcedural.renderCount + 1
pass
def hash( self ) :
return self.__instanceHash
r = IECoreRI.Renderer("")
# give it a camera using the ray trace hider, and turn shareinstances on:
r.camera( "main", {
"resolution" : V2iData( V2i( 1024, 200 ) ),
"screenWindow" : Box2fData( Box2f( V2f( -1 ), V2f( 1 ) ) ),
"cropWindow" : Box2fData( Box2f( V2f( 0.1, 0.1 ), V2f( 0.9, 0.9 ) ) ),
"clippingPlanes" : V2fData( V2f( 1, 1000 ) ),
"projection" : StringData( "perspective" ),
"projection:fov" : FloatData( 45 ),
"ri:hider" : StringData( "raytrace" ),
} )
r.setOption( "ri:trace:shareinstances", IntData( 1 ) )
# chuck a couple of procedurals at it:
h1 = MurmurHash()
h2 = MurmurHash()
h1.append( "instance1" )
h2.append( "instance2" )
with WorldBlock( r ) :
r.procedural( InstanceTestProcedural(h1) )
r.procedural( InstanceTestProcedural(h1) )
r.procedural( InstanceTestProcedural(h2) )
r.procedural( InstanceTestProcedural(h2) )
# only two unique instances here, as there were 2 unique hashes....
self.assertEqual( InstanceTestProcedural.renderCount, 2 )
InstanceTestProcedural.renderCount = 0
# the system shouldn't perform instancing when the hash method returns empty hashes:
with WorldBlock( r ) :
r.procedural( InstanceTestProcedural( MurmurHash() ) )
r.procedural( InstanceTestProcedural( MurmurHash() ) )
r.procedural( InstanceTestProcedural( MurmurHash() ) )
r.procedural( InstanceTestProcedural( MurmurHash() ) )
self.assertEqual( InstanceTestProcedural.renderCount, 4 )
def testParameterisedProceduralInstancing( self ) :
if IECoreRI.withRiProceduralV():
class InstanceTestParamProcedural( ParameterisedProcedural ) :
renderCount = 0
def __init__( self ) :
ParameterisedProcedural.__init__( self, "Instancing test" )
self.parameters().addParameters(
[
BoolParameter(
name = "p1",
description = "whatever.",
defaultValue = True,
),
StringParameter(
name = "p2",
description = "yup.",
defaultValue = "blah"
),
]
)
def doBound( self, args ) :
return Box3f( V3f( -10 ), V3f( 10 ) )
def doRender( self, renderer, args ) :
InstanceTestParamProcedural.renderCount = InstanceTestParamProcedural.renderCount + 1
pass
r = IECoreRI.Renderer("")
# give it a camera using the ray trace hider, and turn shareinstances on:
r.camera( "main", {
"resolution" : V2iData( V2i( 1024, 200 ) ),
"screenWindow" : Box2fData( Box2f( V2f( -1 ), V2f( 1 ) ) ),
"cropWindow" : Box2fData( Box2f( V2f( 0.1, 0.1 ), V2f( 0.9, 0.9 ) ) ),
"clippingPlanes" : V2fData( V2f( 1, 1000 ) ),
"projection" : StringData( "perspective" ),
"projection:fov" : FloatData( 45 ),
"ri:hider" : StringData( "raytrace" ),
} )
r.setOption( "ri:trace:shareinstances", IntData( 1 ) )
# chuck a couple of procedurals at it:
proc1 = InstanceTestParamProcedural()
proc2 = InstanceTestParamProcedural()
proc1["p1"].setValue( False )
proc1["p2"].setValue( StringData( "humpf" ) )
with WorldBlock( r ) :
proc1.render( r )
proc2.render( r )
proc2.render( r )
proc2.render( r )
# only two unique instances here....
self.assertEqual( InstanceTestParamProcedural.renderCount, 2 )
def tearDown( self ) :
files = [
"test/IECoreRI/output/instancing.rib",
"test/IECoreRI/output/instancing2.rib",
"test/IECoreRI/output/instancing3.rib",
]
for f in files :
if os.path.exists( f ):
os.remove( f )
if __name__ == "__main__":
unittest.main()
|
|
from __future__ import division
import numpy as np
na = np.newaxis
import scipy.weave
import pyhsmm
import os
eigen_path = os.path.join(os.path.dirname(__file__),'../../../deps/Eigen3')
######################################################
# used by pyhsmm.plugins.factorial.factorial class #
######################################################
class FactorialStates(object):
def __init__(self,component_models,data=None,T=None,keep=False,**kwargs):
# kwargs is for changepoints, passed to
# component_model.add_factorial_sumdata
# keep is used only when calling models.factorial.generate()
self.component_models = component_models
self.states_list = []
if data is not None:
assert data.ndim == 1 or data.ndim == 2
self.data = np.reshape(data,(-1,1))
T = data.shape[0]
for c in component_models:
c.add_factorial_sumdata(data=data,**kwargs)
self.states_list.append(c.states_list[-1])
self.states_list[-1].allstates_obj = self # give a reference to self
# the added states object will get its resample() method called, but
# since that object doesn't do anything at the moment,
                # resample_factorial needs to be called here to initialize
# s.stateseq
self.states_list[-1].generate_states()
else:
# generating from the prior
allobs = np.zeros((T,len(component_models)))
allstates = np.zeros((T,len(component_models)),dtype=np.int32)
assert T is not None, 'need to pass in either T (when generating) or data'
for idx,c in enumerate(component_models):
allobs[:,idx],allstates[:,idx] = c.generate(T=T,keep=keep,**kwargs)
self.states_list.append(c.states_list[-1])
self.states_list[-1].allstates_obj = self # give a reference to self
self.sumobs = allobs.sum(1)
self.data = np.reshape(self.sumobs,(-1,1))
self.allstates = allstates
self.allobs = allobs
# track museqs and varseqs so they don't have to be rebuilt too much
# NOTE: component_models must have scalar gaussian observation
# distributions! this part is one of those that requires it!
self.museqs = np.zeros((T,len(self.component_models)))
self.varseqs = np.zeros((T,len(self.component_models)))
for idx, (c,s) in enumerate(zip(component_models,self.states_list)):
self.museqs[:,idx] = c.means[s.stateseq]
self.varseqs[:,idx] = c.vars[s.stateseq]
# build eigen codestr
self.codestr = base_codestr % {'T':T,'K':len(component_models)}
# just to avoid extra malloc calls... used in
# self._get_other_mean_var_seqs
self.summers = np.ones((len(self.component_models),len(self.component_models))) \
- np.eye(len(self.component_models))
def resample(self,**kwargs): # kwargs is for temp stuff
# tell each chain to resample its statesequence, then update the
# corresponding rows of museqs and varseqs
# also, delete instantiated emissions
for idx, (c,s) in enumerate(zip(self.component_models,self.states_list)):
if 'data' in s.__dict__:
del s.data
s.resample_factorial(**kwargs)
self.museqs[:,idx] = c.means[s.stateseq]
self.varseqs[:,idx] = c.vars[s.stateseq]
def instantiate_component_emissions(self,temp_noise=0.):
# get the emissions
emissions = self._sample_component_emissions(temp_noise).T.copy() # emissions is now ncomponents x T
        # add the emissions to each component's states list
for e, s in zip(emissions,self.states_list):
s.data = e
# this method is called by the members of self.states_list; it's them asking
# for a sum of part of self.museqs and self.varseqs
def _get_other_mean_var_seqs(self,statesobj):
statesobjindex = self.states_list.index(statesobj)
return self.museqs.dot(self.summers[statesobjindex]), \
self.varseqs.dot(self.summers[statesobjindex])
def _sample_component_emissions_python(self,temp_noise=0.):
# this algorithm is 'safe' but it computes lots of unnecessary cholesky
# factorizations. the eigen code uses a smart custom cholesky downdate
K,T = len(self.component_models), self.data.shape[0]
contributions = np.zeros((T,K))
meanseq = self.museqs
varseq = self.varseqs
tots = varseq.sum(1)[:,na] + temp_noise
post_meanseq = meanseq + varseq * ((self.data - meanseq.sum(1)[:,na]) / tots)
for t in range(T):
contributions[t] = np.dot(np.linalg.cholesky(np.diag(varseq[t]) -
1./tots[t] * np.outer(varseq[t],varseq[t])),np.random.randn(K)) + post_meanseq[t]
return contributions
def _sample_component_emissions_eigen(self,temp_noise=0.):
# NOTE: this version does a smart cholesky downdate
K,T = len(self.component_models), self.data.shape[0]
contributions = np.zeros((T,K))
G = np.random.randn(T,K)
meanseq = self.museqs
varseq = self.varseqs
tots = varseq.sum(1)[:,na] + temp_noise
post_meanseq = meanseq + varseq * ((self.data - meanseq.sum(1)[:,na]) / tots)
noise_variance = temp_noise
scipy.weave.inline(self.codestr,['varseq','meanseq','post_meanseq','G','contributions','noise_variance'],
headers=['<Eigen/Core>'],include_dirs=[eigen_path],extra_compile_args=['-O3'],
verbose=0)
return contributions
_sample_component_emissions = _sample_component_emissions_eigen # NOTE: set this to choose python or eigen
####################################################################
# used by pyhsmm.plugins.factorial.factorial_component_* classes #
####################################################################
# the only difference between these and standard hsmm or hmm states classes is
# that they have special resample_factorial methods for working with
# the case where component emissions are marginalized out. they also have a
# no-op resample method, since that method might be called by the resample
# method in an hsmm or hmm model class and assumes instantiated data
# essentially, we only want these state sequences to be resampled when a
# factorial_allstates objects tells them to be resampled
# NOTE: component_models must have scalar gaussian observation
# distributions! this code, which references the same cached means and vars as
# the models, requires it!
class FactorialComponentHSMMStates(pyhsmm.internals.states.HSMMStatesPython):
def __init__(self,means,vars,**kwargs):
self.means = means
self.vars = vars
super(FactorialComponentHSMMStates,self).__init__(**kwargs)
def resample(self):
pass
def resample_factorial(self,temp_noise=0.):
self.temp_noise = temp_noise
super(FactorialComponentHSMMStates,self).resample()
del self.temp_noise
# NOTE: component_models must have scalar gaussian observation
# distributions! this code requires it!
@property
def aBl(self):
if (not hasattr(self,'_aBl')) or (self._aBl is None):
mymeans = self.means # 1D, length state_dim
myvars = self.vars # 1D, length state_dim
sumothermeansseq, sumothervarsseq = self.allstates_obj._get_other_mean_var_seqs(self)
sumothermeansseq.shape = (-1,1) # 2D, T x 1
sumothervarsseq.shape = (-1,1) # 2D, T x 1
sigmasq = myvars + sumothervarsseq + self.temp_noise
self._aBl = -0.5*(self.allstates_obj.data - sumothermeansseq - mymeans)**2/sigmasq \
- np.log(np.sqrt(2*np.pi*sigmasq))
return self._aBl
class FactorialComponentHSMMStatesPossibleChangepoints(
FactorialComponentHSMMStates,
pyhsmm.internals.states.HSMMStatesPossibleChangepoints):
def __init__(self,means,vars,**kwargs):
assert 'changepoints' in kwargs, 'must pass in a changepoints argument!'
self.means = means
self.vars = vars
pyhsmm.internals.states.HSMMStatesPossibleChangepoints.__init__(self,**kwargs) # second parent
# TODO hmm versions below here
# class factorial_component_hmm_states(pyhsmm.internals.states.hmm_states_python):
# def resample(self):
# pass
# def resample_factorial(self,temp_noise=0.):
# self.temp_noise = temp_noise
# super(factorial_component_hsmm_states,self).resample()
# del self.temp_noise
# def get_abl(self,data):
# raise notimplementederror
# class factorial_component_hmm_states_possiblechangepoints(
# factorial_component_hmm_states,
# pyhsmm.internals.states.hmm_states_possiblechangepoints
# ):
# def resample(self):
# pass
# def resample_factorial(self,temp_noise=0.):
# self.temp_noise = temp_noise
# super(factorial_component_hsmm_states,self).resample()
# del self.temp_noise
########################
# global eigen stuff #
########################
# this simple method could be trouble:
# http://stackoverflow.com/questions/2632199/how-do-i-get-the-path-of-the-current-executed-file-in-python
import os
eigen_codestr_path = \
os.path.join(os.path.dirname(os.path.realpath(__file__)),'eigen_sample_component_emissions.cpp')
with open(eigen_codestr_path,'r') as infile:
base_codestr = infile.read()
|
|
from parameter_domain import ParameterDomain
from vector_calculus.containers import Vector, Tensor
from vector_calculus.operators import dot, cross
from sympy import Number, symbols, diff, Matrix, sqrt, Rational
from numpy import array, ndarray
from numpy.linalg import det
# Symbols in terms of which the mapping is defined
__symbols__ = symbols('s, t, r')
class ParametrizedSet(object):
'''Set described by mapping parameters from their ParameterDomain to R^d.'''
def __init__(self, domain, mapping, orientation='+'):
'''
Construct the set. Orientation is used to construct normal if relevant.
Positive for 2d-3d means normal = cross(d(mapping)/ds, d(mapping)/dt)
        Positive for 1d-2d means normal = rotate tangent counter-clockwise
'''
assert isinstance(domain, ParameterDomain)
# Topological dimension
self._tdim = len(domain)
        # Map is an iterable with length = gdim that specifies [x, y, z] as
# functions of parameters of domain
if not hasattr(mapping, '__len__'):
mapping = (mapping, )
# Check that the component uses known params or numbers
for comp in mapping:
comp_extras = comp.atoms() - domain.parameters
params_only = len(comp_extras) == 0
extras_are_numbers = all(isinstance(item, (float, int, Number))
for item in comp_extras)
assert params_only or extras_are_numbers, 'Invalid mapping'
# Geometrical dimension
self._gdim = len(mapping)
# I am only interested in at most 3d gdim
assert 0 < self._gdim < 4, 'Geometrical dimension %s not supported' % self._gdim
assert 0 < self._tdim < 4, 'Topological dimension %s not supported' % self._tdim
assert self._tdim <= self._gdim, 'Topolog. dim > geometric. dim not allowed'
        # We remember the mapping as a dictionary x->mapping[0], y->mapping[1], ...
xyz = symbols('x, y, z')
self._mapping = dict((var, comp) for var, comp in zip(xyz, mapping))
        # Remember the domain
self._pdomain = domain
params = domain.parameters
# Every mapping has a Jacobian but not every has normal and tangent
self._n = None
self._tau = None
        # Volumes: the Jacobian matrix is square; only the Jacobian is needed
if self._tdim == self._gdim:
Jac = Matrix([[diff(comp, var) for var in params] for comp in mapping])
self._J = abs(Jac.det())
# Curves and surfaces have normals or tangents in addition to Jacobian
else:
# Curves
if self._tdim == 1:
                # Tangent
self._tau = Vector([diff(comp, list(params)[0]) for comp in mapping])
# Jacobian is length of tangent
self._J = sqrt(sum(v**2 for v in self._tau))
# And in 2d we can define a normal
if self._gdim == 2:
R = Tensor([[0, -1], [1, 0]])
R = R if orientation == '+' else -R
self._n = dot(R, self._tau)
# Surface in 3d has normal
elif self._tdim == 2 and self._gdim == 3:
u0 = Vector([diff(comp, list(params)[0]) for comp in mapping])
u1 = Vector([diff(comp, list(params)[1]) for comp in mapping])
self._n = cross(u0, u1)
self._n = self._n if orientation == '+' else -self._n
self._J = sqrt(sum(v**2 for v in self._n))
@property
def tdim(self):
'''Topological dimension of set.'''
return self._tdim
@property
def gdim(self):
'''Geometrical dimension of set.'''
return self._gdim
def substitute(self, f):
'''Cartesian coordinates as functions of parameters.'''
return f.subs(self._mapping)
@property
def items(self):
'''Iterator over parameters of the set and their bounds.'''
return self._pdomain.items
@property
def J(self):
'''Jacobian.'''
return self._J
@property
def tau(self):
if self._tau is not None:
return self._tau
else:
raise ValueError('No tangent for set with tdim=%d and gdim=%d' %\
(self.tdim, self.gdim))
@property
def n(self):
if self._n is not None:
return self._n
else:
raise ValueError('No normal for set with tdim=%d and gdim=%d' %\
(self.tdim, self.gdim))
class SimplexSet(ParametrizedSet):
'''Simplex domain = smallest convex envelope of vertices.'''
def __init__(self, vertices):
assert 1 < len(vertices) < 5, 'Only line, triangle, tetrahedron'
gdim = len(vertices[0])
assert all(gdim == len(v) for v in vertices[1:]), \
'Vertices of different length'
# Check degeneracy
mat = array([vertex if isinstance(vertex, ndarray) else array(vertex)
for vertex in vertices])
mat -= mat[0, :]
mat = mat[1:]
assert det(mat.dot(mat.T)) > 1E-15, 'Degenerate simplex'
smbls = __symbols__
tdim = len(vertices) - 1
# Build mapping, A*(1-s) + B*s etc
foo = [1-sum([s for s in smbls[:tdim]])] + list(smbls)
mapping = tuple(sum(vtx[dim]*sym for vtx, sym in zip(vertices, foo))
for dim in range(gdim))
# Build domain (s, (0, 1)), (r, (0, 1-s)), ...
domain = tuple((smbls[dim], (0, 1-sum(s for s in smbls[:dim])))
for dim in range(tdim))
domain = ParameterDomain(*domain)
ParametrizedSet.__init__(self, domain, mapping)
class Line(SimplexSet):
'''
    Line between points A, B in R^d characterized by x = A*(1-s) + B*s for s
in [0, 1]. Here x is x, (x, y) or (x, y, z) for d=1, 2, 3.
'''
def __init__(self, A, B):
SimplexSet.__init__(self, [A, B])
class Triangle(SimplexSet):
'''
Triangle formed by vertices A, B, C in R^d, d=2, 3. The map is
A(1-s-t) + Bs + Ct for s in [0, 1] and t in [0, 1-s].
'''
def __init__(self, A, B, C):
SimplexSet.__init__(self, [A, B, C])
class Tetrahedron(SimplexSet):
'''
Tetrahedron formed by vertices A, B, C, D in R^3. The map is
A(1-s-t-r) + Bs + Ct + Dr for s in [0, 1], t in [0, 1-s] and r in [0, 1-s-t].
'''
def __init__(self, A, B, C, D):
SimplexSet.__init__(self, [A, B, C, D])
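
# Illustrative sketch (not part of the original module): the reference triangle
# with vertices (0, 0), (1, 0), (0, 1) maps to x = s, y = t, so its Jacobian is 1.
def _example_reference_triangle():
    tri = Triangle([0, 0], [1, 0], [0, 1])
    return tri.J  # sympy value equal to 1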
class CartesianSet(ParametrizedSet):
'''Domain as cartesian product of intervals.'''
def __init__(self, intervals):
assert 0 < len(intervals) < 4, 'Only 1d, 2d, 3d'
assert all(len(interval) == 2 for interval in intervals),\
'Too many points in some interval'
assert all(map(lambda pair: abs(pair[1] - pair[0]) > 1E-15, intervals)),\
'Some interval is degenerate'
assert all(map(lambda pair: pair[1] > pair[0], intervals)),\
'Not increasing interval'
tdim = len(intervals)
smbls = __symbols__
# Build domain
domain = tuple((smbl, (-1, 1)) for smbl in smbls[:tdim])
domain = ParameterDomain(*domain)
gdim = tdim
# Build mapping
xi_map = lambda (interval, sym): Rational(1, 2)*(interval[0]*(1-sym) + interval[1]*(1+sym))
mapping = tuple(map(xi_map, zip(intervals, smbls)))
ParametrizedSet.__init__(self, domain, mapping)
class Interval(CartesianSet):
    '''Interval [a, b] described as x = 0.5*a(1-s) + 0.5*b(1+s), s in [-1, 1].'''
def __init__(self, a, b):
CartesianSet.__init__(self, [[a, b]])
class Rectangle(CartesianSet):
'''
    Rectangle [a0, b0] x [a1, b1] as x = 0.5*a0*(1-s) + 0.5*b0*(1+s) and
y = 0.5*a1*(1-t) + 0.5*b1*(1+t) for (s, t) in [-1, 1] x [-1, 1].
'''
def __init__(self, xI, yI):
CartesianSet.__init__(self, [xI, yI])
class Box(CartesianSet):
'''
    Box [a0, b0] x [a1, b1] x [a2, b2] as x = 0.5*a0*(1-s) + 0.5*b0*(1+s) and
    y = 0.5*a1*(1-t) + 0.5*b1*(1+t) and z = 0.5*a2*(1-r) + 0.5*b2*(1+r) for
(s, t, r) in [-1, 1] x [-1, 1] x [-1, 1].
'''
def __init__(self, xI, yI, zI):
CartesianSet.__init__(self, [xI, yI, zI])
# -----------------------------------------------------------------------------
if __name__ == '__main__':
A = [0, 0, 0]
B = [1, 0, 0]
C = [0, 1, 0]
D = [0, 0, 1]
line = Line(A[:2], B[:2])
tri = Triangle(A[:], B[:], C[:])
tet = Tetrahedron(A, B, C, D)
intrval = Interval(0, 1)
# rect = Rectangle([0, 1], [0, 1])
box = Box([0, 1], [0, 1], [0, 1])
print box.J
print line.J, line.tau, line.n
print tri.J, tri.n
from sympy import sin, cos, pi
s = symbols('s')
arc = ParametrizedSet(ParameterDomain((s, (0, pi))), (sin(s), cos(s)))
print arc.J, arc.tau
|
|
# coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from openapi_server.models.base_model_ import Model
from openapi_server.models.hudson_master_computerexecutors import HudsonMasterComputerexecutors
from openapi_server.models.hudson_master_computermonitor_data import HudsonMasterComputermonitorData
from openapi_server.models.label1 import Label1
from openapi_server import util
class HudsonMasterComputer(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, _class: str=None, display_name: str=None, executors: List[HudsonMasterComputerexecutors]=None, icon: str=None, icon_class_name: str=None, idle: bool=None, jnlp_agent: bool=None, launch_supported: bool=None, load_statistics: Label1=None, manual_launch_allowed: bool=None, monitor_data: HudsonMasterComputermonitorData=None, num_executors: int=None, offline: bool=None, offline_cause: str=None, offline_cause_reason: str=None, temporarily_offline: bool=None):
"""HudsonMasterComputer - a model defined in OpenAPI
:param _class: The _class of this HudsonMasterComputer.
:param display_name: The display_name of this HudsonMasterComputer.
:param executors: The executors of this HudsonMasterComputer.
:param icon: The icon of this HudsonMasterComputer.
:param icon_class_name: The icon_class_name of this HudsonMasterComputer.
:param idle: The idle of this HudsonMasterComputer.
:param jnlp_agent: The jnlp_agent of this HudsonMasterComputer.
:param launch_supported: The launch_supported of this HudsonMasterComputer.
:param load_statistics: The load_statistics of this HudsonMasterComputer.
:param manual_launch_allowed: The manual_launch_allowed of this HudsonMasterComputer.
:param monitor_data: The monitor_data of this HudsonMasterComputer.
:param num_executors: The num_executors of this HudsonMasterComputer.
:param offline: The offline of this HudsonMasterComputer.
:param offline_cause: The offline_cause of this HudsonMasterComputer.
:param offline_cause_reason: The offline_cause_reason of this HudsonMasterComputer.
:param temporarily_offline: The temporarily_offline of this HudsonMasterComputer.
"""
self.openapi_types = {
'_class': str,
'display_name': str,
'executors': List[HudsonMasterComputerexecutors],
'icon': str,
'icon_class_name': str,
'idle': bool,
'jnlp_agent': bool,
'launch_supported': bool,
'load_statistics': Label1,
'manual_launch_allowed': bool,
'monitor_data': HudsonMasterComputermonitorData,
'num_executors': int,
'offline': bool,
'offline_cause': str,
'offline_cause_reason': str,
'temporarily_offline': bool
}
self.attribute_map = {
'_class': '_class',
'display_name': 'displayName',
'executors': 'executors',
'icon': 'icon',
'icon_class_name': 'iconClassName',
'idle': 'idle',
'jnlp_agent': 'jnlpAgent',
'launch_supported': 'launchSupported',
'load_statistics': 'loadStatistics',
'manual_launch_allowed': 'manualLaunchAllowed',
'monitor_data': 'monitorData',
'num_executors': 'numExecutors',
'offline': 'offline',
'offline_cause': 'offlineCause',
'offline_cause_reason': 'offlineCauseReason',
'temporarily_offline': 'temporarilyOffline'
}
self.__class = _class
self._display_name = display_name
self._executors = executors
self._icon = icon
self._icon_class_name = icon_class_name
self._idle = idle
self._jnlp_agent = jnlp_agent
self._launch_supported = launch_supported
self._load_statistics = load_statistics
self._manual_launch_allowed = manual_launch_allowed
self._monitor_data = monitor_data
self._num_executors = num_executors
self._offline = offline
self._offline_cause = offline_cause
self._offline_cause_reason = offline_cause_reason
self._temporarily_offline = temporarily_offline
@classmethod
def from_dict(cls, dikt: dict) -> 'HudsonMasterComputer':
"""Returns the dict as a model
:param dikt: A dict.
:return: The HudsonMasterComputer of this HudsonMasterComputer.
"""
return util.deserialize_model(dikt, cls)
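
    # Illustrative sketch (payload values and deserialization behavior assumed,
    # not part of the generated file): camelCase keys from the API response are
    # expected to map back onto the snake_case attributes via attribute_map.
    #
    #     computer = HudsonMasterComputer.from_dict({
    #         "displayName": "master",
    #         "idle": True,
    #         "numExecutors": 2,
    #     })
    #     computer.display_name  # -> "master", assuming util.deserialize_model
    #                            #    honors attribute_map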
@property
def _class(self):
"""Gets the _class of this HudsonMasterComputer.
:return: The _class of this HudsonMasterComputer.
:rtype: str
"""
return self.__class
@_class.setter
def _class(self, _class):
"""Sets the _class of this HudsonMasterComputer.
:param _class: The _class of this HudsonMasterComputer.
:type _class: str
"""
self.__class = _class
@property
def display_name(self):
"""Gets the display_name of this HudsonMasterComputer.
:return: The display_name of this HudsonMasterComputer.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this HudsonMasterComputer.
:param display_name: The display_name of this HudsonMasterComputer.
:type display_name: str
"""
self._display_name = display_name
@property
def executors(self):
"""Gets the executors of this HudsonMasterComputer.
:return: The executors of this HudsonMasterComputer.
:rtype: List[HudsonMasterComputerexecutors]
"""
return self._executors
@executors.setter
def executors(self, executors):
"""Sets the executors of this HudsonMasterComputer.
:param executors: The executors of this HudsonMasterComputer.
:type executors: List[HudsonMasterComputerexecutors]
"""
self._executors = executors
@property
def icon(self):
"""Gets the icon of this HudsonMasterComputer.
:return: The icon of this HudsonMasterComputer.
:rtype: str
"""
return self._icon
@icon.setter
def icon(self, icon):
"""Sets the icon of this HudsonMasterComputer.
:param icon: The icon of this HudsonMasterComputer.
:type icon: str
"""
self._icon = icon
@property
def icon_class_name(self):
"""Gets the icon_class_name of this HudsonMasterComputer.
:return: The icon_class_name of this HudsonMasterComputer.
:rtype: str
"""
return self._icon_class_name
@icon_class_name.setter
def icon_class_name(self, icon_class_name):
"""Sets the icon_class_name of this HudsonMasterComputer.
:param icon_class_name: The icon_class_name of this HudsonMasterComputer.
:type icon_class_name: str
"""
self._icon_class_name = icon_class_name
@property
def idle(self):
"""Gets the idle of this HudsonMasterComputer.
:return: The idle of this HudsonMasterComputer.
:rtype: bool
"""
return self._idle
@idle.setter
def idle(self, idle):
"""Sets the idle of this HudsonMasterComputer.
:param idle: The idle of this HudsonMasterComputer.
:type idle: bool
"""
self._idle = idle
@property
def jnlp_agent(self):
"""Gets the jnlp_agent of this HudsonMasterComputer.
:return: The jnlp_agent of this HudsonMasterComputer.
:rtype: bool
"""
return self._jnlp_agent
@jnlp_agent.setter
def jnlp_agent(self, jnlp_agent):
"""Sets the jnlp_agent of this HudsonMasterComputer.
:param jnlp_agent: The jnlp_agent of this HudsonMasterComputer.
:type jnlp_agent: bool
"""
self._jnlp_agent = jnlp_agent
@property
def launch_supported(self):
"""Gets the launch_supported of this HudsonMasterComputer.
:return: The launch_supported of this HudsonMasterComputer.
:rtype: bool
"""
return self._launch_supported
@launch_supported.setter
def launch_supported(self, launch_supported):
"""Sets the launch_supported of this HudsonMasterComputer.
:param launch_supported: The launch_supported of this HudsonMasterComputer.
:type launch_supported: bool
"""
self._launch_supported = launch_supported
@property
def load_statistics(self):
"""Gets the load_statistics of this HudsonMasterComputer.
:return: The load_statistics of this HudsonMasterComputer.
:rtype: Label1
"""
return self._load_statistics
@load_statistics.setter
def load_statistics(self, load_statistics):
"""Sets the load_statistics of this HudsonMasterComputer.
:param load_statistics: The load_statistics of this HudsonMasterComputer.
:type load_statistics: Label1
"""
self._load_statistics = load_statistics
@property
def manual_launch_allowed(self):
"""Gets the manual_launch_allowed of this HudsonMasterComputer.
:return: The manual_launch_allowed of this HudsonMasterComputer.
:rtype: bool
"""
return self._manual_launch_allowed
@manual_launch_allowed.setter
def manual_launch_allowed(self, manual_launch_allowed):
"""Sets the manual_launch_allowed of this HudsonMasterComputer.
:param manual_launch_allowed: The manual_launch_allowed of this HudsonMasterComputer.
:type manual_launch_allowed: bool
"""
self._manual_launch_allowed = manual_launch_allowed
@property
def monitor_data(self):
"""Gets the monitor_data of this HudsonMasterComputer.
:return: The monitor_data of this HudsonMasterComputer.
:rtype: HudsonMasterComputermonitorData
"""
return self._monitor_data
@monitor_data.setter
def monitor_data(self, monitor_data):
"""Sets the monitor_data of this HudsonMasterComputer.
:param monitor_data: The monitor_data of this HudsonMasterComputer.
:type monitor_data: HudsonMasterComputermonitorData
"""
self._monitor_data = monitor_data
@property
def num_executors(self):
"""Gets the num_executors of this HudsonMasterComputer.
:return: The num_executors of this HudsonMasterComputer.
:rtype: int
"""
return self._num_executors
@num_executors.setter
def num_executors(self, num_executors):
"""Sets the num_executors of this HudsonMasterComputer.
:param num_executors: The num_executors of this HudsonMasterComputer.
:type num_executors: int
"""
self._num_executors = num_executors
@property
def offline(self):
"""Gets the offline of this HudsonMasterComputer.
:return: The offline of this HudsonMasterComputer.
:rtype: bool
"""
return self._offline
@offline.setter
def offline(self, offline):
"""Sets the offline of this HudsonMasterComputer.
:param offline: The offline of this HudsonMasterComputer.
:type offline: bool
"""
self._offline = offline
@property
def offline_cause(self):
"""Gets the offline_cause of this HudsonMasterComputer.
:return: The offline_cause of this HudsonMasterComputer.
:rtype: str
"""
return self._offline_cause
@offline_cause.setter
def offline_cause(self, offline_cause):
"""Sets the offline_cause of this HudsonMasterComputer.
:param offline_cause: The offline_cause of this HudsonMasterComputer.
:type offline_cause: str
"""
self._offline_cause = offline_cause
@property
def offline_cause_reason(self):
"""Gets the offline_cause_reason of this HudsonMasterComputer.
:return: The offline_cause_reason of this HudsonMasterComputer.
:rtype: str
"""
return self._offline_cause_reason
@offline_cause_reason.setter
def offline_cause_reason(self, offline_cause_reason):
"""Sets the offline_cause_reason of this HudsonMasterComputer.
:param offline_cause_reason: The offline_cause_reason of this HudsonMasterComputer.
:type offline_cause_reason: str
"""
self._offline_cause_reason = offline_cause_reason
@property
def temporarily_offline(self):
"""Gets the temporarily_offline of this HudsonMasterComputer.
:return: The temporarily_offline of this HudsonMasterComputer.
:rtype: bool
"""
return self._temporarily_offline
@temporarily_offline.setter
def temporarily_offline(self, temporarily_offline):
"""Sets the temporarily_offline of this HudsonMasterComputer.
:param temporarily_offline: The temporarily_offline of this HudsonMasterComputer.
:type temporarily_offline: bool
"""
self._temporarily_offline = temporarily_offline
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shared functions and classes for tfdbg command-line interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
# Default threshold number of elements above which ellipses will be used
# when printing the value of the tensor.
DEFAULT_NDARRAY_DISPLAY_THRESHOLD = 2000
def bytes_to_readable_str(num_bytes, include_b=False):
"""Generate a human-readable string representing number of bytes.
The units B, kB, MB and GB are used.
Args:
num_bytes: (`int` or None) Number of bytes.
include_b: (`bool`) Include the letter B at the end of the unit.
Returns:
(`str`) A string representing the number of bytes in a human-readable way,
including a unit at the end.
"""
if num_bytes is None:
return str(num_bytes)
if num_bytes < 1024:
result = "%d" % num_bytes
elif num_bytes < 1048576:
result = "%.2fk" % (num_bytes / 1024.0)
elif num_bytes < 1073741824:
result = "%.2fM" % (num_bytes / 1048576.0)
else:
result = "%.2fG" % (num_bytes / 1073741824.0)
if include_b:
result += "B"
return result
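
# Illustrative sketch (not part of the original module): a few sample outputs
# following the thresholds above.
def _example_bytes_to_readable_str():
    assert bytes_to_readable_str(500) == "500"
    assert bytes_to_readable_str(1536, include_b=True) == "1.50kB"
    assert bytes_to_readable_str(2 * 1048576) == "2.00M"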
def parse_ranges_highlight(ranges_string):
"""Process ranges highlight string.
Args:
    ranges_string: (str) A string representing a numerical range or a list of
numerical ranges. See the help info of the -r flag of the print_tensor
command for more details.
Returns:
    An instance of tensor_format.HighlightOptions, if ranges_string is a valid
representation of a range or a list of ranges.
"""
ranges = None
def ranges_filter(x):
r = np.zeros(x.shape, dtype=bool)
for range_start, range_end in ranges:
r = np.logical_or(r, np.logical_and(x >= range_start, x <= range_end))
return r
if ranges_string:
ranges = command_parser.parse_ranges(ranges_string)
return tensor_format.HighlightOptions(
ranges_filter, description=ranges_string)
else:
return None
def format_tensor(tensor,
tensor_name,
np_printoptions,
print_all=False,
tensor_slicing=None,
highlight_options=None):
"""Generate formatted str to represent a tensor or its slices.
Args:
tensor: (numpy ndarray) The tensor value.
tensor_name: (str) Name of the tensor, e.g., the tensor's debug watch key.
np_printoptions: (dict) Numpy tensor formatting options.
print_all: (bool) Whether the tensor is to be displayed in its entirety,
instead of printing ellipses, even if its number of elements exceeds
the default numpy display threshold.
(Note: Even if this is set to true, the screen output can still be cut
      off by the UI frontend if it consists of more lines than the frontend
can handle.)
tensor_slicing: (str or None) Slicing of the tensor, e.g., "[:, 1]". If
None, no slicing will be performed on the tensor.
highlight_options: (tensor_format.HighlightOptions) options to highlight
elements of the tensor. See the doc of tensor_format.format_tensor()
for more details.
Returns:
(str) Formatted str representing the (potentially sliced) tensor.
"""
if tensor_slicing:
# Validate the indexing.
value = command_parser.evaluate_tensor_slice(tensor, tensor_slicing)
sliced_name = tensor_name + tensor_slicing
else:
value = tensor
sliced_name = tensor_name
if print_all:
np_printoptions["threshold"] = value.size
else:
np_printoptions["threshold"] = DEFAULT_NDARRAY_DISPLAY_THRESHOLD
return tensor_format.format_tensor(
value,
sliced_name,
include_metadata=True,
np_printoptions=np_printoptions,
highlight_options=highlight_options)
def error(msg):
"""Generate a RichTextLines output for error.
Args:
msg: (str) The error message.
Returns:
(debugger_cli_common.RichTextLines) A representation of the error message
for screen output.
"""
full_msg = "ERROR: " + msg
return debugger_cli_common.RichTextLines(
[full_msg], font_attr_segs={0: [(0, len(full_msg), "red")]})
def _get_fetch_name(fetch):
"""Obtain the name or string representation of a fetch.
Args:
fetch: The fetch in question.
Returns:
If the attribute 'name' is available, return the name. Otherwise, return
str(fetch).
"""
return fetch.name if hasattr(fetch, "name") else str(fetch)
def _get_fetch_names(fetches):
"""Get a flattened list of the names in run() call fetches.
Args:
    fetches: Fetches of the `Session.run()` call. It may be a Tensor, an
Operation or a Variable. It may also be nested lists, tuples or
dicts. See doc of `Session.run()` for more details.
Returns:
(list of str) A flattened list of fetch names from `fetches`.
"""
lines = []
if isinstance(fetches, (list, tuple)):
for fetch in fetches:
lines.extend(_get_fetch_names(fetch))
elif isinstance(fetches, dict):
for key in fetches:
lines.extend(_get_fetch_names(fetches[key]))
else:
# This ought to be a Tensor, an Operation or a Variable, for which the name
# attribute should be available. (Bottom-out condition of the recursion.)
lines.append(_get_fetch_name(fetches))
return lines
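# Illustrative behavior (a sketch, not part of the original module): given
# nested fetches such as [w, [x, y]], where each element carries a .name
# attribute,
#
#   _get_fetch_names([w, [x, y]])   # -> [w.name, x.name, y.name]
#
# Objects without a .name attribute fall back to str(fetch).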
def _recommend_command(command, description, indent=2, create_link=False):
"""Generate a RichTextLines object that describes a recommended command.
Args:
command: (str) The command to recommend.
    description: (str) A description of what the command does.
indent: (int) How many spaces to indent in the beginning.
create_link: (bool) Whether a command link is to be applied to the command
string.
Returns:
(RichTextLines) Formatted text (with font attributes) for recommending the
command.
"""
indent_str = " " * indent
lines = [indent_str + command + ":", indent_str + " " + description]
if create_link:
font_attr_segs = {
0: [(indent, indent + len(command), [
debugger_cli_common.MenuItem("", command), "bold"])]}
else:
font_attr_segs = {0: [(indent, indent + len(command), "bold")]}
return debugger_cli_common.RichTextLines(lines, font_attr_segs=font_attr_segs)
def get_tfdbg_logo():
"""Make an ASCII representation of the tfdbg logo."""
lines = [
"",
"TTTTTT FFFF DDD BBBB GGG ",
" TT F D D B B G ",
" TT FFF D D BBBB G GG",
" TT F D D B B G G",
" TT F DDD BBBB GGG ",
"",
]
return debugger_cli_common.RichTextLines(lines)
def get_run_start_intro(run_call_count,
fetches,
feed_dict,
tensor_filters):
"""Generate formatted intro for run-start UI.
Args:
run_call_count: (int) Run call counter.
fetches: Fetches of the `Session.run()` call. See doc of `Session.run()`
for more details.
feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()`
for more details.
tensor_filters: (dict) A dict from tensor-filter name to tensor-filter
callable.
Returns:
(RichTextLines) Formatted intro message about the `Session.run()` call.
"""
fetch_lines = _get_fetch_names(fetches)
if not feed_dict:
feed_dict_lines = ["(Empty)"]
else:
feed_dict_lines = []
for feed_key in feed_dict:
if isinstance(feed_key, six.string_types):
feed_dict_lines.append(feed_key)
else:
feed_dict_lines.append(feed_key.name)
intro_lines = [
"======================================",
"Session.run() call #%d:" % run_call_count,
"", "Fetch(es):"
]
intro_lines.extend([" " + line for line in fetch_lines])
intro_lines.extend(["", "Feed dict(s):"])
intro_lines.extend([" " + line for line in feed_dict_lines])
intro_lines.extend([
"======================================", "",
"Select one of the following commands to proceed ---->"
])
out = debugger_cli_common.RichTextLines(intro_lines)
out.extend(
_recommend_command(
"run",
"Execute the run() call with debug tensor-watching",
create_link=True))
out.extend(
_recommend_command(
"run -n",
"Execute the run() call without debug tensor-watching",
create_link=True))
out.extend(
_recommend_command(
"run -t <T>",
"Execute run() calls (T - 1) times without debugging, then "
"execute run() one more time and drop back to the CLI"))
out.extend(
_recommend_command(
"run -f <filter_name>",
"Keep executing run() calls until a dumped tensor passes a given, "
"registered filter (conditional breakpoint mode)"))
more_font_attr_segs = {}
more_lines = [" Registered filter(s):"]
if tensor_filters:
filter_names = []
for filter_name in tensor_filters:
filter_names.append(filter_name)
more_lines.append(" * " + filter_name)
command_menu_node = debugger_cli_common.MenuItem(
"", "run -f %s" % filter_name)
more_font_attr_segs[len(more_lines) - 1] = [
(10, len(more_lines[-1]), command_menu_node)]
else:
more_lines.append(" (None)")
out.extend(
debugger_cli_common.RichTextLines(
more_lines, font_attr_segs=more_font_attr_segs))
out.extend(
_recommend_command(
"invoke_stepper",
"Use the node-stepper interface, which allows you to interactively "
"step through nodes involved in the graph run() call and "
"inspect/modify their values", create_link=True))
out.append("")
suggest_help = "For more details, see help."
out.append(
suggest_help,
font_attr_segs=[(len(suggest_help) - 5, len(suggest_help) - 1,
debugger_cli_common.MenuItem("", "help"))])
out.append("")
# Make main menu for the run-start intro.
menu = debugger_cli_common.Menu()
menu.append(debugger_cli_common.MenuItem("run", "run"))
menu.append(debugger_cli_common.MenuItem(
"invoke_stepper", "invoke_stepper"))
menu.append(debugger_cli_common.MenuItem("exit", "exit"))
out.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
return out
def get_run_short_description(run_call_count, fetches, feed_dict):
"""Get a short description of the run() call.
Args:
run_call_count: (int) Run call counter.
fetches: Fetches of the `Session.run()` call. See doc of `Session.run()`
for more details.
feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()`
for more details.
Returns:
(str) A short description of the run() call, including information about
      the fetch(es) and feed(s).
"""
description = "run #%d: " % run_call_count
if isinstance(fetches, (ops.Tensor, ops.Operation, variables.Variable)):
description += "1 fetch (%s); " % _get_fetch_name(fetches)
else:
# Could be (nested) list, tuple, dict or namedtuple.
num_fetches = len(_get_fetch_names(fetches))
if num_fetches > 1:
description += "%d fetches; " % num_fetches
else:
description += "%d fetch; " % num_fetches
if not feed_dict:
description += "0 feeds"
else:
if len(feed_dict) == 1:
for key in feed_dict:
description += "1 feed (%s)" % (
key if isinstance(key, six.string_types) else key.name)
else:
description += "%d feeds" % len(feed_dict)
return description
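# Illustrative output (a sketch, not part of the original module): with a
# single fetched Tensor named "accuracy:0" and one feed keyed by a Tensor
# named "x:0" (both names are hypothetical),
#
#   get_run_short_description(2, accuracy_tensor, {x_tensor: data})
#   # -> "run #2: 1 fetch (accuracy:0); 1 feed (x:0)"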
def get_error_intro(tf_error):
"""Generate formatted intro for TensorFlow run-time error.
Args:
tf_error: (errors.OpError) TensorFlow run-time error object.
Returns:
(RichTextLines) Formatted intro message about the run-time OpError, with
sample commands for debugging.
"""
op_name = tf_error.op.name
intro_lines = [
"--------------------------------------",
"!!! An error occurred during the run !!!",
"",
"You may use the following commands to debug:",
]
intro_font_attr_segs = {1: [(0, len(intro_lines[1]), "blink")]}
out = debugger_cli_common.RichTextLines(
intro_lines, font_attr_segs=intro_font_attr_segs)
out.extend(
_recommend_command("ni -a -d -t %s" % op_name,
"Inspect information about the failing op.",
create_link=True))
out.extend(
_recommend_command("li -r %s" % op_name,
"List inputs to the failing op, recursively.",
create_link=True))
out.extend(
_recommend_command(
"lt",
"List all tensors dumped during the failing run() call.",
create_link=True))
more_lines = [
"",
"Op name: " + op_name,
"Error type: " + str(type(tf_error)),
"",
"Details:",
str(tf_error),
"",
"WARNING: Using client GraphDef due to the error, instead of "
"executor GraphDefs.",
"--------------------------------------",
"",
]
out.extend(debugger_cli_common.RichTextLines(more_lines))
return out
|
|
# Copyright 2015 IBM Corp.
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Volume FC driver for IBM Storwize family and SVC storage systems.
Notes:
1. If you specify both a password and a key file, this driver will use the
key file only.
2. When using a key file for authentication, it is up to the user or
system administrator to store the private key in a safe manner.
3. The defaults for creating volumes are "-rsize 2% -autoexpand
-grainsize 256 -warning 0". These can be changed in the configuration
   file or by using volume types (recommended only for advanced users).
Limitations:
1. The driver expects CLI output in English; error messages may be in a
localized format.
2. Clones and creating volumes from snapshots, where the source and target
are of different sizes, is not supported.
"""
import collections
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder import interface
from cinder.volume import configuration
from cinder.volume.drivers.ibm.storwize_svc import (
storwize_svc_common as storwize_common)
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
storwize_svc_fc_opts = [
cfg.BoolOpt('storwize_svc_multipath_enabled',
default=False,
help='Connect with multipath (FC only; iSCSI multipath is '
'controlled by Nova)'),
]
CONF = cfg.CONF
CONF.register_opts(storwize_svc_fc_opts, group=configuration.SHARED_CONF_GROUP)
@interface.volumedriver
class StorwizeSVCFCDriver(storwize_common.StorwizeSVCCommonDriver):
"""IBM Storwize V7000 and SVC FC volume driver.
Version history:
.. code-block:: none
1.0 - Initial driver
1.1 - FC support, create_cloned_volume, volume type support,
get_volume_stats, minor bug fixes
1.2.0 - Added retype
1.2.1 - Code refactor, improved exception handling
1.2.2 - Fix bug #1274123 (races in host-related functions)
1.2.3 - Fix Fibre Channel connectivity: bug #1279758 (add delim
to lsfabric, clear unused data from connections, ensure
                matching WWPNs by comparing lower case)
1.2.4 - Fix bug #1278035 (async migration/retype)
1.2.5 - Added support for manage_existing (unmanage is inherited)
1.2.6 - Added QoS support in terms of I/O throttling rate
1.3.1 - Added support for volume replication
1.3.2 - Added support for consistency group
1.3.3 - Update driver to use ABC metaclasses
2.0 - Code refactor, split init file and placed shared methods
for FC and iSCSI within the StorwizeSVCCommonDriver class
2.0.1 - Added support for multiple pools with model update
2.1 - Added replication V2 support to the global/metro mirror
mode
2.1.1 - Update replication to version 2.1
2.2 - Add CG capability to generic volume groups
2.2.1 - Add vdisk mirror/stretch cluster support
2.2.2 - Add npiv support
2.2.3 - Add replication group support
2.2.4 - Add backup snapshots support
2.2.5 - Add hyperswap support
"""
VERSION = "2.2.5"
# ThirdPartySystems wiki page
CI_WIKI_NAME = "IBM_STORAGE_CI"
def __init__(self, *args, **kwargs):
super(StorwizeSVCFCDriver, self).__init__(*args, **kwargs)
self.protocol = 'FC'
self.configuration.append_config_values(
storwize_svc_fc_opts)
def validate_connector(self, connector):
"""Check connector for at least one enabled FC protocol."""
if 'wwpns' not in connector:
LOG.error('The connector does not contain the required '
'information.')
raise exception.InvalidConnectorException(
missing='wwpns')
def initialize_connection_snapshot(self, snapshot, connector):
"""Perform attach snapshot for backup snapshots."""
# If the snapshot's source volume is a replication volume and the
# replication volume has failed over to aux_backend,
# attach the snapshot will be failed.
self._check_snapshot_replica_volume_status(snapshot)
vol_attrs = ['id', 'name', 'volume_type_id', 'display_name']
Volume = collections.namedtuple('Volume', vol_attrs)
volume = Volume(id=snapshot.id,
name=snapshot.name,
volume_type_id=snapshot.volume_type_id,
display_name='backup-snapshot')
return self.initialize_connection(volume, connector)
@fczm_utils.add_fc_zone
def initialize_connection(self, volume, connector):
"""Perform necessary work to make a FC connection."""
@coordination.synchronized('storwize-host-{system_id}-{host}')
def _do_initialize_connection_locked(system_id, host):
return self._do_initialize_connection(volume, connector)
return _do_initialize_connection_locked(self._state['system_id'],
connector['host'])
def _do_initialize_connection(self, volume, connector):
"""Perform necessary work to make a FC connection.
To be able to create an FC connection from a given host to a
volume, we must:
1. Translate the given WWNN to a host name
2. Create new host on the storage system if it does not yet exist
3. Map the volume to the host if it is not already done
4. Return the connection information for relevant nodes (in the
proper I/O group)
"""
LOG.debug('enter: initialize_connection: volume %(vol)s with connector'
' %(conn)s', {'vol': volume.id, 'conn': connector})
if volume.display_name == 'backup-snapshot':
LOG.debug('It is a virtual volume %(vol)s for attach snapshot.',
{'vol': volume.id})
volume_name = volume.name
backend_helper = self._helpers
node_state = self._state
else:
volume_name, backend_helper, node_state = self._get_vol_sys_info(
volume)
host_site = self._get_volume_host_site_from_conf(volume,
connector)
is_hyper_volume = backend_helper.is_volume_hyperswap(volume_name)
# The host_site is necessary for hyperswap volume.
if is_hyper_volume and host_site is None:
msg = (_('There is no correct storwize_preferred_host_site '
'configured for a hyperswap volume %s.') % volume.name)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
# Check if a host object is defined for this host name
host_name = backend_helper.get_host_from_connector(connector)
if host_name is None:
# Host does not exist - add a new host to Storwize/SVC
host_name = backend_helper.create_host(connector, site=host_site)
elif is_hyper_volume:
self._update_host_site_for_hyperswap_volume(host_name, host_site)
volume_attributes = backend_helper.get_vdisk_attributes(volume_name)
if volume_attributes is None:
msg = (_('initialize_connection: Failed to get attributes'
' for volume %s.') % volume_name)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
multihostmap = self.configuration.storwize_svc_multihostmap_enabled
lun_id = backend_helper.map_vol_to_host(volume_name, host_name,
multihostmap)
try:
preferred_node = volume_attributes['preferred_node_id']
IO_group = volume_attributes['IO_group_id']
except KeyError as e:
LOG.error('Did not find expected column name in '
'lsvdisk: %s.', e)
raise exception.VolumeBackendAPIException(
data=_('initialize_connection: Missing volume attribute for '
'volume %s.') % volume_name)
try:
# Get preferred node and other nodes in I/O group
preferred_node_entry = None
io_group_nodes = []
for node in node_state['storage_nodes'].values():
if node['id'] == preferred_node:
preferred_node_entry = node
if node['IO_group'] == IO_group:
io_group_nodes.append(node)
if not len(io_group_nodes):
msg = (_('initialize_connection: No node found in '
'I/O group %(gid)s for volume %(vol)s.') %
{'gid': IO_group, 'vol': volume_name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if not preferred_node_entry:
# Get 1st node in I/O group
preferred_node_entry = io_group_nodes[0]
LOG.warning('initialize_connection: Did not find a '
'preferred node for volume %s.', volume_name)
properties = {}
properties['target_discovered'] = False
properties['target_lun'] = lun_id
properties['volume_id'] = volume.id
conn_wwpns = backend_helper.get_conn_fc_wwpns(host_name)
# If conn_wwpns is empty, then that means that there were
# no target ports with visibility to any of the initiators
# so we return all target ports.
if len(conn_wwpns) == 0:
for node in node_state['storage_nodes'].values():
                    # The Storwize/SVC release 7.7.0.0 introduced the NPIV
                    # feature; different commands need to be used to get the
                    # WWPNs for host I/O.
if node_state['code_level'] < (7, 7, 0, 0):
conn_wwpns.extend(node['WWPN'])
else:
npiv_wwpns = backend_helper.get_npiv_wwpns(
node_id=node['id'],
host_io="yes")
conn_wwpns.extend(npiv_wwpns)
properties['target_wwn'] = conn_wwpns
i_t_map = self._make_initiator_target_map(connector['wwpns'],
conn_wwpns)
properties['initiator_target_map'] = i_t_map
# specific for z/VM, refer to cinder bug 1323993
if "zvm_fcp" in connector:
properties['zvm_fcp'] = connector['zvm_fcp']
except Exception as ex:
with excutils.save_and_reraise_exception():
LOG.error('initialize_connection: Failed to export volume '
'%(vol)s due to %(ex)s.', {'vol': volume.name,
'ex': ex})
self._do_terminate_connection(volume, connector)
LOG.error('initialize_connection: Failed '
'to collect return '
'properties for volume %(vol)s and connector '
'%(conn)s.\n', {'vol': volume,
'conn': connector})
LOG.debug('leave: initialize_connection:\n volume: %(vol)s\n '
'connector %(conn)s\n properties: %(prop)s',
{'vol': volume.id, 'conn': connector,
'prop': properties})
return {'driver_volume_type': 'fibre_channel', 'data': properties, }
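    # For reference (a sketch derived from the code above, not part of the
    # original driver), a successful export returns a structure roughly like:
    #
    #   {'driver_volume_type': 'fibre_channel',
    #    'data': {'target_discovered': False,
    #             'target_lun': <lun id>,
    #             'volume_id': <cinder volume id>,
    #             'target_wwn': [<target WWPNs>],
    #             'initiator_target_map': {<initiator wwpn>: [<target wwpns>]}}}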
def _make_initiator_target_map(self, initiator_wwpns, target_wwpns):
"""Build a simplistic all-to-all mapping."""
i_t_map = {}
for i_wwpn in initiator_wwpns:
i_t_map[str(i_wwpn)] = []
for t_wwpn in target_wwpns:
i_t_map[i_wwpn].append(t_wwpn)
return i_t_map
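    # Illustrative result (a sketch, not part of the original driver; the
    # WWPNs are made up): every initiator is mapped to every target, i.e. a
    # full mesh:
    #
    #   self._make_initiator_target_map(['10:00:aa', '10:00:bb'],
    #                                   ['50:05:01', '50:05:02'])
    #   # -> {'10:00:aa': ['50:05:01', '50:05:02'],
    #   #     '10:00:bb': ['50:05:01', '50:05:02']}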
def terminate_connection_snapshot(self, snapshot, connector, **kwargs):
"""Perform detach snapshot for backup snapshots."""
vol_attrs = ['id', 'name', 'display_name']
Volume = collections.namedtuple('Volume', vol_attrs)
volume = Volume(id=snapshot.id,
name=snapshot.name,
display_name='backup-snapshot')
return self.terminate_connection(volume, connector, **kwargs)
@fczm_utils.remove_fc_zone
def terminate_connection(self, volume, connector, **kwargs):
"""Cleanup after an FC connection has been terminated."""
# If a fake connector is generated by nova when the host
        # is down, then the connector will not have a host property.
        # In this case, construct the lock without the host property
        # so that all the fake connectors to an SVC are serialized.
host = connector['host'] if 'host' in connector else ""
@coordination.synchronized('storwize-host-{system_id}-{host}')
def _do_terminate_connection_locked(system_id, host):
return self._do_terminate_connection(volume, connector,
**kwargs)
return _do_terminate_connection_locked(self._state['system_id'], host)
def _do_terminate_connection(self, volume, connector, **kwargs):
"""Cleanup after an FC connection has been terminated.
When we clean up a terminated connection between a given connector
and volume, we:
1. Translate the given connector to a host name
2. Remove the volume-to-host mapping if it exists
3. Delete the host if it has no more mappings (hosts are created
automatically by this driver when mappings are created)
"""
LOG.debug('enter: terminate_connection: volume %(vol)s with connector'
' %(conn)s', {'vol': volume.id, 'conn': connector})
(info, host_name, vol_name, backend_helper,
node_state) = self._get_map_info_from_connector(volume, connector)
if not backend_helper:
return info
# Unmap volumes, if hostname is None, need to get value from vdiskmap
host_name = backend_helper.unmap_vol_from_host(vol_name, host_name)
        # host_name could be None
if host_name:
resp = backend_helper.check_host_mapped_vols(host_name)
if not len(resp):
LOG.info("Need to remove FC Zone, building initiator "
"target map.")
# Build info data structure for zone removing
if 'wwpns' in connector and host_name:
target_wwpns = []
# Returning all target_wwpns in storage_nodes, since
# we cannot determine which wwpns are logged in during
# a VM deletion.
for node in node_state['storage_nodes'].values():
target_wwpns.extend(node['WWPN'])
init_targ_map = (self._make_initiator_target_map
(connector['wwpns'],
target_wwpns))
info['data'] = {'initiator_target_map': init_targ_map}
# No volume mapped to the host, delete host from array
backend_helper.delete_host(host_name)
LOG.debug('leave: terminate_connection: volume %(vol)s with '
'connector %(conn)s, info %(info)s', {'vol': volume.id,
'conn': connector,
'info': info})
return info
|
|
from .ddt_container import ddt_container
from .ddt_tile import ddt_tile
from .ddt_tile_html import ddt_tile_html
class ddt_container_nSvgTable(ddt_container):
def make_nSvgTable(self,
data1,data2,
data1_keys,data1_nestkeys,data1_keymap,
data2_keys,data2_nestkeys,data2_keymap,
tileheader,
svgtype,
tabletype,
svgx1axislabel='',
svgy1axislabel='',
single_plot_I=True,
svgkeymap = [],
svgtile2datamap=[0],
svgfilters=None,
tablefilters=None,
tableheaders=None
):
'''Make a filter menu + n SVGs + Table
INPUT:
data1 = listDict of all data
        data2 = listDict of all data (for a single plot, using either the same
            data as data1 or different data), or
            dictColumn of all data (dictionary of data split into different
            SVGs; required for multiple plots)
        parameters for the filter menu and table
data1_keys
data1_nestkeys
data1_keymap
parameters for the svg objects
data2_keys
data2_nestkeys
data2_keymap
tileheader = title for each of the tiles
svgtype = type of svg (TODO: add optional input for specifying specific svgs for multiple plots)
tabletype = type of table
        single_plot_I = plot all data on a single svg or partition into separate SVGs
            True, only data1 will be used
            False, data2 must be specified
OPTIONAL INPUT for single plot:
svgkeymap = default, [data2_keymap],
svgtile2datamap= default, [0],
'''
#make the form
form = ddt_tile();
form.make_tileparameters(
tileparameters={
'tileheader':'Filter menu',
'tiletype':'html',
'tileid':"filtermenu1",
'rowid':"row1",
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-6"}
);
form.make_htmlparameters(
htmlparameters = {
'htmlid':'filtermenuform1',
"htmltype":'form_01',
"formsubmitbuttonidtext":{'id':'submit1','text':'submit'},
"formresetbuttonidtext":{'id':'reset1','text':'reset'},
"formupdatebuttonidtext":{'id':'update1','text':'update'}},
);
self.add_parameters(form.get_parameters());
self.update_tile2datamap("filtermenu1",[0]);
self.add_filtermenu(
{"filtermenuid":"filtermenu1",
"filtermenuhtmlid":"filtermenuform1",
"filtermenusubmitbuttonid":"submit1",
"filtermenuresetbuttonid":"reset1",
"filtermenuupdatebuttonid":"update1"}
);
# data 1:
self.add_data(
data1,
data1_keys,
data1_nestkeys
);
# tile 1-n features: count
if not single_plot_I:
rowcnt = 1;
colcnt = 1;
cnt = 0;
for k,v in data2.items():
svgtileid = "tilesvg"+str(cnt);
svgid = 'svg'+str(cnt);
iter=cnt+1; #start at 1
if (cnt % 2 == 0):
rowcnt = rowcnt+1;#even
colcnt = 1;
else:
colcnt = colcnt+1;
# svg
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':tileheader,
'tiletype':'svg',
'tileid':svgtileid,
'rowid':"row"+str(rowcnt),
'colid':"col"+str(colcnt),
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-6"
});
svg.make_svgparameters(
svgparameters={
"svgtype":svgtype,
"svgkeymap":[data2_keymap],
'svgid':'svg'+str(cnt),
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":500,
"svgheight":350,
"svgx1axislabel":data2_keymap['xdata'],
"svgy1axislabel":data2_keymap['ydata']
}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap(svgtileid,[iter]);
self.add_data(
v,
data2_keys,
data2_nestkeys
);
cnt+=1;
else:
cnt = 0;
svgtileid = "tilesvg"+str(cnt);
svgid = 'svg'+str(cnt);
rowcnt = 2;
colcnt = 1;
# make the svg object
svg = ddt_tile();
svg.make_tileparameters(
tileparameters={
'tileheader':tileheader,
'tiletype':'svg',
'tileid':svgtileid,
'rowid':"row"+str(rowcnt),
'colid':"col"+str(colcnt),
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-6"
});
# make the svg parameters
if not svgkeymap:
svgkeymap = [data2_keymap];
svg.make_svgparameters(
svgparameters={
"svgtype":svgtype,
"svgkeymap":svgkeymap,
'svgid':svgid,
"svgmargin":{ 'top': 50, 'right': 150, 'bottom': 50, 'left': 50 },
"svgwidth":350,
"svgheight":250,
"svgx1axislabel":data2_keymap['xdata'],
"svgy1axislabel":data2_keymap['ydata']
}
);
self.add_parameters(svg.get_parameters());
self.update_tile2datamap(svgtileid,svgtile2datamap);
#add data 2
if data2:
self.add_data(
data2,
data2_keys,
data2_nestkeys
);
cnt+=1;
# make the table object
crosstable = ddt_tile();
crosstable.make_tileparameters(
tileparameters = {
'tileheader':'Table',
'tiletype':'table',
'tileid':"tabletile1",
'rowid':"row"+str(rowcnt+1),
'colid':"col1",
'tileclass':"panel panel-default",
'rowclass':"row",
'colclass':"col-sm-12"
}
);
crosstable.make_tableparameters(
tableparameters = {
"tabletype":tabletype,
'tableid':'table1',
"tablefilters":tablefilters,
"tableclass":"table table-condensed table-hover",
'tableformtileid':'tile1',
"tablekeymap":[data2_keymap],
"tableheaders":tableheaders,}
);
self.add_parameters(crosstable.get_parameters());
self.update_tile2datamap("tabletile1",[0]);
|
|
from duplication_handler import DuplicationHandler
from unittest import TestCase
from mock import patch
class TestUtils(TestCase):
def test_get_media(self):
subject = DuplicationHandler()
dummy_entry = {
'booktitle': 'Dragon balls',
'year': '1991'
}
casual_entry = {
'journal': 'Dragon balls Z',
'year': '1092'
}
oh_oh_entry = {
'year': '1111'
}
assert subject.get_media(dummy_entry) == 'Dragon balls'
assert subject.get_media(casual_entry) == 'Dragon balls Z'
        assert subject.get_media(oh_oh_entry) is None
def test_has_media(self):
subject = DuplicationHandler()
dummy_entry = {
'booktitle': 'Dragon balls',
'year': '1991'
}
casual_entry = {
'journal': 'Dragon ball Z',
'year': '1092'
}
oh_oh_entry = {
'year': '1111'
}
assert subject.has_media(dummy_entry)
assert subject.has_media(casual_entry)
assert not subject.has_media(oh_oh_entry)
def test_is_in_journal(self):
subject = DuplicationHandler()
dummy_entry = {
'booktitle': 'Dragon balls',
'year': '1991'
}
casual_entry = {
'journal': 'IEEE Transactions Dragon balls',
'year': '1092'
}
oh_oh_entry = {
'year': '1111'
}
good_entry = {
'year': '1948',
'title': 'A mathematical theory of communication',
'journal': 'Bell system technical journal',
'author': 'Shannon, C. E.',
'volume': '27'
}
assert not subject.is_in_journal(dummy_entry)
assert subject.is_in_journal(casual_entry)
assert not subject.is_in_journal(oh_oh_entry)
assert subject.is_in_journal(good_entry)
def test_is_in_conference(self):
subject = DuplicationHandler()
dummy_entry = {
'booktitle': 'Dragon balls conference',
'year': '1991'
}
casual_entry = {
'journal': 'Universe meeting about Dragon balls',
'year': '1092'
}
oh_oh_entry = {
'year': '1111'
}
good_entry = {
'year': '1948',
'title': 'A mathematical theory of communication',
'journal': 'Bell system technical journal',
'author': 'Shannon, C. E.',
'volume': '27'
}
assert subject.is_in_conference(dummy_entry)
assert subject.is_in_conference(casual_entry)
assert not subject.is_in_conference(oh_oh_entry)
assert not subject.is_in_conference(good_entry)
def test_is_in_arxiv(self):
subject = DuplicationHandler()
dummy_entry = {
'booktitle': 'arxiv: Dragon balls',
'year': '1991'
}
casual_entry = {
'journal': 'IEEE Transactions Dragon balls',
'year': '1092'
}
oh_oh_entry = {
'year': '1111'
}
good_entry = {
'year': '1948',
'title': 'A mathematical theory of communication',
'journal': 'Bell system technical journal',
'author': 'Shannon, C. E.',
'volume': '27'
}
assert subject.is_in_arxiv(dummy_entry)
assert not subject.is_in_arxiv(casual_entry)
assert not subject.is_in_arxiv(oh_oh_entry)
assert not subject.is_in_arxiv(good_entry)
def test_resolve_duplicates_in_primal_mode(self):
with patch("utils.load_configuration", return_value={"duplicationResolveMode": "primal"}) as mock:
subject = DuplicationHandler()
dummy_entry = {
'booktitle': 'arxiv: Dragon balls',
'year': '1991'
}
casual_entry = {
'journal': 'IEEE Transactions Dragon balls',
'year': '1092'
}
oh_oh_entry = {
'year': '1111'
}
conf_entry = {
'journal': 'Blizzard diablo conference',
'year': '1992',
'title': 'Diablo 1 will be remade, no diablo 4'
}
hmm_entry = {
'journal': 'random wording'
}
assert subject.resolve_duplicates([dummy_entry, casual_entry, oh_oh_entry]) == casual_entry
assert subject.resolve_duplicates([dummy_entry, oh_oh_entry]) == dummy_entry
assert subject.resolve_duplicates([oh_oh_entry]) == oh_oh_entry
assert subject.resolve_duplicates([conf_entry, dummy_entry]) == conf_entry
assert subject.resolve_duplicates([hmm_entry, oh_oh_entry]) == hmm_entry
def test_resolve_duplicates_in_first_mode(self):
with patch("utils.load_configuration", return_value={"duplicationResolveMode": "first"}) as mock:
subject = DuplicationHandler()
dummy_entry = {
'booktitle': 'arxiv: Dragon balls',
'year': '1991'
}
casual_entry = {
'journal': 'IEEE Transactions Dragon balls',
'year': '1092'
}
oh_oh_entry = {
'year': '1111'
}
conf_entry = {
'journal': 'Blizzard diablo conference',
'year': '1992',
'title': 'Diablo 1 will be remade, no diablo 4'
}
hmm_entry = {
'journal': 'random wording'
}
assert subject.resolve_duplicates([dummy_entry, casual_entry, oh_oh_entry]) == dummy_entry
assert subject.resolve_duplicates([dummy_entry, oh_oh_entry]) == dummy_entry
assert subject.resolve_duplicates([oh_oh_entry]) == oh_oh_entry
assert subject.resolve_duplicates([conf_entry, dummy_entry]) == conf_entry
assert subject.resolve_duplicates([hmm_entry, oh_oh_entry]) == hmm_entry
def test_resolve_duplicates_in_manual_mode(self):
with patch("utils.load_configuration", return_value={"duplicationResolveMode": "manual"}) as mock:
with patch("utils.get_answer_from", side_effect=['1', '2', '1']) as another_mock:
subject = DuplicationHandler()
dummy_entry = {
'booktitle': 'arxiv: Dragon balls',
'year': '1991'
}
casual_entry = {
'journal': 'IEEE Transactions Dragon balls',
'year': '1092'
}
oh_oh_entry = {
'year': '1111'
}
assert subject.resolve_duplicates([dummy_entry, casual_entry, oh_oh_entry]) == dummy_entry
assert subject.resolve_duplicates([dummy_entry, oh_oh_entry]) == oh_oh_entry
assert subject.resolve_duplicates([oh_oh_entry]) == oh_oh_entry
def test_resolve_duplicates_in_invalid_mode(self):
with patch("utils.load_configuration", return_value={"duplicationResolveMode": "invalid"}) as mock:
subject = DuplicationHandler()
dummy_entry = {
'booktitle': 'arxiv: Dragon balls',
'year': '1991'
}
with self.assertRaises(Exception) as context:
assert subject.resolve_duplicates([dummy_entry])
def test_remove_duplicates(self):
with patch("utils.load_configuration", return_value={"duplicationResolveMode": "primal"}) as mock:
subject = DuplicationHandler()
dummy_entry = {
'booktitle': 'arxiv: Dragon balls',
'year': '1991',
'title': 'Diablo 3: Rise of Necromancer'
}
casual_entry = {
'journal': 'IEEE Transactions Dragon balls',
'year': '1092',
'title': 'Diablo 3: Reaper of Souls'
}
oh_oh_entry = {
'year': '1111',
'title': 'Odyssey'
}
conf_entry = {
'journal': 'Blizzard diablo conference',
'year': '1992',
'title': 'Diablo 3: Reaper of Souls'
}
to_test = [dummy_entry, casual_entry, oh_oh_entry, conf_entry]
expected = [dummy_entry, oh_oh_entry, casual_entry]
assert subject.remove_duplicates(to_test) == expected
|
|
#!/usr/bin/python
from __future__ import print_function
import configparser
import os
import sys
import re
import MySQLdb
import argparse
import getpass
import subprocess
import logging
import uuid
def update():
## Assemble list of all bookworms on the system.
bookworms = [] ### ...
    ## Create on-disk versions of memory tables if 'fastcat_' does not exist.
pass
## Allow "'bookworm'@'localhost' IDENTIFIED BY ''" to have select access on each bookworm.
pass
## Print a message about enabling access.
pass
def create(ask_about_defaults=True, database=None):
"""
Through interactive prompts at the command line, builds up a file at
bookworm.cnf that can be used to set preferences for the installation.
"""
if ask_about_defaults:
print("""
Welcome to Bookworm.
~~~~~~~~~~~~~~~~~~~~
First off, let's build a configuration file. This will live
at bookworm.cnf in the current directory: if you mistype anything,
or want to change settings, edit it directly in that location.
For each of the following entries, type the value you want, or hit
enter to accept the default:
""")
else:
logging.info("Auto-generating config file.")
"""
First, we go to great efforts to find some sensible defaults
Usually the user can just hit enter.
"""
systemConfigFile = configparser.SafeConfigParser(allow_no_value=True)
defaults = dict()
# The default bookwormname is just the current location
if database is None:
defaults['database'] = os.path.relpath(".", "..")
else:
defaults['database'] = database
defaults["user"] = "bookworm"
defaults["password"] = ""
config = configparser.ConfigParser()
for section in ["client"]:
config.add_section(section)
if ask_about_defaults:
database = input("What is the name of the bookworm [" + defaults['database'] + "]: ")
else:
database = defaults['database']
config.set("client", "database", re.sub(" ","_",database))
config.write(open("bookworm.cnf", "w"))
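# Illustrative usage (a sketch, not part of the original script; the database
# name is hypothetical): a non-interactive run such as
#
#   create(ask_about_defaults=False, database="my_bookworm")
#
# writes bookworm.cnf in the current directory with a [client] section whose
# "database" value is "my_bookworm" (spaces in the name would be replaced by
# underscores).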
class Configfile(object):
def __init__(self, usertype, possible_locations=None, default=None, ask_about_defaults=True):
"""
Initialize with the type of the user. The last encountered file on
the list is the one that will be used.
If default is set, a file will be created at that location if none
of the files in possible_locations exist.
If ask_about_defaults is false, it will do a force installation.
"""
        if usertype not in ['read_only', 'admin']:
raise NotImplementedError("Only read_only and admin supported")
self.ask_about_defaults = ask_about_defaults
logging.info("Creating configuration as " + usertype)
self.usertype = usertype
if possible_locations is None:
possible_locations = self.default_locations_from_type(usertype)
self.location = None
self.config = configparser.ConfigParser(allow_no_value=True)
if usertype=="admin":
self.ensure_section("client")
self.ensure_section("mysqld")
self.config.set("client", "host", "localhost")
self.config.set("client", "user", "root")
self.config.set("client", "password", "")
else:
self.ensure_section("client")
self.config.set("client", "host", "localhost")
self.config.set("client", "user", "bookworm")
self.config.set("client", "password", "")
self.read_config_files(possible_locations)
for string in possible_locations:
if os.path.exists(string):
self.location = string
def read_config_files(self, used_files):
try:
self.config.read(used_files)
except configparser.MissingSectionHeaderError:
"""
Some files throw this error if you have an empty
my.cnf. This throws those out of the list, and tries again.
"""
            # Iterate over a copy so that removing a bad file does not skip
            # the following entry while the list is being modified.
            for file in list(used_files):
                try:
                    self.config.read(file)
                except configparser.MissingSectionHeaderError:
                    used_files.remove(file)
            successes = self.config.read(used_files)
def default_locations_from_type(self,usertype):
"""
The default locations for each usertype.
Note that these are in ascending order of importance:
so the preferred location for admin and read_only configuration
is in /etc/bookworm/admin.cnf
and /etc/bookworm/client.cnf
"""
if usertype=="admin":
return [os.path.abspath(os.path.expanduser("~/.my.cnf")),
os.path.abspath(os.path.expanduser("~/my.cnf")),
"/etc/bookworm/admin.cnf"]
if usertype == "read_only":
return ["~/.bookworm-sql.cnf", "/etc/bookworm/client.cnf"]
else:
return []
def ensure_section(self,section):
if not self.config.has_section(section):
self.config.add_section(section)
def set_bookworm_options(self):
"""
A number of specific MySQL changes to ensure fast queries on Bookworm.
"""
self.ensure_section("mysqld")
        mysqldoptions = {
            "### = =": "THIS FILE SHOULD GENERALLY BE PLACED AT /etc/mysql/my.cnf = = = ###",
            "max_allowed_packet": "512M",
            "sort_buffer_size": "8M",
            "read_buffer_size": "8M",
            "read_rnd_buffer_size": "8M",
            "bulk_insert_buffer_size": "512M",
            "myisam_sort_buffer_size": "5512M",
            "myisam_max_sort_file_size": "5500G",
            "key_buffer_size": "2500M",
            "query_cache_size": "32M",
            "tmp_table_size": "1024M",
            "max_heap_table_size": "2048M",
            "character_set_server": "utf8",
            "query_cache_type": "1",
            "query_cache_limit": "8M",
        }
for option in list(mysqldoptions.keys()):
if not self.config.has_option("mysqld",option):
self.config.set("mysqld", option, mysqldoptions[option])
else:
if mysqldoptions[option] != self.config.get("mysqld",option):
choice = input("Do you want to change the value for " + option + " from " + self.config.get("mysqld",option) + " to the bookworm-recommended " + mysqldoptions[option] + "? (y/N): ")
if choice=="y":
self.config.set("mysqld",option,mysqldoptions[option])
self.write_out()
def write_out(self):
"""
Write out a new version of the configfile to stdout.
The user is responsible for putting this somewhere it will
affect the MySQL preferences
"""
self.config.write(sys.stdout)
def recommend_my_cnf(known_loc = None):
if known_loc is None:
for loc in ["/usr/etc/my.cnf","/etc/mysql/my.cnf","/etc/my.cnf"]:
if os.path.exists(loc):
known_loc = loc
if known_loc is None:
raise FileNotFoundError("Could not find MySQL folder: pass one.")
cnf = Configfile(usertype = 'admin', possible_locations = [known_loc])
cnf.set_bookworm_options()
cnf.write_out()
def apache(self = None):
print("""
Instructions for Apache:
First: Serve the Bookworm API over port 10012. (`bookworm serve`).
Then: Install an Apache host on port 80.
Then: enable proxy servers and turn off any existing cgi.
# If you were previously using the CGI bookworm.
`sudo a2dismod cgi`
`sudo a2enmod proxy proxy_ajp proxy_http rewrite deflate headers proxy_balancer proxy_connect proxy_html`
Then: Add the following to your '/etc/apache2/sites-available/000-default.conf'
    (or whatever site from which you run your Apache).
~~~~~~~~~~~~~~~~
<Proxy *>
Order deny,allow
Allow from all
</Proxy>
ProxyPreserveHost On
<Location "/cgi-bin">
ProxyPass "http://127.0.0.1:10012/"
ProxyPassReverse "http://127.0.0.1:10012/"
</Location>
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
""")
|
|
import os
import re
import errno
import gzip
import shutil
import xml.etree.ElementTree as ET
from datetime import datetime
from .models import Video
from groups.models import Result, Row
from policies.models import Policy, Operation
from .constants import STORED_FILEPATH
def get_filename(original_name):
if original_name.endswith('.gz'):
original_name = os.path.splitext(original_name)[0]
name = os.path.splitext(original_name)[0]
return name
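# Illustrative behavior (a sketch, not part of the original module):
#
#   get_filename("report.xml.gz")  # -> "report"
#   get_filename("report.xml")     # -> "report"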
def search_result(start_field, end_field, keyword):
start = datetime.strptime(start_field,
"%Y/%m/%d %H:%M")
end = datetime.strptime(end_field,
"%Y/%m/%d %H:%M")
results = Video.objects.filter(upload_time__range=[start, end])
if keyword is not None:
results = results.filter(filename__contains=keyword)
return results
def check_file_exist(file_name):
original_file_name = file_name + '.xml.gz'
file_name = os.path.join(STORED_FILEPATH, original_file_name)
return os.path.exists(file_name)
def get_full_path_file_name(original_file_name):
original_file_name = original_file_name + '.xml'
original_file_name = os.path.join(STORED_FILEPATH, original_file_name)
if os.path.isfile(original_file_name) is False:
file_name = original_file_name + '.gz'
new_file_name = os.path.splitext(file_name)[0]
with gzip.open(file_name, 'rb') as f_in:
with open(new_file_name, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
return original_file_name
def process_file_original(file_name):
st = ""
nsmap = {}
datadict = {}
countdict = {}
specialdict = {}
specialdict['BRNGover002count'] = 0
specialdict['mse_y_over_1000'] = 0
specialdict['TOUT over 0.005'] = 0
specialdict['SATMAX over 88.7'] = 0
specialdict['SATMAX over 118.2'] = 0
countdict['yhigh-ylow'] = 0
datadict['yhigh-ylow'] = 0
count = 0
yhigh = 0
ylow = 0
for event, elem in ET.iterparse(file_name,
events=('start',
'end')):
if event == 'start':
if elem.tag == 'frame':
count += 1
if event == 'end':
key = elem.get("key")
if key is not None:
if key not in datadict:
datadict[key] = 0
countdict[key] = 0
value = elem.get("value")
if key == 'lavfi.signalstats.YHIGH':
yhigh = float(value)
if key == 'lavfi.signalstats.YLOW':
ylow = float(value)
if key == 'lavfi.signalstats.SATMAX' and float(value) > 88.7:
specialdict['SATMAX over 88.7'] += 1
if key == 'lavfi.signalstats.SATMAX' and float(value) > 118.2:
specialdict['SATMAX over 118.2'] += 1
if key == 'lavfi.signalstats.TOUT' and float(value) > 0.005:
specialdict['TOUT over 0.005'] += 1
if key == 'lavfi.psnr.mse.y' and float(value) > 1000:
specialdict['mse_y_over_1000'] += 1
if key == 'lavfi.signalstats.BRNG' and float(value) > 0.02:
specialdict['BRNGover002count'] += 1
datadict[key] += float(value)
diff = yhigh - ylow
datadict['yhigh-ylow'] += diff
elem.clear()
resultst = ''
for k in datadict.keys():
v = datadict[k]
ave = v/count
st = "{0} has average {1} <br />".format(k, ave)
resultst += st
for k, v in specialdict.items():
st = "{0} has {1} frames in {2} <br />".format(k, v, count)
resultst += st
return resultst
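# For reference (an assumption about the input format, not taken from this
# module): the parser above expects ffprobe/QCTools-style per-frame XML,
# roughly of the form
#
#   <frame>
#     <tag key="lavfi.signalstats.YHIGH" value="235"/>
#     <tag key="lavfi.signalstats.YLOW" value="16"/>
#   </frame>
#
# i.e. child elements carrying "key"/"value" attributes, which is what
# elem.get("key") / elem.get("value") read on the 'end' events.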
def process_file_with_policy(file_name, policy_id, original_name):
count = 0
datadict = {}
specialdict = {}
valuedict = {}
highdict = {}
lowdict = {}
newst = ''
new_key = ''
policy = Policy.objects.get(id=policy_id)
operations = Operation.objects.filter(policy=policy)
for op in operations:
if op.op_name == 'average':
datadict[op.signal_name] = 0
elif op.op_name == 'exceeds':
specialdict[op.signal_name] = 0
valuedict[op.signal_name] = op.cut_off_number
else:
new_key = op.signal_name + "-" + op.second_signal_name
datadict[new_key] = 0
highdict[op.signal_name] = 0
lowdict[op.second_signal_name] = 0
yhigh = 0
ylow = 0
for event, elem in ET.iterparse(file_name,
events=('start',
'end')):
if event == 'start':
if elem.tag == 'frame':
count += 1
if event == 'end':
key = elem.get("key")
if key is not None:
if key in datadict:
value = elem.get("value")
datadict[key] += float(value)
if key in specialdict and float(value) > valuedict[key]:
specialdict[key] += 1
if key in highdict:
yhigh = float(value)
if key in lowdict:
ylow = float(value)
diff = abs(yhigh - ylow)
datadict[new_key] += diff
elem.clear()
result_name = original_name + ".xml"
current_time_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
current_time = datetime.strptime(current_time_str,
"%Y-%m-%d %H:%M:%S")
new_result = Result(
filename=result_name,
policy_id=policy_id,
policy_name=policy.policy_name,
processed_time=current_time,
task_id=1,
status=True)
new_result.save()
result = Result.objects.get(filename=result_name,
processed_time=current_time_str)
for k in datadict.keys():
v = datadict[k]
ave = v/count
new_row = Row(
result=result,
signal_name=k,
result_number=ave,
op_name='average'
)
new_row.save()
for k, v in specialdict.items():
new_row = Row(
result=result,
signal_name=k,
result_number=v,
op_name='exceeded (out of {0} frames)'.format(count),
cut_off_number=valuedict[k]
)
new_row.save()
return result
def delete_file(file_name):
Video.objects.get(filename=file_name).delete()
original_file_name = file_name + '.xml'
original_file_name = os.path.join(STORED_FILEPATH, original_file_name)
try:
os.remove(original_file_name)
except OSError as e:
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
            raise  # re-raise exception if a different error occurred
#check xml.gz file
full_path_file_name_with_gz = original_file_name + ".gz"
try:
os.remove(full_path_file_name_with_gz)
except OSError as e:
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
            raise  # re-raise exception if a different error occurred
for f in os.listdir(STORED_FILEPATH):
        # Use a raw string so the backslash sequence is not treated as an
        # escape, and escape the dot so it matches a literal ".gz".
        pattern = file_name + ".xml" + r"_\w*\.gz"
if re.match(pattern, f):
os.remove(STORED_FILEPATH + "/" + f)
|
|
#!/usr/bin/python
import sys
sys.path += ['lib']
import os
import time
import ConfigParser
import flask
import requests
import utils
import math
import MySQLdb
import MySQLdb.cursors
from thread import start_new_thread as daemonize
from pprint import pprint
# Attempt to read the configuration file
cfg = ConfigParser.ConfigParser()
cnf_location = "main.cfg"
# Check to see if the configuration file exists
if not os.path.isfile(cnf_location):
print("Unable to read main.cfg. Does it exist? Exiting.")
os._exit(1)
# Attempt to load and parse the configuration file
try:
cfg.read(cnf_location)
except Exception as e:
print("There was an issue parsing main.cfg (%s)" % str(e))
print("Please fix these issues then restart dropbin!")
os._exit(1)
app = flask.Flask(__name__)
prefix = cfg.get('Database', 'db-prefix').rstrip('_')
def db_connect():
return MySQLdb.connect(
host=str(cfg.get('Database', 'db-hostname')),
port=int(cfg.get('Database', 'db-port')),
user=str(cfg.get('Database', 'db-user')),
passwd=str(cfg.get('Database', 'db-password')),
db=cfg.get('Database', 'db-name'),
cursorclass=MySQLdb.cursors.DictCursor,
charset='utf8',
use_unicode=True)
def query(*args):
with db_connect() as cursor:
cursor.execute(*args)
return list(cursor.fetchall())
def write(*args):
with db_connect() as cursor:
cursor.execute(*args)
return list(cursor.fetchall())
def bg_write(*args):
""" Run a query in the background if it's not runtime dependant """
return daemonize(write, tuple(args))
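# Illustrative usage of the database helpers (a sketch, not part of the
# original application; the author id and paste id are made up):
#
#   rows = query("SELECT id FROM {}_content WHERE author = %s".format(prefix), [42])
#   bg_write("UPDATE {}_content SET hits = hits + 1 WHERE id = %s".format(prefix), ['ab3'])
#
# query() and write() run synchronously and return the fetched rows as a list
# of dicts (DictCursor); bg_write() runs the same work on a background thread.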
def uuid():
reserved = ['login', 'logout', 'signin', 'signout', 'about', 'index', 'api']
while True:
_tmp = utils.gen_word(2, 3)
if _tmp in reserved:
continue
if not query("""SELECT id FROM {}_content WHERE id = %s""".format(prefix), [_tmp]):
return _tmp
@app.route('/')
def base():
return flask.render_template('new.html', paste=False)
@app.route('/dup/<paste>')
@app.route('/dup/<paste>.<lang>')
def duplicate(paste, lang=None):
if not utils.validate(paste):
return flask.redirect('/')
data = get_paste(paste, lang)
if not data:
        return flask.redirect('/')
return flask.render_template('new.html', dup=data)
@app.route('/api/pastes', methods=['POST'])
@app.route('/api/pastes/<int:page>', methods=['POST'])
@app.route('/api/pastes/<sortmode>', methods=['POST'])
@app.route('/api/pastes/<int:page>/<sortmode>', methods=['POST'])
@utils.auth
def api_user(page=1, sortmode="last_modified"):
def out(text):
print("\n\n\n" + str(text) + "\n\n\n")
limit = 8 # Assuming they want 8 results per page
    # Clamp the page number, then convert it to a row offset for the SQL LIMIT.
    if page < 1:
        page = 1
    _page = (int(page) - 1) * limit
data = {}
try:
data['posts'] = query("""SELECT id, LEFT(content, 200) as preview, language, language_short, created, last_modified, hits FROM {}_content WHERE author = %s ORDER BY last_modified DESC LIMIT %s,%s""".format(prefix), [flask.session['git']['id'], _page, limit])
for i in range(len(data['posts'])):
data['posts'][i]['hrt'] = utils.hrt(int(data['posts'][i]['last_modified']))
if data['posts'][i]['language_short']:
data['posts'][i]['language_short'] = '.' + data['posts'][i]['language_short']
data['count'] = query("""SELECT COUNT(id) AS cnt FROM {}_content WHERE author = %s""".format(prefix), [flask.session['git']['id']])[0]['cnt']
data['pages'] = int(math.ceil(float(data['count']) / float(limit)))
data['page_current'] = int(page)
# First make sure we've got the low alright
data['page_range'] = [pagenum for pagenum in range(int(page) - 2, int(page) + 3) if pagenum > 0]
data['page_range'] += range(data['page_range'][-1] + 1, data['page_range'][-1] + 5 - len(data['page_range']))
data['page_range'] = [pagenum for pagenum in data['page_range'] if pagenum <= data['pages']]
data['success'] = True
return flask.jsonify(data)
except Exception as e:
        print(repr(str(e)))
data['success'] = False
return flask.jsonify(data)
@app.route('/api/stats', methods=['POST'])
def api_stats():
data = {}
limit = 5
try:
_lang = query("""SELECT language, COUNT(language) AS ct FROM {}_content WHERE author = %s GROUP BY language ORDER BY ct DESC""".format(prefix), [flask.session['git']['id']])
for i in range(len(_lang)):
if _lang[i]['ct'] < 1:
_lang[i]['ct'] = 1
if len(_lang) > limit:
data['languages'] = _lang[:limit] + [{
'ct': sum([i['ct'] for i in _lang[limit:]]),
'language': 'other'
}]
data['languages'] = sorted(data['languages'], key=lambda k: k['ct'], reverse=True)
else:
data['languages'] = _lang
data['graph'] = {
'languages': {
'labels': [x['language'] for x in data['languages']],
'values': [x['ct'] for x in data['languages']]
}
}
data['success'] = True
return flask.jsonify(data)
except Exception as e:
        print(repr(str(e)))
data['success'] = False
return flask.jsonify(data)
@app.route('/api/submit', methods=['POST'])
def api_submit():
form = flask.request.form
req = ['paste', 'language', 'short']
for item in req:
if item not in form:
return flask.jsonify({
'success': False,
'message': 'Invalid submission data.'
})
# Too short.. anything below 5 characters is really small and is likely bullshit
if len(form['paste']) < 5:
return flask.jsonify({
'success': False,
'message': 'Paste is too short!'
})
    # Too long. The limit is 70,000 characters because anything larger degrades
    # the experience for both the end user and the server, and is getting to be
    # ridiculous.
if len(form['paste']) > 70000:
return flask.jsonify({
'success': False,
'message': 'Paste is too long!'
})
# Theoretically at this point there shouldn't be any other errors, maybe in
# the future, check to see if a user is in a blacklisted set of IP's/users?
id = uuid()
author = int(flask.session['git']['id']) if 'authed' in flask.session else None
language = form['language'] if form['language'] else None
language_short = form['short'] if form['short'] else None
created = int(time.time())
last_view = created
last_modified = created
ip = flask.request.remote_addr
hits = 1
if language_short:
uri = "%s.%s" % (id, language_short)
elif language:
uri = "%s.%s" % (id, language)
else:
uri = id
write("""INSERT INTO {}_content VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""".format(prefix), (
id, form['paste'], author, language, language_short, created, last_view, last_modified, ip, hits))
return flask.jsonify({
'success': True,
'message': 'Created paste',
'uri': uri,
})
@app.route('/<paste>')
@app.route('/<paste>.<lang>')
def pull_paste(paste, lang=None):
if not utils.validate(paste):
return flask.redirect('/')
if paste.lower() == 'about' and str(lang).lower() == 'md':
try:
with open('README.md', 'r') as f:
file = f.read()
data = {
'name': "about.md",
'paste': file,
'lines': len(file.split('\n')),
'chars': len(file),
'language': 'markdown'
}
return flask.render_template('paste.html', paste=data)
except:
pass
data = get_paste(paste, lang)
if not data:
        return flask.redirect('/')
return flask.render_template('paste.html', paste=data)
def get_paste(paste, lang=None):
try:
_tmp = query("""SELECT * FROM {}_content WHERE id = %s""".format(prefix), [paste.lower()])
if len(_tmp) > 1 or len(_tmp) < 1:
return False
bg_write("""UPDATE {}_content SET last_view = %s WHERE id = %s""".format(prefix), [int(time.time()), paste.lower()])
bg_write("""UPDATE {}_content SET hits = hits + 1 WHERE id = %s""".format(prefix), [paste.lower()])
if not lang:
lang = _tmp[0]['language']
name = str(paste)
else:
lang = lang.lower()
name = "{}.{}".format(str(paste), str(lang))
return {
'name': name,
'paste': _tmp[0]['content'],
'lines': len(_tmp[0]['content'].split('\n')),
'chars': len(_tmp[0]['content']),
'language': lang,
'hits': int(_tmp[0]['hits'])
}
except:
return False
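# For reference (a sketch derived from the code above, not part of the original
# application), a successful lookup returns a dict shaped like:
#
#   {'name': 'ab3.py', 'paste': '<content>', 'lines': 12, 'chars': 240,
#    'language': 'py', 'hits': 7}
#
# and False is returned when the id is unknown or the query fails.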
@app.route('/api/<paste>')
@app.route('/api/<paste>.<lang>')
def api(paste, lang=None):
if not utils.validate(paste):
return flask.redirect('/')
_tmp = query("""SELECT * FROM {}_content WHERE id = %s""".format(prefix), [paste.lower()])
if len(_tmp) > 1 or len(_tmp) < 1:
return flask.redirect('/')
bg_write("""UPDATE {}_content SET last_view = %s WHERE id = %s""".format(prefix), [int(time.time()), paste.lower()])
bg_write("""UPDATE {}_content SET hits = hits + 1 WHERE id = %s""".format(prefix), [paste.lower()])
if not lang:
lang = _tmp[0]['language']
else:
lang = lang.lower()
data = {
'paste': _tmp[0]['content'],
'lines': len(_tmp[0]['content'].split('\n')),
'chars': len(_tmp[0]['content']),
'language': lang
}
return flask.jsonify(data)
@app.route('/t/<paste>')
@app.route('/t/<paste>.<lang>')
def plaintext(paste, lang=None):
if not utils.validate(paste):
return flask.redirect('/')
if paste.lower() == 'about' and str(lang).lower() == 'md':
try:
with open('README.md', 'r') as f:
return flask.Response(f.read(), mimetype='text/plain')
except:
pass
_tmp = query("""SELECT * FROM {}_content WHERE id = %s""".format(prefix), [paste.lower()])
if len(_tmp) > 1 or len(_tmp) < 1:
return flask.redirect('/')
bg_write("""UPDATE {}_content SET last_view = %s WHERE id = %s""".format(prefix), [int(time.time()), paste.lower()])
bg_write("""UPDATE {}_content SET hits = hits + 1 WHERE id = %s""".format(prefix), [paste.lower()])
return flask.Response(_tmp[0]['content'], mimetype='text/plain')
@app.route('/login')
def process_login():
if 'authed' in flask.session:
if flask.session['authed']:
return flask.redirect('/')
errors, warnings, msgs = [], [], []
args = flask.request.args
err = args.get('error')
# Support using next for anything inter-website
if args.get('next'):
flask.session['next'] = args.get('next')
if err:
# More info: http://git.io/veeEM
# We've got to figure out what specifically is the issue, then depending
# on what it is, send the end user a response as a heads-up
if err == 'application_suspended':
errors.append('An internal error occurred. Please contact <a href="mailto:%s">%s</a> if the issue persists.' % (
cfg.get('Contact-info', 'email'), cfg.get('Contact-info', 'name')
))
elif err == 'redirect_uri_mismatch':
errors.append('An internal error occurred. Please contact <a href="mailto:%s">%s</a> if the issue persists.' % (
cfg.get('Contact-info', 'email'), cfg.get('Contact-info', 'name')
))
elif err == 'access_denied':
msgs.append(
'To be able to use this service, you will need to login to Github and validate paste.ml.<br><br>'
'<a href="/login" class="btn btn-md btn-success">Please try again <i class="fa fa-chevron-right"></i></a>'
)
else:
errors.append('An unknown response from Github was received. Unable to authenticate you.')
elif args.get('code'):
# "If the user accepts your request, GitHub redirects back to your site
# with a temporary code in a code parameter as well as the state you
# provided in the previous step in a state parameter. If the states don't
# match, the request has been created by a third party and the process
# should be aborted."
if args.get('state') == flask.session['state']:
# Actually do the stuff here, as we've confirmed it's a response
# from Github.
# This is what we need to get the oauth token, to pull api data
flask.session['code'] = args['code']
uri = 'https://github.com/login/oauth/access_token'
headers = {
'Accept': 'application/json'
}
payload = {
'client_id': cfg.get('Github', 'client-id'),
'client_secret': cfg.get('Github', 'client-secret'),
'code': flask.session['code']
}
try:
data = requests.post(uri, headers=headers, data=payload, timeout=10).json()
# pprint(data)
if 'error' in data:
if data['error'] == 'incorrect_client_credentials':
errors.append('An internal error occurred. Please contact <a href="mailto:%s">%s</a> if the issue persists.' % (
cfg.get('Contact-info', 'email'), cfg.get('Contact-info', 'name')
))
elif data['error'] == 'redirect_uri_mismatch':
errors.append('An internal error occurred. Please contact <a href="mailto:%s">%s</a> if the issue persists.' % (
cfg.get('Contact-info', 'email'), cfg.get('Contact-info', 'name')
))
elif data['error'] == 'bad_verification_code':
msgs.append(
'It seems when attempting to login you in, your session has expired.<br><br>'
'<a href="/login" class="btn btn-md btn-success">Please try again <i class="fa fa-chevron-right"></i></a>'
)
else:
errors.append('An unknown response from Github was received. Unable to authenticate you.')
else:
flask.session['token'] = data['access_token']
# Now, we should be good to make API calls
# In the future, store the last etag from the last call
# just in case, because then we can:
# * Prevent rate-limiting more
# * Allow quicker API checks to finish the login process
uri = 'https://api.github.com/user'
headers = {
'Authorization': 'token %s' % flask.session['token'],
# Per Githubs request, we're adding a user-agent just
# in case they need to get ahold of us.
'User-Agent': 'https://github.com/lrstanley/dropbin.git'
}
api_call = requests.get(uri, headers=headers).json()
pprint(api_call)
flask.session['git'] = api_call
flask.session['authed'] = True
except Exception:
errors.append('There was an error authenticating with Github. Please try again.')
else:
# The state GET attribute either exists and doesn't match, or doesn't
# exist. Either way, it's not legitimate.
errors.append('Invalid information returned from Github. Was the authentication spoofed?')
else:
# We need to start constructing the authorization URL here.
uri = 'https://github.com/login/oauth/authorize?client_id={id}&state={rand}'
flask.session['state'] = utils.gen_rand(10)
return flask.redirect(uri.format(id=cfg.get('Github', 'client-id'), rand=flask.session['state']))
if errors or warnings or msgs:
return flask.render_template('messages.html', errors=errors, warnings=warnings, msgs=msgs)
else:
# Support using next for anything inter-website
if 'next' in flask.session:
return flask.redirect('/%s' % flask.session['next'])
return flask.redirect('/') # Eventually we'll redirect to a control panel?
@app.route('/logout')
def process_logout():
if 'authed' in flask.session:
flask.session.clear()
# Support using next for anything inter-website
if flask.request.args.get('next'):
return flask.redirect('/%s' % flask.request.args.get('next'))
return flask.redirect('/')
@app.context_processor
def utility_processor():
def commas(number):
return "{:,d}".format(number)
return dict(
commas=commas
)
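# For example, {{ commas(1234567) }} in a Jinja2 template renders as
# "1,234,567" (the "{:,d}" format spec adds thousands separators).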
@app.errorhandler(404)
def page_not_found(error):
""" Catch all for any outdated pastes, or anything of that sort """
return flask.redirect('/')
# @app.after_request
# def add_header(response):
# """
# Add headers to both force latest IE rendering engine or Chrome Frame,
# and also to cache the rendered page for 10 minutes.
# """
# response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
# response.headers['Cache-Control'] = 'public, max-age=600'
# return response
def main():
app.secret_key = cfg.get('General', 'salt')
# Create the required tables if they don't already exist:
query("""CREATE TABLE IF NOT EXISTS {}_content (
id VARCHAR(50) NOT NULL,
content MEDIUMTEXT,
author INTEGER,
language VARCHAR(50),
language_short VARCHAR(50),
created INTEGER NOT NULL,
last_view INTEGER NOT NULL,
last_modified INTEGER NOT NULL,
ip VARCHAR(255) NOT NULL,
PRIMARY KEY (id)
)""".format(prefix))
query("""CREATE TABLE IF NOT EXISTS {}_users (
uid INTEGER NOT NULL,
login VARCHAR(255) NOT NULL,
name VARCHAR(255),
avatar VARCHAR(255),
location VARCHAR(255),
email VARCHAR(255),
created INTEGER NOT NULL,
last_login INTEGER NOT NULL,
admin BOOLEAN,
PRIMARY KEY (uid)
)""".format(prefix))
main()
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0', port=8080, threaded=True)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Metacloud
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from dogpile.cache import api
from dogpile.cache import proxy
from keystone.common import cache
from keystone import config
from keystone import exception
from keystone import tests
CONF = config.CONF
NO_VALUE = api.NO_VALUE
def _copy_value(value):
if value is not NO_VALUE:
value = copy.deepcopy(value)
return value
# NOTE(morganfainberg): WARNING - It is not recommended to use the Memory
# backend for dogpile.cache in a real deployment under any circumstances. The
# backend does no cleanup of expired values and therefore will leak memory. The
# backend is not implemented in a way to share data across processes (e.g.
# Keystone in HTTPD). This proxy is a hack to get around the lack of isolation
# of values in memory. Currently it blindly stores and retrieves the values
# from the cache, and modifications to dicts/lists/etc returned can result in
# changes to the cached values. In short, do not use the dogpile.cache.memory
# backend unless you are running tests or expecting odd/strange results.
class CacheIsolatingProxy(proxy.ProxyBackend):
"""Proxy that forces a memory copy of stored values.
The default in-memory cache-region does not perform a copy on values it
is meant to cache. Therefore if the value is modified after set or after
get, the cached value also is modified. This proxy does a copy as the last
thing before storing data.
"""
def get(self, key):
return _copy_value(self.proxied.get(key))
def set(self, key, value):
self.proxied.set(key, _copy_value(value))
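# A minimal, hypothetical illustration of the problem the proxy avoids (not
# part of the original module): with the plain memory backend and no copying,
# mutating a value obtained from the region also mutates the cached value.
# >>> region.set('key', {'a': 1})
# >>> region.get('key')['a'] = 2
# >>> region.get('key')
# {'a': 2}   # with CacheIsolatingProxy wrapped in, this would still be {'a': 1}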
class TestProxy(proxy.ProxyBackend):
def get(self, key):
value = _copy_value(self.proxied.get(key))
if value is not NO_VALUE:
if isinstance(value[0], TestProxyValue):
value[0].cached = True
return value
class TestProxyValue(object):
def __init__(self, value):
self.value = value
self.cached = False
class CacheRegionTest(tests.TestCase):
def __init__(self, *args, **kwargs):
super(CacheRegionTest, self).__init__(*args, **kwargs)
self.region = None
def setUp(self):
super(CacheRegionTest, self).setUp()
self.region = cache.make_region()
cache.configure_cache_region(self.region)
self.region.wrap(TestProxy)
self.test_value = TestProxyValue('Decorator Test')
def _add_test_caching_option(self):
test_cache_opt = config.config.cfg.BoolOpt('caching', default=True)
def reset_and_unregister_opt():
# NOTE(morganfainberg): Reset is required before unregistering
# arguments or ArgsAlreadyParsedError is raised.
CONF.reset()
CONF.unregister_opt(test_cache_opt, group='cache')
self.addCleanup(reset_and_unregister_opt)
CONF.register_opt(test_cache_opt, group='cache')
def _get_cacheable_function(self):
SHOULD_CACHE_FN = cache.should_cache_fn('cache')
@self.region.cache_on_arguments(should_cache_fn=SHOULD_CACHE_FN)
def cacheable_function(value):
return value
return cacheable_function
def test_region_built_with_proxy_direct_cache_test(self):
# Verify cache regions are properly built with proxies.
test_value = TestProxyValue('Direct Cache Test')
self.region.set('cache_test', test_value)
cached_value = self.region.get('cache_test')
self.assertTrue(cached_value.cached)
def test_cache_region_no_error_multiple_config(self):
# Verify configuring the CacheRegion again doesn't error.
cache.configure_cache_region(self.region)
cache.configure_cache_region(self.region)
def test_should_cache_fn_global_cache_enabled(self):
# Verify should_cache_fn generates a sane function for subsystem and
# functions as expected with caching globally enabled.
cacheable_function = self._get_cacheable_function()
self.opt_in_group('cache', enabled=True)
cacheable_function(self.test_value)
cached_value = cacheable_function(self.test_value)
self.assertTrue(cached_value.cached)
def test_should_cache_fn_global_cache_disabled(self):
# Verify should_cache_fn generates a sane function for subsystem and
# functions as expected with caching globally disabled.
cacheable_function = self._get_cacheable_function()
self.opt_in_group('cache', enabled=False)
cacheable_function(self.test_value)
cached_value = cacheable_function(self.test_value)
self.assertFalse(cached_value.cached)
def test_should_cache_fn_global_cache_disabled_section_cache_enabled(self):
# Verify should_cache_fn generates a sane function for subsystem and
# functions as expected with caching globally disabled and the specific
# section caching enabled.
cacheable_function = self._get_cacheable_function()
self._add_test_caching_option()
self.opt_in_group('cache', enabled=False)
self.opt_in_group('cache', caching=True)
cacheable_function(self.test_value)
cached_value = cacheable_function(self.test_value)
self.assertFalse(cached_value.cached)
def test_should_cache_fn_global_cache_enabled_section_cache_disabled(self):
# Verify should_cache_fn generates a sane function for subsystem and
# functions as expected with caching globally enabled and the specific
# section caching disabled.
cacheable_function = self._get_cacheable_function()
self._add_test_caching_option()
self.opt_in_group('cache', enabled=True)
self.opt_in_group('cache', caching=False)
cacheable_function(self.test_value)
cached_value = cacheable_function(self.test_value)
self.assertFalse(cached_value.cached)
def test_should_cache_fn_global_cache_enabled_section_cache_enabled(self):
# Verify should_cache_fn generates a sane function for subsystem and
# functions as expected with caching globally enabled and the specific
# section caching enabled.
cacheable_function = self._get_cacheable_function()
self._add_test_caching_option()
self.opt_in_group('cache', enabled=True)
self.opt_in_group('cache', caching=True)
cacheable_function(self.test_value)
cached_value = cacheable_function(self.test_value)
self.assertTrue(cached_value.cached)
def test_cache_dictionary_config_builder(self):
"""Validate we build a sane dogpile.cache dictionary config."""
self.opt_in_group('cache',
config_prefix='test_prefix',
backend='some_test_backend',
expiration_time=86400,
backend_argument=['arg1:test', 'arg2:test:test',
'arg3.invalid'])
config_dict = cache.build_cache_config()
self.assertEqual(
config_dict['test_prefix.backend'], CONF.cache.backend)
self.assertEqual(
config_dict['test_prefix.expiration_time'],
CONF.cache.expiration_time)
self.assertEqual(config_dict['test_prefix.arguments.arg1'], 'test')
self.assertEqual(config_dict['test_prefix.arguments.arg2'],
'test:test')
self.assertFalse('test_prefix.arguments.arg3' in config_dict)
def test_cache_debug_proxy(self):
single_value = 'Test Value'
single_key = 'testkey'
multi_values = {'key1': 1, 'key2': 2, 'key3': 3}
self.region.set(single_key, single_value)
self.assertEqual(single_value, self.region.get(single_key))
self.region.delete(single_key)
self.assertEqual(NO_VALUE, self.region.get(single_key))
self.region.set_multi(multi_values)
cached_values = self.region.get_multi(multi_values.keys())
for value in multi_values.values():
self.assertIn(value, cached_values)
self.assertEqual(len(multi_values.values()), len(cached_values))
self.region.delete_multi(multi_values.keys())
for value in self.region.get_multi(multi_values.keys()):
self.assertEqual(NO_VALUE, value)
def test_configure_non_region_object_raises_error(self):
self.assertRaises(exception.ValidationError,
cache.configure_cache_region,
"bogus")
class CacheNoopBackendTest(tests.TestCase):
def __init__(self, *args, **kwargs):
super(CacheNoopBackendTest, self).__init__(*args, **kwargs)
self.region = None
def setUp(self):
super(CacheNoopBackendTest, self).setUp()
self.region = cache.make_region()
self.opt_in_group('cache', backend='keystone.common.cache.noop')
cache.configure_cache_region(self.region)
def test_noop_backend(self):
single_value = 'Test Value'
single_key = 'testkey'
multi_values = {'key1': 1, 'key2': 2, 'key3': 3}
self.region.set(single_key, single_value)
self.assertEqual(NO_VALUE, self.region.get(single_key))
self.region.set_multi(multi_values)
cached_values = self.region.get_multi(multi_values.keys())
self.assertEqual(len(cached_values), len(multi_values.values()))
for value in cached_values:
self.assertEqual(NO_VALUE, value)
# Delete should not raise exceptions
self.region.delete(single_key)
self.region.delete_multi(multi_values.keys())
|
|
# -*- coding: utf-8 -*-
'''WebSocket_ Protocol is implemented via the :class:`Frame` and
:class:`FrameParser` classes.
To obtain a frame parser one should use the :func:`frame_parser` function.
frame parser
~~~~~~~~~~~~~~~~~~~
.. autofunction:: frame_parser
Frame
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: Frame
:members:
:member-order: bysource
Frame Parser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: FrameParser
:members:
:member-order: bysource
parse_close
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: parse_close
.. _WebSocket: http://tools.ietf.org/html/rfc6455'''
import os
from struct import pack, unpack
from array import array
from .pep import to_bytes
from .exceptions import ProtocolError
try:
from .lib import FrameParser as CFrameParser
except ImportError: # pragma nocover
CFrameParser = None
CLOSE_CODES = {
1000: "OK",
1001: "going away",
1002: "protocol error",
1003: "unsupported type",
# 1004: - (reserved)
# 1005: no status code (internal)
# 1006: connection closed abnormally (internal)
1007: "invalid data",
1008: "policy violation",
1009: "message too big",
1010: "extension required",
1011: "unexpected error",
# 1015: TLS failure (internal)
}
DEFAULT_VERSION = 13
SUPPORTED_VERSIONS = (DEFAULT_VERSION,)
WS_EXTENSIONS = {}
WS_PROTOCOLS = {}
def get_version(version):
try:
version = int(version or DEFAULT_VERSION)
except Exception:
pass
if version not in SUPPORTED_VERSIONS:
raise ProtocolError('Version %s not supported.' % version)
return version
class Extension(object):
def receive(self, data):
return data
def send(self, data):
return data
def frame_parser(version=None, kind=0, extensions=None, protocols=None,
pyparser=False):
'''Create a new :class:`FrameParser` instance.
:param version: protocol version, the default is 13
:param kind: the kind of parser, an integer between 0 and 3 (check the
:class:`FrameParser` documentation for details)
:param extensions: not used at the moment
:param protocols: not used at the moment
:param pyparser: if ``True`` (default ``False``) uses the python frame
parser implementation rather than the much faster cython
implementation.
'''
version = get_version(version)
Parser = FrameParser if pyparser else CFrameParser
# extensions, protocols
return Parser(version, kind, ProtocolError, close_codes=CLOSE_CODES)
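# A hedged usage sketch (not part of the original module): a client parser
# (kind=1) masks outgoing frames and a server parser (kind=0) expects masked
# input, so the two can round-trip a text message. pyparser=True forces the
# pure-python implementation so the example does not depend on the cython
# extension being built.
# >>> client = frame_parser(kind=1, pyparser=True)
# >>> server = frame_parser(kind=0, pyparser=True)
# >>> frame = server.decode(client.encode('Hello'))
# >>> frame.is_message, frame.body
# (True, 'Hello')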
def websocket_mask(data, masking_key):
mask_size = len(masking_key)
key = array('B', masking_key)
data = array('B', data)
for i in range(len(data)):
data[i] ^= key[i % mask_size]
return data.tobytes()
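# Masking is a cyclic XOR with the masking key, so applying the same key
# twice restores the original payload (illustrative only):
# >>> masked = websocket_mask(b'hello', b'\x01\x02\x03\x04')
# >>> websocket_mask(masked, b'\x01\x02\x03\x04')
# b'hello'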
class Frame:
_body = None
_masking_key = None
def __init__(self, opcode, final, payload_length):
self._opcode = opcode
self._final = final
self._payload_length = payload_length
@property
def final(self):
return self._final
@property
def opcode(self):
return self._opcode
@property
def body(self):
return self._body
@property
def masking_key(self):
return self._masking_key
@property
def is_message(self):
return self._opcode == 1
@property
def is_bytes(self):
return self._opcode == 2
@property
def is_close(self):
return self._opcode == 8
@property
def is_ping(self):
return self._opcode == 9
@property
def is_pong(self):
return self._opcode == 10
class FrameParser(object):
'''Decoder and encoder for the websocket protocol.
.. attribute:: version
Optional protocol version (Default 13).
.. attribute:: kind
* 0 for parsing client's frames and sending server frames (to be used
in the server)
* 1 for parsing server frames and sending client frames (to be used
by the client)
* 2 Assumes always unmasked data
* 3 Assumes always masked data (a mask is used both when encoding and decoding)
'''
def __init__(self, version, kind, ProtocolError, extensions=None,
protocols=None, close_codes=None):
self.version = version
self.kind = kind
self.frame = None
self.buffer = bytearray()
self._opcodes = (0, 1, 2, 8, 9, 10)
self._encode_mask_length = 0
self._decode_mask_length = 0
if kind == 0:
self._decode_mask_length = 4
elif kind == 1:
self._encode_mask_length = 4
elif kind == 3:
self._decode_mask_length = 4
self._encode_mask_length = 4
self._max_payload = 1 << 63
self._extensions = extensions
self._protocols = protocols
self._close_codes = close_codes or CLOSE_CODES
@property
def max_payload(self):
return self._max_payload
@property
def decode_mask_length(self):
return self._decode_mask_length
@property
def encode_mask_length(self):
return self._encode_mask_length
@property
def extensions(self):
return self._extensions
@property
def protocols(self):
return self._protocols
def ping(self, body=None):
'''return a `ping` :class:`Frame`.'''
return self.encode(body, opcode=0x9)
def pong(self, body=None):
'''return a `pong` :class:`Frame`.'''
return self.encode(body, opcode=0xA)
def close(self, code=None):
'''return a `close` :class:`Frame`.
'''
code = code or 1000
body = pack('!H', code)
body += self._close_codes.get(code, '').encode('utf-8')
return self.encode(body, opcode=0x8)
def continuation(self, body=None, final=True):
'''return a `continuation` :class:`Frame`.'''
return self.encode(body, opcode=0, final=final)
def encode(self, message, final=True, masking_key=None,
opcode=None, rsv1=0, rsv2=0, rsv3=0):
'''Encode a ``message`` for writing into the wire.
To produce several frames for a given large message use
:meth:`multi_encode` method.
'''
fin = 1 if final else 0
opcode, masking_key, data = self._info(message, opcode, masking_key)
return self._encode(data, opcode, masking_key, fin,
rsv1, rsv2, rsv3)
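# Illustrative only: with an unmasked (kind=0, server-side) parser a short
# text message yields the usual RFC 6455 layout, i.e. a first byte 0x81
# (FIN=1, opcode=1) followed by the 7-bit payload length and the payload:
# >>> frame_parser(kind=0, pyparser=True).encode('hello')
# b'\x81\x05hello'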
def multi_encode(self, message, masking_key=None, opcode=None,
rsv1=0, rsv2=0, rsv3=0, max_payload=0):
'''Encode a ``message`` into several frames depending on size.
Returns a generator of bytes to be sent over the wire.
'''
max_payload = max(2, max_payload or self._max_payload)
opcode, masking_key, data = self._info(message, opcode, masking_key)
#
while data:
if len(data) >= max_payload:
chunk, data, fin = (data[:max_payload],
data[max_payload:], 0)
else:
chunk, data, fin = data, b'', 1
yield self._encode(chunk, opcode, masking_key, fin,
rsv1, rsv2, rsv3)
def decode(self, data=None):
frame = self.frame
mask_length = self._decode_mask_length
if data:
self.buffer.extend(data)
if frame is None:
if len(self.buffer) < 2:
return
chunk = self._chunk(2)
first_byte, second_byte = unpack("BB", chunk)
fin = (first_byte >> 7) & 1
# rsv1 = (first_byte >> 6) & 1
# rsv2 = (first_byte >> 5) & 1
# rsv3 = (first_byte >> 4) & 1
opcode = first_byte & 0xf
if fin not in (0, 1):
raise ProtocolError('FIN must be 0 or 1')
if bool(mask_length) != bool(second_byte & 0x80):
if mask_length:
raise ProtocolError('unmasked client frame.')
else:
raise ProtocolError('masked server frame.')
payload_length = second_byte & 0x7f
# All control frames MUST have a payload length of 125 bytes
# or less
if opcode > 7:
if payload_length > 125:
raise ProtocolError(
'WEBSOCKET control frame too large')
elif not fin:
raise ProtocolError(
'WEBSOCKET control frame fragmented')
self.frame = frame = Frame(opcode, bool(fin), payload_length)
if frame._masking_key is None:
if frame._payload_length == 0x7e: # 126
if len(self.buffer) < 2 + mask_length: # 2 + 4 for mask
return
chunk = self._chunk(2)
frame._payload_length = unpack("!H", chunk)[0]
elif frame._payload_length == 0x7f: # 127
if len(self.buffer) < 8 + mask_length: # 8 + 4 for mask
return
chunk = self._chunk(8)
frame._payload_length = unpack("!Q", chunk)[0]
elif len(self.buffer) < mask_length:
return
if mask_length:
frame._masking_key = self._chunk(mask_length)
else:
frame._masking_key = b''
if len(self.buffer) >= frame._payload_length:
self.frame = None
chunk = self._chunk(frame._payload_length)
if self._extensions:
for extension in self._extensions:
chunk = extension.receive(frame, self.buffer)
if frame._masking_key:
chunk = websocket_mask(chunk, frame._masking_key)
if frame.opcode == 1:
frame._body = chunk.decode("utf-8", "replace")
else:
frame._body = chunk
return frame
def _encode(self, data, opcode, masking_key, fin, rsv1, rsv2, rsv3):
buffer = bytearray()
length = len(data)
mask_bit = 128 if masking_key else 0
buffer.append(((fin << 7) | (rsv1 << 6) | (rsv2 << 5) |
(rsv3 << 4) | opcode))
if length < 126:
buffer.append(mask_bit | length)
elif length < 65536:
buffer.append(mask_bit | 126)
buffer.extend(pack('!H', length))
elif length < self._max_payload:
buffer.append(mask_bit | 127)
buffer.extend(pack('!Q', length))
else:
raise ProtocolError('WEBSOCKET frame too large')
if masking_key:
buffer.extend(masking_key)
buffer.extend(websocket_mask(data, masking_key))
else:
buffer.extend(data)
return bytes(buffer)
def _info(self, message, opcode, masking_key):
mask_length = self._encode_mask_length
if mask_length:
masking_key = to_bytes(masking_key or os.urandom(4))
assert len(masking_key) == mask_length, "bad masking key"
else:
masking_key = b''
if opcode is None:
opcode = 1 if isinstance(message, str) else 2
data = to_bytes(message or b'', 'utf-8')
if opcode not in self._opcodes:
raise ProtocolError('WEBSOCKET opcode is a reserved value')
elif opcode > 7:
if len(data) > 125:
raise ProtocolError('WEBSOCKET control frame too large')
if opcode == 8:
# TODO CHECK CLOSE FRAME STATUS CODE
pass
return opcode, masking_key, data
def _chunk(self, length):
chunk = bytes(self.buffer[:length])
self.buffer = self.buffer[length:]
return chunk
def parse_close(data):
'''Parse the body of a close :class:`Frame`.
Returns a tuple (``code``, ``reason``) if successful otherwise
raise :class:`.ProtocolError`.
'''
length = len(data)
if length == 0:
return 1005, ''
elif length == 1:
raise ProtocolError("Close frame too short")
else:
code, = unpack('!H', data[:2])
if not (code in CLOSE_CODES or 3000 <= code < 5000):
raise ProtocolError("Invalid status code for websocket")
reason = data[2:].decode('utf-8')
return code, reason
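# Illustrative only: the body of a close frame is a 2-byte big-endian status
# code optionally followed by a utf-8 reason, which is what parse_close reads:
# >>> parse_close(pack('!H', 1001) + b'going away')
# (1001, 'going away')
# >>> parse_close(b'')
# (1005, '')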
if CFrameParser is None: # pragma nocover
CFrameParser = FrameParser
|
|
#----------------------------------------------------------------------
# Copyright (c) 2010-2013 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
'''
Credential creation and verification utilities.
'''
import os
import logging
import xmlrpclib
import sys
import datetime
import dateutil
import ext.sfa.trust.credential as cred
import ext.sfa.trust.gid as gid
import ext.sfa.trust.rights as rights
from ext.sfa.util.xrn import hrn_authfor_hrn
def naiveUTC(dt):
"""Converts dt to a naive datetime in UTC.
if 'dt' has a timezone then
convert to UTC
strip off timezone (make it "naive" in Python parlance)
"""
if dt.tzinfo:
tz_utc = dateutil.tz.tzutc()
dt = dt.astimezone(tz_utc)
dt = dt.replace(tzinfo=None)
return dt
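# Hedged example (assumes dateutil.tz is importable in this environment):
# >>> eastern = dateutil.tz.tzoffset('EDT', -4 * 3600)
# >>> naiveUTC(datetime.datetime(2013, 6, 1, 12, 0, tzinfo=eastern))
# datetime.datetime(2013, 6, 1, 16, 0)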
class CredentialVerifier(object):
"""Utilities to verify signed credentials from a given set of
root certificates. Will compare target and source URNs, and privileges.
See verify and verify_from_strings methods in particular."""
CATEDCERTSFNAME = 'CATedCACerts.pem'
# root_cert_fileordir is a trusted root cert file or directory of
# trusted roots for verifying credentials
def __init__(self, root_cert_fileordir, crl_files_path):
self.logger = logging.getLogger('cred-verifier')
if root_cert_fileordir is None:
raise Exception("Missing Root certs argument")
elif os.path.isdir(root_cert_fileordir):
files = os.listdir(root_cert_fileordir)
self.root_cert_files = []
for file in files:
# FIXME: exclude files that aren't cert files?
if file == CredentialVerifier.CATEDCERTSFNAME:
continue
self.root_cert_files.append(os.path.expanduser(os.path.join(root_cert_fileordir, file)))
self.logger.info('Will accept credentials signed by any of %d root certs found in %s: %r' % (len(self.root_cert_files), root_cert_fileordir, self.root_cert_files))
elif os.path.isfile(root_cert_fileordir):
self.logger.info('Will accept credentials signed by the single root cert %s' % root_cert_fileordir)
self.root_cert_files = [root_cert_fileordir]
else:
raise Exception("Couldn't find Root certs in %s" % root_cert_fileordir)
self.crl_files_path = crl_files_path
@classmethod
def getCAsFileFromDir(cls, caCerts):
'''Take a directory of CA certificates and concatenate them into a single
file suitable for use by the Python SSL library to validate client
credentials. Existing file is replaced.'''
if caCerts is None:
raise Exception ('Missing caCerts argument')
if os.path.isfile(os.path.expanduser(caCerts)):
return caCerts
if not os.path.isdir(os.path.expanduser(caCerts)):
raise Exception ('caCerts arg Not a file or a dir: %s' % caCerts)
logger = logging.getLogger('cred-verifier')
# Now we have a dir of caCerts files
# For each file in the dir (isfile), concatenate them into a new file
comboFullPath = os.path.join(caCerts, CredentialVerifier.CATEDCERTSFNAME)
caFiles = os.listdir(caCerts)
#logger.debug('Got %d potential caCert files in the dir', len(caFiles))
outfile = open(comboFullPath, "w")
okFileCount = 0
for filename in caFiles:
filepath = os.path.join(caCerts, filename)
# Confirm it's a CA file?
# if not file.endswith('.pem'):
# continue
if not os.path.isfile(os.path.expanduser(filepath)):
logger.debug('Skipping non file %s', filepath)
continue
if filename == CredentialVerifier.CATEDCERTSFNAME:
# logger.debug('Skipping previous cated certs file')
continue
okFileCount += 1
logger.info("Adding trusted cert file %s", filename)
certfile = open(filepath)
for line in certfile:
outfile.write(line)
certfile.close()
outfile.close()
if okFileCount == 0:
sys.exit('Found NO trusted certs in %s!' % caCerts)
else:
logger.info('Combined dir of %d trusted certs %s into file %s for Python SSL support', okFileCount, caCerts, comboFullPath)
return comboFullPath
def verify_from_strings(self, gid_string, cred_strings, target_urn,
privileges):
'''Create Credential and GID objects from the given strings,
and then verify the GID has the right privileges according
to the given credentials on the given target.'''
def make_cred(cred_string):
return cred.Credential(string=cred_string)
return self.verify(gid.GID(string=gid_string),
map(make_cred, cred_strings),
target_urn,
privileges)
def verify_source(self, source_gid, credential):
'''Ensure the credential is giving privileges to the caller/client.
Return True iff the given source (client) GID's URN
is == the given credential's Caller (Owner) URN'''
source_urn = source_gid.get_urn()
cred_source_urn = credential.get_gid_caller().get_urn()
#self.logger.debug('Verifying source %r against credential source %r (cred target %s)',
# source_urn, cred_source_urn, credential.get_gid_object().get_urn())
result = (cred_source_urn == source_urn)
if result:
# self.logger.debug('Source URNs match')
pass
else:
self.logger.debug('Source URNs do not match. Source URN %r != credential source URN %r', source_urn, cred_source_urn)
return result
def verify_target(self, target_urn, credential):
'''Ensure the credential is giving privileges on the right subject/target.
Return True if no target is specified, or the target URN
matches the credential's Object's (target's) URN, else return False.
No target is required, for example, to ListResources.'''
if not target_urn:
# self.logger.debug('No target specified, considering it a match.')
return True
else:
cred_target_urn = credential.get_gid_object().get_urn()
# self.logger.debug('Verifying target %r against credential target %r',
# target_urn, cred_target_urn)
result = target_urn == cred_target_urn
if result:
# self.logger.debug('Target URNs match.')
pass
else:
self.logger.debug('Target URNs do NOT match. Target URN %r != Credential URN %r', target_urn, cred_target_urn)
return result
def verify_privileges(self, privileges, credential):
''' Return True iff the given credential gives the privilege
to perform ALL of the privileges (actions) in the given list.
In particular, the given list of 'privileges' is really a list
of names of operations. The privileges in credentials are
each turned into Rights objects (see sfa/trust/rights.py).
And the SFA rights table is used to map from names of privileges
as specified in credentials, to names of operations.'''
result = True
privs = credential.get_privileges()
for priv in privileges:
if not privs.can_perform(priv):
self.logger.debug('Privilege %s not found on credential %s of %s', priv, credential.get_gid_object().get_urn(), credential.get_gid_caller().get_urn())
result = False
return result
def verify(self, gid, credentials, target_urn, privileges):
'''Verify that the given Source GID supplied at least one credential
in the given list of credentials that has all the privileges required
in the privileges list on the given target.
IE if any of the supplied credentials has a caller that matches gid
and a target that matches target_urn, and has all the privileges in
the given list, then return the list of credentials that were ok.
Throw an Exception if we fail to verify any credential.'''
# Note that here we treat the list of credentials as alternatives;
# we could instead accumulate privileges across them, for example.
# The semantics of the list of credentials is underspecified.
self.logger.debug('Verifying privileges')
result = list()
failure = ""
tried_creds = ""
for cred in credentials:
if tried_creds != "":
tried_creds = "%s, %s" % (tried_creds, cred.get_gid_caller().get_urn())
else:
tried_creds = cred.get_gid_caller().get_urn()
if not self.verify_source(gid, cred):
failure = "Cred %s fails: Credential doesn't grant rights to you (%s), but to %s (over object %s)" % (cred.get_gid_caller().get_urn(), gid.get_urn(), cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn())
continue
if not self.verify_target(target_urn, cred):
failure = "Cred granting rights to %s on %s fails: It grants permissions over a different target, not %s (URNs dont match)" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), target_urn)
continue
if not self.verify_privileges(privileges, cred):
failure = "Cred for %s over %s doesn't provide sufficient privileges" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn())
continue
try:
if not cred.verify(self.root_cert_files, crl_path=self.crl_files_path):
failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files))
continue
except Exception, exc:
failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs: %s: %s" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files), exc.__class__.__name__, exc)
self.logger.info(failure)
continue
# If got here it verified
result.append(cred)
if result and result != list():
# At least one credential verified ok and was added to the list
# return that list
return result
else:
# We did not find any credential with sufficient privileges
# Raise an exception.
fault_code = 'Insufficient privileges'
fault_string = 'No credential was found with appropriate privileges. Tried %s. Last failure: %s' % (tried_creds, failure)
self.logger.error(fault_string)
# GCF ticket #120 - do not raise an xmlrpclib Fault here -
# just an Exception. But the caller may want to turn this
# into one
# raise xmlrpclib.Fault(fault_code, fault_string)
raise Exception(fault_string)
def create_credential(caller_gid, object_gid, expiration, typename, issuer_keyfile, issuer_certfile, trusted_roots, delegatable=False):
'''Create and return a Credential object issued by the given key/cert for the given caller
and object GID objects, with the given expiration and of the given type.
Privileges are determined by type per sfa/trust/rights.py
Privileges are delegatable if requested.'''
# FIXME: Validate args: my gids, >0 life,
# type of cred one I can issue
# and readable key and cert files
if caller_gid is None:
raise ValueError("Missing Caller GID")
if object_gid is None:
raise ValueError("Missing Object GID")
if expiration is None:
raise ValueError("Missing expiration")
naive_expiration = naiveUTC(expiration)
duration = naive_expiration - datetime.datetime.utcnow()
life_secs = duration.seconds + duration.days * 24 * 3600
if life_secs < 1:
raise ValueError("Credential expiration is in the past")
if trusted_roots is None:
raise ValueError("Missing list of trusted roots")
if typename is None or typename.strip() == '':
raise ValueError("Missing credential type")
typename = typename.strip().lower()
if typename not in ("user", "sa", "ma", "authority", "slice", "component"):
raise ValueError("Unknown credential type %s" % typename)
if not os.path.isfile(issuer_keyfile):
raise ValueError("Cant read issuer key file %s" % issuer_keyfile)
if not os.path.isfile(issuer_certfile):
raise ValueError("Cant read issuer cert file %s" % issuer_certfile)
issuer_gid = gid.GID(filename=issuer_certfile)
if not (object_gid.get_urn() == issuer_gid.get_urn() or
(issuer_gid.get_type().find('authority') == 0 and
hrn_authfor_hrn(issuer_gid.get_hrn(), object_gid.get_hrn()))):
raise ValueError("Issuer not authorized to issue credential: Issuer=%s Target=%s" % (issuer_gid.get_urn(), object_gid.get_urn()))
ucred = cred.Credential()
# FIXME: Validate the caller_gid and object_gid
# are my user and slice
# Do get_issuer and compare to the issuer cert?
# Or do gid.is_signed_by_cert(issuer_certfile)?
ucred.set_gid_caller(caller_gid)
ucred.set_gid_object(object_gid)
ucred.set_expiration(expiration)
# Use sfa/trust/rights.py to figure out what privileges
# the credential should have.
# user means refresh, resolve, info
# per the privilege_table that lets users do
# remove, update, resolve, list, getcredential,
# listslices, listnodes, getpolicy
# Note that it does not allow manipulating slivers
# And every right is delegatable if any are delegatable (default False)
privileges = rights.determine_rights(typename, None)
privileges.delegate_all_privileges(delegatable)
ucred.set_privileges(privileges)
ucred.encode()
ucred.set_issuer_keys(issuer_keyfile, issuer_certfile)
ucred.sign()
try:
ucred.verify(trusted_roots)
except Exception, exc:
raise Exception("Create Credential failed to verify new credential from trusted roots: %s" % exc)
return ucred
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class fis(base_resource) :
""" Configuration for "FIS" resource. """
def __init__(self) :
self._name = ""
self._ownernode = 0
self._ifaces = ""
self.___count = 0
@property
def name(self) :
"""Name for the FIS to be created. Leading character must be a number or letter. Other characters allowed, after the first character, are @ _ - . (period) : (colon) # and space ( ). Note: In a cluster setup, the FIS name on each node must be unique.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
"""Name for the FIS to be created. Leading character must be a number or letter. Other characters allowed, after the first character, are @ _ - . (period) : (colon) # and space ( ). Note: In a cluster setup, the FIS name on each node must be unique.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def ownernode(self) :
"""ID of the cluster node for which you are creating the FIS. Can be configured only through the cluster IP address.<br/>Maximum length = 31.
"""
try :
return self._ownernode
except Exception as e:
raise e
@ownernode.setter
def ownernode(self, ownernode) :
"""ID of the cluster node for which you are creating the FIS. Can be configured only through the cluster IP address.<br/>Maximum length = 31
"""
try :
self._ownernode = ownernode
except Exception as e:
raise e
@property
def ifaces(self) :
"""Interfaces to be bound to the FIS, in slot/port notation (for example, 1/3).
"""
try :
return self._ifaces
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(fis_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.fis
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.name) :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
""" Use this API to add fis.
"""
try :
if type(resource) is not list :
addresource = fis()
addresource.name = resource.name
addresource.ownernode = resource.ownernode
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ fis() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].ownernode = resource[i].ownernode
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
""" Use this API to delete fis.
"""
try :
if type(resource) is not list :
deleteresource = fis()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ fis() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ fis() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the fis resources that are configured on netscaler.
"""
try :
if not name :
obj = fis()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = fis()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [fis() for _ in range(len(name))]
obj = [fis() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = fis()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
""" Use this API to fetch filtered set of fis resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = fis()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
""" Use this API to count the fis resources configured on NetScaler.
"""
try :
obj = fis()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
""" Use this API to count filtered the set of fis resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = fis()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
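# A hedged usage sketch (hypothetical, not part of the generated SDK file).
# It assumes the usual NITRO client setup from the same package:
#
# from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
# client = nitro_service("10.0.0.1", "http")
# client.login("nsroot", "password")
# new_fis = fis()
# new_fis.name = "fis1"
# fis.add(client, new_fis)          # create the FIS on the appliance
# print(fis.count(client))          # how many FIS resources are configured
# client.logout()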
class fis_response(base_response) :
def __init__(self, length=1) :
self.fis = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.fis = [fis() for _ in range(length)]
|
|
#!/usr/bin/env python
"""
A command line tool for running reports on XML files.
inspectxml allows you to rapidly extract details from large XML files
on the command line.
Run "inspectxml --help" for details of the command line parameters, but
here are some pointers to get you started.
Let's say you have a simple database dump format with the following
form:
<db>
<record id="1">
<name>Alex</name>
<address>123 Maple St.</address>
</record>
<record id="2">
<name>Bob</name>
<address>456 Birch Rd.</address>
</record>
<record id="3">
<name>Chris</name>
<address>789 Pine St.</address>
</record>
</db>
You can:
Get all the full contents of name elements
$ inspectxml file.xml name
<name>Alex</name>
<name>Bob</name>
<name>Chris</name>
Get the full contents of the record with ID 2
$ inspectxml file.xml record "@id='2'"
<record id="2">
<name>Bob</name>
<address>456 Birch Rd.</address>
</record>
Get the full contents of the first two name elements
$ inspectxml -c 2 file.xml name
<name>Alex</name>
<name>Bob</name>
Get the name of the record with ID 2
$ inspectxml -d "name" file.xml record "@id='2'"
<name>Bob</name>
You could display the id and each corresponding name as follows:
$ inspectxml file.xml "@id|name"
1
<name>Alex</name>
2
<name>Bob</name>
3
<name>Chris</name>
Or a more precise approach might be (demonstrating the use of XPath functions):
$ inspectxml -d "concat(@id, ': ', name)" file.xml record
1: Alex
2: Bob
3: Chris
inspectxml uses namespaces declared on the document element, so you can
conveniently make queries without needing to separately declare prefixes.
So to get the URLs of all the a links in an XHTML document you could do:
inspectxml -d "@href" file.xhtml "ht:a"
as long as there is a namespace declaration
xmlns:ht="http://www.w3.org/1999/xhtml" in the document. If not
(many XHTML documents use the default namespace, which, courtesy of XPath 1.0
restrictions, prevents inspectxml from doing any guesswork for you), you have
to declare the prefix:
inspectxml --ns=ht="http://www.w3.org/1999/xhtml" -d "@href" http://www.w3.org/2000/07/8378/xhtml/media-types/test4.xhtml "ht:a"
Notice how this example loads the source XML (XHTML) from a Web URL rather than a local file. Of course, a shortcut for this is simply:
inspectxml http://www.w3.org/2000/07/8378/xhtml/media-types/test4.xhtml "@href"
"""
#The following won't work because EXSLT is only supported in XsltContext and we use Ft.Xml.XPath.Context
#We can probably revisit when we make bindery nodes subclasses of Domlette
#inspectxml --ns=str="http://exslt.org/strings" -d "str:replace(., 'http://', '')" http://www.w3.org/2000/07/8378/xhtml/media-types/test4.xhtml "@href"
import os
import re
import sys
import codecs
import optparse
#import cStringIO
import amara
from amara import tree  # needed for tree.attribute.xml_type below
#from xml.dom import EMPTY_NAMESPACE as NULL_NAMESPACE
#from xml.dom import EMPTY_PREFIX as NULL_PREFIX
#FIXME: Use 4Suite L10N
def _(t): return t
def run(source, xpattern, xpath, limit, sentinel, display, prefixes):
prefixes = prefixes or {}
try:
prefixes = dict([ p.split('=') for p in prefixes ])
except ValueError:
raise ValueError("Invalid prefix declaration")
#if hasattr(source, 'read'):
# if hasattr(source, 'rewind'):
# nss = saxtools.sniff_namespace(source)
# source.rewind()
# else:
# source = source.read()
# nss = saxtools.sniff_namespace(source)
#else:
# nss = saxtools.sniff_namespace(source)
#nss.update(prefixes)
nss = prefixes
doc = amara.parse(source)
#nodes = amara.pushbind(source, xpattern, prefixes=nss)
count = 0
search_space = doc.xml_select(u'//' + xpattern.lstrip(u'//'))
#FIXME: Until we have something pushbind-like trim all nodes not in the search space
for node in search_space:
if not xpath or node.xml_select(xpath):
count += 1
if display:
#Print specified subset
result = node.xml_select(display)
if hasattr(result, 'next'):
#print '\n'.join([ n.xml_type == tree.attribute.xml_type and n.xml_value or amara.xml_print(n) for n in result ])
print '\n'.join( (n.xml_type == tree.attribute.xml_type and n.xml_value or amara.xml_print(n) for n in result) )
else:
print result
else:
#Print the whole thing
try:
amara.xml_print(node)
except AttributeError:
print unicode(node).encode('utf-8')
if limit != -1 and count >= limit:
break
if sentinel and node.xml_select(sentinel):
break
print
return
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def command_line_prep():
from optparse import OptionParser
usage = "%prog [options] source xpattern [xpath]"
parser = OptionParser(usage=usage)
parser.add_option("-c", "--limit",
action="store", type="int", dest="limit", default=-1,
help="limit the number of xpattern matches retrieved; files will not be parsed beyond this number, so it serves as optimization", metavar="NUMBER")
parser.add_option("-d", "--display",
action="store", type="string", dest="display",
help="xpath expression indicating what nodes to be displayed from matched and screened patterns", metavar="XPATH")
parser.add_option("-n", "--ns",
action="append", type="string", dest="ns",
help="prefix to namespace mapping", metavar="<PREFIX=URI>")
parser.add_option("--sentinel",
action="store", type="string", dest="sentinel",
help="xpath expression to be checked for each pattern match. If true it causes the reporting to stop, with no further parsing", metavar="XPATH")
#parser.add_option("-q", "--quiet",
# action="store_false", dest="verbose", default=1,
# help="don't print status messages to stdout")
return parser
def main(argv=None):
#Ideas borrowed from
# http://www.artima.com/forums/flat.jsp?forum=106&thread=4829
#But with better integration of entry points
if argv is None:
argv = sys.argv
# By default, optparse usage errors are terminated by SystemExit
try:
optparser = command_line_prep()
options, args = optparser.parse_args(argv[1:])
# Process mandatory arguments with IndexError try...except blocks
try:
source = args[0]
except IndexError:
optparser.error("Missing filename/URL to parse")
try:
xpattern = args[1]
except IndexError:
optparser.error("Missing main xpattern")
except SystemExit, status:
return status
# Perform additional setup work here before dispatching to run()
# Detectable errors encountered here should be handled and a status
# code of 1 should be returned. Note, this would be the default code
# for a SystemExit exception with a string message.
try:
xpath = args[2].decode('utf-8')
except IndexError:
xpath = None
xpattern = xpattern.decode('utf-8')
sentinel = options.sentinel and options.sentinel.decode('utf-8')
display = options.display and options.display.decode('utf-8')
prefixes = options.ns
limit = options.limit
if source == '-':
source = sys.stdin
run(source, xpattern, xpath, limit, sentinel, display, prefixes)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
#!/usr/bin/python
import json
import os
import random
import unittest
import numpy as np
from pymatgen.analysis.structure_matcher import StructureMatcher
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.surface import (
ReconstructionGenerator,
Slab,
SlabGenerator,
generate_all_slabs,
get_d,
get_slab_regions,
get_symmetrically_distinct_miller_indices,
get_symmetrically_equivalent_miller_indices,
miller_index_from_sites,
)
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.groups import SpaceGroup
from pymatgen.util.testing import PymatgenTest
def get_path(path_str):
cwd = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(cwd, "..", "..", "..", "test_files", "surface_tests", path_str)
return path
class SlabTest(PymatgenTest):
def setUp(self):
zno1 = Structure.from_file(get_path("ZnO-wz.cif"), primitive=False)
zno55 = SlabGenerator(zno1, [1, 0, 0], 5, 5, lll_reduce=False, center_slab=False).get_slab()
Ti = Structure(
Lattice.hexagonal(4.6, 2.82),
["Ti", "Ti", "Ti"],
[
[0.000000, 0.000000, 0.000000],
[0.333333, 0.666667, 0.500000],
[0.666667, 0.333333, 0.500000],
],
)
Ag_fcc = Structure(
Lattice.cubic(4.06),
["Ag", "Ag", "Ag", "Ag"],
[
[0.000000, 0.000000, 0.000000],
[0.000000, 0.500000, 0.500000],
[0.500000, 0.000000, 0.500000],
[0.500000, 0.500000, 0.000000],
],
)
m = [[3.913449, 0, 0], [0, 3.913449, 0], [0, 0, 5.842644]]
latt = Lattice(m)
fcoords = [[0.5, 0, 0.222518], [0, 0.5, 0.777482], [0, 0, 0], [0, 0, 0.5], [0.5, 0.5, 0]]
non_laue = Structure(latt, ["Nb", "Nb", "N", "N", "N"], fcoords)
self.ti = Ti
self.agfcc = Ag_fcc
self.zno1 = zno1
self.zno55 = zno55
self.nonlaue = non_laue
self.h = Structure(Lattice.cubic(3), ["H"], [[0, 0, 0]])
self.libcc = Structure(Lattice.cubic(3.51004), ["Li", "Li"], [[0, 0, 0], [0.5, 0.5, 0.5]])
def test_init(self):
zno_slab = Slab(
self.zno55.lattice,
self.zno55.species,
self.zno55.frac_coords,
self.zno55.miller_index,
self.zno55.oriented_unit_cell,
0,
self.zno55.scale_factor,
)
m = self.zno55.lattice.matrix
area = np.linalg.norm(np.cross(m[0], m[1]))
self.assertAlmostEqual(zno_slab.surface_area, area)
self.assertEqual(zno_slab.lattice.parameters, self.zno55.lattice.parameters)
self.assertEqual(zno_slab.oriented_unit_cell.composition, self.zno1.composition)
self.assertEqual(len(zno_slab), 8)
# check reorient_lattice: get a slab that is not reoriented and check that
# reorientation works even with cartesian coordinates.
zno_not_or = SlabGenerator(
self.zno1,
[1, 0, 0],
5,
5,
lll_reduce=False,
center_slab=False,
reorient_lattice=False,
).get_slab()
zno_slab_cart = Slab(
zno_not_or.lattice,
zno_not_or.species,
zno_not_or.cart_coords,
zno_not_or.miller_index,
zno_not_or.oriented_unit_cell,
0,
zno_not_or.scale_factor,
coords_are_cartesian=True,
reorient_lattice=True,
)
self.assertArrayAlmostEqual(zno_slab.frac_coords, zno_slab_cart.frac_coords)
c = zno_slab_cart.lattice.matrix[2]
self.assertArrayAlmostEqual([0, 0, np.linalg.norm(c)], c)
def test_add_adsorbate_atom(self):
zno_slab = Slab(
self.zno55.lattice,
self.zno55.species,
self.zno55.frac_coords,
self.zno55.miller_index,
self.zno55.oriented_unit_cell,
0,
self.zno55.scale_factor,
)
zno_slab.add_adsorbate_atom([1], "H", 1)
self.assertEqual(len(zno_slab), 9)
self.assertEqual(str(zno_slab[8].specie), "H")
self.assertAlmostEqual(zno_slab.get_distance(1, 8), 1.0)
self.assertTrue(zno_slab[8].c > zno_slab[0].c)
m = self.zno55.lattice.matrix
area = np.linalg.norm(np.cross(m[0], m[1]))
self.assertAlmostEqual(zno_slab.surface_area, area)
self.assertEqual(zno_slab.lattice.parameters, self.zno55.lattice.parameters)
def test_get_sorted_structure(self):
species = [str(site.specie) for site in self.zno55.get_sorted_structure()]
self.assertEqual(species, ["Zn2+"] * 4 + ["O2-"] * 4)
def test_methods(self):
# Test various structure methods
self.zno55.get_primitive_structure()
def test_as_from_dict(self):
d = self.zno55.as_dict()
obj = Slab.from_dict(d)
self.assertEqual(obj.miller_index, (1, 0, 0))
def test_dipole_and_is_polar(self):
self.assertArrayAlmostEqual(self.zno55.dipole, [0, 0, 0])
self.assertFalse(self.zno55.is_polar())
cscl = self.get_structure("CsCl")
cscl.add_oxidation_state_by_element({"Cs": 1, "Cl": -1})
slab = SlabGenerator(
cscl,
[1, 0, 0],
5,
5,
reorient_lattice=False,
lll_reduce=False,
center_slab=False,
).get_slab()
self.assertArrayAlmostEqual(slab.dipole, [-4.209, 0, 0])
self.assertTrue(slab.is_polar())
def test_surface_sites_and_symmetry(self):
# test if surfaces are equivalent by using
# Laue symmetry and surface site equivalence
for bool in [True, False]:
# We will also set the slab to be centered and
# off centered in order to test the center of mass
slabgen = SlabGenerator(self.agfcc, (3, 1, 0), 10, 10, center_slab=bool)
slab = slabgen.get_slabs()[0]
surf_sites_dict = slab.get_surface_sites()
self.assertEqual(len(surf_sites_dict["top"]), len(surf_sites_dict["bottom"]))
total_surf_sites = sum([len(surf_sites_dict[key]) for key in surf_sites_dict.keys()])
self.assertTrue(slab.is_symmetric())
self.assertEqual(total_surf_sites / 2, 4)
# Test if the ratio of surface sites per area is
# constant, ie are the surface energies the same
r1 = total_surf_sites / (2 * slab.surface_area)
slabgen = SlabGenerator(self.agfcc, (3, 1, 0), 10, 10, primitive=False)
slab = slabgen.get_slabs()[0]
surf_sites_dict = slab.get_surface_sites()
total_surf_sites = sum([len(surf_sites_dict[key]) for key in surf_sites_dict.keys()])
r2 = total_surf_sites / (2 * slab.surface_area)
self.assertArrayAlmostEqual(r1, r2)
def test_symmetrization(self):
# Restricted to primitive_elemental materials due to the risk of
# broken stoichiometry. For compound materials, use is_polar()
# Get all slabs for P6/mmm Ti and Fm-3m Ag up to index of 2
all_Ti_slabs = generate_all_slabs(
self.ti,
2,
10,
10,
bonds=None,
tol=1e-3,
max_broken_bonds=0,
lll_reduce=False,
center_slab=False,
primitive=True,
max_normal_search=2,
symmetrize=True,
)
all_Ag_fcc_slabs = generate_all_slabs(
self.agfcc,
2,
10,
10,
bonds=None,
tol=1e-3,
max_broken_bonds=0,
lll_reduce=False,
center_slab=False,
primitive=True,
max_normal_search=2,
symmetrize=True,
)
all_slabs = [all_Ti_slabs, all_Ag_fcc_slabs]
for i, slabs in enumerate(all_slabs):
asymmetric_count = 0
symmetric_count = 0
for i, slab in enumerate(slabs):
sg = SpacegroupAnalyzer(slab)
# Check if a slab is symmetric
if not sg.is_laue():
asymmetric_count += 1
else:
symmetric_count += 1
# Check if slabs are all symmetric
self.assertEqual(asymmetric_count, 0)
self.assertEqual(symmetric_count, len(slabs))
# Check if we can generate symmetric slabs from bulk with no inversion
all_non_laue_slabs = generate_all_slabs(self.nonlaue, 1, 15, 15, symmetrize=True)
self.assertTrue(len(all_non_laue_slabs) > 0)
def test_get_symmetric_sites(self):
# Check to see if we get an equivalent site on one
# surface if we add a new site to the other surface
all_Ti_slabs = generate_all_slabs(
self.ti,
2,
10,
10,
bonds=None,
tol=1e-3,
max_broken_bonds=0,
lll_reduce=False,
center_slab=False,
primitive=True,
max_normal_search=2,
symmetrize=True,
)
for slab in all_Ti_slabs:
sorted_sites = sorted(slab, key=lambda site: site.frac_coords[2])
site = sorted_sites[-1]
point = np.array(site.frac_coords)
point[2] = point[2] + 0.1
point2 = slab.get_symmetric_site(point)
slab.append("O", point)
slab.append("O", point2)
# Check if slab is all symmetric
sg = SpacegroupAnalyzer(slab)
self.assertTrue(sg.is_laue())
def test_oriented_unit_cell(self):
# Check to see if we get the fully reduced oriented unit
# cell. This will also ensure that the constrain_latt
# parameter for get_primitive_structure is working properly
def surface_area(s):
m = s.lattice.matrix
return np.linalg.norm(np.cross(m[0], m[1]))
all_slabs = generate_all_slabs(self.agfcc, 2, 10, 10, max_normal_search=3)
for slab in all_slabs:
ouc = slab.oriented_unit_cell
self.assertAlmostEqual(surface_area(slab), surface_area(ouc))
self.assertGreaterEqual(len(slab), len(ouc))
def test_get_slab_regions(self):
# If a slab layer in the slab cell is not completely inside
# the cell (noncontiguous), check that get_slab_regions will
# be able to identify where the slab layers are located
s = self.get_structure("LiFePO4")
slabgen = SlabGenerator(s, (0, 0, 1), 15, 15)
slab = slabgen.get_slabs()[0]
slab.translate_sites([i for i, site in enumerate(slab)], [0, 0, -0.25])
bottom_c, top_c = [], []
for site in slab:
if site.frac_coords[2] < 0.5:
bottom_c.append(site.frac_coords[2])
else:
top_c.append(site.frac_coords[2])
ranges = get_slab_regions(slab)
self.assertEqual(tuple(ranges[0]), (0, max(bottom_c)))
self.assertEqual(tuple(ranges[1]), (min(top_c), 1))
def test_as_dict(self):
slabs = generate_all_slabs(
self.ti,
1,
10,
10,
bonds=None,
tol=1e-3,
max_broken_bonds=0,
lll_reduce=False,
center_slab=False,
primitive=True,
)
slab = slabs[0]
s = json.dumps(slab.as_dict())
d = json.loads(s)
self.assertEqual(slab, Slab.from_dict(d))
# test initialising with a list scale_factor
slab = Slab(
self.zno55.lattice,
self.zno55.species,
self.zno55.frac_coords,
self.zno55.miller_index,
self.zno55.oriented_unit_cell,
0,
self.zno55.scale_factor.tolist(),
)
s = json.dumps(slab.as_dict())
d = json.loads(s)
self.assertEqual(slab, Slab.from_dict(d))
class SlabGeneratorTest(PymatgenTest):
def setUp(self):
lattice = Lattice.cubic(3.010)
frac_coords = [
[0.00000, 0.00000, 0.00000],
[0.00000, 0.50000, 0.50000],
[0.50000, 0.00000, 0.50000],
[0.50000, 0.50000, 0.00000],
[0.50000, 0.00000, 0.00000],
[0.50000, 0.50000, 0.50000],
[0.00000, 0.00000, 0.50000],
[0.00000, 0.50000, 0.00000],
]
species = ["Mg", "Mg", "Mg", "Mg", "O", "O", "O", "O"]
self.MgO = Structure(lattice, species, frac_coords)
        self.MgO.add_oxidation_state_by_element({"Mg": 2, "O": -2})
lattice_Dy = Lattice.hexagonal(3.58, 25.61)
frac_coords_Dy = [
[0.00000, 0.00000, 0.00000],
[0.66667, 0.33333, 0.11133],
[0.00000, 0.00000, 0.222],
[0.66667, 0.33333, 0.33333],
[0.33333, 0.66666, 0.44467],
[0.66667, 0.33333, 0.55533],
[0.33333, 0.66667, 0.66667],
[0.00000, 0.00000, 0.778],
[0.33333, 0.66667, 0.88867],
]
species_Dy = ["Dy", "Dy", "Dy", "Dy", "Dy", "Dy", "Dy", "Dy", "Dy"]
self.Dy = Structure(lattice_Dy, species_Dy, frac_coords_Dy)
def test_get_slab(self):
s = self.get_structure("LiFePO4")
gen = SlabGenerator(s, [0, 0, 1], 10, 10)
s = gen.get_slab(0.25)
self.assertAlmostEqual(s.lattice.abc[2], 20.820740000000001)
fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"], [[0, 0, 0]])
gen = SlabGenerator(fcc, [1, 1, 1], 10, 10, max_normal_search=1)
slab = gen.get_slab()
self.assertEqual(len(slab), 6)
gen = SlabGenerator(fcc, [1, 1, 1], 10, 10, primitive=False, max_normal_search=1)
slab_non_prim = gen.get_slab()
self.assertEqual(len(slab_non_prim), len(slab) * 4)
# Some randomized testing of cell vectors
for i in range(1, 231):
i = random.randint(1, 230)
sg = SpaceGroup.from_int_number(i)
if sg.crystal_system == "hexagonal" or (
sg.crystal_system == "trigonal"
and (
sg.symbol.endswith("H")
or sg.int_number
in [
143,
144,
145,
147,
149,
150,
151,
152,
153,
154,
156,
157,
158,
159,
162,
163,
164,
165,
]
)
):
latt = Lattice.hexagonal(5, 10)
else:
# Cubic lattice is compatible with all other space groups.
latt = Lattice.cubic(5)
s = Structure.from_spacegroup(i, latt, ["H"], [[0, 0, 0]])
miller = (0, 0, 0)
while miller == (0, 0, 0):
miller = (
random.randint(0, 6),
random.randint(0, 6),
random.randint(0, 6),
)
gen = SlabGenerator(s, miller, 10, 10)
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
def test_normal_search(self):
fcc = Structure.from_spacegroup("Fm-3m", Lattice.cubic(3), ["Fe"], [[0, 0, 0]])
for miller in [(1, 0, 0), (1, 1, 0), (1, 1, 1), (2, 1, 1)]:
gen = SlabGenerator(fcc, miller, 10, 10)
gen_normal = SlabGenerator(fcc, miller, 10, 10, max_normal_search=max(miller))
slab = gen_normal.get_slab()
self.assertAlmostEqual(slab.lattice.alpha, 90)
self.assertAlmostEqual(slab.lattice.beta, 90)
self.assertGreaterEqual(len(gen_normal.oriented_unit_cell), len(gen.oriented_unit_cell))
graphite = self.get_structure("Graphite")
for miller in [(1, 0, 0), (1, 1, 0), (0, 0, 1), (2, 1, 1)]:
gen = SlabGenerator(graphite, miller, 10, 10)
gen_normal = SlabGenerator(graphite, miller, 10, 10, max_normal_search=max(miller))
self.assertGreaterEqual(len(gen_normal.oriented_unit_cell), len(gen.oriented_unit_cell))
sc = Structure(
Lattice.hexagonal(3.32, 5.15),
["Sc", "Sc"],
[[1 / 3, 2 / 3, 0.25], [2 / 3, 1 / 3, 0.75]],
)
gen = SlabGenerator(sc, (1, 1, 1), 10, 10, max_normal_search=1)
self.assertAlmostEqual(gen.oriented_unit_cell.lattice.angles[1], 90)
def test_get_slabs(self):
gen = SlabGenerator(self.get_structure("CsCl"), [0, 0, 1], 10, 10)
# Test orthogonality of some internal variables.
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
self.assertEqual(len(gen.get_slabs()), 1)
s = self.get_structure("LiFePO4")
gen = SlabGenerator(s, [0, 0, 1], 10, 10)
self.assertEqual(len(gen.get_slabs()), 5)
self.assertEqual(len(gen.get_slabs(bonds={("P", "O"): 3})), 2)
        # There are no slabs in LFP that do not break either P-O or Fe-O
        # bonds for a miller index of [0, 0, 1].
self.assertEqual(len(gen.get_slabs(bonds={("P", "O"): 3, ("Fe", "O"): 3})), 0)
# If we allow some broken bonds, there are a few slabs.
self.assertEqual(
len(gen.get_slabs(bonds={("P", "O"): 3, ("Fe", "O"): 3}, max_broken_bonds=2)),
2,
)
        # At this threshold, only the origin and center Li result in
        # clustering. All other sites are non-clustered. So the number of
        # slabs is the number of sites in the LiFePO4 unit cell - 2 + 1.
self.assertEqual(len(gen.get_slabs(tol=1e-4, ftol=1e-4)), 15)
LiCoO2 = Structure.from_file(get_path("icsd_LiCoO2.cif"), primitive=False)
gen = SlabGenerator(LiCoO2, [0, 0, 1], 10, 10)
lco = gen.get_slabs(bonds={("Co", "O"): 3})
self.assertEqual(len(lco), 1)
a, b, c = gen.oriented_unit_cell.lattice.matrix
self.assertAlmostEqual(np.dot(a, gen._normal), 0)
self.assertAlmostEqual(np.dot(b, gen._normal), 0)
scc = Structure.from_spacegroup("Pm-3m", Lattice.cubic(3), ["Fe"], [[0, 0, 0]])
gen = SlabGenerator(scc, [0, 0, 1], 10, 10)
slabs = gen.get_slabs()
self.assertEqual(len(slabs), 1)
gen = SlabGenerator(scc, [1, 1, 1], 10, 10, max_normal_search=1)
slabs = gen.get_slabs()
self.assertEqual(len(slabs), 1)
# Test whether using units of hkl planes instead of Angstroms for
# min_slab_size and min_vac_size will give us the same number of atoms
natoms = []
for a in [1, 1.4, 2.5, 3.6]:
s = Structure.from_spacegroup("Im-3m", Lattice.cubic(a), ["Fe"], [[0, 0, 0]])
slabgen = SlabGenerator(s, (1, 1, 1), 10, 10, in_unit_planes=True, max_normal_search=2)
natoms.append(len(slabgen.get_slab()))
n = natoms[0]
for i in natoms:
self.assertEqual(n, i)
def test_triclinic_TeI(self):
# Test case for a triclinic structure of TeI. Only these three
# Miller indices are used because it is easier to identify which
# atoms should be in a surface together. The closeness of the sites
# in other Miller indices can cause some ambiguity when choosing a
# higher tolerance.
numb_slabs = {(0, 0, 1): 5, (0, 1, 0): 3, (1, 0, 0): 7}
TeI = Structure.from_file(get_path("icsd_TeI.cif"), primitive=False)
for k, v in numb_slabs.items():
trclnc_TeI = SlabGenerator(TeI, k, 10, 10)
TeI_slabs = trclnc_TeI.get_slabs()
self.assertEqual(v, len(TeI_slabs))
def test_get_orthogonal_c_slab(self):
TeI = Structure.from_file(get_path("icsd_TeI.cif"), primitive=False)
trclnc_TeI = SlabGenerator(TeI, (0, 0, 1), 10, 10)
TeI_slabs = trclnc_TeI.get_slabs()
slab = TeI_slabs[0]
norm_slab = slab.get_orthogonal_c_slab()
self.assertAlmostEqual(norm_slab.lattice.angles[0], 90)
self.assertAlmostEqual(norm_slab.lattice.angles[1], 90)
def test_get_orthogonal_c_slab_site_props(self):
TeI = Structure.from_file(get_path("icsd_TeI.cif"), primitive=False)
trclnc_TeI = SlabGenerator(TeI, (0, 0, 1), 10, 10)
TeI_slabs = trclnc_TeI.get_slabs()
slab = TeI_slabs[0]
# Add site property to slab
sd_list = [[True, True, True] for site in slab.sites]
new_sp = slab.site_properties
new_sp["selective_dynamics"] = sd_list
slab_with_site_props = slab.copy(site_properties=new_sp)
# Get orthogonal slab
norm_slab = slab_with_site_props.get_orthogonal_c_slab()
# Check if site properties is consistent (or kept)
self.assertEqual(slab_with_site_props.site_properties, norm_slab.site_properties)
def test_get_tasker2_slabs(self):
        # The uneven distribution of ions on the (111) facets of Halite-
        # type slabs is a typical example of a Tasker 3 structure. We
        # will test this algo to generate a Tasker 2 structure instead
slabgen = SlabGenerator(self.MgO, (1, 1, 1), 10, 10, max_normal_search=1)
# We generate the Tasker 3 structure first
slab = slabgen.get_slabs()[0]
self.assertFalse(slab.is_symmetric())
self.assertTrue(slab.is_polar())
# Now to generate the Tasker 2 structure, we must
# ensure there are enough ions on top to move around
slab.make_supercell([2, 1, 1])
slabs = slab.get_tasker2_slabs()
# Check if our Tasker 2 slab is nonpolar and symmetric
for slab in slabs:
self.assertTrue(slab.is_symmetric())
self.assertFalse(slab.is_polar())
def test_nonstoichiometric_symmetrized_slab(self):
# For the (111) halite slab, sometimes a nonstoichiometric
# system is preferred over the stoichiometric Tasker 2.
slabgen = SlabGenerator(self.MgO, (1, 1, 1), 10, 10, max_normal_search=1)
slabs = slabgen.get_slabs(symmetrize=True)
# We should end up with two terminations, one with
# an Mg rich surface and another O rich surface
self.assertEqual(len(slabs), 2)
for slab in slabs:
self.assertTrue(slab.is_symmetric())
# For a low symmetry primitive_elemental system such as
# R-3m, there should be some nonsymmetric slabs
# without using nonstoichiometric_symmetrized_slab
slabs = generate_all_slabs(self.Dy, 1, 30, 30, center_slab=True, symmetrize=True)
for s in slabs:
self.assertTrue(s.is_symmetric())
self.assertGreater(len(s), len(self.Dy))
def test_move_to_other_side(self):
# Tests to see if sites are added to opposite side
s = self.get_structure("LiFePO4")
slabgen = SlabGenerator(s, (0, 0, 1), 10, 10, center_slab=True)
slab = slabgen.get_slab()
surface_sites = slab.get_surface_sites()
# check if top sites are moved to the bottom
top_index = [ss[1] for ss in surface_sites["top"]]
slab = slabgen.move_to_other_side(slab, top_index)
all_bottom = [slab[i].frac_coords[2] < slab.center_of_mass[2] for i in top_index]
self.assertTrue(all(all_bottom))
# check if bottom sites are moved to the top
bottom_index = [ss[1] for ss in surface_sites["bottom"]]
slab = slabgen.move_to_other_side(slab, bottom_index)
all_top = [slab[i].frac_coords[2] > slab.center_of_mass[2] for i in bottom_index]
self.assertTrue(all(all_top))
def test_bonds_broken(self):
# Querying the Materials Project database for Si
s = self.get_structure("Si")
# Conventional unit cell is supplied to ensure miller indices
# correspond to usual crystallographic definitions
conv_bulk = SpacegroupAnalyzer(s).get_conventional_standard_structure()
slabgen = SlabGenerator(conv_bulk, [1, 1, 1], 10, 10, center_slab=True)
# Setting a generous estimate for max_broken_bonds
# so that all terminations are generated. These slabs
# are ordered by ascending number of bonds broken
# which is assigned to Slab.energy
slabs = slabgen.get_slabs(bonds={("Si", "Si"): 2.40}, max_broken_bonds=30)
# Looking at the two slabs generated in VESTA, we
# expect 2 and 6 bonds broken so we check for this.
# Number of broken bonds are floats due to primitive
# flag check and subsequent transformation of slabs.
        self.assertAlmostEqual(slabs[0].energy, 2.0)
        self.assertAlmostEqual(slabs[1].energy, 6.0)
class ReconstructionGeneratorTests(PymatgenTest):
def setUp(self):
l = Lattice.cubic(3.51)
species = ["Ni"]
coords = [[0, 0, 0]]
self.Ni = Structure.from_spacegroup("Fm-3m", l, species, coords)
l = Lattice.cubic(2.819000)
species = ["Fe"]
coords = [[0, 0, 0]]
self.Fe = Structure.from_spacegroup("Im-3m", l, species, coords)
self.Si = Structure.from_spacegroup("Fd-3m", Lattice.cubic(5.430500), ["Si"], [(0, 0, 0.5)])
with open(
os.path.join(
os.path.abspath(os.path.dirname(__file__)),
"..",
"reconstructions_archive.json",
)
) as data_file:
self.rec_archive = json.load(data_file)
def test_build_slab(self):
# First lets test a reconstruction where we only remove atoms
recon = ReconstructionGenerator(self.Ni, 10, 10, "fcc_110_missing_row_1x2")
slab = recon.get_unreconstructed_slabs()[0]
recon_slab = recon.build_slabs()[0]
self.assertTrue(recon_slab.reconstruction)
self.assertEqual(len(slab), len(recon_slab) + 2)
self.assertTrue(recon_slab.is_symmetric())
# Test if the ouc corresponds to the reconstructed slab
recon_ouc = recon_slab.oriented_unit_cell
ouc = slab.oriented_unit_cell
self.assertEqual(ouc.lattice.b * 2, recon_ouc.lattice.b)
self.assertEqual(len(ouc) * 2, len(recon_ouc))
# Test a reconstruction where we simply add atoms
recon = ReconstructionGenerator(self.Ni, 10, 10, "fcc_111_adatom_t_1x1")
slab = recon.get_unreconstructed_slabs()[0]
recon_slab = recon.build_slabs()[0]
self.assertEqual(len(slab), len(recon_slab) - 2)
self.assertTrue(recon_slab.is_symmetric())
# If a slab references another slab,
# make sure it is properly generated
recon = ReconstructionGenerator(self.Ni, 10, 10, "fcc_111_adatom_ft_1x1")
slab = recon.build_slabs()[0]
self.assertTrue(slab.is_symmetric)
# Test a reconstruction where it works on a specific
# termination (Fd-3m (111))
recon = ReconstructionGenerator(self.Si, 10, 10, "diamond_111_1x2")
slab = recon.get_unreconstructed_slabs()[0]
recon_slab = recon.build_slabs()[0]
self.assertEqual(len(slab), len(recon_slab) - 8)
self.assertTrue(recon_slab.is_symmetric())
# Test a reconstruction where terminations give
# different reconstructions with a non-primitive_elemental system
def test_get_d(self):
        # Ensure that regardless of the size of the vacuum or slab
        # layer, the spacing between atomic layers stays the same
recon = ReconstructionGenerator(self.Si, 10, 10, "diamond_100_2x1")
recon2 = ReconstructionGenerator(self.Si, 20, 10, "diamond_100_2x1")
s1 = recon.get_unreconstructed_slabs()[0]
s2 = recon2.get_unreconstructed_slabs()[0]
self.assertAlmostEqual(get_d(s1), get_d(s2))
@unittest.skip("This test relies on neighbor orders and is hard coded. Disable temporarily")
def test_previous_reconstructions(self):
# Test to see if we generated all reconstruction
# types correctly and nothing changes
m = StructureMatcher()
for n in self.rec_archive.keys():
if "base_reconstruction" in self.rec_archive[n].keys():
arch = self.rec_archive[self.rec_archive[n]["base_reconstruction"]]
sg = arch["spacegroup"]["symbol"]
else:
sg = self.rec_archive[n]["spacegroup"]["symbol"]
if sg == "Fm-3m":
rec = ReconstructionGenerator(self.Ni, 20, 20, n)
el = self.Ni[0].species_string
elif sg == "Im-3m":
rec = ReconstructionGenerator(self.Fe, 20, 20, n)
el = self.Fe[0].species_string
elif sg == "Fd-3m":
rec = ReconstructionGenerator(self.Si, 20, 20, n)
el = self.Si[0].species_string
slabs = rec.build_slabs()
s = Structure.from_file(get_path(os.path.join("reconstructions", el + "_" + n + ".cif")))
self.assertTrue(any([len(m.group_structures([s, slab])) == 1 for slab in slabs]))
class MillerIndexFinderTests(PymatgenTest):
def setUp(self):
self.cscl = Structure.from_spacegroup("Pm-3m", Lattice.cubic(4.2), ["Cs", "Cl"], [[0, 0, 0], [0.5, 0.5, 0.5]])
self.Fe = Structure.from_spacegroup("Im-3m", Lattice.cubic(2.82), ["Fe"], [[0, 0, 0]])
mglatt = Lattice.from_parameters(3.2, 3.2, 5.13, 90, 90, 120)
self.Mg = Structure(mglatt, ["Mg", "Mg"], [[1 / 3, 2 / 3, 1 / 4], [2 / 3, 1 / 3, 3 / 4]])
self.lifepo4 = self.get_structure("LiFePO4")
self.tei = Structure.from_file(get_path("icsd_TeI.cif"), primitive=False)
self.LiCoO2 = Structure.from_file(get_path("icsd_LiCoO2.cif"), primitive=False)
self.p1 = Structure(
Lattice.from_parameters(3, 4, 5, 31, 43, 50),
["H", "He"],
[[0, 0, 0], [0.1, 0.2, 0.3]],
)
self.graphite = self.get_structure("Graphite")
self.trigBi = Structure(
Lattice.from_parameters(3, 3, 10, 90, 90, 120),
["Bi", "Bi", "Bi", "Bi", "Bi", "Bi"],
[
[0.3333, 0.6666, 0.39945113],
[0.0000, 0.0000, 0.26721554],
[0.0000, 0.0000, 0.73278446],
[0.6666, 0.3333, 0.60054887],
[0.6666, 0.3333, 0.06611779],
[0.3333, 0.6666, 0.93388221],
],
)
def test_get_symmetrically_distinct_miller_indices(self):
# Tests to see if the function obtains the known number of unique slabs
indices = get_symmetrically_distinct_miller_indices(self.cscl, 1)
self.assertEqual(len(indices), 3)
indices = get_symmetrically_distinct_miller_indices(self.cscl, 2)
self.assertEqual(len(indices), 6)
self.assertEqual(len(get_symmetrically_distinct_miller_indices(self.lifepo4, 1)), 7)
# The TeI P-1 structure should have 13 unique millers (only inversion
# symmetry eliminates pairs)
indices = get_symmetrically_distinct_miller_indices(self.tei, 1)
self.assertEqual(len(indices), 13)
# P1 and P-1 should have the same # of miller indices since surfaces
# always have inversion symmetry.
indices = get_symmetrically_distinct_miller_indices(self.p1, 1)
self.assertEqual(len(indices), 13)
indices = get_symmetrically_distinct_miller_indices(self.graphite, 2)
self.assertEqual(len(indices), 12)
# Now try a trigonal system.
indices = get_symmetrically_distinct_miller_indices(self.trigBi, 2, return_hkil=True)
self.assertEqual(len(indices), 17)
self.assertTrue(all([len(hkl) == 4 for hkl in indices]))
def test_get_symmetrically_equivalent_miller_indices(self):
# Tests to see if the function obtains all equivalent hkl for cubic (100)
indices001 = [
(1, 0, 0),
(0, 1, 0),
(0, 0, 1),
(0, 0, -1),
(0, -1, 0),
(-1, 0, 0),
]
indices = get_symmetrically_equivalent_miller_indices(self.cscl, (1, 0, 0))
self.assertTrue(all([hkl in indices for hkl in indices001]))
# Tests to see if it captures expanded Miller indices in the family e.g. (001) == (002)
hcp_indices_100 = get_symmetrically_equivalent_miller_indices(self.Mg, (1, 0, 0))
hcp_indices_200 = get_symmetrically_equivalent_miller_indices(self.Mg, (2, 0, 0))
self.assertEqual(len(hcp_indices_100) * 2, len(hcp_indices_200))
self.assertEqual(len(hcp_indices_100), 6)
self.assertTrue(all([len(hkl) == 4 for hkl in hcp_indices_100]))
def test_generate_all_slabs(self):
slabs = generate_all_slabs(self.cscl, 1, 10, 10)
# Only three possible slabs, one each in (100), (110) and (111).
self.assertEqual(len(slabs), 3)
# make sure it generates reconstructions
slabs = generate_all_slabs(self.Fe, 1, 10, 10, include_reconstructions=True)
# Four possible slabs, (100), (110), (111) and the zigzag (100).
self.assertEqual(len(slabs), 4)
slabs = generate_all_slabs(self.cscl, 1, 10, 10, bonds={("Cs", "Cl"): 4})
# No slabs if we don't allow broken Cs-Cl
self.assertEqual(len(slabs), 0)
slabs = generate_all_slabs(self.cscl, 1, 10, 10, bonds={("Cs", "Cl"): 4}, max_broken_bonds=100)
self.assertEqual(len(slabs), 3)
slabs2 = generate_all_slabs(self.lifepo4, 1, 10, 10, bonds={("P", "O"): 3, ("Fe", "O"): 3})
self.assertEqual(len(slabs2), 0)
        # There should be only one possible stable surface, and it lies
        # in the (001) oriented unit cell
slabs3 = generate_all_slabs(self.LiCoO2, 1, 10, 10, bonds={("Co", "O"): 3})
self.assertEqual(len(slabs3), 1)
mill = (0, 0, 1)
for s in slabs3:
self.assertEqual(s.miller_index, mill)
slabs1 = generate_all_slabs(self.lifepo4, 1, 10, 10, tol=0.1, bonds={("P", "O"): 3})
self.assertEqual(len(slabs1), 4)
# Now we test this out for repair_broken_bonds()
slabs1_repair = generate_all_slabs(self.lifepo4, 1, 10, 10, tol=0.1, bonds={("P", "O"): 3}, repair=True)
self.assertGreater(len(slabs1_repair), len(slabs1))
# Lets see if there are no broken PO4 polyhedrons
miller_list = get_symmetrically_distinct_miller_indices(self.lifepo4, 1)
all_miller_list = []
for slab in slabs1_repair:
hkl = tuple(slab.miller_index)
if hkl not in all_miller_list:
all_miller_list.append(hkl)
broken = []
for site in slab:
if site.species_string == "P":
neighbors = slab.get_neighbors(site, 3)
cn = 0
for nn in neighbors:
cn += 1 if nn[0].species_string == "O" else 0
broken.append(cn != 4)
self.assertFalse(any(broken))
        # check if we were able to produce at least one
        # termination for each distinct Miller index
self.assertEqual(len(miller_list), len(all_miller_list))
def test_miller_index_from_sites(self):
"""Test surface miller index convenience function"""
# test on a cubic system
m = Lattice.cubic(1)
s1 = np.array([0.5, -1.5, 3])
s2 = np.array([0.5, 3.0, -1.5])
s3 = np.array([2.5, 1.5, -4.0])
self.assertEqual(miller_index_from_sites(m, [s1, s2, s3]), (2, 1, 1))
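        # For reference (added for clarity): the two in-plane vectors are
        # s2 - s1 = (0, 4.5, -4.5) and s3 - s1 = (2, 3, -7); their cross
        # product is (-18, -9, -9), which reduces (up to sign) to the
        # (2, 1, 1) plane normal asserted above.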
# test casting from matrix to Lattice
m = [[2.319, -4.01662582, 0.0], [2.319, 4.01662582, 0.0], [0.0, 0.0, 7.252]]
s1 = np.array([2.319, 1.33887527, 6.3455])
s2 = np.array([1.1595, 0.66943764, 4.5325])
s3 = np.array([1.1595, 0.66943764, 0.9065])
hkl = miller_index_from_sites(m, [s1, s2, s3])
self.assertEqual(hkl, (2, -1, 0))
if __name__ == "__main__":
unittest.main()
|
|
from __future__ import division, print_function
import numpy as np
import os
from auxiliary import *
from scipy.integrate import odeint, ode
import matplotlib.pyplot as plt
from smatrix import compute_S_matrix, compute_S_matrix_fast, mean_poisson
from ttsolution import TTsolution
import sys
try:
xrange
except NameError:
xrange = range
def takagitaupin(scantype,scan,constant,hkl,crystal,thickness,bending = 'None'):
'''
1D TT-solver.
Input:
scantype = 'energy' or 'angle'
scan = relative to the Bragg's energy in meV (energy scan) OR relative to the Bragg's angle in arcsec (angle scan)
constant = incidence angle in degrees (energy scan) OR photon energy in keV (angle scan)
hkl = [h,k,l] (Miller indices)
crystal = currently only 'si' is supported
thickness = crystal thickness in microns
bending = 'None' OR ('spherical',R_bend) OR ('cylindrical',R1,R2), where R_bend, R1, and R2 are in meters
'''
if scantype == 'energy':
is_escan = True
scantype = 'energy'
elif scantype == 'angle' or scantype == 'angular':
is_escan = False
scantype = 'angle'
#type conversions
scan=np.array(scan)
#Unit conversions
thickness_tuple = (thickness, 'microns')
thickness = thickness*1e-6 #wafer thickness in meters
#constants
    hc=1.23984193*0.001 #in meV*m
d=dspace(hkl,crystal)*1e-10 #in m
#Setup scan variables and constants
if is_escan:
escan=scan
th=np.radians(constant)
#Direction cosines
gamma0=np.sin(th)
gammah=-np.sin(th)
#Conversion of incident photon energy to wavelength
E0 = hc/(2*d*np.sin(th)) #in meV
wavelength = hc/(E0+escan) #in m
else:
E0 = constant*1e6 #in meV
wavelength = hc/E0 #in m
        if hc/(2*d*E0) <= 1:
th = np.arcsin(hc/(2*d*E0))
else:
th = np.pi/2
ascan = scan*np.pi/648000 #from arcsec to rad
#Direction cosines
gamma0=np.sin(th+ascan)
gammah=-np.sin(th+ascan)
#construct the path for chitables
hklstring = str(hkl[0]) + '_' + str(hkl[1]) + '_' + str(hkl[2])
filename = 'chitable_' + crystal.lower() + '_' + hklstring + '.dat'
filestring = os.path.join(os.path.dirname(__file__),'chitables_300K',filename)
#load the chitable
try:
chi = np.loadtxt(filestring)
except:
print('Error loading chitable! Check that ' + filestring \
+ ' exists and is correctly formatted!')
raise Exception()
#conversion to meV
chienergy = chi[:,0]*1e6
print('Computing elastic line for ' + str(hkl) + ' reflection of ' \
+ crystal[0].upper() + crystal[1:].lower() + '-crystal')
if is_escan:
print('Scanning the incident energy')
else:
print('Scanning the incidence angle')
#Polarization (TODO: include pi-polarization)
    C = 1
print('Assuming sigma-polarization')
#Interpolation
if is_escan:
chi0 = np.interp(E0+escan, chienergy, chi[:,1]) + 1j*np.interp(E0+escan, chienergy, chi[:,2])
chih = np.interp(E0+escan, chienergy, chi[:,3]) + 1j*np.interp(E0+escan, chienergy, chi[:,4])
chihbar = np.interp(E0+escan, chienergy, chi[:,5]) + 1j*np.interp(E0+escan, chienergy, chi[:,6])
else:
chi0 = np.interp(E0, chienergy, chi[:,1]) + 1j*np.interp(E0, chienergy, chi[:,2])
chih = np.interp(E0, chienergy, chi[:,3]) + 1j*np.interp(E0, chienergy, chi[:,4])
chihbar = np.interp(E0, chienergy, chi[:,5]) + 1j*np.interp(E0, chienergy, chi[:,6])
#Deviation from backscattering
deltawavelength = wavelength-2*d
if is_escan:
th2 = th
else:
th2 = th+ascan
#if is_escan:
# deltath = th-np.pi/2
#else:
# deltath = th+ascan-np.pi/2
#Extinction length
L = wavelength * np.sqrt(gamma0*np.abs(gammah)) / (np.abs(C)*np.sqrt(chih*chihbar))
#Incidence parameter
eta = np.sqrt(gamma0/np.abs(gammah)) / (np.abs(C)*np.sqrt(chih*chihbar)) \
* (-wavelength/d*(wavelength/(2*d)-np.sin(th2)) - chi0*(gammah/gamma0-1)/2)
#normalization coefficient
normcoef = np.sqrt(chih*chihbar)/chihbar*np.sign(C)*np.sqrt(gamma0/np.abs(gammah))
#Calculate mean poisson's ratio
    nu = 0
    #Defined up front so the flat-crystal branch below does not raise a
    #NameError when it is indexed per scan step.
    rotational_parameter = np.zeros_like(scan, dtype=float)
if not bending == 'None':
#TODO: different bendings have their own quirks, check for cylindrical
S_matrix, C_matrix = compute_S_matrix_fast(hkl,crystal)
#nu = mean_poisson(S_matrix)
#test
S=S_matrix
if bending[0] == 'cylindrical':
if bending[1] == 'inf':
invR1 = 0
else:
invR1 = 1/bending[1]
if bending[2] == 'inf':
invR2 = 0
else:
invR2 = 1/bending[2]
elif bending[0] == 'spherical':
if bending[1] == 'inf':
invR1 = 0
invR2 = 0
else:
invR1 = 1/bending[1]
invR2 = 1/bending[1]
#This takes into account the rotation of the diffractive planes by the bending deep in the crystal
rotational_parameter = np.sqrt(gamma0/np.abs(gammah)) / (np.abs(C)*np.sqrt(chih*chihbar)) \
*wavelength/d*np.cos(th2)**2/np.sin(th2)*invR1
#Parameter according to http://arxiv.org/abs/1502.03059
bending_parameter = S[2,0]*(S[0,1]*invR2-S[1,1]*invR1)+S[2,1]*(S[1,0]*invR1-S[0,0]*invR2)
bending_parameter = -0.5*bending_parameter/(S[0,1]*S[1,0]-S[0,0]*S[1,1])
print(bending_parameter)
#INTEGRATION
reflectivity=[]
#Define ODE and its Jacobian
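    #Note (added for clarity): the ODE below is the reduced one-dimensional
    #Takagi-Taupin equation for the field ratio ksi(z),
    #  d(ksi)/dz = (i*pi/L)*(ksi**2
    #                        - 2*(sign(gammah)*(eta + rot*z)
    #                             + 2*L*bending_parameter*(z - thickness/2)/d)*ksi
    #                        - sign(gammah))
    #where the rot*z and bending_parameter terms vanish for an unbent crystal.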
def tt_equation(z,ksi,L,gamma0,gammah,eta,d,bending,thickness,nu,rot):
if bending == 'None':
return np.pi*1j/L*(ksi**2-2*(np.sign(gammah)*eta)*ksi-np.sign(gammah))
else:
return np.pi*1j/L*(ksi**2-2*(np.sign(gammah)*(eta+rot*z)+L*2*bending_parameter*(z-thickness/2)/d)*ksi-np.sign(gammah))
def tt_jacobian(z,ksi,L,gamma0,gammah,eta,d,bending,thickness,nu,rot):
if bending == 'None':
return np.pi*1j/L*(2*ksi-2*(np.sign(gammah)*eta))
else:
return np.pi*1j/L*(2*ksi-2*(np.sign(gammah)*(eta+rot*z)+L*2*bending_parameter*(z-thickness/2)/d))
#Solve the equation
sys.stdout.write('Solving...0%')
sys.stdout.flush()
for step in xrange(len(scan)):
def tt2solve(z,ksi):
if is_escan:
return tt_equation(z,ksi,L[step],gamma0,gammah,eta[step],d,bending,thickness,nu,rotational_parameter[step])
else:
return tt_equation(z,ksi,L[step],gamma0[step],gammah[step],eta[step],d,bending,thickness,nu,rotational_parameter[step])
def jac(z,ksi):
if is_escan:
return tt_jacobian(z,ksi,L[step],gamma0,gammah,eta[step],d,bending,thickness,nu,rotational_parameter[step])
else:
return tt_jacobian(z,ksi,L[step],gamma0[step],gammah[step],eta[step],d,bending,thickness,nu,rotational_parameter[step])
r=ode(tt2solve,jac).set_integrator('zvode',method='bdf',with_jacobian=True,min_step=1e-10,max_step=1e-4,nsteps=50000)
r.set_initial_value(0,thickness)
res=r.integrate(0)
reflectivity.append(np.abs(normcoef[step]*res[0])**2)
sys.stdout.write('\rSolving...%0.1f%%' % (100*(step+1)/len(scan),))
sys.stdout.flush()
sys.stdout.write('\r\nDone.\n')
sys.stdout.flush()
#solution class
if is_escan:
scan = (scan, 'meV')
constant = (constant,'deg')
else:
scan = (scan, 'arcsec')
constant = (constant,'keV')
#TODO: add also the type of bending to the ttsolution
if bending == 'None':
R_bend = 0
else:
R_bend = bending[1]
result = TTsolution(scan,reflectivity,scantype,crystal.lower(),hkl,(R_bend,'m'),thickness_tuple,constant)
return result
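#Illustrative usage sketch (added for clarity, not part of the original module):
#an energy scan of a Si reflection on a flat 300-micron wafer. The reflection,
#incidence angle and scan range are arbitrary example values; the matching
#chitable file must exist under chitables_300K/ for the call to succeed.
def _example_energy_scan():  # pragma: no cover
    scan = np.linspace(-100, 100, 150)  #in meV relative to the Bragg energy
    return takagitaupin('energy', scan, 88.0, [6, 6, 0], 'si', 300)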
|
|
import re
from calendar import monthrange
import datetime
class Card(object):
"""
A credit card that may be valid or invalid.
"""
# A regexp for matching non-digit values
non_digit_regexp = re.compile(r'\D')
# A mapping from common credit card brands to their number regexps
BRAND_VISA = 'visa'
BRAND_MASTERCARD = 'mastercard'
BRAND_AMEX = 'amex'
BRAND_DISCOVER = 'discover'
BRAND_DANKORT = 'dankort'
BRAND_MAESTRO = 'maestro'
BRAND_DINERS = 'diners'
BRAND_UNKNOWN = u'unknown'
BRANDS = {
BRAND_VISA: re.compile(r'^4\d{12}(\d{3})?$'),
BRAND_MASTERCARD: re.compile(r'''
^(5[1-5]\d{4}|677189)\d{10}$| # Traditional 5-series + RU support
^(222[1-9]|2[3-6]\d{2}|27[0-1]\d|2720)\d{12}$ # 2016 2-series
''', re.VERBOSE),
BRAND_AMEX: re.compile(r'^3[47]\d{13}$'),
BRAND_DISCOVER: re.compile(r'^(6011|65\d{2})\d{12}$'),
BRAND_DANKORT: re.compile(r'^(5019)\d{12}$'),
BRAND_MAESTRO:
re.compile(r'^(?:5[0678]\d\d|6304|6390|67\d\d)\d{8,15}$'),
BRAND_DINERS:
re.compile(r'^3(?:0[0-5]|[68][0-9])[0-9]{11}$'),
}
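    # Illustrative matches for the patterns above (added for clarity; the
    # numbers are taken from the TESTS tuple below): '4111111111111111' ->
    # visa, '5105105105105100' -> mastercard, '378282246310005' -> amex,
    # '6011111111111117' -> discover, '30569309025904' -> diners.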
FRIENDLY_BRANDS = {
BRAND_VISA: 'Visa',
BRAND_MASTERCARD: 'MasterCard',
BRAND_AMEX: 'American Express',
BRAND_DISCOVER: 'Discover',
BRAND_DANKORT: 'Dankort',
BRAND_MAESTRO: 'Maestro',
BRAND_DINERS: 'Diners Club',
}
# Common test credit cards
TESTS = (
'4444333322221111',
'378282246310005',
'371449635398431',
'378734493671000',
'30569309025904',
'38520000023237',
'6011111111111117',
'6011000990139424',
        '5555555555554444',
'5105105105105100',
'4111111111111111',
'4012888888881881',
'4222222222222',
)
# Stripe test credit cards
TESTS += (
'4242424242424242',
)
def __init__(self, number, month, year, cvc, holder=None):
"""
Attaches the provided card data and holder to the card after removing
non-digits from the provided number.
"""
self.number = self.non_digit_regexp.sub('', number)
self.exp_date = ExpDate(month, year)
self.cvc = cvc
self.holder = holder
def __repr__(self):
"""
Returns a typical repr with a simple representation of the masked card
number and the exp date.
"""
return u'<Card brand={b} number={n}, exp_date={e}>'.format(
b=self.brand,
n=self.mask,
e=self.exp_date.mmyyyy
)
@property
def mask(self):
"""
        Returns the credit card number with all but the last four digits
        replaced by an X, formatted the way numbers appear on the
        respective brand's cards.
"""
# If the card is invalid, return an "invalid" message
if not self.is_mod10_valid:
return u'invalid'
# If the card is an Amex, it will have special formatting
if self.brand == self.BRAND_AMEX:
return u'XXXX-XXXXXX-X{e}'.format(e=self.number[11:15])
# All other cards
return u'XXXX-XXXX-XXXX-{e}'.format(e=self.number[12:16])
@property
def brand(self):
"""
Returns the brand of the card, if applicable, else an "unknown" brand.
"""
# Check if the card is of known type
for brand, regexp in self.BRANDS.items():
if regexp.match(self.number):
return brand
# Default to unknown brand
return self.BRAND_UNKNOWN
@property
def friendly_brand(self):
"""
Returns the human-friendly brand name of the card.
"""
return self.FRIENDLY_BRANDS.get(self.brand, 'unknown')
@property
def is_test(self):
"""
Returns whether or not the card's number is a known test number.
"""
return self.number in self.TESTS
@property
def is_expired(self):
"""
Returns whether or not the card is expired.
"""
return self.exp_date.is_expired
@property
def is_valid(self):
"""
Returns whether or not the card is a valid card for making payments.
"""
return not self.is_expired and self.is_mod10_valid
@property
def is_mod10_valid(self):
"""
Returns whether or not the card's number validates against the mod10
algorithm (Luhn algorithm), automatically returning False on an empty
value.
"""
# Check for empty string
if not self.number:
return False
# Run mod10 on the number
dub, tot = 0, 0
for i in range(len(self.number) - 1, -1, -1):
for c in str((dub + 1) * int(self.number[i])):
tot += int(c)
dub = (dub + 1) % 2
return (tot % 10) == 0
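    # Worked example for the mod10 check above (added for clarity): for the
    # test number '4242424242424242', each doubled digit contributes 8 and
    # each untouched digit 2, giving 8 * (2 + 8) = 80; 80 % 10 == 0, so the
    # number is Luhn-valid.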
class ExpDate(object):
"""
An expiration date of a credit card.
"""
def __init__(self, month, year):
"""
Attaches the last possible datetime for the given month and year, as
well as the raw month and year values.
"""
# Attach month and year
self.month = month
self.year = year
# Get the month's day count
weekday, day_count = monthrange(year, month)
# Attach the last possible datetime for the provided month and year
self.expired_after = datetime.datetime(
year,
month,
day_count,
23,
59,
59,
999999
)
def __repr__(self):
"""
Returns a typical repr with a simple representation of the exp date.
"""
return u'<ExpDate expired_after={d}>'.format(
d=self.expired_after.strftime('%m/%Y')
)
@property
def is_expired(self):
"""
Returns whether or not the expiration date has passed in American Samoa
(the last timezone).
"""
# Get the current datetime in UTC
utcnow = datetime.datetime.utcnow()
# Get the datetime minus 11 hours (Samoa is UTC-11)
samoa_now = utcnow - datetime.timedelta(hours=11)
        # Return whether the expired after time has passed in American Samoa
return samoa_now > self.expired_after
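        # Example (added for clarity): a card expiring 12/2024 is treated as
        # valid through 2024-12-31 23:59:59 in UTC-11, i.e. until
        # 2025-01-01 10:59:59 UTC.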
@property
def mmyyyy(self):
"""
Returns the expiration date in MM/YYYY format.
"""
return self.expired_after.strftime('%m/%Y')
@property
def mmyy(self):
"""
        Returns the expiration date in MM/YY format (the same format as is
        printed on cards).
"""
return self.expired_after.strftime('%m/%y')
@property
def MMYY(self):
"""
Returns the expiration date in MMYY format
"""
return self.expired_after.strftime('%m%y')
@property
def mm(self):
"""
Returns the expiration date in MM format.
"""
return self.expired_after.strftime('%m')
@property
def yyyy(self):
"""
Returns the expiration date in YYYY format.
"""
return self.expired_after.strftime('%Y')
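# Minimal usage sketch (added for illustration; the number below is the Stripe
# test card already present in Card.TESTS).
def _example_card_checks():  # pragma: no cover
    card = Card('4242 4242 4242 4242', 12, 2030, '123', holder='Jane Doe')
    # Expected: brand 'visa', passes the Luhn check, and flagged as a test card.
    return card.brand, card.is_mod10_valid, card.is_test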
|
|
# Copyright (c) 2013 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import instance as rd_instance
from trove.guestagent import backup
from trove.guestagent.datastore.experimental.couchbase import service
from trove.guestagent.datastore.experimental.couchbase import system
from trove.guestagent import dbaas
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
MANAGER = CONF.datastore_manager
class Manager(periodic_task.PeriodicTasks):
"""
    This is the Couchbase Manager class. It is dynamically loaded
    based on the datastore of the trove instance.
"""
def __init__(self):
self.appStatus = service.CouchbaseAppStatus()
self.app = service.CouchbaseApp(self.appStatus)
super(Manager, self).__init__(CONF)
@periodic_task.periodic_task
def update_status(self, context):
"""
        Updates the couchbase trove instance. It is decorated with
        periodic_task so it is automatically called every 3 ticks.
"""
self.appStatus.update()
def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
return True
def change_passwords(self, context, users):
raise exception.DatastoreOperationNotSupported(
operation='change_passwords', datastore=MANAGER)
def reset_configuration(self, context, configuration):
self.app.reset_configuration(configuration)
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None, snapshot=None):
"""
This is called when the trove instance first comes online.
It is the first rpc message passed from the task manager.
prepare handles all the base configuration of the Couchbase instance.
"""
self.appStatus.begin_install()
self.app.install_if_needed(packages)
if device_path:
device = volume.VolumeDevice(device_path)
# unmount if device is already mounted
device.unmount_device(device_path)
device.format()
device.mount(mount_point)
LOG.debug('Mounted the volume (%s).' % device_path)
self.app.start_db_with_conf_changes(config_contents)
LOG.debug('Securing couchbase now.')
if root_password:
self.app.enable_root(root_password)
self.app.initial_setup()
if backup_info:
LOG.debug('Now going to perform restore.')
self._perform_restore(backup_info,
context,
mount_point)
self.app.complete_install_or_restart()
LOG.info(_('Completed setup of Couchbase database instance.'))
def restart(self, context):
"""
Restart this couchbase instance.
This method is called when the guest agent
gets a restart message from the taskmanager.
"""
self.app.restart()
def start_db_with_conf_changes(self, context, config_contents):
self.app.start_db_with_conf_changes(config_contents)
def stop_db(self, context, do_not_start_on_reboot=False):
"""
Stop this couchbase instance.
This method is called when the guest agent
gets a stop message from the taskmanager.
"""
self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
def get_filesystem_stats(self, context, fs_path):
"""Gets the filesystem stats for the path given."""
mount_point = CONF.get(
'mysql' if not MANAGER else MANAGER).mount_point
return dbaas.get_filesystem_volume_stats(mount_point)
def update_attributes(self, context, username, hostname, user_attrs):
raise exception.DatastoreOperationNotSupported(
operation='update_attributes', datastore=MANAGER)
def create_database(self, context, databases):
raise exception.DatastoreOperationNotSupported(
operation='create_database', datastore=MANAGER)
def create_user(self, context, users):
raise exception.DatastoreOperationNotSupported(
operation='create_user', datastore=MANAGER)
def delete_database(self, context, database):
raise exception.DatastoreOperationNotSupported(
operation='delete_database', datastore=MANAGER)
def delete_user(self, context, user):
raise exception.DatastoreOperationNotSupported(
operation='delete_user', datastore=MANAGER)
def get_user(self, context, username, hostname):
raise exception.DatastoreOperationNotSupported(
operation='get_user', datastore=MANAGER)
def grant_access(self, context, username, hostname, databases):
raise exception.DatastoreOperationNotSupported(
operation='grant_access', datastore=MANAGER)
def revoke_access(self, context, username, hostname, database):
raise exception.DatastoreOperationNotSupported(
operation='revoke_access', datastore=MANAGER)
def list_access(self, context, username, hostname):
raise exception.DatastoreOperationNotSupported(
operation='list_access', datastore=MANAGER)
def list_databases(self, context, limit=None, marker=None,
include_marker=False):
raise exception.DatastoreOperationNotSupported(
operation='list_databases', datastore=MANAGER)
def list_users(self, context, limit=None, marker=None,
include_marker=False):
raise exception.DatastoreOperationNotSupported(
operation='list_users', datastore=MANAGER)
def enable_root(self, context):
LOG.debug("Enabling root.")
return self.app.enable_root()
def enable_root_with_password(self, context, root_password=None):
LOG.debug("Enabling root with password.")
raise exception.DatastoreOperationNotSupported(
operation='enable_root_with_password', datastore=MANAGER)
def is_root_enabled(self, context):
LOG.debug("Checking if root is enabled.")
return os.path.exists(system.pwd_file)
def _perform_restore(self, backup_info, context, restore_location):
"""
Restores all couchbase buckets and their documents from the
backup.
"""
LOG.info(_("Restoring database from backup %s") %
backup_info['id'])
try:
backup.restore(context, backup_info, restore_location)
except Exception as e:
LOG.error(_("Error performing restore from backup %s") %
backup_info['id'])
LOG.error(e)
            self.appStatus.set_status(rd_instance.ServiceStatuses.FAILED)
raise
LOG.info(_("Restored database successfully"))
def create_backup(self, context, backup_info):
"""
Backup all couchbase buckets and their documents.
"""
backup.backup(context, backup_info)
def mount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.mount(mount_point, write_to_fstab=False)
LOG.debug("Mounted the device %s at the mount_point %s." %
(device_path, mount_point))
def unmount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.unmount(mount_point)
LOG.debug("Unmounted the device %s from the mount point %s." %
(device_path, mount_point))
def resize_fs(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.resize_fs(mount_point)
LOG.debug("Resized the filesystem at %s." % mount_point)
def update_overrides(self, context, overrides, remove=False):
LOG.debug("Updating overrides.")
raise exception.DatastoreOperationNotSupported(
operation='update_overrides', datastore=MANAGER)
def apply_overrides(self, context, overrides):
LOG.debug("Applying overrides.")
raise exception.DatastoreOperationNotSupported(
operation='apply_overrides', datastore=MANAGER)
def get_replication_snapshot(self, context, snapshot_info,
replica_source_config=None):
raise exception.DatastoreOperationNotSupported(
operation='get_replication_snapshot', datastore=MANAGER)
def attach_replication_slave(self, context, snapshot, slave_config):
LOG.debug("Attaching replication slave.")
raise exception.DatastoreOperationNotSupported(
operation='attach_replication_slave', datastore=MANAGER)
def detach_replica(self, context, for_failover=False):
raise exception.DatastoreOperationNotSupported(
operation='detach_replica', datastore=MANAGER)
def get_replica_context(self, context):
raise exception.DatastoreOperationNotSupported(
operation='get_replica_context', datastore=MANAGER)
def make_read_only(self, context, read_only):
raise exception.DatastoreOperationNotSupported(
operation='make_read_only', datastore=MANAGER)
def enable_as_master(self, context, replica_source_config):
raise exception.DatastoreOperationNotSupported(
operation='enable_as_master', datastore=MANAGER)
def get_txn_count(self):
raise exception.DatastoreOperationNotSupported(
operation='get_txn_count', datastore=MANAGER)
def get_latest_txn_id(self):
raise exception.DatastoreOperationNotSupported(
operation='get_latest_txn_id', datastore=MANAGER)
def wait_for_txn(self, txn):
raise exception.DatastoreOperationNotSupported(
operation='wait_for_txn', datastore=MANAGER)
def demote_replication_master(self, context):
LOG.debug("Demoting replication slave.")
raise exception.DatastoreOperationNotSupported(
operation='demote_replication_master', datastore=MANAGER)
|
|
"""
Tool Name: Create Filled Contours
Source Name: CreateFilledContours.py
Version: ArcGIS 10.0
Author: ESRI
This utility creates filled polygons from an input raster.
Limitations:
- Cannot output to shapefile because of string length>254 required
in SpatialJoin
- If more than 3 contour lines cross a single cell you might want to
Resample using half the original cellsize
"""
import os
import sys
import arcpy
from arcpy.sa import *
def int_if_youCan(x):
""" Return string without decimals if value has none"""
if x % 1.0 == 0:
strX = str(int(x))
else:
strX = "%.6f" % (x)
return strX
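# Examples (added for clarity): int_if_youCan(5.0) -> '5',
# int_if_youCan(2.5) -> '2.500000'.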
def FindZ(outContoursPolygons, in_raster):
""" Use the point within the polygon to determine the low and high
sides of the polygon"""
outEVT = 'outEVT'
outEVTjoinedLayer = 'outEVTjoinedLayer'
outPolygPoints = 'outPolygPoints'
print(" FeatureToPoint_management...")
try:
arcpy.FeatureToPoint_management(outContoursPolygons,
outPolygPoints, 'INSIDE')
except:
if arcpy.Describe(
outContoursPolygons).spatialReference.name == 'Unknown':
print('This might be caused by data with '+
'Unknown spatial reference.' +
' Define a projection and re-run')
sys.exit()
print(" ExtractValuesToPoints...")
ExtractValuesToPoints(outPolygPoints, in_raster, outEVT,
'NONE', 'ALL')
arcpy.MakeFeatureLayer_management(outContoursPolygons,
outEVTjoinedLayer)
print(" MakeFeatureLayer_management...")
descFlayer = arcpy.Describe(outEVTjoinedLayer)
descOutEVT = arcpy.Describe(outEVT)
print(" AddJoin_management...")
arcpy.AddJoin_management(outEVTjoinedLayer, descFlayer.OIDFieldName,
outEVT, descOutEVT.OIDFieldName, 'KEEP_ALL')
return outEVTjoinedLayer, outEVT, outPolygPoints
def delete_trailing_zeros(strValue):
""" Remove all the trailing zeros"""
newStr = strValue
if '.' in strValue:
lStr = strValue.split('.')[0]
rStr = strValue.split('.')[1].rstrip('0')
newStr = lStr + '.' + rStr
if rStr == '':
newStr = lStr
return newStr
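# Examples (added for clarity): delete_trailing_zeros('2.500000') -> '2.5',
# delete_trailing_zeros('3.000000') -> '3'.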
def findUniqueContours(inlist):
""" Find list of unique contours"""
uniqueContourList = []
for item in inlist:
if item not in uniqueContourList:
uniqueContourList.append(item)
return uniqueContourList
def PerformSpatialJoin(target_fc, join_fc, out_fc, contour_interval):
""" Perform Spatial Join between contours and filled contours to
create low and high contour label"""
try:
# add a temp field called range
field = arcpy.Field()
field.name = "range"
field.aliasName = "range"
field.length = 65534
field.type = "Text"
# this is the field from where the contour values are coming
fm = arcpy.FieldMap()
fm.mergeRule = "Join"
fm.joinDelimiter = ";"
fm.addInputField(join_fc, "Contour")
fm.outputField = field
# add the field map to the fieldmappings
fms = arcpy.FieldMappings()
fms.addFieldMap(fm)
# add a temp field called elevation
field = arcpy.Field()
field.name = "elevation"
field.aliasName = "Elevation from raster"
field.type = "Double"
        # this is the field that holds the raster elevation values
fm2 = arcpy.FieldMap()
fieldnames = [f.name for f in arcpy.ListFields(target_fc)]
# find index of elevation field (RASTERVALU) in output
# created by ExtractValuesToPoints
rastervalu_index = [index for index, item in
enumerate(fieldnames) if 'RASTERVALU' in item][0]
fm2.addInputField(target_fc, fieldnames[rastervalu_index])
fm2.outputField = field
fms.addFieldMap(fm2)
print(" SpatialJoin_analysis...")
arcpy.SpatialJoin_analysis(target_fc, join_fc, out_fc, '', '',
fms, 'SHARE_A_LINE_SEGMENT_WITH')
print(" AddField_management...")
CreateOutputContourFields(out_fc, contour_interval)
except Exception as ex:
print(ex.args[0])
def CreateOutputContourFields(inFC, contour_interval):
""" Create and populate the contour fields in the
output feature class"""
newFields = ['low_cont', 'high_cont', 'range_cont']
newFieldAlias = ['Low contour', 'High contour', 'Contour range']
icnt = 0
for newField in newFields:
arcpy.AddField_management(inFC, newField, 'TEXT', '#', '#', '#',
newFieldAlias[icnt], 'NULLABLE',
'REQUIRED', '#')
icnt+=1
cur = arcpy.UpdateCursor(inFC)
icnt=0
for row in cur:
icnt+=1
joinCount = row.getValue('Join_Count')
contourString = row.getValue('range')
pointElevation = row.getValue('elevation')
contourList = []
for i in contourString.split(';'):
contourList.append(float(i))
nuniques = findUniqueContours(contourList)
try:
if len(nuniques) > 2:
contourList = [x for x in contourList if x > -999999]
minValue = min(contourList)
maxValue = max(contourList)
if minValue == maxValue:
joinCount = 1
if minValue < -999999 or joinCount == 1:
if pointElevation > maxValue:
minValue = maxValue
maxValue = minValue + contour_interval
else:
minValue = maxValue - contour_interval
sminValue = int_if_youCan(minValue)
smaxValue = int_if_youCan(maxValue)
except:
sminValue = int_if_youCan(-1000000)
smaxValue = int_if_youCan(-1000000)
row.setValue(newFields[0], sminValue)
row.setValue(newFields[1], smaxValue)
row.setValue(newFields[2], delete_trailing_zeros(sminValue) + ' - ' + \
delete_trailing_zeros(smaxValue))
if minValue < -999999:
row.setValue(newFields[2], '<NoData>')
cur.updateRow(row)
del cur, row
def doFillContour(in_raster, outWorkspace, out_polygon_features, contour_interval, base_contour=0, z_factor=1):
print "Doing filled contour"
# Setting variable names for temporary feature classes
outContours = 'outContours'
outPolygonBndry = 'outPolygonBndry'
outContoursPolygons = 'outContoursPolygons'
outBuffer = 'outBuffer'
outBufferContourLine = 'outBufferContourLine'
outBufferContourLineLyr = 'outBufferContourLineLyr'
outContoursPolygonsWithPoints = 'outContoursPolygonsWithPoints'
# # Input parameters
# if ".shp" in out_polygon_features:
# print("Only file geodatabase output is supported.")
# sys.exit()
# outFCext = os.path.splitext(out_polygon_features)
# if (os.path.splitext(out_polygon_features)[1]).lower() == ".shp":
# print("Only file geodatabase output is supported.")
# sys.exit()
# currentWS = arcpy.env.workspace
# if outWorkspace.lower() != ".gdb":
# print("Only file geodatabase workspace is supported.")
# sys.exit()
arcpy.env.workspace = outWorkspace
ras_DEM = Raster(in_raster)
ras_cellsize = ras_DEM.meanCellHeight
print(" Contour...")
arcpy.sa.Contour(in_raster, outContours, contour_interval, base_contour,
z_factor)
print(" RasterToPolygon_conversion...")
arcpy.RasterToPolygon_conversion(IsNull(ras_DEM), outPolygonBndry,
"NO_SIMPLIFY")
print(" Buffer_analysis...")
try:
arcpy.Buffer_analysis(outPolygonBndry, outBuffer, str(-ras_cellsize)
+ ' Unknown', 'FULL', 'ROUND', 'NONE', '#')
except:
        print('This might be caused by insufficient memory. ' +
              'Use a smaller extent or try another computer.')
arcpy.Delete_management(outContours)
arcpy.Delete_management(outPolygonBndry)
sys.exit()
print(" FeatureToLine_management...")
arcpy.FeatureToLine_management([outContours, outBuffer],
outBufferContourLine, '#', 'ATTRIBUTES')
arcpy.MakeFeatureLayer_management(outBufferContourLine,
outBufferContourLineLyr)
arcpy.SelectLayerByAttribute_management(outBufferContourLineLyr,
'NEW_SELECTION',
'"BUFF_DIST" <> 0')
arcpy.CalculateField_management(outBufferContourLineLyr, 'Contour',
'-1000000', 'VB', '#')
arcpy.SelectLayerByAttribute_management(outBufferContourLineLyr,
'CLEAR_SELECTION')
print(" FeatureToPolygon_management...")
arcpy.FeatureToPolygon_management([outBuffer, outContours],
outContoursPolygons, '#',
'NO_ATTRIBUTES', '#')
outContoursPolygonsWithPoints, outEVT, outPolygPoints = \
FindZ(outContoursPolygons, in_raster)
# Spatial Join and update contour labels
PerformSpatialJoin(outContoursPolygonsWithPoints,
outBufferContourLineLyr, out_polygon_features,
contour_interval)
fields = arcpy.ListFields(out_polygon_features)
fields2delete = []
for field in fields:
if not field.required:
fields2delete.append(field.name)
print(" DeleteField_management...")
# these fields include all the temp fields like
# 'Join_Count', 'TARGET_FID', 'range', and 'elevation'
arcpy.DeleteField_management(out_polygon_features, fields2delete)
arcpy.AddField_management(out_polygon_features, "Height", "LONG")
arcpy.CalculateField_management(out_polygon_features, "Height",
"!high_cont!",
"PYTHON_9.3")
arcpy.AddField_management(out_polygon_features, "Age", "LONG")
age = out_polygon_features.split("/")[-1].replace("LIG_", "")
age = age.replace("k", "")
age = int(age)*100
arcpy.CalculateField_management(out_polygon_features, "Age",
age,
"PYTHON_9.3")
with arcpy.da.UpdateCursor(out_polygon_features, "*") as cursor:
for row in cursor:
if row[7] < 100:
cursor.deleteRow()
print(' Deleting temp files.')
arcpy.Delete_management(outBuffer)
arcpy.Delete_management(outContours)
arcpy.Delete_management(outContoursPolygons)
arcpy.Delete_management(outBufferContourLine)
arcpy.Delete_management(outPolygonBndry)
arcpy.Delete_management(outEVT)
arcpy.Delete_management(outPolygPoints)
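# Illustrative call sketch (added for clarity; the paths below are hypothetical
# placeholders, and only file geodatabase output is supported by this tool).
def _example_fill_contours():  # pragma: no cover
    doFillContour(in_raster='C:/data/LIG_topo.tif',
                  outWorkspace='C:/data/scratch.gdb',
                  out_polygon_features='C:/data/scratch.gdb/LIG_1k',
                  contour_interval=100)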
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-interfaces - based on the path /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors/neighbor/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State data for each IPv6 address on the
interface
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__ip",
"__link_layer_address",
"__origin",
"__is_router",
"__neighbor_state",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__ip = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9a-fA-F:\\.]*"},
),
is_leaf=True,
yang_name="ip",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ipv6-address-no-zone",
is_config=False,
)
self.__link_layer_address = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={"pattern": "([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?"},
),
is_leaf=True,
yang_name="link-layer-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="yang:phys-address",
is_config=False,
)
self.__origin = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"OTHER": {}, "STATIC": {}, "DYNAMIC": {}},
),
is_leaf=True,
yang_name="origin",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="neighbor-origin",
is_config=False,
)
self.__is_router = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="is-router",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="empty",
is_config=False,
)
self.__neighbor_state = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"INCOMPLETE": {},
"REACHABLE": {},
"STALE": {},
"DELAY": {},
"PROBE": {},
},
),
is_leaf=True,
yang_name="neighbor-state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="enumeration",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"interfaces",
"interface",
"subinterfaces",
"subinterface",
"ipv6",
"neighbors",
"neighbor",
"state",
]
def _get_ip(self):
"""
Getter method for ip, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors/neighbor/state/ip (inet:ipv6-address-no-zone)
YANG Description: [adapted from IETF IP model RFC 7277]
The IPv6 address of the neighbor node.
"""
return self.__ip
def _set_ip(self, v, load=False):
"""
Setter method for ip, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors/neighbor/state/ip (inet:ipv6-address-no-zone)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip() directly.
YANG Description: [adapted from IETF IP model RFC 7277]
The IPv6 address of the neighbor node.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9a-fA-F:\\.]*"},
),
is_leaf=True,
yang_name="ip",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ipv6-address-no-zone",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """ip must be of a type compatible with inet:ipv6-address-no-zone""",
"defined-type": "inet:ipv6-address-no-zone",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': '[0-9a-fA-F:\\.]*'}), is_leaf=True, yang_name="ip", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='inet:ipv6-address-no-zone', is_config=False)""",
}
)
self.__ip = t
if hasattr(self, "_set"):
self._set()
def _unset_ip(self):
self.__ip = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=six.text_type,
restriction_dict={
"pattern": "((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?"
},
),
restriction_dict={"pattern": "[0-9a-fA-F:\\.]*"},
),
is_leaf=True,
yang_name="ip",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="inet:ipv6-address-no-zone",
is_config=False,
)
def _get_link_layer_address(self):
"""
Getter method for link_layer_address, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors/neighbor/state/link_layer_address (yang:phys-address)
YANG Description: [adapted from IETF IP model RFC 7277]
The link-layer address of the neighbor node.
"""
return self.__link_layer_address
def _set_link_layer_address(self, v, load=False):
"""
Setter method for link_layer_address, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors/neighbor/state/link_layer_address (yang:phys-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_layer_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_layer_address() directly.
YANG Description: [adapted from IETF IP model RFC 7277]
The link-layer address of the neighbor node.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={"pattern": "([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?"},
),
is_leaf=True,
yang_name="link-layer-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="yang:phys-address",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """link_layer_address must be of a type compatible with yang:phys-address""",
"defined-type": "yang:phys-address",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_dict={'pattern': '([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?'}), is_leaf=True, yang_name="link-layer-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='yang:phys-address', is_config=False)""",
}
)
self.__link_layer_address = t
if hasattr(self, "_set"):
self._set()
def _unset_link_layer_address(self):
self.__link_layer_address = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_dict={"pattern": "([0-9a-fA-F]{2}(:[0-9a-fA-F]{2})*)?"},
),
is_leaf=True,
yang_name="link-layer-address",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="yang:phys-address",
is_config=False,
)
def _get_origin(self):
"""
Getter method for origin, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors/neighbor/state/origin (neighbor-origin)
YANG Description: [adapted from IETF IP model RFC 7277]
The origin of this neighbor entry.
"""
return self.__origin
def _set_origin(self, v, load=False):
"""
Setter method for origin, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors/neighbor/state/origin (neighbor-origin)
If this variable is read-only (config: false) in the
source YANG file, then _set_origin is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_origin() directly.
YANG Description: [adapted from IETF IP model RFC 7277]
The origin of this neighbor entry.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"OTHER": {}, "STATIC": {}, "DYNAMIC": {}},
),
is_leaf=True,
yang_name="origin",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="neighbor-origin",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """origin must be of a type compatible with neighbor-origin""",
"defined-type": "openconfig-if-ip:neighbor-origin",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'OTHER': {}, 'STATIC': {}, 'DYNAMIC': {}},), is_leaf=True, yang_name="origin", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='neighbor-origin', is_config=False)""",
}
)
self.__origin = t
if hasattr(self, "_set"):
self._set()
def _unset_origin(self):
self.__origin = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={"OTHER": {}, "STATIC": {}, "DYNAMIC": {}},
),
is_leaf=True,
yang_name="origin",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="neighbor-origin",
is_config=False,
)
def _get_is_router(self):
"""
Getter method for is_router, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors/neighbor/state/is_router (empty)
YANG Description: [adapted from IETF IP model RFC 7277]
Indicates that the neighbor node acts as a router.
"""
return self.__is_router
def _set_is_router(self, v, load=False):
"""
Setter method for is_router, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors/neighbor/state/is_router (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_is_router is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_is_router() directly.
YANG Description: [adapted from IETF IP model RFC 7277]
Indicates that the neighbor node acts as a router.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
is_leaf=True,
yang_name="is-router",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="empty",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """is_router must be of a type compatible with empty""",
"defined-type": "empty",
"generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="is-router", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='empty', is_config=False)""",
}
)
self.__is_router = t
if hasattr(self, "_set"):
self._set()
def _unset_is_router(self):
self.__is_router = YANGDynClass(
base=YANGBool,
is_leaf=True,
yang_name="is-router",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="empty",
is_config=False,
)
def _get_neighbor_state(self):
"""
Getter method for neighbor_state, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors/neighbor/state/neighbor_state (enumeration)
YANG Description: [adapted from IETF IP model RFC 7277]
The Neighbor Unreachability Detection state of this
entry.
"""
return self.__neighbor_state
def _set_neighbor_state(self, v, load=False):
"""
Setter method for neighbor_state, mapped from YANG variable /interfaces/interface/subinterfaces/subinterface/ipv6/neighbors/neighbor/state/neighbor_state (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbor_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbor_state() directly.
YANG Description: [adapted from IETF IP model RFC 7277]
The Neighbor Unreachability Detection state of this
entry.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"INCOMPLETE": {},
"REACHABLE": {},
"STALE": {},
"DELAY": {},
"PROBE": {},
},
),
is_leaf=True,
yang_name="neighbor-state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="enumeration",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """neighbor_state must be of a type compatible with enumeration""",
"defined-type": "openconfig-if-ip:enumeration",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=six.text_type, restriction_type="dict_key", restriction_arg={'INCOMPLETE': {}, 'REACHABLE': {}, 'STALE': {}, 'DELAY': {}, 'PROBE': {}},), is_leaf=True, yang_name="neighbor-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/interfaces/ip', defining_module='openconfig-if-ip', yang_type='enumeration', is_config=False)""",
}
)
self.__neighbor_state = t
if hasattr(self, "_set"):
self._set()
def _unset_neighbor_state(self):
self.__neighbor_state = YANGDynClass(
base=RestrictedClassType(
base_type=six.text_type,
restriction_type="dict_key",
restriction_arg={
"INCOMPLETE": {},
"REACHABLE": {},
"STALE": {},
"DELAY": {},
"PROBE": {},
},
),
is_leaf=True,
yang_name="neighbor-state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/interfaces/ip",
defining_module="openconfig-if-ip",
yang_type="enumeration",
is_config=False,
)
ip = __builtin__.property(_get_ip)
link_layer_address = __builtin__.property(_get_link_layer_address)
origin = __builtin__.property(_get_origin)
is_router = __builtin__.property(_get_is_router)
neighbor_state = __builtin__.property(_get_neighbor_state)
_pyangbind_elements = OrderedDict(
[
("ip", ip),
("link_layer_address", link_layer_address),
("origin", origin),
("is_router", is_router),
("neighbor_state", neighbor_state),
]
)
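# Illustrative sketch (not part of the generated bindings): every leaf in this
# container is config false, so the write path for a backend is the private
# _set_* methods rather than the read-only properties, e.g. (the handle name
# "state" is hypothetical):
#
#   state._set_ip("2001:db8::1")
#   state._set_link_layer_address("00:11:22:33:44:55")
#   state._set_origin("DYNAMIC")
#   state._set_neighbor_state("REACHABLE")
#   assert state.ip == "2001:db8::1"   # values read back via the properties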
|
|
import pytz
import datetime
from inflector import Inflector
from collections import OrderedDict
from rethinkengine.connection import get_conn
from rethinkengine.fields import BaseField, ObjectIdField, ReferenceField
from rethinkengine.query_set import QuerySetManager
from rethinkengine.errors import DoesNotExist, \
MultipleObjectsReturned, RqlOperationError, ValidationError
import inspect
import rethinkdb as r
__all__ = ['BaseDocument', 'Document']
class BaseDocument(type):
def __new__(mcs, name, bases, attrs):
new_class = super(BaseDocument, mcs).__new__(mcs, name, bases, attrs)
# If new_class is of type Document, return straight away
if object in new_class.__bases__:
return new_class
# Process schema
fields = sorted(
inspect.getmembers(
new_class,
lambda o: isinstance(o, BaseField)
),
key=lambda i: i[1]._creation_order)
new_class._fields = attrs.get('_fields', OrderedDict())
new_class._fields['id'] = ObjectIdField()
for field_name, field in fields:
new_class._fields[field_name] = field
delattr(new_class, field_name)
new_class.objects = QuerySetManager()
# Merge exceptions
classes_to_merge = (DoesNotExist, MultipleObjectsReturned)
for c in classes_to_merge:
exc = type(c.__name__, (c,), {'__module__': name})
setattr(new_class, c.__name__, exc)
# Populate __table_name__ if not provided
if new_class.__table_name__ is None:
new_class.__table_name__ = Inflector().pluralize(name).lower()
return new_class
class Document(object):
__metaclass__ = BaseDocument
__table_name__ = None
__primary_key__ = 'id'
__order_by__ = None
def __init__(self, **kwargs):
super(Document, self).__init__()
self.__dict__['_data'] = {}
self.__dict__['_iter'] = None
self.__dict__['_dirty'] = True
for name, value in kwargs.items():
setattr(self, name, value)
def __setattr__(self, key, value):
field = self._fields.get(key, None)
if field is not None:
# Attach UTC timezone to naive datetime values
if isinstance(value, datetime.datetime) and not value.tzinfo:
value = pytz.utc.localize(value)
if self._get_value(key) != value:
self._dirty = True
# Store under the _id-suffixed key when the field is a ReferenceField
if isinstance(self._fields.get(key), ReferenceField):
key += '_id'
self._data[key] = value
super(Document, self).__setattr__(key, value)
def __getattr__(self, key):
field = self._fields.get(key)
if field:
return self._get_value(key)
raise AttributeError(key)
def __str__(self):
return '<%s object>' % self.__class__.__name__
def __iter__(self):
return self
def next(self):
if not self._iter:
self.__dict__['_iter'] = iter(self._fields)
return self._iter.next()
def __repr__(self):
return '<%s object>' % self.__class__.__name__
def items(self):
return [(k, self._get_value(k)) for k in self._fields]
@classmethod
def index_create(cls, name, fields=None, multi=False):
if fields is None:
fields = []
if multi is None:
multi = False
table = r.table(cls.__table_name__)
if len(fields) == 0 and not multi:
return table.index_create(name).run(get_conn())
if len(fields) > 0:
return table.index_create(name, [r.row[x] for x in fields]).run(get_conn())
if multi:
return table.index_create(name, multi=True).run(get_conn())
return False
@classmethod
def index_drop(cls, name):
return r.table(cls.__table_name__).index_drop(name).run(get_conn())
@classmethod
def index_list(cls):
return r.table(cls.__table_name__).index_list().run(get_conn())
@classmethod
def index_wait(cls, name):
return r.table(cls.__table_name__).index_wait(name).run(get_conn())
@classmethod
def index_status(cls, name):
return r.table(cls.__table_name__).index_status(name).run(get_conn())
@classmethod
def table_create(cls, if_not_exists=True):
if (
if_not_exists and
(cls.__table_name__ in r.table_list().run(get_conn()))
):
return
return r.table_create(
cls.__table_name__,
primary_key=cls.__primary_key__
).run(get_conn())
@classmethod
def table_drop(cls):
return r.table_drop(cls.__table_name__).run(get_conn())
def validate(self):
data = [(name, field, getattr(self, name)) for name, field in
self._fields.items()]
for name, field, value in data:
if name == 'id' and self.__primary_key__ != 'id':
continue
if isinstance(field, ObjectIdField) and value is None:
continue
if not field.is_valid(value):
raise ValidationError('Field %s (%s): value is of wrong type %s' %
(name, field.__class__.__name__, type(value)))
@classmethod
def get_all(cls, *args, **kwargs):
result = r.table(cls.__table_name__).get_all(*args, **kwargs).run(get_conn())
return [cls(**o) for o in result]
def save(self):
if not self._dirty:
return True
self.validate()
is_update = False
try:
if self.id:
is_update = True
self._pre_update()
else:
self._pre_save()
except AttributeError:
pass
doc = self._doc
table = r.table(self.__table_name__)
if is_update:
# TODO: implement atomic updates instead of updating entire doc
result = table.get(self.id).update(doc).run(get_conn())
else:
result = table.insert(doc).run(get_conn())
if result.get('errors', False) == 1:
raise RqlOperationError(result['first_error'])
self._dirty = False
if 'generated_keys' in result:
self._data['id'] = result['generated_keys'][0]
try:
if is_update:
self._post_update()
else:
self._post_save()
except AttributeError:
pass
return True
def delete(self):
table = r.table(self.__table_name__)
if self._get_value('id'):
try:
self._pre_delete()
except AttributeError:
pass
result = table.get(self._get_value('id')).delete().run(get_conn())
try:
self._post_delete()
except AttributeError:
pass
return result
def _get_value(self, field_name):
key = field_name
if isinstance(self._fields[field_name], ReferenceField):
key += '_id'
return self._data.get(key) or self._fields[field_name]._default
def _to_python(self, field_name, value):
if field_name in self._fields:
return self._fields[field_name].to_python(value)
else:
return value
def _to_rethink(self, field_name, value):
if field_name in self._fields:
return self._fields[field_name].to_rethink(value)
else:
return value
@property
def _doc(self):
doc = {}
for name, field_obj in self._fields.items():
key = self.__primary_key__ if name == 'id' else name
value = self._get_value(name)
if key == self.__primary_key__ and value is None:
continue
if isinstance(field_obj, ReferenceField):
key += '_id'
doc[key] = None if not value else field_obj.to_rethink(value)
return doc
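# Minimal usage sketch (illustrative; concrete field classes beyond BaseField
# are assumptions about rethinkengine.fields):
#
#   class User(Document):
#       name = BaseField()     # in practice a typed subclass, e.g. a
#       joined = BaseField()   # hypothetical StringField / DateTimeField
#
#   u = User(name='alice', joined=datetime.datetime.utcnow())
#   u.save()     # validates, then inserts (or updates when u.id is set)
#   u.delete()   # removes the row, calling _pre_delete/_post_delete if defined
#
# The table name defaults to the pluralized, lower-cased class name ("users").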
|
|
#!/usr/bin/env python
# coding:utf-8
# vi:tabstop=4:shiftwidth=4:expandtab:sts=4
import theano
import theano.tensor as T
import lasagne
import math
from .. import stacked
from ..stacked import Layers, register_layers_class
from ..stacked import register_concat_handler, register_inputs_handler
from ..stacked import register_flag_handler, register_flag_handler_closer
from ..stacked import register_layer_handler, register_nonlinearities
from ..stacked import *
from ..utils.curry import curry
from .. import utils
from .argmax import goroshin_max, goroshin_argmax, goroshin_unargmax
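# replace_input() walks a lasagne layer graph and swaps any input layer that
# appears in the mapping m for its replacement, memoising visited layers in
# `done` so shared sub-graphs are only traversed once; stacks_replace_input()
# applies the same substitution to every layer stored in a stacks dict (plain
# lists or nested dicts of lists). LasagneLayers.finish() below relies on this
# to patch the PlaceHolderLayer instances created for forward references once
# the real layers exist.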
def replace_input(layer, m, done=None):
if done is None:
done = set()
if layer in m:
return m[layer]
if layer in done:
return layer
done.add(layer)
if hasattr(layer, 'input_layer'):
if layer.input_layer in m:
layer.input_layer = m[layer.input_layer]
else:
replace_input(layer.input_layer, m, done)
if hasattr(layer, 'input_layers'):
for i, t in enumerate(layer.input_layers):
if t in m:
layer.input_layers[i] = m[t]
else:
replace_input(t, m, done)
return layer
def stacks_replace_input(stacks, m):
for k in stacks:
a = stacks[k]
if type(a) == list:
for i, t in enumerate(a):
a[i] = replace_input(t, m)
else:
for k in a:
aa = a[k]
for i, t in enumerate(aa):
aa[i] = replace_input(t, m)
class PlaceHolderLayer(lasagne.layers.InputLayer):
pass
class ZeroLayer(lasagne.layers.InputLayer):
pass
class LasagneLayers(Layers):
def get_layer(self, k):
if type(k) == list and len(k) == 1:
res = lasagne.layers.NonlinearityLayer(
self.get_layer(k[0]), theano.gradient.disconnected_grad)
return res
if type(k) == int:
return self.layers[::-1][k]
if type(k) == list and len(k) > 1:
assert len(k) == 3
if not k[0] in self.future:
batchsize = lasagne.layers.get_output_shape(self.layers[0])[0]
self.future[k[0]] = PlaceHolderLayer(shape=(batchsize, )+k[1])
return self.future[k[0]]
return self.stacks[k][-1]
def finish(self):
m = {}
for k in self.future:
m[self.future[k]] = self.stacks[k][0]
if stacked.verbose:
print m
stacks_replace_input(self.stacks, m)
register_layers_class(LasagneLayers)
def concat_handler(layers, flags, stacks, this_model):
if 'axis' in flags:
axis = flags['axis']
else:
axis = 1
return lasagne.layers.ConcatLayer(layers, axis=axis)
def merge_handler(layers, flags, stacks, this_model):
return lasagne.layers.ElemwiseMergeLayer(layers, flags['op'])
def add_handler(layers, flags, stacks, this_model):
return lasagne.layers.ElemwiseMergeLayer(layers, T.add)
def sub_handler(layers, flags, stacks, this_model):
return lasagne.layers.ElemwiseMergeLayer(layers, T.sub)
register_concat_handler(concat_handler)
register_inputs_handler('op', merge_handler)
register_inputs_handler('add', add_handler)
register_inputs_handler('sub', sub_handler)
def reshape_handler(network, flags, stacks, this_model):
if 'raw' in flags:
network = lasagne.layers.ReshapeLayer(network, flags['reshape'])
else:
network = lasagne.layers.ReshapeLayer(network, (-1, )+flags['reshape'])
return network, ()
def slice_handler(network, flags, stacks, this_model):
if 'axis' in flags:
axis = flags['axis']
else:
axis = 1
network = lasagne.layers.SliceLayer(network, flags['slice'], axis=axis)
return network, ()
def maxpool_handler(network, flags, stacks, this_model):
layername = flags['layername'] if 'layername' in flags else None
filter_size = flags['filter_size'] if 'filter_size' in flags else 0
conv_stride = flags['stride'] if 'stride' in flags else 0
if conv_stride == 0 or conv_stride == 1:
pad = filter_size//2
elif conv_stride > 0:
if filter_size == conv_stride:
pad = 0
else:
pad = filter_size//2
if 'pad' in flags:
pad = flags['pad']
# else: #conv_stride<0
# num_filters=num_filters*(-conv_stride)*(-conv_stride)
# if not 'nopad' in flags:
# pad='same'
# else:
# pad=0
dim = len(lasagne.layers.get_output_shape(network))-2
convs = {
1: lasagne.layers.Pool1DLayer,
2: lasagne.layers.Pool2DLayer,
3: lasagne.layers.Pool3DLayer,
}
assert dim in convs
conv = convs[dim]
assert filter_size > 0
network = conv(
network,
pool_size=filter_size,
stride=max(1, conv_stride),
pad=pad,
mode='max',
name=layername,
)
return network, ()
def meanpool_handler(network, flags, stacks, this_model):
layername = flags['layername'] if 'layername' in flags else None
filter_size = flags['filter_size'] if 'filter_size' in flags else 0
conv_stride = flags['stride'] if 'stride' in flags else 0
if conv_stride == 0 or conv_stride == 1:
pad = filter_size//2
elif conv_stride > 0:
if filter_size == conv_stride:
pad = 0
else:
pad = filter_size//2
if 'pad' in flags:
pad = flags['pad']
# else: #conv_stride<0
# num_filters=num_filters*(-conv_stride)*(-conv_stride)
# if not 'nopad' in flags:
# pad='same'
# else:
# pad=0
dim = len(lasagne.layers.get_output_shape(network))-2
convs = {
1: lasagne.layers.Pool1DLayer,
2: lasagne.layers.Pool2DLayer,
3: lasagne.layers.Pool3DLayer,
}
assert dim in convs
conv = convs[dim]
assert filter_size > 0
network = conv(
network,
pool_size=filter_size,
stride=max(1, conv_stride),
pad=pad,
mode='average_inc_pad',
name=layername,
)
return network, ()
def upscale_handler(network, flags, stacks, this_model):
layername = flags['layername'] if 'layername' in flags else None
filter_size = flags['filter_size'] if 'filter_size' in flags else 0
dim = len(lasagne.layers.get_output_shape(network))-2
assert filter_size > 0
convs = {
1: lasagne.layers.Upscale1DLayer,
2: lasagne.layers.Upscale2DLayer,
3: lasagne.layers.Upscale3DLayer,
}
assert dim in convs
conv = convs[dim]
network = conv(
network,
scale_factor=filter_size,
name=layername,
mode='repeat',
)
return network, ()
def num_filters_handler(network, flags, stacks, this_model):
paramlayers = []
if 'sharegroup2params' not in this_model:
this_model['sharegroup2params'] = {}
sharegroup2params = this_model['sharegroup2params']
num_filters0 = flags['num_filters']
num_filters = flags['num_filters']
conv_stride = flags['stride'] if 'stride' in flags else 0
layername = flags['layername'] if 'layername' in flags else None
filter_size = flags['filter_size'] if 'filter_size' in flags else 0
if conv_stride == 0 or conv_stride == 1:
pad = 'same'
elif conv_stride > 0:
if filter_size == conv_stride:
pad = 0
else:
pad = 'same'
else: # conv_stride<0
num_filters = num_filters*(-conv_stride)*(-conv_stride)
if 'nopad' not in flags:
pad = 'same'
else:
pad = 0
if 'pad' in flags:
pad = flags['pad']
nonlinearity = None
if 'linear' in flags:
pass
elif 'nonlinearity' in flags:
nonlinearity = flags['nonlinearity']
else:
nonlinearity = this_model.get('relu', lasagne.nonlinearities.rectify)
sharegroup = flags['sharegroup'] if 'sharegroup' in flags else 0
if sharegroup and sharegroup in sharegroup2params:
ww = sharegroup2params[sharegroup][0]
bb = sharegroup2params[sharegroup][1]
if 'const' in flags:
ww = theano.gradient.disconnected_grad(ww)
if bb is not None:
bb = theano.gradient.disconnected_grad(bb)
else:
init = this_model.get('init', lasagne.init.GlorotUniform)
if 'init' in flags:
init = flags['init']
if 'init_gain' in flags:
ww = init(gain=flags['init_gain'])
else:
if nonlinearity == lasagne.nonlinearities.leaky_rectify:
alpha = 0.01
ww = init(gain=math.sqrt(2/(1+alpha**2)))
elif nonlinearity == lasagne.nonlinearities.rectify:
ww = init(gain='relu')
else:
ww = init()
if 'nobias' in flags:
bb = None
else:
bb = lasagne.init.Constant(0.0)
dim = len(lasagne.layers.get_output_shape(network))-2
if 'dense' in flags or dim == 0:
if 'bn' in flags:
network = lasagne.layers.DenseLayer(
network,
num_units=num_filters,
W=ww,
b=None,
nonlinearity=None,
name=layername,
)
savew = network.W
paramlayers += [network]
network = lasagne.layers.BatchNormLayer(network, beta=bb)
saveb = network.beta
paramlayers += [network]
network = lasagne.layers.NonlinearityLayer(
network, nonlinearity=nonlinearity)
else:
network = lasagne.layers.DenseLayer(
network,
num_units=num_filters,
W=ww,
b=bb,
nonlinearity=nonlinearity,
name=layername,
)
savew = network.W
saveb = network.b
paramlayers += [network]
else:
# input_shape = lasagne.layers.get_output_shape(network)
if 'local' not in flags:
convs = {
1: lasagne.layers.Conv1DLayer,
2: lasagne.layers.Conv2DLayer,
3: lasagne.layers.Conv3DLayer,
}
assert dim in convs
conv = convs[dim]
assert filter_size > 0
if 'bn' in flags:
network = conv(
network, num_filters=num_filters,
filter_size=filter_size,
stride=max(1, conv_stride),
pad=pad,
W=ww,
b=None,
nonlinearity=None,
name=layername,
)
savew = network.W
paramlayers += [network]
network = lasagne.layers.BatchNormLayer(network, beta=bb)
saveb = network.beta
paramlayers += [network]
network = lasagne.layers.NonlinearityLayer(
network, nonlinearity=nonlinearity)
else:
network = conv(
network, num_filters=num_filters,
filter_size=filter_size,
stride=max(1, conv_stride),
pad=pad,
W=ww,
b=bb,
nonlinearity=nonlinearity,
name=layername,
)
savew = network.W
saveb = network.b
paramlayers += [network]
else:
convs = {
1: lasagne.layers.LocallyConnected1DLayer,
2: lasagne.layers.LocallyConnected2DLayer,
3: lasagne.layers.LocallyConnected3DLayer,
}
assert dim in convs
conv = convs[dim]
assert conv_stride == 1
assert filter_size > 0
if 'bn' in flags:
network = conv(
network, num_filters=num_filters,
filter_size=filter_size,
stride=max(1, conv_stride),
pad=pad,
W=ww,
b=None,
nonlinearity=None,
name=layername,
untie_biases=True,
)
savew = network.W
paramlayers += [network]
network = lasagne.layers.BatchNormLayer(network, beta=bb)
saveb = network.beta
paramlayers += [network]
network = lasagne.layers.NonlinearityLayer(
network, nonlinearity=nonlinearity)
else:
network = conv(
network, num_filters=num_filters,
filter_size=filter_size,
stride=max(1, conv_stride),
pad=pad,
W=ww,
b=bb,
nonlinearity=nonlinearity,
name=layername,
untie_biases=True,
)
paramlayers += [network]
savew = network.W
saveb = network.b
# paramlayers += [network]
if sharegroup and sharegroup not in sharegroup2params:
sharegroup2params[sharegroup] = [savew, saveb]
if 'saveparamlayer' in flags and flags['saveparamlayer'] is not None:
g = flags['saveparamlayer']
if g not in stacks:
stacks[g] = []
stacks[g] += [network]
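# A negative stride acts as an upscaling factor: the layer above produced
# num_filters0 * stride^2 channels, which are now rearranged into the spatial
# dimensions (a sub-pixel / pixel-shuffle style upsampling), leaving
# num_filters0 channels at (-conv_stride) times the original width and height.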
if conv_stride < 0:
b, c, width, height = lasagne.layers.get_output_shape(network)
network = lasagne.layers.ReshapeLayer(
network,
(b, num_filters0, -conv_stride, -conv_stride, width, height))
network = lasagne.layers.DimshuffleLayer(network, (0, 1, 4, 2, 5, 3))
network = lasagne.layers.ReshapeLayer(
network,
(b, num_filters0, width*(-conv_stride), height*(-conv_stride)))
return network, paramlayers
def dimshuffle_handler(network, flags, stacks, this_model):
return lasagne.layers.DimshuffleLayer(network, flags['dimshuffle']), ()
def noise_handler(network, flags, stacks, this_model):
sigma = flags['noise']
if sigma is True:
sigma = 0.1
return lasagne.layers.GaussianNoiseLayer(network, sigma), ()
def lrn_handler(network, flags, stacks, this_model):
if type(flags['lrn']) == dict:
return lasagne.layers.LocalResponseNormalization2DLayer(
network, **flags['lrn']), ()
else:
return lasagne.layers.LocalResponseNormalization2DLayer(network), ()
def dropout_handler(network, flags, stacks, this_model):
p = flags['dropout']
if p is True:
p = 0.5
return lasagne.layers.DropoutLayer(network, p=p), ()
def watch_handler(network, flags, stacks, this_model):
get_layer = this_model['get_layer']
if 'watchpoints' not in this_model:
this_model['watchpoints'] = {}
watchpoints = this_model['watchpoints']
tmp = None
g = None
if type(flags['watch']) == str:
g = flags['watch']
tmp = network
else:
if len(flags['watch']) == 2:
to, g = flags['watch']
eq = lasagne.objectives.squared_error
else:
to, g, eq = flags['watch']
if callable(to): # type(to)==type(lambda x:x):
#batchsize = lasagne.layers.get_output_shape(network)[0]
tmp = lasagne.layers.NonlinearityLayer(
network, to)
elif to == 'zeros':
s0 = lasagne.layers.get_output_shape(network)
target = ZeroLayer(
shape=s0,
input_var=T.zeros(s0, dtype=theano.config.floatX))
# tmp=lasagne.layers.NonlinearityLayer(network,
# nonlinearity=lambda x:x**2.0
# )
tmp = lasagne.layers.ElemwiseMergeLayer((network, target), eq)
else:
target = get_layer(to)
tmp = lasagne.layers.ElemwiseMergeLayer((network, target), eq)
if 'sum' in flags:
if type(flags['sum']) == int:
n = flags['sum']
else:
n = 1
shape = lasagne.layers.get_output_shape(tmp)[:n]
tmp = lasagne.layers.ExpressionLayer(
tmp,
curry(
lambda n, shape, x: x.flatten(ndim=n+1).sum(axis=n),
n, shape),
output_shape=shape)
if g not in watchpoints:
watchpoints[g] = []
watchpoints[g] += [tmp]
return network, ()
def equal_handler(network, flags, stacks, this_model):
get_layer = this_model['get_layer']
if 'errors' not in this_model:
this_model['errors'] = {}
errors = this_model['errors']
if len(flags['equal']) == 2:
to, g = flags['equal']
eq = lasagne.objectives.squared_error
w = None
elif len(flags['equal']) == 3:
to, g, eq = flags['equal']
w = None
else:
to, g, eq, w = flags['equal']
if g not in errors:
errors[g] = []
if callable(to): # type(to)==type(lambda x:x):
#batchsize = lasagne.layers.get_output_shape(network)[0]
tmp = lasagne.layers.NonlinearityLayer(
network, to)
elif to == 'zeros':
s0 = lasagne.layers.get_output_shape(network)
target = ZeroLayer(
shape=s0,
input_var=T.zeros(s0, dtype=theano.config.floatX))
tmp = lasagne.layers.ElemwiseMergeLayer((network, target), eq)
else:
target = get_layer(to)
tmp = lasagne.layers.ElemwiseMergeLayer((network, target), eq)
if w is not None:
w = get_layer(w)
tmp = lasagne.layers.ElemwiseMergeLayer(
(tmp, w),
lambda x, y: x * y / (y.sum(dtype=theano.config.floatX) + utils.floatX(1e-4)) * T.prod(y.shape, dtype=theano.config.floatX))
if 'sum' in flags:
if type(flags['sum']) == int:
n = flags['sum']
else:
n = 1
shape = lasagne.layers.get_output_shape(tmp)[:n]
tmp = lasagne.layers.ExpressionLayer(
tmp,
curry(
lambda n, shape, x: x.flatten(ndim=n+1).sum(axis=n),
n, shape),
output_shape=shape)
errors[g] += [tmp]
return network, ()
def relu_handler(network, flags, stacks, this_model):
assert flags['relu'] is True
nonlinearity = this_model.get('relu', lasagne.nonlinearities.rectify)
if 'shape' in flags:
shape = flags['shape']
if type(shape) == tuple:
shape = list(shape)
if type(shape) == list and shape[0] is None:
shape[0] = lasagne.layers.get_output_shape(network)[0]
network = lasagne.layers.ExpressionLayer(
network, nonlinearity, output_shape=shape)
else:
network = lasagne.layers.NonlinearityLayer(
network, nonlinearity=nonlinearity)
return network, ()
def nonlinearity_handler(network, flags, stacks, this_model):
relu = this_model.get('relu', lasagne.nonlinearities.rectify)
if type(flags) == dict:
if 'nonlinearity' in flags:
nonlinearity = flags['nonlinearity']
if not callable(nonlinearity):
nonlinearity = relu
else:
nonlinearity = relu
if 'shape' in flags:
shape = flags['shape']
if type(shape) == tuple:
shape = list(shape)
if type(shape) == list and shape[0] is None:
shape[0] = lasagne.layers.get_output_shape(network)[0]
network = lasagne.layers.ExpressionLayer(
network, nonlinearity, output_shape=shape)
else:
network = lasagne.layers.NonlinearityLayer(
network, nonlinearity=nonlinearity)
return network, ()
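# The handlers below wrap the goroshin_* helpers imported from .argmax:
# presumably soft, differentiable analogues of arg-max, its pseudo-inverse and
# max, with sharpness controlled by the 'beta'/'sigma' flags. Each result is
# exposed through an ExpressionLayer so the operation stays inside the graph.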
def argmax_handler(network, flags, stacks, this_model):
if type(flags['argmax']) == tuple:
axis = flags['argmax']
else:
axis = (1, )
shape = lasagne.layers.get_output_shape(network)
output_shape = ()
for idx, w in enumerate(shape):
if idx not in axis:
output_shape += (w, )
network = lasagne.layers.ExpressionLayer(
network, curry(
lambda shape, axis, beta, x: goroshin_argmax(
x, shape, axis=axis, beta=beta
).astype(theano.config.floatX),
shape, axis, flags['beta']),
output_shape=output_shape[0:1]+(len(axis), )+output_shape[1:])
return network, ()
def unargmax_handler(network, flags, stacks, this_model):
if type(flags['unargmax']) == tuple:
axis = flags['unargmax']
else:
axis = (1, )
shape = flags['shape']
sigma = flags['sigma'] if 'sigma' in flags else 1.0
if type(shape) == tuple:
shape = list(shape)
if type(shape) == list and shape[0] is None:
shape[0] = lasagne.layers.get_output_shape(network)[0]
network = lasagne.layers.ExpressionLayer(
network,
curry(
lambda shape, axis, x: goroshin_unargmax(
x, shape, axis=axis, sigma=sigma
).astype(theano.config.floatX),
shape, axis),
output_shape=shape)
return network, ()
def max_handler(network, flags, stacks, this_model):
if type(flags['max']) == tuple:
axis = flags['max']
else:
axis = (1, )
shape = list(lasagne.layers.get_output_shape(network))
for i in axis:
shape[i] = 1
network = lasagne.layers.ExpressionLayer(
network, curry(
lambda axis, beta, x: goroshin_max(
x, axis=axis, beta=beta, keepdims=True
).astype(theano.config.floatX),
axis, flags['beta']),
output_shape=shape)
return network, ()
register_flag_handler('equal', equal_handler)
register_flag_handler('watch', watch_handler)
register_flag_handler('relu', relu_handler)
register_flag_handler('nonlinearity', nonlinearity_handler, ('num_filters', ))
register_flag_handler('noise', noise_handler)
register_flag_handler('lrn', lrn_handler)
register_flag_handler('dropout', dropout_handler)
register_flag_handler('unargmax', unargmax_handler)
register_flag_handler('argmax', argmax_handler)
register_flag_handler('max', max_handler)
register_flag_handler('dimshuffle', dimshuffle_handler)
# register_flag_handler_closer(num_filters_handler, num_filters_handler_closer)
register_flag_handler('num_filters', num_filters_handler, (
'maxpool', 'meanpool', 'upscale'))
register_flag_handler('upscale', upscale_handler)
register_flag_handler('meanpool', meanpool_handler)
register_flag_handler('maxpool', maxpool_handler)
register_flag_handler('slice', slice_handler)
register_flag_handler('reshape', reshape_handler)
def layer_handler(network):
if stacked.verbose:
print 'output_shape:', lasagne.layers.get_output_shape(network)
register_layer_handler(layer_handler)
register_nonlinearities({
'softmax': lasagne.nonlinearities.softmax,
'rectify': lasagne.nonlinearities.rectify,
'sigmoid': lasagne.nonlinearities.sigmoid,
'tanh': lasagne.nonlinearities.tanh,
'linear': lasagne.nonlinearities.linear,
})
|
|
import socket
from struct import pack, unpack
import sys
import time
import ipaddress
def server_greeting(received_data):
(mode,) = unpack('!I', received_data[12:16]) # get Mode field; bytes 12,13,14 and 15
return mode
def set_up_response():
mode = pack('!I', 1)
rest_of_packet = pack('!20Q', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
return mode + rest_of_packet
def server_start(received_data):
accept = int(received_data[15])
return accept
def request_tw_session(session_sender, session_reflector, tos=0):
# https://tools.ietf.org/html/rfc5357#section-3.5
command_number = bytes([5]) # One Byte with the value of decimal 5
ipvn = bytes([4]) # One Byte with value 4 for IPv4; also include the MBZ field
conf_sender_receiver = pack('!H', 0) # Both the Conf-Sender field and Conf-Receiver field MUST be set to 0
num_of_schedule_slots = pack('!I', 0) # the Number of Scheduled Slots and Number of Packets MUST be set to 0
num_of_pkts = pack('!I', 0)
sender_port = pack('!H', session_sender[1]) # This is the local UDP port at the session-sender (used by TWAMP-Test)
# Right below is the remote UDP port at the session-reflector (used by TWAMP-Test):
receiver_port = pack('!H', session_reflector[1])
# According to https://tools.ietf.org/html/rfc5357#section-3.5 , I could have set these both to zero (0) since I am
# using the same addresses for TWAMP-Test as I did with TWAMP-Control. Unfortunately this did not work with Cisco.
# Therefore I just set them again... to be the same as TWAMP-Control.
sender_address = bytes([int(x) for x in session_sender[0].split('.')] + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
receiver_address = bytes([int(x) for x in session_reflector[0].split('.')] + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
sid = pack('!QQ', 0, 0) # the SID in the Request-TW-Session message MUST be set to 0
padding_length = pack('!I', 1372) # Session-Reflector shall add 1372 Bytes of padding to its response packet
# https://docs.python.org/2/library/time.html#time.time > Gives number of seconds since Unix Epoch (0h Jan 1 1970)
# https://tools.ietf.org/html/rfc868 > Gives number of seconds between Unix Epoch and 0h Jan 1 1900 (!)
localtime = time.time() + 2208988800
# Start Time -> Time when the TWAMP-Test session is to be started (but not before Start-Sessions command is issued)
start_time_integer_part = int(localtime) # Start in zero (0) seconds from now
start_time_fractional_part = int(str(localtime % 1)[2:11]) # Take 9 decimal places
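# Note: slicing nine decimal digits only approximates the fractional field; a
# strict NTP-style timestamp (as used by RFC 4656/5357) would scale the
# fraction by 2**32, e.g. int((localtime % 1) * (2 ** 32)). The difference is
# irrelevant here because the session is asked to start immediately anyway.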
start_time = pack('!I', start_time_integer_part) + pack('!I', start_time_fractional_part)
timeout_integer_part = 10 # Session-Reflector will reflect TWAMP-Test packets for 10 seconds after Stop-Sessions
timeout_fractional_part = 0
timeout = pack('!I', timeout_integer_part) + pack('!I', timeout_fractional_part)
type_p_descriptor = bytes([tos >> 2, 0, 0, 0]) # I need to shift right (Bitwise) twice the ToS value to get the
# corresponding DSCP. Now ... regarding the overall structure of Type-P Descriptor field ... the first byte has to
# start with two zero bits (00), followed by the DSCP value.
mbz = pack('!Q', 0)
hmac = pack('!QQ', 0, 0) # In open mode, the HMAC fields are unused and have the same semantics as MBZ fields
msg = command_number + ipvn + conf_sender_receiver + num_of_schedule_slots + num_of_pkts + sender_port
msg += receiver_port + sender_address + receiver_address + sid + padding_length + start_time + timeout
msg += type_p_descriptor + mbz + hmac
return msg
def accept_session(received_data):
accept = received_data[0]
(port,) = unpack('!H', received_data[2:4]) # Bytes 2 and 3
# The SID (session identifier) is generated by the TWAMP-Server
sid = unpack('!IQI', received_data[4:20]) # Bytes 4 up to 19 (inclusive)
# Info about how SID is constructed > page 17 of https://tools.ietf.org/html/rfc4656#section-3.5
return accept, port, sid
def start_sessions():
command_number = bytes([2]) # One Byte with the value of decimal 2
mbz = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # Fifteen Bytes of 0
hmac = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) # Sixteen Bytes of 0
return command_number + mbz + hmac
def start_ack(received_data):
accept = received_data[0]
return accept
def stop_sessions():
# https://tools.ietf.org/html/rfc5357#section-3.8
command_number = bytes([3]) # One Byte with the value of decimal 3
accept = bytes([0])
mbz = pack('!H', 0)
number_of_sessions = pack('!I', 1) # I have only started one session
mbz_hmac = pack('!QQQ', 0, 0, 0)
return command_number + accept + mbz + number_of_sessions + mbz_hmac
# --- Main ---
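# Overview: the code below walks the unauthenticated TWAMP-Control exchange end
# to end: Server Greeting -> Set-Up-Response -> Server-Start, then for each ToS
# value Request-TW-Session -> Accept-Session -> Start-Sessions -> Start-Ack, a
# UDP TWAMP-Test run, and finally Stop-Sessions.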
# Limit the IP block of Servers / Session-Reflectors for security purposes ...
ALLOWED_SERVER_BLOCK = '192.168.1.0/24'
allowed_server_block = ipaddress.IPv4Network(ALLOWED_SERVER_BLOCK)
if len(sys.argv) == 3:
print('\nYou have defined the Server / Session-Reflector ', sys.argv[1], 'and asked for the TWAMP-Test to last ',
sys.argv[2], ' minutes.')
target_ip = ipaddress.ip_address(sys.argv[1])
test_duration_minutes = int(sys.argv[2])
if target_ip not in allowed_server_block.hosts():
print("Unfortunately the IPv4 address that you provided is not within allowed block "
+ ALLOWED_SERVER_BLOCK + '\n')
sys.exit(1)
elif test_duration_minutes <= 0:
print("Test duration (minutes) has to an integer greater than zero (0). E.g. 1, 2, 3, 4, 5, etc\n")
sys.exit(1)
else:
print('\nThis script requires two (2) command-line arguments; the IPv4 address of the Server / Session-Reflector as'
' well as the TWAMP-Test duration (in minutes).\n')
sys.exit(1)
CONTROL_CLIENT = ('192.168.1.38', 862) # This is the local host
SESSION_SENDER = (CONTROL_CLIENT[0], 21337)
server = (str(sys.argv[1]), 862)
session_reflector = (server[0], 21337)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, 255)
s.setsockopt(socket.IPPROTO_IP, socket.IP_TOS, 96) # Set IP ToS Byte to 96 (CS3). "The Server SHOULD use the DSCP of
# the Control-Client's TCP SYN in ALL subsequent packets on that connection" as noted in:
# https://tools.ietf.org/html/rfc5357#section-3.1
s.settimeout(5) # Set timeout of 5 seconds to blocking operations such as recv()
s.bind(CONTROL_CLIENT)
s.connect(server)
data = s.recv(1024)
# https://tools.ietf.org/html/rfc4656#section-3.1
mode = server_greeting(data)
print('[TWAMP-Control] Control-Client ', CONTROL_CLIENT, ' received Server Greeting msg from ', server)
if mode != 1:
print('[Server Greeting] This script only supports unauthenticated mode and as such it expected Mode to be 1.')
print('However, it received mode value ' + str(mode) + '.')
s.close()
sys.exit(1)
set_up_response_msg = set_up_response()
s.send(set_up_response_msg)
print('[TWAMP-Control] Control-Client ', CONTROL_CLIENT, ' sent Set-up-Response msg to ', server)
data = s.recv(1024)
accept = server_start(data)
print('[TWAMP-Control] Control-Client ', CONTROL_CLIENT, ' received Server-Start msg from ', server)
if accept != 0:
print('[Server Start] The remote server is not willing to continue communication as the Accept field was ' +
str(accept) + ' instead of zero (0)')
s.close()
sys.exit(1)
# --- Preparing TWAMP-Test ---
from session_sender import Listening
from session_sender import Sending
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # AF_INET for IPv4 and SOCK_DGRAM for UDP
# Set IP TTL to 255 according to https://tools.ietf.org/html/rfc4656#section-4.1.2
sock.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, 255)
sock.settimeout(5) # Set timeout of 5 seconds to blocking operations such as recvfrom()
sock.bind(SESSION_SENDER)
# --- End of Preparation ---
for tos in [0, 32, 64, 96, 128, 160, 192, 224]:
request_tw_session_msg = request_tw_session(SESSION_SENDER, session_reflector, tos)
s.send(request_tw_session_msg)
print('[TWAMP-Control] Control-Client ', CONTROL_CLIENT, ' sent Request-TW-Session msg to ', server, 'for ToS', tos)
data = s.recv(1024)
accept, session_reflector_port, sid = accept_session(data)
print('[TWAMP-Control] Control-Client ', CONTROL_CLIENT, ' received Accept-Session msg from ', server, ' with SID ',
sid)
if accept != 0:
print('[Accept Session] The remote server is not willing to continue communication as the Accept field was ' +
str(accept) + ' instead of zero (0)')
s.close()
sys.exit(1)
elif session_reflector_port != session_reflector[1]:
print('[Accept Session] The remote server cannot / will not create a TWAMP-test session on UDP port ' +
str(session_reflector[1]) + ' but instead replied with ' + str(session_reflector_port) +
'.\n Stopping ...')
s.close()
sys.exit(1)
start_sessions_msg = start_sessions()
s.send(start_sessions_msg)
print('[TWAMP-Control] Control-Client ', CONTROL_CLIENT, ' sent Start-Sessions msg to ', server)
data = s.recv(1024)
accept = start_ack(data)
print('[TWAMP-Control] Control-Client ', CONTROL_CLIENT, ' received Start-Ack msg from ', server)
if accept != 0:
print('[Start Ack] The remote server is not willing to continue communication as the Accept field was ' +
str(accept) + ' instead of zero (0)')
s.close()
sys.exit(1)
else:
# --- Start TWAMP Test ---
print('\n[TWAMP-Test] Starting UDP traffic with ToS', tos)
print('Session-Sender is', SESSION_SENDER, 'and Session-Reflector is ', session_reflector)
# Using classes from file session_sender.py
listener = Listening(sock, session_reflector[0], session_reflector[1], tos)
sender = Sending(sock, session_reflector[0], session_reflector[1], tos, 15)
listener.setName('TWAMP_TEST_SESSION_SENDER___LISTENING_THREAD__TOS_' + str(tos))
sender.setName('TWAMP_TEST_SESSION_SENDER___SENDING_THREAD__TOS_' + str(tos))
listener.start()
sender.start()
sender.join()
listener.join() # This (main) thread must wait until the Listening thread is finished
print('[TWAMP-Test] Test with ToS ' + str(tos) + ' has finished.\n')
# --- End of Test ---
stop_sessions_msg = stop_sessions()
s.send(stop_sessions_msg)
print('[TWAMP-Control] Control-Client ', CONTROL_CLIENT, ' sent Stop-Sessions msg to ', server)
sock.close()
s.close()
|
|
import sqlalchemy as sa
from sqlalchemy import Column
from sqlalchemy import event
from sqlalchemy import Integer
from sqlalchemy import Table
from sqlalchemy import util
from sqlalchemy.ext import instrumentation
from sqlalchemy.orm import attributes
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm import clear_mappers
from sqlalchemy.orm import events
from sqlalchemy.orm.attributes import del_attribute
from sqlalchemy.orm.attributes import get_attribute
from sqlalchemy.orm.attributes import set_attribute
from sqlalchemy.orm.instrumentation import is_instrumented
from sqlalchemy.orm.instrumentation import manager_of_class
from sqlalchemy.orm.instrumentation import register_class
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not
from sqlalchemy.testing import ne_
from sqlalchemy.testing.util import decorator
@decorator
def modifies_instrumentation_finders(fn, *args, **kw):
pristine = instrumentation.instrumentation_finders[:]
try:
fn(*args, **kw)
finally:
del instrumentation.instrumentation_finders[:]
instrumentation.instrumentation_finders.extend(pristine)
class _ExtBase:
@classmethod
def teardown_test_class(cls):
instrumentation._reinstall_default_lookups()
class MyTypesManager(instrumentation.InstrumentationManager):
def instrument_attribute(self, class_, key, attr):
pass
def install_descriptor(self, class_, key, attr):
pass
def uninstall_descriptor(self, class_, key):
pass
def instrument_collection_class(self, class_, key, collection_class):
return MyListLike
def get_instance_dict(self, class_, instance):
return instance._goofy_dict
def initialize_instance_dict(self, class_, instance):
instance.__dict__["_goofy_dict"] = {}
def install_state(self, class_, instance, state):
instance.__dict__["_my_state"] = state
def state_getter(self, class_):
return lambda instance: instance.__dict__["_my_state"]
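# MyTypesManager exercises the legacy InstrumentationManager extension point:
# attribute values live in the instance's _goofy_dict and the ClassManager
# state object is stashed under _my_state instead of SQLAlchemy's default
# __dict__-based instrumentation. MyListLike below provides the _sa_* hooks
# that adapt a plain list to the collection event protocol.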
class MyListLike(list):
# add @appender, @remover decorators as needed
_sa_iterator = list.__iter__
_sa_linker = None
_sa_converter = None
def _sa_appender(self, item, _sa_initiator=None):
if _sa_initiator is not False:
self._sa_adapter.fire_append_event(item, _sa_initiator)
list.append(self, item)
append = _sa_appender
def _sa_remover(self, item, _sa_initiator=None):
self._sa_adapter.fire_pre_remove_event(_sa_initiator)
if _sa_initiator is not False:
self._sa_adapter.fire_remove_event(item, _sa_initiator)
list.remove(self, item)
remove = _sa_remover
MyBaseClass, MyClass = None, None
class DisposeTest(_ExtBase, fixtures.TestBase):
def test_unregister(self, registry):
class MyClassState(instrumentation.InstrumentationManager):
def manage(self, class_, manager):
setattr(class_, "xyz", manager)
def unregister(self, class_, manager):
delattr(class_, "xyz")
def manager_getter(self, class_):
def get(cls):
return cls.xyz
return get
class MyClass:
__sa_instrumentation_manager__ = MyClassState
assert attributes.manager_of_class(MyClass) is None
t = Table(
"my_table",
registry.metadata,
Column("id", Integer, primary_key=True),
)
registry.map_imperatively(MyClass, t)
manager = attributes.manager_of_class(MyClass)
is_not(manager, None)
is_(manager, MyClass.xyz)
registry.configure()
registry.dispose()
manager = attributes.manager_of_class(MyClass)
is_(manager, None)
assert not hasattr(MyClass, "xyz")
class UserDefinedExtensionTest(_ExtBase, fixtures.ORMTest):
@classmethod
def setup_test_class(cls):
global MyBaseClass, MyClass
class MyBaseClass:
__sa_instrumentation_manager__ = (
instrumentation.InstrumentationManager
)
class MyClass:
# This proves that a staticmethod will work here; don't
# flatten this back to a class assignment!
def __sa_instrumentation_manager__(cls):
return MyTypesManager(cls)
__sa_instrumentation_manager__ = staticmethod(
__sa_instrumentation_manager__
)
# This proves SA can handle a class with non-string dict keys
if util.cpython:
locals()[42] = 99 # Don't remove this line!
def __init__(self, **kwargs):
for k in kwargs:
setattr(self, k, kwargs[k])
def __getattr__(self, key):
if is_instrumented(self, key):
return get_attribute(self, key)
else:
try:
return self._goofy_dict[key]
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
if is_instrumented(self, key):
set_attribute(self, key, value)
else:
self._goofy_dict[key] = value
def __hasattr__(self, key):
if is_instrumented(self, key):
return True
else:
return key in self._goofy_dict
def __delattr__(self, key):
if is_instrumented(self, key):
del_attribute(self, key)
else:
del self._goofy_dict[key]
def teardown_test(self):
clear_mappers()
def test_instance_dict(self):
class User(MyClass):
pass
register_class(User)
attributes.register_attribute(
User, "user_id", uselist=False, useobject=False
)
attributes.register_attribute(
User, "user_name", uselist=False, useobject=False
)
attributes.register_attribute(
User, "email_address", uselist=False, useobject=False
)
u = User()
u.user_id = 7
u.user_name = "john"
u.email_address = "[email protected]"
eq_(
u.__dict__,
{
"_my_state": u._my_state,
"_goofy_dict": {
"user_id": 7,
"user_name": "john",
"email_address": "[email protected]",
},
},
)
def test_basic(self):
for base in (object, MyBaseClass, MyClass):
class User(base):
pass
register_class(User)
attributes.register_attribute(
User, "user_id", uselist=False, useobject=False
)
attributes.register_attribute(
User, "user_name", uselist=False, useobject=False
)
attributes.register_attribute(
User, "email_address", uselist=False, useobject=False
)
u = User()
u.user_id = 7
u.user_name = "john"
u.email_address = "[email protected]"
eq_(u.user_id, 7)
eq_(u.user_name, "john")
eq_(u.email_address, "[email protected]")
attributes.instance_state(u)._commit_all(
attributes.instance_dict(u)
)
eq_(u.user_id, 7)
eq_(u.user_name, "john")
eq_(u.email_address, "[email protected]")
u.user_name = "heythere"
u.email_address = "[email protected]"
eq_(u.user_id, 7)
eq_(u.user_name, "heythere")
eq_(u.email_address, "[email protected]")
def test_deferred(self):
for base in (object, MyBaseClass, MyClass):
class Foo(base):
pass
data = {"a": "this is a", "b": 12}
def loader(state, keys, passive):
for k in keys:
state.dict[k] = data[k]
return attributes.ATTR_WAS_SET
manager = register_class(Foo)
manager.expired_attribute_loader = loader
attributes.register_attribute(
Foo, "a", uselist=False, useobject=False
)
attributes.register_attribute(
Foo, "b", uselist=False, useobject=False
)
if base is object:
assert Foo not in (
instrumentation._instrumentation_factory._state_finders
)
else:
assert Foo in (
instrumentation._instrumentation_factory._state_finders
)
f = Foo()
attributes.instance_state(f)._expire(
attributes.instance_dict(f), set()
)
eq_(f.a, "this is a")
eq_(f.b, 12)
f.a = "this is some new a"
attributes.instance_state(f)._expire(
attributes.instance_dict(f), set()
)
eq_(f.a, "this is a")
eq_(f.b, 12)
attributes.instance_state(f)._expire(
attributes.instance_dict(f), set()
)
f.a = "this is another new a"
eq_(f.a, "this is another new a")
eq_(f.b, 12)
attributes.instance_state(f)._expire(
attributes.instance_dict(f), set()
)
eq_(f.a, "this is a")
eq_(f.b, 12)
del f.a
eq_(f.a, None)
eq_(f.b, 12)
attributes.instance_state(f)._commit_all(
attributes.instance_dict(f)
)
eq_(f.a, None)
eq_(f.b, 12)
def test_inheritance(self):
"""tests that attributes are polymorphic"""
for base in (object, MyBaseClass, MyClass):
class Foo(base):
pass
class Bar(Foo):
pass
register_class(Foo)
register_class(Bar)
def func1(state, passive):
return "this is the foo attr"
def func2(state, passive):
return "this is the bar attr"
def func3(state, passive):
return "this is the shared attr"
attributes.register_attribute(
Foo, "element", uselist=False, callable_=func1, useobject=True
)
attributes.register_attribute(
Foo, "element2", uselist=False, callable_=func3, useobject=True
)
attributes.register_attribute(
Bar, "element", uselist=False, callable_=func2, useobject=True
)
x = Foo()
y = Bar()
assert x.element == "this is the foo attr"
assert y.element == "this is the bar attr", y.element
assert x.element2 == "this is the shared attr"
assert y.element2 == "this is the shared attr"
def test_collection_with_backref(self):
for base in (object, MyBaseClass, MyClass):
class Post(base):
pass
class Blog(base):
pass
register_class(Post)
register_class(Blog)
attributes.register_attribute(
Post,
"blog",
uselist=False,
backref="posts",
trackparent=True,
useobject=True,
)
attributes.register_attribute(
Blog,
"posts",
uselist=True,
backref="blog",
trackparent=True,
useobject=True,
)
b = Blog()
(p1, p2, p3) = (Post(), Post(), Post())
b.posts.append(p1)
b.posts.append(p2)
b.posts.append(p3)
self.assert_(b.posts == [p1, p2, p3])
self.assert_(p2.blog is b)
p3.blog = None
self.assert_(b.posts == [p1, p2])
p4 = Post()
p4.blog = b
self.assert_(b.posts == [p1, p2, p4])
p4.blog = b
p4.blog = b
self.assert_(b.posts == [p1, p2, p4])
# assert no failure removing None
p5 = Post()
p5.blog = None
del p5.blog
def test_history(self):
for base in (object, MyBaseClass, MyClass):
class Foo(base):
pass
class Bar(base):
pass
register_class(Foo)
register_class(Bar)
attributes.register_attribute(
Foo, "name", uselist=False, useobject=False
)
attributes.register_attribute(
Foo, "bars", uselist=True, trackparent=True, useobject=True
)
attributes.register_attribute(
Bar, "name", uselist=False, useobject=False
)
f1 = Foo()
f1.name = "f1"
eq_(
attributes.get_state_history(
attributes.instance_state(f1), "name"
),
(["f1"], (), ()),
)
b1 = Bar()
b1.name = "b1"
f1.bars.append(b1)
eq_(
attributes.get_state_history(
attributes.instance_state(f1), "bars"
),
([b1], [], []),
)
attributes.instance_state(f1)._commit_all(
attributes.instance_dict(f1)
)
attributes.instance_state(b1)._commit_all(
attributes.instance_dict(b1)
)
eq_(
attributes.get_state_history(
attributes.instance_state(f1), "name"
),
((), ["f1"], ()),
)
eq_(
attributes.get_state_history(
attributes.instance_state(f1), "bars"
),
((), [b1], ()),
)
f1.name = "f1mod"
b2 = Bar()
b2.name = "b2"
f1.bars.append(b2)
eq_(
attributes.get_state_history(
attributes.instance_state(f1), "name"
),
(["f1mod"], (), ["f1"]),
)
eq_(
attributes.get_state_history(
attributes.instance_state(f1), "bars"
),
([b2], [b1], []),
)
f1.bars.remove(b1)
eq_(
attributes.get_state_history(
attributes.instance_state(f1), "bars"
),
([b2], [], [b1]),
)
def test_null_instrumentation(self):
class Foo(MyBaseClass):
pass
register_class(Foo)
attributes.register_attribute(
Foo, "name", uselist=False, useobject=False
)
attributes.register_attribute(
Foo, "bars", uselist=True, trackparent=True, useobject=True
)
assert Foo.name == attributes.manager_of_class(Foo)["name"]
assert Foo.bars == attributes.manager_of_class(Foo)["bars"]
def test_alternate_finders(self):
"""Ensure the generic finder front-end deals with edge cases."""
class Unknown:
pass
class Known(MyBaseClass):
pass
register_class(Known)
k, u = Known(), Unknown()
assert instrumentation.manager_of_class(Unknown) is None
assert instrumentation.manager_of_class(Known) is not None
assert instrumentation.manager_of_class(None) is None
assert attributes.instance_state(k) is not None
assert_raises((AttributeError, KeyError), attributes.instance_state, u)
assert_raises(
(AttributeError, KeyError), attributes.instance_state, None
)
def test_unmapped_not_type_error(self):
"""extension version of the same test in test_mapper.
fixes #3408
"""
assert_raises_message(
sa.exc.ArgumentError,
"Class object expected, got '5'.",
class_mapper,
5,
)
def test_unmapped_not_type_error_iter_ok(self):
"""extension version of the same test in test_mapper.
fixes #3408
"""
assert_raises_message(
sa.exc.ArgumentError,
r"Class object expected, got '\(5, 6\)'.",
class_mapper,
(5, 6),
)
class FinderTest(_ExtBase, fixtures.ORMTest):
def test_standard(self):
class A:
pass
register_class(A)
eq_(type(manager_of_class(A)), instrumentation.ClassManager)
def test_nativeext_interfaceexact(self):
class A:
__sa_instrumentation_manager__ = (
instrumentation.InstrumentationManager
)
register_class(A)
ne_(type(manager_of_class(A)), instrumentation.ClassManager)
def test_nativeext_submanager(self):
class Mine(instrumentation.ClassManager):
pass
class A:
__sa_instrumentation_manager__ = Mine
register_class(A)
eq_(type(manager_of_class(A)), Mine)
@modifies_instrumentation_finders
def test_customfinder_greedy(self):
class Mine(instrumentation.ClassManager):
pass
class A:
pass
def find(cls):
return Mine
instrumentation.instrumentation_finders.insert(0, find)
register_class(A)
eq_(type(manager_of_class(A)), Mine)
@modifies_instrumentation_finders
def test_customfinder_pass(self):
class A:
pass
def find(cls):
return None
instrumentation.instrumentation_finders.insert(0, find)
register_class(A)
eq_(type(manager_of_class(A)), instrumentation.ClassManager)
class InstrumentationCollisionTest(_ExtBase, fixtures.ORMTest):
def test_none(self):
class A:
pass
register_class(A)
def mgr_factory(cls):
return instrumentation.ClassManager(cls)
class B:
__sa_instrumentation_manager__ = staticmethod(mgr_factory)
register_class(B)
class C:
__sa_instrumentation_manager__ = instrumentation.ClassManager
register_class(C)
def test_single_down(self):
class A:
pass
register_class(A)
def mgr_factory(cls):
return instrumentation.ClassManager(cls)
class B(A):
__sa_instrumentation_manager__ = staticmethod(mgr_factory)
assert_raises_message(
TypeError,
"multiple instrumentation implementations",
register_class,
B,
)
def test_single_up(self):
class A:
pass
# delay registration
def mgr_factory(cls):
return instrumentation.ClassManager(cls)
class B(A):
__sa_instrumentation_manager__ = staticmethod(mgr_factory)
register_class(B)
assert_raises_message(
TypeError,
"multiple instrumentation implementations",
register_class,
A,
)
def test_diamond_b1(self):
def mgr_factory(cls):
return instrumentation.ClassManager(cls)
class A:
pass
class B1(A):
pass
class B2(A):
__sa_instrumentation_manager__ = staticmethod(mgr_factory)
class C:
pass
assert_raises_message(
TypeError,
"multiple instrumentation implementations",
register_class,
B1,
)
def test_diamond_b2(self):
def mgr_factory(cls):
return instrumentation.ClassManager(cls)
class A:
pass
class B1(A):
pass
class B2(A):
__sa_instrumentation_manager__ = staticmethod(mgr_factory)
class C:
pass
register_class(B2)
assert_raises_message(
TypeError,
"multiple instrumentation implementations",
register_class,
B1,
)
def test_diamond_c_b(self):
def mgr_factory(cls):
return instrumentation.ClassManager(cls)
class A:
pass
class B1(A):
pass
class B2(A):
__sa_instrumentation_manager__ = staticmethod(mgr_factory)
class C:
pass
register_class(C)
assert_raises_message(
TypeError,
"multiple instrumentation implementations",
register_class,
B1,
)
class ExtendedEventsTest(_ExtBase, fixtures.ORMTest):
"""Allow custom Events implementations."""
@modifies_instrumentation_finders
def test_subclassed(self):
class MyEvents(events.InstanceEvents):
pass
class MyClassManager(instrumentation.ClassManager):
dispatch = event.dispatcher(MyEvents)
instrumentation.instrumentation_finders.insert(
0, lambda cls: MyClassManager
)
class A:
pass
register_class(A)
manager = instrumentation.manager_of_class(A)
assert issubclass(manager.dispatch._events, MyEvents)
|
|
import collections
import re
from datetime import datetime
"""
State machine concept...
States:
BeforeBlock
LeadingComments
Summary
Postings
Line classifications:
Empty
GlobalComment
Summary
TransactionComment
Posting
"""
BLANK = 0
COMMENT = 1
SUMMARY = 2
POSTING = 3
TXNCOMMENT = 4
INVALID = 5
COMMENT_CHARS = ";#|*%"
BLANK_CHARS = " \t"
reBlank = re.compile(r'^\s*$')
reComment = re.compile(r'^[;#|\*%].*$')
reSummary = re.compile(r'^(?P<date>\d{4}/\d\d/\d\d)(?: +(?P<cleared>[!\*]))?(?: +\((?P<code>.*?)\))? *(?:(?P<summary>.*?))? *$')
rePosting = re.compile(r'^\s+(?P<account>[^;#|\*% ].*?)(?:\s{2,}(?P<amount>.*))?$')
reTxnComment = re.compile(r'^\s+[;#|\*%].*$')
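# Hedged sketch: a Python counterpart of the commented-out Go classifyLine
# further below, pairing each regex above with its classification constant.
# Not used by the state-machine parser that follows; illustration only.
def classify_line(line):
    if reBlank.match(line):
        return BLANK, None
    if reComment.match(line):
        return COMMENT, None
    match = reSummary.match(line)
    if match:
        return SUMMARY, match.groupdict()
    match = rePosting.match(line)
    if match:
        return POSTING, match.groupdict()
    if reTxnComment.match(line):
        return TXNCOMMENT, None
    return INVALID, None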
"""
Blocks are a literal encapsulation of a Ledger transaction. They
are not called transactions because the actual ledger file strings
and comments are preserved. A ledger file is a sequence of blocks.
Textually, a block is defined as:
<0+ comment lines>
<0 or 1 summary line: a) left justified b) starting with a yyyy/mm/dd date>
<0+ account lines or comments: a) indented at least one space>
Whitespace between blocks is ignored.
"""
Posting = collections.namedtuple("Posting", "account amount")
class Block:
def __init__(self):
self.lines = []
self.postings = []
self.date = datetime.now()
self.valid = False
self.summary = ""
self.cleared = None
def write(self, output):
if not self.is_transaction():
for line in self.lines:
output.write(line + '\n')
else:
output.write("{} {} {}\n".format(self.date, self.cleared or " ", self.summary))
for posting in self.postings:
                output.write("    {:<50} {}".format(posting.account, posting.amount or "").rstrip() + "\n")
output.write("\n")
def is_transaction(self):
return len(self.postings) > 0
def is_empty(self):
for line in self.lines:
if len(line.rstrip()) > 0:
return False
return True
def __repr__(self):
return repr(self.lines)
def st_raw(line, block):
if reSummary.match(line):
match = reSummary.match(line)
block = Block()
matches = match.groupdict()
block.date = datetime.strptime(matches["date"], "%Y/%m/%d").date()
block.cleared = matches["cleared"]
block.summary = matches["summary"]
block.lines.append(line)
return block, st_in_txn
if line.startswith(' ') and len(line.rstrip()) > 0:
raise Exception(line)
block.lines.append(line)
return block, st_raw
def st_in_txn(line, block):
assert block is not None
if rePosting.match(line):
match = rePosting.match(line)
posting = match.groupdict()
block.postings.append(Posting(posting["account"], posting["amount"]))
block.lines.append(line)
return block, st_in_txn
    if reTxnComment.match(line):
        block.lines.append(line)
        return block, st_in_txn
if reBlank.match(line):
return Block(), st_raw
raise Exception(line)
def parse(f):
state = st_raw
block = Block()
blocks = [block]
for line in f:
next_block, state = state(line.rstrip(), block)
if next_block is not block:
if block.is_empty():
blocks[-1] = next_block
else:
blocks.append(next_block)
block = next_block
if blocks[-1].is_empty():
blocks = blocks[0:-1]
return blocks
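# Hedged usage sketch: feed a small hypothetical ledger text through parse()
# above and write the resulting blocks back out (helper name is made up).
def _parse_example(output=None):
    import io
    import sys
    output = output or sys.stdout
    sample = (
        "; a comment block\n"
        "\n"
        "2016/03/14 * Grocery store\n"
        "    Expenses:Food:Groceries    42.17\n"
        "    Assets:Checking\n"
    )
    for blk in parse(io.StringIO(sample)):
        blk.write(output)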
"""
BeforeBlock
LeadingComments
Summary
Postings
"""
r"""
// Note: values will not have '-', intentionally
var acctAmtRegex = regexp.MustCompile(`^\s+(.*?\S)(?:\s{2,}.*?([\d,\.]+))?\s*$`)
// ParseLines turns a chunk of text into a group of Blocks.
func ParseLines(data io.Reader) []Block {
const (
stBeforeBlock = iota
stLeadingComments
stSummary
stPostings
)
var block Block
var blocks []Block
var state = stBeforeBlock
scanner := bufio.NewScanner(data)
for scanner.Scan() {
//_ = "breakpoint"
line := scanner.Text()
switch state {
//case stBeforeBlock:
// if len(strings.TrimSpace(line)) > 0 {
// block.lines = append(block.lines, line)
// }
}
if len(strings.TrimSpace(line)) == 0 {
if !block.Empty() {
blocks = append(blocks, block)
block = Block{}
}
} else {
t, err := time.Parse("2006/01/02", line[0:10])
if err == nil {
// Start a new block
if !block.Empty() {
blocks = append(blocks, block)
block = Block{}
}
block.date = t
}
block.lines = append(block.lines, line)
}
}
if !block.Empty() {
blocks = append(blocks, block)
}
return blocks
}
func (b *Block) Empty() bool {
return len(b.lines) == 0
}
func (b Block) Accounts() []string {
var ret []string
for _, l := range b.lines {
m := acctAmtRegex.FindStringSubmatch(l)
if len(m) > 0 {
ret = append(ret, m[1])
}
}
sort.Strings(ret)
return ret
}
func (b Block) Amounts() []string {
var ret []string
for _, l := range b.lines {
m := acctAmtRegex.FindStringSubmatch(l)
if len(m) > 0 {
ret = append(ret, m[2])
}
}
sort.Strings(ret)
return ret
}
// IsDupe returns true if other is a likely duplicate based on:
// date
// affected accounts
// amounts
func (b Block) IsDupe(other Block, tolerance time.Duration) bool {
// Check time
timeDiff := b.date.Sub(other.date)
if timeDiff < 0 {
timeDiff = -timeDiff
}
if timeDiff > tolerance {
return false
}
// Check affected accounts
accts := b.Accounts()
acctsOther := other.Accounts()
if len(accts) != len(acctsOther) {
return false
}
for i := range accts {
if accts[i] != acctsOther[i] {
return false
}
}
// Check affected accounts
amts := b.Amounts()
amtsOther := other.Amounts()
if len(amts) != len(amtsOther) {
return false
}
for i := range amts {
if amts[i] != amtsOther[i] {
return false
}
}
return true
}
// prepareBlock processes []lines data, checking for errors and
// populating internal fields
func (b *Block) prepareBlock() {
b.valid = true
}
func classifyLine(line string) (int, map[string]string) {
var cls = clsInvalid
var data map[string]string
var captures []string
var matchingRe *regexp.Regexp
if reBlank.MatchString(line) {
cls = clsBlank
} else if reComment.MatchString(line) {
cls = clsComment
} else if rePosting.MatchString(line) {
cls = clsPosting
} else if reTxnComment.MatchString(line) {
cls = clsTxnComment
} else if captures = reSummary.FindStringSubmatch(line); len(captures) > 0 {
cls = clsSummary
matchingRe = reSummary
}
if captures != nil {
data = make(map[string]string)
for i, key := range matchingRe.SubexpNames() {
if i > 0 {
data[key] = captures[i]
}
}
}
return cls, data
}
// FindDupes returns a list of likely duplicate blocks. Duplicates
// are blocks with the same date and transaction structure. The same
// accounts and amounts must be present in both for it to be a dupe.
func FindDupes(ledger Ledger) {
blocks := ledger.blocks
for i := range blocks {
for j := i + 1; j < len(blocks); j++ {
if blocks[i].IsDupe(blocks[j], 0) {
fmt.Printf("%v,%v:%v\n", i, j, blocks[i].lines[0])
}
}
}
}
func NewBlock(t transaction, config AccountConfig) Block {
lines := fmt.Sprintf("%s %s\n", t.date, t.description)
lines += fmt.Sprintf(" %s %s\n", importAcct, t.amount)
lines += fmt.Sprintf(" %s", config.TargetAccount)
blocks := ParseLines(strings.NewReader(lines))
if len(blocks) != 1 {
log.Fatalf("Expected 1 block, got %+v", blocks)
}
return blocks[0]
}
"""
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import inspect
import argparse
import abc
from fnmatch import fnmatch
from collections import OrderedDict
from six import add_metaclass, text_type
from .resource import Resource
from .resource import Collection, RootCollection
from .schema import ResourceNotDefined
from .utils import Path, classproperty, parallel_map
from .exceptions import CommandError, NotFound
from .context import Context
class ArgumentParser(argparse.ArgumentParser):
def exit(self, status=0, message=None):
raise CommandError(message or '')
class BaseOption(object):
_creation_idx = 0
def __init__(self, *args, **kwargs):
self.attr = ''
self.complete = None
if 'complete' in kwargs:
self.complete = kwargs.pop('complete')
self.kwargs = kwargs
# keep track of options order
self._creation_idx = BaseOption._creation_idx
BaseOption._creation_idx += 1
@property
def help(self):
return self.kwargs.get('help', '') % self.kwargs
@property
def dest(self):
return self.kwargs.get('dest', self.attr)
@property
def is_multiple(self):
return self.kwargs.get('nargs') in ('*', '+') or \
self.kwargs.get('action') in ('append',)
@property
def nargs(self):
if self.kwargs.get('nargs') == '?':
return 1
return self.kwargs.get('nargs', 1)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, self.attr)
class Option(BaseOption):
def __init__(self, short_name=None, **kwargs):
BaseOption.__init__(self, **kwargs)
self.short_name = short_name
@property
def need_value(self):
return self.kwargs.get('action') not in ('store_true', 'store_false')
@property
def long_name(self):
return '--%s' % self.attr.replace('_', '-')
@property
def option_strings(self):
return [n for n in (self.long_name, self.short_name) if n is not None]
class Arg(BaseOption):
pass
def experimental(cls):
old_call = cls.__call__
def new_call(self, *args, **kwargs):
print("This command is experimental. Use at your own risk.")
        return old_call(self, *args, **kwargs)
cls.__call__ = new_call
return cls
def _path_to_resources(path, predicate=None, filters=None, parent_uuid=None):
if any([c in text_type(path) for c in ('*', '?')]):
if any([c in path.base for c in ('*', '?')]):
col = RootCollection(fetch=True,
filters=filters,
parent_uuid=parent_uuid)
else:
col = Collection(path.base, fetch=True,
filters=filters,
parent_uuid=parent_uuid)
for r in col:
if predicate and not predicate(r):
continue
# list of paths to match against
paths = [r.path,
Path('/', r.type, text_type(r.fq_name))]
if any([fnmatch(text_type(p), text_type(path)) for p in paths]):
yield r
elif path.is_resource:
if path.is_uuid:
kwargs = {'uuid': path.name}
else:
kwargs = {'fq_name': path.name}
try:
r = Resource(path.base,
check=True,
**kwargs)
except ResourceNotDefined as e:
raise CommandError(text_type(e))
if predicate and not predicate(r):
            return
yield r
elif path.is_collection:
c = Collection(path.base,
filters=filters,
parent_uuid=parent_uuid)
if predicate and not predicate(c):
            return
yield c
def expand_paths(paths=None, predicate=None, filters=None, parent_uuid=None):
"""Return an unique list of resources or collections from a list of paths.
Supports fq_name and wilcards resolution.
>>> expand_paths(['virtual-network',
'floating-ip/2a0a54b4-a420-485e-8372-42f70a627ec9'])
[Collection('virtual-network'),
Resource('floating-ip', uuid='2a0a54b4-a420-485e-8372-42f70a627ec9')]
:param paths: list of paths relative to the current path
that may contain wildcards (*, ?) or fq_names
:type paths: [str]
:param predicate: function to filter found resources
:type predicate: f(resource) -> bool
:param filters: list of filters for Collections
:type filters: [(name, value), ...]
:rtype: [Resource or Collection]
:raises BadPath: path cannot be resolved
"""
if not paths:
paths = [Context().shell.current_path]
else:
paths = [Context().shell.current_path / res for res in paths]
# use a dict to have unique paths
# but keep them ordered
result = OrderedDict()
for res in parallel_map(_path_to_resources, paths,
kwargs={'predicate': predicate,
'filters': filters,
'parent_uuid': parent_uuid},
workers=50):
for r in res:
result[r.path] = r
resources = list(result.values())
if not resources:
raise NotFound()
return resources
@add_metaclass(abc.ABCMeta)
class Command(object):
"""Base class for commands
"""
description = ""
"""Description of the command"""
aliases = []
"""Command aliases"""
_options = None
_args = None
def __init__(self, name):
self.parser = ArgumentParser(prog=name, description=self.description)
self.add_arguments_to_parser(self.parser)
self._is_piped = False
def current_path(self, resource):
"""Return current path for resource
:param resource: resource or collection
:type resource: Resource|Collection
:rtype: str
"""
return text_type(resource.path.relative_to(Context().shell.current_path))
@property
def is_piped(self):
"""Return True if the command result is beeing piped
to another command.
:rtype: bool
"""
return not sys.stdout.isatty() or self._is_piped
@is_piped.setter
def is_piped(self, value):
self._is_piped = value
@classproperty
def options(cls):
if cls._options is not None:
return cls._options
cls._options = OrderedDict()
for attr, option in sorted(
inspect.getmembers(cls, lambda o: isinstance(o, Option)),
key=lambda i: i[1]._creation_idx):
option.attr = attr
cls._options[text_type(attr)] = option
return cls._options
@classproperty
def args(cls):
if cls._args is not None:
return cls._args
cls._args = OrderedDict()
for attr, arg in sorted(
inspect.getmembers(cls, lambda o: isinstance(o, Arg)),
key=lambda i: i[1]._creation_idx):
arg.attr = attr
cls._args[text_type(attr)] = arg
return cls._args
@classmethod
def add_arguments_to_parser(cls, parser):
for (arg_name, arg) in cls.args.items():
parser.add_argument(arg_name, **arg.kwargs)
for (option_name, option) in cls.options.items():
parser.add_argument(*option.option_strings, **option.kwargs)
def parse_and_call(self, *args):
args = self.parser.parse_args(args=args)
return self.__call__(**vars(args))
@abc.abstractmethod
def __call__(self, **kwargs):
"""Command must implement this method.
        The command must return a unicode string
(unicode in python2 or str in python3)
:param kwargs: options of the command
:rtype: unicode | str
"""
|
|
import hmac
import json
import urllib.parse
from .main import PullReqState, parse_commands, db_query, INTERRUPTED_BY_HOMU_RE, synchronize
from . import utils
from .utils import lazy_debug
import github3
import jinja2
import requests
import pkg_resources
from bottle import get, post, run, request, redirect, abort, response
import hashlib
from threading import Thread
import sys
import os
import traceback
import bottle
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024 * 10
class G:
pass
g = G()
def find_state(sha):
for repo_label, repo_states in g.states.items():
for state in repo_states.values():
if state.merge_sha == sha:
return state, repo_label
raise ValueError('Invalid SHA')
def get_repo(repo_label, repo_cfg):
repo = g.repos[repo_label]
if not repo:
g.repos[repo_label] = repo = g.gh.repository(repo_cfg['owner'], repo_cfg['name'])
assert repo.owner.login == repo_cfg['owner']
assert repo.name == repo_cfg['name']
return repo
@get('/')
def index():
return g.tpls['index'].render(repos=sorted(g.repos))
@get('/queue/<repo_label:path>')
def queue(repo_label):
logger = g.logger.getChild('queue')
lazy_debug(logger, lambda: 'repo_label: {}'.format(repo_label))
if repo_label == 'all':
labels = g.repos.keys()
multiple = True
repo_url = None
else:
labels = repo_label.split('+')
multiple = len(labels) > 1
repo_url = 'https://github.com/{}/{}'.format(
g.cfg['repo'][repo_label]['owner'],
g.cfg['repo'][repo_label]['name'])
states = []
for label in labels:
try:
states += g.states[label].values()
except KeyError:
abort(404, 'No such repository: {}'.format(label))
pull_states = sorted(states)
rows = []
for state in pull_states:
rows.append({
'status': state.get_status(),
'status_ext': ' (try)' if state.try_ else '',
'priority': 'rollup' if state.rollup else state.priority,
'url': 'https://github.com/{}/{}/pull/{}'.format(state.owner, state.name, state.num),
'num': state.num,
'approved_by': state.approved_by,
'title': state.title,
'head_ref': state.head_ref,
'mergeable': 'yes' if state.mergeable is True else 'no' if state.mergeable is False else '',
'assignee': state.assignee,
'repo_label': state.repo_label,
'repo_url': 'https://github.com/{}/{}'.format(state.owner, state.name),
})
return g.tpls['queue'].render(
repo_url=repo_url,
repo_label=repo_label,
states=rows,
oauth_client_id=g.cfg['github']['app_client_id'],
total=len(pull_states),
approved=len([x for x in pull_states if x.approved_by]),
rolled_up=len([x for x in pull_states if x.rollup]),
failed=len([x for x in pull_states if x.status == 'failure' or x.status == 'error']),
multiple=multiple,
)
@get('/callback')
def callback():
logger = g.logger.getChild('callback')
response.content_type = 'text/plain'
code = request.query.code
state = json.loads(request.query.state)
lazy_debug(logger, lambda: 'state: {}'.format(state))
try:
res = requests.post('https://github.com/login/oauth/access_token', data={
'client_id': g.cfg['github']['app_client_id'],
'client_secret': g.cfg['github']['app_client_secret'],
'code': code,
})
except Exception as ex:
logger.warn('/callback encountered an error during github oauth callback')
lazy_debug(logger, lambda: 'github oauth callback err: {}'.format(ex))
abort(502, 'Bad Gateway')
args = urllib.parse.parse_qs(res.text)
token = args['access_token'][0]
repo_label = state['repo_label']
repo_cfg = g.repo_cfgs[repo_label]
repo = get_repo(repo_label, repo_cfg)
user_gh = github3.login(token=token)
if state['cmd'] == 'rollup':
return rollup(user_gh, state, repo_label, repo_cfg, repo)
elif state['cmd'] == 'synch':
return synch(user_gh, state, repo_label, repo_cfg, repo)
else:
abort(400, 'Invalid command')
def rollup(user_gh, state, repo_label, repo_cfg, repo):
user_repo = user_gh.repository(user_gh.user().login, repo.name)
base_repo = user_gh.repository(repo.owner.login, repo.name)
nums = state.get('nums', [])
if nums:
try:
rollup_states = [g.states[repo_label][num] for num in nums]
except KeyError as e:
return 'Invalid PR number: {}'.format(e.args[0])
else:
rollup_states = [x for x in g.states[repo_label].values() if x.rollup]
rollup_states = [x for x in rollup_states if x.approved_by]
rollup_states.sort(key=lambda x: x.num)
if not rollup_states:
return 'No pull requests are marked as rollup'
base_ref = rollup_states[0].base_ref
base_sha = repo.ref('heads/' + base_ref).object.sha
utils.github_set_ref(
user_repo,
'heads/' + repo_cfg.get('branch', {}).get('rollup', 'rollup'),
base_sha,
force=True,
)
successes = []
failures = []
for state in rollup_states:
if base_ref != state.base_ref:
failures.append(state.num)
continue
merge_msg = 'Rollup merge of #{} - {}, r={}\n\n{}\n\n{}'.format(
state.num,
state.head_ref,
state.approved_by,
state.title,
state.body,
)
try:
user_repo.merge(repo_cfg.get('branch', {}).get('rollup', 'rollup'), state.head_sha, merge_msg)
except github3.models.GitHubError as e:
if e.code != 409:
raise
failures.append(state.num)
else:
successes.append(state.num)
title = 'Rollup of {} pull requests'.format(len(successes))
body = '- Successful merges: {}\n- Failed merges: {}'.format(
', '.join('#{}'.format(x) for x in successes),
', '.join('#{}'.format(x) for x in failures),
)
try:
pull = base_repo.create_pull(
title,
state.base_ref,
user_repo.owner.login + ':' + repo_cfg.get('branch', {}).get('rollup', 'rollup'),
body,
)
except github3.models.GitHubError as e:
return e.response.text
else:
redirect(pull.html_url)
@post('/github')
def github():
logger = g.logger.getChild('github')
response.content_type = 'text/plain'
payload = request.body.read()
info = request.json
lazy_debug(logger, lambda: 'info: {}'.format(utils.remove_url_keys_from_json(info)))
owner_info = info['repository']['owner']
owner = owner_info.get('login') or owner_info['name']
repo_label = g.repo_labels[owner, info['repository']['name']]
repo_cfg = g.repo_cfgs[repo_label]
hmac_table = {
"sha1": hashlib.sha1,
"sha256": hashlib.sha256,
"md5": hashlib.md5,
}
hmac_method_string, hmac_sig = request.headers['X-Hub-Signature'].split('=')
hmac_method = hmac_table.get(hmac_method_string.lower())
if hmac_method is None:
abort(400, 'Invalid hash method')
if hmac_sig != hmac.new(
repo_cfg['github']['secret'].encode('utf-8'),
payload,
hmac_method,
).hexdigest():
abort(400, 'Invalid signature')
event_type = request.headers['X-Github-Event']
if event_type == 'pull_request_review_comment':
action = info['action']
original_commit_id = info['comment']['original_commit_id']
head_sha = info['pull_request']['head']['sha']
if action == 'created' and original_commit_id == head_sha:
pull_num = info['pull_request']['number']
body = info['comment']['body']
username = info['sender']['login']
state = g.states[repo_label].get(pull_num)
if state:
state.title = info['pull_request']['title']
state.body = info['pull_request']['body']
if parse_commands(
body,
username,
repo_cfg,
state,
g.my_username,
g.db,
g.states,
realtime=True,
sha=original_commit_id,
):
state.save()
g.queue_handler()
elif event_type == 'pull_request':
action = info['action']
pull_num = info['number']
head_sha = info['pull_request']['head']['sha']
if action == 'synchronize':
state = g.states[repo_label][pull_num]
state.head_advanced(head_sha)
state.save()
elif action in ['opened', 'reopened']:
state = PullReqState(pull_num, head_sha, '', g.db, repo_label, g.mergeable_que, g.gh, info['repository']['owner']['login'], info['repository']['name'], g.repos)
state.title = info['pull_request']['title']
state.body = info['pull_request']['body']
state.head_ref = info['pull_request']['head']['repo']['owner']['login'] + ':' + info['pull_request']['head']['ref']
state.base_ref = info['pull_request']['base']['ref']
state.set_mergeable(info['pull_request']['mergeable'])
state.assignee = info['pull_request']['assignee']['login'] if info['pull_request']['assignee'] else ''
found = False
if action == 'reopened':
# FIXME: Review comments are ignored here
for comment in state.get_repo().issue(pull_num).iter_comments():
found = parse_commands(
comment.body,
comment.user.login,
repo_cfg,
state,
g.my_username,
g.db,
g.states,
) or found
status = ''
for info in utils.github_iter_statuses(state.get_repo(), state.head_sha):
if info.context == 'homu':
status = info.state
break
state.set_status(status)
state.save()
g.states[repo_label][pull_num] = state
if found:
g.queue_handler()
elif action == 'closed':
state = g.states[repo_label][pull_num]
if hasattr(state, 'fake_merge_sha'):
def inner():
utils.github_set_ref(
state.get_repo(),
'heads/' + state.base_ref,
state.merge_sha,
force=True,
)
def fail(err):
state.add_comment(':boom: Failed to recover from the artificial commit. See {} for details. ({})'.format(state.fake_merge_sha, err))
utils.retry_until(inner, fail, state)
del g.states[repo_label][pull_num]
db_query(g.db, 'DELETE FROM pull WHERE repo = ? AND num = ?', [repo_label, pull_num])
db_query(g.db, 'DELETE FROM build_res WHERE repo = ? AND num = ?', [repo_label, pull_num])
db_query(g.db, 'DELETE FROM mergeable WHERE repo = ? AND num = ?', [repo_label, pull_num])
g.queue_handler()
elif action in ['assigned', 'unassigned']:
state = g.states[repo_label][pull_num]
state.assignee = info['pull_request']['assignee']['login'] if info['pull_request']['assignee'] else ''
state.save()
else:
lazy_debug(logger, lambda: 'Invalid pull_request action: {}'.format(action))
elif event_type == 'push':
ref = info['ref'][len('refs/heads/'):]
for state in list(g.states[repo_label].values()):
if state.base_ref == ref:
state.set_mergeable(None, cause={
'sha': info['head_commit']['id'],
'title': info['head_commit']['message'].splitlines()[0],
})
if state.head_sha == info['before']:
state.head_advanced(info['after'])
state.save()
elif event_type == 'issue_comment':
body = info['comment']['body']
username = info['comment']['user']['login']
pull_num = info['issue']['number']
state = g.states[repo_label].get(pull_num)
if 'pull_request' in info['issue'] and state:
state.title = info['issue']['title']
state.body = info['issue']['body']
if parse_commands(
body,
username,
repo_cfg,
state,
g.my_username,
g.db,
g.states,
realtime=True,
):
state.save()
g.queue_handler()
elif event_type == 'status':
try:
state, repo_label = find_state(info['sha'])
except ValueError:
return 'OK'
status_name = ""
if 'status' in repo_cfg:
for name, value in repo_cfg['status'].items():
if 'context' in value and value['context'] == info['context']:
status_name = name
if status_name is "":
return 'OK'
if info['state'] == 'pending':
return 'OK'
for row in info['branches']:
if row['name'] == state.base_ref:
return 'OK'
report_build_res(info['state'] == 'success', info['target_url'], 'status-' + status_name, state, logger, repo_cfg)
return 'OK'
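# Hedged helper sketch (not part of Homu; name is made up): compute the value
# GitHub sends in the X-Hub-Signature header for a raw payload (bytes) and a
# webhook secret, matching the verification at the top of github() above.
# Useful when hand-crafting test webhook requests.
def example_hub_signature(secret, payload, method='sha1'):
    digests = {'sha1': hashlib.sha1, 'sha256': hashlib.sha256, 'md5': hashlib.md5}
    sig = hmac.new(secret.encode('utf-8'), payload, digests[method]).hexdigest()
    return '{}={}'.format(method, sig)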
def report_build_res(succ, url, builder, state, logger, repo_cfg):
lazy_debug(logger,
lambda: 'build result {}: builder = {}, succ = {}, current build_res = {}'
.format(state, builder, succ, state.build_res_summary()))
state.set_build_res(builder, succ, url)
if succ:
if all(x['res'] for x in state.build_res.values()):
state.set_status('success')
desc = 'Test successful'
utils.github_create_status(state.get_repo(), state.head_sha, 'success', url, desc, context='homu')
urls = ', '.join('[{}]({})'.format(builder, x['url']) for builder, x in sorted(state.build_res.items()))
test_comment = ':sunny: {} - {}'.format(desc, urls)
if state.approved_by and not state.try_:
comment = test_comment + '\n' + 'Approved by: {}\nPushing {} to {}...'.format(state.approved_by, state.merge_sha, state.base_ref)
state.add_comment(comment)
try:
try:
utils.github_set_ref(state.get_repo(), 'heads/' + state.base_ref, state.merge_sha)
except github3.models.GitHubError:
utils.github_create_status(state.get_repo(), state.merge_sha, 'success', '', 'Branch protection bypassed', context='homu')
utils.github_set_ref(state.get_repo(), 'heads/' + state.base_ref, state.merge_sha)
state.fake_merge(repo_cfg)
except github3.models.GitHubError as e:
state.set_status('error')
desc = 'Test was successful, but fast-forwarding failed: {}'.format(e)
utils.github_create_status(state.get_repo(), state.head_sha, 'error', url, desc, context='homu')
state.add_comment(':eyes: ' + desc)
else:
comment = test_comment + '\n' + 'State: approved={} try={}'.format(state.approved_by, state.try_)
state.add_comment(comment)
else:
if state.status == 'pending':
state.set_status('failure')
desc = 'Test failed'
utils.github_create_status(state.get_repo(), state.head_sha, 'failure', url, desc, context='homu')
state.add_comment(':broken_heart: {} - [{}]({})'.format(desc, builder, url))
g.queue_handler()
@post('/buildbot')
def buildbot():
logger = g.logger.getChild('buildbot')
response.content_type = 'text/plain'
for row in json.loads(request.forms.packets):
if row['event'] == 'buildFinished':
info = row['payload']['build']
lazy_debug(logger, lambda: 'info: {}'.format(info))
props = dict(x[:2] for x in info['properties'])
if 'retry' in info['text']:
continue
if not props['revision']:
continue
try:
state, repo_label = find_state(props['revision'])
except ValueError:
lazy_debug(logger,
lambda: 'Invalid commit ID from Buildbot: {}'.format(props['revision']))
continue
lazy_debug(logger, lambda: 'state: {}, {}'.format(state, state.build_res_summary()))
if info['builderName'] not in state.build_res:
lazy_debug(logger,
lambda: 'Invalid builder from Buildbot: {}'.format(info['builderName']))
continue
repo_cfg = g.repo_cfgs[repo_label]
if request.forms.secret != repo_cfg['buildbot']['secret']:
abort(400, 'Invalid secret')
build_succ = 'successful' in info['text'] or info['results'] == 0
url = '{}/builders/{}/builds/{}'.format(
repo_cfg['buildbot']['url'],
info['builderName'],
props['buildnumber'],
)
if 'interrupted' in info['text']:
step_name = ''
for step in reversed(info['steps']):
if 'interrupted' in step.get('text', []):
step_name = step['name']
break
if step_name:
try:
res = requests.get('{}/builders/{}/builds/{}/steps/{}/logs/interrupt'.format(
repo_cfg['buildbot']['url'],
info['builderName'],
props['buildnumber'],
step_name,
))
except Exception as ex:
logger.warn('/buildbot encountered an error during github logs request')
lazy_debug(logger, lambda: 'buildbot logs err: {}'.format(ex))
abort(502, 'Bad Gateway')
mat = INTERRUPTED_BY_HOMU_RE.search(res.text)
if mat:
interrupt_token = mat.group(1)
if getattr(state, 'interrupt_token', '') != interrupt_token:
state.interrupt_token = interrupt_token
if state.status == 'pending':
state.set_status('')
desc = ':snowman: The build was interrupted to prioritize another pull request.'
state.add_comment(desc)
utils.github_create_status(state.get_repo(), state.head_sha, 'error', url, desc, context='homu')
g.queue_handler()
continue
else:
logger.error('Corrupt payload from Buildbot')
report_build_res(build_succ, url, info['builderName'], state, logger, repo_cfg)
elif row['event'] == 'buildStarted':
info = row['payload']['build']
lazy_debug(logger, lambda: 'info: {}'.format(info))
props = dict(x[:2] for x in info['properties'])
if not props['revision']:
continue
try:
state, repo_label = find_state(props['revision'])
except ValueError:
pass
else:
if info['builderName'] in state.build_res:
repo_cfg = g.repo_cfgs[repo_label]
if request.forms.secret != repo_cfg['buildbot']['secret']:
abort(400, 'Invalid secret')
url = '{}/builders/{}/builds/{}'.format(
repo_cfg['buildbot']['url'],
info['builderName'],
props['buildnumber'],
)
state.set_build_res(info['builderName'], None, url)
if g.buildbot_slots[0] == props['revision']:
g.buildbot_slots[0] = ''
g.queue_handler()
return 'OK'
@post('/travis')
def travis():
logger = g.logger.getChild('travis')
info = json.loads(request.forms.payload)
lazy_debug(logger, lambda: 'info: {}'.format(utils.remove_url_keys_from_json(info)))
try:
state, repo_label = find_state(info['commit'])
except ValueError:
lazy_debug(logger, lambda: 'Invalid commit ID from Travis: {}'.format(info['commit']))
return 'OK'
lazy_debug(logger, lambda: 'state: {}, {}'.format(state, state.build_res_summary()))
if 'travis' not in state.build_res:
lazy_debug(logger, lambda: 'travis is not a monitored target for {}'.format(state))
return 'OK'
repo_cfg = g.repo_cfgs[repo_label]
token = repo_cfg['travis']['token']
auth_header = request.headers['Authorization']
code = hashlib.sha256(('{}/{}{}'.format(state.owner, state.name, token)).encode('utf-8')).hexdigest()
if auth_header != code:
        # this isn't necessarily an error, e.g. maybe someone is
        # fabricating travis notifications to try to trick Homu, but
        # I imagine that this will most often occur because a repo is
        # misconfigured.
logger.warn('authorization failed for {}, maybe the repo has the wrong travis token? '
'header = {}, computed = {}'
.format(state, auth_header, code))
abort(400, 'Authorization failed')
succ = info['result'] == 0
report_build_res(succ, info['build_url'], 'travis', state, logger, repo_cfg)
return 'OK'
def synch(user_gh, state, repo_label, repo_cfg, repo):
if not repo.is_collaborator(user_gh.user().login):
abort(400, 'You are not a collaborator')
Thread(target=synchronize, args=[repo_label, repo_cfg, g.logger, g.gh, g.states, g.repos, g.db, g.mergeable_que, g.my_username, g.repo_labels]).start()
return 'Synchronizing {}...'.format(repo_label)
@post('/admin')
def admin():
if request.json['secret'] != g.cfg['web']['secret']:
return 'Authentication failure'
if request.json['cmd'] == 'repo_new':
repo_label = request.json['repo_label']
repo_cfg = request.json['repo_cfg']
g.states[repo_label] = {}
g.repos[repo_label] = None
g.repo_cfgs[repo_label] = repo_cfg
g.repo_labels[repo_cfg['owner'], repo_cfg['name']] = repo_label
Thread(target=synchronize, args=[repo_label, repo_cfg, g.logger, g.gh, g.states, g.repos, g.db, g.mergeable_que, g.my_username, g.repo_labels]).start()
return 'OK'
elif request.json['cmd'] == 'repo_del':
repo_label = request.json['repo_label']
repo_cfg = g.repo_cfgs[repo_label]
db_query(g.db, 'DELETE FROM pull WHERE repo = ?', [repo_label])
db_query(g.db, 'DELETE FROM build_res WHERE repo = ?', [repo_label])
db_query(g.db, 'DELETE FROM mergeable WHERE repo = ?', [repo_label])
del g.states[repo_label]
del g.repos[repo_label]
del g.repo_cfgs[repo_label]
del g.repo_labels[repo_cfg['owner'], repo_cfg['name']]
return 'OK'
elif request.json['cmd'] == 'repo_edit':
repo_label = request.json['repo_label']
repo_cfg = request.json['repo_cfg']
assert repo_cfg['owner'] == g.repo_cfgs[repo_label]['owner']
assert repo_cfg['name'] == g.repo_cfgs[repo_label]['name']
g.repo_cfgs[repo_label] = repo_cfg
return 'OK'
elif request.json['cmd'] == 'sync_all':
def inner():
for repo_label in g.repos:
try:
synchronize(repo_label, g.repo_cfgs[repo_label], g.logger, g.gh, g.states, g.repos, g.db, g.mergeable_que, g.my_username, g.repo_labels)
except:
print('* Error while synchronizing {}'.format(repo_label))
traceback.print_exc()
print('* Done synchronizing all')
Thread(target=inner).start()
return 'OK'
return 'Unrecognized command'
def setup(cfg, states, queue_handler, repo_cfgs, repos, logger, buildbot_slots, my_username, db, repo_labels, mergeable_que, gh):
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(pkg_resources.resource_filename(__name__, 'html')),
autoescape=True,
)
tpls = {}
tpls['index'] = env.get_template('index.html')
tpls['queue'] = env.get_template('queue.html')
g.cfg = cfg
g.states = states
g.queue_handler = queue_handler
g.repo_cfgs = repo_cfgs
g.repos = repos
g.logger = logger.getChild('server')
g.buildbot_slots = buildbot_slots
g.tpls = tpls
g.my_username = my_username
g.db = db
g.repo_labels = repo_labels
g.mergeable_que = mergeable_que
g.gh = gh
def start(cfg, states, queue_handler, repo_cfgs, repos, logger, buildbot_slots, my_username, db, repo_labels, mergeable_que, gh):
setup(cfg, states, queue_handler, repo_cfgs, repos, logger, buildbot_slots, my_username, db, repo_labels, mergeable_que, gh)
try:
run(host=cfg['web'].get('host', ''), port=cfg['web']['port'], server='waitress')
except OSError as e:
print(e, file=sys.stderr)
os._exit(1)
|
|
import codecs
import configparser
import errno
import os
import pty
import shutil
from subprocess import (
DEVNULL,
PIPE,
CalledProcessError,
Popen,
check_call,
check_output
)
import yaml
from termcolor import colored
from bundleplacer.bundle import Bundle
from bundleplacer.charmstore_api import MetadataController
from bundleplacer.config import Config
from conjureup import charm
from conjureup.app_config import app
from conjureup.async import submit
def run(cmd, **kwargs):
""" Compatibility function to support python 3.4
"""
try:
from subprocess import run as _run
return _run(cmd, **kwargs)
except ImportError:
if 'check' in kwargs:
del kwargs['check']
return check_call(cmd, **kwargs)
else:
return check_output(cmd, **kwargs)
def run_script(path, stderr=PIPE, stdout=PIPE):
return run(path, shell=True, stderr=stderr, stdout=stdout, env=app.env)
def run_attach(cmd, output_cb=None):
""" run command and attach output to cb
Arguments:
cmd: shell command
output_cb: where to display output
"""
stdoutmaster, stdoutslave = pty.openpty()
subproc = Popen(cmd, shell=True,
stdout=stdoutslave,
stderr=PIPE)
os.close(stdoutslave)
decoder = codecs.getincrementaldecoder('utf-8')()
def last_ten_lines(s):
chunk = s[-1500:]
lines = chunk.splitlines(True)
return ''.join(lines[-10:]).replace('\r', '')
decoded_output = ""
try:
while subproc.poll() is None:
try:
b = os.read(stdoutmaster, 512)
except OSError as e:
if e.errno != errno.EIO:
raise
break
else:
final = False
if not b:
final = True
decoded_chars = decoder.decode(b, final)
if decoded_chars is None:
continue
decoded_output += decoded_chars
if output_cb:
ls = last_ten_lines(decoded_output)
output_cb(ls)
if final:
break
finally:
os.close(stdoutmaster)
if subproc.poll() is None:
subproc.kill()
subproc.wait()
errors = [l.decode('utf-8') for l in subproc.stderr.readlines()]
if output_cb:
output_cb(last_ten_lines(decoded_output))
errors = ''.join(errors)
if subproc.returncode == 0:
return decoded_output.strip()
else:
raise Exception("Problem running {0} "
"{1}:{2}".format(cmd,
subproc.returncode))
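# Hedged usage sketch (illustrative command, helper not called anywhere):
# stream the tail of a shell command's output to a simple callback.
def _run_attach_example():
    return run_attach("echo hello && echo world",
                      output_cb=lambda text: print(text, end=""))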
def check_bridge_exists():
""" Checks that an LXD network bridge exists
"""
config_string = "[dummy]\n"
if os.path.isfile('/etc/default/lxd-bridge'):
with open('/etc/default/lxd-bridge') as f:
config_string = config_string + f.read()
cfg = configparser.ConfigParser()
cfg.read_string(config_string)
ready = cfg.get('dummy', 'LXD_IPV4_ADDR')
if not ready.strip('"'):
return False
return True
def check_deb_installed(pkg):
""" Checks if a debian package is installed
"""
try:
run('dpkg-query -W {}'.format(pkg),
shell=True, check=True, stdout=DEVNULL, stderr=DEVNULL)
except CalledProcessError:
return False
return True
def info(msg):
prefix = colored('[info]', 'green', attrs=['bold'])
print("{} {}".format(prefix, msg))
def error(msg):
prefix = colored('[error]', 'red', attrs=['bold'])
print("{} {}".format(prefix, msg))
def warning(msg):
prefix = colored('[warning]', 'yellow', attrs=['bold'])
print("{} {}".format(prefix, msg))
def install_home():
""" returns installer user home
"""
return os.path.expanduser("~" + install_user())
def juju_path():
""" returns juju path for $user
"""
return os.getenv('JUJU_DATA',
os.path.expanduser('~/.local/share/juju'))
def mkdir(path):
if not os.path.isdir(path):
os.makedirs(path)
chown(path, install_user(), recursive=True)
def chown(path, user, group=None, recursive=False):
""" Change user/group ownership of file
Arguments:
path: path of file or directory
user: new owner username
group: new owner group name
recursive: set files/dirs recursively
"""
if group is None:
group = user
try:
if not recursive or os.path.isfile(path):
shutil.chown(path, user, group)
else:
for root, dirs, files in os.walk(path):
shutil.chown(root, user, group)
for item in dirs:
shutil.chown(os.path.join(root, item), user, group)
for item in files:
shutil.chown(os.path.join(root, item), user, group)
except OSError as e:
raise e
def spew(path, data, owner=None):
""" Writes data to path
Arguments:
path: path of file to write to
data: contents to write
owner: optional owner of file
"""
with open(path, 'w') as f:
f.write(data)
if owner:
try:
chown(path, owner)
except:
raise Exception(
"Unable to set ownership of {}".format(path))
def slurp(path):
""" Reads data from path
Arguments:
path: path of file
"""
try:
with open(path) as f:
return f.read().strip()
    except IOError:
        raise
def install_user():
""" returns sudo user
"""
user = os.getenv('USER', None)
if user is None:
raise Exception("Unable to determine current user.")
return user
def pollinate(session, tag):
""" fetches random seed
Tag definitions:
W001 - welcome shown
B001 - bundle selected
CS - cloud selected
CC - cloud creation started
CA - cloud credentials added
L001 - LXD Setup started
L002 - LXD Setup completed
J001 - juju post-bootstrap started
J002 - juju post-bootstrap completed
J003 - juju bootstrap started
J004 - juju bootstrap completed
CS - controller selected
PM - placement/bundle editor shown (maas)
PS - placement/bundle editor shown (other)
PC - placements committed
SS - deploy summary shown
DS - deploy started
DC - deploy complete
XA - pre processing started
XB - post processing started
UC - user cancelled
EC - error getting credentials
EP - error in placement/bundle editor
EB - error juju bootstrap
ED - error deploying
E001 - error in post bootstrap phase
E002 - error in post processor
E003 - error in pre processor
E004 - error creating model in existing controller
E005 - error in picking spells
Arguments:
session: randomly generated session id
tag: custom tag
"""
agent_str = 'conjure/{}/{}'.format(session, tag)
def do_pollinate():
try:
cmd = ("curl -A {} --connect-timeout 3 --max-time 3 "
"--data /dev/null https://entropy.ubuntu.com "
"> /dev/null 2>&1".format(
agent_str))
app.log.debug("pollinate: {}".format(cmd))
check_call(cmd, shell=True)
except CalledProcessError as e:
app.log.warning("Generating random seed failed: {}".format(e))
if not app.argv.debug:
submit(do_pollinate, lambda _: None)
def load_global_conf():
""" loads global configuration
Returns:
dictionary of config items
"""
global_conf_file = '/etc/conjure-up.conf'
if not os.path.exists(global_conf_file):
global_conf_file = os.path.join(
os.path.dirname(__file__), '..', 'etc', 'conjure-up.conf')
try:
with open(global_conf_file) as fp:
return yaml.safe_load(fp.read())
except:
return {}
def setup_metadata_controller():
bundle_filename = os.path.join(app.config['spell-dir'], 'bundle.yaml')
if not os.path.isfile(bundle_filename):
if 'bundle-location' not in app.config['metadata']:
raise Exception(
"Could not determine bundle location: no local bundle "
"was found and bundle-location not set in spell metadata.")
bundle_filename = charm.get_bundle(
app.config['metadata']['bundle-location'], True)
bundle = Bundle(filename=bundle_filename)
bundleplacer_cfg = Config(
'bundle-placer',
{
'bundle_filename': bundle_filename,
'bundle_key': None,
})
app.metadata_controller = MetadataController(bundle, bundleplacer_cfg)
def set_chosen_spell(spell_name, spell_dir):
app.env = os.environ.copy()
app.env['CONJURE_UP_SPELL'] = spell_name
app.config.update({'spell-dir': spell_dir,
'spell': spell_name})
def set_spell_metadata():
metadata_path = os.path.join(app.config['spell-dir'],
'metadata.yaml')
with open(metadata_path) as fp:
metadata = yaml.safe_load(fp.read())
app.config['metadata'] = metadata
def find_spells_matching(key):
if key in app.spells_index:
return app.spells_index[key]['spells']
for k, d in app.spells_index.items():
for spell in d['spells']:
if spell['key'] == key:
return [spell]
return []
def get_options_whitelist(service_name):
"""returns list of whitelisted option names.
If there is no whitelist, returns []
"""
metadata = app.config.get('metadata', None)
if metadata is None:
return []
options_whitelist = metadata.get('options-whitelist', None)
if options_whitelist is None:
return []
svc_opts_whitelist = options_whitelist.get(service_name, [])
return svc_opts_whitelist
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Thierry Lemeunier <thierry at lemeunier dot net>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import asyncio
import socket
import ssl
import time
import concurrent.futures
import os
import stat
import json
from ..util.Configuration import Configuration
from ..util.funcutils import Subject
from .protocol.ProtocolHandler import ProtocolHandler
from ...common.SecretInfoBlock import SecretInfoBlock
"""
Client part of MnemoPwd application.
"""
class ClientCore(Subject):
"""
Client module of the application
Attribute(s):
- loop: an i/o asynchronous loop (see the official asyncio module)
- queue: a FIFO task queue for handling commands coming from the UI layer
    - transport: an SSL/TLS asynchronous socket (see the official ssl module)
- protocol: a communication handler (see the official asyncio module)
- table: table of blocks (a dictionary)
Method(s):
- start: start the domain layer
- stop: close the domain loop
- command: execute a command coming from the UI layer
- close: close the connection
"""
# Internal methods
def __init__(self):
"""Initialization"""
Subject.__init__(self)
# Create an i/o asynchronous loop
self.loop = asyncio.get_event_loop()
# Set logging configuration
if Configuration.loglevel is not None:
self.loop.set_debug(Configuration.loglevel == 'DEBUG')
logging.basicConfig(filename="client.log",
level=Configuration.loglevel,
format='%(asctime)s %(levelname)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
else:
logging.basicConfig(filename=os.devnull)
# Create a task queue
self.queue = asyncio.Queue(
maxsize=Configuration.queuesize, loop=self.loop)
# Set attributes
self.cmdH = None # The command handler co-routine
self.task = None # The current task executed by the command handler
self.taskInProgress = False # Flag to indicate a task is in progress
self.last_block = None # The last block used
self.last_index = None # The last index used
self.notify = True # Flag for UI layer notification or not
# Create and set an executor
executor = concurrent.futures.ThreadPoolExecutor(Configuration.poolsize)
self.loop.set_default_executor(executor)
# Create a SSL context
self.context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
self.context.options |= ssl.OP_NO_SSLv2 # SSL v2 not allowed
self.context.options |= ssl.OP_NO_SSLv3 # SSL v3 not allowed
# Server certificate is optional
self.context.verify_mode = ssl.CERT_OPTIONAL
# Don't check hostname because of shared certificate
self.context.check_hostname = False
if Configuration.certfile != 'None':
# Load certificate
self.context.load_verify_locations(cafile=Configuration.certfile)
else:
self.context.set_ciphers("AECDH-AES256-SHA") # Cipher suite to use
# Transport handler
self.transport = None
if Configuration.action == 'status':
self._open() # Try to open a connection to server
print("the server seems running at " +
str(self.transport.get_extra_info('peername')))
def _open(self):
"""Open a new connection to the server"""
# Block table
self.table = {}
# Create an asynchronous SSL socket
coro = self.loop.create_connection(
lambda: ProtocolHandler(self), Configuration.server,
Configuration.port, family=socket.AF_INET, ssl=self.context)
# Try to open SSL socket
try:
if not self.loop.is_running():
self.transport, self.protocol = self.loop.run_until_complete(coro)
else:
future = asyncio.run_coroutine_threadsafe(coro, self.loop)
self.transport, self.protocol = future.result(Configuration.timeout)
except asyncio.TimeoutError:
future.cancel()
self.update(
'connection.state',
                'Unable to connect to server: retry or verify your configuration')
raise
except ConnectionRefusedError as e:
if not self.loop.is_running():
print(e)
print("Enable to connect to server: retry or verify your configuration")
exit(1)
else:
self.update(
'connection.state',
                    'Unable to connect to server: retry or verify your configuration')
raise
except ssl.SSLError as e:
if not self.loop.is_running():
print(e)
print("There is a problem with the certificate.")
exit(1)
else:
self.update('connection.state',
'There is a problem with the certificate.')
raise
except Exception as e:
if not self.loop.is_running():
print(e)
exit(1)
else:
self.update('connection.state',
'An unexpected exception occurred')
raise
@asyncio.coroutine
def _command_handler(self):
"""Loop for the execution of tasks"""
while True:
try:
coro = yield from self.queue.get()
self.task = asyncio.ensure_future(coro, loop=self.loop)
yield from asyncio.wait_for(asyncio.shield(self.task),
Configuration.timeout_task,
loop=self.loop)
self.task = None
except asyncio.TimeoutError:
self.taskInProgress = False
self.task = None
except asyncio.CancelledError:
if self.task is not None and self.task.cancelled():
self.taskInProgress = False
self.task = None
else:
raise
@asyncio.coroutine
def _task_set_credentials(self, login, password):
"""Store credentials, start S1 then wait for the end of the task"""
if self.transport is not None:
self.protocol.login = login.encode()
self.protocol.password = password.encode()
self.taskInProgress = True
# Wait for being in state number one
while self.protocol.state != self.protocol.states['1S'] \
and self.taskInProgress:
yield from asyncio.sleep(0.01, loop=self.loop)
# Execute protocol state
if self.taskInProgress and self.transport is not None:
yield from self.loop.run_in_executor(
None, self.protocol.data_received, None)
# Waiting for the end of the task
while self.taskInProgress:
yield from asyncio.sleep(0.01, loop=self.loop)
@asyncio.coroutine
def _task_close(self):
"""Close the connection with the server"""
yield from self.loop.run_in_executor(
None, self.update, 'connection.state.logout', 'Connection closed')
self.taskInProgress = False
self.task = None
self.transport.close()
self.transport = None
@asyncio.coroutine
def _task_deletion(self):
"""User account deletion request"""
self.protocol.state = self.protocol.states['33R'] # Deletion
# Execute protocol state
self.taskInProgress = True
yield from self.loop.run_in_executor(
None, self.protocol.data_received, None)
while self.taskInProgress:
yield from asyncio.sleep(0.01, loop=self.loop)
@asyncio.coroutine
def _task_add_data(self, sib, notify=True):
"""Add a new block"""
self.protocol.state = self.protocol.states['35R'] # Add a new block
# Remember the block
self.last_block = sib
# Execute protocol state
self.notify = notify
self.taskInProgress = True
yield from self.loop.run_in_executor(
None, self.protocol.data_received, sib)
# Waiting for the end of the task
while self.taskInProgress:
yield from asyncio.sleep(0.01, loop=self.loop)
self.notify = True
# Assign new block
yield from self._assign_last_block(self.last_index, 'add')
@asyncio.coroutine
def _task_update_data(self, idblock, sib, notify=True):
"""Update an existing block"""
self.protocol.state = self.protocol.states['37R'] # Update a block
# Remember the block
self.last_block = sib
# Execute protocol state
self.notify = notify
self.taskInProgress = True
yield from self.loop.run_in_executor(
None, self.protocol.data_received, (idblock, sib))
# Waiting for the end of the task
while self.taskInProgress:
yield from asyncio.sleep(0.01, loop=self.loop)
self.notify = True
# Assign updated block
yield from self._assign_last_block(idblock, 'update')
@asyncio.coroutine
def _task_delete_data(self, idblock):
"""Delete an existing block"""
self.protocol.state = self.protocol.states['36R'] # Delete
# Execute protocol state
self.taskInProgress = True
yield from self.loop.run_in_executor(
None, self.protocol.data_received, idblock)
# Waiting for the end of the task
while self.taskInProgress:
yield from asyncio.sleep(0.01, loop=self.loop)
# Remove block
del self.table[idblock]
# Notify the result to UI layer
yield from self.loop.run_in_executor(
None, self.update, 'application.searchblock.removeresult', idblock)
@asyncio.coroutine
def _task_search_data(self, pattern):
"""Search blocks matching a pattern"""
self.protocol.state = self.protocol.states['34R'] # Search
self.searchTable = list() # Reset search table
# Execute protocol state
self.taskInProgress = True
yield from self.loop.run_in_executor(
None, self.protocol.data_received, pattern)
while self.taskInProgress:
yield from asyncio.sleep(0.01, loop=self.loop)
# Notify the result to the UI layer
if len(self.searchTable) > 0:
yield from self.loop.run_in_executor(
None, self.update, 'application.searchblock.result',
self.searchTable)
@asyncio.coroutine
def _task_export_data(self, notify=True):
"""Get all blocks"""
self.protocol.state = self.protocol.states['32R'] # Export
self.searchTable = list() # Reset search table
# Execute protocol state
self.notify = notify
self.taskInProgress = True
yield from self.loop.run_in_executor(
None, self.protocol.data_received, None)
while self.taskInProgress:
yield from asyncio.sleep(0.01, loop=self.loop)
self.notify = True
# Notify the result to UI layer
if len(self.searchTable) > 0 and notify:
yield from self.loop.run_in_executor(
None, self.update, 'application.searchblock.result',
self.searchTable)
@asyncio.coroutine
def _task_import_data(self, sib, notify=False):
"""Add some SIBs"""
self.protocol.state = self.protocol.states['35R'] # Add a new block
# Execute protocol state
self.notify = notify
self.taskInProgress = True
yield from self.loop.run_in_executor(
None, self.protocol.data_received, sib)
# Waiting for the end of the task
while self.taskInProgress:
yield from asyncio.sleep(0.01, loop=self.loop)
self.notify = True
# Assign new block
self.assign_result_search_block(self.last_index, sib)
@asyncio.coroutine
def _task_get_block_values(self, idblock):
"""Return values of a block"""
sib = self.table[idblock]
# Notify the result to UI layer
yield from self.loop.run_in_executor(
None, self.update, 'application.searchblock.oneresult',
(idblock, sib))
@asyncio.coroutine
def _task_export_file(self, filename, secure):
"""Exportation to a JSON file"""
# Inform UI layer about progression
yield from self.loop.run_in_executor(
None, self.update, 'application.exportation.result',
"Exporting (can take few minutes)...")
# Control no existence and write permission
try:
# Try to create file
with open(filename, 'x'):
pass
# Change file permissions
os.chmod(filename,
stat.S_IRUSR | stat.S_IWUSR | stat.S_IREAD | stat.S_IWRITE)
except FileExistsError:
yield from self.loop.run_in_executor(
None, self.update, 'application.exportation.result',
"Exportation failed: file already exists")
return
except OSError:
yield from self.loop.run_in_executor(
None, self.update, 'application.exportation.result',
"Exportation failed: no write permission")
return
# Get SIBs from server
yield from self._task_export_data(notify=False)
# Exportation
to_export = dict() # Dictionary of all information of all SIBs
to_export["size"] = len(self.table) # Number of SIBs
if not secure:
to_export["secure"] = "False" # Information in clear
else:
to_export["secure"] = "True" # Information encrypted
# Loop on SIBs to export
for i, sib in enumerate(self.table.values(), start=1):
if secure:
to_export[str(i)] = sib.exportation(secure, self.protocol.ms)
else:
to_export[str(i)] = sib.exportation(secure)
yield from self.loop.run_in_executor(
None, self.update, "application.state.loadbar",
(i, len(self.table)))
try:
# Save to JSON file
with open(filename, 'w') as file:
json.dump(to_export, file,
ensure_ascii=False, indent=4, sort_keys=True)
# Notify the result to UI layer
yield from self.loop.run_in_executor(
None, self.update, 'application.exportation.result',
"Exportation done")
except OSError:
yield from self.loop.run_in_executor(
None, self.update, 'application.exportation.result',
"Exportation failed: no enough disk space?")
@asyncio.coroutine
def _task_import_file(self, filename, login=None, passwd=None):
# Inform UI layer about progression
yield from self.loop.run_in_executor(
None, self.update, 'application.importation.result',
"Importing (can take few minutes)...")
# Control existence, read permission and JSON format
try:
with open(filename, 'r') as file:
table = json.load(file) # Load information from JSON file
secure = table["secure"] == "True" # Is a secure export file ?
size = table["size"] # Number of information blocks
except OSError:
yield from self.loop.run_in_executor(
None, self.update, 'application.importation.result',
"Importation failed: file not exist or wrong read permission")
return
except ValueError:
yield from self.loop.run_in_executor(
None, self.update, 'application.importation.result',
"Importation failed: wrong file format?")
return
except KeyError:
yield from self.loop.run_in_executor(
None, self.update, 'application.importation.result',
"Importation failed: wrong file format?")
return
# Do importation
try:
self.searchTable = list() # Reset search table
i = 1
while True:
sib = SecretInfoBlock(self.protocol.keyH)
sib.importation(
table[str(i)], secure, login=login, passwd=passwd)
# Send new SIB to the server
yield from self._task_import_data(sib, notify=False)
yield from self.loop.run_in_executor(
None, self.update, "application.state.loadbar", (i, size))
i += 1
except KeyError:
# Notify the result to UI layer
if len(self.searchTable) > 0:
yield from self.loop.run_in_executor(
None, self.update, 'application.searchblock.result',
self.searchTable)
# Inform UI layer about progression
yield from self.loop.run_in_executor(
None, self.update, 'application.importation.result',
"Importation done")
except AssertionError:
yield from self.loop.run_in_executor(
None, self.update, 'application.importation.result',
"Importation failed: integrity not respected")
# External methods
@asyncio.coroutine
def _assign_last_block(self, idblock, task):
"""Co-routine for assignation of the last block used"""
# Update table
self.table[idblock] = self.last_block
# Notify the result to UI layer
if task == 'add':
yield from self.loop.run_in_executor(
None, self.update, 'application.searchblock.tryoneresult',
(idblock, self.last_block))
elif task == 'update':
yield from self.loop.run_in_executor(
None, self.update, 'application.searchblock.updateresult',
(idblock, self.last_block))
def assign_result_search_block(self, idblock, sib):
"""Callback method for assignation of a search result"""
self.table[idblock] = sib
self.searchTable.append(idblock)
@asyncio.coroutine
def close(self):
"""Cancel the actual task, empty the queue and close the connection"""
self.taskInProgress = False
# Cancel the current task if it exists
if self.task is not None:
self.task.cancel()
# Empty the queue
self.queue = asyncio.Queue(
maxsize=Configuration.queuesize, loop=self.loop)
# Close the transport if not already closed
if self.transport is not None:
self.transport.close()
self.transport = None
def start(self):
"""Start the main loop"""
# Command loop
self.cmdH = self.loop.create_task(self._command_handler())
# Run until the end of the main loop
self.loop.run_forever()
# Close the main loop
self.loop.close()
def stop(self):
"""Close the connection to the server then stop the main loop"""
if self.loop.is_running():
# Wait until the queue becomes empty
while not self.queue.empty():
time.sleep(0.01)
# Ask for cancelling the command loop
self.loop.call_soon_threadsafe(self.cmdH.cancel)
# Waiting for cancellation
while not self.cmdH.cancelled():
time.sleep(0.01)
# Ask for stopping the main loop
self.loop.call_soon_threadsafe(self.loop.stop)
else:
self.transport.close()
self.loop.close()
def command(self, key, value):
"""Create and enqueue a coroutine from a command of the UI layer"""
coro = None
if key == "connection.open.credentials":
try:
self._open() # Direct execution because queue is empty here
coro = self._task_set_credentials(*value)
except Exception:
pass
if key == "connection.close":
coro = self._task_close()
if key == "connection.close.deletion":
coro = self._task_deletion()
if key == "application.addblock":
coro = self._task_add_data(value)
if key == "application.updateblock":
coro = self._task_update_data(*value)
if key == "application.deleteblock":
coro = self._task_delete_data(value)
if key == "application.searchblock":
coro = self._task_search_data(value)
if key == "application.exportblock":
coro = self._task_export_data()
if key == "application.searchblock.blockvalues":
coro = self._task_get_block_values(value)
if key == "application.exportation.clear":
coro = self._task_export_file(value, False)
if key == "application.exportation.cypher":
coro = self._task_export_file(value, True)
if key == "application.importation.clear":
coro = self._task_import_file(value)
if key == "application.importation.cypher":
coro = self._task_import_file(*value)
if coro is not None:
asyncio.run_coroutine_threadsafe(self.queue.put(coro), self.loop)
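# ---------------------------------------------------------------------------
# Illustration only (not part of the client above): a minimal, standalone
# sketch of the dispatch pattern used by command() and start(), where a
# non-asyncio thread hands a coroutine to the event-loop thread with
# asyncio.run_coroutine_threadsafe(). It uses modern async/await syntax for
# brevity and skips the queue; all names (fake_task, run_loop) are
# hypothetical.
# ---------------------------------------------------------------------------
def _demo_run_coroutine_threadsafe():
    import asyncio
    import threading

    loop = asyncio.new_event_loop()

    async def fake_task(name):
        # Stand-in for one of the _task_* coroutines above.
        await asyncio.sleep(0.01)
        return 'executed ' + name

    def run_loop():
        # Plays the role of start(): run the loop in its own thread.
        asyncio.set_event_loop(loop)
        loop.run_forever()

    worker = threading.Thread(target=run_loop, daemon=True)
    worker.start()

    # The "UI" side: submit work to the loop thread and wait for the result,
    # much like command() enqueues a coroutine from another thread.
    future = asyncio.run_coroutine_threadsafe(
        fake_task('application.addblock'), loop)
    print(future.result(timeout=1))

    # Stop the loop from outside its thread, as stop() does.
    loop.call_soon_threadsafe(loop.stop)
    worker.join(timeout=1)
    loop.close()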
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a format decision state object that manages whitespace decisions.
Each token is processed one at a time, at which point its whitespace formatting
decisions are made. A graph of potential whitespace formattings is created,
where each node in the graph is a format decision state object. The heuristic
tries formatting the token with and without a newline before it to determine
which one has the least penalty. Therefore, the format decision state object for
each decision needs to be its own unique copy.
Once the heuristic determines the best formatting, it makes a non-dry run pass
through the code to commit the whitespace formatting.
FormatDecisionState: main class exported by this module.
"""
import copy
from yapf.yapflib import format_token
from yapf.yapflib import style
class FormatDecisionState(object):
"""The current state when indenting an unwrapped line.
The FormatDecisionState object is meant to be copied instead of referenced.
Attributes:
first_indent: The indent of the first token.
column: The number of used columns in the current line.
next_token: The next token to be formatted.
paren_level: The level of nesting inside (), [], and {}.
start_of_line_level: The paren_level at the start of this line.
lowest_level_on_line: The lowest paren_level on the current line.
newline: Indicates if a newline is added along the edge to this format
decision state node.
previous: The previous format decision state in the decision tree.
stack: A stack (of _ParenState) keeping track of properties applying to
parenthesis levels.
ignore_stack_for_comparison: Ignore the stack of _ParenState for state
comparison.
"""
def __init__(self, line, first_indent):
"""Initializer.
Initializes to the state after placing the first token from 'line' at
'first_indent'.
Arguments:
line: (UnwrappedLine) The unwrapped line we're currently processing.
first_indent: (int) The indent of the first token.
"""
self.next_token = line.first
self.column = first_indent
self.paren_level = 0
self.start_of_line_level = 0
self.lowest_level_on_line = 0
self.ignore_stack_for_comparison = False
self.stack = [_ParenState(first_indent, first_indent)]
self.first_indent = first_indent
self.newline = False
self.previous = None
self._MoveStateToNextToken()
def Clone(self):
new = copy.copy(self)
new.stack = copy.deepcopy(self.stack)
return new
def __eq__(self, other):
# Note: 'first_indent' is implicit in the stack. Also, we ignore 'previous',
# because it shouldn't have a bearing on this comparison. (I.e., it will
# report equal if 'next_token' does.)
return (self.next_token == other.next_token and
self.column == other.column and
self.paren_level == other.paren_level and
self.start_of_line_level == other.start_of_line_level and
self.lowest_level_on_line == other.lowest_level_on_line and
(self.ignore_stack_for_comparison or
other.ignore_stack_for_comparison or self.stack == other.stack))
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.next_token, self.column, self.paren_level,
self.start_of_line_level, self.lowest_level_on_line))
def __repr__(self):
return ('column::%d, next_token::%s, paren_level::%d, stack::[\n\t%s' %
(self.column, repr(self.next_token), self.paren_level,
'\n\t'.join(repr(s) for s in self.stack) + ']'))
def CanSplit(self):
"""Returns True if the line can be split before the next token."""
current = self.next_token
if not current.can_break_before:
return False
return True
def MustSplit(self):
"""Returns True if the line must split before the next token."""
current = self.next_token
previous_token = current.previous_token
if current.must_break_before:
return True
if (self.stack[-1].split_before_closing_bracket and
# FIXME(morbo): Use the 'matching_bracket' instead of this.
# FIXME(morbo): Don't forget about tuples!
current.value in ']}'):
# Split if we need to split before the closing bracket and the next
# token is a closing bracket.
return True
if previous_token:
length = _GetLengthToMatchingParen(previous_token)
if (previous_token.value == '{' and # TODO(morbo): List initializers?
length + self.column > style.Get('COLUMN_LIMIT')):
return True
# TODO(morbo): This should be controlled with a knob.
if (format_token.Subtype.DICTIONARY_KEY in current.subtypes and
not current.is_comment):
# Place each dictionary entry on its own line.
return True
# TODO(morbo): This should be controlled with a knob.
if format_token.Subtype.DICT_SET_GENERATOR in current.subtypes:
return True
if (previous_token.value not in '(=' and current.value not in '=,)' and
format_token.Subtype.DEFAULT_OR_NAMED_ASSIGN_ARG_LIST in
current.subtypes):
return style.Get('SPLIT_BEFORE_NAMED_ASSIGNS')
if (previous_token.value in '{[(' and
current.lineno != previous_token.lineno):
self.stack[-1].split_before_closing_bracket = True
return True
return False
def AddTokenToState(self, newline, dry_run, must_split=False):
"""Add a token to the format decision state.
Allow the heuristic to try out adding the token with and without a newline.
Later on, the algorithm will determine which one has the lowest penalty.
Arguments:
newline: (bool) Add the token on a new line if True.
dry_run: (bool) Don't commit whitespace changes to the FormatToken if
True.
must_split: (bool) A newline was required before this token.
Returns:
The penalty of splitting after the current token.
"""
penalty = 0
if newline:
penalty = self._AddTokenOnNewline(dry_run, must_split)
else:
self._AddTokenOnCurrentLine(dry_run)
return self._MoveStateToNextToken() + penalty
def _AddTokenOnCurrentLine(self, dry_run):
"""Puts the token on the current line.
Appends the next token to the state and updates information necessary for
indentation.
Arguments:
dry_run: (bool) Don't commit whitespace changes to the FormatToken if True.
"""
current = self.next_token
previous = current.previous_token
spaces = current.spaces_required_before
if not dry_run:
current.AddWhitespacePrefix(newlines_before=0, spaces=spaces)
if previous.OpensScope():
if not current.is_comment:
# Align closing scopes that are on a newline with the opening scope:
#
# foo = [a,
# b,
# ]
self.stack[-1].closing_scope_indent = previous.column
if style.Get('ALIGN_CLOSING_BRACKET_WITH_VISUAL_INDENT'):
self.stack[-1].closing_scope_indent += 1
self.stack[-1].indent = self.column + spaces
else:
self.stack[-1].closing_scope_indent = (
self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH')
)
self.column += spaces
def _AddTokenOnNewline(self, dry_run, must_split):
"""Adds a line break and necessary indentation.
Appends the next token to the state and updates information necessary for
indentation.
Arguments:
dry_run: (bool) Don't commit whitespace changes to the FormatToken if
True.
must_split: (bool) A newline was required before this token.
Returns:
The split penalty for splitting after the current state.
"""
current = self.next_token
previous = current.previous_token
self.column = self._GetNewlineColumn()
if not dry_run:
current.AddWhitespacePrefix(newlines_before=1, spaces=self.column)
if not current.is_comment:
self.stack[-1].last_space = self.column
self.start_of_line_level = self.paren_level
self.lowest_level_on_line = self.paren_level
# Any break on this level means that the parent level has been broken and we
# need to avoid bin packing there.
for paren_state in self.stack:
paren_state.split_before_parameter = True
if (previous.value != ',' and not previous.is_binary_op and
not current.is_binary_op and not previous.OpensScope()):
self.stack[-1].split_before_parameter = True
if (previous.OpensScope() or
(previous.is_comment and previous.previous_token is not None and
previous.previous_token.OpensScope())):
self.stack[-1].closing_scope_indent = max(
0, self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH'))
self.stack[-1].split_before_closing_bracket = True
# Calculate the split penalty.
penalty = current.split_penalty
# Add a penalty for each increasing newline we add.
last = self.stack[-1]
penalty += (
style.Get('SPLIT_PENALTY_FOR_ADDED_LINE_SPLIT') * last.num_line_splits
)
if not must_split and current.value not in {'if', 'for'}:
# Don't penalize for a must split or for splitting before an
# if-expression or list comprehension.
last.num_line_splits += 1
return penalty + 10
def _GetNewlineColumn(self):
"""Return the new column on the newline."""
current = self.next_token
previous = current.previous_token
top_of_stack = self.stack[-1]
if current.spaces_required_before > 2:
return current.spaces_required_before
if current.OpensScope():
return self.first_indent if not self.paren_level else top_of_stack.indent
if current.ClosesScope():
if (previous.OpensScope() or
(previous.is_comment and previous.previous_token is not None and
previous.previous_token.OpensScope())):
return max(
0, self.stack[-1].indent - style.Get('CONTINUATION_INDENT_WIDTH'))
return top_of_stack.closing_scope_indent
if (previous and previous.is_string and current.is_string and
format_token.Subtype.DICTIONARY_VALUE in current.subtypes):
return previous.column
if format_token.Subtype.IF_TEST_EXPR in current.subtypes:
return top_of_stack.indent + style.Get('INDENT_IF_EXPR_CONTINUATION')
return top_of_stack.indent
def _MoveStateToNextToken(self):
"""Calculate format decision state information and move onto the next token.
Before moving onto the next token, we first calculate the format decision
state given the current token and its formatting decisions. Then the format
decision state is set up so that the next token can be added.
Returns:
The penalty for the number of characters over the column limit.
"""
current = self.next_token
if not current.OpensScope() and not current.ClosesScope():
self.lowest_level_on_line = min(self.lowest_level_on_line,
self.paren_level)
# If we encounter an opening bracket, we add a level to our stack to prepare
# for the subsequent tokens.
if current.OpensScope():
last = self.stack[-1]
new_indent = style.Get('CONTINUATION_INDENT_WIDTH') + last.last_space
self.stack.append(_ParenState(new_indent, self.stack[-1].last_space))
self.stack[-1].split_before_parameter = False
self.paren_level += 1
# If we encounter a closing bracket, we can remove a level from our
# parenthesis stack.
if len(self.stack) > 1 and current.ClosesScope():
self.stack[-2].last_space = self.stack[-1].last_space
self.stack.pop()
self.paren_level -= 1
is_multiline_string = current.is_string and '\n' in current.value
if is_multiline_string:
# This is a multiline string. Only look at the first line.
self.column += len(current.value.split('\n')[0])
else:
self.column += len(current.value)
self.next_token = self.next_token.next_token
# Calculate the penalty for overflowing the column limit.
penalty = 0
if self.column > style.Get('COLUMN_LIMIT') and not current.is_comment:
excess_characters = self.column - style.Get('COLUMN_LIMIT')
penalty = style.Get('SPLIT_PENALTY_EXCESS_CHARACTER') * excess_characters
if is_multiline_string:
# If this is a multiline string, the column is actually the
# end of the last line in the string.
self.column = len(current.value.split('\n')[-1])
return penalty
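# ---------------------------------------------------------------------------
# Illustration only (not part of yapf): a standalone sketch of why Clone()
# pairs copy.copy() with a deepcopy of the stack. Sibling states in the
# decision graph must be able to mutate their _ParenState entries
# independently; a plain shallow copy would share them. The Toy* names are
# hypothetical stand-ins for FormatDecisionState and _ParenState.
# ---------------------------------------------------------------------------
def _demo_clone_semantics():
    import copy

    class ToyParen(object):
        def __init__(self, indent):
            self.indent = indent

    class ToyState(object):
        def __init__(self):
            self.column = 0
            self.stack = [ToyParen(4)]

        def Clone(self):
            new = copy.copy(self)                  # cheap copy of the scalars
            new.stack = copy.deepcopy(self.stack)  # private copies of the stack
            return new

    a = ToyState()
    b = a.Clone()
    b.stack[-1].indent = 8
    assert a.stack[-1].indent == 4  # 'a' is unaffected by the mutation in 'b'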
def _GetLengthToMatchingParen(token):
"""Returns the length from one bracket to the matching bracket.
Arguments:
token: (FormatToken) The opening bracket token.
Returns:
The length to the closing paren or up to the first point where we can split
the line. The length includes the brackets.
"""
if not token.matching_bracket:
return 0
end = token.matching_bracket
while end.next_token and not end.next_token.can_break_before:
end = end.next_token
return end.total_length - token.total_length + 1
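# ---------------------------------------------------------------------------
# Illustration only (not part of yapf): a standalone, worked example of the
# measurement above. The length runs from the opening bracket to its matching
# bracket and is extended past any trailing tokens we cannot break before
# (here the '.' after the call). ToyToken is a hypothetical stand-in for
# FormatToken carrying the same three fields.
# ---------------------------------------------------------------------------
def _demo_length_to_matching_paren():
    class ToyToken(object):
        def __init__(self, value, total_length, can_break_before=True):
            self.value = value
            self.total_length = total_length  # line length up to and including this token
            self.can_break_before = can_break_before
            self.matching_bracket = None
            self.next_token = None

    # Tokens of "foo(a, b).bar"; we may not break before the '.'.
    toks = [ToyToken('(', 4), ToyToken('a', 5), ToyToken(',', 6),
            ToyToken('b', 8), ToyToken(')', 9),
            ToyToken('.', 10, can_break_before=False), ToyToken('bar', 13)]
    for left, right in zip(toks, toks[1:]):
        left.next_token = right
    toks[0].matching_bracket = toks[4]

    # Same walk as _GetLengthToMatchingParen():
    end = toks[0].matching_bracket
    while end.next_token and not end.next_token.can_break_before:
        end = end.next_token
    length = end.total_length - toks[0].total_length + 1
    assert length == 7  # covers "(a, b)." including both brackets and the dot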
class _ParenState(object):
"""Maintains the state of the bracket enclosures.
A stack of _ParenState objects is kept so that we know how to indent relative
to the brackets.
Attributes:
indent: The column position to which a specified parenthesis level needs to
be indented.
last_space: The column position of the last space on each level.
split_before_closing_bracket: Whether a newline needs to be inserted before
the closing bracket. We only want to insert a newline before the closing
bracket if there also was a newline after the beginning left bracket.
split_before_parameter: Split the line after the next comma.
num_line_splits: Number of line splits this _ParenState contains already.
Each subsequent line split gets an increasing penalty.
"""
# TODO(morbo): This doesn't track "bin packing."
def __init__(self, indent, last_space):
self.indent = indent
self.last_space = last_space
self.closing_scope_indent = 0
self.split_before_closing_bracket = False
self.split_before_parameter = False
self.num_line_splits = 0
def __repr__(self):
return '[indent::%d, last_space::%d, closing_scope_indent::%d]' % (
self.indent, self.last_space, self.closing_scope_indent)
|
|
"""
Support for MQTT lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.mqtt/
"""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components import mqtt
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_EFFECT, ATTR_HS_COLOR,
ATTR_WHITE_VALUE, Light, SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT, SUPPORT_COLOR, SUPPORT_WHITE_VALUE)
from homeassistant.const import (
CONF_BRIGHTNESS, CONF_COLOR_TEMP, CONF_DEVICE, CONF_EFFECT, CONF_HS,
CONF_NAME, CONF_OPTIMISTIC, CONF_PAYLOAD_OFF, CONF_PAYLOAD_ON, STATE_ON,
CONF_RGB, CONF_STATE, CONF_VALUE_TEMPLATE, CONF_WHITE_VALUE, CONF_XY)
from homeassistant.components.mqtt import (
CONF_COMMAND_TOPIC, CONF_QOS, CONF_RETAIN, CONF_STATE_TOPIC,
CONF_UNIQUE_ID, MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
MqttEntityDeviceInfo, subscription)
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
from . import MQTT_LIGHT_SCHEMA_SCHEMA
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['mqtt']
CONF_BRIGHTNESS_COMMAND_TOPIC = 'brightness_command_topic'
CONF_BRIGHTNESS_SCALE = 'brightness_scale'
CONF_BRIGHTNESS_STATE_TOPIC = 'brightness_state_topic'
CONF_BRIGHTNESS_VALUE_TEMPLATE = 'brightness_value_template'
CONF_COLOR_TEMP_COMMAND_TEMPLATE = 'color_temp_command_template'
CONF_COLOR_TEMP_COMMAND_TOPIC = 'color_temp_command_topic'
CONF_COLOR_TEMP_STATE_TOPIC = 'color_temp_state_topic'
CONF_COLOR_TEMP_VALUE_TEMPLATE = 'color_temp_value_template'
CONF_EFFECT_COMMAND_TOPIC = 'effect_command_topic'
CONF_EFFECT_LIST = 'effect_list'
CONF_EFFECT_STATE_TOPIC = 'effect_state_topic'
CONF_EFFECT_VALUE_TEMPLATE = 'effect_value_template'
CONF_HS_COMMAND_TOPIC = 'hs_command_topic'
CONF_HS_STATE_TOPIC = 'hs_state_topic'
CONF_HS_VALUE_TEMPLATE = 'hs_value_template'
CONF_RGB_COMMAND_TEMPLATE = 'rgb_command_template'
CONF_RGB_COMMAND_TOPIC = 'rgb_command_topic'
CONF_RGB_STATE_TOPIC = 'rgb_state_topic'
CONF_RGB_VALUE_TEMPLATE = 'rgb_value_template'
CONF_STATE_VALUE_TEMPLATE = 'state_value_template'
CONF_XY_COMMAND_TOPIC = 'xy_command_topic'
CONF_XY_STATE_TOPIC = 'xy_state_topic'
CONF_XY_VALUE_TEMPLATE = 'xy_value_template'
CONF_WHITE_VALUE_COMMAND_TOPIC = 'white_value_command_topic'
CONF_WHITE_VALUE_SCALE = 'white_value_scale'
CONF_WHITE_VALUE_STATE_TOPIC = 'white_value_state_topic'
CONF_WHITE_VALUE_TEMPLATE = 'white_value_template'
CONF_ON_COMMAND_TYPE = 'on_command_type'
DEFAULT_BRIGHTNESS_SCALE = 255
DEFAULT_NAME = 'MQTT Light'
DEFAULT_OPTIMISTIC = False
DEFAULT_PAYLOAD_OFF = 'OFF'
DEFAULT_PAYLOAD_ON = 'ON'
DEFAULT_WHITE_VALUE_SCALE = 255
DEFAULT_ON_COMMAND_TYPE = 'last'
VALUES_ON_COMMAND_TYPE = ['first', 'last', 'brightness']
PLATFORM_SCHEMA_BASIC = mqtt.MQTT_RW_PLATFORM_SCHEMA.extend({
vol.Optional(CONF_BRIGHTNESS_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_BRIGHTNESS_SCALE, default=DEFAULT_BRIGHTNESS_SCALE):
vol.All(vol.Coerce(int), vol.Range(min=1)),
vol.Optional(CONF_BRIGHTNESS_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_BRIGHTNESS_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_COLOR_TEMP_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_COLOR_TEMP_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_COLOR_TEMP_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_COLOR_TEMP_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_EFFECT_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_EFFECT_LIST): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_EFFECT_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_EFFECT_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_HS_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_HS_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_HS_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(CONF_RGB_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_RGB_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_RGB_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_RGB_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_STATE_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_WHITE_VALUE_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_WHITE_VALUE_SCALE, default=DEFAULT_WHITE_VALUE_SCALE):
vol.All(vol.Coerce(int), vol.Range(min=1)),
vol.Optional(CONF_WHITE_VALUE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_WHITE_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_XY_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_XY_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_XY_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_ON_COMMAND_TYPE, default=DEFAULT_ON_COMMAND_TYPE):
vol.In(VALUES_ON_COMMAND_TYPE),
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
}).extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema).extend(
mqtt.MQTT_JSON_ATTRS_SCHEMA.schema).extend(MQTT_LIGHT_SCHEMA_SCHEMA.schema)
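# ---------------------------------------------------------------------------
# Illustration only (not used by the platform): a standalone sketch of how the
# schema above applies defaults and coerces numeric options such as
# brightness_scale. The helper name is hypothetical; it only exercises the
# voluptuous validators already imported in this module.
# ---------------------------------------------------------------------------
def _demo_schema_defaults():
    demo_schema = vol.Schema({
        vol.Optional(CONF_BRIGHTNESS_SCALE, default=DEFAULT_BRIGHTNESS_SCALE):
            vol.All(vol.Coerce(int), vol.Range(min=1)),
    })
    # Missing keys fall back to their defaults ...
    assert demo_schema({}) == {CONF_BRIGHTNESS_SCALE: 255}
    # ... and string payloads are coerced to bounded integers.
    assert demo_schema({CONF_BRIGHTNESS_SCALE: '100'}) == {
        CONF_BRIGHTNESS_SCALE: 100}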
async def async_setup_entity_basic(config, async_add_entities, config_entry,
discovery_hash=None):
"""Set up a MQTT Light."""
config.setdefault(
CONF_STATE_VALUE_TEMPLATE, config.get(CONF_VALUE_TEMPLATE))
async_add_entities([MqttLight(config, config_entry, discovery_hash)])
# pylint: disable=too-many-ancestors
class MqttLight(MqttAttributes, MqttAvailability, MqttDiscoveryUpdate,
MqttEntityDeviceInfo, Light, RestoreEntity):
"""Representation of a MQTT light."""
def __init__(self, config, config_entry, discovery_hash):
"""Initialize MQTT light."""
self._state = False
self._sub_state = None
self._brightness = None
self._hs = None
self._color_temp = None
self._effect = None
self._white_value = None
self._topic = None
self._payload = None
self._templates = None
self._optimistic = False
self._optimistic_rgb = False
self._optimistic_brightness = False
self._optimistic_color_temp = False
self._optimistic_effect = False
self._optimistic_hs = False
self._optimistic_white_value = False
self._optimistic_xy = False
self._unique_id = config.get(CONF_UNIQUE_ID)
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_hash,
self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA_BASIC(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._config = config
topic = {
key: config.get(key) for key in (
CONF_BRIGHTNESS_COMMAND_TOPIC,
CONF_BRIGHTNESS_STATE_TOPIC,
CONF_COLOR_TEMP_COMMAND_TOPIC,
CONF_COLOR_TEMP_STATE_TOPIC,
CONF_COMMAND_TOPIC,
CONF_EFFECT_COMMAND_TOPIC,
CONF_EFFECT_STATE_TOPIC,
CONF_HS_COMMAND_TOPIC,
CONF_HS_STATE_TOPIC,
CONF_RGB_COMMAND_TOPIC,
CONF_RGB_STATE_TOPIC,
CONF_STATE_TOPIC,
CONF_WHITE_VALUE_COMMAND_TOPIC,
CONF_WHITE_VALUE_STATE_TOPIC,
CONF_XY_COMMAND_TOPIC,
CONF_XY_STATE_TOPIC,
)
}
self._topic = topic
self._payload = {
'on': config.get(CONF_PAYLOAD_ON),
'off': config.get(CONF_PAYLOAD_OFF),
}
self._templates = {
CONF_BRIGHTNESS: config.get(CONF_BRIGHTNESS_VALUE_TEMPLATE),
CONF_COLOR_TEMP: config.get(CONF_COLOR_TEMP_VALUE_TEMPLATE),
CONF_COLOR_TEMP_COMMAND_TEMPLATE:
config.get(CONF_COLOR_TEMP_COMMAND_TEMPLATE),
CONF_EFFECT: config.get(CONF_EFFECT_VALUE_TEMPLATE),
CONF_HS: config.get(CONF_HS_VALUE_TEMPLATE),
CONF_RGB: config.get(CONF_RGB_VALUE_TEMPLATE),
CONF_RGB_COMMAND_TEMPLATE: config.get(CONF_RGB_COMMAND_TEMPLATE),
CONF_STATE: config.get(CONF_STATE_VALUE_TEMPLATE),
CONF_WHITE_VALUE: config.get(CONF_WHITE_VALUE_TEMPLATE),
CONF_XY: config.get(CONF_XY_VALUE_TEMPLATE),
}
optimistic = config.get(CONF_OPTIMISTIC)
self._optimistic = optimistic or topic[CONF_STATE_TOPIC] is None
self._optimistic_rgb = \
optimistic or topic[CONF_RGB_STATE_TOPIC] is None
self._optimistic_brightness = (
optimistic or
(topic[CONF_BRIGHTNESS_COMMAND_TOPIC] is not None and
topic[CONF_BRIGHTNESS_STATE_TOPIC] is None) or
(topic[CONF_BRIGHTNESS_COMMAND_TOPIC] is None and
topic[CONF_RGB_STATE_TOPIC] is None))
self._optimistic_color_temp = (
optimistic or topic[CONF_COLOR_TEMP_STATE_TOPIC] is None)
self._optimistic_effect = (
optimistic or topic[CONF_EFFECT_STATE_TOPIC] is None)
self._optimistic_hs = \
optimistic or topic[CONF_HS_STATE_TOPIC] is None
self._optimistic_white_value = (
optimistic or topic[CONF_WHITE_VALUE_STATE_TOPIC] is None)
self._optimistic_xy = \
optimistic or topic[CONF_XY_STATE_TOPIC] is None
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
topics = {}
templates = {}
for key, tpl in list(self._templates.items()):
if tpl is None:
templates[key] = lambda value: value
else:
tpl.hass = self.hass
templates[key] = tpl.async_render_with_possible_json_value
last_state = await self.async_get_last_state()
@callback
def state_received(msg):
"""Handle new MQTT messages."""
payload = templates[CONF_STATE](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty state message from '%s'",
msg.topic)
return
if payload == self._payload['on']:
self._state = True
elif payload == self._payload['off']:
self._state = False
self.async_write_ha_state()
if self._topic[CONF_STATE_TOPIC] is not None:
topics[CONF_STATE_TOPIC] = {
'topic': self._topic[CONF_STATE_TOPIC],
'msg_callback': state_received,
'qos': self._config.get(CONF_QOS)}
elif self._optimistic and last_state:
self._state = last_state.state == STATE_ON
@callback
def brightness_received(msg):
"""Handle new MQTT messages for the brightness."""
payload = templates[CONF_BRIGHTNESS](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty brightness message from '%s'",
msg.topic)
return
device_value = float(payload)
percent_bright = \
device_value / self._config.get(CONF_BRIGHTNESS_SCALE)
self._brightness = percent_bright * 255
self.async_write_ha_state()
if self._topic[CONF_BRIGHTNESS_STATE_TOPIC] is not None:
topics[CONF_BRIGHTNESS_STATE_TOPIC] = {
'topic': self._topic[CONF_BRIGHTNESS_STATE_TOPIC],
'msg_callback': brightness_received,
'qos': self._config.get(CONF_QOS)}
self._brightness = 255
elif self._optimistic_brightness and last_state\
and last_state.attributes.get(ATTR_BRIGHTNESS):
self._brightness = last_state.attributes.get(ATTR_BRIGHTNESS)
elif self._topic[CONF_BRIGHTNESS_COMMAND_TOPIC] is not None:
self._brightness = 255
else:
self._brightness = None
@callback
def rgb_received(msg):
"""Handle new MQTT messages for RGB."""
payload = templates[CONF_RGB](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty rgb message from '%s'",
msg.topic)
return
rgb = [int(val) for val in payload.split(',')]
self._hs = color_util.color_RGB_to_hs(*rgb)
if self._topic[CONF_BRIGHTNESS_STATE_TOPIC] is None:
percent_bright = \
float(color_util.color_RGB_to_hsv(*rgb)[2]) / 100.0
self._brightness = percent_bright * 255
self.async_write_ha_state()
if self._topic[CONF_RGB_STATE_TOPIC] is not None:
topics[CONF_RGB_STATE_TOPIC] = {
'topic': self._topic[CONF_RGB_STATE_TOPIC],
'msg_callback': rgb_received,
'qos': self._config.get(CONF_QOS)}
self._hs = (0, 0)
if self._optimistic_rgb and last_state\
and last_state.attributes.get(ATTR_HS_COLOR):
self._hs = last_state.attributes.get(ATTR_HS_COLOR)
elif self._topic[CONF_RGB_COMMAND_TOPIC] is not None:
self._hs = (0, 0)
@callback
def color_temp_received(msg):
"""Handle new MQTT messages for color temperature."""
payload = templates[CONF_COLOR_TEMP](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty color temp message from '%s'",
msg.topic)
return
self._color_temp = int(payload)
self.async_write_ha_state()
if self._topic[CONF_COLOR_TEMP_STATE_TOPIC] is not None:
topics[CONF_COLOR_TEMP_STATE_TOPIC] = {
'topic': self._topic[CONF_COLOR_TEMP_STATE_TOPIC],
'msg_callback': color_temp_received,
'qos': self._config.get(CONF_QOS)}
self._color_temp = 150
if self._optimistic_color_temp and last_state\
and last_state.attributes.get(ATTR_COLOR_TEMP):
self._color_temp = last_state.attributes.get(ATTR_COLOR_TEMP)
elif self._topic[CONF_COLOR_TEMP_COMMAND_TOPIC] is not None:
self._color_temp = 150
else:
self._color_temp = None
@callback
def effect_received(msg):
"""Handle new MQTT messages for effect."""
payload = templates[CONF_EFFECT](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty effect message from '%s'",
msg.topic)
return
self._effect = payload
self.async_write_ha_state()
if self._topic[CONF_EFFECT_STATE_TOPIC] is not None:
topics[CONF_EFFECT_STATE_TOPIC] = {
'topic': self._topic[CONF_EFFECT_STATE_TOPIC],
'msg_callback': effect_received,
'qos': self._config.get(CONF_QOS)}
self._effect = 'none'
if self._optimistic_effect and last_state\
and last_state.attributes.get(ATTR_EFFECT):
self._effect = last_state.attributes.get(ATTR_EFFECT)
elif self._topic[CONF_EFFECT_COMMAND_TOPIC] is not None:
self._effect = 'none'
else:
self._effect = None
@callback
def hs_received(msg):
"""Handle new MQTT messages for hs color."""
payload = templates[CONF_HS](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty hs message from '%s'", msg.topic)
return
try:
hs_color = [float(val) for val in payload.split(',', 2)]
self._hs = hs_color
self.async_write_ha_state()
except ValueError:
_LOGGER.debug("Failed to parse hs state update: '%s'",
payload)
if self._topic[CONF_HS_STATE_TOPIC] is not None:
topics[CONF_HS_STATE_TOPIC] = {
'topic': self._topic[CONF_HS_STATE_TOPIC],
'msg_callback': hs_received,
'qos': self._config.get(CONF_QOS)}
self._hs = (0, 0)
if self._optimistic_hs and last_state\
and last_state.attributes.get(ATTR_HS_COLOR):
self._hs = last_state.attributes.get(ATTR_HS_COLOR)
elif self._topic[CONF_HS_COMMAND_TOPIC] is not None:
self._hs = (0, 0)
@callback
def white_value_received(msg):
"""Handle new MQTT messages for white value."""
payload = templates[CONF_WHITE_VALUE](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty white value message from '%s'",
msg.topic)
return
device_value = float(payload)
percent_white = \
device_value / self._config.get(CONF_WHITE_VALUE_SCALE)
self._white_value = percent_white * 255
self.async_write_ha_state()
if self._topic[CONF_WHITE_VALUE_STATE_TOPIC] is not None:
topics[CONF_WHITE_VALUE_STATE_TOPIC] = {
'topic': self._topic[CONF_WHITE_VALUE_STATE_TOPIC],
'msg_callback': white_value_received,
'qos': self._config.get(CONF_QOS)}
self._white_value = 255
elif self._optimistic_white_value and last_state\
and last_state.attributes.get(ATTR_WHITE_VALUE):
self._white_value = last_state.attributes.get(ATTR_WHITE_VALUE)
elif self._topic[CONF_WHITE_VALUE_COMMAND_TOPIC] is not None:
self._white_value = 255
else:
self._white_value = None
@callback
def xy_received(msg):
"""Handle new MQTT messages for xy color."""
payload = templates[CONF_XY](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty xy-color message from '%s'",
msg.topic)
return
xy_color = [float(val) for val in payload.split(',')]
self._hs = color_util.color_xy_to_hs(*xy_color)
self.async_write_ha_state()
if self._topic[CONF_XY_STATE_TOPIC] is not None:
topics[CONF_XY_STATE_TOPIC] = {
'topic': self._topic[CONF_XY_STATE_TOPIC],
'msg_callback': xy_received,
'qos': self._config.get(CONF_QOS)}
self._hs = (0, 0)
if self._optimistic_xy and last_state\
and last_state.attributes.get(ATTR_HS_COLOR):
self._hs = last_state.attributes.get(ATTR_HS_COLOR)
elif self._topic[CONF_XY_COMMAND_TOPIC] is not None:
self._hs = (0, 0)
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state,
topics)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
brightness = self._brightness
if brightness:
brightness = min(round(brightness), 255)
return brightness
@property
def hs_color(self):
"""Return the hs color value."""
return self._hs
@property
def color_temp(self):
"""Return the color temperature in mired."""
return self._color_temp
@property
def white_value(self):
"""Return the white property."""
white_value = self._white_value
if white_value:
white_value = min(round(white_value), 255)
return white_value
@property
def should_poll(self):
"""No polling needed for a MQTT light."""
return False
@property
def name(self):
"""Return the name of the device if any."""
return self._config.get(CONF_NAME)
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._config.get(CONF_EFFECT_LIST)
@property
def effect(self):
"""Return the current effect."""
return self._effect
@property
def supported_features(self):
"""Flag supported features."""
supported_features = 0
supported_features |= (
self._topic[CONF_RGB_COMMAND_TOPIC] is not None and
(SUPPORT_COLOR | SUPPORT_BRIGHTNESS))
supported_features |= (
self._topic[CONF_BRIGHTNESS_COMMAND_TOPIC] is not None and
SUPPORT_BRIGHTNESS)
supported_features |= (
self._topic[CONF_COLOR_TEMP_COMMAND_TOPIC] is not None and
SUPPORT_COLOR_TEMP)
supported_features |= (
self._topic[CONF_EFFECT_COMMAND_TOPIC] is not None and
SUPPORT_EFFECT)
supported_features |= (
self._topic[CONF_HS_COMMAND_TOPIC] is not None and SUPPORT_COLOR)
supported_features |= (
self._topic[CONF_WHITE_VALUE_COMMAND_TOPIC] is not None and
SUPPORT_WHITE_VALUE)
supported_features |= (
self._topic[CONF_XY_COMMAND_TOPIC] is not None and SUPPORT_COLOR)
return supported_features
async def async_turn_on(self, **kwargs):
"""Turn the device on.
This method is a coroutine.
"""
should_update = False
on_command_type = self._config.get(CONF_ON_COMMAND_TYPE)
if on_command_type == 'first':
mqtt.async_publish(
self.hass, self._topic[CONF_COMMAND_TOPIC],
self._payload['on'], self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
should_update = True
# If brightness is being used instead of an on command, make sure
# there is a brightness input. Either set the brightness to our
# saved value or the maximum value if this is the first call
elif on_command_type == 'brightness':
if ATTR_BRIGHTNESS not in kwargs:
kwargs[ATTR_BRIGHTNESS] = self._brightness if \
self._brightness else 255
if ATTR_HS_COLOR in kwargs and \
self._topic[CONF_RGB_COMMAND_TOPIC] is not None:
hs_color = kwargs[ATTR_HS_COLOR]
# If there's a brightness topic set, we don't want to scale the RGB
# values given using the brightness.
if self._topic[CONF_BRIGHTNESS_COMMAND_TOPIC] is not None:
brightness = 255
else:
brightness = kwargs.get(
ATTR_BRIGHTNESS, self._brightness if self._brightness else
255)
rgb = color_util.color_hsv_to_RGB(
hs_color[0], hs_color[1], brightness / 255 * 100)
tpl = self._templates[CONF_RGB_COMMAND_TEMPLATE]
if tpl:
rgb_color_str = tpl.async_render({
'red': rgb[0],
'green': rgb[1],
'blue': rgb[2],
})
else:
rgb_color_str = '{},{},{}'.format(*rgb)
mqtt.async_publish(
self.hass, self._topic[CONF_RGB_COMMAND_TOPIC],
rgb_color_str, self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic_rgb:
self._hs = kwargs[ATTR_HS_COLOR]
should_update = True
if ATTR_HS_COLOR in kwargs and \
self._topic[CONF_HS_COMMAND_TOPIC] is not None:
hs_color = kwargs[ATTR_HS_COLOR]
mqtt.async_publish(
self.hass, self._topic[CONF_HS_COMMAND_TOPIC],
'{},{}'.format(*hs_color), self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic_hs:
self._hs = kwargs[ATTR_HS_COLOR]
should_update = True
if ATTR_HS_COLOR in kwargs and \
self._topic[CONF_XY_COMMAND_TOPIC] is not None:
xy_color = color_util.color_hs_to_xy(*kwargs[ATTR_HS_COLOR])
mqtt.async_publish(
self.hass, self._topic[CONF_XY_COMMAND_TOPIC],
'{},{}'.format(*xy_color), self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic_xy:
self._hs = kwargs[ATTR_HS_COLOR]
should_update = True
if ATTR_BRIGHTNESS in kwargs and \
self._topic[CONF_BRIGHTNESS_COMMAND_TOPIC] is not None:
percent_bright = float(kwargs[ATTR_BRIGHTNESS]) / 255
brightness_scale = self._config.get(CONF_BRIGHTNESS_SCALE)
device_brightness = \
min(round(percent_bright * brightness_scale), brightness_scale)
mqtt.async_publish(
self.hass, self._topic[CONF_BRIGHTNESS_COMMAND_TOPIC],
device_brightness, self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic_brightness:
self._brightness = kwargs[ATTR_BRIGHTNESS]
should_update = True
elif ATTR_BRIGHTNESS in kwargs and ATTR_HS_COLOR not in kwargs and\
self._topic[CONF_RGB_COMMAND_TOPIC] is not None:
rgb = color_util.color_hsv_to_RGB(
self._hs[0], self._hs[1], kwargs[ATTR_BRIGHTNESS] / 255 * 100)
tpl = self._templates[CONF_RGB_COMMAND_TEMPLATE]
if tpl:
rgb_color_str = tpl.async_render({
'red': rgb[0],
'green': rgb[1],
'blue': rgb[2],
})
else:
rgb_color_str = '{},{},{}'.format(*rgb)
mqtt.async_publish(
self.hass, self._topic[CONF_RGB_COMMAND_TOPIC],
rgb_color_str, self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic_brightness:
self._brightness = kwargs[ATTR_BRIGHTNESS]
should_update = True
if ATTR_COLOR_TEMP in kwargs and \
self._topic[CONF_COLOR_TEMP_COMMAND_TOPIC] is not None:
color_temp = int(kwargs[ATTR_COLOR_TEMP])
tpl = self._templates[CONF_COLOR_TEMP_COMMAND_TEMPLATE]
if tpl:
color_temp = tpl.async_render({
'value': color_temp,
})
mqtt.async_publish(
self.hass, self._topic[CONF_COLOR_TEMP_COMMAND_TOPIC],
color_temp, self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic_color_temp:
self._color_temp = kwargs[ATTR_COLOR_TEMP]
should_update = True
if ATTR_EFFECT in kwargs and \
self._topic[CONF_EFFECT_COMMAND_TOPIC] is not None:
effect = kwargs[ATTR_EFFECT]
if effect in self._config.get(CONF_EFFECT_LIST):
mqtt.async_publish(
self.hass, self._topic[CONF_EFFECT_COMMAND_TOPIC],
effect, self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic_effect:
self._effect = kwargs[ATTR_EFFECT]
should_update = True
if ATTR_WHITE_VALUE in kwargs and \
self._topic[CONF_WHITE_VALUE_COMMAND_TOPIC] is not None:
percent_white = float(kwargs[ATTR_WHITE_VALUE]) / 255
white_scale = self._config.get(CONF_WHITE_VALUE_SCALE)
device_white_value = \
min(round(percent_white * white_scale), white_scale)
mqtt.async_publish(
self.hass, self._topic[CONF_WHITE_VALUE_COMMAND_TOPIC],
device_white_value, self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
if self._optimistic_white_value:
self._white_value = kwargs[ATTR_WHITE_VALUE]
should_update = True
if on_command_type == 'last':
mqtt.async_publish(self.hass, self._topic[CONF_COMMAND_TOPIC],
self._payload['on'], self._config.get(CONF_QOS),
self._config.get(CONF_RETAIN))
should_update = True
if self._optimistic:
# Optimistically assume that the light has changed state.
self._state = True
should_update = True
if should_update:
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the device off.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass, self._topic[CONF_COMMAND_TOPIC], self._payload['off'],
self._config.get(CONF_QOS), self._config.get(CONF_RETAIN))
if self._optimistic:
# Optimistically assume that the light has changed state.
self._state = False
self.async_write_ha_state()
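# ---------------------------------------------------------------------------
# Illustration only (not part of the entity): a standalone sketch of the
# brightness round trip between Home Assistant's 0..255 range and a device's
# brightness_scale, mirroring brightness_received() and async_turn_on() above.
# The helper name and the example scale of 100 are hypothetical.
# ---------------------------------------------------------------------------
def _demo_brightness_scaling():
    brightness_scale = 100  # device reports and accepts 0..100

    # Device -> Home Assistant, as in brightness_received().
    device_value = 50.0
    ha_brightness = device_value / brightness_scale * 255
    assert round(ha_brightness) == 128

    # Home Assistant -> device, as in async_turn_on().
    requested = 128
    device_brightness = min(round(requested / 255 * brightness_scale),
                            brightness_scale)
    assert device_brightness == 50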
|
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
int_or_none,
parse_resolution,
try_get,
unified_timestamp,
url_or_none,
urljoin,
)
class PeerTubeIE(InfoExtractor):
_INSTANCES_RE = r'''(?:
# Taken from https://instances.joinpeertube.org/instances
peertube\.rainbowswingers\.net|
tube\.stanisic\.nl|
peer\.suiri\.us|
medias\.libox\.fr|
videomensoif\.ynh\.fr|
peertube\.travelpandas\.eu|
peertube\.rachetjay\.fr|
peertube\.montecsys\.fr|
tube\.eskuero\.me|
peer\.tube|
peertube\.umeahackerspace\.se|
tube\.nx-pod\.de|
video\.monsieurbidouille\.fr|
tube\.openalgeria\.org|
vid\.lelux\.fi|
video\.anormallostpod\.ovh|
tube\.crapaud-fou\.org|
peertube\.stemy\.me|
lostpod\.space|
exode\.me|
peertube\.snargol\.com|
vis\.ion\.ovh|
videosdulib\.re|
v\.mbius\.io|
videos\.judrey\.eu|
peertube\.osureplayviewer\.xyz|
peertube\.mathieufamily\.ovh|
www\.videos-libr\.es|
fightforinfo\.com|
peertube\.fediverse\.ru|
peertube\.oiseauroch\.fr|
video\.nesven\.eu|
v\.bearvideo\.win|
video\.qoto\.org|
justporn\.cc|
video\.vny\.fr|
peervideo\.club|
tube\.taker\.fr|
peertube\.chantierlibre\.org|
tube\.ipfixe\.info|
tube\.kicou\.info|
tube\.dodsorf\.as|
videobit\.cc|
video\.yukari\.moe|
videos\.elbinario\.net|
hkvideo\.live|
pt\.tux\.tf|
www\.hkvideo\.live|
FIGHTFORINFO\.com|
pt\.765racing\.com|
peertube\.gnumeria\.eu\.org|
nordenmedia\.com|
peertube\.co\.uk|
tube\.darfweb\.eu|
tube\.kalah-france\.org|
0ch\.in|
vod\.mochi\.academy|
film\.node9\.org|
peertube\.hatthieves\.es|
video\.fitchfamily\.org|
peertube\.ddns\.net|
video\.ifuncle\.kr|
video\.fdlibre\.eu|
tube\.22decembre\.eu|
peertube\.harmoniescreatives\.com|
tube\.fabrigli\.fr|
video\.thedwyers\.co|
video\.bruitbruit\.com|
peertube\.foxfam\.club|
peer\.philoxweb\.be|
videos\.bugs\.social|
peertube\.malbert\.xyz|
peertube\.bilange\.ca|
libretube\.net|
diytelevision\.com|
peertube\.fedilab\.app|
libre\.video|
video\.mstddntfdn\.online|
us\.tv|
peertube\.sl-network\.fr|
peertube\.dynlinux\.io|
peertube\.david\.durieux\.family|
peertube\.linuxrocks\.online|
peerwatch\.xyz|
v\.kretschmann\.social|
tube\.otter\.sh|
yt\.is\.nota\.live|
tube\.dragonpsi\.xyz|
peertube\.boneheadmedia\.com|
videos\.funkwhale\.audio|
watch\.44con\.com|
peertube\.gcaillaut\.fr|
peertube\.icu|
pony\.tube|
spacepub\.space|
tube\.stbr\.io|
v\.mom-gay\.faith|
tube\.port0\.xyz|
peertube\.simounet\.net|
play\.jergefelt\.se|
peertube\.zeteo\.me|
tube\.danq\.me|
peertube\.kerenon\.com|
tube\.fab-l3\.org|
tube\.calculate\.social|
peertube\.mckillop\.org|
tube\.netzspielplatz\.de|
vod\.ksite\.de|
peertube\.laas\.fr|
tube\.govital\.net|
peertube\.stephenson\.cc|
bistule\.nohost\.me|
peertube\.kajalinifi\.de|
video\.ploud\.jp|
video\.omniatv\.com|
peertube\.ffs2play\.fr|
peertube\.leboulaire\.ovh|
peertube\.tronic-studio\.com|
peertube\.public\.cat|
peertube\.metalbanana\.net|
video\.1000i100\.fr|
peertube\.alter-nativ-voll\.de|
tube\.pasa\.tf|
tube\.worldofhauru\.xyz|
pt\.kamp\.site|
peertube\.teleassist\.fr|
videos\.mleduc\.xyz|
conf\.tube|
media\.privacyinternational\.org|
pt\.forty-two\.nl|
video\.halle-leaks\.de|
video\.grosskopfgames\.de|
peertube\.schaeferit\.de|
peertube\.jackbot\.fr|
tube\.extinctionrebellion\.fr|
peertube\.f-si\.org|
video\.subak\.ovh|
videos\.koweb\.fr|
peertube\.zergy\.net|
peertube\.roflcopter\.fr|
peertube\.floss-marketing-school\.com|
vloggers\.social|
peertube\.iriseden\.eu|
videos\.ubuntu-paris\.org|
peertube\.mastodon\.host|
armstube\.com|
peertube\.s2s\.video|
peertube\.lol|
tube\.open-plug\.eu|
open\.tube|
peertube\.ch|
peertube\.normandie-libre\.fr|
peertube\.slat\.org|
video\.lacaveatonton\.ovh|
peertube\.uno|
peertube\.servebeer\.com|
peertube\.fedi\.quebec|
tube\.h3z\.jp|
tube\.plus200\.com|
peertube\.eric\.ovh|
tube\.metadocs\.cc|
tube\.unmondemeilleur\.eu|
gouttedeau\.space|
video\.antirep\.net|
nrop\.cant\.at|
tube\.ksl-bmx\.de|
tube\.plaf\.fr|
tube\.tchncs\.de|
video\.devinberg\.com|
hitchtube\.fr|
peertube\.kosebamse\.com|
yunopeertube\.myddns\.me|
peertube\.varney\.fr|
peertube\.anon-kenkai\.com|
tube\.maiti\.info|
tubee\.fr|
videos\.dinofly\.com|
toobnix\.org|
videotape\.me|
voca\.tube|
video\.heromuster\.com|
video\.lemediatv\.fr|
video\.up\.edu\.ph|
balafon\.video|
video\.ivel\.fr|
thickrips\.cloud|
pt\.laurentkruger\.fr|
video\.monarch-pass\.net|
peertube\.artica\.center|
video\.alternanet\.fr|
indymotion\.fr|
fanvid\.stopthatimp\.net|
video\.farci\.org|
v\.lesterpig\.com|
video\.okaris\.de|
tube\.pawelko\.net|
peertube\.mablr\.org|
tube\.fede\.re|
pytu\.be|
evertron\.tv|
devtube\.dev-wiki\.de|
raptube\.antipub\.org|
video\.selea\.se|
peertube\.mygaia\.org|
video\.oh14\.de|
peertube\.livingutopia\.org|
peertube\.the-penguin\.de|
tube\.thechangebook\.org|
tube\.anjara\.eu|
pt\.pube\.tk|
video\.samedi\.pm|
mplayer\.demouliere\.eu|
widemus\.de|
peertube\.me|
peertube\.zapashcanon\.fr|
video\.latavernedejohnjohn\.fr|
peertube\.pcservice46\.fr|
peertube\.mazzonetto\.eu|
video\.irem\.univ-paris-diderot\.fr|
video\.livecchi\.cloud|
alttube\.fr|
video\.coop\.tools|
video\.cabane-libre\.org|
peertube\.openstreetmap\.fr|
videos\.alolise\.org|
irrsinn\.video|
video\.antopie\.org|
scitech\.video|
tube2\.nemsia\.org|
video\.amic37\.fr|
peertube\.freeforge\.eu|
video\.arbitrarion\.com|
video\.datsemultimedia\.com|
stoptrackingus\.tv|
peertube\.ricostrongxxx\.com|
docker\.videos\.lecygnenoir\.info|
peertube\.togart\.de|
tube\.postblue\.info|
videos\.domainepublic\.net|
peertube\.cyber-tribal\.com|
video\.gresille\.org|
peertube\.dsmouse\.net|
cinema\.yunohost\.support|
tube\.theocevaer\.fr|
repro\.video|
tube\.4aem\.com|
quaziinc\.com|
peertube\.metawurst\.space|
videos\.wakapo\.com|
video\.ploud\.fr|
video\.freeradical\.zone|
tube\.valinor\.fr|
refuznik\.video|
pt\.kircheneuenburg\.de|
peertube\.asrun\.eu|
peertube\.lagob\.fr|
videos\.side-ways\.net|
91video\.online|
video\.valme\.io|
video\.taboulisme\.com|
videos-libr\.es|
tv\.mooh\.fr|
nuage\.acostey\.fr|
video\.monsieur-a\.fr|
peertube\.librelois\.fr|
videos\.pair2jeux\.tube|
videos\.pueseso\.club|
peer\.mathdacloud\.ovh|
media\.assassinate-you\.net|
vidcommons\.org|
ptube\.rousset\.nom\.fr|
tube\.cyano\.at|
videos\.squat\.net|
video\.iphodase\.fr|
peertube\.makotoworkshop\.org|
peertube\.serveur\.slv-valbonne\.fr|
vault\.mle\.party|
hostyour\.tv|
videos\.hack2g2\.fr|
libre\.tube|
pire\.artisanlogiciel\.net|
videos\.numerique-en-commun\.fr|
video\.netsyms\.com|
video\.die-partei\.social|
video\.writeas\.org|
peertube\.swarm\.solvingmaz\.es|
tube\.pericoloso\.ovh|
watching\.cypherpunk\.observer|
videos\.adhocmusic\.com|
tube\.rfc1149\.net|
peertube\.librelabucm\.org|
videos\.numericoop\.fr|
peertube\.koehn\.com|
peertube\.anarchmusicall\.net|
tube\.kampftoast\.de|
vid\.y-y\.li|
peertube\.xtenz\.xyz|
diode\.zone|
tube\.egf\.mn|
peertube\.nomagic\.uk|
visionon\.tv|
videos\.koumoul\.com|
video\.rastapuls\.com|
video\.mantlepro\.com|
video\.deadsuperhero\.com|
peertube\.musicstudio\.pro|
peertube\.we-keys\.fr|
artitube\.artifaille\.fr|
peertube\.ethernia\.net|
tube\.midov\.pl|
peertube\.fr|
watch\.snoot\.tube|
peertube\.donnadieu\.fr|
argos\.aquilenet\.fr|
tube\.nemsia\.org|
tube\.bruniau\.net|
videos\.darckoune\.moe|
tube\.traydent\.info|
dev\.videos\.lecygnenoir\.info|
peertube\.nayya\.org|
peertube\.live|
peertube\.mofgao\.space|
video\.lequerrec\.eu|
peertube\.amicale\.net|
aperi\.tube|
tube\.ac-lyon\.fr|
video\.lw1\.at|
www\.yiny\.org|
videos\.pofilo\.fr|
tube\.lou\.lt|
choob\.h\.etbus\.ch|
tube\.hoga\.fr|
peertube\.heberge\.fr|
video\.obermui\.de|
videos\.cloudfrancois\.fr|
betamax\.video|
video\.typica\.us|
tube\.piweb\.be|
video\.blender\.org|
peertube\.cat|
tube\.kdy\.ch|
pe\.ertu\.be|
peertube\.social|
videos\.lescommuns\.org|
tv\.datamol\.org|
videonaute\.fr|
dialup\.express|
peertube\.nogafa\.org|
megatube\.lilomoino\.fr|
peertube\.tamanoir\.foucry\.net|
peertube\.devosi\.org|
peertube\.1312\.media|
tube\.bootlicker\.party|
skeptikon\.fr|
video\.blueline\.mg|
tube\.homecomputing\.fr|
tube\.ouahpiti\.info|
video\.tedomum\.net|
video\.g3l\.org|
fontube\.fr|
peertube\.gaialabs\.ch|
tube\.kher\.nl|
peertube\.qtg\.fr|
video\.migennes\.net|
tube\.p2p\.legal|
troll\.tv|
videos\.iut-orsay\.fr|
peertube\.solidev\.net|
videos\.cemea\.org|
video\.passageenseine\.fr|
videos\.festivalparminous\.org|
peertube\.touhoppai\.moe|
sikke\.fi|
peer\.hostux\.social|
share\.tube|
peertube\.walkingmountains\.fr|
videos\.benpro\.fr|
peertube\.parleur\.net|
peertube\.heraut\.eu|
tube\.aquilenet\.fr|
peertube\.gegeweb\.eu|
framatube\.org|
thinkerview\.video|
tube\.conferences-gesticulees\.net|
peertube\.datagueule\.tv|
video\.lqdn\.fr|
tube\.mochi\.academy|
media\.zat\.im|
video\.colibris-outilslibres\.org|
tube\.svnet\.fr|
peertube\.video|
peertube3\.cpy\.re|
peertube2\.cpy\.re|
videos\.tcit\.fr|
peertube\.cpy\.re
)'''
_UUID_RE = r'[\da-fA-F]{8}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{12}'
_VALID_URL = r'''(?x)
(?:
peertube:(?P<host>[^:]+):|
https?://(?P<host_2>%s)/(?:videos/(?:watch|embed)|api/v\d/videos)/
)
(?P<id>%s)
''' % (_INSTANCES_RE, _UUID_RE)
_TESTS = [{
'url': 'https://peertube.cpy.re/videos/watch/2790feb0-8120-4e63-9af3-c943c69f5e6c',
'md5': '80f24ff364cc9d333529506a263e7feb',
'info_dict': {
'id': '2790feb0-8120-4e63-9af3-c943c69f5e6c',
'ext': 'mp4',
'title': 'wow',
'description': 'wow such video, so gif',
'thumbnail': r're:https?://.*\.(?:jpg|png)',
'timestamp': 1519297480,
'upload_date': '20180222',
'uploader': 'Luclu7',
'uploader_id': '7fc42640-efdb-4505-a45d-a15b1a5496f1',
'uploader_url': 'https://peertube.nsa.ovh/accounts/luclu7',
'license': 'Unknown',
'duration': 3,
'view_count': int,
'like_count': int,
'dislike_count': int,
'tags': list,
'categories': list,
}
}, {
'url': 'https://peertube.tamanoir.foucry.net/videos/watch/0b04f13d-1e18-4f1d-814e-4979aa7c9c44',
'only_matching': True,
}, {
# nsfw
'url': 'https://tube.22decembre.eu/videos/watch/9bb88cd3-9959-46d9-9ab9-33d2bb704c39',
'only_matching': True,
}, {
'url': 'https://tube.22decembre.eu/videos/embed/fed67262-6edb-4d1c-833b-daa9085c71d7',
'only_matching': True,
}, {
'url': 'https://tube.openalgeria.org/api/v1/videos/c1875674-97d0-4c94-a058-3f7e64c962e8',
'only_matching': True,
}, {
'url': 'peertube:video.blender.org:b37a5b9f-e6b5-415c-b700-04a5cd6ec205',
'only_matching': True,
}]
@staticmethod
def _extract_peertube_url(webpage, source_url):
mobj = re.match(
r'https?://(?P<host>[^/]+)/videos/(?:watch|embed)/(?P<id>%s)'
% PeerTubeIE._UUID_RE, source_url)
if mobj and any(p in webpage for p in (
'<title>PeerTube<',
'There will be other non JS-based clients to access PeerTube',
'>We are sorry but it seems that PeerTube is not compatible with your web browser.<')):
return 'peertube:%s:%s' % mobj.group('host', 'id')
@staticmethod
def _extract_urls(webpage, source_url):
entries = re.findall(
r'''(?x)<iframe[^>]+\bsrc=["\'](?P<url>(?:https?:)?//%s/videos/embed/%s)'''
% (PeerTubeIE._INSTANCES_RE, PeerTubeIE._UUID_RE), webpage)
if not entries:
peertube_url = PeerTubeIE._extract_peertube_url(webpage, source_url)
if peertube_url:
entries = [peertube_url]
return entries
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
host = mobj.group('host') or mobj.group('host_2')
video_id = mobj.group('id')
video = self._download_json(
'https://%s/api/v1/videos/%s' % (host, video_id), video_id)
title = video['name']
formats = []
for file_ in video['files']:
if not isinstance(file_, dict):
continue
file_url = url_or_none(file_.get('fileUrl'))
if not file_url:
continue
file_size = int_or_none(file_.get('size'))
format_id = try_get(
file_, lambda x: x['resolution']['label'], compat_str)
f = parse_resolution(format_id)
f.update({
'url': file_url,
'format_id': format_id,
'filesize': file_size,
})
formats.append(f)
self._sort_formats(formats)
def account_data(field):
return try_get(video, lambda x: x['account'][field], compat_str)
category = try_get(video, lambda x: x['category']['label'], compat_str)
categories = [category] if category else None
nsfw = video.get('nsfw')
if isinstance(nsfw, bool):
age_limit = 18 if nsfw else 0
else:
age_limit = None
return {
'id': video_id,
'title': title,
'description': video.get('description'),
'thumbnail': urljoin(url, video.get('thumbnailPath')),
'timestamp': unified_timestamp(video.get('publishedAt')),
'uploader': account_data('displayName'),
'uploader_id': account_data('uuid'),
'uploader_url': account_data('url'),
'license': try_get(
video, lambda x: x['licence']['label'], compat_str),
'duration': int_or_none(video.get('duration')),
'view_count': int_or_none(video.get('views')),
'like_count': int_or_none(video.get('likes')),
'dislike_count': int_or_none(video.get('dislikes')),
'age_limit': age_limit,
'tags': try_get(video, lambda x: x['tags'], list),
'categories': categories,
'formats': formats,
}
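# ---------------------------------------------------------------------------
# Illustration only (not part of the extractor): a standalone sketch of the
# two URL shapes _VALID_URL accepts, using the test URLs above. The helper
# name is hypothetical.
# ---------------------------------------------------------------------------
def _demo_valid_url():
    mobj = re.match(
        PeerTubeIE._VALID_URL,
        'https://peertube.cpy.re/videos/watch/2790feb0-8120-4e63-9af3-c943c69f5e6c')
    assert mobj.group('host_2') == 'peertube.cpy.re'
    assert mobj.group('id') == '2790feb0-8120-4e63-9af3-c943c69f5e6c'

    mobj = re.match(
        PeerTubeIE._VALID_URL,
        'peertube:video.blender.org:b37a5b9f-e6b5-415c-b700-04a5cd6ec205')
    assert mobj.group('host') == 'video.blender.org'
    assert mobj.group('id') == 'b37a5b9f-e6b5-415c-b700-04a5cd6ec205'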
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2019, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import command
import mesh_cop
import thread_cert
from pktverify.consts import (
    MLE_DATA_RESPONSE, LEAD_PET_URI, LEAD_KA_URI, MGMT_COMMISSIONER_SET_URI,
    NM_CHANNEL_TLV, NM_COMMISSIONER_ID_TLV, NM_COMMISSIONER_SESSION_ID_TLV,
    NM_STATE_TLV, NM_STEERING_DATA_TLV, NM_BORDER_AGENT_LOCATOR_TLV,
    LEADER_DATA_TLV, NETWORK_DATA_TLV, ACTIVE_TIMESTAMP_TLV,
    SOURCE_ADDRESS_TLV, NWD_COMMISSIONING_DATA_TLV, MESHCOP_ACCEPT,
    MESHCOP_REJECT, LEADER_ALOC)
from pktverify.packet_verifier import PacketVerifier
from pktverify.bytes import Bytes
COMMISSIONER = 1
LEADER = 2
# Test Purpose and Description:
# -----------------------------
# The purpose of this test case is to verify Leader's and active Commissioner's behavior via
# MGMT_COMMISSIONER_SET request and response
#
# Test Topology:
# -------------
# Commissioner
# |
# Leader
#
# DUT Types:
# ----------
# Leader
# Commissioner
class Cert_9_2_02_MGMTCommissionerSet(thread_cert.TestCase):
SUPPORT_NCP = False
TOPOLOGY = {
COMMISSIONER: {
'name': 'COMMISSIONER',
'mode': 'rdn',
'panid': 0xface,
'router_selection_jitter': 1,
'allowlist': [LEADER]
},
LEADER: {
'name': 'LEADER',
'mode': 'rdn',
'panid': 0xface,
'router_selection_jitter': 1,
'allowlist': [COMMISSIONER]
},
}
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[COMMISSIONER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
self.simulator.get_messages_sent_by(LEADER)
self.collect_rlocs()
self.collect_rloc16s()
self.nodes[COMMISSIONER].commissioner_start()
self.simulator.go(3)
leader_messages = self.simulator.get_messages_sent_by(LEADER)
msg = leader_messages.next_coap_message('2.04', assert_enabled=True)
commissioner_session_id_tlv = command.get_sub_tlv(msg.coap.payload, mesh_cop.CommissionerSessionId)
steering_data_tlv = mesh_cop.SteeringData(bytes([0xff]))
self.nodes[COMMISSIONER].commissioner_mgmtset_with_tlvs([steering_data_tlv])
self.simulator.go(5)
self.nodes[COMMISSIONER].commissioner_mgmtset_with_tlvs([steering_data_tlv, commissioner_session_id_tlv])
self.simulator.go(5)
border_agent_locator_tlv = mesh_cop.BorderAgentLocator(0x0400)
self.nodes[COMMISSIONER].commissioner_mgmtset_with_tlvs(
[commissioner_session_id_tlv, border_agent_locator_tlv])
self.simulator.go(5)
self.nodes[COMMISSIONER].commissioner_mgmtset_with_tlvs([
steering_data_tlv,
commissioner_session_id_tlv,
border_agent_locator_tlv,
])
self.simulator.go(5)
self.nodes[COMMISSIONER].commissioner_mgmtset_with_tlvs(
[mesh_cop.CommissionerSessionId(0xffff), steering_data_tlv])
self.simulator.go(5)
self.nodes[COMMISSIONER].commissioner_mgmtset_with_tlvs([
commissioner_session_id_tlv,
steering_data_tlv,
mesh_cop.Channel(0x0, 0x0),
])
self.simulator.go(5)
leader_rloc = self.nodes[LEADER].get_rloc()
commissioner_rloc = self.nodes[COMMISSIONER].get_rloc()
self.assertTrue(self.nodes[COMMISSIONER].ping(leader_rloc))
self.simulator.go(1)
self.assertTrue(self.nodes[LEADER].ping(commissioner_rloc))
def verify(self, pv):
pkts = pv.pkts
pv.summary.show()
LEADER = pv.vars['LEADER']
LEADER_RLOC = pv.vars['LEADER_RLOC']
LEADER_RLOC16 = pv.vars['LEADER_RLOC16']
COMMISSIONER = pv.vars['COMMISSIONER']
COMMISSIONER_RLOC = pv.vars['COMMISSIONER_RLOC']
# Step 1: Ensure topology is formed correctly
pv.verify_attached('COMMISSIONER', 'LEADER')
# Step 2: Commissioner sends a Set Commissioner Dataset Request (MGMT_COMMISSIONER_SET.req)
# to Leader Anycast or Routing Locator:
# CoAP Request URI
# CON POST coap://<L>:MM/c/cs
# CoAP Payload
# (missing Commissioner Session ID TLV)
# Steering Data TLV (0xFF)
_mgmt_set_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
filter_coap_request(MGMT_COMMISSIONER_SET_URI).\
filter(lambda p:
[NM_STEERING_DATA_TLV] == p.coap.tlv.type and\
p.thread_meshcop.tlv.steering_data == Bytes('ff')
).\
must_next()
# Step 3: Leader sends a Set Commissioner Dataset Response (MGMT_COMMISSIONER_SET.rsp) to
# Commissioner:
# CoAP Response Code
# 2.04 Changed
# CoAP Payload
# State TLV (value = Reject)
pkts.filter_ipv6_src_dst(_mgmt_set_pkt.ipv6.dst, COMMISSIONER_RLOC).\
filter_coap_ack(MGMT_COMMISSIONER_SET_URI).\
filter(lambda p:
[NM_STATE_TLV] == p.coap.tlv.type and\
p.thread_meshcop.tlv.state == MESHCOP_REJECT
).\
must_next()
# Step 4: Commissioner sends a Set Commissioner Dataset Request (MGMT_COMMISSIONER_SET.req)
# to Leader Anycast or Routing Locator:
# CoAP Request URI
# CON POST coap://<L>:MM/c/cs
# CoAP Payload
# Commissioner Session ID TLV
# Steering Data TLV (0xFF)
_mgmt_set_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
filter_coap_request(MGMT_COMMISSIONER_SET_URI).\
filter(lambda p: {
NM_COMMISSIONER_SESSION_ID_TLV,
NM_STEERING_DATA_TLV
} == set(p.thread_meshcop.tlv.type) and\
p.thread_meshcop.tlv.steering_data == Bytes('ff')
).\
must_next()
# Step 5: Leader sends a Set Commissioner Dataset Response (MGMT_COMMISSIONER_SET.rsp) to
# Commissioner:
# CoAP Response Code
# 2.04 Changed
# CoAP Payload
# State TLV (value = Accept)
pkts.filter_ipv6_src_dst(_mgmt_set_pkt.ipv6.dst, COMMISSIONER_RLOC).\
filter_coap_ack(MGMT_COMMISSIONER_SET_URI).\
filter(lambda p:
[NM_STATE_TLV] == p.coap.tlv.type and\
p.thread_meshcop.tlv.state == MESHCOP_ACCEPT
).\
must_next()
# Step 6: Leader sends a MLE Data Response to the network with the
# following TLVs:
# - Active Timestamp TLV
# - Leader Data TLV
# - Network Data TLV
# - Source Address TLV
pkts.filter_wpan_src64(LEADER).\
filter_LLANMA().\
filter_mle_cmd(MLE_DATA_RESPONSE).\
filter(lambda p: {
NETWORK_DATA_TLV,
SOURCE_ADDRESS_TLV,
ACTIVE_TIMESTAMP_TLV,
LEADER_DATA_TLV
} == set(p.mle.tlv.type) and\
{
NWD_COMMISSIONING_DATA_TLV
} == set(p.thread_nwd.tlv.type) and\
{
NM_BORDER_AGENT_LOCATOR_TLV,
NM_COMMISSIONER_SESSION_ID_TLV,
NM_STEERING_DATA_TLV
} == set(p.thread_meshcop.tlv.type) and\
p.thread_nwd.tlv.stable == [0]
).\
must_next()
# Step 7: Commissioner sends a Set Commissioner Dataset Request (MGMT_COMMISSIONER_SET.req)
# to Leader Anycast or Routing Locator:
# CoAP Request URI
# CON POST coap://<L>:MM/c/cs
# CoAP Payload
# Commissioner Session ID TLV
# Border Agent Locator TLV (0x0400) (not allowed TLV)
_mgmt_set_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
filter_coap_request(MGMT_COMMISSIONER_SET_URI).\
filter(lambda p: {
NM_COMMISSIONER_SESSION_ID_TLV,
NM_BORDER_AGENT_LOCATOR_TLV
} == set(p.thread_meshcop.tlv.type) and\
p.thread_meshcop.tlv.ba_locator == 0x0400
).\
must_next()
# Step 8: Leader sends a Set Commissioner Dataset Response (MGMT_COMMISSIONER_SET.rsp) to
# Commissioner:
# CoAP Response Code
# 2.04 Changed
# CoAP Payload
# State TLV (value = Reject)
pkts.filter_ipv6_src_dst(_mgmt_set_pkt.ipv6.dst, COMMISSIONER_RLOC).\
filter_coap_ack(MGMT_COMMISSIONER_SET_URI).\
filter(lambda p:
[NM_STATE_TLV] == p.coap.tlv.type and\
p.thread_meshcop.tlv.state == MESHCOP_REJECT
).\
must_next()
# Step 9: Commissioner sends a Set Commissioner Dataset Request (MGMT_COMMISSIONER_SET.req)
# to Leader Anycast or Routing Locator:
# CoAP Request URI
# CON POST coap://<L>:MM/c/cs
# CoAP Payload
# Commissioner Session ID TLV
# Steering Data TLV (0xFF)
# Border Agent Locator TLV (0x0400) (not allowed TLV)
_mgmt_set_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
filter_coap_request(MGMT_COMMISSIONER_SET_URI).\
filter(lambda p: {
NM_COMMISSIONER_SESSION_ID_TLV,
NM_STEERING_DATA_TLV,
NM_BORDER_AGENT_LOCATOR_TLV
} == set(p.thread_meshcop.tlv.type) and\
p.thread_meshcop.tlv.ba_locator == 0x0400 and\
p.thread_meshcop.tlv.steering_data == Bytes('ff')
).\
must_next()
# Step 10: Leader sends a Set Commissioner Dataset Response (MGMT_COMMISSIONER_SET.rsp) to
# Commissioner:
# CoAP Response Code
# 2.04 Changed
# CoAP Payload
# State TLV (value = Reject)
pkts.filter_ipv6_src_dst(_mgmt_set_pkt.ipv6.dst, COMMISSIONER_RLOC).\
filter_coap_ack(MGMT_COMMISSIONER_SET_URI).\
filter(lambda p:
[NM_STATE_TLV] == p.coap.tlv.type and\
p.thread_meshcop.tlv.state == MESHCOP_REJECT
).\
must_next()
# Step 11: Commissioner sends a Set Commissioner Dataset Request (MGMT_COMMISSIONER_SET.req)
# to Leader Anycast or Routing Locator:
# CoAP Request URI
# CON POST coap://<L>:MM/c/cs
# CoAP Payload
# Commissioner Session ID TLV (0xFFFF) (invalid value)
# Steering Data TLV (0xFF)
_mgmt_set_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
filter_coap_request(MGMT_COMMISSIONER_SET_URI).\
filter(lambda p: {
NM_COMMISSIONER_SESSION_ID_TLV,
NM_STEERING_DATA_TLV
} == set(p.thread_meshcop.tlv.type) and\
p.thread_meshcop.tlv.commissioner_sess_id == 0xFFFF and\
p.thread_meshcop.tlv.steering_data == Bytes('ff')
).\
must_next()
# Step 12: Leader sends a Set Commissioner Dataset Response (MGMT_COMMISSIONER_SET.rsp) to
# Commissioner:
# CoAP Response Code
# 2.04 Changed
# CoAP Payload
# State TLV (value = Reject)
pkts.filter_ipv6_src_dst(_mgmt_set_pkt.ipv6.dst, COMMISSIONER_RLOC).\
filter_coap_ack(MGMT_COMMISSIONER_SET_URI).\
filter(lambda p:
[NM_STATE_TLV] == p.coap.tlv.type and\
p.thread_meshcop.tlv.state == MESHCOP_REJECT
).\
must_next()
# Step 13: Commissioner sends a Set Commissioner Dataset Request (MGMT_COMMISSIONER_SET.req)
# to Leader Anycast or Routing Locator:
# CoAP Request URI
# CON POST coap://<L>:MM/c/cs
# CoAP Payload
# Commissioner Session ID TLV
# Steering Data TLV (0xFF)
# Channel TLV (not allowed TLV)
_mgmt_set_pkt = pkts.filter_wpan_src64(COMMISSIONER).\
filter_ipv6_2dsts(LEADER_ALOC, LEADER_RLOC).\
filter_coap_request(MGMT_COMMISSIONER_SET_URI).\
filter(lambda p: {
NM_COMMISSIONER_SESSION_ID_TLV,
NM_STEERING_DATA_TLV,
NM_CHANNEL_TLV
} == set(p.thread_meshcop.tlv.type) and\
p.thread_meshcop.tlv.steering_data == Bytes('ff')
).\
must_next()
# Step 14: Leader sends a Set Commissioner Dataset Response (MGMT_COMMISSIONER_SET.rsp) to
# Commissioner:
# CoAP Response Code
# 2.04 Changed
# CoAP Payload
# State TLV (value = Accept)
pkts.filter_ipv6_src_dst(_mgmt_set_pkt.ipv6.dst, COMMISSIONER_RLOC).\
filter_coap_ack(MGMT_COMMISSIONER_SET_URI).\
filter(lambda p:
[NM_STATE_TLV] == p.coap.tlv.type and\
p.thread_meshcop.tlv.state == MESHCOP_ACCEPT
).\
must_next()
# Step 15: Verify connectivity by sending an ICMPv6 Echo Request to the DUT mesh local address
_pkt = pkts.filter_ping_request().\
filter_ipv6_src_dst(COMMISSIONER_RLOC, LEADER_RLOC).\
must_next()
pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
filter_ipv6_src_dst(LEADER_RLOC, COMMISSIONER_RLOC).\
must_next()
_pkt = pkts.filter_ping_request().\
filter_ipv6_src_dst(LEADER_RLOC, COMMISSIONER_RLOC).\
must_next()
pkts.filter_ping_reply(identifier=_pkt.icmpv6.echo.identifier).\
filter_ipv6_src_dst(COMMISSIONER_RLOC, LEADER_RLOC).\
must_next()
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = '[email protected] (Jeff Scudder)'
"""Provides classes and methods for working with JSON-C.
This module is experimental and subject to backwards incompatible changes.
Jsonc: Class which represents JSON-C data and provides pythonic member
access which is a bit cleaner than working with plain old dicts.
parse_json: Converts a JSON-C string into a Jsonc object.
jsonc_to_string: Converts a Jsonc object into a string of JSON-C.
"""
try:
import simplejson
except ImportError:
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson
except ImportError:
# Should work for Python2.6 and higher.
import json as simplejson
def _convert_to_jsonc(x):
"""Builds a Jsonc objects which wraps the argument's members."""
if isinstance(x, dict):
jsonc_obj = Jsonc()
# Recursively transform all members of the dict.
# When converting a dict, we do not convert _name items into private
# Jsonc members.
for key, value in list(x.items()):
jsonc_obj._dict[key] = _convert_to_jsonc(value)
return jsonc_obj
elif isinstance(x, list):
# Recursively transform all members of the list.
members = []
for item in x:
members.append(_convert_to_jsonc(item))
return members
else:
# Return the base object.
return x
def parse_json(json_string):
"""Converts a JSON-C string into a Jsonc object.
Args:
json_string: str or unicode The JSON to be parsed.
Returns:
A new Jsonc object.
"""
return _convert_to_jsonc(simplejson.loads(json_string))
def parse_json_file(json_file):
return _convert_to_jsonc(simplejson.load(json_file))
def jsonc_to_string(jsonc_obj):
"""Converts a Jsonc object into a string of JSON-C."""
return simplejson.dumps(_convert_to_object(jsonc_obj))
def prettify_jsonc(jsonc_obj, indentation=2):
"""Converts a Jsonc object to a pretified (intented) JSON string."""
return simplejson.dumps(_convert_to_object(jsonc_obj), indent=indentation)
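# Illustrative sketch only (not part of the original module): a hypothetical
# round trip through parse_json and jsonc_to_string, assuming the minimal
# JSON-C payload shown below. The helper is never called by this module.
def _example_round_trip():
  """Hypothetical usage sketch for parse_json / jsonc_to_string."""
  jsonc_obj = parse_json('{"apiVersion": "2.0", "data": {"totalItems": 800}}')
  # Attribute names are converted from camelCase on access, so these hold.
  assert jsonc_obj.api_version == '2.0'
  assert jsonc_obj.data.total_items == 800
  # Serializing converts the Jsonc tree back to plain dicts first.
  return jsonc_to_string(jsonc_obj)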
def _convert_to_object(jsonc_obj):
"""Creates a new dict or list which has the data in the Jsonc object.
Used to convert the Jsonc object to a plain old Python object to simplify
conversion to a JSON-C string.
Args:
jsonc_obj: A Jsonc object to be converted into simple Python objects
(dicts, lists, etc.)
Returns:
Either a dict, list, or other object with members converted from Jsonc
objects to the corresponding simple Python object.
"""
if isinstance(jsonc_obj, Jsonc):
plain = {}
for key, value in list(jsonc_obj._dict.items()):
plain[key] = _convert_to_object(value)
return plain
elif isinstance(jsonc_obj, list):
plain = []
for item in jsonc_obj:
plain.append(_convert_to_object(item))
return plain
else:
return jsonc_obj
def _to_jsonc_name(member_name):
"""Converts a Python style member name to a JSON-C style name.
JSON-C uses camelCaseWithLower while Python tends to use
lower_with_underscores so this method converts as follows:
spam becomes spam
spam_and_eggs becomes spamAndEggs
Args:
    member_name: str or unicode The Python style name which should be
converted to JSON-C style.
Returns:
The JSON-C style name as a str or unicode.
"""
characters = []
uppercase_next = False
for character in member_name:
if character == '_':
uppercase_next = True
elif uppercase_next:
characters.append(character.upper())
uppercase_next = False
else:
characters.append(character)
return ''.join(characters)
class Jsonc(object):
"""Represents JSON-C data in an easy to access object format.
To access the members of a JSON structure which looks like this:
{
"data": {
"totalItems": 800,
"items": [
{
"content": {
"1": "rtsp://v5.cache3.c.youtube.com/CiILENy.../0/0/0/video.3gp"
},
"viewCount": 220101,
"commentCount": 22,
"favoriteCount": 201
}
]
},
"apiVersion": "2.0"
}
You would do the following:
x = gdata.core.parse_json(the_above_string)
# Gives you 800
x.data.total_items
# Should be 22
x.data.items[0].comment_count
# The apiVersion is '2.0'
x.api_version
To create a Jsonc object which would produce the above JSON, you would do:
gdata.core.Jsonc(
api_version='2.0',
data=gdata.core.Jsonc(
total_items=800,
items=[
gdata.core.Jsonc(
view_count=220101,
comment_count=22,
favorite_count=201,
content={
'1': ('rtsp://v5.cache3.c.youtube.com'
'/CiILENy.../0/0/0/video.3gp')})]))
or
x = gdata.core.Jsonc()
x.api_version = '2.0'
x.data = gdata.core.Jsonc()
x.data.total_items = 800
x.data.items = []
# etc.
How it works:
The JSON-C data is stored in an internal dictionary (._dict) and the
getattr, setattr, and delattr methods rewrite the name which you provide
to mirror the expected format in JSON-C. (For more details on name
conversion see _to_jsonc_name.) You may also access members using
getitem, setitem, delitem as you would for a dictionary. For example
x.data.total_items is equivalent to x['data']['totalItems']
(Not all dict methods are supported so if you need something other than
the item operations, then you will want to use the ._dict member).
You may need to use getitem or the _dict member to access certain
properties in cases where the JSON-C syntax does not map neatly to Python
objects. For example the YouTube Video feed has some JSON like this:
"content": {"1": "rtsp://v5.cache3.c.youtube.com..."...}
You cannot do x.content.1 in Python, so you would use the getitem as
follows:
x.content['1']
or you could use the _dict member as follows:
x.content._dict['1']
  If you need to create a new object with such a mapping you could use:
x.content = gdata.core.Jsonc(_dict={'1': 'rtsp://cache3.c.youtube.com...'})
"""
def __init__(self, _dict=None, **kwargs):
json = _dict or {}
for key, value in list(kwargs.items()):
if key.startswith('_'):
object.__setattr__(self, key, value)
else:
json[_to_jsonc_name(key)] = _convert_to_jsonc(value)
object.__setattr__(self, '_dict', json)
def __setattr__(self, name, value):
if name.startswith('_'):
object.__setattr__(self, name, value)
else:
object.__getattribute__(
self, '_dict')[_to_jsonc_name(name)] = _convert_to_jsonc(value)
def __getattr__(self, name):
if name.startswith('_'):
      return object.__getattribute__(self, name)
else:
try:
return object.__getattribute__(self, '_dict')[_to_jsonc_name(name)]
except KeyError:
raise AttributeError(
'No member for %s or [\'%s\']' % (name, _to_jsonc_name(name)))
def __delattr__(self, name):
if name.startswith('_'):
object.__delattr__(self, name)
else:
try:
del object.__getattribute__(self, '_dict')[_to_jsonc_name(name)]
except KeyError:
raise AttributeError(
'No member for %s (or [\'%s\'])' % (name, _to_jsonc_name(name)))
# For container methods pass-through to the underlying dict.
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
self._dict[key] = value
def __delitem__(self, key):
del self._dict[key]
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
from copy import deepcopy
import logging
from os import path
import subprocess
import jinja2
from rejviz import libvirt_nets
from rejviz import utils
LOG = logging.getLogger(__file__)
NIC_CONFIG_DIR = '/etc/sysconfig/network-scripts'
NIC_CONFIG_PREFIX = 'ifcfg-'
NIC_CONFIG_FULL_PREFIX = path.join(NIC_CONFIG_DIR, NIC_CONFIG_PREFIX)
FETCH_SCRIPT_TEMPLATE = path.join(
path.dirname(path.realpath(__file__)),
'templates', 'fetch_nic.guestfish.j2')
def process_nic_mappings(args):
if not _has_nic_mapping_args(args):
return args
LOG.info('Looking for NIC configurations in the image...')
nics = _fetch_nics_from_image(args)
LOG.info('NICs found: %s', ', '.join([n['name'] for n in nics]))
for nic in nics:
LOG.debug('NIC %s: %s', nic['name'], str(nic))
networks = libvirt_nets.get_libvirt_networks()
mapped_nics = nics
if _auto_nic_mappings_enabled(args):
mapped_nics = _map_nics_auto(nics, networks)
manual_mappings = _parse_manual_nic_mappings(args)
mapped_nics = _map_nics_manual(mapped_nics, manual_mappings)
# TODO(jistr): check mappings' sanity
return _convert_nic_mappings_args(args, mapped_nics)
def _has_nic_mapping_args(args):
return '--nic-mappings' in args or '--auto-nic-mappings' in args
def _auto_nic_mappings_enabled(args):
return '--auto-nic-mappings' in args
def _fetch_nics_from_image(args):
image_args = utils.extract_image_args_from_disks(args)
nic_names = _get_nic_names_from_image(image_args)
command = ['guestfish', '-i', '--ro'] + image_args
with open(FETCH_SCRIPT_TEMPLATE) as template_file:
template = jinja2.Template(template_file.read())
script = template.render(
nic_config_dir=NIC_CONFIG_DIR,
nic_config_prefix=NIC_CONFIG_PREFIX,
nic_names=nic_names)
LOG.debug('Running guestfish to get NIC config details: %s', str(command))
fetcher = subprocess.Popen(command, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, _ = fetcher.communicate(script)
LOG.debug('guestfish returned: %s', output)
return _filter_ethernet_nics(_parse_nics_output(output))
def _get_nic_names_from_image(image_args):
command = ['virt-ls'] + image_args + [NIC_CONFIG_DIR]
LOG.debug('Running virt-ls to list NIC configs: %s', str(command))
network_scripts = subprocess.check_output(command).splitlines()
ifcfg_scripts = [s for s in network_scripts
if s.startswith(NIC_CONFIG_PREFIX)]
prefix_len = len(NIC_CONFIG_PREFIX)
nic_names = [s[prefix_len:] for s in ifcfg_scripts]
return nic_names
def _parse_nics_output(output):
lines = output.splitlines()
nics = []
current_nic = {}
for i, line in enumerate(lines):
# if line is a separator, start a new NIC
if line == '@-----':
nics.append(current_nic)
current_nic = {}
continue
# if line is a key, assign a value
if line.startswith('@'):
next_line = lines[i + 1] if i + 1 < len(lines) else None
if next_line and not next_line.startswith('@'):
current_nic[line[1:]] = next_line
# if next line is a key again, assign None to the current key
else:
current_nic[line[1:]] = None
return nics
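# Illustrative sketch only (not part of the original module): a hypothetical
# guestfish output in the format _parse_nics_output() expects, where each
# '@key' line is followed by its value and '@-----' closes one NIC record.
# The helper below is never called; it only documents the expected shape.
def _example_parse_nics_output():
    """Hypothetical sketch of parsing a single NIC record."""
    sample = ('@name\n'
              'eth0\n'
              '@type\n'
              'Ethernet\n'
              '@hwaddr\n'
              '52:54:00:aa:bb:cc\n'
              '@-----')
    # Yields [{'name': 'eth0', 'type': 'Ethernet',
    #          'hwaddr': '52:54:00:aa:bb:cc'}].
    return _parse_nics_output(sample)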
def _filter_ethernet_nics(nics):
return [nic for nic in nics
            if nic.get('type') and nic['type'].lower() == 'ethernet']
def _map_nics_auto(nics, networks):
mapped_nics = deepcopy(nics)
for nic in mapped_nics:
if not nic.get('network'):
continue
for network in networks:
if network['network'] == nic['network']:
nic['libvirt_network'] = network['name']
return mapped_nics
def _map_nics_manual(nics, manual_mappings):
mapped_nics = deepcopy(nics)
for nic in mapped_nics:
if manual_mappings.get(nic['name']):
nic['libvirt_network'] = manual_mappings[nic['name']]
return mapped_nics
def _parse_manual_nic_mappings(args):
if '--nic-mappings' not in args:
return {}
raw_mappings = args[args.index('--nic-mappings') + 1]
keyvals = raw_mappings.split(',')
return dict(keyval.split('=', 1) for keyval in keyvals)
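# Illustrative note (not part of the original module): with a hypothetical
# command line containing "--nic-mappings eth0=default,eth1=provisioning",
# _parse_manual_nic_mappings() returns
# {'eth0': 'default', 'eth1': 'provisioning'}.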
def _convert_nic_mappings_args(args, mapped_nics):
def get_nic_names(manual_mappings):
keyvals = manual_mappings.split(',')
return [keyval.split('=', 1)[0] for keyval in keyvals]
inserted_nic_names = set()
# convert manual mappings
converted_manual = []
args_iter = iter(args)
for arg in args_iter:
if arg == '--nic-mappings':
mappings_value = next(args_iter)
nic_names = get_nic_names(mappings_value)
inserted_nic_names = inserted_nic_names.union(set(nic_names))
converted_manual.extend(_network_args(nic_names, mapped_nics))
else:
converted_manual.append(arg)
# convert automatic mappings
converted_auto = []
for arg in converted_manual:
if arg == '--auto-nic-mappings':
all_nic_names = set(nic['name'] for nic in mapped_nics)
names_to_insert = all_nic_names.difference(inserted_nic_names)
inserted_nic_names = inserted_nic_names.union(names_to_insert)
converted_auto.extend(_network_args(names_to_insert, mapped_nics))
else:
converted_auto.append(arg)
return converted_auto
def _network_args(nic_names, mapped_nics):
args = []
for nic_name in nic_names:
nic = _nic_by_name(nic_name, mapped_nics)
args.append('--network')
args.append('network=%(libvirt_network)s,mac=%(hwaddr)s,model=virtio'
% nic)
return args
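# Illustrative note (not part of the original module): a hypothetical mapped
# NIC such as {'name': 'eth0', 'hwaddr': '52:54:00:aa:bb:cc',
# 'libvirt_network': 'default'} is rendered by _network_args(['eth0'], nics)
# as ['--network', 'network=default,mac=52:54:00:aa:bb:cc,model=virtio'].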
def _nic_by_name(nic_name, nics):
for nic in nics:
if nic['name'] == nic_name:
return nic
raise ValueError("NIC with name '%s' not found" % nic_name)
|
|
#!/usr/bin/env python
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS-IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script that simplifies releases by collecting various information.
Should be run from the oppia root dir.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import collections
import os
import re
import sys
import python_utils
import release_constants
from scripts import common
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PY_GITHUB_PATH = os.path.join(
_PARENT_DIR, 'oppia_tools', 'PyGithub-%s' % common.PYGITHUB_VERSION)
sys.path.insert(0, _PY_GITHUB_PATH)
import github # isort:skip pylint: disable=wrong-import-position
GIT_CMD_GET_STATUS = 'git status'
GIT_CMD_TEMPLATE_GET_NEW_COMMITS = 'git cherry %s -v'
GIT_CMD_GET_LOGS_FORMAT_STRING = (
'git log -z --no-color --pretty=format:%H{0}%aN{0}%aE{0}%B {1}..{2}')
GIT_CMD_DIFF_NAMES_ONLY_FORMAT_STRING = 'git diff --name-only %s %s'
GIT_CMD_SHOW_FORMAT_STRING = 'git show %s:feconf.py'
ISSUE_URL_FORMAT_STRING = 'https://github.com/oppia/oppia/issues/%s'
ISSUE_REGEX = re.compile(r'#(\d+)')
PR_NUMBER_REGEX = re.compile(r'\(#(\d+)\)$')
GROUP_SEP = '\x1D'
VERSION_RE_FORMAT_STRING = r'%s\s*=\s*(\d+|\.)+'
FECONF_VAR_NAMES = ['CURRENT_STATE_SCHEMA_VERSION',
'CURRENT_COLLECTION_SCHEMA_VERSION']
FIRST_OPPIA_COMMIT = '6a7138f5f603375e58d1dc3e1c4f1c80a126e249'
NO_LABEL_CHANGELOG_CATEGORY = 'Uncategorized'
FECONF_FILEPATH = os.path.join('', 'feconf.py')
Log = collections.namedtuple('Log', ['sha1', 'author', 'email', 'message'])
def get_current_version_tag(repo):
"""Retrieves the most recent version tag.
Args:
repo: github.Repository.Repository. The PyGithub object for the repo.
Returns:
github.Tag.Tag. The most recent version tag.
"""
# In case of hotfix, the first version tag will be the version of the
# release for which the hotfix branch is. So, if we require generation
# of release summary in case of hotfix, we need to get the second version
# tag. For example, if branch is release-1.2.3-hotfix-1, the first tag
# on github will be 1.2.3 but the correct tag of previous release will be
# 1.2.2 which is required for release summary generation.
if 'hotfix' in common.get_current_branch_name():
return repo.get_tags()[1]
# This is for the normal release without any hotfix. In this case, the
# first tag will be of current release serving on prod. For example, if we
# are deploying release-1.2.3, the first tag on github will be 1.2.2 which
# is required for release summary generation.
else:
return repo.get_tags()[0]
def get_extra_commits_in_new_release(base_commit, repo):
"""Gets extra commits in the new release.
Args:
base_commit: str. The base commit common between current branch and the
latest release.
repo: github.Repository.Repository. The PyGithub object for the repo.
Returns:
list(github.Commit.Commit). List of commits from the base commit up to
the current commit, which haven't been cherrypicked already.
"""
get_commits_cmd = GIT_CMD_TEMPLATE_GET_NEW_COMMITS % base_commit
out = python_utils.UNICODE(
common.run_cmd(get_commits_cmd.split(' ')), 'utf-8').split('\n')
commits = []
for line in out:
# Lines that start with a - are already cherrypicked. The commits of
# interest are on lines that start with +.
        if line.startswith('+'):
line = line[2:]
commit = repo.get_commit(line[:line.find(' ')])
commits.append(commit)
return commits
def gather_logs(start, stop='HEAD'):
"""Gathers the logs between the start and endpoint.
Args:
start: str. Tag, Branch or SHA1 of start point.
stop: str. Tag, Branch or SHA1 of end point, defaults to HEAD.
Returns:
list(Log). List of Logs.
"""
get_logs_cmd = GIT_CMD_GET_LOGS_FORMAT_STRING.format(
GROUP_SEP, start, stop)
# The unicode conversion is required because there can be non-ascii
# characters in the logs and it can result in breaking the flow
# of release summary generation.
out = python_utils.UNICODE(
common.run_cmd(get_logs_cmd.split(' ')), 'utf-8').split('\x00')
if len(out) == 1 and out[0] == '':
return []
else:
return [Log(*line.strip().split(GROUP_SEP)) for line in out]
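# Illustrative note (not part of the original script): 'git log -z' separates
# commits with NUL bytes and the pretty format joins the fields with the ASCII
# group separator (GROUP_SEP), so a hypothetical record such as
# '<sha1>\x1d<author>\x1d<email>\x1dCommit message' becomes
# Log(sha1='<sha1>', author='<author>', email='<email>',
# message='Commit message').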
def extract_issues(logs):
"""Extract references to issues out of a list of Logs
Args:
logs: list(Log). List of Logs to parse.
Returns:
set(str). Set of found issues as links to Github.
"""
issues = ISSUE_REGEX.findall(' '.join([log.message for log in logs]))
links = {ISSUE_URL_FORMAT_STRING % issue for issue in issues}
return links
def extract_pr_numbers(logs):
"""Extract PR numbers out of a list of Logs.
Args:
logs: list(Log). List of Logs to parse.
Returns:
set(int). Set of PR numbers extracted from the log.
"""
pr_numbers = []
for log in logs:
pr_numbers.extend(PR_NUMBER_REGEX.findall(log.message.split('\n')[0]))
# Delete duplicates.
pr_numbers = list(set(pr_numbers))
pr_numbers.sort(reverse=True)
return pr_numbers
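# Illustrative sketch only (not part of the original script): PR_NUMBER_REGEX
# matches a trailing "(#<number>)" on the first line of a commit message. The
# helper below is hypothetical and never called; the sample logs are made up.
def _example_extract_pr_numbers():
    """Hypothetical sketch of which commit titles yield PR numbers."""
    sample_logs = [
        Log('abc123', 'Jane Doe', 'jane@example.com',
            'Fix exploration editor crash (#1234)\n\nLonger description.'),
        Log('def456', 'John Doe', 'john@example.com',
            'Mention #99 mid-sentence without a trailing PR reference.'),
    ]
    # Only the first title ends with '(#<number>)', so this returns ['1234'];
    # '#99' is still picked up as an issue link by extract_issues().
    return extract_pr_numbers(sample_logs)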
def get_prs_from_pr_numbers(pr_numbers, repo):
"""Returns a list of PRs corresponding to the numbers provided.
Args:
pr_numbers: list(int). List of PR numbers.
repo: github.Repository.Repository. The PyGithub object for the repo.
Returns:
list(github.PullRequest.PullRequest). The list of references to the PRs.
"""
pulls = [repo.get_pull(int(num)) for num in pr_numbers]
return list(pulls)
def get_changelog_categories(pulls):
"""Categorizes the given PRs into various changelog categories
Args:
pulls: list(github.PullRequest.PullRequest). The list of PRs to be
categorized.
Returns:
        dict(str, list(str)). A dict where the keys are the various changelog
labels, and the values are the titles of the PRs that fall under
that category.
"""
result = collections.defaultdict(list)
for pull in pulls:
labels = pull.labels
added_to_dict = False
formatted_title = '%s (#%d)' % (pull.title, pull.number)
for label in labels:
if 'CHANGELOG:' in label.name:
category = label.name[
label.name.find(':') + 2:label.name.find(' --')]
added_to_dict = True
result[category].append(formatted_title)
break
if not added_to_dict:
result[NO_LABEL_CHANGELOG_CATEGORY].append(formatted_title)
return dict(result)
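# Illustrative note (not part of the original script): the category is taken
# from the label text between ': ' and ' --', so a hypothetical PR labelled
# 'PR CHANGELOG: Bug fixes -- @release-team' with title 'Fix login flow' and
# number 1234 would be filed under 'Bug fixes' as 'Fix login flow (#1234)'.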
def check_versions(current_release):
"""Checks if the versions for the exploration or collection schemas have
changed.
Args:
current_release: str. The current release tag to diff against.
Returns:
list(str). List of variable names that changed.
"""
feconf_changed_version = []
git_show_cmd = (GIT_CMD_SHOW_FORMAT_STRING % current_release)
old_feconf = common.run_cmd(git_show_cmd.split(' '))
with python_utils.open_file(FECONF_FILEPATH, 'r') as feconf_file:
new_feconf = feconf_file.read()
for variable in FECONF_VAR_NAMES:
old_version = re.findall(
VERSION_RE_FORMAT_STRING % variable, old_feconf)[0]
new_version = re.findall(
VERSION_RE_FORMAT_STRING % variable, new_feconf)[0]
if old_version != new_version:
feconf_changed_version.append(variable)
return feconf_changed_version
def _git_diff_names_only(left, right='HEAD'):
"""Get names of changed files from git.
Args:
left: str. Lefthand timepoint.
        right: str. Righthand timepoint.
Returns:
list(str). List of files that are different between the two points.
"""
diff_cmd = (GIT_CMD_DIFF_NAMES_ONLY_FORMAT_STRING % (left, right))
return common.run_cmd(diff_cmd.split(' ')).splitlines()
def check_setup_scripts(base_release_tag, changed_only=True):
"""Check if setup scripts have changed.
Args:
base_release_tag: str. The current release tag to diff against.
changed_only: bool. If set to False will return all tested files
instead of just the changed ones.
Returns:
        dict. Dict mapping each setup script to a boolean indicating whether or
            not it has changed (filtered by default to those that are modified).
"""
setup_scripts = ['scripts/%s' % item for item in
['setup.py', 'setup_gae.py', 'install_third_party_libs.py',
'install_third_party.py']]
changed_files = _git_diff_names_only(base_release_tag)
changes_dict = {script: script in changed_files
for script in setup_scripts}
if changed_only:
return {name: status for name, status
in changes_dict.items() if status}
else:
return changes_dict
def check_storage_models(current_release):
"""Check if files in core/storage have changed and returns them.
Args:
current_release: str. The current release version.
Returns:
list(str). The changed files (if any).
"""
diff_list = _git_diff_names_only(current_release)
return [item for item in diff_list if item.startswith('core/storage')]
def main(personal_access_token):
"""Collects necessary info and dumps it to disk.
Args:
personal_access_token: str. The personal access token for the
GitHub id of user.
"""
if not common.is_current_branch_a_release_branch():
raise Exception(
'This script should only be run from the latest release branch.')
g = github.Github(personal_access_token)
repo = g.get_organization('oppia').get_repo('oppia')
current_release = get_current_version_tag(repo)
current_release_tag = current_release.name
base_commit = current_release.commit.sha
new_commits = get_extra_commits_in_new_release(base_commit, repo)
new_release_logs = gather_logs(base_commit)
    # Keep only the logs whose commits have not been cherrypicked already;
    # deleting from the list while iterating over it would skip entries.
    new_release_logs = [
        log for log in new_release_logs
        if any(log.sha1 == commit.sha for commit in new_commits)]
past_logs = gather_logs(FIRST_OPPIA_COMMIT, stop=base_commit)
issue_links = extract_issues(new_release_logs)
feconf_version_changes = check_versions(current_release_tag)
setup_changes = check_setup_scripts(current_release_tag)
storage_changes = check_storage_models(current_release_tag)
pr_numbers = extract_pr_numbers(new_release_logs)
prs = get_prs_from_pr_numbers(pr_numbers, repo)
categorized_pr_titles = get_changelog_categories(prs)
with python_utils.open_file(
release_constants.RELEASE_SUMMARY_FILEPATH, 'w') as out:
out.write('## Collected release information\n')
if feconf_version_changes:
out.write(
'\n### Feconf version changes:\nThis indicates that a '
'migration may be needed\n\n')
for var in feconf_version_changes:
out.write('* %s\n' % var)
if setup_changes:
out.write('\n### Changed setup scripts:\n')
for var in setup_changes.keys():
out.write('* %s\n' % var)
if storage_changes:
out.write('\n### Changed storage models:\n')
for item in storage_changes:
out.write('* %s\n' % item)
past_authors = {
log.email: log.author for log in past_logs
}
release_authors = {(log.author, log.email) for log in new_release_logs}
new_authors = sorted(set(
[(name, email) for name, email in release_authors
if email not in past_authors]))
existing_authors = sorted(set(
[(name, email) for name, email in release_authors
if email in past_authors]))
new_author_names = [name for name, _ in new_authors]
existing_author_names = [name for name, _ in existing_authors]
# TODO(apb7): Duplicate author handling due to email changes.
out.write('\n%s' % release_constants.NEW_AUTHORS_HEADER)
for name, email in new_authors:
out.write('* %s <%s>\n' % (name, email))
out.write('\n%s' % release_constants.EXISTING_AUTHORS_HEADER)
for name, email in existing_authors:
out.write('* %s <%s>\n' % (name, email))
out.write('\n%s' % release_constants.NEW_CONTRIBUTORS_HEADER)
for name, email in new_authors:
out.write('* %s <%s>\n' % (name, email))
# Generate the author sections of the email.
out.write('\n%s' % release_constants.EMAIL_HEADER)
new_author_comma_list = (
'%s, and %s' % (', '.join(
new_author_names[:-1]), new_author_names[-1]))
existing_author_comma_list = (
'%s, and %s' % (', '.join(
existing_author_names[:-1]), existing_author_names[-1]))
out.write(
'``Please welcome %s for whom this release marks their first '
'contribution to Oppia!``\n\n' % new_author_comma_list)
out.write(
'``Thanks to %s, our returning contributors who made this release '
'possible.``\n' % existing_author_comma_list)
if personal_access_token:
out.write('\n%s' % release_constants.CHANGELOG_HEADER)
for category in categorized_pr_titles:
out.write('%s\n' % category)
for pr_title in categorized_pr_titles[category]:
out.write('* %s\n' % pr_title)
out.write('\n')
out.write('\n%s' % release_constants.COMMIT_HISTORY_HEADER)
for name, title in [(log.author, log.message.split('\n\n')[0])
for log in new_release_logs]:
out.write('* %s\n' % title)
if issue_links:
out.write('\n%s' % release_constants.ISSUES_HEADER)
for link in issue_links:
out.write('* [%s](%s)\n' % (link, link))
python_utils.PRINT('Done. Summary file generated in %s' % (
release_constants.RELEASE_SUMMARY_FILEPATH))
# The 'no coverage' pragma is used as this line is un-testable. This is because
# it will only be called when generate_release_info.py is used as
# a script.
if __name__ == '__main__': # pragma: no cover
main(common.get_personal_access_token())
|
|
#!/usr/bin/env python
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
import sys
# Adds testrunner to the path hence it has to be imported at the beggining.
import base_runner
from testrunner.local import utils
from testrunner.testproc import fuzzer
from testrunner.testproc.base import TestProcProducer
from testrunner.testproc.combiner import CombinerProc
from testrunner.testproc.execution import ExecutionProc
from testrunner.testproc.expectation import ForgiveTimeoutProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
from testrunner.testproc.progress import ResultsTracker, TestsCounter
from testrunner.utils import random_utils
DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
class NumFuzzer(base_runner.BaseTestRunner):
def __init__(self, *args, **kwargs):
super(NumFuzzer, self).__init__(*args, **kwargs)
def _add_parser_options(self, parser):
parser.add_option("--fuzzer-random-seed", default=0,
help="Default seed for initializing fuzzer random "
"generator")
parser.add_option("--tests-count", default=5, type="int",
help="Number of tests to generate from each base test. "
"Can be combined with --total-timeout-sec with "
"value 0 to provide infinite number of subtests. "
"When --combine-tests is set it indicates how many "
"tests to create in total")
# Stress gc
parser.add_option("--stress-marking", default=0, type="int",
help="probability [0-10] of adding --stress-marking "
"flag to the test")
parser.add_option("--stress-scavenge", default=0, type="int",
help="probability [0-10] of adding --stress-scavenge "
"flag to the test")
parser.add_option("--stress-compaction", default=0, type="int",
help="probability [0-10] of adding --stress-compaction "
"flag to the test")
parser.add_option("--stress-gc", default=0, type="int",
help="probability [0-10] of adding --random-gc-interval "
"flag to the test")
parser.add_option("--stress-thread-pool-size", default=0, type="int",
help="probability [0-10] of adding --thread-pool-size "
"flag to the test")
# Stress deopt
parser.add_option("--stress-deopt", default=0, type="int",
help="probability [0-10] of adding --deopt-every-n-times "
"flag to the test")
parser.add_option("--stress-deopt-min", default=1, type="int",
help="extends --stress-deopt to have minimum interval "
"between deopt points")
# Stress interrupt budget
parser.add_option("--stress-interrupt-budget", default=0, type="int",
help="probability [0-10] of adding --interrupt-budget "
"flag to the test")
# Combine multiple tests
parser.add_option("--combine-tests", default=False, action="store_true",
help="Combine multiple tests as one and run with "
"try-catch wrapper")
parser.add_option("--combine-max", default=100, type="int",
help="Maximum number of tests to combine")
parser.add_option("--combine-min", default=2, type="int",
help="Minimum number of tests to combine")
# Miscellaneous
parser.add_option("--variants", default='default',
help="Comma-separated list of testing variants")
return parser
def _process_options(self, options):
if not options.fuzzer_random_seed:
options.fuzzer_random_seed = random_utils.random_seed()
if options.total_timeout_sec:
options.tests_count = 0
if options.combine_tests:
if options.combine_min > options.combine_max:
        print ('min_group_size (%d) cannot be larger than max_group_size (%d)' %
               (options.combine_min, options.combine_max))
raise base_runner.TestRunnerError()
if options.variants != 'default':
print ('Only default testing variant is supported with numfuzz')
raise base_runner.TestRunnerError()
return True
def _get_default_suite_names(self):
return DEFAULT_SUITES
def _timeout_scalefactor(self, options):
factor = super(NumFuzzer, self)._timeout_scalefactor(options)
if options.stress_interrupt_budget:
# TODO(machenbach): This should be moved to a more generic config.
# Fuzzers have too much timeout in debug mode.
factor = max(int(factor * 0.25), 1)
return factor
def _get_statusfile_variables(self, options):
variables = (
super(NumFuzzer, self)._get_statusfile_variables(options))
variables.update({
'deopt_fuzzer': bool(options.stress_deopt),
'endurance_fuzzer': bool(options.combine_tests),
'gc_stress': bool(options.stress_gc),
'gc_fuzzer': bool(max([options.stress_marking,
options.stress_scavenge,
options.stress_compaction,
options.stress_gc,
options.stress_thread_pool_size])),
})
return variables
def _do_execute(self, tests, args, options):
loader = LoadProc()
fuzzer_rng = random.Random(options.fuzzer_random_seed)
combiner = self._create_combiner(fuzzer_rng, options)
results = self._create_result_tracker(options)
execproc = ExecutionProc(options.j)
sigproc = self._create_signal_proc()
indicators = self._create_progress_indicators(options)
procs = [
loader,
NameFilterProc(args) if args else None,
StatusFileFilterProc(None, None),
# TODO(majeski): Improve sharding when combiner is present. Maybe select
# different random seeds for shards instead of splitting tests.
self._create_shard_proc(options),
ForgiveTimeoutProc(),
combiner,
self._create_fuzzer(fuzzer_rng, options),
sigproc,
] + indicators + [
results,
self._create_timeout_proc(options),
self._create_rerun_proc(options),
execproc,
]
self._prepare_procs(procs)
loader.load_tests(tests)
# TODO(majeski): maybe some notification from loader would be better?
if combiner:
combiner.generate_initial_tests(options.j * 4)
# This starts up worker processes and blocks until all tests are
# processed.
execproc.run()
for indicator in indicators:
indicator.finished()
    print('>>> %d tests ran' % results.total)
if results.failed:
return utils.EXIT_CODE_FAILURES
# Indicate if a SIGINT or SIGTERM happened.
return sigproc.exit_code
def _load_suites(self, names, options):
suites = super(NumFuzzer, self)._load_suites(names, options)
if options.combine_tests:
suites = [s for s in suites if s.test_combiner_available()]
if options.stress_interrupt_budget:
# Changing interrupt budget forces us to suppress certain test assertions.
for suite in suites:
suite.do_suppress_internals()
return suites
def _create_combiner(self, rng, options):
if not options.combine_tests:
return None
return CombinerProc(rng, options.combine_min, options.combine_max,
options.tests_count)
def _create_fuzzer(self, rng, options):
return fuzzer.FuzzerProc(
rng,
self._tests_count(options),
self._create_fuzzer_configs(options),
self._disable_analysis(options),
)
def _tests_count(self, options):
if options.combine_tests:
return 1
return options.tests_count
def _disable_analysis(self, options):
"""Disable analysis phase when options are used that don't support it."""
return options.combine_tests or options.stress_interrupt_budget
def _create_fuzzer_configs(self, options):
fuzzers = []
def add(name, prob, *args):
if prob:
fuzzers.append(fuzzer.create_fuzzer_config(name, prob, *args))
add('compaction', options.stress_compaction)
add('marking', options.stress_marking)
add('scavenge', options.stress_scavenge)
add('gc_interval', options.stress_gc)
add('threads', options.stress_thread_pool_size)
add('interrupt_budget', options.stress_interrupt_budget)
add('deopt', options.stress_deopt, options.stress_deopt_min)
return fuzzers
if __name__ == '__main__':
sys.exit(NumFuzzer().execute())
|
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tool for generating a client library.
Relevant links:
https://developers.google.com/discovery/v1/reference/apis#resource
"""
import datetime
from apitools.gen import command_registry
from apitools.gen import message_registry
from apitools.gen import service_registry
from apitools.gen import util
def _ApitoolsVersion():
"""Returns version of the currently installed google-apitools package."""
try:
import pkg_resources
except ImportError:
return 'X.X.X'
try:
return pkg_resources.get_distribution('google-apitools').version
except pkg_resources.DistributionNotFound:
return 'X.X.X'
def _StandardQueryParametersSchema(discovery_doc):
"""Sets up dict of standard query parameters."""
standard_query_schema = {
'id': 'StandardQueryParameters',
'type': 'object',
'description': 'Query parameters accepted by all methods.',
'properties': discovery_doc.get('parameters', {}),
}
# We add an entry for the trace, since Discovery doesn't.
standard_query_schema['properties']['trace'] = {
'type': 'string',
'description': ('A tracing token of the form "token:<tokenid>" '
'to include in api requests.'),
'location': 'query',
}
return standard_query_schema
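# Illustrative note (not part of the original module): for a hypothetical
# discovery document containing
#     {'parameters': {'alt': {'type': 'string', 'location': 'query'}}}
# the returned schema's 'properties' holds that 'alt' entry plus the
# synthesized 'trace' parameter, ready to be registered as the
# StandardQueryParameters message.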
class DescriptorGenerator(object):
"""Code generator for a given discovery document."""
def __init__(self, discovery_doc, client_info, names, root_package, outdir,
base_package, protorpc_package, generate_cli=False,
init_wildcards_file=True,
use_proto2=False, unelidable_request_methods=None,
apitools_version=''):
self.__discovery_doc = discovery_doc
self.__client_info = client_info
self.__outdir = outdir
self.__use_proto2 = use_proto2
self.__description = util.CleanDescription(
self.__discovery_doc.get('description', ''))
self.__package = self.__client_info.package
self.__version = self.__client_info.version
self.__revision = discovery_doc.get('revision', '1')
self.__generate_cli = generate_cli
self.__init_wildcards_file = init_wildcards_file
self.__root_package = root_package
self.__base_files_package = base_package
self.__protorpc_package = protorpc_package
self.__names = names
# Order is important here: we need the schemas before we can
# define the services.
self.__message_registry = message_registry.MessageRegistry(
self.__client_info, self.__names, self.__description,
self.__root_package, self.__base_files_package,
self.__protorpc_package)
schemas = self.__discovery_doc.get('schemas', {})
for schema_name, schema in schemas.items():
self.__message_registry.AddDescriptorFromSchema(
schema_name, schema)
# We need to add one more message type for the global parameters.
standard_query_schema = _StandardQueryParametersSchema(
self.__discovery_doc)
self.__message_registry.AddDescriptorFromSchema(
standard_query_schema['id'], standard_query_schema)
# Now that we know all the messages, we need to correct some
# fields from MessageFields to EnumFields.
self.__message_registry.FixupMessageFields()
self.__command_registry = command_registry.CommandRegistry(
self.__package, self.__version, self.__client_info,
self.__message_registry, self.__root_package,
self.__base_files_package, self.__protorpc_package,
self.__names)
self.__command_registry.AddGlobalParameters(
self.__message_registry.LookupDescriptorOrDie(
'StandardQueryParameters'))
self.__services_registry = service_registry.ServiceRegistry(
self.__client_info,
self.__message_registry,
self.__command_registry,
self.__names,
self.__root_package,
self.__base_files_package,
unelidable_request_methods or [])
services = self.__discovery_doc.get('resources', {})
for service_name, methods in sorted(services.items()):
self.__services_registry.AddServiceFromResource(
service_name, methods)
# We might also have top-level methods.
api_methods = self.__discovery_doc.get('methods', [])
if api_methods:
self.__services_registry.AddServiceFromResource(
'api', {'methods': api_methods})
# pylint: disable=protected-access
self.__client_info = self.__client_info._replace(
scopes=self.__services_registry.scopes)
# The apitools version that will be used in prerequisites for the
# generated packages.
self.__apitools_version = (
apitools_version if apitools_version else _ApitoolsVersion())
@property
def client_info(self):
return self.__client_info
@property
def discovery_doc(self):
return self.__discovery_doc
@property
def names(self):
return self.__names
@property
def outdir(self):
return self.__outdir
@property
def package(self):
return self.__package
@property
def use_proto2(self):
return self.__use_proto2
@property
def apitools_version(self):
return self.__apitools_version
def _GetPrinter(self, out):
printer = util.SimplePrettyPrinter(out)
return printer
def WriteInit(self, out):
"""Write a simple __init__.py for the generated client."""
printer = self._GetPrinter(out)
if self.__init_wildcards_file:
printer('"""Common imports for generated %s client library."""',
self.__client_info.package)
printer('# pylint:disable=wildcard-import')
else:
printer('"""Package marker file."""')
printer()
printer('import pkgutil')
printer()
if self.__init_wildcards_file:
printer('from %s import *', self.__base_files_package)
if self.__root_package == '.':
import_prefix = ''
else:
import_prefix = '%s.' % self.__root_package
if self.__generate_cli:
printer('from %s%s import *',
import_prefix, self.__client_info.cli_rule_name)
printer('from %s%s import *',
import_prefix, self.__client_info.client_rule_name)
printer('from %s%s import *',
import_prefix, self.__client_info.messages_rule_name)
printer()
printer('__path__ = pkgutil.extend_path(__path__, __name__)')
def WriteIntermediateInit(self, out):
"""Write a simple __init__.py for an intermediate directory."""
printer = self._GetPrinter(out)
printer('#!/usr/bin/env python')
printer('"""Shared __init__.py for apitools."""')
printer()
printer('from pkgutil import extend_path')
printer('__path__ = extend_path(__path__, __name__)')
def WriteSetupPy(self, out):
"""Write a setup.py for upload to PyPI."""
printer = self._GetPrinter(out)
year = datetime.datetime.now().year
printer('# Copyright %s Google Inc. All Rights Reserved.' % year)
printer('#')
printer('# Licensed under the Apache License, Version 2.0 (the'
'"License");')
printer('# you may not use this file except in compliance with '
'the License.')
printer('# You may obtain a copy of the License at')
printer('#')
printer('# http://www.apache.org/licenses/LICENSE-2.0')
printer('#')
printer('# Unless required by applicable law or agreed to in writing, '
'software')
printer('# distributed under the License is distributed on an "AS IS" '
'BASIS,')
printer('# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either '
'express or implied.')
printer('# See the License for the specific language governing '
'permissions and')
printer('# limitations under the License.')
printer()
printer('import setuptools')
printer('REQUIREMENTS = [')
with printer.Indent(indent=' '):
parts = self.apitools_version.split('.')
major = parts.pop(0)
minor = parts.pop(0)
printer('"google-apitools>=%s,~=%s.%s",',
self.apitools_version, major, minor)
printer('"httplib2>=0.9",')
printer('"oauth2client>=1.4.12",')
printer(']')
printer('_PACKAGE = "apitools.clients.%s"' % self.__package)
printer()
printer('setuptools.setup(')
# TODO(craigcitro): Allow customization of these options.
with printer.Indent(indent=' '):
printer('name="google-apitools-%s-%s",',
self.__package, self.__version)
printer('version="%s.%s",',
self.apitools_version, self.__revision)
printer('description="Autogenerated apitools library for %s",' % (
self.__package,))
printer('url="https://github.com/google/apitools",')
printer('author="Craig Citro",')
printer('author_email="[email protected]",')
printer('packages=setuptools.find_packages(),')
printer('install_requires=REQUIREMENTS,')
printer('classifiers=[')
with printer.Indent(indent=' '):
printer('"Programming Language :: Python :: 2.7",')
printer('"License :: OSI Approved :: Apache Software '
'License",')
printer('],')
printer('license="Apache 2.0",')
printer('keywords="apitools apitools-%s %s",' % (
self.__package, self.__package))
printer(')')
def WriteMessagesFile(self, out):
self.__message_registry.WriteFile(self._GetPrinter(out))
def WriteMessagesProtoFile(self, out):
self.__message_registry.WriteProtoFile(self._GetPrinter(out))
def WriteServicesProtoFile(self, out):
self.__services_registry.WriteProtoFile(self._GetPrinter(out))
def WriteClientLibrary(self, out):
self.__services_registry.WriteFile(self._GetPrinter(out))
def WriteCli(self, out):
self.__command_registry.WriteFile(self._GetPrinter(out))
|
|
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Thu Apr 11 15:07:51 2013 by generateDS.py version 2.9a.
#
import sys
from mixbox.binding_utils import *
from stix.bindings import register_extension
from stix.bindings.course_of_action import StructuredCOAType
import stix.bindings.stix_common as stix_common_binding
XML_NS = "http://stix.mitre.org/extensions/StructuredCOA#Generic-1"
#
# Data representation classes.
#
@register_extension
class GenericStructuredCOAType(StructuredCOAType):
"""The GenericStructuredCOAType specifies an instantial extension from
the abstract course_of_action_binding.StructuredCOAType intended to support the generic
    inclusion of any COA content. Specifies a reference URL for the
location of the Generic Structured COA."""
subclass = None
superclass = StructuredCOAType
xmlns = XML_NS
xmlns_prefix = "genericStructuredCOA"
xml_type = "GenericStructuredCOAType"
def __init__(self, idref=None, id=None, reference_location=None, Description=None, Type=None, Specification=None):
super(GenericStructuredCOAType, self).__init__(idref=idref, id=id)
self.reference_location = _cast(None, reference_location)
if Description is None:
self.Description = []
else:
self.Description = Description
self.Type = Type
self.Specification = Specification
def factory(*args_, **kwargs_):
if GenericStructuredCOAType.subclass:
return GenericStructuredCOAType.subclass(*args_, **kwargs_)
else:
return GenericStructuredCOAType(*args_, **kwargs_)
factory = staticmethod(factory)
def add_Description(self, Description): self.Description.append(Description)
def insert_Description(self, index, Description): self.Description[index] = Description
def get_Description(self): return self.Description
def set_Description(self, Description): self.Description = Description
def get_Type(self): return self.Type
def set_Type(self, Type): self.Type = Type
def get_Specification(self): return self.Specification
def set_Specification(self, Specification): self.Specification = Specification
def get_reference_location(self): return self.reference_location
def set_reference_location(self, reference_location): self.reference_location = reference_location
def hasContent_(self):
if (
self.Description or
self.Type is not None or
self.Specification is not None or
super(GenericStructuredCOAType, self).hasContent_()
):
return True
else:
return False
def export(self, lwrite, level, nsmap, namespace_=XML_NS, name_='GenericStructuredCOAType', namespacedef_='', pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
showIndent(lwrite, level, pretty_print)
lwrite('<%s:%s%s' % (nsmap[namespace_], name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(lwrite, level, already_processed, namespace_, name_='GenericStructuredCOAType')
if self.hasContent_():
lwrite('>%s' % (eol_, ))
self.exportChildren(lwrite, level + 1, nsmap, XML_NS, name_, pretty_print=pretty_print)
showIndent(lwrite, level, pretty_print)
lwrite('</%s:%s>%s' % (nsmap[namespace_], name_, eol_))
else:
lwrite('/>%s' % (eol_, ))
def exportAttributes(self, lwrite, level, already_processed, namespace_='genericStructuredCOA:', name_='GenericStructuredCOAType'):
super(GenericStructuredCOAType, self).exportAttributes(lwrite, level, already_processed, namespace_, name_='GenericStructuredCOAType')
# if 'xmlns' not in already_processed:
# already_processed.add('xmlns')
# xmlns = " xmlns:%s='%s'" % (self.xmlns_prefix, self.xmlns)
# lwrite(xmlns)
# if 'xsi:type' not in already_processed:
# already_processed.add('xsi:type')
# xsi_type = " xsi:type='%s:%s'" % (self.xmlns_prefix, self.xml_type)
# lwrite(xsi_type)
if self.reference_location is not None and 'reference_location' not in already_processed:
already_processed.add('reference_location')
lwrite(' reference_location=%s' % (quote_attrib(self.reference_location), ))
def exportChildren(self, lwrite, level, nsmap, namespace_=XML_NS, name_='GenericStructuredCOAType', fromsubclass_=False, pretty_print=True):
super(GenericStructuredCOAType, self).exportChildren(lwrite, level, nsmap, namespace_, name_, True, pretty_print=pretty_print)
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
for Description in self.Description:
Description.export(lwrite, level, nsmap, namespace_, name_='Description', pretty_print=pretty_print)
if self.Type is not None:
self.Type.export(lwrite, level, nsmap, namespace_, name_='Type', pretty_print=pretty_print)
if self.Specification is not None:
self.Specification.export(lwrite, level, nsmap, namespace_, name_='Specification', pretty_print=pretty_print)
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('reference_location', node)
if value is not None and 'reference_location' not in already_processed:
already_processed.add('reference_location')
self.reference_location = value
super(GenericStructuredCOAType, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Description':
obj_ = stix_common_binding.StructuredTextType.factory()
obj_.build(child_)
self.add_Description(obj_)
elif nodeName_ == 'Type':
obj_ = stix_common_binding.ControlledVocabularyStringType.factory()
obj_.build(child_)
self.set_Type(obj_)
elif nodeName_ == 'Specification':
obj_ = stix_common_binding.EncodedCDATAType.factory()
obj_.build(child_)
self.set_Specification(obj_)
super(GenericStructuredCOAType, self).buildChildren(child_, node, nodeName_, True)
# end class GenericStructuredCOAType
GDSClassesMapping = {}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName):
doc = parsexml_(inFileName)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'GenericStructuredCOAType'
rootClass = GenericStructuredCOAType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'GenericStructuredCOAType'
rootClass = GenericStructuredCOAType
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
# doc = None
# sys.stdout.write('<?xml version="1.0" ?>\n')
# rootObj.export(sys.stdout, 0, name_="GenericStructuredCOAType",
# namespacedef_='')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"GenericStructuredCOAType"
]
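# --- Illustrative usage sketch (not part of the generated binding) ---
# A minimal, hypothetical round trip through the parse helpers above; the XML
# string and the reference URL are invented for illustration only.
#
#     example_xml = (
#         '<GenericStructuredCOAType xmlns="%s" '
#         'reference_location="http://example.org/coa"/>' % XML_NS
#     )
#     coa = parseString(example_xml)
#     coa.get_reference_location()  # -> 'http://example.org/coa'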
|
|
from functools import partial
from tremendous.api import (
apply_color,
apply_256,
apply_256_bg,
apply_256_hl,
)
from tremendous.bindings import lib as __lib
from tremendous.bindings import ffi
colors_16 = dict(
bold=__lib.BOLD,
italic=__lib.ITALIC,
under=__lib.UNDER,
under2=__lib.UNDER2,
strike=__lib.STRIKE,
blink=__lib.BLINK,
flip=__lib.FLIP,
black=__lib.BLACK,
red=__lib.RED,
green=__lib.GREEN,
yellow=__lib.YELLOW,
blue=__lib.BLUE,
magenta=__lib.MAGENTA,
cyan=__lib.CYAN,
white=__lib.WHITE,
hblack=__lib.HBLACK,
hred=__lib.HRED,
hgreen=__lib.HGREEN,
hyellow=__lib.HYELLOW,
hblue=__lib.HBLUE,
hmagenta=__lib.HMAGENTA,
hcyan=__lib.HCYAN,
hwhite=__lib.HWHITE,
bgblack=__lib.BGBLACK,
bgred=__lib.BGRED,
bggreen=__lib.BGGREEN,
bgyellow=__lib.BGYELLOW,
bgblue=__lib.BGBLUE,
bgmagenta=__lib.BGMAGENTA,
bgcyan=__lib.BGCYAN,
bgwhite=__lib.BGWHITE,
)
__funcs = {}
# This is also gross. Sorry.
for k, v in colors_16.items():
if k.startswith('h'):
__funcs['highlight_' + k[1:]] = partial(apply_color, v)
__funcs['hi_' + k[1:]] = partial(apply_color, v)
__funcs['hl_' + k[1:]] = partial(apply_color, v)
elif k.startswith('bg'):
        __funcs['background_' + k[2:]] = partial(apply_color, v)
__funcs['bg_' + k[2:]] = partial(apply_color, v)
elif k.startswith('under'):
__funcs[k] = partial(apply_color, v)
__funcs['underline' + k[5:]] = partial(apply_color, v)
else:
__funcs[k] = partial(apply_color, v)
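# Usage sketch for the 16-colour helpers built above (illustrative only; it
# assumes ``apply_color(code, text)`` returns ``text`` wrapped in the matching
# escape sequence, a signature that lives in tremendous.api, not here):
#
#     __funcs['red']('error')          # red foreground
#     __funcs['bg_blue']('notice')     # blue background
#     __funcs['highlight_green']('ok')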
extended_colors = {
'Grey0': [0, 0, 0],
'NavyBlue': [0, 0, 95],
'DarkBlue': [0, 0, 135],
'Blue3': [0, 0, 175],
'Blue3': [0, 0, 215],
'Blue1': [0, 0, 255],
'DarkGreen': [0, 95, 0],
'DeepSkyBlue4': [0, 95, 95],
'DeepSkyBlue4': [0, 95, 135],
'DeepSkyBlue4': [0, 95, 175],
'DodgerBlue3': [0, 95, 215],
'DodgerBlue2': [0, 95, 255],
'Green4': [0, 135, 0],
'SpringGreen4': [0, 135, 95],
'Turquoise4': [0, 135, 135],
'DeepSkyBlue3': [0, 135, 175],
'DeepSkyBlue3': [0, 135, 215],
'DodgerBlue1': [0, 135, 255],
'Green3': [0, 175, 0],
'SpringGreen3': [0, 175, 95],
'DarkCyan': [0, 175, 135],
'LightSeaGreen': [0, 175, 175],
'DeepSkyBlue2': [0, 175, 215],
'DeepSkyBlue1': [0, 175, 255],
'Green3': [0, 215, 0],
'SpringGreen3': [0, 215, 95],
'SpringGreen2': [0, 215, 135],
'Cyan3': [0, 215, 175],
'DarkTurquoise': [0, 215, 215],
'Turquoise2': [0, 215, 255],
'Green1': [0, 255, 0],
'SpringGreen2': [0, 255, 95],
'SpringGreen1': [0, 255, 135],
'MediumSpringGreen': [0, 255, 175],
'Cyan2': [0, 255, 215],
'Cyan1': [0, 255, 255],
'DarkRed': [95, 0, 0],
'DeepPink4': [95, 0, 95],
'Purple4': [95, 0, 135],
'Purple4': [95, 0, 175],
'Purple3': [95, 0, 215],
'BlueViolet': [95, 0, 255],
'Orange4': [95, 95, 0],
'Grey37': [95, 95, 95],
'MediumPurple4': [95, 95, 135],
'SlateBlue3': [95, 95, 175],
'SlateBlue3': [95, 95, 215],
'RoyalBlue1': [95, 95, 255],
'Chartreuse4': [95, 135, 0],
'DarkSeaGreen4': [95, 135, 95],
'PaleTurquoise4': [95, 135, 135],
'SteelBlue': [95, 135, 175],
'SteelBlue3': [95, 135, 215],
'CornflowerBlue': [95, 135, 255],
'Chartreuse3': [95, 175, 0],
'DarkSeaGreen4': [95, 175, 95],
'CadetBlue': [95, 175, 135],
'CadetBlue': [95, 175, 175],
'SkyBlue3': [95, 175, 215],
'SteelBlue1': [95, 175, 255],
'Chartreuse3': [95, 215, 0],
'PaleGreen3': [95, 215, 95],
'SeaGreen3': [95, 215, 135],
'Aquamarine3': [95, 215, 175],
'MediumTurquoise': [95, 215, 215],
'SteelBlue1': [95, 215, 255],
'Chartreuse2': [95, 255, 0],
'SeaGreen2': [95, 255, 95],
'SeaGreen1': [95, 255, 135],
'SeaGreen1': [95, 255, 175],
'Aquamarine1': [95, 255, 215],
'DarkSlateGray2': [95, 255, 255],
'DarkRed': [135, 0, 0],
'DeepPink4': [135, 0, 95],
'DarkMagenta': [135, 0, 135],
'DarkMagenta': [135, 0, 175],
'DarkViolet': [135, 0, 215],
'Purple': [135, 0, 255],
'Orange4': [135, 95, 0],
'LightPink4': [135, 95, 95],
'Plum4': [135, 95, 135],
'MediumPurple3': [135, 95, 175],
'MediumPurple3': [135, 95, 215],
'SlateBlue1': [135, 95, 255],
'Yellow4': [135, 135, 0],
'Wheat4': [135, 135, 95],
'Grey53': [135, 135, 135],
'LightSlateGrey': [135, 135, 175],
'MediumPurple': [135, 135, 215],
'LightSlateBlue': [135, 135, 255],
'Yellow4': [135, 175, 0],
'DarkOliveGreen3': [135, 175, 95],
'DarkSeaGreen': [135, 175, 135],
'LightSkyBlue3': [135, 175, 175],
'LightSkyBlue3': [135, 175, 215],
'SkyBlue2': [135, 175, 255],
'Chartreuse2': [135, 215, 0],
'DarkOliveGreen3': [135, 215, 95],
'PaleGreen3': [135, 215, 135],
'DarkSeaGreen3': [135, 215, 175],
'DarkSlateGray3': [135, 215, 215],
'SkyBlue1': [135, 215, 255],
'Chartreuse1': [135, 255, 0],
'LightGreen': [135, 255, 95],
'LightGreen': [135, 255, 135],
'PaleGreen1': [135, 255, 175],
'Aquamarine1': [135, 255, 215],
'DarkSlateGray1': [135, 255, 255],
'Red3': [175, 0, 0],
'DeepPink4': [175, 0, 95],
'MediumVioletRed': [175, 0, 135],
'Magenta3': [175, 0, 175],
'DarkViolet': [175, 0, 215],
'Purple': [175, 0, 255],
'DarkOrange3': [175, 95, 0],
'IndianRed': [175, 95, 95],
'HotPink3': [175, 95, 135],
'MediumOrchid3': [175, 95, 175],
'MediumOrchid': [175, 95, 215],
'MediumPurple2': [175, 95, 255],
'DarkGoldenrod': [175, 135, 0],
'LightSalmon3': [175, 135, 95],
'RosyBrown': [175, 135, 135],
'Grey63': [175, 135, 175],
'MediumPurple2': [175, 135, 215],
'MediumPurple1': [175, 135, 255],
'Gold3': [175, 175, 0],
'DarkKhaki': [175, 175, 95],
'NavajoWhite3': [175, 175, 135],
'Grey69': [175, 175, 175],
'LightSteelBlue3': [175, 175, 215],
'LightSteelBlue': [175, 175, 255],
'Yellow3': [175, 215, 0],
'DarkOliveGreen3': [175, 215, 95],
'DarkSeaGreen3': [175, 215, 135],
'DarkSeaGreen2': [175, 215, 175],
'LightCyan3': [175, 215, 215],
'LightSkyBlue1': [175, 215, 255],
'GreenYellow': [175, 255, 0],
'DarkOliveGreen2': [175, 255, 95],
'PaleGreen1': [175, 255, 135],
'DarkSeaGreen2': [175, 255, 175],
'DarkSeaGreen1': [175, 255, 215],
'PaleTurquoise1': [175, 255, 255],
'Red3': [215, 0, 0],
'DeepPink3': [215, 0, 95],
'DeepPink3': [215, 0, 135],
'Magenta3': [215, 0, 175],
'Magenta3': [215, 0, 215],
'Magenta2': [215, 0, 255],
'DarkOrange3': [215, 95, 0],
'IndianRed': [215, 95, 95],
'HotPink3': [215, 95, 135],
'HotPink2': [215, 95, 175],
'Orchid': [215, 95, 215],
'MediumOrchid1': [215, 95, 255],
'Orange3': [215, 135, 0],
'LightSalmon3': [215, 135, 95],
'LightPink3': [215, 135, 135],
'Pink3': [215, 135, 175],
'Plum3': [215, 135, 215],
'Violet': [215, 135, 255],
'Gold3': [215, 175, 0],
'LightGoldenrod3': [215, 175, 95],
'Tan': [215, 175, 135],
'MistyRose3': [215, 175, 175],
'Thistle3': [215, 175, 215],
'Plum2': [215, 175, 255],
'Yellow3': [215, 215, 0],
'Khaki3': [215, 215, 95],
'LightGoldenrod2': [215, 215, 135],
'LightYellow3': [215, 215, 175],
'Grey84': [215, 215, 215],
'LightSteelBlue1': [215, 215, 255],
'Yellow2': [215, 255, 0],
'DarkOliveGreen1': [215, 255, 95],
'DarkOliveGreen1': [215, 255, 135],
'DarkSeaGreen1': [215, 255, 175],
'Honeydew2': [215, 255, 215],
'LightCyan1': [215, 255, 255],
'Red1': [255, 0, 0],
'DeepPink2': [255, 0, 95],
'DeepPink1': [255, 0, 135],
'DeepPink1': [255, 0, 175],
'Magenta2': [255, 0, 215],
'Magenta1': [255, 0, 255],
'OrangeRed1': [255, 95, 0],
'IndianRed1': [255, 95, 95],
'IndianRed1': [255, 95, 135],
'HotPink': [255, 95, 175],
'HotPink': [255, 95, 215],
'MediumOrchid1': [255, 95, 255],
'DarkOrange': [255, 135, 0],
'Salmon1': [255, 135, 95],
'LightCoral': [255, 135, 135],
'PaleVioletRed1': [255, 135, 175],
'Orchid2': [255, 135, 215],
'Orchid1': [255, 135, 255],
'Orange1': [255, 175, 0],
'SandyBrown': [255, 175, 95],
'LightSalmon1': [255, 175, 135],
'LightPink1': [255, 175, 175],
'Pink1': [255, 175, 215],
'Plum1': [255, 175, 255],
'Gold1': [255, 215, 0],
'LightGoldenrod2': [255, 215, 95],
'LightGoldenrod2': [255, 215, 135],
'NavajoWhite1': [255, 215, 175],
'MistyRose1': [255, 215, 215],
'Thistle1': [255, 215, 255],
'Yellow1': [255, 255, 0],
'LightGoldenrod1': [255, 255, 95],
'Khaki1': [255, 255, 135],
'Wheat1': [255, 255, 175],
'Cornsilk1': [255, 255, 215],
'Grey100': [255, 255, 255],
'Grey3': [8, 8, 8],
'Grey7': [18, 18, 18],
'Grey11': [28, 28, 28],
'Grey15': [38, 38, 38],
'Grey19': [48, 48, 48],
'Grey23': [58, 58, 58],
'Grey27': [68, 68, 68],
'Grey30': [78, 78, 78],
'Grey35': [88, 88, 88],
'Grey39': [98, 98, 98],
'Grey42': [108, 108, 108],
'Grey46': [118, 118, 118],
'Grey50': [128, 128, 128],
'Grey54': [138, 138, 138],
'Grey58': [148, 148, 148],
'Grey62': [158, 158, 158],
'Grey66': [168, 168, 168],
'Grey70': [178, 178, 178],
'Grey74': [188, 188, 188],
'Grey78': [198, 198, 198],
'Grey82': [208, 208, 208],
'Grey85': [218, 218, 218],
'Grey89': [228, 228, 228],
'Grey93': [238, 238, 238],
}
__extended_funcs = {}
# This is also gross. Sorry.
# NOTE: several xterm-256 names repeat in ``extended_colors`` (e.g. 'Blue3',
# 'DeepSkyBlue4'); as dict keys, later entries silently overwrite earlier ones,
# so only the last RGB triple for a repeated name survives below.
for k, v in extended_colors.items():
    # The cffi ``rgb_t`` struct built here appears unused; the raw [r, g, b]
    # list is what actually gets bound into the partials.
    color = ffi.new('rgb_t *', v)
    __extended_funcs[k.lower()] = partial(apply_256, v)
    __extended_funcs['bg_' + k.lower()] = partial(apply_256_bg, v)
    __extended_funcs['background_' + k.lower()] = partial(apply_256_bg, v)
    __extended_funcs['hl_' + k.lower()] = partial(apply_256_hl, v)
    __extended_funcs['highlight_' + k.lower()] = partial(apply_256_hl, v)
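# Usage sketch for the 256-colour helpers (illustrative only; assumes the
# ``apply_256*`` callables accept the raw [r, g, b] triple bound above):
#
#     __extended_funcs['navyblue']('deep water')
#     __extended_funcs['bg_grey50']('mid-grey background')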
|
|
from __future__ import unicode_literals
import re
from django.template import (Node, Variable, TemplateSyntaxError,
TokenParser, Library, TOKEN_TEXT, TOKEN_VAR)
from django.template.base import _render_value_in_context
from django.template.defaulttags import token_kwargs
from django.utils import six
from django.utils import translation
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
from django.conf import settings
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetLanguageInfoNode(Node):
def __init__(self, lang_code, variable):
self.lang_code = Variable(lang_code)
self.variable = variable
def render(self, context):
lang_code = self.lang_code.resolve(context)
context[self.variable] = translation.get_language_info(lang_code)
return ''
class GetLanguageInfoListNode(Node):
def __init__(self, languages, variable):
self.languages = Variable(languages)
self.variable = variable
def get_language_info(self, language):
# ``language`` is either a language code string or a sequence
# with the language code as its first item
if len(language[0]) > 1:
return translation.get_language_info(language[0])
else:
return translation.get_language_info(str(language))
def render(self, context):
langs = self.languages.resolve(context)
context[self.variable] = [self.get_language_info(lang) for lang in langs]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop, asvar=None,
message_context=None):
self.noop = noop
self.asvar = asvar
self.message_context = message_context
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, six.string_types):
self.filter_expression.var = Variable("'%s'" %
self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
if self.message_context:
self.filter_expression.var.message_context = (
self.message_context.resolve(context))
output = self.filter_expression.resolve(context)
value = _render_value_in_context(output, context)
if self.asvar:
context[self.asvar] = value
return ''
else:
return value
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None, message_context=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
self.message_context = message_context
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents)
elif token.token_type == TOKEN_VAR:
result.append('%%(%s)s' % token.contents)
vars.append(token.contents)
return ''.join(result), vars
def render(self, context):
if self.message_context:
message_context = self.message_context.resolve(context)
else:
message_context = None
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.resolve(context)
# Update() works like a push(), so corresponding context.pop() is at
# the end of function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
# Escape all isolated '%'
        singular = re.sub(r'%(?!\()', '%%', singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
            plural = re.sub(r'%(?!\()', '%%', plural)
if message_context:
result = translation.npgettext(message_context, singular,
plural, count)
else:
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
if message_context:
result = translation.pgettext(message_context, singular)
else:
result = translation.ugettext(singular)
data = dict([(v, _render_value_in_context(context.get(v, ''), context)) for v in vars])
context.pop()
try:
result = result % data
except (KeyError, ValueError):
with translation.override(None):
result = self.render(context)
return result
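    # Worked example of the flow above (illustrative only): the block
    #   ``There is 100% chance of {{ count }} object.``
    # is flattened by render_token_list() into
    #   ``There is 100% chance of %(count)s object.``,
    # the lone '%' is doubled to '%%' by the re.sub() call, the string is run
    # through (n)gettext, and the final ``result % data`` substitutes the
    # resolved ``count`` value back into the translated text.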
class LanguageNode(Node):
def __init__(self, nodelist, language):
self.nodelist = nodelist
self.language = language
def render(self, context):
with translation.override(self.language.resolve(context)):
output = self.nodelist.render(context)
return output
@register.tag("get_available_languages")
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
    your settings file (or the default settings) and
put it into the named variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
@register.tag("get_language_info")
def do_get_language_info(parser, token):
"""
This will store the language information dictionary for the given language
code in a context variable.
Usage::
{% get_language_info for LANGUAGE_CODE as l %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoNode(args[2], args[4])
@register.tag("get_language_info_list")
def do_get_language_info_list(parser, token):
"""
This will store a list of language information dictionaries for the given
language codes in a context variable. The language codes can be specified
either as a list of strings or a settings.LANGUAGES style tuple (or any
sequence of sequences whose first items are language codes).
Usage::
{% get_language_info_list for LANGUAGES as langs %}
{% for l in langs %}
{{ l.code }}
{{ l.name }}
{{ l.name_local }}
{{ l.bidi|yesno:"bi-directional,uni-directional" }}
{% endfor %}
"""
args = token.contents.split()
if len(args) != 5 or args[1] != 'for' or args[3] != 'as':
raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
return GetLanguageInfoListNode(args[2], args[4])
@register.filter
def language_name(lang_code):
return translation.get_language_info(lang_code)['name']
@register.filter
def language_name_local(lang_code):
return translation.get_language_info(lang_code)['name_local']
@register.filter
def language_bidi(lang_code):
return translation.get_language_info(lang_code)['bidi']
@register.tag("get_current_language")
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
    put its value into the ``language`` context
variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
@register.tag("get_current_language_bidi")
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
    put its value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
@register.tag("trans")
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
be pulled out by mark-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
It is possible to store the translated string into a variable::
{% trans "this is a test" as var %}
{{ var }}
Contextual translations are also supported::
{% trans "this is a test" context "greeting" %}
This is equivalent to calling pgettext instead of (u)gettext.
"""
class TranslateParser(TokenParser):
def top(self):
value = self.value()
            # Backwards compatibility fix:
# FilterExpression does not support single-quoted strings,
# so we make a cheap localized fix in order to maintain
# backwards compatibility with existing uses of ``trans``
# where single quote use is supported.
if value[0] == "'":
                m = re.match(r"^'([^']+)'(\|.*$)", value)
if m:
value = '"%s"%s' % (m.group(1).replace('"','\\"'), m.group(2))
elif value[-1] == "'":
value = '"%s"' % value[1:-1].replace('"','\\"')
noop = False
asvar = None
message_context = None
while self.more():
tag = self.tag()
if tag == 'noop':
noop = True
elif tag == 'context':
message_context = parser.compile_filter(self.value())
elif tag == 'as':
asvar = self.tag()
else:
raise TemplateSyntaxError(
"Only options for 'trans' are 'noop', " \
"'context \"xxx\"', and 'as VAR'.")
return value, noop, asvar, message_context
value, noop, asvar, message_context = TranslateParser(token.contents).top()
return TranslateNode(parser.compile_filter(value), noop, asvar,
message_context)
@register.tag("blocktrans")
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with bar=foo|filter boo=baz|filter %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count count=var|length %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
The "var as value" legacy format is still supported::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
{% blocktrans count var|length as count %}
Contextual translations are supported::
{% blocktrans with bar=foo|filter context "greeting" %}
This is {{ bar }}.
{% endblocktrans %}
This is equivalent to calling pgettext/npgettext instead of
(u)gettext/(u)ngettext.
"""
bits = token.split_contents()
options = {}
remaining_bits = bits[1:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'count':
value = token_kwargs(remaining_bits, parser, support_legacy=True)
if len(value) != 1:
raise TemplateSyntaxError('"count" in %r tag expected exactly '
'one keyword argument.' % bits[0])
elif option == "context":
try:
value = remaining_bits.pop(0)
value = parser.compile_filter(value)
except Exception:
raise TemplateSyntaxError('"context" in %r tag expected '
'exactly one argument.' % bits[0])
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
if 'count' in options:
        countervar, counter = list(six.iteritems(options['count']))[0]
else:
countervar, counter = None, None
if 'context' in options:
message_context = options['context']
else:
message_context = None
extra_context = options.get('with', {})
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter, message_context)
@register.tag
def language(parser, token):
"""
This will enable the given language just for this block.
Usage::
{% language "de" %}
This is {{ bar }} and {{ boo }}.
{% endlanguage %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (language)" % bits[0])
language = parser.compile_filter(bits[1])
nodelist = parser.parse(('endlanguage',))
parser.delete_first_token()
return LanguageNode(nodelist, language)
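# Putting the tags together in a template (illustrative only; assumes this tag
# library is loaded under the name ``i18n``):
#
#     {% load i18n %}
#     {% get_current_language as LANGUAGE_CODE %}
#     {% trans "Welcome" %}
#     {% blocktrans count counter=messages|length %}
#         You have {{ counter }} message.
#     {% plural %}
#         You have {{ counter }} messages.
#     {% endblocktrans %}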
|
|
"""
A base contact form for allowing users to send email messages through
a web interface, and a subclass demonstrating useful functionality.
"""
from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.template import loader, RequestContext
from django.contrib.sites.models import Site
from djpcms.utils.uniforms import Fieldset, FormLayout, blockLabels2
class ContactForm(forms.Form):
"""
Base contact form class from which all contact form classes should
inherit.
If you don't need any custom functionality, you can simply use
this form to provide basic contact functionality; it will collect
name, email address and message.
The ``contact_form`` view included in this application knows how
to work with this form and can handle many types of subclasses as
well (see below for a discussion of the important points), so in
many cases it will be all that you need. If you'd like to use this
form or a subclass of it from one of your own views, just do the
following:
1. When you instantiate the form, pass the current
``HttpRequest`` object to the constructor as the keyword
argument ``request``; this is used internally by the base
implementation, and also made available so that subclasses
can add functionality which relies on inspecting the
request.
2. To send the message, call the form's ``save`` method, which
accepts the keyword argument ``fail_silently`` and defaults
it to ``False``. This argument is passed directly to
``send_mail``, and allows you to suppress or raise
exceptions as needed for debugging. The ``save`` method has
no return value.
Other than that, treat it like any other form; validity checks and
validated data are handled normally, through the ``is_valid``
method and the ``cleaned_data`` dictionary.
Base implementation
-------------------
Under the hood, this form uses a somewhat abstracted interface in
order to make it easier to subclass and add functionality. There
are several important attributes subclasses may want to look at
overriding, all of which will work (in the base implementation) as
either plain attributes or as callable methods:
* ``from_email`` -- used to get the address to use in the
``From:`` header of the message. The base implementation
returns the value of the ``DEFAULT_FROM_EMAIL`` setting.
* ``message`` -- used to get the message body as a string. The
base implementation renders a template using the form's
``cleaned_data`` dictionary as context.
* ``recipient_list`` -- used to generate the list of
recipients for the message. The base implementation returns
the email addresses specified in the ``MANAGERS`` setting.
* ``subject`` -- used to generate the subject line for the
message. The base implementation returns the string 'Message
sent through the web site', with the name of the current
``Site`` prepended.
* ``template_name`` -- used by the base ``message`` method to
determine which template to use for rendering the
message. Default is ``contact_form/contact_form.txt``.
Internally, the base implementation ``_get_message_dict`` method
collects ``from_email``, ``message``, ``recipient_list`` and
``subject`` into a dictionary, which the ``save`` method then
passes directly to ``send_mail`` as keyword arguments.
Particularly important is the ``message`` attribute, with its base
implementation as a method which renders a template; because it
passes ``cleaned_data`` as the template context, any additional
fields added by a subclass will automatically be available in the
template. This means that many useful subclasses can get by with
just adding a few fields and possibly overriding
``template_name``.
Much useful functionality can be achieved in subclasses without
having to override much of the above; adding additional validation
methods works the same as any other form, and typically only a few
    items -- ``recipient_list`` and ``subject``, for example -- need to be
    overridden to achieve customized behavior.
Other notes for subclassing
---------------------------
Subclasses which want to inspect the current ``HttpRequest`` to
add functionality can access it via the attribute ``request``; the
base ``message`` takes advantage of this to use ``RequestContext``
when rendering its template. See the ``AkismetContactForm``
subclass in this file for an example of using the request to
perform additional validation.
Subclasses which override ``__init__`` need to accept ``*args``
and ``**kwargs``, and pass them via ``super`` in order to ensure
proper behavior.
Subclasses should be careful if overriding ``_get_message_dict``,
since that method **must** return a dictionary suitable for
passing directly to ``send_mail`` (unless ``save`` is overridden
as well).
Overriding ``save`` is relatively safe, though remember that code
which uses your form will expect ``save`` to accept the
``fail_silently`` keyword argument. In the base implementation,
that argument defaults to ``False``, on the assumption that it's
far better to notice errors than to silently not send mail from
the contact form (see also the Zen of Python: "Errors should never
pass silently, unless explicitly silenced").
"""
fail_silently=False
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request',None)
if self.request is None:
raise TypeError("Keyword argument 'request' must be supplied")
super(ContactForm, self).__init__(*args, **kwargs)
name = forms.CharField(max_length=100, label=u'Name')
email = forms.EmailField(max_length=200, label=u'E-mail')
#subj = forms.CharField(max_length=100, required = False, label=u'Subject')
body = forms.CharField(widget=forms.Textarea(), label=u'Message')
from_email = settings.DEFAULT_FROM_EMAIL
recipient_list = [mail_tuple[1] for mail_tuple in settings.MANAGERS]
_context = None
submits = (('Send message','contact'),)
layout = FormLayout(Fieldset('name','email'),
Fieldset('body',css_class = blockLabels2))
template_name = ['bits/contact_form_message.txt',
'djpcms/bits/contact_form_message.txt']
subject_template_name = ['bits/contact_form_subject.txt',
'djpcms/bits/contact_form_subject.txt']
def message(self):
"""
Renders the body of the message to a string.
"""
if callable(self.template_name):
template_name = self.template_name()
else:
template_name = self.template_name
return loader.render_to_string(template_name,
self.get_context())
def subject(self):
"""
Renders the subject of the message to a string.
"""
subject = loader.render_to_string(self.subject_template_name,
self.get_context())
return ''.join(subject.splitlines())
def get_context(self):
if self._context is None:
self._context = RequestContext(self.request,
dict(self.cleaned_data,
site=Site.objects.get_current()))
return self._context
def get_message_dict(self):
if not self.is_valid():
raise ValueError("Message cannot be sent from invalid contact form")
message_dict = {}
for message_part in ('from_email', 'message', 'recipient_list', 'subject'):
attr = getattr(self, message_part)
            message_dict[message_part] = attr() if callable(attr) else attr
return message_dict
def save(self, **kwargs):
"""Builds and sends the email message.
"""
send_mail(fail_silently=self.fail_silently, **self.get_message_dict())
class AkismetContactForm(ContactForm):
"""
Contact form which doesn't add any extra fields, but does add an
Akismet spam check to the validation routine.
Requires the setting ``AKISMET_API_KEY``, which should be a valid
Akismet API key.
"""
def clean_body(self):
if 'body' in self.cleaned_data and getattr(settings, 'AKISMET_API_KEY', ''):
from akismet import Akismet
from django.utils.encoding import smart_str
akismet_api = Akismet(key=settings.AKISMET_API_KEY,
blog_url='http://%s/' % Site.objects.get_current().domain)
if akismet_api.verify_key():
akismet_data = { 'comment_type': 'comment',
'referer': self.request.META.get('HTTP_REFERER', ''),
'user_ip': self.request.META.get('REMOTE_ADDR', ''),
'user_agent': self.request.META.get('HTTP_USER_AGENT', '') }
if akismet_api.comment_check(smart_str(self.cleaned_data['body']), data=akismet_data, build_data=True):
raise forms.ValidationError(u"Akismet thinks this message is spam")
return self.cleaned_data['body']
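# Illustrative subclass sketch (the field name and template path below are
# invented for demonstration and are not part of djpcms):
#
#     class FeedbackForm(ContactForm):
#         phone = forms.CharField(max_length=30, required=False, label=u'Phone')
#         template_name = 'bits/feedback_message.txt'
#
# In a view, a subclass is driven exactly like the base class:
#
#     form = FeedbackForm(data=request.POST, request=request)
#     if form.is_valid():
#         form.save(fail_silently=False)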
|
|
"""
Patient panel
"""
import tempfile
import wx
from ObjectListView import ObjectListView, ColumnDefn, OLVEvent
from images import bitmap_from_base64, toolbar_print_one_b64
from objectlistviewmod import ObjectListViewMod, EVT_OVL_CHECK_EVENT
from pdfviewer import PDFViewer
from printing import generate_prescription
from drugaddpanel import DrugAddPanel
from acdbtextctrl import AcDbTextCtrl
from dbtextctrl import DbTextCtrl
from database import Diagnosis, Doctor
class PatientPanel(wx.Panel):
"""
Patient panel
"""
def __init__(self, parent, session, **kwds):
super(PatientPanel, self).__init__(parent, **kwds)
self.session = session
self.patient_list_panel = None
self.patient = None
sizer = wx.BoxSizer(wx.VERTICAL)
self.toolbar = wx.ToolBar(self, style=wx.TB_NODIVIDER)
self.tb_print = self.toolbar.AddLabelTool(
wx.ID_ANY, 'Print', bitmap_from_base64(toolbar_print_one_b64),
shortHelp="Print Prescription")
self.Bind(wx.EVT_TOOL, self.OnPrint, self.tb_print)
self.toolbar.AddStretchableSpace()
self.lbl_doctor = wx.StaticText(self.toolbar, label="Doctor's Name ", size=wx.Size(-1, -1))
self.toolbar.AddControl(self.lbl_doctor)
self.txt_doctor = AcDbTextCtrl(self.toolbar, self.session, Doctor)
self.toolbar.AddControl(self.txt_doctor)
self.toolbar.Realize()
        sizer.Add(self.toolbar, 0, wx.ALL | wx.EXPAND)
grid_sizer = wx.FlexGridSizer(8, 2, 5, 5)
grid_sizer.AddGrowableCol(1, 1)
label_width = 100
self.lbl_bed = wx.StaticText(self, label='Bed', size=wx.Size(label_width, -1))
self.txt_bed = DbTextCtrl(self, self.session, self.OnChangeList)
grid_sizer.Add(self.lbl_bed, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
grid_sizer.Add(self.txt_bed, 1, wx.EXPAND)
self.lbl_hospital_no = wx.StaticText(self, label='Hospital No',
size=wx.Size(label_width, -1))
self.txt_hospital_no = DbTextCtrl(self, self.session, self.OnChangeList)
grid_sizer.Add(self.lbl_hospital_no, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
grid_sizer.Add(self.txt_hospital_no, 1, wx.EXPAND)
self.lbl_national_id_no = wx.StaticText(self, label='National Id No',
size=wx.Size(label_width, -1))
self.txt_national_id_no = DbTextCtrl(self, self.session)
grid_sizer.Add(self.lbl_national_id_no, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
grid_sizer.Add(self.txt_national_id_no, 1, wx.EXPAND)
self.lbl_name = wx.StaticText(self, label='Name', size=wx.Size(label_width, -1))
self.txt_name = DbTextCtrl(self, self.session, self.OnChangeList)
grid_sizer.Add(self.lbl_name, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
grid_sizer.Add(self.txt_name, 1, wx.EXPAND)
self.lbl_age = wx.StaticText(self, label='Age', size=wx.Size(label_width, -1))
self.txt_age = DbTextCtrl(self, self.session)
grid_sizer.Add(self.lbl_age, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
grid_sizer.Add(self.txt_age, 1, wx.EXPAND)
self.lbl_sex = wx.StaticText(self, label='Sex', size=wx.Size(label_width, -1))
self.txt_sex = DbTextCtrl(self, self.session)
grid_sizer.Add(self.lbl_sex, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
grid_sizer.Add(self.txt_sex, 1, wx.EXPAND)
self.lbl_diagnosis = wx.StaticText(self, label='Diagnosis', size=wx.Size(label_width, -1))
self.txt_diagnosis = AcDbTextCtrl(self, self.session, Diagnosis)
grid_sizer.Add(self.lbl_diagnosis, 1, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL)
grid_sizer.Add(self.txt_diagnosis, 1, wx.EXPAND)
sizer.Add(grid_sizer, 0, wx.ALL | wx.EXPAND, border=10)
self.txt_drug_name = DrugAddPanel(self, self.session, self)
sizer.Add(self.txt_drug_name, 0, wx.RIGHT | wx.LEFT | wx.EXPAND, border=10)
self.prescription_list = ObjectListViewMod(
self,
style=wx.LC_REPORT|wx.SUNKEN_BORDER,
cellEditMode=ObjectListView.CELLEDIT_DOUBLECLICK
)
self.prescription_list.SetColumns([
ColumnDefn("Drug", "left", 180, "drug_name", isEditable=False),
ColumnDefn("Order", "left", 140, "drug_order")
])
self.prescription_list.SetEmptyListMsg("")
self.prescription_list.useAlternateBackColors = False
self.prescription_list.CreateCheckStateColumn()
self.prescription_list.Bind(EVT_OVL_CHECK_EVENT, self.OnRxCheck)
self.prescription_list.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK, self.OnRxContextMenu)
self.prescription_list.Bind(OLVEvent.EVT_CELL_EDIT_FINISHED, self.OnCellEditFinished)
        sizer.Add(self.prescription_list, 1, wx.RIGHT | wx.LEFT | wx.BOTTOM | wx.EXPAND, border=10)
        # Enter traversal
self.txt_bed.Bind(wx.EVT_KEY_UP, self.OntxtBedKeyUp)
self.txt_hospital_no.Bind(wx.EVT_KEY_UP, self.OntxtHospitalNoKeyUp)
self.txt_national_id_no.Bind(wx.EVT_KEY_UP, self.OntxtNationalIdNoKeyUp)
self.txt_name.Bind(wx.EVT_KEY_UP, self.OntxtNameKeyUp)
self.txt_age.Bind(wx.EVT_KEY_UP, self.OntxtAgeKeyUp)
self.txt_sex.Bind(wx.EVT_KEY_UP, self.OntxtSexKeyUp)
self.txt_diagnosis.Bind(wx.EVT_KEY_UP, self.OntxtDiagnosisKeyUp)
self.SetSizer(sizer)
self.rx_menu = wx.Menu()
menu_id = 500
self.rx_menu.Append(menu_id, "Remove", "Remove Medication.")
wx.EVT_MENU(self, menu_id, self.OnRemoveRx)
menu_id = 501
self.rx_menu.Append(menu_id, "Tick All", "Tick All Medications")
wx.EVT_MENU(self, menu_id, self.OnTickAllRx)
menu_id = 502
self.rx_menu.Append(menu_id, "Untick All", "Untick All Medications")
wx.EVT_MENU(self, menu_id, self.OnUntickAllRx)
self.unset()
def set(self, patient):
""" Set Patient """
self.patient = patient
self.txt_hospital_no.SetDbObjectAttr(patient, "hospital_no")
self.txt_national_id_no.SetDbObjectAttr(patient, "national_id_no")
self.txt_bed.SetDbObjectAttr(patient, "bed_no")
self.txt_name.SetDbObjectAttr(patient, "name")
self.txt_age.SetDbObjectAttr(patient, "age")
self.txt_sex.SetDbObjectAttr(patient, "sex")
self.txt_diagnosis.SetDbObjectAttr(patient, "diagnosis")#.ChangeValue(str(patient.diagnosis))
self.update_rx()
self.toolbar.EnableTool(self.tb_print.GetId(), True)
self.txt_hospital_no.Enable()
self.txt_national_id_no.Enable()
self.txt_bed.Enable()
self.txt_name.Enable()
self.txt_age.Enable()
self.txt_sex.Enable()
self.txt_diagnosis.Enable()
self.txt_drug_name.Enable()
self.prescription_list.Enable()
def unset(self):
""" Clear the panel """
self.patient = None
self.txt_hospital_no.SetDbObjectAttr(None, "")
self.txt_national_id_no.SetDbObjectAttr(None, "")
self.txt_bed.SetDbObjectAttr(None, "")
self.txt_name.SetDbObjectAttr(None, "")
self.txt_age.SetDbObjectAttr(None, "")
self.txt_sex.SetDbObjectAttr(None, "")
self.txt_diagnosis.SetDbObjectAttr(None, "")#.ChangeValue("")
self.prescription_list.DeleteAllItems()
self.toolbar.EnableTool(self.tb_print.GetId(), False)
self.txt_hospital_no.Disable()
self.txt_national_id_no.Disable()
self.txt_bed.Disable()
self.txt_name.Disable()
self.txt_age.Disable()
self.txt_sex.Disable()
self.txt_diagnosis.Disable()
self.txt_drug_name.Disable()
self.prescription_list.Disable()
def update_rx(self):
""" Update the medications list """
self.prescription_list.DeleteAllItems()
for row in self.patient.rxs:
self.prescription_list.AddObject(row)
if row.active:
self.prescription_list.SetCheckState(row, True)
else:
self.prescription_list.SetCheckState(row, False)
self.prescription_list.RefreshObjects(self.prescription_list.GetObjects())
def OnChangeList(self, event):
""" Update patient list to reflect changes """
self.patient_list_panel.patient_list.RefreshObjects([self.patient])
def OnCellEditFinished(self, event):
""" Save changes to drug order in the drug list """
self.session.commit()
def OntxtBedKeyUp(self, event):
""" Enter treversal """
if event.GetKeyCode() == wx.WXK_RETURN:
self.txt_hospital_no.SetFocus()
self.txt_hospital_no.SetSelection(-1, -1)
def OntxtHospitalNoKeyUp(self, event):
""" Enter treversal """
if event.GetKeyCode() == wx.WXK_RETURN:
self.txt_national_id_no.SetFocus()
self.txt_national_id_no.SetSelection(-1, -1)
def OntxtNationalIdNoKeyUp(self, event):
""" Enter treversal """
if event.GetKeyCode() == wx.WXK_RETURN:
self.txt_name.SetFocus()
self.txt_name.SetSelection(-1, -1)
def OntxtNameKeyUp(self, event):
""" Enter treversal """
if event.GetKeyCode() == wx.WXK_RETURN:
self.txt_age.SetFocus()
self.txt_age.SetSelection(-1, -1)
def OntxtAgeKeyUp(self, event):
""" Enter treversal """
if event.GetKeyCode() == wx.WXK_RETURN:
self.txt_sex.SetFocus()
self.txt_sex.SetSelection(-1, -1)
def OntxtSexKeyUp(self, event):
""" Enter treversal """
if event.GetKeyCode() == wx.WXK_RETURN:
self.txt_diagnosis.SetFocus()
self.txt_diagnosis.SetSelection(-1, -1)
def OntxtDiagnosisKeyUp(self, event):
""" Enter treversal """
if event.GetKeyCode() == wx.WXK_RETURN:
self.txt_drug_name.txt_drug_name.SetFocus()
self.txt_drug_name.txt_drug_name.SetSelection(-1, -1)
def OnRxCheck(self, event):
""" Enter treversal """
if event.value is True:
event.object.active = True
else:
event.object.active = False
self.session.commit()
def OnRxContextMenu(self, event):
""" Show medication list context menu """
self.PopupMenu(self.rx_menu)
def OnTickAllRx(self, event):
""" Tick all medications """
rxs = self.prescription_list.GetObjects()
for rx in rxs:
rx.active = True
self.prescription_list.SetCheckState(rx, True)
self.session.commit()
self.prescription_list.RefreshObjects(rxs)
def OnUntickAllRx(self, event):
""" Untick all medications """
rxs = self.prescription_list.GetObjects()
for rx in rxs:
rx.active = False
self.prescription_list.SetCheckState(rx, False)
self.session.commit()
self.prescription_list.RefreshObjects(rxs)
def OnRemoveRx(self, event):
""" Remove medication """
dlg = wx.MessageDialog(None, 'Remove selected medications?', 'Remove Medication',
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
result = dlg.ShowModal()
if result != wx.ID_YES:
return
for rx in self.prescription_list.GetSelectedObjects():
self.session.delete(rx)
self.session.commit()
self.update_rx()
def OnPrint(self, event):
""" Print this prescription """
if self.patient is None:
return
self.session.refresh(self.patient)
temp_file = tempfile.mktemp(".pdf")
generate_prescription(self.session, self.patient, self.txt_doctor.GetValue(), temp_file)
pdf_view = PDFViewer(None, title="Print Preview")
        pdf_view.viewer.UsePrintDirect = False
pdf_view.viewer.LoadFile(temp_file)
pdf_view.Show()
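# Illustrative embedding sketch (assumes an existing wx.Frame ``frame``, an
# SQLAlchemy ``session`` and a patient-list panel; all names are hypothetical):
#
#     panel = PatientPanel(frame, session)
#     panel.patient_list_panel = patient_list_panel  # wired up by the caller
#     panel.set(selected_patient)                    # populate the fields
#     panel.unset()                                  # clear and disable again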
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FirewallPoliciesOperations(object):
"""FirewallPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
firewall_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
firewall_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified Firewall Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
firewall_policy_name=firewall_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
firewall_policy_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.FirewallPolicy"
"""Gets the specified Firewall Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_11_01.models.FirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
firewall_policy_name, # type: str
parameters, # type: "_models.FirewallPolicy"
**kwargs # type: Any
):
# type: (...) -> "_models.FirewallPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FirewallPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FirewallPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('FirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
firewall_policy_name, # type: str
parameters, # type: "_models.FirewallPolicy"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.FirewallPolicy"]
"""Creates or updates the specified Firewall Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:param parameters: Parameters supplied to the create or update Firewall Policy operation.
:type parameters: ~azure.mgmt.network.v2020_11_01.models.FirewallPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either FirewallPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_11_01.models.FirewallPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
firewall_policy_name=firewall_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FirewallPolicyListResult"]
"""Lists all Firewall Policies in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.FirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FirewallPolicyListResult"]
"""Gets all the Firewall Policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_11_01.models.FirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/firewallPolicies'} # type: ignore
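# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated operations class above).
# It assumes azure-identity and azure-mgmt-network are installed and that the
# operations group is exposed as NetworkManagementClient.firewall_policies;
# the subscription id, resource group, policy name, and location are
# placeholder values.
if __name__ == '__main__':
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id='<subscription-id>',
    )
    # begin_create_or_update returns an LROPoller; result() blocks until the
    # service reaches a terminal provisioning state, then returns the
    # deserialized FirewallPolicy.
    poller = client.firewall_policies.begin_create_or_update(
        resource_group_name='example-rg',
        firewall_policy_name='example-policy',
        parameters={'location': 'westus2'},
    )
    policy = poller.result()
    print(policy.name, policy.provisioning_state)
    # list_all returns an ItemPaged iterator that follows next_link paging
    # transparently, as implemented by prepare_request/get_next above.
    for item in client.firewall_policies.list_all():
        print(item.id)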
|
|
import sqlalchemy as sa
from sqlalchemy import testing, util
from sqlalchemy.orm import mapper, deferred, defer, undefer, Load, \
load_only, undefer_group, create_session, synonym, relationship, Session,\
joinedload, defaultload, aliased, contains_eager, with_polymorphic
from sqlalchemy.testing import eq_, AssertsCompiledSQL, assert_raises_message
from test.orm import _fixtures
from .inheritance._poly_fixtures import Company, Person, Engineer, Manager, \
Boss, Machine, Paperwork, _Polymorphic
class DeferredTest(AssertsCompiledSQL, _fixtures.FixtureTest):
def test_basic(self):
"""A basic deferred load."""
Order, orders = self.classes.Order, self.tables.orders
mapper(Order, orders, order_by=orders.c.id, properties={
'description': deferred(orders.c.description)})
o = Order()
self.assert_(o.description is None)
q = create_session().query(Order)
def go():
l = q.all()
o2 = l[2]
x = o2.description
self.sql_eq_(go, [
("SELECT orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.isopen AS orders_isopen "
"FROM orders ORDER BY orders.id", {}),
("SELECT orders.description AS orders_description "
"FROM orders WHERE orders.id = :param_1",
{'param_1':3})])
def test_defer_primary_key(self):
"""what happens when we try to defer the primary key?"""
Order, orders = self.classes.Order, self.tables.orders
mapper(Order, orders, order_by=orders.c.id, properties={
'id': deferred(orders.c.id)})
# right now, it's not that graceful :)
q = create_session().query(Order)
assert_raises_message(
sa.exc.NoSuchColumnError,
"Could not locate",
q.first
)
def test_unsaved(self):
"""Deferred loading does not kick in when just PK cols are set."""
Order, orders = self.classes.Order, self.tables.orders
mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
sess = create_session()
o = Order()
sess.add(o)
o.id = 7
def go():
o.description = "some description"
self.sql_count_(0, go)
def test_synonym_group_bug(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties={
'isopen':synonym('_isopen', map_column=True),
'description':deferred(orders.c.description, group='foo')
})
sess = create_session()
o1 = sess.query(Order).get(1)
eq_(o1.description, "order 1")
def test_unsaved_2(self):
Order, orders = self.classes.Order, self.tables.orders
mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
sess = create_session()
o = Order()
sess.add(o)
def go():
o.description = "some description"
self.sql_count_(0, go)
def test_unsaved_group(self):
"""Deferred loading doesn't kick in when just PK cols are set"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, order_by=orders.c.id, properties=dict(
description=deferred(orders.c.description, group='primary'),
opened=deferred(orders.c.isopen, group='primary')))
sess = create_session()
o = Order()
sess.add(o)
o.id = 7
def go():
o.description = "some description"
self.sql_count_(0, go)
def test_unsaved_group_2(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, order_by=orders.c.id, properties=dict(
description=deferred(orders.c.description, group='primary'),
opened=deferred(orders.c.isopen, group='primary')))
sess = create_session()
o = Order()
sess.add(o)
def go():
o.description = "some description"
self.sql_count_(0, go)
def test_save(self):
Order, orders = self.classes.Order, self.tables.orders
m = mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
sess = create_session()
o2 = sess.query(Order).get(2)
o2.isopen = 1
sess.flush()
def test_group(self):
"""Deferred load with a group"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties=util.OrderedDict([
('userident', deferred(orders.c.user_id, group='primary')),
('addrident', deferred(orders.c.address_id, group='primary')),
('description', deferred(orders.c.description, group='primary')),
('opened', deferred(orders.c.isopen, group='primary'))
]))
sess = create_session()
q = sess.query(Order).order_by(Order.id)
def go():
l = q.all()
o2 = l[2]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, 'order 3')
self.sql_eq_(go, [
("SELECT orders.id AS orders_id "
"FROM orders ORDER BY orders.id", {}),
("SELECT orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders WHERE orders.id = :param_1",
{'param_1':3})])
o2 = q.all()[2]
eq_(o2.description, 'order 3')
assert o2 not in sess.dirty
o2.description = 'order 3'
def go():
sess.flush()
self.sql_count_(0, go)
def test_preserve_changes(self):
"""A deferred load operation doesn't revert modifications on attributes"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties = {
'userident': deferred(orders.c.user_id, group='primary'),
'description': deferred(orders.c.description, group='primary'),
'opened': deferred(orders.c.isopen, group='primary')
})
sess = create_session()
o = sess.query(Order).get(3)
assert 'userident' not in o.__dict__
o.description = 'somenewdescription'
eq_(o.description, 'somenewdescription')
def go():
eq_(o.opened, 1)
self.assert_sql_count(testing.db, go, 1)
eq_(o.description, 'somenewdescription')
assert o in sess.dirty
def test_commits_state(self):
"""
When deferred elements are loaded via a group, they get the proper
CommittedState and don't result in changes being committed
"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties = {
'userident': deferred(orders.c.user_id, group='primary'),
'description': deferred(orders.c.description, group='primary'),
'opened': deferred(orders.c.isopen, group='primary')})
sess = create_session()
o2 = sess.query(Order).get(3)
# this will load the group of attributes
eq_(o2.description, 'order 3')
assert o2 not in sess.dirty
# this will mark it as 'dirty', but nothing actually changed
o2.description = 'order 3'
# therefore the flush() shouldn't actually issue any SQL
self.assert_sql_count(testing.db, sess.flush, 0)
def test_map_selectable_wo_deferred(self):
"""test mapping to a selectable with deferred cols,
the selectable doesn't include the deferred col.
"""
Order, orders = self.classes.Order, self.tables.orders
order_select = sa.select([
orders.c.id,
orders.c.user_id,
orders.c.address_id,
orders.c.description,
orders.c.isopen]).alias()
mapper(Order, order_select, properties={
'description':deferred(order_select.c.description)
})
sess = Session()
o1 = sess.query(Order).order_by(Order.id).first()
assert 'description' not in o1.__dict__
eq_(o1.description, 'order 1')
class DeferredOptionsTest(AssertsCompiledSQL, _fixtures.FixtureTest):
__dialect__ = 'default'
def test_options(self):
"""Options on a mapper to create deferred and undeferred columns"""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders)
sess = create_session()
q = sess.query(Order).order_by(Order.id).options(defer('user_id'))
def go():
q.all()[0].user_id
self.sql_eq_(go, [
("SELECT orders.id AS orders_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders ORDER BY orders.id", {}),
("SELECT orders.user_id AS orders_user_id "
"FROM orders WHERE orders.id = :param_1",
{'param_1':1})])
sess.expunge_all()
q2 = q.options(undefer('user_id'))
self.sql_eq_(q2.all, [
("SELECT orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.address_id AS orders_address_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen "
"FROM orders ORDER BY orders.id",
{})])
def test_undefer_group(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties=util.OrderedDict([
('userident', deferred(orders.c.user_id, group='primary')),
('description', deferred(orders.c.description, group='primary')),
('opened', deferred(orders.c.isopen, group='primary'))
]
))
sess = create_session()
q = sess.query(Order).order_by(Order.id)
def go():
l = q.options(undefer_group('primary')).all()
o2 = l[2]
eq_(o2.opened, 1)
eq_(o2.userident, 7)
eq_(o2.description, 'order 3')
self.sql_eq_(go, [
("SELECT orders.user_id AS orders_user_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen, "
"orders.id AS orders_id, "
"orders.address_id AS orders_address_id "
"FROM orders ORDER BY orders.id",
{})])
def test_undefer_star(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties=util.OrderedDict([
('userident', deferred(orders.c.user_id)),
('description', deferred(orders.c.description)),
('opened', deferred(orders.c.isopen))
]
))
sess = create_session()
q = sess.query(Order).options(Load(Order).undefer('*'))
self.assert_compile(q,
"SELECT orders.user_id AS orders_user_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen, "
"orders.id AS orders_id, "
"orders.address_id AS orders_address_id FROM orders"
)
def test_locates_col(self):
"""Manually adding a column to the result undefers the column."""
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties={
'description': deferred(orders.c.description)})
sess = create_session()
o1 = sess.query(Order).order_by(Order.id).first()
def go():
eq_(o1.description, 'order 1')
self.sql_count_(1, go)
sess = create_session()
o1 = (sess.query(Order).
order_by(Order.id).
add_column(orders.c.description).first())[0]
def go():
eq_(o1.description, 'order 1')
self.sql_count_(0, go)
def test_deep_options(self):
users, items, order_items, Order, Item, User, orders = (self.tables.users,
self.tables.items,
self.tables.order_items,
self.classes.Order,
self.classes.Item,
self.classes.User,
self.tables.orders)
mapper(Item, items, properties=dict(
description=deferred(items.c.description)))
mapper(Order, orders, properties=dict(
items=relationship(Item, secondary=order_items)))
mapper(User, users, properties=dict(
orders=relationship(Order, order_by=orders.c.id)))
sess = create_session()
q = sess.query(User).order_by(User.id)
l = q.all()
item = l[0].orders[1].items[1]
def go():
eq_(item.description, 'item 4')
self.sql_count_(1, go)
eq_(item.description, 'item 4')
sess.expunge_all()
l = q.options(undefer('orders.items.description')).all()
item = l[0].orders[1].items[1]
def go():
eq_(item.description, 'item 4')
self.sql_count_(0, go)
eq_(item.description, 'item 4')
def test_path_entity(self):
"""test the legacy *addl_attrs argument."""
User = self.classes.User
Order = self.classes.Order
Item = self.classes.Item
users = self.tables.users
orders = self.tables.orders
items = self.tables.items
order_items = self.tables.order_items
mapper(User, users, properties={
"orders": relationship(Order, lazy="joined")
})
mapper(Order, orders, properties={
"items": relationship(Item, secondary=order_items, lazy="joined")
})
mapper(Item, items)
sess = create_session()
exp = ("SELECT users.id AS users_id, users.name AS users_name, "
"items_1.id AS items_1_id, orders_1.id AS orders_1_id, "
"orders_1.user_id AS orders_1_user_id, orders_1.address_id "
"AS orders_1_address_id, orders_1.description AS "
"orders_1_description, orders_1.isopen AS orders_1_isopen "
"FROM users LEFT OUTER JOIN orders AS orders_1 "
"ON users.id = orders_1.user_id LEFT OUTER JOIN "
"(order_items AS order_items_1 JOIN items AS items_1 "
"ON items_1.id = order_items_1.item_id) "
"ON orders_1.id = order_items_1.order_id")
q = sess.query(User).options(defer(User.orders, Order.items, Item.description))
self.assert_compile(q, exp)
def test_chained_multi_col_options(self):
users, User = self.tables.users, self.classes.User
orders, Order = self.tables.orders, self.classes.Order
mapper(User, users, properties={
"orders": relationship(Order)
})
mapper(Order, orders)
sess = create_session()
q = sess.query(User).options(
joinedload(User.orders).defer("description").defer("isopen")
)
self.assert_compile(q,
"SELECT users.id AS users_id, users.name AS users_name, "
"orders_1.id AS orders_1_id, orders_1.user_id AS orders_1_user_id, "
"orders_1.address_id AS orders_1_address_id FROM users "
"LEFT OUTER JOIN orders AS orders_1 ON users.id = orders_1.user_id"
)
def test_load_only_no_pk(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders)
sess = create_session()
q = sess.query(Order).options(load_only("isopen", "description"))
self.assert_compile(q,
"SELECT orders.id AS orders_id, "
"orders.description AS orders_description, "
"orders.isopen AS orders_isopen FROM orders")
def test_load_only_no_pk_rt(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders)
sess = create_session()
q = sess.query(Order).order_by(Order.id).\
options(load_only("isopen", "description"))
eq_(q.first(), Order(id=1))
def test_load_only_w_deferred(self):
orders, Order = self.tables.orders, self.classes.Order
mapper(Order, orders, properties={
"description": deferred(orders.c.description)
})
sess = create_session()
q = sess.query(Order).options(
load_only("isopen", "description"),
undefer("user_id")
)
self.assert_compile(q,
"SELECT orders.description AS orders_description, "
"orders.id AS orders_id, "
"orders.user_id AS orders_user_id, "
"orders.isopen AS orders_isopen FROM orders")
def test_load_only_propagate_unbound(self):
self._test_load_only_propagate(False)
def test_load_only_propagate_bound(self):
self._test_load_only_propagate(True)
def _test_load_only_propagate(self, use_load):
User = self.classes.User
Address = self.classes.Address
users = self.tables.users
addresses = self.tables.addresses
mapper(User, users, properties={
"addresses": relationship(Address)
})
mapper(Address, addresses)
sess = create_session()
expected = [
("SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.id IN (:id_1, :id_2)", {'id_2': 8, 'id_1': 7}),
("SELECT addresses.id AS addresses_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id", {'param_1': 7}),
("SELECT addresses.id AS addresses_id, "
"addresses.email_address AS addresses_email_address "
"FROM addresses WHERE :param_1 = addresses.user_id", {'param_1': 8}),
]
if use_load:
opt = Load(User).defaultload(User.addresses).load_only("id", "email_address")
else:
opt = defaultload(User.addresses).load_only("id", "email_address")
q = sess.query(User).options(opt).filter(User.id.in_([7, 8]))
def go():
for user in q:
user.addresses
self.sql_eq_(go, expected)
def test_load_only_parent_specific(self):
User = self.classes.User
Address = self.classes.Address
Order = self.classes.Order
users = self.tables.users
addresses = self.tables.addresses
orders = self.tables.orders
mapper(User, users)
mapper(Address, addresses)
mapper(Order, orders)
sess = create_session()
q = sess.query(User, Order, Address).options(
Load(User).load_only("name"),
Load(Order).load_only("id"),
Load(Address).load_only("id", "email_address")
)
self.assert_compile(q,
"SELECT users.id AS users_id, users.name AS users_name, "
"orders.id AS orders_id, "
"addresses.id AS addresses_id, addresses.email_address "
"AS addresses_email_address FROM users, orders, addresses"
)
def test_load_only_path_specific(self):
User = self.classes.User
Address = self.classes.Address
Order = self.classes.Order
users = self.tables.users
addresses = self.tables.addresses
orders = self.tables.orders
mapper(User, users, properties=util.OrderedDict([
("addresses", relationship(Address, lazy="joined")),
("orders", relationship(Order, lazy="joined"))
]))
mapper(Address, addresses)
mapper(Order, orders)
sess = create_session()
q = sess.query(User).options(
load_only("name").defaultload("addresses").load_only("id", "email_address"),
defaultload("orders").load_only("id")
)
# hmmmm joinedload seems to be forcing users.id into here...
self.assert_compile(
q,
"SELECT users.id AS users_id, users.name AS users_name, "
"addresses_1.id AS addresses_1_id, "
"addresses_1.email_address AS addresses_1_email_address, "
"orders_1.id AS orders_1_id FROM users "
"LEFT OUTER JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id "
"LEFT OUTER JOIN orders AS orders_1 ON users.id = orders_1.user_id"
)
class InheritanceTest(_Polymorphic):
__dialect__ = 'default'
def test_load_only_subclass(self):
s = Session()
q = s.query(Manager).options(load_only("status", "manager_name"))
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id "
"ORDER BY people.person_id"
)
def test_load_only_subclass_and_superclass(self):
s = Session()
q = s.query(Boss).options(load_only("status", "manager_name"))
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id JOIN boss "
"ON managers.person_id = boss.boss_id ORDER BY people.person_id"
)
def test_load_only_alias_subclass(self):
s = Session()
m1 = aliased(Manager, flat=True)
q = s.query(m1).options(load_only("status", "manager_name"))
self.assert_compile(
q,
"SELECT managers_1.person_id AS managers_1_person_id, "
"people_1.person_id AS people_1_person_id, "
"people_1.type AS people_1_type, "
"managers_1.status AS managers_1_status, "
"managers_1.manager_name AS managers_1_manager_name "
"FROM people AS people_1 JOIN managers AS "
"managers_1 ON people_1.person_id = managers_1.person_id "
"ORDER BY people_1.person_id"
)
def test_load_only_subclass_from_relationship_polymorphic(self):
s = Session()
wp = with_polymorphic(Person, [Manager], flat=True)
q = s.query(Company).join(Company.employees.of_type(wp)).options(
contains_eager(Company.employees.of_type(wp)).
load_only(wp.Manager.status, wp.Manager.manager_name)
)
self.assert_compile(
q,
"SELECT people_1.person_id AS people_1_person_id, "
"people_1.type AS people_1_type, "
"managers_1.person_id AS managers_1_person_id, "
"managers_1.status AS managers_1_status, "
"managers_1.manager_name AS managers_1_manager_name, "
"companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies JOIN (people AS people_1 LEFT OUTER JOIN "
"managers AS managers_1 ON people_1.person_id = "
"managers_1.person_id) ON companies.company_id = "
"people_1.company_id"
)
def test_load_only_subclass_from_relationship(self):
s = Session()
from sqlalchemy import inspect
inspect(Company).add_property("managers", relationship(Manager))
q = s.query(Company).join(Company.managers).options(
contains_eager(Company.managers).
load_only("status", "manager_name")
)
self.assert_compile(
q,
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name, "
"managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM companies JOIN (people JOIN managers ON people.person_id = "
"managers.person_id) ON companies.company_id = people.company_id"
)
def test_defer_on_wildcard_subclass(self):
# pretty much the same as load_only except doesn't
# exclude the primary key
s = Session()
q = s.query(Manager).options(
defer(".*"), undefer("status"))
self.assert_compile(
q,
"SELECT managers.status AS managers_status "
"FROM people JOIN managers ON "
"people.person_id = managers.person_id ORDER BY people.person_id"
)
def test_defer_super_name_on_subclass(self):
s = Session()
q = s.query(Manager).options(defer("name"))
self.assert_compile(
q,
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.type AS people_type, managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id "
"ORDER BY people.person_id"
)
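# ---------------------------------------------------------------------------
# Hedged standalone sketch (not part of the test suite above). It restates
# the basic deferred-column behavior the tests exercise, using an in-memory
# SQLite database; the table, class, and data here are invented for the
# example and assume the same classical mapper()/deferred() API used in this
# file.
if __name__ == '__main__':
    from sqlalchemy import Column, Integer, MetaData, String, Table, create_engine
    from sqlalchemy.orm import Session, deferred, mapper

    metadata = MetaData()
    notes = Table(
        'notes', metadata,
        Column('id', Integer, primary_key=True),
        Column('title', String(50)),
        Column('body', String(500)),
    )

    class Note(object):
        pass

    # 'body' is deferred: it is omitted from the initial SELECT and loaded
    # with a second statement on first attribute access.
    mapper(Note, notes, properties={'body': deferred(notes.c.body)})

    engine = create_engine('sqlite://', echo=True)
    metadata.create_all(engine)

    session = Session(engine)
    session.execute(notes.insert().values(id=1, title='t1', body='a long body'))

    n = session.query(Note).first()  # SELECT notes.id, notes.title FROM notes
    print(n.body)                    # emits SELECT notes.body ... WHERE notes.id = ?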
|
|
# -*- coding: utf-8 -*-
"""
Dependencies: flask, tornado
SeeAlso:
routes.turk_identification
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from ibeis.control import accessor_decors, controller_inject
from ibeis.algo.hots import pipeline
from flask import url_for, request, current_app # NOQA
from os.path import join, dirname, abspath, exists
import cv2
import numpy as np # NOQA
import utool as ut
from ibeis.web import appfuncs as appf
from ibeis import constants as const
import traceback
import requests
import six
from datetime import datetime
ut.noinject('[apis_query]')
CLASS_INJECT_KEY, register_ibs_method = (
controller_inject.make_ibs_register_decorator(__name__))
register_api = controller_inject.get_ibeis_flask_api(__name__)
register_route = controller_inject.get_ibeis_flask_route(__name__)
GRAPH_CLIENT_PEEK = 100
ANNOT_INFR_PEAK_MAX = 50
@register_ibs_method
@accessor_decors.default_decorator
@register_api('/api/query/annot/rowid/', methods=['GET'])
def get_recognition_query_aids(ibs, is_known, species=None):
"""
    DEPRECATE
RESTful:
Method: GET
URL: /api/query/annot/rowid/
"""
qaid_list = ibs.get_valid_aids(is_known=is_known, species=species)
return qaid_list
@register_ibs_method
@register_api('/api/query/chip/dict/simple/', methods=['GET'])
def query_chips_simple_dict(ibs, *args, **kwargs):
r"""
Runs query_chips, but returns a json compatible dictionary
Args:
same as query_chips
RESTful:
Method: GET
URL: /api/query/chip/dict/simple/
SeeAlso:
query_chips
CommandLine:
python -m ibeis.web.apis_query --test-query_chips_simple_dict:0
python -m ibeis.web.apis_query --test-query_chips_simple_dict:1
python -m ibeis.web.apis_query --test-query_chips_simple_dict:0 --humpbacks
Example:
>>> # xdoctest: +REQUIRES(--web)
>>> from ibeis.control.IBEISControl import * # NOQA
>>> import ibeis
>>> ibs = ibeis.opendb(defaultdb='testdb1')
>>> #qaid = ibs.get_valid_aids()[0:3]
>>> qaids = ibs.get_valid_aids()
>>> daids = ibs.get_valid_aids()
>>> dict_list = ibs.query_chips_simple_dict(qaids, daids)
>>> qgids = ibs.get_annot_image_rowids(qaids)
>>> qnids = ibs.get_annot_name_rowids(qaids)
>>> for dict_, qgid, qnid in list(zip(dict_list, qgids, qnids)):
>>> dict_['qgid'] = qgid
>>> dict_['qnid'] = qnid
>>> dict_['dgid_list'] = ibs.get_annot_image_rowids(dict_['daid_list'])
>>> dict_['dnid_list'] = ibs.get_annot_name_rowids(dict_['daid_list'])
>>> dict_['dgname_list'] = ibs.get_image_gnames(dict_['dgid_list'])
>>> dict_['qgname'] = ibs.get_image_gnames(dict_['qgid'])
>>> result = ut.repr2(dict_list, nl=2, precision=2, hack_liststr=True)
>>> result = result.replace('u\'', '"').replace('\'', '"')
>>> print(result)
Example:
>>> # xdoctest: +REQUIRES(--web)
>>> from ibeis.control.IBEISControl import * # NOQA
>>> import time
>>> import ibeis
>>> import requests
>>> # Start up the web instance
>>> web_instance = ibeis.opendb_in_background(db='testdb1', web=True, browser=False)
>>> time.sleep(10)
>>> web_port = ibs.get_web_port_via_scan()
>>> if web_port is None:
>>> raise ValueError('IA web server is not running on any expected port')
>>> baseurl = 'http://127.0.1.1:%s' % (web_port, )
>>> data = dict(qaid_list=[1])
        >>> resp = requests.get(baseurl + '/api/query/chip/dict/simple/', data=data)
>>> print(resp)
>>> web_instance.terminate()
>>> json_dict = resp.json()
>>> cmdict_list = json_dict['response']
>>> assert 'score_list' in cmdict_list[0]
"""
kwargs['return_cm_simple_dict'] = True
return ibs.query_chips(*args, **kwargs)
@register_ibs_method
@register_api('/api/query/chip/dict/', methods=['GET'])
def query_chips_dict(ibs, *args, **kwargs):
"""
Runs query_chips, but returns a json compatible dictionary
RESTful:
Method: GET
URL: /api/query/chip/dict/
"""
kwargs['return_cm_dict'] = True
return ibs.query_chips(*args, **kwargs)
@register_ibs_method
@register_api('/api/review/query/graph/', methods=['POST'])
def process_graph_match_html(ibs, **kwargs):
"""
RESTful:
Method: POST
URL: /api/review/query/graph/
"""
def sanitize(state):
state = state.strip().lower()
state = ''.join(state.split())
return state
import uuid
map_dict = {
'sameanimal' : const.EVIDENCE_DECISION.INT_TO_CODE[const.EVIDENCE_DECISION.POSITIVE],
'differentanimals' : const.EVIDENCE_DECISION.INT_TO_CODE[const.EVIDENCE_DECISION.NEGATIVE],
'cannottell' : const.EVIDENCE_DECISION.INT_TO_CODE[const.EVIDENCE_DECISION.INCOMPARABLE],
'unreviewed' : const.EVIDENCE_DECISION.INT_TO_CODE[const.EVIDENCE_DECISION.UNREVIEWED],
'unknown' : const.EVIDENCE_DECISION.INT_TO_CODE[const.EVIDENCE_DECISION.UNKNOWN],
'photobomb' : 'photobomb',
'scenerymatch' : 'scenerymatch',
'excludetop' : 'excludetop',
'excludebottom' : 'excludebottom',
}
annot_uuid_1 = uuid.UUID(request.form['identification-annot-uuid-1'])
annot_uuid_2 = uuid.UUID(request.form['identification-annot-uuid-2'])
state = request.form.get('identification-submit', '')
state = sanitize(state)
state = map_dict[state]
tag_list = []
if state in ['photobomb', 'scenerymatch']:
tag_list.append(state)
        state = const.EVIDENCE_DECISION.INT_TO_CODE[const.EVIDENCE_DECISION.NEGATIVE]
assert state in map_dict.values(), 'matching state is unrecognized'
    # Get checkbox tags
    checkbox_tag_list = ['photobomb', 'scenerymatch']
    for checkbox_tag in checkbox_tag_list:
        checkbox_name = 'ia-%s-value' % (checkbox_tag, )
        if checkbox_name in request.form:
            tag_list.append(checkbox_tag)
tag_list = sorted(set(tag_list))
confidence_default = const.CONFIDENCE.INT_TO_CODE[const.CONFIDENCE.UNKNOWN]
confidence = request.form.get('ia-turk-confidence', confidence_default)
if confidence not in const.CONFIDENCE.CODE_TO_INT.keys():
confidence = confidence_default
if len(tag_list) == 0:
tag_str = ''
else:
tag_str = ';'.join(tag_list)
user_times = {
'server_time_start' : request.form.get('server_time_start', None),
'client_time_start' : request.form.get('client_time_start', None),
'client_time_end' : request.form.get('client_time_end', None),
}
return (annot_uuid_1, annot_uuid_2, state, tag_str, 'web-api', confidence, user_times)
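# ---------------------------------------------------------------------------
# Hedged client-side sketch (not called anywhere): illustrates the form
# fields that the /api/review/query/graph/ POST handler above reads from
# request.form. The host/port and annotation UUIDs are placeholders, and the
# submit value is whatever label the turk template puts on the button; it is
# lowercased and stripped of whitespace before the map_dict lookup.
def _example_submit_graph_review_form():  # pragma: no cover
    import requests
    form = {
        'identification-annot-uuid-1' : '11111111-1111-1111-1111-111111111111',
        'identification-annot-uuid-2' : '22222222-2222-2222-2222-222222222222',
        'identification-submit'       : 'Same Animal',   # sanitized to 'sameanimal'
        'ia-photobomb-value'          : 'on',            # optional checkbox tag
        'server_time_start'           : '0.0',
        'client_time_start'           : '0.0',
        'client_time_end'             : '1.0',
    }
    return requests.post('http://127.0.0.1:5000/api/review/query/graph/', data=form)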
def ensure_review_image(ibs, aid, cm, qreq_, view_orientation='vertical',
                        draw_matches=True, verbose=False):
    r"""
Create the review image for a pair of annotations
CommandLine:
python -m ibeis.web.apis_query ensure_review_image --show
Example:
>>> # SCRIPT
>>> from ibeis.web.apis_query import * # NOQA
>>> import ibeis
>>> cm, qreq_ = ibeis.testdata_cm('PZ_MTEST', a='default:dindex=0:10,qindex=0:1')
>>> ibs = qreq_.ibs
>>> aid = cm.get_top_aids()[0]
>>> tt = ut.tic('make image')
>>> image = ensure_review_image(ibs, aid, cm, qreq_)
>>> ut.toc(tt)
>>> ut.quit_if_noshow()
>>> print('image.shape = %r' % (image.shape,))
>>> print('image.dtype = %r' % (image.dtype,))
>>> ut.print_object_size(image)
>>> import plottool_ibeis as pt
>>> pt.imshow(image)
>>> ut.show_if_requested()
"""
from ibeis.gui import id_review_api
# Get thumb path
match_thumb_path = ibs.get_match_thumbdir()
match_thumb_filename = id_review_api.get_match_thumb_fname(cm, aid, qreq_,
view_orientation=view_orientation,
draw_matches=draw_matches)
match_thumb_filepath = join(match_thumb_path, match_thumb_filename)
if verbose:
print('Checking: %r' % (match_thumb_filepath, ))
if exists(match_thumb_filepath):
image = cv2.imread(match_thumb_filepath)
else:
render_config = {
'dpi' : 150,
'draw_fmatches' : draw_matches,
'vert' : view_orientation == 'vertical',
'show_aidstr' : False,
'show_name' : False,
'show_exemplar' : False,
'show_num_gt' : False,
'show_timedelta' : False,
'show_name_rank' : False,
'show_score' : False,
'show_annot_score' : False,
'show_name_score' : False,
'draw_lbl' : False,
'draw_border' : False,
}
if hasattr(qreq_, 'render_single_result'):
image = qreq_.render_single_result(cm, aid, **render_config)
else:
image = cm.render_single_annotmatch(qreq_, aid, **render_config)
#image = vt.crop_out_imgfill(image, fillval=(255, 255, 255), thresh=64)
cv2.imwrite(match_thumb_filepath, image)
return image
@register_api('/api/review/query/graph/alias/', methods=['POST'], __api_plural_check__=False)
def review_graph_match_html_alias(*args, **kwargs):
    return review_graph_match_html(*args, **kwargs)
@register_api('/api/review/query/graph/', methods=['GET'])
def review_graph_match_html(ibs, review_pair, cm_dict, query_config_dict,
_internal_state, callback_url,
callback_method='POST',
view_orientation='vertical', include_jquery=False):
r"""
Args:
ibs (ibeis.IBEISController): image analysis api
review_pair (dict): pair of annot uuids
cm_dict (dict):
query_config_dict (dict):
_internal_state (?):
callback_url (str):
callback_method (unicode): (default = u'POST')
view_orientation (unicode): (default = u'vertical')
include_jquery (bool): (default = False)
CommandLine:
python -m ibeis.web.apis_query review_graph_match_html --show
ibeis --web
python -m ibeis.web.apis_query review_graph_match_html --show --domain=localhost
Example:
>>> # xdoctest: +REQUIRES(--web)
>>> from ibeis.web.apis_query import * # NOQA
>>> import ibeis
>>> web_ibs = ibeis.opendb_bg_web('testdb1') # , domain='http://52.33.105.88')
>>> aids = web_ibs.send_ibeis_request('/api/annot/', 'get')[0:2]
>>> uuid_list = web_ibs.send_ibeis_request('/api/annot/uuid/', type_='get', aid_list=aids)
>>> quuid_list = uuid_list[0:1]
>>> duuid_list = uuid_list
>>> query_config_dict = {
>>> # 'pipeline_root' : 'BC_DTW'
>>> }
>>> data = dict(
>>> query_annot_uuid_list=quuid_list, database_annot_uuid_list=duuid_list,
>>> query_config_dict=query_config_dict,
>>> )
>>> jobid = web_ibs.send_ibeis_request('/api/engine/query/graph/', **data)
>>> print('jobid = %r' % (jobid,))
>>> status_response = web_ibs.wait_for_results(jobid)
>>> result_response = web_ibs.read_engine_results(jobid)
>>> inference_result = result_response['json_result']
>>> print('inference_result = %r' % (inference_result,))
>>> auuid2_cm = inference_result['cm_dict']
>>> quuid = quuid_list[0]
>>> class_dict = auuid2_cm[str(quuid)]
>>> # Get information in frontend
>>> #ibs = ibeis.opendb('testdb1')
>>> #cm = match_obj = ibeis.ChipMatch.from_dict(class_dict, ibs=ibs)
>>> #match_obj.print_rawinfostr()
        >>> # Make the dictionary a bit more manageable
>>> #match_obj.compress_top_feature_matches(num=2)
>>> #class_dict = match_obj.to_dict(ibs=ibs)
>>> cm_dict = class_dict
>>> # Package for review
>>> review_pair = {'annot_uuid_1': quuid, 'annot_uuid_2': duuid_list[1]}
>>> callback_method = u'POST'
>>> view_orientation = u'vertical'
>>> include_jquery = False
>>> kw = dict(
>>> review_pair=review_pair,
>>> cm_dict=cm_dict,
>>> query_config_dict=query_config_dict,
>>> _internal_state=None,
>>> callback_url = None,
>>> )
>>> html_str = web_ibs.send_ibeis_request('/api/review/query/graph/', type_='get', **kw)
>>> web_ibs.terminate2()
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.render_html(html_str)
>>> ut.show_if_requested()
Example2:
>>> # DISABLE_DOCTEST
>>> # This starts off using web to get information, but finishes the rest in python
>>> from ibeis.web.apis_query import * # NOQA
>>> import ibeis
>>> ut.exec_funckw(review_graph_match_html, globals())
>>> web_ibs = ibeis.opendb_bg_web('testdb1') # , domain='http://52.33.105.88')
>>> aids = web_ibs.send_ibeis_request('/api/annot/', 'get')[0:2]
>>> uuid_list = web_ibs.send_ibeis_request('/api/annot/uuid/', type_='get', aid_list=aids)
>>> quuid_list = uuid_list[0:1]
>>> duuid_list = uuid_list
>>> query_config_dict = {
>>> # 'pipeline_root' : 'BC_DTW'
>>> }
>>> data = dict(
>>> query_annot_uuid_list=quuid_list, database_annot_uuid_list=duuid_list,
>>> query_config_dict=query_config_dict,
>>> )
>>> jobid = web_ibs.send_ibeis_request('/api/engine/query/graph/', **data)
>>> status_response = web_ibs.wait_for_results(jobid)
>>> result_response = web_ibs.read_engine_results(jobid)
>>> web_ibs.terminate2()
>>> # NOW WORK IN THE FRONTEND
>>> inference_result = result_response['json_result']
>>> auuid2_cm = inference_result['cm_dict']
>>> quuid = quuid_list[0]
>>> class_dict = auuid2_cm[str(quuid)]
>>> # Get information in frontend
>>> ibs = ibeis.opendb('testdb1')
>>> cm = ibeis.ChipMatch.from_dict(class_dict, ibs=ibs)
>>> cm.print_rawinfostr()
        >>> # Make the dictionary a bit more manageable
>>> cm.compress_top_feature_matches(num=1)
>>> cm.print_rawinfostr()
>>> class_dict = cm.to_dict(ibs=ibs)
>>> cm_dict = class_dict
>>> # Package for review ( CANT CALL DIRECTLY BECAUSE OF OUT OF CONTEXT )
>>> review_pair = {'annot_uuid_1': quuid, 'annot_uuid_2': duuid_list[1]}
        >>> html_str = review_graph_match_html(ibs, review_pair, cm_dict,
>>> query_config_dict, _internal_state=None,
>>> callback_url=None)
>>> ut.quit_if_noshow()
>>> import plottool_ibeis as pt
>>> ut.render_html(html_str)
>>> ut.show_if_requested()
"""
from ibeis.algo.hots import chip_match
# from ibeis.algo.hots.query_request import QueryRequest
proot = query_config_dict.get('pipeline_root', 'vsmany')
proot = query_config_dict.get('proot', proot)
if proot.upper() in ('BC_DTW', 'OC_WDTW'):
cls = chip_match.AnnotMatch # ibs.depc_annot.requestclass_dict['BC_DTW']
else:
cls = chip_match.ChipMatch
view_orientation = view_orientation.lower()
if view_orientation not in ['vertical', 'horizontal']:
view_orientation = 'horizontal'
# unpack info
try:
annot_uuid_1 = review_pair['annot_uuid_1']
annot_uuid_2 = review_pair['annot_uuid_2']
except Exception:
#??? HACK
# FIXME:
print('[!!!!] review_pair = %r' % (review_pair,))
review_pair = review_pair[0]
annot_uuid_1 = review_pair['annot_uuid_1']
annot_uuid_2 = review_pair['annot_uuid_2']
ibs.web_check_uuids(qannot_uuid_list=[annot_uuid_1],
dannot_uuid_list=[annot_uuid_2])
aid_1 = ibs.get_annot_aids_from_uuid(annot_uuid_1)
aid_2 = ibs.get_annot_aids_from_uuid(annot_uuid_2)
cm = cls.from_dict(cm_dict, ibs=ibs)
qreq_ = ibs.new_query_request([aid_1], [aid_2],
cfgdict=query_config_dict)
# Get score
idx = cm.daid2_idx[aid_2]
match_score = cm.name_score_list[idx]
#match_score = cm.aid2_score[aid_2]
try:
image_matches = ensure_review_image(ibs, aid_2, cm, qreq_,
view_orientation=view_orientation)
except KeyError:
image_matches = np.zeros((100, 100, 3), dtype=np.uint8)
traceback.print_exc()
try:
image_clean = ensure_review_image(ibs, aid_2, cm, qreq_,
view_orientation=view_orientation,
draw_matches=False)
except KeyError:
image_clean = np.zeros((100, 100, 3), dtype=np.uint8)
traceback.print_exc()
image_matches_src = appf.embed_image_html(image_matches)
image_clean_src = appf.embed_image_html(image_clean)
confidence_dict = const.CONFIDENCE.NICE_TO_CODE
confidence_nice_list = confidence_dict.keys()
confidence_text_list = confidence_dict.values()
confidence_selected_list = [
confidence_text == 'unspecified'
for confidence_text in confidence_text_list
]
confidence_list = list(zip(confidence_nice_list, confidence_text_list, confidence_selected_list))
if False:
from ibeis.web import apis_query
root_path = dirname(abspath(apis_query.__file__))
else:
root_path = dirname(abspath(__file__))
css_file_list = [
['css', 'style.css'],
['include', 'bootstrap', 'css', 'bootstrap.css'],
]
json_file_list = [
['javascript', 'script.js'],
['include', 'bootstrap', 'js', 'bootstrap.js'],
]
if include_jquery:
json_file_list = [
['javascript', 'jquery.min.js'],
] + json_file_list
EMBEDDED_CSS = ''
EMBEDDED_JAVASCRIPT = ''
css_template_fmtstr = '<style type="text/css" ia-dependency="css">%s</style>\n'
json_template_fmtstr = '<script type="text/javascript" ia-dependency="javascript">%s</script>\n'
for css_file in css_file_list:
css_filepath_list = [root_path, 'static'] + css_file
with open(join(*css_filepath_list)) as css_file:
EMBEDDED_CSS += css_template_fmtstr % (css_file.read(), )
for json_file in json_file_list:
json_filepath_list = [root_path, 'static'] + json_file
with open(join(*json_filepath_list)) as json_file:
EMBEDDED_JAVASCRIPT += json_template_fmtstr % (json_file.read(), )
annot_uuid_1 = str(annot_uuid_1)
annot_uuid_2 = str(annot_uuid_2)
embedded = dict(globals(), **locals())
return appf.template('turk', 'identification_insert', **embedded)
@register_route('/test/review/query/chip/', methods=['GET'])
def review_query_chips_test(**kwargs):
"""
CommandLine:
python -m ibeis.web.apis_query review_query_chips_test --show
Example:
>>> # SCRIPT
>>> import ibeis
>>> web_ibs = ibeis.opendb_bg_web(
>>> browser=True, url_suffix='/test/review/query/chip/?__format__=true')
"""
ibs = current_app.ibs
# the old block curvature dtw
if 'use_bc_dtw' in request.args:
query_config_dict = {
'pipeline_root' : 'BC_DTW'
}
# the new oriented curvature dtw
elif 'use_oc_wdtw' in request.args:
query_config_dict = {
'pipeline_root' : 'OC_WDTW'
}
else:
query_config_dict = {}
result_dict = ibs.query_chips_test(query_config_dict=query_config_dict)
review_pair = result_dict['inference_dict']['annot_pair_dict']['review_pair_list'][0]
annot_uuid_key = str(review_pair['annot_uuid_key'])
cm_dict = result_dict['cm_dict'][annot_uuid_key]
query_config_dict = result_dict['query_config_dict']
_internal_state = result_dict['inference_dict']['_internal_state']
callback_url = request.args.get('callback_url', url_for('process_graph_match_html'))
callback_method = request.args.get('callback_method', 'POST')
# view_orientation = request.args.get('view_orientation', 'vertical')
view_orientation = request.args.get('view_orientation', 'horizontal')
template_html = review_graph_match_html(ibs, review_pair, cm_dict,
query_config_dict, _internal_state,
callback_url, callback_method,
view_orientation,
include_jquery=True)
template_html = '''
<script src="http://code.jquery.com/jquery-2.2.1.min.js" ia-dependency="javascript"></script>
%s
''' % (template_html, )
return template_html
return 'done'
@register_ibs_method
@register_api('/test/query/chip/', methods=['GET'])
def query_chips_test(ibs, **kwargs):
"""
CommandLine:
python -m ibeis.web.apis_query query_chips_test
Example:
>>> # SLOW_DOCTEST
>>> # xdoctest: +SKIP
>>> from ibeis.control.IBEISControl import * # NOQA
>>> import ibeis
>>> qreq_ = ibeis.testdata_qreq_(defaultdb='testdb1')
>>> ibs = qreq_.ibs
>>> result_dict = ibs.query_chips_test()
>>> print(result_dict)
"""
from random import shuffle # NOQA
# Compile test data
aid_list = ibs.get_valid_aids()
# shuffle(aid_list)
qaid_list = aid_list[:1]
daid_list = aid_list[-4:]
result_dict = ibs.query_chips_graph(qaid_list, daid_list, **kwargs)
return result_dict
@register_ibs_method
@register_api('/api/query/graph/', methods=['GET', 'POST'])
def query_chips_graph(ibs, qaid_list, daid_list, user_feedback=None,
query_config_dict={}, echo_query_params=True):
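    """
    Runs the query pipeline for ``qaid_list`` against ``daid_list`` and packages
    each ChipMatch (keyed by its query annotation UUID) together with the
    OrigAnnotInference result into a JSON-compatible ``result_dict``.
    RESTful:
        Method: GET, POST
        URL: /api/query/graph/
    """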
from ibeis.unstable.orig_graph_iden import OrigAnnotInference
import theano # NOQA
import uuid
def convert_to_uuid(nid):
try:
text = ibs.get_name_texts(nid)
uuid_ = uuid.UUID(text)
except ValueError:
uuid_ = nid
return uuid_
cm_list, qreq_ = ibs.query_chips(qaid_list=qaid_list, daid_list=daid_list,
cfgdict=query_config_dict, return_request=True)
cm_dict = {
str(ibs.get_annot_uuids(cm.qaid)): {
# 'qaid' : cm.qaid,
'qannot_uuid' : ibs.get_annot_uuids(cm.qaid),
# 'qnid' : cm.qnid,
'qname_uuid' : convert_to_uuid(cm.qnid),
'qname' : ibs.get_name_texts(cm.qnid),
# 'daid_list' : cm.daid_list,
'dannot_uuid_list' : ibs.get_annot_uuids(cm.daid_list),
# 'dnid_list' : cm.dnid_list,
'dname_uuid_list' : [convert_to_uuid(nid) for nid in cm.dnid_list],
# FIXME: use qreq_ state not ibeis state
'dname_list' : ibs.get_name_texts(cm.dnid_list),
'score_list' : cm.score_list,
'annot_score_list' : cm.annot_score_list,
'fm_list' : cm.fm_list if hasattr(cm, 'fm_list') else None,
'fsv_list' : cm.fsv_list if hasattr(cm, 'fsv_list') else None,
# Non-corresponding lists to above
# 'unique_nids' : cm.unique_nids,
'unique_name_uuid_list' : [convert_to_uuid(nid) for nid in cm.unique_nids],
# FIXME: use qreq_ state not ibeis state
'unique_name_list' : ibs.get_name_texts(cm.unique_nids),
'name_score_list' : cm.name_score_list,
# Placeholders for the reinitialization of the ChipMatch object
'fk_list' : None,
'H_list' : None,
'fsv_col_lbls' : None,
'filtnorm_aids' : None,
'filtnorm_fxs' : None,
}
for cm in cm_list
}
annot_inference = OrigAnnotInference(qreq_, cm_list, user_feedback)
inference_dict = annot_inference.make_annot_inference_dict()
result_dict = {
'cm_dict' : cm_dict,
'inference_dict' : inference_dict,
}
if echo_query_params:
result_dict['query_annot_uuid_list'] = ibs.get_annot_uuids(qaid_list)
result_dict['database_annot_uuid_list'] = ibs.get_annot_uuids(daid_list)
result_dict['query_config_dict'] = query_config_dict
return result_dict
@register_ibs_method
@register_api('/api/query/chip/', methods=['GET'])
def query_chips(ibs, qaid_list=None, daid_list=None, cfgdict=None,
use_cache=None, use_bigcache=None, qreq_=None,
return_request=False, verbose=pipeline.VERB_PIPELINE,
save_qcache=None, prog_hook=None, return_cm_dict=False,
return_cm_simple_dict=False):
r"""
Submits a query request to the hotspotter recognition pipeline. Returns
a list of QueryResult objects.
Args:
qaid_list (list): a list of annotation ids to be submitted as
queries
daid_list (list): a list of annotation ids used as the database
that will be searched
cfgdict (dict): dictionary of configuration options used to create
a new QueryRequest if not already specified
use_cache (bool): turns on/off chip match cache (default: True)
use_bigcache (bool): turns one/off chunked chip match cache (default:
True)
qreq_ (QueryRequest): optional, a QueryRequest object that
overrides all previous settings
return_request (bool): returns the request which will be created if
one is not already specified
verbose (bool): default=False, turns on verbose printing
Returns:
list: a list of ChipMatch objects containing the matching
annotations, scores, and feature matches
Returns(2):
tuple: (cm_list, qreq_) - a list of query results and optionally the
QueryRequest object used
RESTful:
        Method: GET
URL: /api/query/chip/
CommandLine:
python -m ibeis.web.apis_query --test-query_chips
# Test speed of single query
python -m ibeis --tf IBEISController.query_chips --db PZ_Master1 \
-a default:qindex=0:1,dindex=0:500 --nocache-hs
python -m ibeis --tf IBEISController.query_chips --db PZ_Master1 \
-a default:qindex=0:1,dindex=0:3000 --nocache-hs
python -m ibeis.web.apis_query --test-query_chips:1 --show
python -m ibeis.web.apis_query --test-query_chips:2 --show
Example:
>>> # SLOW_DOCTEST
>>> # xdoctest: +SKIP
>>> from ibeis.control.IBEISControl import * # NOQA
>>> import ibeis
>>> qreq_ = ibeis.testdata_qreq_()
>>> ibs = qreq_.ibs
>>> cm_list = qreq_.execute()
>>> cm = cm_list[0]
>>> ut.quit_if_noshow()
>>> cm.ishow_analysis(qreq_)
>>> ut.show_if_requested()
Example:
>>> # SLOW_DOCTEST
>>> # xdoctest: +SKIP
>>> import ibeis
>>> from ibeis.control.IBEISControl import * # NOQA
>>> qaid_list = [1]
>>> daid_list = [1, 2, 3, 4, 5]
>>> ibs = ibeis.opendb_test(db='testdb1')
>>> qreq_ = ibs.new_query_request(qaid_list, daid_list)
>>> cm = ibs.query_chips(qaid_list, daid_list, use_cache=False, qreq_=qreq_)[0]
>>> ut.quit_if_noshow()
>>> cm.ishow_analysis(qreq_)
>>> ut.show_if_requested()
Example1:
>>> # SLOW_DOCTEST
>>> # xdoctest: +SKIP
>>> import ibeis
>>> from ibeis.control.IBEISControl import * # NOQA
>>> qaid_list = [1]
>>> daid_list = [1, 2, 3, 4, 5]
>>> ibs = ibeis.opendb_test(db='testdb1')
>>> cfgdict = {'pipeline_root':'BC_DTW'}
>>> qreq_ = ibs.new_query_request(qaid_list, daid_list, cfgdict=cfgdict, verbose=True)
>>> cm = ibs.query_chips(qreq_=qreq_)[0]
>>> ut.quit_if_noshow()
>>> cm.ishow_analysis(qreq_)
>>> ut.show_if_requested()
"""
# The qaid and daid objects are allowed to be None if qreq_ is
# specified
if qreq_ is None:
        assert qaid_list is not None, 'qaid_list must be given when qreq_ is None'
        assert daid_list is not None, 'daid_list must be given when qreq_ is None'
qaid_list, was_scalar = ut.wrap_iterable(qaid_list)
if daid_list is None:
daid_list = ibs.get_valid_aids()
qreq_ = ibs.new_query_request(qaid_list, daid_list,
cfgdict=cfgdict, verbose=verbose)
else:
assert qaid_list is None, 'do not specify qreq and qaids'
assert daid_list is None, 'do not specify qreq and daids'
was_scalar = False
cm_list = qreq_.execute()
assert isinstance(cm_list, list), (
'Chip matches were not returned as a list')
# Convert to cm_list
if return_cm_simple_dict:
for cm in cm_list:
cm.qauuid = ibs.get_annot_uuids(cm.qaid)
cm.dauuid_list = ibs.get_annot_uuids(cm.daid_list)
keys = ['qaid', 'daid_list', 'score_list', 'qauuid', 'dauuid_list']
cm_list = [ut.dict_subset(cm.to_dict(), keys) for cm in cm_list]
elif return_cm_dict:
cm_list = [cm.to_dict() for cm in cm_list]
if was_scalar:
# hack for scalar input
assert len(cm_list) == 1
cm_list = cm_list[0]
if return_request:
return cm_list, qreq_
else:
return cm_list
##########################################################################################
@register_ibs_method
def get_graph_client_query_chips_graph_v2(ibs, graph_uuid):
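    """
    Looks up the GraphClient registered under ``graph_uuid`` in
    ``current_app.GRAPH_CLIENT_DICT``, following any redirect chain of string
    aliases, and returns ``(graph_client, graph_uuid_chain)``. Raises
    WebUnknownUUIDException if no client is found.
    """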
graph_client = current_app.GRAPH_CLIENT_DICT.get(graph_uuid, None)
# We could be redirecting to a newer graph_client
graph_uuid_chain = [graph_uuid]
while isinstance(graph_client, six.string_types):
graph_uuid_chain.append(graph_client)
graph_client = current_app.GRAPH_CLIENT_DICT.get(graph_client, None)
if graph_client is None:
raise controller_inject.WebUnknownUUIDException(['graph_uuid'], [graph_uuid])
return graph_client, graph_uuid_chain
def ensure_review_image_v2(ibs, match, draw_matches=False, draw_heatmask=False,
view_orientation='vertical', overlay=True):
import plottool_ibeis as pt
render_config = {
'overlay' : overlay,
'show_ell' : draw_matches,
'show_lines' : draw_matches,
'show_ori' : False,
'heatmask' : draw_heatmask,
'vert' : view_orientation == 'vertical',
}
with pt.RenderingContext(dpi=150) as ctx:
match.show(**render_config)
image = ctx.image
return image
def query_graph_v2_callback(graph_client, callback_type):
from ibeis.web.graph_server import ut_to_json_encode
assert callback_type in ['review', 'finished']
callback_tuple = graph_client.callbacks.get(callback_type, None)
if callback_tuple is not None:
callback_url, callback_method = callback_tuple
if callback_url is not None:
callback_method = callback_method.lower()
data_dict = ut_to_json_encode({
'graph_uuid': graph_client.graph_uuid,
})
if callback_method == 'post':
requests.post(callback_url, data=data_dict)
elif callback_method == 'get':
requests.get(callback_url, params=data_dict)
elif callback_method == 'put':
requests.put(callback_url, data=data_dict)
elif callback_method == 'delete':
requests.delete(callback_url, data=data_dict)
else:
raise KeyError('Unsupported HTTP callback method')
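# ---------------------------------------------------------------------------
# Hedged illustration (not called anywhere): shows the callbacks structure
# query_graph_v2_callback expects on a graph client. The stub class below
# stands in for ibeis.web.graph_server.GraphClient, and the URL/UUID are
# placeholders; invoking the helper with 'review' would POST
# {'graph_uuid': ...} to the registered review callback, assuming a listener
# is running at that address.
def _example_dispatch_review_callback():  # pragma: no cover
    class _StubGraphClient(object):
        graph_uuid = '00000000-0000-0000-0000-000000000000'
        callbacks = {
            'review'   : ('http://127.0.0.1:5010/review-ready', 'POST'),
            'finished' : (None, 'POST'),   # a None URL is skipped silently
        }
    query_graph_v2_callback(_StubGraphClient(), 'review')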
@register_ibs_method
@register_api('/api/query/graph/v2/', methods=['POST'])
def query_chips_graph_v2(ibs, annot_uuid_list=None,
query_config_dict={},
review_callback_url=None,
review_callback_method='POST',
finished_callback_url=None,
finished_callback_method='POST',
creation_imageset_rowid_list=None,
**kwargs):
"""
CommandLine:
python -m ibeis.web.apis_query --test-query_chips_graph_v2:0
python -m ibeis reset_mtest_graph
python -m ibeis --db PZ_MTEST --web --browser --url=/turk/identification/hardcase/
python -m ibeis --db PZ_MTEST --web --browser --url=/turk/identification/graph/
Example:
>>> # xdoctest: +REQUIRES(--web)
>>> from ibeis.web.apis_query import *
>>> import ibeis
>>> # Open local instance
>>> ibs = ibeis.opendb('PZ_MTEST')
>>> uuid_list = ibs.annots().uuids[0:10]
>>> # Start up the web instance
>>> web_ibs = ibeis.opendb_bg_web(db='PZ_MTEST', web=True, browser=False)
>>> data = dict(annot_uuid_list=uuid_list)
>>> resp = web_ibs.send_ibeis_request('/api/query/graph/v2/', **data)
>>> print('resp = %r' % (resp,))
>>> #cmdict_list = json_dict['response']
>>> #assert 'score_list' in cmdict_list[0]
Example:
>>> # DEBUG_SCRIPT
>>> from ibeis.web.apis_query import *
>>> # Hack a flask context
>>> current_app = ut.DynStruct()
>>> current_app.GRAPH_CLIENT_DICT = {}
>>> old = query_chips_graph_v2.__globals__.get('current_app', None)
>>> query_chips_graph_v2.__globals__['current_app'] = current_app
>>> import ibeis
>>> ibs = ibeis.opendb('PZ_MTEST')
>>> #ut.exec_funckw(query_chips_graph_v2, globals())
>>> # Run function in main process
>>> query_chips_graph_v2(ibs)
>>> # Reset context
>>> query_chips_graph_v2.__globals__['current_app'] = old
"""
from ibeis.web.graph_server import GraphClient
print('[apis_query] Creating GraphClient')
if annot_uuid_list is None:
annot_uuid_list = ibs.get_annot_uuids(ibs.get_valid_aids())
ibs.web_check_uuids([], annot_uuid_list, [])
aid_list = ibs.get_annot_aids_from_uuid(annot_uuid_list)
# FILTER FOR GGR2
if True:
aid_list = ibs.check_ggr_valid_aids(aid_list, **kwargs)
graph_uuid = ut.hashable_to_uuid(sorted(aid_list))
if graph_uuid not in current_app.GRAPH_CLIENT_DICT:
for graph_uuid_ in current_app.GRAPH_CLIENT_DICT:
graph_client_ = current_app.GRAPH_CLIENT_DICT[graph_uuid_]
aid_list_ = graph_client_.aids
assert aid_list_ is not None
overlap_aid_set = set(aid_list_) & set(aid_list)
if len(overlap_aid_set) > 0:
overlap_aid_list = list(overlap_aid_set)
overlap_annot_uuid_list = ibs.get_annot_uuids(overlap_aid_list)
raise controller_inject.WebUnavailableUUIDException(
overlap_annot_uuid_list, graph_uuid_)
callback_dict = {
'review' : (review_callback_url, review_callback_method),
'finished' : (finished_callback_url, finished_callback_method),
}
graph_client = GraphClient(graph_uuid, callbacks=callback_dict,
autoinit=True)
if creation_imageset_rowid_list is not None:
graph_client.imagesets = creation_imageset_rowid_list
graph_client.aids = aid_list
config = {
'manual.n_peek' : GRAPH_CLIENT_PEEK,
'manual.autosave' : True,
'redun.pos' : 2,
'redun.neg' : 2,
'algo.quickstart' : False
}
config.update(query_config_dict)
print('[apis_query] graph_client.config = {}'.format(ut.repr3(config)))
graph_client.config = config
# Ensure no race-conditions
current_app.GRAPH_CLIENT_DICT[graph_uuid] = graph_client
# Start (create the Graph Inference object)
payload = {
'action' : 'start',
'dbdir' : ibs.dbdir,
'aids' : graph_client.aids,
'config' : graph_client.config,
}
future = graph_client.post(payload)
future.result() # Guarantee that this has happened before calling refresh
f2 = graph_client.post({'action' : 'latest_logs'})
f2.graph_client = graph_client
f2.add_done_callback(query_graph_v2_latest_logs)
# Start (create the Graph Inference object)
payload = {
'action' : 'get_feat_extractor',
}
future = graph_client.post(payload)
graph_client.extr = future.result()
# Start main loop
future = graph_client.post({'action' : 'continue_review'})
future.graph_client = graph_client
future.add_done_callback(query_graph_v2_on_request_review)
f2 = graph_client.post({'action' : 'latest_logs'})
f2.graph_client = graph_client
f2.add_done_callback(query_graph_v2_latest_logs)
return graph_uuid
@register_ibs_method
def review_graph_match_config_v2(ibs, graph_uuid, aid1=None, aid2=None,
view_orientation='vertical', view_version=1):
from ibeis.algo.verif import pairfeat
from flask import session
EDGES_KEY = '_EDGES_'
EDGES_MAX = 10
user_id = controller_inject.get_user()
graph_client, _ = ibs.get_graph_client_query_chips_graph_v2(graph_uuid)
if aid1 is not None and aid2 is not None:
previous_edge_list = None
if aid1 > aid2:
aid1, aid2 = aid2, aid1
edge = (aid1, aid2)
data = graph_client.check(edge)
if data is None:
data = (
edge,
np.nan,
{},
)
else:
if EDGES_KEY not in session:
session[EDGES_KEY] = []
previous_edge_list = session[EDGES_KEY]
print('Using previous_edge_list\n\tUser: %s\n\tList: %r' % (user_id, previous_edge_list, ))
data = graph_client.sample(previous_edge_list=previous_edge_list, max_previous_edges=EDGES_MAX)
if data is None:
raise controller_inject.WebReviewNotReadyException(graph_uuid)
edge, priority, data_dict = data
edge_ = [
int(edge[0]),
int(edge[1]),
]
if previous_edge_list is not None:
previous_edge_list.append(edge_)
if len(previous_edge_list) > EDGES_MAX:
cutoff = int(-1.0 * EDGES_MAX)
previous_edge_list = previous_edge_list[cutoff:]
session[EDGES_KEY] = previous_edge_list
print('Updating previous_edge_list\n\tUser: %s\n\tList: %r' % (user_id, previous_edge_list, ))
args = (edge, priority, )
print('Sampled edge %r with priority %0.02f' % args)
print('Data: ' + ut.repr4(data_dict))
aid_1, aid_2 = edge
annot_uuid_1 = str(ibs.get_annot_uuids(aid_1))
annot_uuid_2 = str(ibs.get_annot_uuids(aid_2))
feat_extract_config = {
'match_config': ({} if graph_client.extr is None else
graph_client.extr.match_config)
}
extr = pairfeat.PairwiseFeatureExtractor(ibs, config=feat_extract_config)
match = extr._exec_pairwise_match([edge])[0]
image_clean = ensure_review_image_v2(ibs, match,
view_orientation=view_orientation,
overlay=False)
# image_matches = ensure_review_image_v2(ibs, match, draw_matches=True,
# view_orientation=view_orientation)
print('Using View Version: %r' % (view_version, ))
if view_version == 1:
image_heatmask = ensure_review_image_v2(ibs, match, draw_heatmask=True,
view_orientation=view_orientation)
else:
image_heatmask = ensure_review_image_v2(ibs, match, draw_matches=True,
view_orientation=view_orientation)
image_clean_src = appf.embed_image_html(image_clean)
# image_matches_src = appf.embed_image_html(image_matches)
image_heatmask_src = appf.embed_image_html(image_heatmask)
now = datetime.utcnow()
server_time_start = float(now.strftime("%s.%f"))
return (edge, priority, data_dict, aid_1, aid_2, annot_uuid_1, annot_uuid_2,
image_clean_src, image_heatmask_src, image_heatmask_src,
server_time_start)
@register_api('/api/review/query/graph/v2/', methods=['GET'])
def review_graph_match_html_v2(ibs, graph_uuid, callback_url=None,
callback_method='POST',
view_orientation='vertical',
view_version=1,
include_jquery=False):
values = ibs.review_graph_match_config_v2(graph_uuid,
view_orientation=view_orientation,
view_version=view_version)
(edge, priority, data_dict, aid1, aid2, annot_uuid_1, annot_uuid_2,
image_clean_src, image_matches_src, image_heatmask_src,
server_time_start) = values
confidence_dict = const.CONFIDENCE.NICE_TO_CODE
confidence_nice_list = confidence_dict.keys()
confidence_text_list = confidence_dict.values()
confidence_selected_list = [
confidence_text == 'unspecified'
for confidence_text in confidence_text_list
]
confidence_list = list(zip(confidence_nice_list, confidence_text_list, confidence_selected_list))
if False:
from ibeis.web import apis_query
root_path = dirname(abspath(apis_query.__file__))
else:
root_path = dirname(abspath(__file__))
css_file_list = [
['css', 'style.css'],
['include', 'bootstrap', 'css', 'bootstrap.css'],
]
json_file_list = [
['javascript', 'script.js'],
['include', 'bootstrap', 'js', 'bootstrap.js'],
]
if include_jquery:
json_file_list = [
['javascript', 'jquery.min.js'],
] + json_file_list
EMBEDDED_CSS = ''
EMBEDDED_JAVASCRIPT = ''
css_template_fmtstr = '<style type="text/css" ia-dependency="css">%s</style>\n'
json_template_fmtstr = '<script type="text/javascript" ia-dependency="javascript">%s</script>\n'
for css_file in css_file_list:
css_filepath_list = [root_path, 'static'] + css_file
with open(join(*css_filepath_list)) as css_file:
EMBEDDED_CSS += css_template_fmtstr % (css_file.read(), )
for json_file in json_file_list:
json_filepath_list = [root_path, 'static'] + json_file
with open(join(*json_filepath_list)) as json_file:
EMBEDDED_JAVASCRIPT += json_template_fmtstr % (json_file.read(), )
embedded = dict(globals(), **locals())
return appf.template('turk', 'identification_insert', **embedded)
@register_api('/api/status/query/graph/v2/', methods=['GET'], __api_plural_check__=False)
def view_graphs_status(ibs):
graph_dict = {}
for graph_uuid in current_app.GRAPH_CLIENT_DICT:
graph_client = current_app.GRAPH_CLIENT_DICT.get(graph_uuid, None)
if graph_client is None:
continue
graph_status, graph_exception = graph_client.refresh_status()
if graph_client.review_dict is None:
num_edges = None
else:
edge_list = list(graph_client.review_dict.keys())
num_edges = len(edge_list)
graph_uuid = str(graph_uuid)
graph_dict[graph_uuid] = {
'status': graph_status,
'num_aids': len(graph_client.aids),
'num_reviews': num_edges,
}
return graph_dict
@register_ibs_method
@register_api('/api/review/query/graph/v2/', methods=['POST'])
def process_graph_match_html_v2(ibs, graph_uuid, **kwargs):
graph_client, _ = ibs.get_graph_client_query_chips_graph_v2(graph_uuid)
response_tuple = process_graph_match_html(ibs, **kwargs)
annot_uuid_1, annot_uuid_2, decision, tags, user_id, confidence, user_times = response_tuple
aid1 = ibs.get_annot_aids_from_uuid(annot_uuid_1)
aid2 = ibs.get_annot_aids_from_uuid(annot_uuid_2)
edge = (aid1, aid2, )
user_id = controller_inject.get_user()
now = datetime.utcnow()
if decision in ['excludetop', 'excludebottom']:
aid = aid1 if decision == 'excludetop' else aid2
metadata_dict = ibs.get_annot_metadata(aid)
assert 'excluded' not in metadata_dict
metadata_dict['excluded'] = True
ibs.set_annot_metadata([aid], [metadata_dict])
payload = {
'action' : 'remove_annots',
'aids' : [aid],
}
else:
payload = {
'action' : 'add_feedback',
'edge' : edge,
'evidence_decision' : decision,
# TODO: meta_decision should come from the html resp. When generating
# the html page, the default value should be its previous value. If the
            # user changes it to be something incompatible then perhaps just reset
# it to null.
'meta_decision' : 'null',
'tags' : [] if len(tags) == 0 else tags.split(';'),
'user_id' : 'user:web:%s' % (user_id, ),
'confidence' : confidence,
'timestamp_s1' : user_times['server_time_start'],
'timestamp_c1' : user_times['client_time_start'],
'timestamp_c2' : user_times['client_time_end'],
'timestamp' : float(now.strftime("%s.%f"))
}
print('POSTING GRAPH CLIENT REVIEW:')
print(ut.repr4(payload))
graph_client.post(payload)
# Clean any old continue_reviews
graph_client.cleanup()
# Continue review
future = graph_client.post({'action' : 'continue_review'})
future.graph_client = graph_client
future.add_done_callback(query_graph_v2_on_request_review)
f2 = graph_client.post({'action' : 'latest_logs'})
f2.graph_client = graph_client
f2.add_done_callback(query_graph_v2_latest_logs)
return (annot_uuid_1, annot_uuid_2, )
@register_ibs_method
@register_api('/api/query/graph/v2/', methods=['GET'])
def sync_query_chips_graph_v2(ibs, graph_uuid):
import ibeis
graph_client, _ = ibs.get_graph_client_query_chips_graph_v2(graph_uuid)
# Create the AnnotInference
infr = ibeis.AnnotInference(ibs=ibs, aids=graph_client.aids, autoinit=True)
for key in graph_client.config:
infr.params[key] = graph_client.config[key]
infr.reset_feedback('staging', apply=True)
infr.relabel_using_reviews(rectify=True)
edge_delta_df = infr.match_state_delta(old='annotmatch', new='all')
name_delta_df = infr.get_ibeis_name_delta()
############################################################################
col_list = list(edge_delta_df.columns)
match_aid_edge_list = list(edge_delta_df.index)
match_aid1_list = ut.take_column(match_aid_edge_list, 0)
match_aid2_list = ut.take_column(match_aid_edge_list, 1)
match_annot_uuid1_list = ibs.get_annot_uuids(match_aid1_list)
match_annot_uuid2_list = ibs.get_annot_uuids(match_aid2_list)
match_annot_uuid_edge_list = list(zip(match_annot_uuid1_list, match_annot_uuid2_list))
zipped = list(zip(*( list(edge_delta_df[col]) for col in col_list )))
match_list = []
for match_annot_uuid_edge, zipped_ in list(zip(match_annot_uuid_edge_list, zipped)):
match_dict = {
'edge': match_annot_uuid_edge,
}
for index, col in enumerate(col_list):
match_dict[col] = zipped_[index]
match_list.append(match_dict)
############################################################################
col_list = list(name_delta_df.columns)
name_aid_list = list(name_delta_df.index)
name_annot_uuid_list = ibs.get_annot_uuids(name_aid_list)
old_name_list = list(name_delta_df['old_name'])
new_name_list = list(name_delta_df['new_name'])
zipped = list(zip(name_annot_uuid_list, old_name_list, new_name_list))
name_dict = {
str(name_annot_uuid): {
'old': old_name,
'new': new_name,
}
for name_annot_uuid, old_name, new_name in zipped
}
############################################################################
ret_dict = {
'match_list' : match_list,
'name_dict' : name_dict,
}
infr.write_ibeis_staging_feedback()
infr.write_ibeis_annotmatch_feedback(edge_delta_df)
infr.write_ibeis_name_assignment(name_delta_df)
edge_delta_df.reset_index()
return ret_dict
@register_ibs_method
@register_api('/api/query/graph/v2/', methods=['PUT'])
def add_annots_query_chips_graph_v2(ibs, graph_uuid, annot_uuid_list):
graph_client, _ = ibs.get_graph_client_query_chips_graph_v2(graph_uuid)
ibs.web_check_uuids([], annot_uuid_list, [])
aid_list = ibs.get_annot_aids_from_uuid(annot_uuid_list)
for graph_uuid_ in current_app.GRAPH_CLIENT_DICT:
graph_client_ = current_app.GRAPH_CLIENT_DICT[graph_uuid_]
aid_list_ = graph_client_.aids
assert aid_list_ is not None
overlap_aid_set = set(aid_list_) & set(aid_list)
if len(overlap_aid_set) > 0:
overlap_aid_list = list(overlap_aid_set)
overlap_annot_uuid_list = ibs.get_annot_uuids(overlap_aid_list)
raise controller_inject.WebUnavailableUUIDException(
overlap_annot_uuid_list, graph_uuid_)
aid_list_ = graph_client.aids + aid_list
graph_uuid_ = ut.hashable_to_uuid(sorted(aid_list_))
assert graph_uuid_ not in current_app.GRAPH_CLIENT_DICT
graph_client.graph_uuid = graph_uuid_
payload = {
'action' : 'add_annots',
'dbdir' : ibs.dbdir,
'aids' : aid_list,
}
future = graph_client.post(payload)
future.result() # Guarantee that this has happened before calling refresh
# Start main loop
future = graph_client.post({'action' : 'continue_review'})
future.graph_client = graph_client
future.add_done_callback(query_graph_v2_on_request_review)
current_app.GRAPH_CLIENT_DICT[graph_uuid_] = graph_client
current_app.GRAPH_CLIENT_DICT[graph_uuid] = graph_uuid_
return graph_uuid_
@register_ibs_method
def remove_annots_query_chips_graph_v2(ibs, graph_uuid, annot_uuid_list):
graph_client, _ = ibs.get_graph_client_query_chips_graph_v2(graph_uuid)
ibs.web_check_uuids([], annot_uuid_list, [])
aid_list = ibs.get_annot_aids_from_uuid(annot_uuid_list)
aid_list_ = list(set(graph_client.aids) - set(aid_list))
graph_uuid_ = ut.hashable_to_uuid(sorted(aid_list_))
assert graph_uuid_ not in current_app.GRAPH_CLIENT_DICT
graph_client.graph_uuid = graph_uuid_
payload = {
'action' : 'remove_annots',
'dbdir' : ibs.dbdir,
'aids' : aid_list,
}
future = graph_client.post(payload)
future.result() # Guarantee that this has happened before calling refresh
# Start main loop
future = graph_client.post({'action' : 'continue_review'})
future.graph_client = graph_client
future.add_done_callback(query_graph_v2_on_request_review)
current_app.GRAPH_CLIENT_DICT[graph_uuid_] = graph_client
current_app.GRAPH_CLIENT_DICT[graph_uuid] = graph_uuid_
return graph_uuid_
@register_ibs_method
@register_api('/api/query/graph/v2/', methods=['DELETE'])
def delete_query_chips_graph_v2(ibs, graph_uuid):
values = ibs.get_graph_client_query_chips_graph_v2(graph_uuid)
graph_client, graph_uuid_chain = values
del graph_client
for graph_uuid_ in graph_uuid_chain:
if graph_uuid_ in current_app.GRAPH_CLIENT_DICT:
current_app.GRAPH_CLIENT_DICT[graph_uuid_] = None
current_app.GRAPH_CLIENT_DICT.pop(graph_uuid_)
return True
def query_graph_v2_latest_logs(future):
if not future.cancelled():
logs = future.result()
print('--- <LOG DUMP> ---')
for msg, color in logs:
ut.cprint('[web.infr] ' + msg, color)
        print('--- </LOG DUMP> ---')
def query_graph_v2_on_request_review(future):
if not future.cancelled():
graph_client = future.graph_client
data_list = future.result()
if data_list is not None:
graph_client.update(data_list)
callback_type = 'review'
else:
graph_client.update(None)
callback_type = 'finished'
query_graph_v2_callback(graph_client, callback_type)
if __name__ == '__main__':
"""
CommandLine:
python -m ibeis.web.app
python -m ibeis.web.app --allexamples
python -m ibeis.web.app --allexamples --noface --nosrc
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
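

# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal, hedged sketch of driving the /api/query/graph/v2/ REST endpoints
# registered above from an external client with `requests`. The host/port and
# the exact JSON response envelope are assumptions (the docstring example above
# suggests results arrive under a 'response' key); consult a running ibeis web
# server for the authoritative interface. The function is never called here.
def _example_query_graph_v2_rest_client(base_url='http://127.0.0.1:5000'):
    import requests
    annot_uuid_list = []  # fill with annotation UUID strings from your database
    # POST: create the graph query (query_chips_graph_v2)
    resp = requests.post(base_url + '/api/query/graph/v2/',
                         json={'annot_uuid_list': annot_uuid_list})
    graph_uuid = resp.json()['response']  # assumed envelope key
    # GET: synchronize accumulated reviews (sync_query_chips_graph_v2)
    requests.get(base_url + '/api/query/graph/v2/',
                 params={'graph_uuid': graph_uuid})
    # PUT: add more annotations (add_annots_query_chips_graph_v2)
    requests.put(base_url + '/api/query/graph/v2/',
                 json={'graph_uuid': graph_uuid, 'annot_uuid_list': []})
    # DELETE: tear the graph client down (delete_query_chips_graph_v2)
    requests.delete(base_url + '/api/query/graph/v2/',
                    json={'graph_uuid': graph_uuid})
    return graph_uuid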
|
|
"""Author: Cole Howard
Email: [email protected]
network.py is a basic implementation of a one layer linear neural network, to
examine an implementation of backpropagation. It is based on the basic model
of the Perceptron. Information on that can be found at:
https://en.wikipedia.org/wiki/Perceptron
The intent of this specific project is to alter the Perceptron's
decision function to a logistic function and add a "backpropagation" step
at the end of each vector's pass through the neuron.
There are several methods included that are currently stubbed out (they simply
pass), as there are plans to make this as extensible as possible as a tool for
exploring neural nets under more general circumstances.
Dependencies:
numpy.dot() : for a fast implementation of the dot product of two vectors
sklearn.datasets : (optional) for running this as a script on the
scikit-learn digits dataset
neuron : the class definition of an individual neuron, also included in
mini_net
Usage:
    It is currently set up to run a training set of input (along with the
    associated answers) and a set of similar, but distinct, input (without the
    answers), and to have the machine guess an answer to each of those cases
    based on information it gathered during the training run.
To import the network for testing on other data:
download the package mini_net,
then include in your script:
from network import Network
To execute as is, from the command line, while in the linear_neuron/mini_net/
directory, input:
$ python3 network.py
    This will pull the learning and test data from scikit-learn, run both, and
    return a success count, the total count of the unseen test data, and the
    resulting success rate.
    The first output and success ratio are based on the first set of testing
    vectors.  The second set reports the same for the validation set.  When the
    visualization (see below) pops up, just close that window to let the script
    finish running.
Alternate data sets:
Alternate training and testing data sets can be swapped out in the first
section of main() below. See those notes for specifics.
Visualization:
Pyplot is included to provide a visual representation of a member of the
dataset.
"""
from matplotlib import pyplot as plt
from numpy import dot
from sklearn import datasets, utils
from neuron import Neuron
class Network:
""" A Network instance will create layers of neurons for the implementa-
tion of neural network.
Parameters
----------
images : list
Corresponding images of the dataset
neuron_targets : list
The possible final output values
vector_size : int
Size of the individual input vectors
train_set : list
Set of vectors for the learning portion
train_answers : list
Correct answers that correspond to the train_set
epochs : int
Number of times the learn_run will run for a given train_set
test_set : list
        Set of vectors, distinct from the train_set, for the machine to
        guess against
test_answers : list
Correct answers for the test_set, to compare the machine's
guesses against
validation_set : list
A validation set to compare answers in a second run
validation_answers : list
Answer for the above
Attributes
----------
neurons : Class Neuron
Instances of the Neuron class, one for each of possible correct
answers
"""
def __init__(self, neuron_targets, train_set,
train_answers, epochs, test_set, test_answers, validation_set,
validation_answers, images=None):
self.neuron_count = neuron_targets
self.vector_size = len(train_set[0])
self.train_set = [self.append_bias(vector) for vector in train_set]
self.train_answers = train_answers
self.epochs = epochs
self.test_set = [self.append_bias(vector) for vector in test_set]
self.test_answers = test_answers
self.validation_set = validation_set
self.validation_answers = validation_answers
self.neurons = [Neuron(self.vector_size, x, len(self.train_set),
self.train_answers) for x in self.neuron_count]
self.images = images
def gradient_descent(self, vector, vector_index):
""" Calculates the gradient_descent
Parameters
----------
vector : list
A single input, comprised of floats
vector_index : int
Attributes
----------
learning_rate : float
Determines how much of the error is applied to the weights
in each iteration
Returns
-------
float
            Represents the error to be used to update the weights of
            the neurons.  It should approximate a gradient descent in the
            topology of the outputs.
"""
learning_rate = .05
temp_list = []
for x in self.neuron_count:
dp = self.neurons[x]._dot_product(vector)
temp_list.append(((self.neurons[x]._sigmoid(dp)) -
self.neurons[x].expected[vector_index]) *
self.neurons[x]._sigmoid(dp) * (1 -
self.neurons[x]._sigmoid(dp)))
gd = -1 * learning_rate * sum(temp_list)
return gd
def learn_run(self):
""" Runs an iteration through the neuron sets and adjust the weights
appropriately. It then follows up with a second weight adjusment
accross all neurons with an estimate of the gradient descent
function
"""
for x in range(self.epochs):
for idx, vector in enumerate(self.train_set):
for neuron in self.neurons:
neuron.train_pass(vector, idx)
                gd = self.gradient_descent(vector, idx)  # Backpropagate the error
for neuron in self.neurons:
neuron.update_weights(gd, vector)
def run_unseen(self, validation=False):
""" Makes guesses on the unseen data, and switches over the test
answers to validation set if the bool is True
        For each vector in the collection, each neuron in turn will either
        fire or not.  If a neuron fires, it is collected as a possible
        correct guess.  Not firing is collected as well, in case
        there are no good guesses at all.  The method will choose the
        guess with the highest dot product, from either the fired list
        or the dud list.
Parameters
----------
validation : bool
Runs a different set of vectors through the guessing
process if validation is set to True
Returns
-------
list
a list of ints (the guesses for each vector)
"""
if validation:
self.test_set = self.validation_set
temp_guess_list = [[] for x in self.test_set]
temp_dud_guess_list = [[] for x in self.test_set]
for idy, vector in enumerate(self.test_set):
for idx, neuron in enumerate(self.neurons):
nf = neuron.fires(vector)
if nf[0]:
temp_guess_list[idy].append((nf[1], idx))
temp_dud_guess_list[idy].append((0, idx))
else:
temp_guess_list[idy].append((0, None))
temp_dud_guess_list[idy].append((nf[1], idx))
temp_guess_list[idy].sort(reverse=True)
temp_dud_guess_list[idy].sort(reverse=True)
guess_list = [x[0][1] for x in temp_guess_list]
dud_guess_list = [x[0][1] for x in temp_dud_guess_list]
new_guess_list = [x if (x is not None) else dud_guess_list[idx] for
idx, x in enumerate(guess_list)]
return new_guess_list
def report_results(self, guess_list, validation=False):
""" Reports results of guesses on unseen set
Parameters
----------
guess_list : list
"""
if validation:
self.test_answers = self.validation_answers
# print("I guess this is a: ", guess_list[1])
# plt.imshow(self.images[1451], cmap="Greys",
# interpolation='nearest')
# plt.show()
successes = 0
for idx, item in enumerate(guess_list):
if self.test_answers[idx] == item:
successes += 1
print("Successes: {} Out of total: {}".format(successes,
len(guess_list)))
print("For a success rate of: ", successes/len(guess_list))
def append_bias(self, vector):
""" Takes a list of n entries and appends a 1 for the bias
Parameters
----------
vector : list
Attributes
----------
num_of_training_vectors : int
            This is to adjust the size of the training set when all of the data
            is provided as a large list, breaking the training data into a
            training set, a testing set, and a validation set.  Picking this
            number is a balance between speed (a lower number) and overfitting
            the data (a higher number).
Returns
-------
list
The input vector with a one appended to the end of the list, as
a bias
"""
temp_vector = [x for x in vector]
temp_vector.append(1)
return temp_vector
def main():
# In the scikit-learn set below, the data is shuffled using utils.resample
    # as the first pass had an artifact at the end of the list that wasn't
# representative of the rest of the set.
# Dependent on input set
temp_digits = datasets.load_digits()
digits = utils.resample(temp_digits.data, random_state=0)
temp_answers = utils.resample(temp_digits.target, random_state=0)
images = utils.resample(temp_digits.images, random_state=0)
target_values = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
num_of_training_vectors = 950
    answers, answers_to_test, validation_answers = (
        temp_answers[:num_of_training_vectors],
        temp_answers[num_of_training_vectors:num_of_training_vectors + 500],
        temp_answers[num_of_training_vectors + 500:],
    )
    training_set, testing_set, validation_set = (
        digits[:num_of_training_vectors],
        digits[num_of_training_vectors:num_of_training_vectors + 500],
        digits[num_of_training_vectors + 500:],
    )
epoch = 100
# For all inputs
network = Network(target_values, training_set, answers, epoch, testing_set,
answers_to_test, validation_set, validation_answers,
images)
network.learn_run()
network.report_results(network.run_unseen())
network.report_results(network.run_unseen(True), True)
if __name__ == '__main__':
main()
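

# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal, hedged sketch of the import-and-use path described in the module
# docstring ("To import the network for testing on other data"). The dataset
# variables below are placeholders, not part of mini_net, and the function is
# never called here.
def _example_custom_dataset_run():
    my_train = [[0.0] * 64 for _ in range(100)]   # placeholder 8x8 pixel vectors
    my_train_answers = [0] * 100                  # placeholder labels
    my_test = [[0.0] * 64 for _ in range(20)]
    my_test_answers = [0] * 20
    my_validation = [[0.0] * 64 for _ in range(20)]
    my_validation_answers = [0] * 20
    net = Network([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], my_train, my_train_answers,
                  10, my_test, my_test_answers, my_validation,
                  my_validation_answers)
    net.learn_run()
    net.report_results(net.run_unseen())
    net.report_results(net.run_unseen(True), True)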
|
|
"""Calxeda: ubootenv.py """
# Copyright (c) 2012-2013, Calxeda Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Calxeda Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
# THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import struct
from cxmanage_api.simg import has_simg, get_simg_contents
from cxmanage_api.crc32 import get_crc32
from cxmanage_api.cx_exceptions import UbootenvError
ENVIRONMENT_SIZE = 8192
UBOOTENV_V1_VARIABLES = ["bootcmd_default", "bootcmd_sata", "bootcmd_pxe",
"bootdevice"]
UBOOTENV_V2_VARIABLES = ["bootcmd0", "init_scsi", "bootcmd_scsi", "init_pxe",
"bootcmd_pxe", "devnum"]
class UbootEnv(object):
"""Represents a U-Boot Environment.
>>> from cxmanage_api.ubootenv import UbootEnv
>>> uboot = UbootEnv()
    :param contents: UBootEnvironment contents.
:type contents: string
"""
def __init__(self, contents=None):
"""Default constructor for the UbootEnv class."""
self.variables = {}
        if contents is not None:
if (has_simg(contents)):
contents = get_simg_contents(contents)
contents = contents.rstrip("%c%c" % (chr(0), chr(255)))[4:]
lines = contents.split(chr(0))
for line in lines:
part = line.partition("=")
self.variables[part[0]] = part[2]
# pylint: disable=R0912
def set_boot_order(self, boot_args):
"""Sets the boot order specified in the uboot environment.
>>> uboot.set_boot_order(boot_args=['disk', 'pxe'])
.. note::
* Valid Args:
pxe - boot from pxe server\n
disk - boot from default sata device\n
diskX - boot from sata device X\n
diskX:Y - boot from sata device X, partition Y\n
retry - retry last boot device indefinitely\n
reset - reset A9\n
:param boot_args: Boot args (boot order). A list of strings.
:type boot_args: list
:raises ValueError: If an invalid boot device is specified.
:raises ValueError: If 'retry' and 'reset' args are used together.
:raises UbootenvError: If the u-boot environment is unrecognized
"""
validate_boot_args(boot_args)
if boot_args == self.get_boot_order():
return
commands = []
retry = False
reset = False
if all(x in self.variables for x in UBOOTENV_V1_VARIABLES):
version = 1
elif all(x in self.variables for x in UBOOTENV_V2_VARIABLES):
version = 2
else:
raise UbootenvError("Unrecognized u-boot environment")
for arg in boot_args:
if arg == "retry":
retry = True
elif arg == "reset":
reset = True
elif version == 1:
if arg == "pxe":
commands.append("run bootcmd_pxe")
elif arg == "disk":
commands.append("run bootcmd_sata")
elif arg.startswith("disk"):
try:
# pylint: disable=W0141
dev, part = map(int, arg[4:].split(":"))
bootdevice = "%i:%i" % (dev, part)
except ValueError:
bootdevice = str(int(arg[4:]))
commands.append("setenv bootdevice %s && run bootcmd_sata"
% bootdevice)
elif version == 2:
if arg == "pxe":
commands.append("run init_pxe && run bootcmd_pxe")
elif arg == "disk":
commands.append("run init_scsi && run bootcmd_scsi")
elif arg.startswith("disk"):
try:
# pylint: disable=W0141
dev, part = map(int, arg[4:].split(":"))
bootdevice = "%i:%i" % (dev, part)
except ValueError:
bootdevice = str(int(arg[4:]))
commands.append(
"setenv devnum %s && run init_scsi && run bootcmd_scsi"
% bootdevice
)
if retry and reset:
raise ValueError("retry and reset are mutually exclusive")
elif retry:
commands[-1] = "while true\ndo\n%s\nsleep 1\ndone" % commands[-1]
elif reset:
commands.append("reset")
if version == 1:
self.variables["bootcmd_default"] = "; ".join(commands)
else:
self.variables["bootcmd0"] = "; ".join(commands)
def get_boot_order(self):
"""Gets the boot order specified in the uboot environment.
>>> uboot.get_boot_order()
['disk', 'pxe']
:returns: Boot order for this U-Boot Environment.
        :rtype: list
:raises UbootenvError: If a boot command is unrecognized.
"""
boot_args = []
if self.variables["bootcmd0"] == "run boot_iter":
for target in self.variables["boot_targets"].split():
if target == "pxe":
boot_args.append("pxe")
elif target == "scsi":
boot_args.append("disk")
else:
raise UbootenvError("Unrecognized boot target: %s"
% target)
else:
if "bootcmd_default" in self.variables:
commands = self.variables["bootcmd_default"].split("; ")
else:
commands = self.variables["bootcmd0"].split("; ")
retry = False
for command in commands:
if command.startswith("while true"):
retry = True
command = command.split("\n")[2]
if command in ["run bootcmd_pxe",
"run init_pxe && run bootcmd_pxe"]:
boot_args.append("pxe")
elif command in ["run bootcmd_sata",
"run init_scsi && run bootcmd_scsi"]:
boot_args.append("disk")
elif (command.startswith("setenv bootdevice") or
command.startswith("setenv devnum")):
boot_args.append("disk%s" % command.split()[2])
elif (command == "reset"):
boot_args.append("reset")
break
else:
raise UbootenvError("Unrecognized boot command: %s"
% command)
if retry:
boot_args.append("retry")
break
if not boot_args:
boot_args = ["none"]
validate_boot_args(boot_args) # sanity check
return boot_args
def set_pxe_interface(self, interface):
"""Sets the interfacespecified in the uboot environment.
>>> uboot.set_pxe_interface('eth0')
.. note::
* Valid Args: eth0 or eth1
:param interface: The interface to set.
        :type interface: string
:raises ValueError: If an invalid interface is specified.
"""
validate_pxe_interface(interface)
if interface == self.get_pxe_interface():
return
if interface == "eth0":
self.variables["ethprime"] = "xgmac0"
elif (interface == "eth1"):
self.variables["ethprime"] = "xgmac1"
else:
raise ValueError("Invalid pxe interface: %s" % interface)
def get_pxe_interface(self):
"""Returns a string representation of the pxe interface.
>>> uboot.get_pxe_interface()
'eth0'
        :returns: The pxe interface for this U-Boot Environment.
:rtype: string
:raises ValueError: If the u-boot environment value is not recognized.
"""
# This is based on reading the ethprime environment variable, and
# translating from xgmacX to ethX. By default ethprime is not set
# and eth0 is the assumed default (NOTE: this is brittle)
if "ethprime" in self.variables:
xgmac = self.variables["ethprime"]
if xgmac == "xgmac0":
return "eth0"
elif (xgmac == "xgmac1"):
return "eth1"
else:
raise ValueError("Unrecognized value for ethprime")
else:
return "eth0"
def get_contents(self):
"""Returns a raw string representation of the uboot environment.
>>> uboot.get_contents()
'j4\x88\xb7bootcmd_default=run bootcmd_sata; run bootcmd_pxe ... '
>>> #
>>> # Output trimmed for brevity ...
>>> #
:returns: Raw string representation of the UBoot Environment.
:rtype: string
"""
contents = ""
# Add variables
for variable in self.variables:
contents += "%s=%s\0" % (variable, self.variables[variable])
contents += "\0"
# Add padding to end
contents += "".join([chr(255)
for _ in range(ENVIRONMENT_SIZE - len(contents) - 4)])
# Add crc32 to beginning
crc32 = get_crc32(contents, 0xFFFFFFFF) ^ 0xFFFFFFFF
contents = struct.pack("<I", crc32) + contents
return contents
def validate_boot_args(boot_args):
""" Validate boot arguments. Raises a ValueError if the args are invalid."""
for arg in boot_args:
if arg in ["retry", "reset", "pxe", "disk", "none"]:
continue
elif arg.startswith("disk"):
try:
# pylint: disable=W0141
map(int, arg[4:].split(":"))
except ValueError:
try:
int(arg[4:])
except ValueError:
raise ValueError("Invalid boot arg: %s" % arg)
else:
raise ValueError("Invalid boot arg: %s" % arg)
def validate_pxe_interface(interface):
""" Validate pxe interface. Raises a ValueError if the args are invalid."""
    if interface not in ["eth0", "eth1"]:
raise ValueError("Invalid pxe interface: %s" % interface)
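

# --- Illustrative usage sketch (not part of the original module) -------------
# A minimal, hedged sketch of building a v2-style environment, setting a boot
# order, and serializing it with get_contents(), which prefixes a little-endian
# CRC32 and pads the blob out to ENVIRONMENT_SIZE with 0xFF bytes. The initial
# variable values are illustrative only, and the sketch assumes the module's
# Python 2 string handling. The function is never called here.
def _example_build_environment():
    env = UbootEnv()
    for variable in UBOOTENV_V2_VARIABLES:
        env.variables[variable] = ""
    env.variables["bootcmd0"] = "run init_scsi && run bootcmd_scsi"
    env.set_boot_order(["disk0:1", "pxe", "reset"])
    blob = env.get_contents()
    assert len(blob) == ENVIRONMENT_SIZE
    # Re-parsing the serialized blob should reproduce the same boot order.
    assert UbootEnv(blob).get_boot_order() == ["disk0:1", "pxe", "reset"]
    return blob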
|
|
"""Tests for acme.client."""
import datetime
import json
import unittest
from six.moves import http_client # pylint: disable=import-error
import mock
import requests
from acme import challenges
from acme import errors
from acme import jose
from acme import jws as acme_jws
from acme import messages
from acme import messages_test
from acme import test_util
CERT_DER = test_util.load_vector('cert.der')
KEY = jose.JWKRSA.load(test_util.load_vector('rsa512_key.pem'))
KEY2 = jose.JWKRSA.load(test_util.load_vector('rsa256_key.pem'))
class ClientTest(unittest.TestCase):
"""Tests for acme.client.Client."""
# pylint: disable=too-many-instance-attributes,too-many-public-methods
def setUp(self):
self.response = mock.MagicMock(
ok=True, status_code=http_client.OK, headers={}, links={})
self.net = mock.MagicMock()
self.net.post.return_value = self.response
self.net.get.return_value = self.response
self.directory = messages.Directory({
messages.NewRegistration:
'https://www.letsencrypt-demo.org/acme/new-reg',
messages.Revocation:
'https://www.letsencrypt-demo.org/acme/revoke-cert',
messages.NewAuthorization:
'https://www.letsencrypt-demo.org/acme/new-authz',
})
from acme.client import Client
self.client = Client(
directory=self.directory, key=KEY, alg=jose.RS256, net=self.net)
self.identifier = messages.Identifier(
typ=messages.IDENTIFIER_FQDN, value='example.com')
# Registration
self.contact = ('mailto:[email protected]', 'tel:+12025551212')
reg = messages.Registration(
contact=self.contact, key=KEY.public_key())
self.new_reg = messages.NewRegistration(**dict(reg))
self.regr = messages.RegistrationResource(
body=reg, uri='https://www.letsencrypt-demo.org/acme/reg/1',
new_authzr_uri='https://www.letsencrypt-demo.org/acme/new-reg',
terms_of_service='https://www.letsencrypt-demo.org/tos')
# Authorization
authzr_uri = 'https://www.letsencrypt-demo.org/acme/authz/1'
challb = messages.ChallengeBody(
uri=(authzr_uri + '/1'), status=messages.STATUS_VALID,
chall=challenges.DNS(token=jose.b64decode(
'evaGxfADs6pSRb2LAv9IZf17Dt3juxGJ-PCt92wr-oA')))
self.challr = messages.ChallengeResource(
body=challb, authzr_uri=authzr_uri)
self.authz = messages.Authorization(
identifier=messages.Identifier(
typ=messages.IDENTIFIER_FQDN, value='example.com'),
challenges=(challb,), combinations=None)
self.authzr = messages.AuthorizationResource(
body=self.authz, uri=authzr_uri,
new_cert_uri='https://www.letsencrypt-demo.org/acme/new-cert')
# Request issuance
self.certr = messages.CertificateResource(
body=messages_test.CERT, authzrs=(self.authzr,),
uri='https://www.letsencrypt-demo.org/acme/cert/1',
cert_chain_uri='https://www.letsencrypt-demo.org/ca')
def test_init_downloads_directory(self):
uri = 'http://www.letsencrypt-demo.org/directory'
from acme.client import Client
self.client = Client(
directory=uri, key=KEY, alg=jose.RS256, net=self.net)
self.net.get.assert_called_once_with(uri)
def test_register(self):
# "Instance of 'Field' has no to_json/update member" bug:
# pylint: disable=no-member
self.response.status_code = http_client.CREATED
self.response.json.return_value = self.regr.body.to_json()
self.response.headers['Location'] = self.regr.uri
self.response.links.update({
'next': {'url': self.regr.new_authzr_uri},
'terms-of-service': {'url': self.regr.terms_of_service},
})
self.assertEqual(self.regr, self.client.register(self.new_reg))
# TODO: test POST call arguments
# TODO: split here and separate test
reg_wrong_key = self.regr.body.update(key=KEY2.public_key())
self.response.json.return_value = reg_wrong_key.to_json()
self.assertRaises(
errors.UnexpectedUpdate, self.client.register, self.new_reg)
def test_register_missing_next(self):
self.response.status_code = http_client.CREATED
self.assertRaises(
errors.ClientError, self.client.register, self.new_reg)
def test_update_registration(self):
# "Instance of 'Field' has no to_json/update member" bug:
# pylint: disable=no-member
self.response.headers['Location'] = self.regr.uri
self.response.json.return_value = self.regr.body.to_json()
self.assertEqual(self.regr, self.client.update_registration(self.regr))
# TODO: test POST call arguments
# TODO: split here and separate test
self.response.json.return_value = self.regr.body.update(
contact=()).to_json()
self.assertRaises(
errors.UnexpectedUpdate, self.client.update_registration, self.regr)
def test_query_registration(self):
self.response.json.return_value = self.regr.body.to_json()
self.assertEqual(self.regr, self.client.query_registration(self.regr))
def test_query_registration_updates_new_authzr_uri(self):
self.response.json.return_value = self.regr.body.to_json()
self.response.links = {'next': {'url': 'UPDATED'}}
self.assertEqual(
'UPDATED',
self.client.query_registration(self.regr).new_authzr_uri)
def test_agree_to_tos(self):
self.client.update_registration = mock.Mock()
self.client.agree_to_tos(self.regr)
regr = self.client.update_registration.call_args[0][0]
self.assertEqual(self.regr.terms_of_service, regr.body.agreement)
def _prepare_response_for_request_challenges(self):
self.response.status_code = http_client.CREATED
self.response.headers['Location'] = self.authzr.uri
self.response.json.return_value = self.authz.to_json()
self.response.links = {
'next': {'url': self.authzr.new_cert_uri},
}
def test_request_challenges(self):
self._prepare_response_for_request_challenges()
self.client.request_challenges(self.identifier)
self.net.post.assert_called_once_with(
self.directory.new_authz,
messages.NewAuthorization(identifier=self.identifier))
    def test_request_challenges_custom_uri(self):
self._prepare_response_for_request_challenges()
self.client.request_challenges(self.identifier, 'URI')
self.net.post.assert_called_once_with('URI', mock.ANY)
def test_request_challenges_unexpected_update(self):
self._prepare_response_for_request_challenges()
self.response.json.return_value = self.authz.update(
identifier=self.identifier.update(value='foo')).to_json()
self.assertRaises(
errors.UnexpectedUpdate, self.client.request_challenges,
self.identifier, self.authzr.uri)
def test_request_challenges_missing_next(self):
self.response.status_code = http_client.CREATED
self.assertRaises(errors.ClientError, self.client.request_challenges,
self.identifier)
def test_request_domain_challenges(self):
self.client.request_challenges = mock.MagicMock()
self.assertEqual(
self.client.request_challenges(self.identifier),
self.client.request_domain_challenges('example.com'))
def test_request_domain_challenges_custom_uri(self):
self.client.request_challenges = mock.MagicMock()
self.assertEqual(
self.client.request_challenges(self.identifier, 'URI'),
self.client.request_domain_challenges('example.com', 'URI'))
def test_answer_challenge(self):
self.response.links['up'] = {'url': self.challr.authzr_uri}
self.response.json.return_value = self.challr.body.to_json()
chall_response = challenges.DNSResponse(validation=None)
self.client.answer_challenge(self.challr.body, chall_response)
# TODO: split here and separate test
self.assertRaises(errors.UnexpectedUpdate, self.client.answer_challenge,
self.challr.body.update(uri='foo'), chall_response)
def test_answer_challenge_missing_next(self):
self.assertRaises(
errors.ClientError, self.client.answer_challenge,
self.challr.body, challenges.DNSResponse(validation=None))
def test_retry_after_date(self):
self.response.headers['Retry-After'] = 'Fri, 31 Dec 1999 23:59:59 GMT'
self.assertEqual(
datetime.datetime(1999, 12, 31, 23, 59, 59),
self.client.retry_after(response=self.response, default=10))
@mock.patch('acme.client.datetime')
def test_retry_after_invalid(self, dt_mock):
dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27)
dt_mock.timedelta = datetime.timedelta
self.response.headers['Retry-After'] = 'foooo'
self.assertEqual(
datetime.datetime(2015, 3, 27, 0, 0, 10),
self.client.retry_after(response=self.response, default=10))
@mock.patch('acme.client.datetime')
def test_retry_after_overflow(self, dt_mock):
dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27)
dt_mock.timedelta = datetime.timedelta
dt_mock.datetime.side_effect = datetime.datetime
self.response.headers['Retry-After'] = "Tue, 116 Feb 2016 11:50:00 MST"
self.assertEqual(
datetime.datetime(2015, 3, 27, 0, 0, 10),
self.client.retry_after(response=self.response, default=10))
@mock.patch('acme.client.datetime')
def test_retry_after_seconds(self, dt_mock):
dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27)
dt_mock.timedelta = datetime.timedelta
self.response.headers['Retry-After'] = '50'
self.assertEqual(
datetime.datetime(2015, 3, 27, 0, 0, 50),
self.client.retry_after(response=self.response, default=10))
@mock.patch('acme.client.datetime')
def test_retry_after_missing(self, dt_mock):
dt_mock.datetime.now.return_value = datetime.datetime(2015, 3, 27)
dt_mock.timedelta = datetime.timedelta
self.assertEqual(
datetime.datetime(2015, 3, 27, 0, 0, 10),
self.client.retry_after(response=self.response, default=10))
def test_poll(self):
self.response.json.return_value = self.authzr.body.to_json()
self.assertEqual((self.authzr, self.response),
self.client.poll(self.authzr))
# TODO: split here and separate test
self.response.json.return_value = self.authz.update(
identifier=self.identifier.update(value='foo')).to_json()
self.assertRaises(
errors.UnexpectedUpdate, self.client.poll, self.authzr)
def test_request_issuance(self):
self.response.content = CERT_DER
self.response.headers['Location'] = self.certr.uri
self.response.links['up'] = {'url': self.certr.cert_chain_uri}
self.assertEqual(self.certr, self.client.request_issuance(
messages_test.CSR, (self.authzr,)))
# TODO: check POST args
def test_request_issuance_missing_up(self):
self.response.content = CERT_DER
self.response.headers['Location'] = self.certr.uri
self.assertEqual(
self.certr.update(cert_chain_uri=None),
self.client.request_issuance(messages_test.CSR, (self.authzr,)))
def test_request_issuance_missing_location(self):
self.assertRaises(
errors.ClientError, self.client.request_issuance,
messages_test.CSR, (self.authzr,))
@mock.patch('acme.client.datetime')
@mock.patch('acme.client.time')
def test_poll_and_request_issuance(self, time_mock, dt_mock):
# clock.dt | pylint: disable=no-member
clock = mock.MagicMock(dt=datetime.datetime(2015, 3, 27))
def sleep(seconds):
"""increment clock"""
clock.dt += datetime.timedelta(seconds=seconds)
time_mock.sleep.side_effect = sleep
def now():
"""return current clock value"""
return clock.dt
dt_mock.datetime.now.side_effect = now
dt_mock.timedelta = datetime.timedelta
def poll(authzr): # pylint: disable=missing-docstring
# record poll start time based on the current clock value
authzr.times.append(clock.dt)
# suppose it takes 2 seconds for server to produce the
# result, increment clock
clock.dt += datetime.timedelta(seconds=2)
if len(authzr.retries) == 1: # no more retries
done = mock.MagicMock(uri=authzr.uri, times=authzr.times)
done.body.status = authzr.retries[0]
return done, []
# response (2nd result tuple element) is reduced to only
# Retry-After header contents represented as integer
# seconds; authzr.retries is a list of Retry-After
# headers, head(retries) is peeled of as a current
# Retry-After header, and tail(retries) is persisted for
# later poll() calls
return (mock.MagicMock(retries=authzr.retries[1:],
uri=authzr.uri + '.', times=authzr.times),
authzr.retries[0])
self.client.poll = mock.MagicMock(side_effect=poll)
mintime = 7
def retry_after(response, default):
# pylint: disable=missing-docstring
# check that poll_and_request_issuance correctly passes mintime
self.assertEqual(default, mintime)
return clock.dt + datetime.timedelta(seconds=response)
self.client.retry_after = mock.MagicMock(side_effect=retry_after)
def request_issuance(csr, authzrs): # pylint: disable=missing-docstring
return csr, authzrs
self.client.request_issuance = mock.MagicMock(
side_effect=request_issuance)
csr = mock.MagicMock()
authzrs = (
mock.MagicMock(uri='a', times=[], retries=(
8, 20, 30, messages.STATUS_VALID)),
mock.MagicMock(uri='b', times=[], retries=(
5, messages.STATUS_VALID)),
)
cert, updated_authzrs = self.client.poll_and_request_issuance(
csr, authzrs, mintime=mintime,
# make sure that max_attempts is per-authorization, rather
# than global
max_attempts=max(len(authzrs[0].retries), len(authzrs[1].retries)))
self.assertTrue(cert[0] is csr)
self.assertTrue(cert[1] is updated_authzrs)
self.assertEqual(updated_authzrs[0].uri, 'a...')
self.assertEqual(updated_authzrs[1].uri, 'b.')
self.assertEqual(updated_authzrs[0].times, [
datetime.datetime(2015, 3, 27),
# a is scheduled for 10, but b is polling [9..11), so it
# will be picked up as soon as b is finished, without
# additional sleeping
datetime.datetime(2015, 3, 27, 0, 0, 11),
datetime.datetime(2015, 3, 27, 0, 0, 33),
datetime.datetime(2015, 3, 27, 0, 1, 5),
])
self.assertEqual(updated_authzrs[1].times, [
datetime.datetime(2015, 3, 27, 0, 0, 2),
datetime.datetime(2015, 3, 27, 0, 0, 9),
])
self.assertEqual(clock.dt, datetime.datetime(2015, 3, 27, 0, 1, 7))
# CA sets invalid | TODO: move to a separate test
invalid_authzr = mock.MagicMock(
times=[], retries=[messages.STATUS_INVALID])
self.assertRaises(
errors.PollError, self.client.poll_and_request_issuance,
csr, authzrs=(invalid_authzr,), mintime=mintime)
# exceeded max_attemps | TODO: move to a separate test
self.assertRaises(
errors.PollError, self.client.poll_and_request_issuance,
csr, authzrs, mintime=mintime, max_attempts=2)
def test_check_cert(self):
self.response.headers['Location'] = self.certr.uri
self.response.content = CERT_DER
self.assertEqual(self.certr.update(body=messages_test.CERT),
self.client.check_cert(self.certr))
# TODO: split here and separate test
self.response.headers['Location'] = 'foo'
self.assertRaises(
errors.UnexpectedUpdate, self.client.check_cert, self.certr)
def test_check_cert_missing_location(self):
self.response.content = CERT_DER
self.assertRaises(
errors.ClientError, self.client.check_cert, self.certr)
def test_refresh(self):
self.client.check_cert = mock.MagicMock()
self.assertEqual(
self.client.check_cert(self.certr), self.client.refresh(self.certr))
def test_fetch_chain_no_up_link(self):
self.assertEqual([], self.client.fetch_chain(self.certr.update(
cert_chain_uri=None)))
def test_fetch_chain_single(self):
# pylint: disable=protected-access
self.client._get_cert = mock.MagicMock()
self.client._get_cert.return_value = (
mock.MagicMock(links={}), "certificate")
self.assertEqual([self.client._get_cert(self.certr.cert_chain_uri)[1]],
self.client.fetch_chain(self.certr))
def test_fetch_chain_max(self):
# pylint: disable=protected-access
up_response = mock.MagicMock(links={'up': {'url': 'http://cert'}})
noup_response = mock.MagicMock(links={})
self.client._get_cert = mock.MagicMock()
self.client._get_cert.side_effect = [
(up_response, "cert")] * 9 + [(noup_response, "last_cert")]
chain = self.client.fetch_chain(self.certr, max_length=10)
self.assertEqual(chain, ["cert"] * 9 + ["last_cert"])
def test_fetch_chain_too_many(self): # recursive
# pylint: disable=protected-access
response = mock.MagicMock(links={'up': {'url': 'http://cert'}})
self.client._get_cert = mock.MagicMock()
self.client._get_cert.return_value = (response, "certificate")
self.assertRaises(errors.Error, self.client.fetch_chain, self.certr)
def test_revoke(self):
self.client.revoke(self.certr.body)
self.net.post.assert_called_once_with(
self.directory[messages.Revocation], mock.ANY, content_type=None)
def test_revoke_bad_status_raises_error(self):
self.response.status_code = http_client.METHOD_NOT_ALLOWED
self.assertRaises(errors.ClientError, self.client.revoke, self.certr)
class ClientNetworkTest(unittest.TestCase):
"""Tests for acme.client.ClientNetwork."""
def setUp(self):
self.verify_ssl = mock.MagicMock()
self.wrap_in_jws = mock.MagicMock(return_value=mock.sentinel.wrapped)
from acme.client import ClientNetwork
self.net = ClientNetwork(
key=KEY, alg=jose.RS256, verify_ssl=self.verify_ssl,
user_agent='acme-python-test')
self.response = mock.MagicMock(ok=True, status_code=http_client.OK)
self.response.headers = {}
self.response.links = {}
def test_init(self):
self.assertTrue(self.net.verify_ssl is self.verify_ssl)
def test_wrap_in_jws(self):
class MockJSONDeSerializable(jose.JSONDeSerializable):
# pylint: disable=missing-docstring
def __init__(self, value):
self.value = value
def to_partial_json(self):
return {'foo': self.value}
@classmethod
def from_json(cls, value):
pass # pragma: no cover
# pylint: disable=protected-access
jws_dump = self.net._wrap_in_jws(
MockJSONDeSerializable('foo'), nonce=b'Tg')
jws = acme_jws.JWS.json_loads(jws_dump)
self.assertEqual(json.loads(jws.payload.decode()), {'foo': 'foo'})
self.assertEqual(jws.signature.combined.nonce, b'Tg')
def test_check_response_not_ok_jobj_no_error(self):
self.response.ok = False
self.response.json.return_value = {}
# pylint: disable=protected-access
self.assertRaises(
errors.ClientError, self.net._check_response, self.response)
def test_check_response_not_ok_jobj_error(self):
self.response.ok = False
self.response.json.return_value = messages.Error(
detail='foo', typ='serverInternal', title='some title').to_json()
# pylint: disable=protected-access
self.assertRaises(
messages.Error, self.net._check_response, self.response)
def test_check_response_not_ok_no_jobj(self):
self.response.ok = False
self.response.json.side_effect = ValueError
# pylint: disable=protected-access
self.assertRaises(
errors.ClientError, self.net._check_response, self.response)
def test_check_response_ok_no_jobj_ct_required(self):
self.response.json.side_effect = ValueError
for response_ct in [self.net.JSON_CONTENT_TYPE, 'foo']:
self.response.headers['Content-Type'] = response_ct
# pylint: disable=protected-access
self.assertRaises(
errors.ClientError, self.net._check_response, self.response,
content_type=self.net.JSON_CONTENT_TYPE)
def test_check_response_ok_no_jobj_no_ct(self):
self.response.json.side_effect = ValueError
for response_ct in [self.net.JSON_CONTENT_TYPE, 'foo']:
self.response.headers['Content-Type'] = response_ct
# pylint: disable=protected-access,no-value-for-parameter
self.assertEqual(
self.response, self.net._check_response(self.response))
def test_check_response_jobj(self):
self.response.json.return_value = {}
for response_ct in [self.net.JSON_CONTENT_TYPE, 'foo']:
self.response.headers['Content-Type'] = response_ct
# pylint: disable=protected-access,no-value-for-parameter
self.assertEqual(
self.response, self.net._check_response(self.response))
@mock.patch('acme.client.requests')
def test_send_request(self, mock_requests):
mock_requests.request.return_value = self.response
# pylint: disable=protected-access
self.assertEqual(self.response, self.net._send_request(
'HEAD', 'url', 'foo', bar='baz'))
mock_requests.request.assert_called_once_with(
'HEAD', 'url', 'foo', verify=mock.ANY, bar='baz', headers=mock.ANY)
@mock.patch('acme.client.requests')
def test_send_request_verify_ssl(self, mock_requests):
# pylint: disable=protected-access
for verify in True, False:
mock_requests.request.reset_mock()
mock_requests.request.return_value = self.response
self.net.verify_ssl = verify
# pylint: disable=protected-access
self.assertEqual(
self.response, self.net._send_request('GET', 'url'))
mock_requests.request.assert_called_once_with(
'GET', 'url', verify=verify, headers=mock.ANY)
@mock.patch('acme.client.requests')
def test_send_request_user_agent(self, mock_requests):
mock_requests.request.return_value = self.response
# pylint: disable=protected-access
self.net._send_request('GET', 'url', headers={'bar': 'baz'})
mock_requests.request.assert_called_once_with(
'GET', 'url', verify=mock.ANY,
headers={'User-Agent': 'acme-python-test', 'bar': 'baz'})
self.net._send_request('GET', 'url', headers={'User-Agent': 'foo2'})
mock_requests.request.assert_called_with(
'GET', 'url', verify=mock.ANY, headers={'User-Agent': 'foo2'})
@mock.patch('acme.client.requests')
def test_requests_error_passthrough(self, mock_requests):
mock_requests.exceptions = requests.exceptions
mock_requests.request.side_effect = requests.exceptions.RequestException
# pylint: disable=protected-access
self.assertRaises(requests.exceptions.RequestException,
self.net._send_request, 'GET', 'uri')
class ClientNetworkWithMockedResponseTest(unittest.TestCase):
"""Tests for acme.client.ClientNetwork which mock out response."""
# pylint: disable=too-many-instance-attributes
def setUp(self):
from acme.client import ClientNetwork
self.net = ClientNetwork(key=None, alg=None)
self.response = mock.MagicMock(ok=True, status_code=http_client.OK)
self.response.headers = {}
self.response.links = {}
self.checked_response = mock.MagicMock()
self.obj = mock.MagicMock()
self.wrapped_obj = mock.MagicMock()
self.content_type = mock.sentinel.content_type
self.all_nonces = [jose.b64encode(b'Nonce'), jose.b64encode(b'Nonce2')]
self.available_nonces = self.all_nonces[:]
def send_request(*args, **kwargs):
# pylint: disable=unused-argument,missing-docstring
if self.available_nonces:
self.response.headers = {
self.net.REPLAY_NONCE_HEADER:
self.available_nonces.pop().decode()}
else:
self.response.headers = {}
return self.response
# pylint: disable=protected-access
self.net._send_request = self.send_request = mock.MagicMock(
side_effect=send_request)
self.net._check_response = self.check_response
self.net._wrap_in_jws = mock.MagicMock(return_value=self.wrapped_obj)
def check_response(self, response, content_type):
# pylint: disable=missing-docstring
self.assertEqual(self.response, response)
self.assertEqual(self.content_type, content_type)
return self.checked_response
def test_head(self):
self.assertEqual(self.response, self.net.head('url', 'foo', bar='baz'))
self.send_request.assert_called_once_with(
'HEAD', 'url', 'foo', bar='baz')
def test_get(self):
self.assertEqual(self.checked_response, self.net.get(
'url', content_type=self.content_type, bar='baz'))
self.send_request.assert_called_once_with('GET', 'url', bar='baz')
def test_post(self):
# pylint: disable=protected-access
self.assertEqual(self.checked_response, self.net.post(
'uri', self.obj, content_type=self.content_type))
self.net._wrap_in_jws.assert_called_once_with(
self.obj, jose.b64decode(self.all_nonces.pop()))
assert not self.available_nonces
self.assertRaises(errors.MissingNonce, self.net.post,
'uri', self.obj, content_type=self.content_type)
self.net._wrap_in_jws.assert_called_with(
self.obj, jose.b64decode(self.all_nonces.pop()))
def test_post_wrong_initial_nonce(self): # HEAD
self.available_nonces = [b'f', jose.b64encode(b'good')]
self.assertRaises(errors.BadNonce, self.net.post, 'uri',
self.obj, content_type=self.content_type)
def test_post_wrong_post_response_nonce(self):
self.available_nonces = [jose.b64encode(b'good'), b'f']
self.assertRaises(errors.BadNonce, self.net.post, 'uri',
self.obj, content_type=self.content_type)
def test_head_get_post_error_passthrough(self):
self.send_request.side_effect = requests.exceptions.RequestException
for method in self.net.head, self.net.get:
self.assertRaises(
requests.exceptions.RequestException, method, 'GET', 'uri')
self.assertRaises(requests.exceptions.RequestException,
self.net.post, 'uri', obj=self.obj)
if __name__ == '__main__':
unittest.main() # pragma: no cover
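

# --- Illustrative sketch (not part of the original test module) --------------
# A minimal, hedged re-statement of the Retry-After semantics that the
# test_retry_after_* cases above exercise: an integer header means
# "now + N seconds", an HTTP-date header is used verbatim, and anything
# unparsable (or a missing header) falls back to "now + default". This mirrors
# the tests, not the actual acme.client implementation. Never called here.
def _example_retry_after(headers, default, now):
    import email.utils
    value = headers.get('Retry-After')
    if value is None:
        return now + datetime.timedelta(seconds=default)
    try:
        return now + datetime.timedelta(seconds=int(value))
    except ValueError:
        pass
    try:
        parsed = email.utils.parsedate_tz(value)
        if parsed is None:
            raise ValueError(value)
        return datetime.datetime(*parsed[:6])
    except (ValueError, OverflowError):
        return now + datetime.timedelta(seconds=default)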
|
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.krmapihosting import instance_pb2
from google3.cloud.graphite.mmv2.services.google.krmapihosting import instance_pb2_grpc
from typing import List
class Instance(object):
def __init__(
self,
name: str = None,
labels: dict = None,
bundles_config: dict = None,
use_private_endpoint: bool = None,
gke_resource_link: str = None,
state: str = None,
management_config: dict = None,
project: str = None,
location: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.labels = labels
self.bundles_config = bundles_config
self.use_private_endpoint = use_private_endpoint
self.management_config = management_config
self.project = project
self.location = location
self.service_account_file = service_account_file
def apply(self):
stub = instance_pb2_grpc.KrmapihostingAlphaInstanceServiceStub(
channel.Channel()
)
request = instance_pb2.ApplyKrmapihostingAlphaInstanceRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if InstanceBundlesConfig.to_proto(self.bundles_config):
request.resource.bundles_config.CopyFrom(
InstanceBundlesConfig.to_proto(self.bundles_config)
)
else:
request.resource.ClearField("bundles_config")
if Primitive.to_proto(self.use_private_endpoint):
request.resource.use_private_endpoint = Primitive.to_proto(
self.use_private_endpoint
)
if InstanceManagementConfig.to_proto(self.management_config):
request.resource.management_config.CopyFrom(
InstanceManagementConfig.to_proto(self.management_config)
)
else:
request.resource.ClearField("management_config")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
request.service_account_file = self.service_account_file
response = stub.ApplyKrmapihostingAlphaInstance(request)
self.name = Primitive.from_proto(response.name)
self.labels = Primitive.from_proto(response.labels)
self.bundles_config = InstanceBundlesConfig.from_proto(response.bundles_config)
self.use_private_endpoint = Primitive.from_proto(response.use_private_endpoint)
self.gke_resource_link = Primitive.from_proto(response.gke_resource_link)
self.state = InstanceStateEnum.from_proto(response.state)
self.management_config = InstanceManagementConfig.from_proto(
response.management_config
)
self.project = Primitive.from_proto(response.project)
self.location = Primitive.from_proto(response.location)
def delete(self):
stub = instance_pb2_grpc.KrmapihostingAlphaInstanceServiceStub(
channel.Channel()
)
request = instance_pb2.DeleteKrmapihostingAlphaInstanceRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
request.resource.labels = Primitive.to_proto(self.labels)
if InstanceBundlesConfig.to_proto(self.bundles_config):
request.resource.bundles_config.CopyFrom(
InstanceBundlesConfig.to_proto(self.bundles_config)
)
else:
request.resource.ClearField("bundles_config")
if Primitive.to_proto(self.use_private_endpoint):
request.resource.use_private_endpoint = Primitive.to_proto(
self.use_private_endpoint
)
if InstanceManagementConfig.to_proto(self.management_config):
request.resource.management_config.CopyFrom(
InstanceManagementConfig.to_proto(self.management_config)
)
else:
request.resource.ClearField("management_config")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
request.resource.location = Primitive.to_proto(self.location)
response = stub.DeleteKrmapihostingAlphaInstance(request)
@classmethod
def list(self, project, location, service_account_file=""):
stub = instance_pb2_grpc.KrmapihostingAlphaInstanceServiceStub(
channel.Channel()
)
request = instance_pb2.ListKrmapihostingAlphaInstanceRequest()
request.service_account_file = service_account_file
request.Project = project
request.Location = location
return stub.ListKrmapihostingAlphaInstance(request).items
def to_proto(self):
resource = instance_pb2.KrmapihostingAlphaInstance()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.labels):
resource.labels = Primitive.to_proto(self.labels)
if InstanceBundlesConfig.to_proto(self.bundles_config):
resource.bundles_config.CopyFrom(
InstanceBundlesConfig.to_proto(self.bundles_config)
)
else:
resource.ClearField("bundles_config")
if Primitive.to_proto(self.use_private_endpoint):
resource.use_private_endpoint = Primitive.to_proto(
self.use_private_endpoint
)
if InstanceManagementConfig.to_proto(self.management_config):
resource.management_config.CopyFrom(
InstanceManagementConfig.to_proto(self.management_config)
)
else:
resource.ClearField("management_config")
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
if Primitive.to_proto(self.location):
resource.location = Primitive.to_proto(self.location)
return resource
class InstanceBundlesConfig(object):
def __init__(self, config_controller_config: dict = None):
self.config_controller_config = config_controller_config
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_pb2.KrmapihostingAlphaInstanceBundlesConfig()
if InstanceBundlesConfigConfigControllerConfig.to_proto(
resource.config_controller_config
):
res.config_controller_config.CopyFrom(
InstanceBundlesConfigConfigControllerConfig.to_proto(
resource.config_controller_config
)
)
else:
res.ClearField("config_controller_config")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceBundlesConfig(
config_controller_config=InstanceBundlesConfigConfigControllerConfig.from_proto(
resource.config_controller_config
),
)
class InstanceBundlesConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceBundlesConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceBundlesConfig.from_proto(i) for i in resources]
class InstanceBundlesConfigConfigControllerConfig(object):
def __init__(self, enabled: bool = None):
self.enabled = enabled
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_pb2.KrmapihostingAlphaInstanceBundlesConfigConfigControllerConfig()
)
if Primitive.to_proto(resource.enabled):
res.enabled = Primitive.to_proto(resource.enabled)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceBundlesConfigConfigControllerConfig(
enabled=Primitive.from_proto(resource.enabled),
)
class InstanceBundlesConfigConfigControllerConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceBundlesConfigConfigControllerConfig.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceBundlesConfigConfigControllerConfig.from_proto(i) for i in resources
]
class InstanceManagementConfig(object):
def __init__(self, standard_management_config: dict = None):
self.standard_management_config = standard_management_config
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_pb2.KrmapihostingAlphaInstanceManagementConfig()
if InstanceManagementConfigStandardManagementConfig.to_proto(
resource.standard_management_config
):
res.standard_management_config.CopyFrom(
InstanceManagementConfigStandardManagementConfig.to_proto(
resource.standard_management_config
)
)
else:
res.ClearField("standard_management_config")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceManagementConfig(
standard_management_config=InstanceManagementConfigStandardManagementConfig.from_proto(
resource.standard_management_config
),
)
class InstanceManagementConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceManagementConfig.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceManagementConfig.from_proto(i) for i in resources]
class InstanceManagementConfigStandardManagementConfig(object):
def __init__(
self,
network: str = None,
master_ipv4_cidr_block: str = None,
man_block: str = None,
cluster_cidr_block: str = None,
services_cidr_block: str = None,
cluster_named_range: str = None,
services_named_range: str = None,
):
self.network = network
self.master_ipv4_cidr_block = master_ipv4_cidr_block
self.man_block = man_block
self.cluster_cidr_block = cluster_cidr_block
self.services_cidr_block = services_cidr_block
self.cluster_named_range = cluster_named_range
self.services_named_range = services_named_range
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_pb2.KrmapihostingAlphaInstanceManagementConfigStandardManagementConfig()
)
if Primitive.to_proto(resource.network):
res.network = Primitive.to_proto(resource.network)
if Primitive.to_proto(resource.master_ipv4_cidr_block):
res.master_ipv4_cidr_block = Primitive.to_proto(
resource.master_ipv4_cidr_block
)
if Primitive.to_proto(resource.man_block):
res.man_block = Primitive.to_proto(resource.man_block)
if Primitive.to_proto(resource.cluster_cidr_block):
res.cluster_cidr_block = Primitive.to_proto(resource.cluster_cidr_block)
if Primitive.to_proto(resource.services_cidr_block):
res.services_cidr_block = Primitive.to_proto(resource.services_cidr_block)
if Primitive.to_proto(resource.cluster_named_range):
res.cluster_named_range = Primitive.to_proto(resource.cluster_named_range)
if Primitive.to_proto(resource.services_named_range):
res.services_named_range = Primitive.to_proto(resource.services_named_range)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceManagementConfigStandardManagementConfig(
network=Primitive.from_proto(resource.network),
master_ipv4_cidr_block=Primitive.from_proto(
resource.master_ipv4_cidr_block
),
man_block=Primitive.from_proto(resource.man_block),
cluster_cidr_block=Primitive.from_proto(resource.cluster_cidr_block),
services_cidr_block=Primitive.from_proto(resource.services_cidr_block),
cluster_named_range=Primitive.from_proto(resource.cluster_named_range),
services_named_range=Primitive.from_proto(resource.services_named_range),
)
class InstanceManagementConfigStandardManagementConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceManagementConfigStandardManagementConfig.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceManagementConfigStandardManagementConfig.from_proto(i)
for i in resources
]
class InstanceStateEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_pb2.KrmapihostingAlphaInstanceStateEnum.Value(
"KrmapihostingAlphaInstanceStateEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_pb2.KrmapihostingAlphaInstanceStateEnum.Name(resource)[
len("KrmapihostingAlphaInstanceStateEnum") :
]
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
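# Illustrative usage sketch (not part of the generated client; the instance
# name, CIDR, project, location, and key path below are hypothetical):
#
#   instance = Instance(
#       name="example-instance",
#       bundles_config=InstanceBundlesConfig(
#           config_controller_config=InstanceBundlesConfigConfigControllerConfig(
#               enabled=True
#           )
#       ),
#       management_config=InstanceManagementConfig(
#           standard_management_config=InstanceManagementConfigStandardManagementConfig(
#               network="default",
#               master_ipv4_cidr_block="172.16.0.128/28",
#           )
#       ),
#       project="my-project",
#       location="us-central1",
#       service_account_file="/path/to/key.json",
#   )
#   instance.apply()   # create or update the resource; output-only fields such
#                      # as state and gke_resource_link are refreshed afterwards
#   instance.delete()  # tear the resource down again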
|
|
#!/usr/bin/env python
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
import sys, optparse, subprocess, re, os, time, glob
import os.path as P
# The path to the ASP python files.
basepath = os.path.abspath(sys.path[0])
pythonpath = os.path.abspath(basepath + '/../Python') # for dev ASP
libexecpath = os.path.abspath(basepath + '/../libexec') # for packaged ASP
sys.path.insert(0, basepath) # prepend to Python path
sys.path.insert(0, pythonpath)
sys.path.insert(0, libexecpath)
from asp_system_utils import *
import asp_system_utils
asp_system_utils.verify_python_version_is_supported()
def get_asp_version():
'''Returns the current ASP version number'''
return "[ASP 2.5.2]"
class Step:
# The ids of individual stereo steps
pprc = 0
corr = 1
rfne = 2
fltr = 3
tri = 4
# Utilities to ensure that the option parser does not garble negative
# integers such as '-365' into '-3'.
escapeStr='esc_rand_str'
def escape_vals(vals):
for index, val in enumerate(vals):
p = re.match("^-\d+$", val)
if p:
vals[index] = escapeStr + val
return vals
def unescape_vals(vals):
for index, val in enumerate(vals):
p = re.match("^" + escapeStr + "(-\d+)$", val)
if p:
vals[index] = p.group(1)
return vals
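# Example of the escape/unescape round trip (illustrative only):
#   escape_vals(['-365', 'run/out'])               -> ['esc_rand_str-365', 'run/out']
#   unescape_vals(['esc_rand_str-365', 'run/out']) -> ['-365', 'run/out']
# Values that are not bare negative integers pass through unchanged.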
# Custom option parser that will ignore unknown options
class PassThroughOptionParser(optparse.OptionParser):
def _process_args( self, largs, rargs, values ):
rargs=escape_vals(rargs)
largs=escape_vals(largs)
while rargs:
try:
optparse.OptionParser._process_args(self,largs,rargs,values)
except (optparse.BadOptionError) as e:
if sys.version_info < (2, 6, 0):
# Port to Python 2.4
p = re.match("^.*?no such option:\s*(.*?)$", e.msg)
if p:
largs.append(p.group(1))
else:
largs.append(e.opt_str)
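# For example (illustrative): parsing ['--threads', '4', '--unknown-opt', '7']
# with a PassThroughOptionParser that only defines --threads leaves
# '--unknown-opt' and '7' among the leftover arguments instead of raising
# "no such option".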
class BBox:
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
def name_str(self):
return "%i_%i_%i_%i" % ( self.x, self.y, self.width, self.height )
def crop_str(self):
return ["--trans-crop-win",str(self.x),
str(self.y),str(self.width),str(self.height)]
def intersect_boxes(A, B):
axmin = A.x; axmax = A.x + A.width; aymin = A.y; aymax = A.y + A.height
bxmin = B.x; bxmax = B.x + B.width; bymin = B.y; bymax = B.y + B.height
xmin = max(axmin, bxmin); xmax = min(axmax, bxmax)
ymin = max(aymin, bymin); ymax = min(aymax, bymax)
C = BBox(0, 0, 0, 0)
C.x = xmin; C.width = xmax - xmin
if (C.width < 0): C.width = 0
C.y = ymin; C.height = ymax - ymin
if (C.height < 0): C.height = 0
return C
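# For example (illustrative): intersect_boxes(BBox(0, 0, 10, 10), BBox(5, 5, 10, 10))
# yields a BBox with x=5, y=5, width=5, height=5, while disjoint boxes produce
# a box whose width and/or height is clamped to zero.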
# A very simple wrapper around subprocess
def generic_run(cmd, verbose):
cmd_str = " ".join(cmd)
if verbose:
print(cmd_str)
try:
code = subprocess.call(cmd)
except OSError as e:
raise Exception('%s: %s' % (cmd_str, e))
if code != 0:
raise Exception('Failed to run: ' + cmd_str)
# Run one of the stereo executables
def stereo_run(prog, args, opt, **kw):
binpath = bin_path(prog)
call = [binpath]
call.extend(args)
if opt.dryrun or opt.verbose: print(" ".join(call))
if opt.dryrun: return
try:
t_start = time.time()
code = subprocess.call(call)
if opt.verbose:
wall_s = time.time() - t_start
print('Wall time (s): {0:.1f}\n'.format(wall_s))
except OSError as e:
raise Exception('%s: %s' % (binpath, e))
if code != 0:
raise Exception('Stereo step ' + kw['msg'] + ' failed')
# When printing the version, don't throw an error. Just exit quietly.
def print_version_and_exit(opt, args):
args.append('-v')
try:
stereo_run('stereo_parse', args, opt, msg='')
    except Exception:
pass
sys.exit(0)
def run_sparse_disp(args, opt):
settings = run_and_parse_output( "stereo_parse", args, ",", opt.verbose )
left_img = settings["trans_left_image"]
right_img = settings["trans_right_image"]
out_prefix = settings["out_prefix"]
sparse_args = left_img + right_img + out_prefix + ['--nodata-value', str(0)]
if opt.sparse_disp_options is not None:
sparse_args += opt.sparse_disp_options.split()
    # Pass the number of threads to sparse_disp.
    # Any value given in sparse_disp_options takes precedence.
if not any('--processes' in s for s in sparse_args):
num_threads = 0
if hasattr(opt, 'threads') and opt.threads is not None and opt.threads > 0:
num_threads = opt.threads
if hasattr(opt, 'threads_single') and opt.threads_single is not None and opt.threads_single > 0:
num_threads = opt.threads_single
if num_threads > 0:
sparse_args += ['--processes', str(num_threads)]
# Set the env variables needed by sparse_disp and its dependencies.
# We set those here as the LD_LIBRARY_PATH set below is not the
# right one for ASP executables.
if os.environ.get('ASP_PYTHON_MODULES_PATH') is None:
        die('\nERROR: Must set the environment variable ASP_PYTHON_MODULES_PATH.', code=2)
if os.environ.get('LD_LIBRARY_PATH') is None:
os.environ['LD_LIBRARY_PATH'] = os.environ.get('ASP_PYTHON_MODULES_PATH')
else:
os.environ['LD_LIBRARY_PATH'] = os.environ.get('LD_LIBRARY_PATH') + \
":" + os.environ.get('ASP_PYTHON_MODULES_PATH')
if os.environ.get('PYTHONPATH') is None:
os.environ['PYTHONPATH'] = os.environ.get('ASP_PYTHON_MODULES_PATH')
else:
os.environ['PYTHONPATH'] = os.environ.get('ASP_PYTHON_MODULES_PATH') + ":" + \
os.environ.get('PYTHONPATH')
stereo_run('sparse_disp', sparse_args, opt,
msg='%d: Low-res correlation with sparse_disp' % Step.corr)
# Do low-res correlation.
def calc_lowres_disp(args, opt, sep):
if ( opt.seed_mode == 3 ):
run_sparse_disp(args, opt)
else:
        tmp_args = args[:] # copy, so the original args list is not modified
tmp_args.extend(['--compute-low-res-disparity-only'])
        # Invoke stereo_run here so that its output is visible on screen.
stereo_run('stereo_corr', tmp_args, opt, msg='')
    # Attach a georeference to D_sub and D_sub_spread if appropriate.
tmp_args = args[:]
tmp_args.append('--attach-georeference-to-lowres-disparity')
run_and_parse_output("stereo_parse", tmp_args, sep, opt.verbose)
def parse_corr_seed_mode(filename):
mode = None
    # Extract corr-seed-mode from the given settings file.
if not os.path.isfile(filename):
return mode
fh = open(filename, "r")
for line in fh:
        line = re.sub(r'\#.*?$', '', line) # wipe comments
        matches = re.match(r'^\s*corr-seed-mode\s+(\d+)', line)
if matches:
mode = int(matches.group(1))
fh.close()
return mode
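# For example (illustrative): given a stereo.default file containing the line
#   corr-seed-mode 1   # use low-res disparity from stereo
# parse_corr_seed_mode() returns the integer 1; it returns None if the file is
# missing or does not set corr-seed-mode.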
def run_multiview(prog_name, args, extra_args, entry_point, stop_point,
verbose, settings):
    # Invoke multiview stereo processing, either using 'stereo' or
    # 'parallel_stereo', depending on the caller of this function.
    # Multi-view stereo amounts to running all stereo steps except
    # triangulation for each pair formed by the first image and one
    # other image in the sequence; triangulation is then done using
    # all images. The precise stereo command to use for each pair was
    # already generated by stereo_parse, and here we extract it from
    # 'settings'. We must respect the caller's entry and stop points.
    # Make sure to use the same Python interpreter as the parent process.
python_path = sys.executable
# Run all steps but tri
for s in sorted(settings.keys()):
m = re.match('multiview_command', s)
if not m: continue
local_args = settings[s][:]
local_args[0] = prog_name
local_entry = entry_point
local_stop = stop_point
if local_stop > Step.tri:
local_stop = Step.tri
local_args.extend(['--entry-point', str(local_entry)])
local_args.extend(['--stop-point', str(local_stop)])
local_args.extend(extra_args)
cmd = [python_path] + local_args
# Go on even if some of the runs fail
try:
generic_run(cmd, verbose)
        except Exception:
pass
# Run tri
local_args = [prog_name]
local_args.extend(args)
local_entry = Step.tri
local_stop = stop_point
local_args.extend(['--entry-point', str(local_entry)])
local_args.extend(['--stop-point', str(local_stop)])
local_args.extend(extra_args)
cmd = [python_path] + local_args
generic_run(cmd, verbose)
    # Ensure that the L.tif file is present in the run directory by
    # sym-linking it from the subdirectory of one of the pairs. It is
    # convenient to have L.tif for when we later run:
    #   point2dem PC.tif --orthoimage L.tif
    # The code below amounts to:
    #   ln -s out_prefix-pair1/1-L.tif out_prefix-L.tif
out_prefix = settings['out_prefix'][0]
sym_f = out_prefix + '-L.tif'
if not os.path.lexists(sym_f):
files = glob.glob(out_prefix + '-pair*/*-L.tif')
if len(files) > 0:
f = files[0]
run_dir = os.path.dirname(out_prefix)
rel_f = os.path.relpath(f, run_dir)
os.symlink(rel_f, sym_f)
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'BCCFBabyPage.short_title'
db.delete_column(u'bccf_bccfbabypage', 'short_title')
def backwards(self, orm):
# Adding field 'BCCFBabyPage.short_title'
db.add_column(u'bccf_bccfbabypage', 'short_title',
self.gf('django.db.models.fields.CharField')(default='Tab', max_length=20),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'bccf.article': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Article'},
'attached_document': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.bccfbabypage': {
'Meta': {'ordering': "('order',)", 'object_name': 'BCCFBabyPage', '_ormbases': [u'bccf.BCCFChildPage']},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'bccf.bccfchildpage': {
'Meta': {'ordering': "('-created',)", 'object_name': 'BCCFChildPage'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'bccf_topic': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['bccf.BCCFTopic']", 'null': 'True', 'blank': 'True'}),
u'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'gparent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.BCCFPage']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'page_for': ('django.db.models.fields.CharField', [], {'default': "'parent'", 'max_length': '13', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.BCCFChildPage']", 'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
u'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'rating_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'bccf.bccfgenericpage': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'BCCFGenericPage', '_ormbases': [u'bccf.BCCFChildPage']},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'show_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_rating': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_resources': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'bccf.bccfpage': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'BCCFPage', '_ormbases': [u'pages.Page']},
'carousel_color': ('django.db.models.fields.CharField', [], {'default': "'dgreen-list'", 'max_length': '11'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'marquee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.PageMarquee']", 'null': 'True', 'blank': 'True'}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.bccftopic': {
'Meta': {'object_name': 'BCCFTopic'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'carousel_color': ('django.db.models.fields.CharField', [], {'default': "'dgreen-list'", 'max_length': '11'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'marquee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.PageMarquee']", 'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'bccf.blog': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Blog', '_ormbases': [u'bccf.BCCFChildPage']},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.campaign': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Campaign'},
'approve': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'approved_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'by_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'campaigns'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'bccf.downloadableform': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'DownloadableForm'},
'attached_document': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.event': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Event', '_ormbases': [u'bccf.BCCFChildPage']},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'date_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'event_product': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event-product'", 'null': 'True', 'to': u"orm['shop.Product']"}),
'full': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'location_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'location_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'location_street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'location_street2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'max_seats': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'null': 'True', 'blank': 'True'}),
'price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'program': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'program'", 'null': 'True', 'to': u"orm['bccf.Program']"}),
'provider': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'events'", 'null': 'True', 'to': u"orm['auth.User']"}),
'survey_after': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'survey_after'", 'null': 'True', 'to': u"orm['builder.FormPublished']"}),
'survey_before': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'survey_before'", 'null': 'True', 'to': u"orm['builder.FormPublished']"})
},
u'bccf.eventregistration': {
'Meta': {'object_name': 'EventRegistration'},
'event': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event'", 'to': u"orm['bccf.Event']"}),
'event_order': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'event-order'", 'null': 'True', 'blank': 'True', 'to': u"orm['shop.Order']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paid': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'passed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'registration_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'reminder': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'bccf.footermarquee': {
'Meta': {'object_name': 'FooterMarquee'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'bccf.footermarqueeslide': {
'Meta': {'object_name': 'FooterMarqueeSlide'},
'caption': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'marquee': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['bccf.FooterMarquee']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'null': 'True', 'blank': 'True'})
},
u'bccf.homemarquee': {
'Meta': {'object_name': 'HomeMarquee'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'bccf.homemarqueeslide': {
'Meta': {'object_name': 'HomeMarqueeSlide'},
'caption': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'linkLabel': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'marquee': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['bccf.HomeMarquee']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'bccf.magazine': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Magazine'},
'attached_document': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.pagemarquee': {
'Meta': {'object_name': 'PageMarquee'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'bccf.pagemarqueeslide': {
'Meta': {'object_name': 'PageMarqueeSlide'},
'caption': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'linkLabel': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'null': 'True', 'blank': 'True'}),
'marquee': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['bccf.PageMarquee']", 'symmetrical': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'bccf.podcast': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Podcast', '_ormbases': [u'bccf.BCCFChildPage']},
'attached_audio': ('mezzanine.core.fields.FileField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.professionalpayment': {
'Meta': {'ordering': "('-paid_on',)", 'object_name': 'ProfessionalPayment'},
'amount': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'paid_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'paid_to'", 'to': u"orm['auth.User']"})
},
u'bccf.program': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Program', '_ormbases': [u'bccf.BCCFChildPage']},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'user_added': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'bccf.programrequest': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ProgramRequest'},
'accept': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'accepted_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('mezzanine.core.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'program_requests'", 'to': u"orm['auth.User']"})
},
u'bccf.settings': {
'Meta': {'object_name': 'Settings'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'bccf.tipsheet': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'TipSheet'},
'attached_document': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'})
},
u'bccf.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'accreditation': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['bccf.Program']", 'null': 'True', 'blank': 'True'}),
'autosubscribe': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'avatar': ('bccf.fields.MyImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'male'", 'max_length': '6', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_mailing_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_forum_moderator': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'job_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '10', 'blank': 'True'}),
'linkedin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'membership_level': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '1'}),
'membership_order': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'order'", 'null': 'True', 'to': u"orm['shop.Order']"}),
'membership_type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': u"orm['bccf.UserProfile']"}),
'payment': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'phone_mobile': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'phone_primary': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'phone_work': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'photo': ('bccf.fields.MyImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pinterest': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'post_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'province': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
u'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'rating_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'requested_cancellation': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'show_in_list': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_signatures': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'signature': ('django.db.models.fields.TextField', [], {'max_length': '1024', 'blank': 'True'}),
'signature_html': ('django.db.models.fields.TextField', [], {'max_length': '1054', 'blank': 'True'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'street_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'street_3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'time_zone': ('django.db.models.fields.FloatField', [], {'default': '3.0'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'youtube': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'bccf.video': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Video', '_ormbases': [u'bccf.BCCFChildPage']},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'video_url': ('embed_video.fields.EmbedVideoField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
u'builder.formpublished': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'FormPublished'},
u'bccfchildpage_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['bccf.BCCFChildPage']", 'unique': 'True', 'primary_key': 'True'}),
'closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'form_structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['builder.FormStructure']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'builder.formstructure': {
'Meta': {'object_name': 'FormStructure'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Form Structure'", 'max_length': '100'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'JSON'", 'max_length': '4'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'pages.page': {
'Meta': {'ordering': "(u'titles',)", 'object_name': 'Page'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '(1, 2, 3)', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'children'", 'null': 'True', 'to': u"orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
u'shop.category': {
'Meta': {'ordering': "(u'_order',)", 'object_name': 'Category', '_ormbases': [u'pages.Page']},
'carousel_color': ('django.db.models.fields.CharField', [], {'default': "'dgreen-list'", 'max_length': '11'}),
'combined': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'featured_image': ('mezzanine.core.fields.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'marquee': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['bccf.PageMarquee']", 'null': 'True', 'blank': 'True'}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product_options'", 'blank': 'True', 'to': u"orm['shop.ProductOption']"}),
u'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'price_max': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'price_min': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['shop.Sale']", 'null': 'True', 'blank': 'True'})
},
u'shop.order': {
'Meta': {'ordering': "('-id',)", 'object_name': 'Order'},
'additional_instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'billing_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'billing_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'billing_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'discount_code': ('cartridge.shop.fields.DiscountCodeField', [], {'max_length': '20', 'blank': 'True'}),
'discount_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'payment_method': ('django.db.models.fields.CharField', [], {'default': "'paypal'", 'max_length': '6'}),
'shipping_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'shipping_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'shipping_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'shipping_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tax_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'tax_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'shop.product': {
'Meta': {'object_name': 'Product'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['shop.Category']", 'symmetrical': 'False', 'blank': 'True'}),
u'comments_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'in_sitemap': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
u'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'rating_sum': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products_rel_+'", 'blank': 'True', 'to': u"orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'upsell_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'upsell_products_rel_+'", 'blank': 'True', 'to': u"orm['shop.Product']"})
},
u'shop.productoption': {
'Meta': {'object_name': 'ProductOption'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
u'shop.sale': {
'Meta': {'object_name': 'Sale'},
'active': ('django.db.models.fields.BooleanField', [], {}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sale_related'", 'blank': 'True', 'to': u"orm['shop.Category']"}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['bccf']
|
|
"""
Provides functionality to interact with image processing services.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/image_processing/
"""
import asyncio
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_NAME, CONF_ENTITY_ID, CONF_NAME)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.loader import bind_hass
from homeassistant.util.async_ import run_callback_threadsafe
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'image_processing'
DEPENDENCIES = ['camera']
SCAN_INTERVAL = timedelta(seconds=10)
DEVICE_CLASSES = [
'alpr', # Automatic license plate recognition
'face', # Face
'ocr', # OCR
]
SERVICE_SCAN = 'scan'
EVENT_DETECT_FACE = 'image_processing.detect_face'
ATTR_AGE = 'age'
ATTR_CONFIDENCE = 'confidence'
ATTR_FACES = 'faces'
ATTR_GENDER = 'gender'
ATTR_GLASSES = 'glasses'
ATTR_MOTION = 'motion'
ATTR_TOTAL_FACES = 'total_faces'
CONF_SOURCE = 'source'
CONF_CONFIDENCE = 'confidence'
DEFAULT_TIMEOUT = 10
DEFAULT_CONFIDENCE = 80
SOURCE_SCHEMA = vol.Schema({
vol.Required(CONF_ENTITY_ID): cv.entity_domain('camera'),
vol.Optional(CONF_NAME): cv.string,
})
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SOURCE): vol.All(cv.ensure_list, [SOURCE_SCHEMA]),
vol.Optional(CONF_CONFIDENCE, default=DEFAULT_CONFIDENCE):
vol.All(vol.Coerce(float), vol.Range(min=0, max=100)),
})
SERVICE_SCAN_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
@bind_hass
def scan(hass, entity_id=None):
"""Force process of all cameras or given entity."""
hass.add_job(async_scan, hass, entity_id)
@callback
@bind_hass
def async_scan(hass, entity_id=None):
"""Force process of all cameras or given entity."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.async_add_job(hass.services.async_call(DOMAIN, SERVICE_SCAN, data))
async def async_setup(hass, config):
"""Set up the image processing."""
component = EntityComponent(_LOGGER, DOMAIN, hass, SCAN_INTERVAL)
await component.async_setup(config)
async def async_scan_service(service):
"""Service handler for scan."""
image_entities = component.async_extract_from_service(service)
update_task = [entity.async_update_ha_state(True) for
entity in image_entities]
if update_task:
await asyncio.wait(update_task, loop=hass.loop)
hass.services.async_register(
DOMAIN, SERVICE_SCAN, async_scan_service,
schema=SERVICE_SCAN_SCHEMA)
return True
class ImageProcessingEntity(Entity):
"""Base entity class for image processing."""
timeout = DEFAULT_TIMEOUT
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return None
@property
def confidence(self):
"""Return minimum confidence for do some things."""
return None
def process_image(self, image):
"""Process image."""
raise NotImplementedError()
def async_process_image(self, image):
"""Process image.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.async_add_job(self.process_image, image)
async def async_update(self):
"""Update image and process it.
This method is a coroutine.
"""
camera = self.hass.components.camera
image = None
try:
image = await camera.async_get_image(
self.camera_entity, timeout=self.timeout)
except HomeAssistantError as err:
_LOGGER.error("Error on receive image from entity: %s", err)
return
# process image data
await self.async_process_image(image.content)
class ImageProcessingFaceEntity(ImageProcessingEntity):
"""Base entity class for face image processing."""
def __init__(self):
"""Initialize base face identify/verify entity."""
self.faces = []
self.total_faces = 0
@property
def state(self):
"""Return the state of the entity."""
confidence = 0
state = None
# No confidence support
if not self.confidence:
return self.total_faces
# Search high confidence
for face in self.faces:
if ATTR_CONFIDENCE not in face:
continue
f_co = face[ATTR_CONFIDENCE]
if f_co > confidence:
confidence = f_co
for attr in [ATTR_NAME, ATTR_MOTION]:
if attr in face:
state = face[attr]
break
return state
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return 'face'
@property
def state_attributes(self):
"""Return device specific state attributes."""
attr = {
ATTR_FACES: self.faces,
ATTR_TOTAL_FACES: self.total_faces,
}
return attr
def process_faces(self, faces, total):
"""Send event with detected faces and store data."""
run_callback_threadsafe(
self.hass.loop, self.async_process_faces, faces, total).result()
@callback
def async_process_faces(self, faces, total):
"""Send event with detected faces and store data.
        faces is a list of dicts in the following format:
[
{
ATTR_CONFIDENCE: 80,
ATTR_NAME: 'Name',
ATTR_AGE: 12.0,
ATTR_GENDER: 'man',
ATTR_MOTION: 'smile',
ATTR_GLASSES: 'sunglasses'
},
]
This method must be run in the event loop.
"""
# Send events
for face in faces:
if ATTR_CONFIDENCE in face and self.confidence:
if face[ATTR_CONFIDENCE] < self.confidence:
continue
face.update({ATTR_ENTITY_ID: self.entity_id})
self.hass.async_add_job(
self.hass.bus.async_fire, EVENT_DETECT_FACE, face
)
# Update entity store
self.faces = faces
self.total_faces = total
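# Hedged illustration (not part of Home Assistant): a minimal sketch of how a
# platform might subclass ImageProcessingEntity above. The camera entity id and
# the byte-counting "processing" are made-up placeholders for demonstration.
class _ExampleImageSizeEntity(ImageProcessingEntity):
    """Toy entity that records the size of the last image it received."""

    def __init__(self, camera_entity_id):
        """Store the (hypothetical) source camera entity id."""
        self._camera_entity_id = camera_entity_id
        self._last_size = None

    @property
    def camera_entity(self):
        """Return the camera entity id to process pictures from."""
        return self._camera_entity_id

    @property
    def state(self):
        """Return the byte length of the last processed image."""
        return self._last_size

    def process_image(self, image):
        """Receive the raw image bytes fetched by async_update."""
        self._last_size = len(image)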
|
|
"""
:mod:`pandas.io.xml` is a module for reading XML.
"""
from __future__ import annotations
import io
from pandas._typing import (
Buffer,
CompressionOptions,
FilePathOrBuffer,
StorageOptions,
)
from pandas.compat._optional import import_optional_dependency
from pandas.errors import (
AbstractMethodError,
ParserError,
)
from pandas.util._decorators import doc
from pandas.core.dtypes.common import is_list_like
from pandas.core.frame import DataFrame
from pandas.core.shared_docs import _shared_docs
from pandas.io.common import (
file_exists,
get_handle,
is_fsspec_url,
is_url,
stringify_path,
)
from pandas.io.parsers import TextParser
class _XMLFrameParser:
"""
Internal subclass to parse XML into DataFrames.
Parameters
----------
    path_or_buffer : a valid XML str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file.
    xpath : str
        The XPath expression to parse the required set of nodes for
        migration to a DataFrame. `etree` supports limited XPath.
    namespaces : dict
        The namespaces defined in XML document (`xmlns:namespace='URI'`)
        as dicts with key being namespace and value the URI.
elems_only : bool
Parse only the child elements at the specified `xpath`.
attrs_only : bool
Parse only the attributes at the specified `xpath`.
    names : list
        Column names for the DataFrame of parsed XML data.
encoding : str
Encoding of xml object or document.
stylesheet : str or file-like
URL, file, file-like object, or a raw string containing XSLT,
`etree` does not support XSLT but retained for consistency.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
Compression type for on-the-fly decompression of on-disk data.
If 'infer', then use extension for gzip, bz2, zip or xz.
storage_options : dict, optional
Extra options that make sense for a particular storage connection,
e.g. host, port, username, password, etc.,
See also
--------
pandas.io.xml._EtreeFrameParser
pandas.io.xml._LxmlFrameParser
Notes
-----
    To subclass this class effectively you must override the following methods:
* :func:`parse_data`
* :func:`_parse_nodes`
* :func:`_parse_doc`
* :func:`_validate_names`
* :func:`_validate_path`
See each method's respective documentation for details on their
functionality.
"""
def __init__(
self,
path_or_buffer,
xpath,
namespaces,
elems_only,
attrs_only,
names,
encoding,
stylesheet,
compression,
storage_options,
) -> None:
self.path_or_buffer = path_or_buffer
self.xpath = xpath
self.namespaces = namespaces
self.elems_only = elems_only
self.attrs_only = attrs_only
self.names = names
self.encoding = encoding
self.stylesheet = stylesheet
self.is_style = None
self.compression = compression
self.storage_options = storage_options
def parse_data(self) -> list[dict[str, str | None]]:
"""
Parse xml data.
This method will call the other internal methods to
validate xpath, names, parse and return specific nodes.
"""
raise AbstractMethodError(self)
def _parse_nodes(self) -> list[dict[str, str | None]]:
"""
Parse xml nodes.
This method will parse the children and attributes of elements
in xpath, conditionally for only elements, only attributes
or both while optionally renaming node names.
Raises
------
ValueError
* If only elements and only attributes are specified.
Notes
-----
        Namespace URIs will be removed from returned node values. Also,
        elements with missing children or attributes compared to siblings
        will have optional keys filled with None values.
"""
raise AbstractMethodError(self)
def _validate_path(self) -> None:
"""
Validate xpath.
This method checks for syntax, evaluation, or empty nodes return.
Raises
------
SyntaxError
            * If xpath is not supported or there are issues with namespaces.
ValueError
            * If xpath does not return any nodes.
"""
raise AbstractMethodError(self)
def _validate_names(self) -> None:
"""
Validate names.
        This method will check if `names` is list-like and aligns
        with the length of parsed nodes.
Raises
------
ValueError
            * If names is list-like but does not match the length of parsed nodes.
"""
raise AbstractMethodError(self)
def _parse_doc(self, raw_doc) -> bytes:
"""
Build tree from path_or_buffer.
This method will parse XML object into tree
either from string/bytes or file location.
"""
raise AbstractMethodError(self)
class _EtreeFrameParser(_XMLFrameParser):
"""
Internal class to parse XML into DataFrames with the Python
standard library XML module: `xml.etree.ElementTree`.
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def parse_data(self) -> list[dict[str, str | None]]:
from xml.etree.ElementTree import XML
if self.stylesheet is not None:
raise ValueError(
"To use stylesheet, you need lxml installed and selected as parser."
)
self.xml_doc = XML(self._parse_doc(self.path_or_buffer))
self._validate_path()
self._validate_names()
return self._parse_nodes()
def _parse_nodes(self) -> list[dict[str, str | None]]:
elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces)
dicts: list[dict[str, str | None]]
if self.elems_only and self.attrs_only:
raise ValueError("Either element or attributes can be parsed not both.")
elif self.elems_only:
if self.names:
dicts = [
{
**(
{el.tag: el.text.strip()}
if el.text and not el.text.isspace()
else {}
),
**{
nm: ch.text.strip() if ch.text else None
for nm, ch in zip(self.names, el.findall("*"))
},
}
for el in elems
]
else:
dicts = [
{
ch.tag: ch.text.strip() if ch.text else None
for ch in el.findall("*")
}
for el in elems
]
elif self.attrs_only:
dicts = [
{k: v.strip() if v else None for k, v in el.attrib.items()}
for el in elems
]
else:
if self.names:
dicts = [
{
**el.attrib,
**(
{el.tag: el.text.strip()}
if el.text and not el.text.isspace()
else {}
),
**{
nm: ch.text.strip() if ch.text else None
for nm, ch in zip(self.names, el.findall("*"))
},
}
for el in elems
]
else:
dicts = [
{
**el.attrib,
**(
{el.tag: el.text.strip()}
if el.text and not el.text.isspace()
else {}
),
**{
ch.tag: ch.text.strip() if ch.text else None
for ch in el.findall("*")
},
}
for el in elems
]
dicts = [
{k.split("}")[1] if "}" in k else k: v for k, v in d.items()} for d in dicts
]
keys = list(dict.fromkeys([k for d in dicts for k in d.keys()]))
dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts]
if self.names:
dicts = [
{nm: v for nm, (k, v) in zip(self.names, d.items())} for d in dicts
]
return dicts
def _validate_path(self) -> None:
"""
Notes
-----
`etree` supports limited XPath. If user attempts a more complex
expression syntax error will raise.
"""
msg = (
"xpath does not return any nodes. "
"If document uses namespaces denoted with "
"xmlns, be sure to define namespaces and "
"use them in xpath."
)
try:
elems = self.xml_doc.find(self.xpath, namespaces=self.namespaces)
if elems is None:
raise ValueError(msg)
if elems is not None and elems.find("*") is None and elems.attrib is None:
raise ValueError(msg)
except (KeyError, SyntaxError):
raise SyntaxError(
"You have used an incorrect or unsupported XPath "
"expression for etree library or you used an "
"undeclared namespace prefix."
)
def _validate_names(self) -> None:
if self.names:
parent = self.xml_doc.find(self.xpath, namespaces=self.namespaces)
children = parent.findall("*") if parent else []
if is_list_like(self.names):
if len(self.names) < len(children):
raise ValueError(
"names does not match length of child elements in xpath."
)
else:
raise TypeError(
f"{type(self.names).__name__} is not a valid type for names"
)
def _parse_doc(self, raw_doc) -> bytes:
from xml.etree.ElementTree import (
XMLParser,
parse,
tostring,
)
handle_data = get_data_from_filepath(
filepath_or_buffer=raw_doc,
encoding=self.encoding,
compression=self.compression,
storage_options=self.storage_options,
)
with preprocess_data(handle_data) as xml_data:
curr_parser = XMLParser(encoding=self.encoding)
r = parse(xml_data, parser=curr_parser)
return tostring(r.getroot())
class _LxmlFrameParser(_XMLFrameParser):
"""
Internal class to parse XML into DataFrames with third-party
full-featured XML library, `lxml`, that supports
XPath 1.0 and XSLT 1.0.
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
def parse_data(self) -> list[dict[str, str | None]]:
"""
Parse xml data.
This method will call the other internal methods to
validate xpath, names, optionally parse and run XSLT,
and parse original or transformed XML and return specific nodes.
"""
from lxml.etree import XML
self.xml_doc = XML(self._parse_doc(self.path_or_buffer))
if self.stylesheet is not None:
self.xsl_doc = XML(self._parse_doc(self.stylesheet))
self.xml_doc = XML(self._transform_doc())
self._validate_path()
self._validate_names()
return self._parse_nodes()
def _parse_nodes(self) -> list[dict[str, str | None]]:
elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces)
dicts: list[dict[str, str | None]]
if self.elems_only and self.attrs_only:
raise ValueError("Either element or attributes can be parsed not both.")
elif self.elems_only:
if self.names:
dicts = [
{
**(
{el.tag: el.text.strip()}
if el.text and not el.text.isspace()
else {}
),
**{
nm: ch.text.strip() if ch.text else None
for nm, ch in zip(self.names, el.xpath("*"))
},
}
for el in elems
]
else:
dicts = [
{
ch.tag: ch.text.strip() if ch.text else None
for ch in el.xpath("*")
}
for el in elems
]
elif self.attrs_only:
dicts = [el.attrib for el in elems]
else:
if self.names:
dicts = [
{
**el.attrib,
**(
{el.tag: el.text.strip()}
if el.text and not el.text.isspace()
else {}
),
**{
nm: ch.text.strip() if ch.text else None
for nm, ch in zip(self.names, el.xpath("*"))
},
}
for el in elems
]
else:
dicts = [
{
**el.attrib,
**(
{el.tag: el.text.strip()}
if el.text and not el.text.isspace()
else {}
),
**{
ch.tag: ch.text.strip() if ch.text else None
for ch in el.xpath("*")
},
}
for el in elems
]
if self.namespaces or "}" in list(dicts[0].keys())[0]:
dicts = [
{k.split("}")[1] if "}" in k else k: v for k, v in d.items()}
for d in dicts
]
keys = list(dict.fromkeys([k for d in dicts for k in d.keys()]))
dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts]
if self.names:
dicts = [
{nm: v for nm, (k, v) in zip(self.names, d.items())} for d in dicts
]
return dicts
def _validate_path(self) -> None:
msg = (
"xpath does not return any nodes. "
"Be sure row level nodes are in xpath. "
"If document uses namespaces denoted with "
"xmlns, be sure to define namespaces and "
"use them in xpath."
)
elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces)
children = self.xml_doc.xpath(self.xpath + "/*", namespaces=self.namespaces)
attrs = self.xml_doc.xpath(self.xpath + "/@*", namespaces=self.namespaces)
if elems == []:
raise ValueError(msg)
if elems != [] and attrs == [] and children == []:
raise ValueError(msg)
def _validate_names(self) -> None:
"""
Validate names.
        This method will check if `names` is list-like and aligns with
        the length of parsed nodes.
Raises
------
ValueError
            * If names is list-like but does not match the length of parsed nodes.
"""
if self.names:
children = self.xml_doc.xpath(
self.xpath + "[1]/*", namespaces=self.namespaces
)
if is_list_like(self.names):
if len(self.names) < len(children):
raise ValueError(
"names does not match length of child elements in xpath."
)
else:
raise TypeError(
f"{type(self.names).__name__} is not a valid type for names"
)
def _parse_doc(self, raw_doc) -> bytes:
from lxml.etree import (
XMLParser,
fromstring,
parse,
tostring,
)
handle_data = get_data_from_filepath(
filepath_or_buffer=raw_doc,
encoding=self.encoding,
compression=self.compression,
storage_options=self.storage_options,
)
with preprocess_data(handle_data) as xml_data:
curr_parser = XMLParser(encoding=self.encoding)
if isinstance(xml_data, io.StringIO):
doc = fromstring(
xml_data.getvalue().encode(self.encoding), parser=curr_parser
)
else:
doc = parse(xml_data, parser=curr_parser)
return tostring(doc)
def _transform_doc(self) -> bytes:
"""
Transform original tree using stylesheet.
This method will transform original xml using XSLT script into
        an ideally flatter XML document for easier parsing and migration
        to a DataFrame.
"""
from lxml.etree import XSLT
transformer = XSLT(self.xsl_doc)
new_doc = transformer(self.xml_doc)
return bytes(new_doc)
def get_data_from_filepath(
filepath_or_buffer,
encoding,
compression,
storage_options,
) -> str | bytes | Buffer:
"""
Extract raw XML data.
The method accepts three input types:
1. filepath (string-like)
2. file-like object (e.g. open file object, StringIO)
3. XML string or bytes
This method turns (1) into (2) to simplify the rest of the processing.
It returns input types (2) and (3) unchanged.
"""
filepath_or_buffer = stringify_path(filepath_or_buffer)
if (
isinstance(filepath_or_buffer, str)
and not filepath_or_buffer.startswith(("<?xml", "<"))
) and (
not isinstance(filepath_or_buffer, str)
or is_url(filepath_or_buffer)
or is_fsspec_url(filepath_or_buffer)
or file_exists(filepath_or_buffer)
):
with get_handle(
filepath_or_buffer,
"r",
encoding=encoding,
compression=compression,
storage_options=storage_options,
) as handle_obj:
filepath_or_buffer = (
handle_obj.handle.read()
if hasattr(handle_obj.handle, "read")
else handle_obj.handle
)
return filepath_or_buffer
def preprocess_data(data) -> io.StringIO | io.BytesIO:
"""
Convert extracted raw data.
This method will return underlying data of extracted XML content.
The data either has a `read` attribute (e.g. a file object or a
StringIO/BytesIO) or is a string or bytes that is an XML document.
"""
if isinstance(data, str):
data = io.StringIO(data)
elif isinstance(data, bytes):
data = io.BytesIO(data)
return data
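def _example_preprocess_data_roundtrip():  # illustrative sketch, not pandas API
    """
    Hedged example (assumption: called manually, never by read_xml): shows the
    contract of get_data_from_filepath and preprocess_data above. A raw XML
    string passes through get_data_from_filepath unchanged, and preprocess_data
    wraps it so downstream parsers can treat it like a file handle.
    """
    raw = "<data><row><shape>square</shape></row></data>"
    passthrough = get_data_from_filepath(
        filepath_or_buffer=raw,
        encoding="utf-8",
        compression=None,
        storage_options=None,
    )
    # A string that is itself XML is returned unchanged...
    assert passthrough == raw
    # ...and wrapped into an in-memory buffer for the parsers.
    wrapped = preprocess_data(passthrough)
    assert isinstance(wrapped, io.StringIO)
    assert wrapped.read() == raw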
def _data_to_frame(data, **kwargs) -> DataFrame:
"""
    Convert parsed data to a DataFrame.
    This method will bind XML dictionary data of keys and values
    into named columns of a DataFrame using the built-in TextParser
    class that builds the DataFrame and infers specific dtypes.
"""
tags = next(iter(data))
nodes = [list(d.values()) for d in data]
try:
with TextParser(nodes, names=tags, **kwargs) as tp:
return tp.read()
except ParserError:
raise ParserError(
"XML document may be too complex for import. "
"Try to flatten document and use distinct "
"element and attribute names."
)
def _parse(
path_or_buffer,
xpath,
namespaces,
elems_only,
attrs_only,
names,
encoding,
parser,
stylesheet,
compression,
storage_options,
**kwargs,
) -> DataFrame:
"""
Call internal parsers.
    This method will conditionally call internal parsers:
    _LxmlFrameParser and/or _EtreeFrameParser.
Raises
------
    ImportError
        * If lxml is not installed when it is selected as parser.
ValueError
* If parser is not lxml or etree.
"""
lxml = import_optional_dependency("lxml.etree", errors="ignore")
p: _EtreeFrameParser | _LxmlFrameParser
if parser == "lxml":
if lxml is not None:
p = _LxmlFrameParser(
path_or_buffer,
xpath,
namespaces,
elems_only,
attrs_only,
names,
encoding,
stylesheet,
compression,
storage_options,
)
else:
raise ImportError("lxml not found, please install or use the etree parser.")
elif parser == "etree":
p = _EtreeFrameParser(
path_or_buffer,
xpath,
namespaces,
elems_only,
attrs_only,
names,
encoding,
stylesheet,
compression,
storage_options,
)
else:
raise ValueError("Values for parser can only be lxml or etree.")
data_dicts = p.parse_data()
return _data_to_frame(data=data_dicts, **kwargs)
@doc(storage_options=_shared_docs["storage_options"])
def read_xml(
path_or_buffer: FilePathOrBuffer,
xpath: str | None = "./*",
namespaces: dict | list[dict] | None = None,
elems_only: bool | None = False,
attrs_only: bool | None = False,
names: list[str] | None = None,
encoding: str | None = "utf-8",
parser: str | None = "lxml",
stylesheet: FilePathOrBuffer | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> DataFrame:
r"""
Read XML document into a ``DataFrame`` object.
.. versionadded:: 1.3.0
Parameters
----------
path_or_buffer : str, path object, or file-like object
Any valid XML string or path is acceptable. The string could be a URL.
Valid URL schemes include http, ftp, s3, and file.
xpath : str, optional, default './\*'
The XPath to parse required set of nodes for migration to DataFrame.
XPath should return a collection of elements and not a single
element. Note: The ``etree`` parser supports limited XPath
expressions. For more complex XPath, use ``lxml`` which requires
installation.
namespaces : dict, optional
The namespaces defined in XML document as dicts with key being
namespace prefix and value the URI. There is no need to include all
namespaces in XML, only the ones used in ``xpath`` expression.
Note: if XML document uses default namespace denoted as
`xmlns='<URI>'` without a prefix, you must assign any temporary
namespace prefix such as 'doc' to the URI in order to parse
underlying nodes and/or attributes. For example, ::
namespaces = {{"doc": "https://example.com"}}
elems_only : bool, optional, default False
Parse only the child elements at the specified ``xpath``. By default,
all child elements and non-empty text nodes are returned.
attrs_only : bool, optional, default False
Parse only the attributes at the specified ``xpath``.
By default, all attributes are returned.
names : list-like, optional
Column names for DataFrame of parsed XML data. Use this parameter to
rename original element names and distinguish same named elements.
encoding : str, optional, default 'utf-8'
Encoding of XML document.
parser : {{'lxml','etree'}}, default 'lxml'
Parser module to use for retrieval of data. Only 'lxml' and
'etree' are supported. With 'lxml' more complex XPath searches
and ability to use XSLT stylesheet are supported.
stylesheet : str, path object or file-like object
A URL, file-like object, or a raw string containing an XSLT script.
This stylesheet should flatten complex, deeply nested XML documents
for easier parsing. To use this feature you must have ``lxml`` module
installed and specify 'lxml' as ``parser``. The ``xpath`` must
reference nodes of transformed XML document generated after XSLT
transformation and not the original XML document. Only XSLT 1.0
        scripts and not later versions are currently supported.
compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use
gzip, bz2, zip or xz if path_or_buffer is a string ending in
        '.gz', '.bz2', '.zip', or '.xz', respectively, and no decompression
otherwise. If using 'zip', the ZIP file must contain only one data
file to be read in. Set to None for no decompression.
{storage_options}
Returns
-------
df
A DataFrame.
See Also
--------
read_json : Convert a JSON string to pandas object.
read_html : Read HTML tables into a list of DataFrame objects.
Notes
-----
This method is best designed to import shallow XML documents in
following format which is the ideal fit for the two-dimensions of a
``DataFrame`` (row by column). ::
<root>
<row>
<column1>data</column1>
<column2>data</column2>
<column3>data</column3>
...
</row>
<row>
...
</row>
...
</root>
As a file format, XML documents can be designed any way including
layout of elements and attributes as long as it conforms to W3C
specifications. Therefore, this method is a convenience handler for
a specific flatter design and not all possible XML structures.
However, for more complex XML documents, ``stylesheet`` allows you to
temporarily redesign original document with XSLT (a special purpose
language) for a flatter version for migration to a DataFrame.
This function will *always* return a single :class:`DataFrame` or raise
exceptions due to issues with XML document, ``xpath``, or other
parameters.
Examples
--------
>>> xml = '''<?xml version='1.0' encoding='utf-8'?>
... <data xmlns="http://example.com">
... <row>
... <shape>square</shape>
... <degrees>360</degrees>
... <sides>4.0</sides>
... </row>
... <row>
... <shape>circle</shape>
... <degrees>360</degrees>
... <sides/>
... </row>
... <row>
... <shape>triangle</shape>
... <degrees>180</degrees>
... <sides>3.0</sides>
... </row>
... </data>'''
>>> df = pd.read_xml(xml)
>>> df
shape degrees sides
0 square 360 4.0
1 circle 360 NaN
2 triangle 180 3.0
>>> xml = '''<?xml version='1.0' encoding='utf-8'?>
... <data>
... <row shape="square" degrees="360" sides="4.0"/>
... <row shape="circle" degrees="360"/>
... <row shape="triangle" degrees="180" sides="3.0"/>
... </data>'''
>>> df = pd.read_xml(xml, xpath=".//row")
>>> df
shape degrees sides
0 square 360 4.0
1 circle 360 NaN
2 triangle 180 3.0
>>> xml = '''<?xml version='1.0' encoding='utf-8'?>
... <doc:data xmlns:doc="https://example.com">
... <doc:row>
... <doc:shape>square</doc:shape>
... <doc:degrees>360</doc:degrees>
... <doc:sides>4.0</doc:sides>
... </doc:row>
... <doc:row>
... <doc:shape>circle</doc:shape>
... <doc:degrees>360</doc:degrees>
... <doc:sides/>
... </doc:row>
... <doc:row>
... <doc:shape>triangle</doc:shape>
... <doc:degrees>180</doc:degrees>
... <doc:sides>3.0</doc:sides>
... </doc:row>
... </doc:data>'''
>>> df = pd.read_xml(xml,
... xpath="//doc:row",
... namespaces={{"doc": "https://example.com"}})
>>> df
shape degrees sides
0 square 360 4.0
1 circle 360 NaN
2 triangle 180 3.0
"""
return _parse(
path_or_buffer=path_or_buffer,
xpath=xpath,
namespaces=namespaces,
elems_only=elems_only,
attrs_only=attrs_only,
names=names,
encoding=encoding,
parser=parser,
stylesheet=stylesheet,
compression=compression,
storage_options=storage_options,
)
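def _example_read_xml_rename_columns():  # illustrative sketch, not pandas API
    """
    Hedged usage example (assumption: run manually, with at least the standard
    library ``etree`` parser available): rename parsed element columns through
    the ``names`` argument documented above.
    """
    xml = (
        "<?xml version='1.0' encoding='utf-8'?>"
        "<data>"
        "<row><shape>square</shape><degrees>360</degrees></row>"
        "<row><shape>circle</shape><degrees>360</degrees></row>"
        "</data>"
    )
    df = read_xml(xml, xpath=".//row", names=["form", "angle"], parser="etree")
    # Columns come back as 'form' and 'angle' instead of 'shape' and 'degrees'.
    return df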
|
|
from AppKit import NSMatrix, NSRadioButton, NSButtonCell, NSRadioModeMatrix, NSFont, NSCellDisabled
from vanilla.vanillaBase import VanillaBaseControl, _sizeStyleMap
from vanilla.vanillaButton import Button
from vanilla.vanillaStackGroup import VerticalStackGroup, HorizontalStackGroup
try:
from AppKit import NSRadioButtonType
except ImportError:
from AppKit import NSRadioButton
NSRadioButtonType = NSRadioButton
class _RadioGroupMixin(object):
_heights = dict(
regular=18,
small=15,
mini=12
)
def _init(self, cls, posSize, titles, callback=None, sizeStyle="regular"):
spacing = self._spacing[sizeStyle]
self._buttonHeight = self._heights[sizeStyle]
self._callback = callback
self._sizeStyle = sizeStyle
super(cls, self).__init__(posSize, spacing=spacing, alignment="leading")
self._buildButtons(titles, sizeStyle)
def _buildButtons(self, titles, sizeStyle):
self._buttons = []
for title in titles:
button = RadioButton("auto", title, callback=self._buttonCallback, sizeStyle=sizeStyle)
self._buttons.append(button)
self.addView(button, height=self._buttonHeight)
def _buttonCallback(self, sender):
for button in self._buttons:
if button != sender:
button.set(False)
if self._callback is not None:
self._callback(self)
def getFittingHeight(self):
"""
Get the fitting height for all buttons in the group.
"""
count = len(self._buttons)
size = self._heights[self._sizeStyle]
spacing = self._spacing[self._sizeStyle]
height = size * count
height += spacing * (count - 1)
return height
def get(self):
"""
Get the index of the selected radio button.
"""
for index, button in enumerate(self._buttons):
if button.get():
return index
def set(self, index):
"""
Set the index of the selected radio button.
"""
for other, button in enumerate(self._buttons):
button.set(other == index)
class VerticalRadioGroup(VerticalStackGroup, _RadioGroupMixin):
"""
A vertical collection of radio buttons.
.. image:: /_images/VerticalRadioGroup.png
::
from vanilla import Window, VerticalRadioGroup
class VerticalRadioGroupDemo:
def __init__(self):
self.w = Window((100, 100))
self.w.radioGroup = VerticalRadioGroup(
"auto",
["Option 1", "Option 2"],
callback=self.radioGroupCallback
)
self.w.radioGroup.set(0)
rules = [
"H:|-[radioGroup]-|",
"V:|-[radioGroup(==%d)]-|" % self.w.radioGroup.getFittingHeight()
]
self.w.addAutoPosSizeRules(rules)
self.w.open()
def radioGroupCallback(self, sender):
print("radio group edit!", sender.get())
VerticalRadioGroupDemo()
**posSize** Tuple of form *(left, top, width, height)* or *"auto"* representing
the position and size of the radio group.
**titles** A list of titles to be shown next to the radio buttons.
    **callback** The method to be called when a radio button is selected.
**sizeStyle** A string representing the desired size style of the radio group.
The options are:
+-----------+
| "regular" |
+-----------+
| "small" |
+-----------+
| "mini" |
+-----------+
"""
_spacing = dict(
regular=2,
small=2,
mini=2
)
def __init__(self, posSize, titles, callback=None, sizeStyle="regular"):
self._init(VerticalRadioGroup, posSize, titles, callback=callback, sizeStyle=sizeStyle)
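# Worked example (illustrative, using the class constants above): a "regular"
# VerticalRadioGroup with three buttons uses a button height of 18 and a
# spacing of 2, so getFittingHeight() reports 18 * 3 + 2 * (3 - 1) == 58,
# i.e. button height times count plus the spacing between buttons.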
class HorizontalRadioGroup(HorizontalStackGroup, _RadioGroupMixin):
### TODO: Example is not horizontal but vertical!
"""
A horizontal collection of radio buttons.
::
from vanilla import Window, HorizontalRadioGroup
class RadioGroupDemo:
def __init__(self):
self.w = Window((300, 100))
self.w.radioGroup = HorizontalRadioGroup(
"auto",
["Option 1", "Option 2"],
callback=self.radioGroupCallback
)
self.w.radioGroup.set(0)
rules = [
"H:|-[radioGroup]-|",
"V:|-[radioGroup(==%d)]-|" % self.w.radioGroup.getFittingHeight()
]
self.w.addAutoPosSizeRules(rules)
self.w.open()
def radioGroupCallback(self, sender):
print("radio group edit!", sender.get())
RadioGroupDemo()
**posSize** Tuple of form *(left, top, width, height)* or *"auto"* representing
the position and size of the radio group.
**titles** A list of titles to be shown next to the radio buttons.
    **callback** The method to be called when a radio button is selected.
**sizeStyle** A string representing the desired size style of the radio group.
The options are:
+-----------+
| "regular" |
+-----------+
| "small" |
+-----------+
| "mini" |
+-----------+
"""
_spacing = dict(
regular=4,
small=3,
mini=3
)
def __init__(self, posSize, titles, callback=None, sizeStyle="regular"):
self._init(HorizontalRadioGroup, posSize, titles, callback=callback, sizeStyle=sizeStyle)
class RadioButton(Button):
"""
A single radio button.
"""
nsButtonType = NSRadioButtonType
def __init__(self, posSize, title, value=False, callback=None, sizeStyle="regular"):
super(RadioButton, self).__init__(posSize, title, callback=callback, sizeStyle=sizeStyle)
self.set(value)
def set(self, value):
"""
Set the state of the radio button.
**value** A boolean representing the state of the radio button.
"""
self._nsObject.setState_(value)
def get(self):
"""
Get the state of the radio button.
"""
return self._nsObject.state()
# ------
# Legacy
# ------
class RadioGroup(VanillaBaseControl):
"""
A collection of radio buttons.
.. image:: /_images/RadioGroup.png
.. note:: This should be used only for frame layout.
::
from vanilla import Window, RadioGroup
class RadioGroupDemo:
def __init__(self):
self.w = Window((100, 60))
self.w.radioGroup = RadioGroup((10, 10, -10, 40),
["Option 1", "Option 2"],
callback=self.radioGroupCallback)
self.w.radioGroup.set(0)
self.w.open()
def radioGroupCallback(self, sender):
print("radio group edit!", sender.get())
RadioGroupDemo()
**posSize** Tuple of form *(left, top, width, height)* or *"auto"* representing
the position and size of the radio group.
**titles** A list of titles to be shown next to the radio buttons.
**isVertical** Boolean representing if the radio group is
vertical or horizontal.
    **callback** The method to be called when a radio button is selected.
**sizeStyle** A string representing the desired size style of the radio group.
The options are:
+-----------+
| "regular" |
+-----------+
| "small" |
+-----------+
| "mini" |
+-----------+
"""
nsMatrixClass = NSMatrix
nsCellClass = NSButtonCell
def __init__(self, posSize, titles, isVertical=True, callback=None, sizeStyle="regular"):
self._setupView(self.nsMatrixClass, posSize, callback=callback)
self._isVertical = isVertical
matrix = self._nsObject
matrix.setMode_(NSRadioModeMatrix)
matrix.setCellClass_(self.nsCellClass)
# XXX! this does not work for vertical radio groups!
matrix.setAutosizesCells_(True)
# we handle the control size setting here
# since the actual NS object is a NSMatrix
cellSizeStyle = _sizeStyleMap[sizeStyle]
font = NSFont.systemFontOfSize_(NSFont.systemFontSizeForControlSize_(cellSizeStyle))
# intercell spacing and cell spacing are based on the sizeStyle
if posSize == "auto":
w = 0
else:
w = posSize[2]
if sizeStyle == "regular":
matrix.setIntercellSpacing_((4.0, 2.0))
matrix.setCellSize_((w, 18))
elif sizeStyle == "small":
matrix.setIntercellSpacing_((3.5, 2.0))
matrix.setCellSize_((w, 15))
elif sizeStyle == "mini":
matrix.setIntercellSpacing_((3.0, 2.0))
matrix.setCellSize_((w, 12))
else:
raise ValueError("sizeStyle must be 'regular', 'small' or 'mini'")
for _ in range(len(titles)):
if isVertical:
matrix.addRow()
else:
matrix.addColumn()
for title, cell in zip(titles, matrix.cells()):
cell.setButtonType_(NSRadioButton)
cell.setTitle_(title)
cell.setControlSize_(cellSizeStyle)
cell.setFont_(font)
def _testForDeprecatedAttributes(self):
super(RadioGroup, self)._testForDeprecatedAttributes()
from warnings import warn
if hasattr(self, "_cellClass"):
warn(DeprecationWarning("The _cellClass attribute is deprecated. Use the nsCellClass attribute."))
self.nsCellClass = self._cellClass
def getNSMatrix(self):
"""
Return the `NSMatrix`_ that this object wraps.
.. _NSMatrix: https://developer.apple.com/documentation/appkit/nsmatrix?language=objc
"""
return self._nsObject
def get(self):
"""
Get the index of the selected radio button.
"""
if self._isVertical:
return self._nsObject.selectedRow()
else:
return self._nsObject.selectedColumn()
def set(self, index):
"""
Set the index of the selected radio button.
"""
if self._isVertical:
row = index
column = 0
else:
row = 0
column = index
self._nsObject.selectCellAtRow_column_(row, column)
def enableRadioButton(self, index, onOff=True):
"""
Enable or disable a RadioGroup button specified by its index.
"""
self._nsObject.cells()[index].setCellAttribute_to_(NSCellDisabled, not onOff)
|
|
#!/usr/bin/env python
from __future__ import print_function
import matplotlib
matplotlib.use('WXAgg')
import matplotlib.pyplot as plt
import pymongo
import sys
sys.path.append("/home/ggdhines/github/aggregation/engine")
from agglomerative import Agglomerative
import csv
import os
import urllib
import matplotlib.cbook as cbook
name_changes = {}
with open("/home/ggdhines/Downloads/Nomenclature_changes.csv","rb") as f:
f.readline()
reader = csv.reader(f,delimiter=",")
for zoo_id,pre_zoo_id in reader:
# print(pre_zoo_id+"|")
# print(pre_zoo_id == "")
if pre_zoo_id != "":
name_changes[zoo_id] = pre_zoo_id[:-1]
# assert False
roi_dict = {}
# method for checking if a given marking is within the ROI
def __in_roi__(site,marking):
"""
does the actual checking
    :param site:
:param marking:
:return:
"""
if site not in roi_dict:
return True
roi = roi_dict[site]
x,y = marking
X = []
Y = []
for segment_index in range(len(roi)-1):
rX1,rY1 = roi[segment_index]
X.append(rX1)
Y.append(-rY1)
# find the line segment that "surrounds" x and see if y is above that line segment (remember that
# images are flipped)
for segment_index in range(len(roi)-1):
if (roi[segment_index][0] <= x) and (roi[segment_index+1][0] >= x):
rX1,rY1 = roi[segment_index]
rX2,rY2 = roi[segment_index+1]
# todo - check why such cases are happening
if rX1 == rX2:
continue
m = (rY2-rY1)/float(rX2-rX1)
rY = m*(x-rX1)+rY1
if y >= rY:
# we have found a valid marking
# create a special type of animal None that is used when the animal type is missing
# thus, the marking will count towards not being noise but will not be used when determining the type
return True
else:
return False
# probably shouldn't happen too often but if it does, assume that we are outside of the ROI
return False
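# Illustrative check (toy values, not taken from roi.tsv): for a site whose ROI
# boundary is the single horizontal segment (0, 100)-(200, 100), the slope m is
# 0 and rY evaluates to 100 for any x in range, so a marking at (50, 150)
# satisfies y >= rY and is kept, while a marking at (50, 50) is rejected.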
# load the roi file - for checking if a given marking is within the ROI
with open("/home/ggdhines/github/Penguins/public/roi.tsv","rb") as roiFile:
roiFile.readline()
reader = csv.reader(roiFile,delimiter="\t")
for l in reader:
path = l[0]
t = [r.split(",") for r in l[1:] if r != ""]
roi_dict[path] = [(int(x)/1.92,int(y)/1.92) for (x,y) in t]
client = pymongo.MongoClient()
db = client['penguin']
classification_collection = db["penguin_classifications"]
subject_collection = db["penguin_subjects"]
# for c in classification_collection.find():
# _id = c["_id"]
# zooniverse_id = c["subjects"][0]["zooniverse_id"]
#
# classification_collection.update_one({"_id":_id},{"$set":{"zooniverse_id":zooniverse_id}})
clustering_engine = Agglomerative(None,None,{})
# result = db.profiles.create_index([('zooniverse_id', pymongo.ASCENDING)],unique=False)
# print result
for subject in subject_collection.find():
# _id = c["_id"]
zooniverse_id = subject["subjects"][0]["zooniverse_id"]
print(zooniverse_id)
markings = []
user_ids = []
tools = []
num_users = 0
path = subject["metadata"]["path"]
_,image_id = path.split("/")
site_id = image_id.split("_")[0]
# print(site_id)
big_path,little_path = path.split("/")
little_path = little_path[:-4]
d = "/tmp/penguin/"+big_path
if not os.path.exists(d):
os.makedirs(d)
# print(site_id in name_changes,site_id in roi_dict)
# print(name_changes)
# continue
for c2 in classification_collection.find({"zooniverse_id":zooniverse_id}):
num_users += 1
if "finished_at" in c2["annotations"][1]:
continue
if "user_name" in c2:
id_ = c2["user_name"]
else:
id_ = c2["user_ip"]
try:
for penguin in c2["annotations"][1]["value"].values():
if penguin["value"] == "other":
continue
try:
x = float(penguin["x"])
y = float(penguin["y"])
except TypeError:
print(penguin)
raise
except ValueError:
print("skipping bad markings")
continue
if site_id in roi_dict:
if not __in_roi__(site_id,(x,y)):
print("skipping due to being outside roi")
continue
markings.append((x,y))
user_ids.append(id_)
tools.append(penguin["value"])
except AttributeError:
continue
except KeyError:
continue
with open(d+"/"+little_path+".csv","w") as f:
if markings != []:
# call the panoptes based clustering algorithm
clustering_results = clustering_engine.__cluster__(markings,user_ids,tools,markings,None,None)
# print(len(markings))
# pts = [c["center"] for c in clustering_results[0]]
# # for c in clustering_results[0]:
# # print(c["center"])
# # print(c["cluster members"])
# # print("")
# x,y = zip(*pts)
#
# subject = subject_collection.find_one({"zooniverse_id":zooniverse_id})
# url = subject["location"]["standard"]
# fName = url.split("/")[-1]
# if not(os.path.isfile("/home/ggdhines/Databases/penguins/images/"+fName)):
# #urllib.urlretrieve ("http://demo.zooniverse.org/penguins/subjects/standard/"+fName, "/home/greg/Databases/penguins/images/"+fName)
# urllib.urlretrieve ("http://www.penguinwatch.org/subjects/standard/"+fName, "/home/ggdhines/Databases/penguins/images/"+fName)
# image_file = cbook.get_sample_data("/home/ggdhines/Databases/penguins/images/"+fName)
# image = plt.imread(image_file)
#
# fig, ax = plt.subplots()
# im = ax.imshow(image)
#
# plt.plot(x,y,".",color="red")
# plt.show()
f.write("penguin_index,x_center,y_center,probability_of_adult,probability_of_chick,probability_of_egg,probability_of_true_positive,num_markings\n")
for penguin_index,cluster in enumerate(clustering_results[0]):
center = cluster["center"]
tools = cluster["tools"]
probability_adult = sum([1 for t in tools if t == "adult"])/float(len(tools))
probability_chick = sum([1 for t in tools if t == "chick"])/float(len(tools))
probability_egg = sum([1 for t in tools if t == "egg"])/float(len(tools))
probability_true_positive = len(tools)/float(num_users)
count_true_positive = len(tools)
f.write(str(penguin_index)+","+str(center[0])+","+str(center[1])+","+str(probability_adult)+","+str(probability_chick)+"," + str(probability_egg)+ ","+str(probability_true_positive)+","+str(count_true_positive)+"\n")
# print(d+"/"+little_path+".csv")
else:
f.write("-1\n")
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Logistic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import distribution
class Logistic(distribution.Distribution):
"""The Logistic distribution with location `loc` and `scale` parameters.
#### Mathematical details
  The cumulative distribution function of this distribution is:
```none
cdf(x; mu, sigma) = 1 / (1 + exp(-(x - mu) / sigma))
```
where `loc = mu` and `scale = sigma`.
The Logistic distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ Logistic(loc=0, scale=1)
Y = loc + scale * X
```
#### Examples
Examples of initialization of one or a batch of distributions.
```python
# Define a single scalar Logistic distribution.
dist = tf.contrib.distributions.Logistic(loc=0., scale=3.)
# Evaluate the cdf at 1, returning a scalar.
dist.cdf(1.)
# Define a batch of two scalar valued Logistics.
# The first has mean 1 and scale 11, the second 2 and 22.
dist = tf.contrib.distributions.Logistic(loc=[1, 2.], scale=[11, 22.])
# Evaluate the pdf of the first distribution on 0, and the second on 1.5,
# returning a length two tensor.
dist.prob([0, 1.5])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
```
Arguments are broadcast when possible.
```python
# Define a batch of two scalar valued Logistics.
# Both have mean 1, but different scales.
dist = tf.contrib.distributions.Logistic(loc=1., scale=[11, 22.])
# Evaluate the pdf of both distributions on the same point, 3.0,
# returning a length 2 tensor.
dist.prob(3.0)
```
"""
def __init__(self,
loc,
scale,
validate_args=False,
allow_nan_stats=True,
name="Logistic"):
"""Construct Logistic distributions with mean and scale `loc` and `scale`.
The parameters `loc` and `scale` must be shaped in a way that supports
broadcasting (e.g. `loc + scale` is a valid operation).
Args:
loc: Floating point tensor, the means of the distribution(s).
scale: Floating point tensor, the scales of the distribution(s). Must
contain only positive values.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or
more of the statistic's batch members are undefined.
name: The name to give Ops created by the initializer.
Raises:
TypeError: if loc and scale are different dtypes.
"""
parameters = locals()
with ops.name_scope(name, values=[loc, scale]):
with ops.control_dependencies([check_ops.assert_positive(scale)] if
validate_args else []):
self._loc = array_ops.identity(loc, name="loc")
self._scale = array_ops.identity(scale, name="scale")
check_ops.assert_same_float_dtype([self._loc, self._scale])
super(Logistic, self).__init__(
dtype=self._scale.dtype,
reparameterization_type=distribution.FULLY_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=[self._loc, self._scale],
name=name)
@staticmethod
def _param_shapes(sample_shape):
return dict(
zip(("loc", "scale"), ([ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32)] * 2)))
@property
def loc(self):
"""Distribution parameter for the location."""
return self._loc
@property
def scale(self):
"""Distribution parameter for scale."""
return self._scale
def _batch_shape_tensor(self):
return array_ops.broadcast_dynamic_shape(
array_ops.shape(self.loc), array_ops.shape(self.scale))
def _batch_shape(self):
return array_ops.broadcast_static_shape(
self.loc.get_shape(), self.scale.get_shape())
def _event_shape_tensor(self):
return constant_op.constant([], dtype=dtypes.int32)
def _event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
# Uniform variates must be sampled from the open-interval `(0, 1)` rather
# than `[0, 1)`. To do so, we use `np.finfo(self.dtype.as_numpy_dtype).tiny`
# because it is the smallest, positive, "normal" number. A "normal" number
# is such that the mantissa has an implicit leading 1. Normal, positive
# numbers x, y have the reasonable property that, `x + y >= max(x, y)`. In
# this case, a subnormal number (i.e., np.nextafter) can cause us to sample
# 0.
uniform = random_ops.random_uniform(
shape=array_ops.concat([[n], self.batch_shape_tensor()], 0),
minval=np.finfo(self.dtype.as_numpy_dtype).tiny,
maxval=1.,
dtype=self.dtype,
seed=seed)
sampled = math_ops.log(uniform) - math_ops.log1p(-1. * uniform)
return sampled * self.scale + self.loc
def _log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _prob(self, x):
return math_ops.exp(self._log_prob(x))
def _log_cdf(self, x):
return -nn_ops.softplus(-self._z(x))
def _cdf(self, x):
return math_ops.sigmoid(self._z(x))
def _log_survival_function(self, x):
return -nn_ops.softplus(self._z(x))
def _survival_function(self, x):
return math_ops.sigmoid(-self._z(x))
def _log_unnormalized_prob(self, x):
z = self._z(x)
return - z - 2. * nn_ops.softplus(-z)
def _log_normalization(self):
return math_ops.log(self.scale)
def _entropy(self):
# Use broadcasting rules to calculate the full broadcast sigma.
scale = self.scale * array_ops.ones_like(self.loc)
return 2 + math_ops.log(scale)
def _mean(self):
return self.loc * array_ops.ones_like(self.scale)
def _stddev(self):
return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(3)
def _mode(self):
return self._mean()
def _z(self, x):
"""Standardize input `x` to a unit logistic."""
with ops.name_scope("standardize", values=[x]):
return (x - self.loc) / self.scale
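# Hedged sanity-check sketch (not part of the TensorFlow source): _sample_n
# above draws u ~ Uniform(tiny, 1) and applies the logistic quantile function
# loc + scale * (log(u) - log1p(-u)). The same transform in NumPy, with the
# illustrative values loc=1.0 and scale=3.0, gives samples whose mean is close
# to 1.0 and whose standard deviation is close to 3.0 * pi / sqrt(3):
#
#   u = np.random.uniform(np.finfo(np.float64).tiny, 1.0, size=100000)
#   samples = 1.0 + 3.0 * (np.log(u) - np.log1p(-u))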
|
|
"""
SendKeys.py - Sends one or more keystroke or keystroke combinations
to the active window.
Copyright (C) 2003 Ollie Rutherfurd <[email protected]>
Python License
Version 0.3 (2003-06-14)
$Id$
"""
import sys
import time
from _sendkeys import char2keycode, key_up, key_down, toggle_numlock
__all__ = ['KeySequenceError', 'SendKeys']
try:
True
except NameError:
True,False = 1,0
KEYEVENTF_KEYUP = 2
VK_SHIFT = 16
VK_CONTROL = 17
VK_MENU = 18
PAUSE = 50/1000.0 # 50 milliseconds
# 'codes' recognized as {CODE( repeat)?}
CODES = {
'BACK': 8,
'BACKSPACE': 8,
'BS': 8,
'BKSP': 8,
'BREAK': 3,
'CAP': 20,
'CAPSLOCK': 20,
'DEL': 46,
'DELETE': 46,
'DOWN': 40,
'END': 35,
'ENTER': 13,
'ESC': 27,
'HELP': 47,
'HOME': 36,
'INS': 45,
'INSERT': 45,
'LEFT': 37,
'LWIN': 91,
'NUMLOCK': 144,
'PGDN': 34,
'PGUP': 33,
'PRTSC': 44,
'RIGHT': 39,
'RMENU': 165,
'RWIN': 92,
'SCROLLLOCK': 145,
'SPACE': 32,
'TAB': 9,
    'UP': 38,
'F1': 112,
'F2': 113,
'F3': 114,
'F4': 115,
'F5': 116,
'F6': 117,
'F7': 118,
'F8': 119,
'F9': 120,
'F10': 121,
'F11': 122,
'F12': 123,
'F13': 124,
'F14': 125,
'F15': 126,
'F16': 127,
'F17': 128,
'F18': 129,
'F19': 130,
'F20': 131,
'F21': 132,
'F22': 133,
'F23': 134,
'F24': 135,
}
ESCAPE = '+^%~{}[]'
NO_SHIFT = '[]'
SHIFT = {
'!': '1',
'@': '2',
'#': '3',
'$': '4',
'&': '7',
'*': '8',
'_': '-',
'|': '\\',
':': ';',
'"': '\'',
'<': ',',
'>': '.',
'?': '/',
}
# modifier keys
MODIFIERS = {
'+': VK_SHIFT,
'^': VK_CONTROL,
'%': VK_MENU,
}
class KeySequenceError(Exception):
"""Exception raised when a key sequence string has a syntax error"""
def __str__(self):
return ' '.join(self.args)
def _append_code(keys,code):
keys.append((code, True))
keys.append((code, False))
def _next_char(chars,error_msg=None):
if error_msg is None:
error_msg = 'expected another character'
try:
return chars.pop()
except IndexError:
raise KeySequenceError(error_msg)
def _handle_char(c,keys,shift):
if shift:
keys.append((MODIFIERS['+'],True))
_append_code(keys, char2keycode(c))
if shift:
keys.append((MODIFIERS['+'],False))
def _release_modifiers(keys,modifiers):
for c in modifiers.keys():
if modifiers[c]:
keys.append((MODIFIERS[c], False))
modifiers[c] = False
def str2keys(key_string,
with_spaces=False,
with_tabs=False,
with_newlines=False):
"""
Converts `key_string` string to a list of 2-tuples,
``(keycode,down)``, which can be given to `playkeys`.
`key_string` : str
A string of keys.
`with_spaces` : bool
Whether to treat spaces as ``{SPACE}``. If `False`, spaces are ignored.
`with_tabs` : bool
Whether to treat tabs as ``{TAB}``. If `False`, tabs are ignored.
`with_newlines` : bool
Whether to treat newlines as ``{ENTER}``. If `False`, newlines are ignored.
"""
# reading input as a stack
chars = list(key_string)
chars.reverse()
# results
keys = []
# for keeping track of whether shift, ctrl, & alt are pressed
modifiers = {}
for k in MODIFIERS.keys():
modifiers[k] = False
while chars:
c = chars.pop()
if c in MODIFIERS.keys():
keys.append((MODIFIERS[c],True))
modifiers[c] = True
# group of chars, for applying a modifier
elif c == '(':
while c != ')':
c = _next_char(chars,'`(` without `)`')
if c == ')':
raise KeySequenceError('expected a character before `)`')
if c == ' ' and with_spaces:
_handle_char(CODES['SPACE'], keys, False)
elif c == '\n' and with_newlines:
_handle_char(CODES['ENTER'], keys, False)
elif c == '\t' and with_tabs:
_handle_char(CODES['TAB'], keys, False)
else:
# if we need shift for this char and it's not already pressed
shift = (c.isupper() or c in SHIFT.keys()) and not modifiers['+']
if c in SHIFT.keys():
_handle_char(SHIFT[c], keys, shift)
else:
_handle_char(c.lower(), keys, shift)
c = _next_char(chars,'`)` not found')
_release_modifiers(keys,modifiers)
# escaped code, modifier, or repeated char
elif c == '{':
saw_space = False
name = [_next_char(chars)]
arg = ['0']
c = _next_char(chars, '`{` without `}`')
while c != '}':
if c == ' ':
saw_space = True
elif c in '.0123456789' and saw_space:
arg.append(c)
else:
name.append(c)
c = _next_char(chars, '`{` without `}`')
code = ''.join(name)
arg = float('0' + ''.join(arg))
if code == 'PAUSE':
if not arg:
arg = PAUSE
keys.append((None,arg))
else:
# always having 1 here makes logic
# easier -- we can always loop
if arg == 0:
arg = 1
for i in range(int(arg)):
if code in CODES.keys():
_append_code(keys, CODES[code])
else:
# must be an escaped modifier or a
# repeated char at this point
if len(code) > 1:
try:
# number in hex is also good (more keys are allowed)
_append_code(keys, int(code,16))
continue
except ValueError:
raise KeySequenceError('Unknown code: %s' % code)
# handling both {e 3} and {+}, {%}, {^}
shift = code in ESCAPE and not code in NO_SHIFT
# do shift if we've got an upper case letter
shift = shift or code[0].isupper()
c = code
if not shift:
# handle keys in SHIFT (!, @, etc...)
if c in SHIFT.keys():
c = SHIFT[c]
shift = True
_handle_char(c.lower(), keys, shift)
_release_modifiers(keys,modifiers)
# unexpected ")"
elif c == ')':
            raise KeySequenceError('`)` should be preceded by `(`')
# unexpected "}"
elif c == '}':
            raise KeySequenceError('`}` should be preceded by `{`')
# handling a single character
else:
if c == ' ' and not with_spaces:
continue
elif c == '\t' and not with_tabs:
continue
elif c == '\n' and not with_newlines:
continue
if c in ('~','\n'):
_append_code(keys, CODES['ENTER'])
elif c == ' ':
_append_code(keys, CODES['SPACE'])
elif c == '\t':
_append_code(keys, CODES['TAB'])
else:
# if we need shift for this char and it's not already pressed
shift = (c.isupper() or c in SHIFT.keys()) and not modifiers['+']
if c in SHIFT.keys():
_handle_char(SHIFT[c], keys, shift)
else:
_handle_char(c.lower(), keys, shift)
_release_modifiers(keys,modifiers)
_release_modifiers(keys,modifiers)
return keys
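# Illustrative trace (the exact keycode for 'a' depends on the platform-specific
# char2keycode, so it is shown symbolically): str2keys("+a") yields roughly
#   [(VK_SHIFT, True), (<keycode 'a'>, True), (<keycode 'a'>, False), (VK_SHIFT, False)]
# i.e. SHIFT pressed, 'a' pressed and released, SHIFT released, ready for playkeys().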
def playkeys(keys, pause=.05):
"""
Simulates pressing and releasing one or more keys.
`keys` : str
A list of 2-tuples consisting of ``(keycode,down)``
where `down` is `True` when the key is being pressed
and `False` when it's being released.
`keys` is returned from `str2keys`.
`pause` : float
Number of seconds between releasing a key and pressing the
next one.
"""
for (vk, arg) in keys:
if vk:
if arg:
key_down(vk)
else:
key_up(vk)
if pause: # pause after key up
time.sleep(pause)
else:
time.sleep(arg)
def SendKeys(keys,
pause=0.05,
with_spaces=False,
with_tabs=False,
with_newlines=False,
turn_off_numlock=True):
"""
Sends keys to the current window.
`keys` : str
A string of keys.
`pause` : float
The number of seconds to wait between sending each key
or key combination.
`with_spaces` : bool
Whether to treat spaces as ``{SPACE}``. If `False`, spaces are ignored.
`with_tabs` : bool
Whether to treat tabs as ``{TAB}``. If `False`, tabs are ignored.
`with_newlines` : bool
Whether to treat newlines as ``{ENTER}``. If `False`, newlines are ignored.
`turn_off_numlock` : bool
Whether to turn off `NUMLOCK` before sending keys.
example::
SendKeys("+hello{SPACE}+world+1")
would result in ``"Hello World!"``
"""
restore_numlock = False
try:
# read keystroke keys into a list of 2 tuples [(key,up),]
_keys = str2keys(keys, with_spaces, with_tabs, with_newlines)
# certain keystrokes don't seem to behave the same way if NUMLOCK
# is on (for example, ^+{LEFT}), so turn NUMLOCK off, if it's on
# and restore its original state when done.
if turn_off_numlock:
restore_numlock = toggle_numlock(False)
# "play" the keys to the active window
playkeys(_keys, pause)
finally:
if restore_numlock and turn_off_numlock:
key_down(CODES['NUMLOCK'])
key_up(CODES['NUMLOCK'])
def usage():
"""
Writes help message to `stderr` and exits.
"""
print >> sys.stderr, """\
%(name)s [-h] [-d seconds] [-p seconds] [-f filename] or [string of keys]
-dN or --delay=N : N is seconds before starting
-pN or --pause=N : N is seconds between each key
-fNAME or --file=NAME : NAME is filename containing keys to send
-h or --help : show help message
""" % {'name': 'SendKeys.py'}
sys.exit(1)
def error(msg):
"""
Writes `msg` to `stderr`, displays usage
information, and exits.
"""
print >> sys.stderr, '\nERROR: %s\n' % msg
usage()
def main(args=None):
import getopt
if args is None:
args = sys.argv[1:]
try:
opts,args = getopt.getopt(args,
"hp:d:f:", ["help","pause","delay","file"])
except getopt.GetoptError:
usage()
pause=0
delay=0
filename=None
for o, a in opts:
if o in ('-h','--help'):
usage()
elif o in ('-f','--file'):
filename = a
elif o in ('-p','--pause'):
try:
pause = float(a)
assert pause >= 0
except (ValueError,AssertionError),e:
error('`pause` must be >= 0.0')
elif o in ('-d','--delay'):
try:
delay = float(a)
assert delay >= 0
except (ValueError,AssertionError),e:
error('`delay` must be >= 0.0')
time.sleep(delay)
if not filename is None and args:
error("can't pass both filename and string of keys on command-line")
elif filename:
f = open(filename)
keys = f.read()
f.close()
SendKeys(keys, pause)
else:
for a in args:
SendKeys(a, pause)
if __name__ == '__main__':
main(sys.argv[1:])
# :indentSize=4:lineSeparator=\r\n:maxLineLen=80:noTabs=true:tabSize=4:
|