import gym
import numpy as np
import unittest
from ray.rllib.models.tf.attention_net import relative_position_embedding, \
GTrXLNet
from ray.rllib.models.tf.layers import MultiHeadAttention
from ray.rllib.models.torch.attention_net import relative_position_embedding \
as relative_position_embedding_torch, GTrXLNet as TorchGTrXLNet
from ray.rllib.models.torch.modules.multi_head_attention import \
MultiHeadAttention as TorchMultiHeadAttention
from ray.rllib.utils.framework import try_import_torch, try_import_tf
from ray.rllib.utils.test_utils import framework_iterator
torch, nn = try_import_torch()
tf1, tf, tfv = try_import_tf()
class TestModules(unittest.TestCase):
"""Tests various torch/modules and tf/layers required for AttentionNet"""
def train_torch_full_model(self,
model,
inputs,
outputs,
num_epochs=250,
state=None,
seq_lens=None):
"""Convenience method that trains a Torch model for num_epochs epochs
and tests whether loss decreased, as expected.
Args:
model (nn.Module): Torch model to be trained.
inputs (torch.Tensor): Training data
outputs (torch.Tensor): Training labels
num_epochs (int): Number of epochs to train for
state (torch.Tensor): Internal state of module
seq_lens (torch.Tensor): Tensor of sequence lengths
"""
criterion = torch.nn.MSELoss(reduction="sum")
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
# Check that the layer trains correctly
for t in range(num_epochs):
y_pred = model(inputs, state, seq_lens)
loss = criterion(y_pred[0], torch.squeeze(outputs[0]))
if t % 10 == 1:
print(t, loss.item())
# if t == 0:
# init_loss = loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# final_loss = loss.item()
# The final loss has decreased, which tests
# that the model is learning from the training data.
# self.assertLess(final_loss / init_loss, 0.99)
def train_torch_layer(self, model, inputs, outputs, num_epochs=250):
"""Convenience method that trains a Torch model for num_epochs epochs
and tests whether loss decreased, as expected.
Args:
model (nn.Module): Torch model to be trained.
inputs (torch.Tensor): Training data
outputs (torch.Tensor): Training labels
num_epochs (int): Number of epochs to train for
"""
criterion = torch.nn.MSELoss(reduction="sum")
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
# Check that the layer trains correctly
for t in range(num_epochs):
y_pred = model(inputs)
loss = criterion(y_pred, outputs)
if t == 1:
init_loss = loss.item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
final_loss = loss.item()
# The final loss has decreased by a factor of 2, which tests
# that the model is learning from the training data.
self.assertLess(final_loss / init_loss, 0.5)
def train_tf_model(self,
model,
inputs,
outputs,
num_epochs=250,
minibatch_size=32):
"""Convenience method that trains a Tensorflow model for num_epochs
epochs and tests whether loss decreased, as expected.
Args:
model (tf.keras.Model): Keras model to be trained.
inputs (np.array): Training data
outputs (np.array): Training labels
num_epochs (int): Number of training epochs
minibatch_size (int): Number of samples in each minibatch
"""
# Configure a model for mean-squared error loss.
model.compile(optimizer="SGD", loss="mse", metrics=["mae"])
hist = model.fit(
inputs,
outputs,
verbose=0,
epochs=num_epochs,
batch_size=minibatch_size).history
init_loss = hist["loss"][0]
final_loss = hist["loss"][-1]
self.assertLess(final_loss / init_loss, 0.5)
def test_multi_head_attention(self):
"""Tests the MultiHeadAttention mechanism of Vaswani et al."""
# B is batch size
B = 1
# D_in is attention dim, L is memory_tau
L, D_in, D_out = 2, 32, 10
for fw, sess in framework_iterator(
frameworks=("tfe", "torch", "tf"), session=True):
# Create a single attention layer with 2 heads.
if fw == "torch":
# Create random Tensors to hold inputs and outputs
x = torch.randn(B, L, D_in)
y = torch.randn(B, L, D_out)
model = TorchMultiHeadAttention(
in_dim=D_in, out_dim=D_out, num_heads=2, head_dim=32)
self.train_torch_layer(model, x, y, num_epochs=500)
# Framework is tensorflow or tensorflow-eager.
else:
x = np.random.random((B, L, D_in))
y = np.random.random((B, L, D_out))
inputs = tf.keras.layers.Input(shape=(L, D_in))
model = tf.keras.Sequential([
inputs,
MultiHeadAttention(
out_dim=D_out, num_heads=2, head_dim=32)
])
self.train_tf_model(model, x, y)
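# A minimal standalone sketch (kept as a comment, for reference only) of the layer
# exercised above; shapes follow the test: input [B, L, D_in] -> output [B, L, D_out].
#
#   layer = TorchMultiHeadAttention(in_dim=32, out_dim=10, num_heads=2, head_dim=32)
#   out = layer(torch.randn(1, 2, 32))  # expected output shape: (1, 2, 10)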
def test_attention_net(self):
"""Tests the GTrXL.
Builds a full AttentionNet and checks that it trains in a supervised
setting."""
# Checks that torch and tf embedding matrices are the same
with tf1.Session().as_default() as sess:
assert np.allclose(
relative_position_embedding(20, 15).eval(session=sess),
relative_position_embedding_torch(20, 15).numpy())
# B is batch size
B = 32
# D_in is attention dim, L is memory_tau
L, D_in, D_out = 2, 16, 2
for fw, sess in framework_iterator(session=True):
# Create a single attention layer with 2 heads
if fw == "torch":
# Create random Tensors to hold inputs and outputs
x = torch.randn(B, L, D_in)
y = torch.randn(B, L, D_out)
value_labels = torch.randn(B, L, D_in)
memory_labels = torch.randn(B, L, D_out)
attention_net = TorchGTrXLNet(
observation_space=gym.spaces.Box(
low=float("-inf"), high=float("inf"), shape=(D_in, )),
action_space=gym.spaces.Discrete(D_out),
num_outputs=D_out,
model_config={"max_seq_len": 2},
name="TestTorchAttentionNet",
num_transformer_units=2,
attn_dim=D_in,
num_heads=2,
memory_tau=L,
head_dim=D_out,
ff_hidden_dim=16,
init_gate_bias=2.0)
init_state = attention_net.get_initial_state()
# Get initial state and add a batch dimension.
init_state = [np.expand_dims(s, 0) for s in init_state]
seq_lens_init = torch.full(
size=(B, ), fill_value=L, dtype=torch.int32)
# Torch implementation expects a formatted input_dict instead
# of a numpy array as input.
input_dict = {"obs": x}
self.train_torch_full_model(
attention_net,
input_dict, [y, value_labels, memory_labels],
num_epochs=250,
state=init_state,
seq_lens=seq_lens_init)
# Framework is tensorflow or tensorflow-eager.
else:
x = np.random.random((B, L, D_in))
y = np.random.random((B, L, D_out))
value_labels = np.random.random((B, L, 1))
memory_labels = np.random.random((B, L, D_in))
# We need to create (N-1) MLP labels for N transformer units
mlp_labels = np.random.random((B, L, D_in))
attention_net = GTrXLNet(
observation_space=gym.spaces.Box(
low=float("-inf"), high=float("inf"), shape=(D_in, )),
action_space=gym.spaces.Discrete(D_out),
num_outputs=D_out,
model_config={"max_seq_len": 2},
name="TestTFAttentionNet",
num_transformer_units=2,
attn_dim=D_in,
num_heads=2,
memory_tau=L,
head_dim=D_out,
ff_hidden_dim=16,
init_gate_bias=2.0)
model = attention_net.trxl_model
# Get initial state and add a batch dimension.
init_state = attention_net.get_initial_state()
init_state = [np.tile(s, (B, 1, 1)) for s in init_state]
self.train_tf_model(
model, [x] + init_state,
[y, value_labels, memory_labels, mlp_labels],
num_epochs=200,
minibatch_size=B)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.modules.storage.netapp.netapp_e_host import Host
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
__metaclass__ = type
try:
from unittest import mock
except ImportError:
import mock
class HostTest(ModuleTestCase):
REQUIRED_PARAMS = {
'api_username': 'rw',
'api_password': 'password',
'api_url': 'http://localhost',
'ssid': '1',
'name': '1',
}
HOST = {
'name': '1',
'hostRef': '123',
'label': '1',
'id': '0' * 30,
'clusterRef': 40 * '0',
'hostTypeIndex': 28,
'hostSidePorts': [],
'initiators': [],
'ports': [],
}
HOST_ALT = {
'name': '2',
'label': '2',
'id': '1' * 30,
'clusterRef': '1',
'hostSidePorts': [],
'initiators': [],
'ports': [],
}
EXISTING_HOSTS = [
{"hostRef": "84000000600A098000A4B28D00303D065D430118", "clusterRef": "0000000000000000000000000000000000000000", "label": "beegfs_storage1",
"hostTypeIndex": 28, "ports": [], "initiators": [{"initiatorRef": "89000000600A098000A4B28D00303CF55D4300E3",
"nodeName": {"ioInterfaceType": "iscsi",
"iscsiNodeName": "iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818",
"remoteNodeWWN": None, "nvmeNodeName": None},
"alias": {"ioInterfaceType": "iscsi", "iscsiAlias": ""}, "label": "beegfs_storage1_iscsi_0",
"hostRef": "84000000600A098000A4B28D00303D065D430118",
"id": "89000000600A098000A4B28D00303CF55D4300E3"}],
"hostSidePorts": [{"type": "iscsi", "address": "iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818", "label": "beegfs_storage1_iscsi_0"}],
"id": "84000000600A098000A4B28D00303D065D430118", "name": "beegfs_storage1"},
{"hostRef": "84000000600A098000A4B9D10030370B5D430109", "clusterRef": "0000000000000000000000000000000000000000", "label": "beegfs_metadata1",
"hostTypeIndex": 28, "ports": [], "initiators": [{"initiatorRef": "89000000600A098000A4B28D00303CFC5D4300F7",
"nodeName": {"ioInterfaceType": "iscsi",
"iscsiNodeName": "iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8",
"remoteNodeWWN": None, "nvmeNodeName": None},
"alias": {"ioInterfaceType": "iscsi", "iscsiAlias": ""}, "label": "beegfs_metadata1_iscsi_0",
"hostRef": "84000000600A098000A4B9D10030370B5D430109",
"id": "89000000600A098000A4B28D00303CFC5D4300F7"}],
"hostSidePorts": [{"type": "iscsi", "address": "iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8", "label": "beegfs_metadata1_iscsi_0"}],
"id": "84000000600A098000A4B9D10030370B5D430109", "name": "beegfs_metadata1"},
{"hostRef": "84000000600A098000A4B9D10030370B5D430109", "clusterRef": "85000000600A098000A4B9D1003637135D483DEB", "label": "beegfs_metadata2",
"hostTypeIndex": 28, "ports": [], "initiators": [{"initiatorRef": "89000000600A098000A4B28D00303CFC5D4300F7",
"nodeName": {"ioInterfaceType": "iscsi",
"iscsiNodeName": "iqn.used_elsewhere",
"remoteNodeWWN": None, "nvmeNodeName": None},
"alias": {"ioInterfaceType": "iscsi", "iscsiAlias": ""}, "label": "beegfs_metadata2_iscsi_0",
"hostRef": "84000000600A098000A4B9D10030370B5D430109",
"id": "89000000600A098000A4B28D00303CFC5D4300F7"}],
"hostSidePorts": [{"type": "iscsi", "address": "iqn.used_elsewhere", "label": "beegfs_metadata2_iscsi_0"}],
"id": "84000000600A098000A4B9D10030370B5D430120", "name": "beegfs_metadata2"}]
HOST_GROUPS = [{"clusterRef": "85000000600A098000A4B9D1003637135D483DEB", "label": "test_group", "isSAControlled": False,
"confirmLUNMappingCreation": False, "protectionInformationCapableAccessMethod": True, "isLun0Restricted": False,
"id": "85000000600A098000A4B9D1003637135D483DEB", "name": "test_group"}]
HOST_TYPES = [{"name": "FactoryDefault", "index": 0, "code": "FactoryDefault"},
{"name": "Windows 2000/Server 2003/Server 2008 Non-Clustered", "index": 1, "code": "W2KNETNCL"},
{"name": "Solaris", "index": 2, "code": "SOL"},
{"name": "Linux", "index": 6, "code": "LNX"},
{"name": "LnxALUA", "index": 7, "code": "LnxALUA"},
{"name": "Windows 2000/Server 2003/Server 2008 Clustered", "index": 8, "code": "W2KNETCL"},
{"name": "LnxTPGSALUA_SF", "index": 27, "code": "LnxTPGSALUA_SF"},
{"name": "LnxDHALUA", "index": 28, "code": "LnxDHALUA"}]
REQ_FUNC = 'ansible.modules.storage.netapp.netapp_e_host.request'
def _set_args(self, args):
module_args = self.REQUIRED_PARAMS.copy()
module_args.update(args)
set_module_args(module_args)
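# The tests below all follow the same pattern, sketched here for orientation: patch the
# module-level request helper (REQ_FUNC) so no real storage array is contacted, set the
# module arguments, then instantiate Host and call the method under test, e.g.:
#
#   with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
#       self._set_args({'state': 'present', 'name': 'beegfs_storage1', 'host_type': 'linux dm-mp',
#                       'ports': [{'label': 'beegfs_storage1_iscsi_0', 'type': 'iscsi',
#                                  'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
#       host = Host()
#       host.host_exists()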
def test_host_exists_pass(self):
"""Verify host_exists produces expected results."""
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'new_host', 'host_type': 'linux dm-mp', 'force_port': False,
'ports': [{'label': 'new_host_port_1', 'type': 'fc', 'port': '0x08ef08ef08ef08ef'}]})
host = Host()
self.assertFalse(host.host_exists())
self._set_args({'state': 'present', 'name': 'does_not_exist', 'host_type': 'linux dm-mp',
'ports': [{'label': 'beegfs_storage1_iscsi_0', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
host = Host()
self.assertFalse(host.host_exists())
self._set_args({'state': 'present', 'name': 'beegfs_storage1', 'host_type': 'linux dm-mp',
'ports': [{'label': 'beegfs_storage1_iscsi_0', 'type': 'iscsi', 'port': 'iqn.differentiqn.org'}]})
host = Host()
self.assertTrue(host.host_exists())
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
'ports': [{'label': 'beegfs_metadata1_iscsi_0', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
host = Host()
self.assertTrue(host.host_exists())
def test_host_exists_fail(self):
"""Verify host_exists produces expected exceptions."""
self._set_args({'state': 'present', 'host_type': 'linux dm-mp', 'ports': [{'label': 'abc', 'type': 'iscsi', 'port': 'iqn:0'}]})
host = Host()
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to determine host existence."):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
host.host_exists()
def test_needs_update_pass(self):
"""Verify needs_update produces expected results."""
# No changes
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp',
'ports': [{'label': 'beegfs_metadata1_iscsi_0', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.host_exists()
self.assertFalse(host.needs_update())
# Change host type
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': False,
'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi', 'port': 'iqn.not_used'}]})
host = Host()
host.host_exists()
self.assertTrue(host.needs_update())
# Add port to host
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi', 'port': 'iqn.not_used'}]})
host = Host()
host.host_exists()
self.assertTrue(host.needs_update())
# Change port name
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
'ports': [{'label': 'beegfs_metadata1_iscsi_2', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.host_exists()
self.assertTrue(host.needs_update())
# take port from another host by force
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.host_exists()
self.assertTrue(host.needs_update())
def test_needs_update_fail(self):
"""Verify needs_update produces expected exceptions."""
with self.assertRaisesRegexp(AnsibleFailJson, "is associated with a different host."):
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.host_exists()
host.needs_update()
def test_valid_host_type_pass(self):
"""Validate the available host types."""
with mock.patch(self.REQ_FUNC, return_value=(200, self.HOST_TYPES)):
self._set_args({'state': 'present', 'host_type': '0'})
host = Host()
self.assertTrue(host.valid_host_type())
self._set_args({'state': 'present', 'host_type': '28'})
host = Host()
self.assertTrue(host.valid_host_type())
self._set_args({'state': 'present', 'host_type': 'windows'})
host = Host()
self.assertTrue(host.valid_host_type())
self._set_args({'state': 'present', 'host_type': 'linux dm-mp'})
host = Host()
self.assertTrue(host.valid_host_type())
def test_valid_host_type_fail(self):
"""Validate the available host types."""
with self.assertRaisesRegexp(AnsibleFailJson, "host_type must be either a host type name or host type index found integer the documentation"):
self._set_args({'state': 'present', 'host_type': 'non-host-type'})
host = Host()
with mock.patch(self.REQ_FUNC, return_value=(200, self.HOST_TYPES)):
with self.assertRaisesRegexp(AnsibleFailJson, "There is no host type with index"):
self._set_args({'state': 'present', 'host_type': '4'})
host = Host()
host.valid_host_type()
with mock.patch(self.REQ_FUNC, return_value=Exception()):
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to get host types."):
self._set_args({'state': 'present', 'host_type': '4'})
host = Host()
host.valid_host_type()
def test_group_id_pass(self):
"""Verify group_id produces expected results."""
with mock.patch(self.REQ_FUNC, return_value=(200, self.HOST_GROUPS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
self.assertEqual(host.group_id(), "0000000000000000000000000000000000000000")
self._set_args({'state': 'present', 'name': 'beegfs_metadata2', 'host_type': 'linux dm-mp', 'force_port': False, 'group': 'test_group',
'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
self.assertEqual(host.group_id(), "85000000600A098000A4B9D1003637135D483DEB")
def test_group_id_fail(self):
"""Verify group_id produces expected exceptions."""
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to get host groups."):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
self._set_args({'state': 'present', 'name': 'beegfs_metadata2', 'host_type': 'linux dm-mp', 'force_port': False, 'group': 'test_group2',
'ports': [
{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi', 'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.group_id()
with self.assertRaisesRegexp(AnsibleFailJson, "No group with the name:"):
with mock.patch(self.REQ_FUNC, return_value=(200, self.HOST_GROUPS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata2', 'host_type': 'linux dm-mp', 'force_port': False, 'group': 'test_group2',
'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.group_id()
def test_assigned_host_ports_pass(self):
"""Verify assigned_host_ports gives expected results."""
# Add an unused port to host
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi', 'port': 'iqn.not_used'}]})
host = Host()
host.host_exists()
self.assertTrue(host.needs_update())
self.assertEquals(host.assigned_host_ports(), {})
# Change port name (force)
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
'ports': [{'label': 'beegfs_metadata1_iscsi_2', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.host_exists()
self.assertTrue(host.needs_update())
self.assertEquals(host.assigned_host_ports(), {'84000000600A098000A4B9D10030370B5D430109': ['89000000600A098000A4B28D00303CFC5D4300F7']})
# Change port type
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'fc', 'port': '08:ef:7e:24:52:a0'}]})
host = Host()
host.host_exists()
self.assertTrue(host.needs_update())
self.assertEquals(host.assigned_host_ports(), {})
# take port from another host by force
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi', 'port': 'iqn.used_elsewhere'}]})
host = Host()
host.host_exists()
self.assertTrue(host.needs_update())
self.assertEquals(host.assigned_host_ports(), {'84000000600A098000A4B9D10030370B5D430109': ['89000000600A098000A4B28D00303CFC5D4300F7']})
# take port from another host by force
with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS), (200, {})]):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi', 'port': 'iqn.used_elsewhere'}]})
host = Host()
host.host_exists()
self.assertTrue(host.needs_update())
self.assertEquals(host.assigned_host_ports(apply_unassigning=True),
{'84000000600A098000A4B9D10030370B5D430109': ['89000000600A098000A4B28D00303CFC5D4300F7']})
def test_assigned_host_ports_fail(self):
"""Verify assigned_host_ports gives expected exceptions."""
# attempt to re-label an already-assigned port without force
with self.assertRaisesRegexp(AnsibleFailJson, "There are no host ports available OR there are not enough unassigned host ports"):
with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS)]):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
'ports': [{'label': 'beegfs_metadata1_iscsi_2', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.host_exists()
self.assertTrue(host.needs_update())
host.assigned_host_ports(apply_unassigning=True)
# take port from another host and fail because force == False
with self.assertRaisesRegexp(AnsibleFailJson, "There are no host ports available OR there are not enough unassigned host ports"):
with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS)]):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi', 'port': 'iqn.used_elsewhere'}]})
host = Host()
host.host_exists()
self.assertTrue(host.needs_update())
host.assigned_host_ports(apply_unassigning=True)
# take port from another host and fail because force == False
with self.assertRaisesRegexp(AnsibleFailJson, "There are no host ports available OR there are not enough unassigned host ports"):
with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS)]):
self._set_args({'state': 'present', 'name': 'beegfs_metadata3', 'host_type': 'linux dm-mp', 'force_port': False,
'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi', 'port': 'iqn.used_elsewhere'}]})
host = Host()
host.host_exists()
host.assigned_host_ports(apply_unassigning=True)
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to unassign host port."):
with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS), Exception()]):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': True,
'ports': [{'label': 'beegfs_metadata2_iscsi_0', 'type': 'iscsi', 'port': 'iqn.used_elsewhere'}]})
host = Host()
host.host_exists()
self.assertTrue(host.needs_update())
host.assigned_host_ports(apply_unassigning=True)
def test_update_host_pass(self):
"""Verify update_host produces expected results."""
# Change host type
with self.assertRaises(AnsibleExitJson):
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': True,
'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
host = Host()
host.build_success_payload = lambda x: {}
host.host_exists()
self.assertTrue(host.needs_update())
host.update_host()
# Change port iqn
with self.assertRaises(AnsibleExitJson):
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi', 'port': 'iqn.not_used'}]})
host = Host()
host.build_success_payload = lambda x: {}
host.host_exists()
self.assertTrue(host.needs_update())
host.update_host()
# Change port type to fc
with self.assertRaises(AnsibleExitJson):
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False,
'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'fc', 'port': '0x08ef08ef08ef08ef'}]})
host = Host()
host.build_success_payload = lambda x: {}
host.host_exists()
self.assertTrue(host.needs_update())
host.update_host()
# Change port name
with self.assertRaises(AnsibleExitJson):
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': True,
'ports': [{'label': 'beegfs_metadata1_iscsi_12', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.build_success_payload = lambda x: {}
host.host_exists()
self.assertTrue(host.needs_update())
host.update_host()
# Change group
with self.assertRaises(AnsibleExitJson):
with mock.patch(self.REQ_FUNC, return_value=(200, self.EXISTING_HOSTS)):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': False, 'group': 'test_group',
'ports': [{'label': 'beegfs_metadata1_iscsi_0', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.build_success_payload = lambda x: {}
host.group_id = lambda: "85000000600A098000A4B9D1003637135D483DEB"
host.host_exists()
self.assertTrue(host.needs_update())
host.update_host()
def test_update_host_fail(self):
"""Verify update_host produces expected exceptions."""
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to update host."):
with mock.patch(self.REQ_FUNC, side_effect=[(200, self.EXISTING_HOSTS), Exception()]):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': False, 'group': 'test_group',
'ports': [{'label': 'beegfs_metadata1_iscsi_0', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.build_success_payload = lambda x: {}
host.group_id = lambda: "85000000600A098000A4B9D1003637135D483DEB"
host.host_exists()
self.assertTrue(host.needs_update())
host.update_host()
def test_create_host_pass(self):
"""Verify create_host produces expected results."""
def _assigned_host_ports(apply_unassigning=False):
return None
with self.assertRaises(AnsibleExitJson):
with mock.patch(self.REQ_FUNC, return_value=(200, {'id': '84000000600A098000A4B9D10030370B5D430109'})):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': True, 'group': 'test_group',
'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
host = Host()
host.host_exists = lambda: False
host.assigned_host_ports = _assigned_host_ports
host.build_success_payload = lambda x: {}
host.group_id = lambda: "85000000600A098000A4B9D1003637135D483DEB"
host.create_host()
def test_create_host_fail(self):
"""Verify create_host produces expected exceptions."""
def _assigned_host_ports(apply_unassigning=False):
return None
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to create host."):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': True, 'group': 'test_group',
'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
host = Host()
host.host_exists = lambda: False
host.assigned_host_ports = _assigned_host_ports
host.build_success_payload = lambda x: {}
host.group_id = lambda: "85000000600A098000A4B9D1003637135D483DEB"
host.create_host()
with self.assertRaisesRegexp(AnsibleExitJson, "Host already exists."):
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': True, 'group': 'test_group',
'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
host = Host()
host.host_exists = lambda: True
host.assigned_host_ports = _assigned_host_ports
host.build_success_payload = lambda x: {}
host.group_id = lambda: "85000000600A098000A4B9D1003637135D483DEB"
host.create_host()
def test_remove_host_pass(self):
"""Verify remove_host produces expected results."""
with mock.patch(self.REQ_FUNC, return_value=(200, None)):
self._set_args({'state': 'absent', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False, 'group': 'test_group',
'ports': [{'label': 'beegfs_metadata1_iscsi_0', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.host_obj = {"id": "84000000600A098000A4B9D10030370B5D430109"}
host.remove_host()
def test_remove_host_fail(self):
"""Verify remove_host produces expected exceptions."""
with self.assertRaisesRegexp(AnsibleFailJson, "Failed to remove host."):
with mock.patch(self.REQ_FUNC, return_value=Exception()):
self._set_args({'state': 'absent', 'name': 'beegfs_metadata1', 'host_type': 'linux dm-mp', 'force_port': False, 'group': 'test_group',
'ports': [{'label': 'beegfs_metadata1_iscsi_0', 'type': 'iscsi',
'port': 'iqn.1993-08.org.debian.beegfs-metadata:01:69e4efdf30b8'}]})
host = Host()
host.host_obj = {"id": "84000000600A098000A4B9D10030370B5D430109"}
host.remove_host()
def test_build_success_payload(self):
"""Validate success payload."""
def _assigned_host_ports(apply_unassigning=False):
return None
self._set_args({'state': 'present', 'name': 'beegfs_metadata1', 'host_type': 'windows', 'force_port': True, 'group': 'test_group',
'ports': [{'label': 'beegfs_metadata1_iscsi_1', 'type': 'iscsi', 'port': 'iqn.1993-08.org.debian.beegfs-storage1:01:b0621126818'}]})
host = Host()
self.assertEquals(host.build_success_payload(), {'api_url': 'http://localhost/', 'ssid': '1'})
# Copyright (c) 2015 SONATA-NFV and Paderborn University
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, Paderborn University
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
from flask_restful import Resource
from flask import Response, request
from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
from emuvim.api.openstack.helper import get_host
import logging
import json
import uuid
from mininet.link import Link
LOG = logging.getLogger("api.openstack.nova")
class NovaDummyApi(BaseOpenstackDummy):
def __init__(self, in_ip, in_port, compute):
super(NovaDummyApi, self).__init__(in_ip, in_port)
self.compute = compute
self.compute.add_flavor('m1.tiny', 1, 512, "MB", 1, "GB")
self.compute.add_flavor('m1.nano', 1, 64, "MB", 0, "GB")
self.compute.add_flavor('m1.micro', 1, 128, "MB", 0, "GB")
self.compute.add_flavor('m1.small', 1, 1024, "MB", 2, "GB")
self.api.add_resource(NovaVersionsList, "/",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaVersionShow, "/v2.1/<id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListServersApi, "/v2.1/<id>/servers",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListServersAndPortsApi, "/v2.1/<id>/servers/andPorts",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListServersDetailed, "/v2.1/<id>/servers/detail",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaShowServerDetails, "/v2.1/<id>/servers/<serverid>",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaInterfaceToServer, "/v2.1/<id>/servers/<serverid>/os-interface",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaShowAndDeleteInterfaceAtServer, "/v2.1/<id>/servers/<serverid>/os-interface/<port_id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListFlavors, "/v2.1/<id>/flavors", "/v2/<id>/flavors",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListFlavorsDetails, "/v2.1/<id>/flavors/detail", "/v2/<id>/flavors/detail",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListFlavorById, "/v2.1/<id>/flavors/<flavorid>", "/v2/<id>/flavors/<flavorid>",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListImages, "/v2.1/<id>/images",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListImagesDetails, "/v2.1/<id>/images/detail",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaListImageById, "/v2.1/<id>/images/<imageid>",
resource_class_kwargs={'api': self})
self.api.add_resource(NovaLimits, "/v2.1/<id>/limits",
resource_class_kwargs={'api': self})
class NovaVersionsList(Resource):
def __init__(self, api):
self.api = api
def get(self):
"""
Lists API versions.
:return: Returns a json with API versions.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = """
{
"versions": [
{
"id": "v2.1",
"links": [
{
"href": "http://%s:%d/v2.1/",
"rel": "self"
}
],
"status": "CURRENT",
"version": "2.38",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
]
}
""" % (get_host(request), self.api.port)
response = Response(resp, status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not show list of versions." % __name__)
return ex.message, 500
class NovaVersionShow(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Returns API details.
:param id:
:type id: ``str``
:return: Returns a json with API details.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = """
{
"version": {
"id": "v2.1",
"links": [
{
"href": "http://%s:%d/v2.1/",
"rel": "self"
},
{
"href": "http://docs.openstack.org/",
"rel": "describedby",
"type": "text/html"
}
],
"media-types": [
{
"base": "application/json",
"type": "application/vnd.openstack.compute+json;version=2.1"
}
],
"status": "CURRENT",
"version": "2.38",
"min_version": "2.1",
"updated": "2013-07-23T11:33:21Z"
}
}
""" % (get_host(request), self.api.port)
response = Response(resp, status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not show list of versions." % __name__)
return ex.message, 500
class NovaListServersApi(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Creates a list with all running servers and their detailed information.
:param id: Used to create an individual link to query further information.
:type id: ``str``
:return: Returns a json response with a dictionary that contains the server information.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['servers'] = list()
for server in self.api.compute.computeUnits.values():
s = server.create_server_dict(self.api.compute)
s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (get_host(request),
self.api.port,
id,
server.id)}]
resp['servers'].append(s)
response = Response(json.dumps(resp), status=200,
mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(
u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
def post(self, id):
"""
Creates a server instance.
:param id: tenant id, we ignore this most of the time
:type id: ``str``
:return: Returns a flask response, with detailed information about the just created server.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
try:
server_dict = json.loads(request.data)['server']
networks = server_dict.get('networks', None)
name = str(self.api.compute.dc.label) + "_" + server_dict["name"]
if self.api.compute.find_server_by_name_or_id(name) is not None:
LOG.error("Server with name %s already exists. 409" % name)
return Response(
"Server with name %s already exists." % name, status=409)
# TODO: not finished!
server = self.api.compute.create_server(name)
server.full_name = str(
self.api.compute.dc.label) + "_" + server_dict["name"]
server.template_name = server_dict["name"]
if "metadata" in server_dict:
server.properties = server_dict["metadata"]
for flavor in self.api.compute.flavors.values():
if flavor.id == server_dict.get('flavorRef', ''):
server.flavor = flavor.name
for image in self.api.compute.images.values():
if image.id in server_dict['imageRef']:
server.image = image.name
if networks is not None:
for net in networks:
port_name_or_id = net.get('port', "")
port = self.api.compute.find_port_by_name_or_id(port_name_or_id)
if port is not None:
server.port_names.append(port_name_or_id)
else:
return Response(
"Currently only networking by port is supported.", status=400)
self.api.compute._start_compute(server)
response = NovaShowServerDetails(self.api).get(id, server.id)
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not create the server." % __name__)
return ex.message, 500
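# For illustration only: a request body the POST handler above would accept. The field
# names mirror the keys read from server_dict; the values are placeholders.
#
#   {
#       "server": {
#           "name": "vnf1",
#           "flavorRef": "<flavor id>",
#           "imageRef": "<image id>",
#           "metadata": {"role": "test"},
#           "networks": [{"port": "<existing port name or id>"}]
#       }
#   }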
class NovaListServersAndPortsApi(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Creates a list with all running servers and their detailed information. This function also presents all
port information of each server.
:param id: Used to create an individual link to query further information.
:type id: ``str``
:return: Returns a json response with a dictionary that contains the server information.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['servers'] = list()
for server in self.api.compute.computeUnits.values():
s = server.create_server_dict(self.api.compute)
s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (get_host(request),
self.api.port,
id,
server.id)}]
s['ports'] = list()
for port_name in server.port_names:
port = self.api.compute.find_port_by_name_or_id(port_name)
if port is None:
continue
tmp = port.create_port_dict(self.api.compute)
tmp['intf_name'] = port.intf_name
s['ports'].append(tmp)
resp['servers'].append(s)
response = Response(json.dumps(resp), status=200,
mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(
u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
class NovaListServersDetailed(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Like List Servers, this lists all running servers and their details, but it
additionally includes each server's used flavor and image.
:param id: tenant id, used for the 'href' link.
:type id: ``str``
:return: Returns a flask response with detailed information about the servers and their flavor and image.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = {"servers": list()}
for server in self.api.compute.computeUnits.values():
s = server.create_server_dict(self.api.compute)
s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (get_host(request),
self.api.port,
id,
server.id)}]
flavor = self.api.compute.flavors[server.flavor]
s['flavor'] = {
"id": flavor.id,
"links": [
{
"href": "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
flavor.id),
"rel": "bookmark"
}
]
}
image = self.api.compute.images[server.image]
s['image'] = {
"id": image.id,
"links": [
{
"href": "http://%s:%d/v2.1/%s/images/%s" % (get_host(request),
self.api.port,
id,
image.id),
"rel": "bookmark"
}
]
}
resp['servers'].append(s)
response = Response(json.dumps(resp), status=200,
mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(
u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
class NovaListFlavors(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Lists all available flavors.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:return: Returns a flask response with a list of all flavors.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['flavors'] = list()
for flavor in self.api.compute.flavors.values():
f = flavor.__dict__.copy()
f['id'] = flavor.id
f['name'] = flavor.name
f['links'] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
flavor.id)}]
resp['flavors'].append(f)
response = Response(json.dumps(resp), status=200,
mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(
u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
def post(self, id):
LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
data = json.loads(request.data).get("flavor")
LOG.warning("Create Flavor: %s" % str(data))
# add to internal dict
f = self.api.compute.add_flavor(
data.get("name"),
data.get("vcpus"),
data.get("ram"), "MB",
data.get("disk"), "GB")
# create response based on incoming data
data["id"] = f.id
data["links"] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
f.id)}]
resp = {"flavor": data}
return Response(json.dumps(resp), status=200,
mimetype="application/json")
class NovaListFlavorsDetails(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Lists all flavors with additional information like ram and disk space.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:return: Returns a flask response with a list of all flavors with additional information.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['flavors'] = list()
for flavor in self.api.compute.flavors.values():
# use a copy of the class dict so we don't modify the original
f = flavor.__dict__.copy()
# add the additional fields expected by OpenStack clients
f['links'] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
flavor.id)}]
f['OS-FLV-DISABLED:disabled'] = False
f['OS-FLV-EXT-DATA:ephemeral'] = 0
f['os-flavor-access:is_public'] = True
f['ram'] = flavor.memory
f['vcpus'] = flavor.cpu
f['swap'] = 0
f['disk'] = flavor.storage
f['rxtx_factor'] = 1.0
resp['flavors'].append(f)
response = Response(json.dumps(resp), status=200,
mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(
u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
def post(self, id):
LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
data = json.loads(request.data).get("flavor")
LOG.warning("Create Flavor: %s" % str(data))
# add to internal dict
f = self.api.compute.add_flavor(
data.get("name"),
data.get("vcpus"),
data.get("ram"), "MB",
data.get("disk"), "GB")
# create response based on incoming data
data["id"] = f.id
data["links"] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
f.id)}]
resp = {"flavor": data}
return Response(json.dumps(resp), status=200,
mimetype="application/json")
class NovaListFlavorById(Resource):
def __init__(self, api):
self.api = api
def get(self, id, flavorid):
"""
Returns details about one flavor.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:param flavorid: Represents the flavor.
:type flavorid: ``str``
:return: Returns a flask response with detailed information about the flavor.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['flavor'] = dict()
flavor = self.api.compute.flavors.get(flavorid, None)
if flavor is None:
for f in self.api.compute.flavors.values():
if f.id == flavorid:
flavor = f
break
resp['flavor']['id'] = flavor.id
resp['flavor']['name'] = flavor.name
resp['flavor']['links'] = [{'href': "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
flavor.id)}]
response = Response(json.dumps(resp), status=200,
mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve flavor with id %s" %
(__name__, flavorid))
return ex.message, 500
def delete(self, id, flavorid):
"""
Removes the given flavor.
Does not really remove anything from the machine, just fakes an OK.
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
return Response("", status=204, mimetype="application/json")
class NovaListImages(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Creates a list of all usable images.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:return: Returns a flask response with a list of available images.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['images'] = list()
for image in self.api.compute.images.values():
f = dict()
f['id'] = image.id
f['name'] = str(image.name).replace(":latest", "")
f['links'] = [{'href': "http://%s:%d/v2.1/%s/images/%s" % (get_host(request),
self.api.port,
id,
image.id)}]
resp['images'].append(f)
response = Response(json.dumps(resp), status=200,
mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(
u"%s: Could not retrieve the list of images." % __name__)
return ex.message, 500
class NovaListImagesDetails(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
As List Images but with additional metadata.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:return: Returns a flask response with a list of images and their metadata.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
resp['images'] = list()
for image in self.api.compute.images.values():
# use a copy of the class dict so we don't modify the original
f = image.__dict__.copy()
# add the additional fields expected by OpenStack clients
f['name'] = str(image.name).replace(":latest", "")
f['links'] = [{'href': "http://%s:%d/v2.1/%s/images/%s" % (get_host(request),
self.api.port,
id,
image.id)}]
f['metadata'] = {
"architecture": "x86_64",
"auto_disk_config": "True",
"kernel_id": "nokernel",
"ramdisk_id": "nokernel"
}
resp['images'].append(f)
response = Response(json.dumps(resp), status=200,
mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(
u"%s: Could not retrieve the list of images." % __name__)
return ex.message, 500
class NovaListImageById(Resource):
def __init__(self, api):
self.api = api
def get(self, id, imageid):
"""
Gets an image by id from the emulator with openstack nova compliant return values.
:param id: tenantid, we ignore this most of the time
:type id: ``str``
:param imageid: id of the image. If it is 1 the dummy CREATE-IMAGE is returned
:type imageid: ``str``
:return: Returns a flask response with the information about one image.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = dict()
i = resp['image'] = dict()
for image in self.api.compute.images.values():
if image.id == imageid or image.name == imageid:
i['id'] = image.id
i['name'] = image.name
return Response(json.dumps(resp), status=200,
mimetype="application/json")
response = Response(
"Image with id or name %s does not exists." % imageid, status=404)
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not retrieve image with id %s." %
(__name__, imageid))
return ex.message, 500
def delete(self, id, imageid):
"""
Removes the given image.
Does not really remove anything from the machine, just fakes an OK.
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
return Response("", status=204, mimetype="application/json")
class NovaShowServerDetails(Resource):
def __init__(self, api):
self.api = api
def get(self, id, serverid):
"""
Returns detailed information about the specified server.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:param serverid: Specifies the requested server.
:type serverid: ``str``
:return: Returns a flask response with details about the server.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
return Response(
"Server with id or name %s does not exists." % serverid, status=404)
s = server.create_server_dict()
s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (get_host(request),
self.api.port,
id,
server.id)}]
flavor = self.api.compute.flavors[server.flavor]
s['flavor'] = {
"id": flavor.id,
"links": [
{
"href": "http://%s:%d/v2.1/%s/flavors/%s" % (get_host(request),
self.api.port,
id,
flavor.id),
"rel": "bookmark"
}
]
}
image = self.api.compute.images[server.image]
s['image'] = {
"id": image.id,
"links": [
{
"href": "http://%s:%d/v2.1/%s/images/%s" % (get_host(request),
self.api.port,
id,
image.id),
"rel": "bookmark"
}
]
}
response = Response(json.dumps(
{'server': s}), status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(
u"%s: Could not retrieve the server details." % __name__)
return ex.message, 500
def delete(self, id, serverid):
"""
Delete a server instance.
:param id: tenant id, we ignore this most of the time
:type id: ``str``
:param serverid: The UUID of the server
:type serverid: ``str``
:return: Returns 204 if everything is fine.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s DELETE" % str(self.__class__.__name__))
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
return Response('Could not find server.',
status=404, mimetype="application/json")
self.api.compute.stop_compute(server)
response = Response('', status=204, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(u"%s: Could not create the server." % __name__)
return ex.message, 500
class NovaInterfaceToServer(Resource):
def __init__(self, api):
self.api = api
def post(self, id, serverid):
"""
Add an interface to the specified server.
:param id: tenant id, we ignore this most of the time
:type id: ``str``
:param serverid: Specifies the server.
:type serverid: ``str``
:return: Returns a flask response with information about the attached interface.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
return Response(
"Server with id or name %s does not exists." % serverid, status=404)
if server.emulator_compute is None:
LOG.error("The targeted container does not exist.")
return Response(
"The targeted container of %s does not exist." % serverid, status=404)
data = json.loads(request.data).get("interfaceAttachment")
resp = dict()
port = data.get("port_id", None)
net = data.get("net_id", None)
dc = self.api.compute.dc
network_dict = dict()
network = None
if net is not None and port is not None:
port = self.api.compute.find_port_by_name_or_id(port)
network = self.api.compute.find_network_by_name_or_id(net)
network_dict['id'] = port.intf_name
network_dict['ip'] = port.ip_address
network_dict[network_dict['id']] = network.name
elif net is not None:
network = self.api.compute.find_network_by_name_or_id(net)
if network is None:
return Response(
"Network with id or name %s does not exists." % net, status=404)
port = self.api.compute.create_port("port:cp%s:fl:%s" %
(len(self.api.compute.ports), str(uuid.uuid4())))
port.net_name = network.name
port.ip_address = network.get_new_ip_address(port.name)
network_dict['id'] = port.intf_name
network_dict['ip'] = port.ip_address
network_dict[network_dict['id']] = network.name
elif port is not None:
port = self.api.compute.find_port_by_name_or_id(port)
network_dict['id'] = port.intf_name
network_dict['ip'] = port.ip_address
network = self.api.compute.find_network_by_name_or_id(
port.net_name)
network_dict[network_dict['id']] = network.name
else:
raise Exception(
"You can only attach interfaces by port or network at the moment")
if network == self.api.manage.floating_network:
dc.net.addLink(server.emulator_compute, self.api.manage.floating_switch,
params1=network_dict, cls=Link, intfName1=port.intf_name)
else:
dc.net.addLink(server.emulator_compute, dc.switch,
params1=network_dict, cls=Link, intfName1=port.intf_name)
resp["port_state"] = "ACTIVE"
resp["port_id"] = port.id
resp["net_id"] = self.api.compute.find_network_by_name_or_id(
port.net_name).id
resp["mac_addr"] = port.mac_address
resp["fixed_ips"] = list()
fixed_ips = dict()
fixed_ips["ip_address"] = port.ip_address
fixed_ips["subnet_id"] = network.subnet_name
resp["fixed_ips"].append(fixed_ips)
response = Response(json.dumps(
{"interfaceAttachment": resp}), status=202, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(
u"%s: Could not add interface to the server." % __name__)
return ex.message, 500
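# For illustration only: the POST handler above reads an "interfaceAttachment" object
# that may reference a network, an existing port, or both (values are placeholders):
#
#   {"interfaceAttachment": {"net_id": "<network name or id>", "port_id": "<port name or id>"}}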
class NovaShowAndDeleteInterfaceAtServer(Resource):
def __init__(self, api):
self.api = api
def delete(self, id, serverid, port_id):
"""
Deletes an existing interface.
:param id: tenant id, we ignore this most of the time
:type id: ``str``
:param serverid: Specifies the server, where the interface will be deleted.
:type serverid: ``str``
:param port_id: Specifies the port of the interface.
:type port_id: ``str``
:return: Returns a flask response with 202 if everything worked out. Otherwise it will return 404 and an
error message.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
return Response(
"Server with id or name %s does not exists." % serverid, status=404)
port = self.api.compute.find_port_by_name_or_id(port_id)
if port is None:
return Response(
"Port with id or name %s does not exists." % port_id, status=404)
for link in self.api.compute.dc.net.links:
if str(link.intf1) == port.intf_name and \
str(link.intf1.ip) == port.ip_address.split('/')[0]:
self.api.compute.dc.net.removeLink(link)
break
response = Response("", status=202, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(
u"%s: Could not detach interface from the server." % __name__)
return ex.message, 500
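# For illustration only, a minimal sketch of detaching an interface from outside the
# emulator, assuming the requests library (not used elsewhere in this file) and the
# route registered for this resource in NovaDummyApi:
#
#   import requests
#   requests.delete("http://<emulator host>:<port>/v2.1/<tenant>/servers/<serverid>/os-interface/<port_id>")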
class NovaLimits(Resource):
def __init__(self, api):
self.api = api
def get(self, id):
"""
Returns the resource limits of the emulated cloud.
https://developer.openstack.org/api-ref/compute/?expanded=show-rate-and-absolute-limits-detail#limits-limits
TODO: For now we only return fixed limits, not based on the real deployment.
:param id: tenant id, used for the 'href' link
:type id: ``str``
:return: Returns the resource limits.
:rtype: :class:`flask.response`
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
resp = {
"limits": {
"absolute": {
"maxImageMeta": 12800,
"maxPersonality": 500,
"maxPersonalitySize": 1024000,
"maxSecurityGroupRules": 2000,
"maxSecurityGroups": 1000,
"maxServerMeta": 12800,
"maxTotalCores": 2000,
"maxTotalFloatingIps": 1000,
"maxTotalInstances": 1000,
"maxTotalKeypairs": 1000,
"maxTotalRAMSize": 5120000,
"maxServerGroups": 1000,
"maxServerGroupMembers": 1000,
"totalCoresUsed": 0,
"totalInstancesUsed": 0,
"totalRAMUsed": 0,
"totalSecurityGroupsUsed": 0,
"totalFloatingIpsUsed": 0,
"totalServerGroupsUsed": 0
},
"rate": []
}
}
response = Response(json.dumps(resp), status=200,
mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
LOG.exception(
u"%s: Could not retrieve the list of images." % __name__)
return ex.message, 500
|
|
from .utils import NamespacedClient, query_params, _make_path
class CatClient(NamespacedClient):
@query_params('h', 'help', 'local', 'master_timeout', 'v')
def aliases(self, name=None, params=None):
"""
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-alias.html>`_
:arg name: A comma-separated list of alias names to return
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg v: Verbose mode. Display column headers, default False
"""
_, data = self.transport.perform_request('GET', _make_path('_cat',
'aliases', name), params=params)
return data
@query_params('bytes', 'h', 'help', 'local', 'master_timeout', 'v')
def allocation(self, node_id=None, params=None):
"""
Allocation provides a snapshot of how shards are distributed around the
cluster and the state of disk usage.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-allocation.html>`_
:arg node_id: A comma-separated list of node IDs or names to limit the
returned information
:arg bytes: The unit in which to display byte values
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg v: Verbose mode. Display column headers, default False
"""
_, data = self.transport.perform_request('GET', _make_path('_cat',
'allocation', node_id), params=params)
return data
@query_params('h', 'help', 'local', 'master_timeout', 'v')
def count(self, index=None, params=None):
"""
Count provides quick access to the document count of the entire cluster,
or individual indices.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-count.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg v: Verbose mode. Display column headers, default False
"""
_, data = self.transport.perform_request('GET', _make_path('_cat',
'count', index), params=params)
return data
@query_params('h', 'help', 'local', 'master_timeout', 'ts', 'v')
def health(self, params=None):
"""
health is a terse, one-line representation of the same information from
:meth:`~elasticsearch.client.cluster.ClusterClient.health` API
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-health.html>`_
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg ts: Set to false to disable timestamping, default True
:arg v: Verbose mode. Display column headers, default False
"""
_, data = self.transport.perform_request('GET', '/_cat/health',
params=params)
return data
@query_params('help')
def help(self, params=None):
"""
A simple help for the cat api.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat.html>`_
:arg help: Return help information, default False
"""
_, data = self.transport.perform_request('GET', '/_cat', params=params)
return data
@query_params('bytes', 'h', 'help', 'local', 'master_timeout', 'pri', 'v')
def indices(self, index=None, params=None):
"""
The indices command provides a cross-section of each index.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-indices.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg bytes: The unit in which to display byte values
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg pri: Set to true to return stats only for primary shards, default
False
:arg v: Verbose mode. Display column headers, default False
"""
_, data = self.transport.perform_request('GET', _make_path('_cat',
'indices', index), params=params)
return data
@query_params('h', 'help', 'local', 'master_timeout', 'v')
def master(self, params=None):
"""
Displays the master's node ID, bound IP address, and node name.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-master.html>`_
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg v: Verbose mode. Display column headers, default False
"""
_, data = self.transport.perform_request('GET', '/_cat/master',
params=params)
return data
@query_params('h', 'help', 'local', 'master_timeout', 'v')
def nodes(self, params=None):
"""
The nodes command shows the cluster topology.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-nodes.html>`_
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg v: Verbose mode. Display column headers, default False
"""
_, data = self.transport.perform_request('GET', '/_cat/nodes',
params=params)
return data
@query_params('bytes', 'h', 'help', 'local', 'master_timeout', 'v')
def recovery(self, index=None, params=None):
"""
recovery is a view of shard replication.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-recovery.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg bytes: The unit in which to display byte values
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg v: Verbose mode. Display column headers, default False
"""
_, data = self.transport.perform_request('GET', _make_path('_cat',
'recovery', index), params=params)
return data
@query_params('h', 'help', 'local', 'master_timeout', 'v')
def shards(self, index=None, params=None):
"""
The shards command is the detailed view of what nodes contain which shards.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-shards.html>`_
:arg index: A comma-separated list of index names to limit the returned
information
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg v: Verbose mode. Display column headers, default False
"""
_, data = self.transport.perform_request('GET', _make_path('_cat',
'shards', index), params=params)
return data
@query_params('h', 'help', 'local', 'master_timeout', 'v')
def segments(self, index=None, params=None):
"""
The segments command is the detailed view of Lucene segments per index.
:arg index: A comma-separated list of index names to limit the returned
information
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg v: Verbose mode. Display column headers, default False
"""
_, data = self.transport.perform_request('GET', _make_path('_cat',
'segments', index), params=params)
return data
@query_params('h', 'help', 'local', 'master_timeout', 'v')
def pending_tasks(self, params=None):
"""
pending_tasks provides the same information as the
:meth:`~elasticsearch.client.cluster.ClusterClient.pending_tasks` API
in a convenient tabular format.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-pending-tasks.html>`_
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg v: Verbose mode. Display column headers, default False
"""
_, data = self.transport.perform_request('GET', '/_cat/pending_tasks',
params=params)
return data
@query_params('full_id', 'h', 'help', 'local', 'master_timeout', 'v')
def thread_pool(self, params=None):
"""
Get information about thread pools.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-thread-pool.html>`_
:arg full_id: Enables displaying the complete node ids (default: 'false')
:arg h: Comma-separated list of column names to display
:arg help: Return help information (default: 'false')
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg v: Verbose mode. Display column headers (default: 'false')
"""
_, data = self.transport.perform_request('GET', '/_cat/thread_pool',
params=params)
return data
@query_params('bytes', 'fields', 'h', 'help', 'local', 'master_timeout',
'v')
def fielddata(self, fields=None, params=None):
"""
Shows information about currently loaded fielddata on a per-node basis.
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/master/cat-fielddata.html>`_
:arg fields: A comma-separated list of fields to return the fielddata
size
:arg bytes: The unit in which to display byte values
:arg h: Comma-separated list of column names to display
:arg help: Return help information (default: 'false')
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg v: Verbose mode. Display column headers (default: 'false')
"""
_, data = self.transport.perform_request('GET', _make_path('_cat',
'fielddata', fields), params=params)
return data
@query_params('h', 'help', 'local', 'master_timeout', 'v')
def plugins(self, params=None):
"""
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/cat-plugins.html>`_
:arg h: Comma-separated list of column names to display
:arg help: Return help information, default False
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg master_timeout: Explicit operation timeout for connection to master
node
:arg v: Verbose mode. Display column headers, default False
"""
_, data = self.transport.perform_request('GET', '/_cat/plugins',
params=params)
return data
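# Illustrative usage sketch (not part of the client): it assumes the standard
# elasticsearch-py ``Elasticsearch`` client, which exposes this class as its
# ``cat`` attribute, and a node reachable on localhost; the index name is made up.
def _example_cat_usage():
    """Hypothetical example; requires a running Elasticsearch node."""
    from elasticsearch import Elasticsearch
    es = Elasticsearch()
    # Verbose output (v=True) adds column headers to the tabular responses.
    print(es.cat.health(v=True))
    print(es.cat.allocation(v=True))
    print(es.cat.count(index='my-index', v=True))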
|
|
#!/usr/bin/env python
"""t is for people that want do things, not organize their tasks."""
from __future__ import with_statement
import os, re, sys, hashlib
import time, datetime
from operator import itemgetter
from optparse import OptionParser, OptionGroup
class InvalidTaskfile(Exception):
"""Raised when the path to a task file already exists as a directory."""
pass
class AmbiguousPrefix(Exception):
"""Raised when trying to use a prefix that could identify multiple tasks."""
def __init__(self, prefix):
super(AmbiguousPrefix, self).__init__()
self.prefix = prefix
class UnknownPrefix(Exception):
"""Raised when trying to use a prefix that does not match any tasks."""
def __init__(self, prefix):
super(UnknownPrefix, self).__init__()
self.prefix = prefix
def _hash(text):
"""Return a hash of the given text for use as an id.
Currently SHA1 hashing is used. It should be plenty for our purposes.
"""
return hashlib.sha1(text.encode('utf-8')).hexdigest()
def _task_from_taskline(taskline):
"""Parse a taskline (from a task file) and return a task.
A taskline should be in the format:
summary text ... | meta1:meta1_value,meta2:meta2_value,...
The task returned will be a dictionary such as:
{ 'id': <hash id>,
'text': <summary text>,
... other metadata ... }
A taskline can also consist of only summary text, in which case the id
and other metadata will be generated when the line is read. This is
supported to enable editing of the taskfile with a simple text editor.
"""
if taskline.strip().startswith('#'):
return None
elif '|' in taskline:
text, _, meta = taskline.rpartition('|')
task = { 'text': text.strip() }
for piece in meta.strip().split(','):
label, data = piece.split(':')
task[label.strip()] = data.strip()
else:
text = taskline.strip()
task = { 'id': _hash(text), 'text': text }
return task
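# Worked example (hypothetical taskline): a line with metadata such as
#   "buy milk | id:7d9786,add_ts:1400000000"
# parses to {'text': 'buy milk', 'id': '7d9786', 'add_ts': '1400000000'},
# while a bare line "buy milk" gets a freshly generated SHA1 id:
#   _task_from_taskline('buy milk') == {'id': _hash('buy milk'), 'text': 'buy milk'}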
def _tasklines_from_tasks(tasks):
"""Parse a list of tasks into tasklines suitable for writing."""
tasklines = []
for task in tasks:
meta = [m for m in task.items() if m[0] != 'text']
meta_str = ', '.join('%s:%s' % m for m in meta)
tasklines.append('%s | %s\n' % (task['text'], meta_str))
return tasklines
def _prefixes(ids):
"""Return a mapping of ids to prefixes in O(n) time.
Each prefix will be the shortest possible substring of the ID that
can uniquely identify it among the given group of IDs.
If an ID of one task is entirely a substring of another task's ID, the
entire ID will be the prefix.
"""
ps = {}
for id in ids:
id_len = len(id)
for i in range(1, id_len+1):
# identifies an empty prefix slot, or a singular collision
prefix = id[:i]
if (prefix not in ps) or (ps[prefix] and prefix != ps[prefix]):
break
if prefix in ps:
# if there is a collision
other_id = ps[prefix]
for j in range(i, id_len+1):
if other_id[:j] == id[:j]:
ps[id[:j]] = ''
else:
ps[other_id[:j]] = other_id
ps[id[:j]] = id
break
else:
ps[other_id[:id_len+1]] = other_id
ps[id] = id
else:
# no collision, can safely add
ps[prefix] = id
ps = dict(zip(ps.values(), ps.keys()))
if '' in ps:
del ps['']
return ps
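# Worked example (hypothetical ids): for ids ['abcd', 'abef', 'xyz'], the
# prefixes 'a' and 'ab' are ambiguous, so the shortest unique prefixes are
#   _prefixes(['abcd', 'abef', 'xyz']) == {'abcd': 'abc', 'abef': 'abe', 'xyz': 'x'}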
def _format_time(ts):
"""Simple time formatting"""
dt = datetime.datetime.fromtimestamp(int(ts))
today = datetime.date.today()
fmt = ' '.join(filter(None, [
'%Y' if dt.year != today.year else '',
'%b %d' if (dt.month, dt.day) != (today.month, today.day) else '',
'%H:%M'
]))
return dt.strftime(fmt)
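# Example output (illustrative): a timestamp from earlier today renders as
# '14:05', one from another day this year as 'Mar 02 14:05', and one from a
# previous year as '2019 Mar 02 14:05'.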
class TaskDict(object):
"""A set of tasks, both finished and unfinished, for a given list.
The list's files are read from disk when the TaskDict is initialized. They
can be written back out to disk with the write() function.
"""
def __init__(self, taskdir='.', name='tasks'):
"""Initialize by reading the task files, if they exist."""
self.tasks = {}
self.done = {}
self.name = name
self.taskdir = taskdir
filemap = (('tasks', self.name), ('done', '.%s.done' % self.name))
for kind, filename in filemap:
path = os.path.join(os.path.expanduser(self.taskdir), filename)
if os.path.isdir(path):
raise InvalidTaskfile
if os.path.exists(path):
with open(path, 'r') as tfile:
tls = [tl.strip() for tl in tfile if tl]
tasks = map(_task_from_taskline, tls)
for task in tasks:
if task is not None:
getattr(self, kind)[task['id']] = task
def __getitem__(self, prefix):
"""Return the unfinished task with the given prefix.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, unless the prefix is the entire ID of one task.
If no tasks match the prefix an UnknownPrefix exception will be raised.
"""
matched = [tid for tid in self.tasks.keys() if tid.startswith(prefix)]
if len(matched) == 1:
return self.tasks[matched[0]]
elif len(matched) == 0:
raise UnknownPrefix(prefix)
else:
matched = [tid for tid in self.tasks.keys() if tid == prefix]
if len(matched) == 1:
return self.tasks[matched[0]]
else:
raise AmbiguousPrefix(prefix)
def add_task(self, text, track_time=False):
"""Add a new, unfinished task with the given summary text."""
task_id = _hash(text)
self.tasks[task_id] = {'id': task_id, 'text': text}
if track_time:
self.tasks[task_id]['add_ts'] = str(int(time.time()))
def edit_task(self, prefix, text):
"""Edit the task with the given prefix.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, unless the prefix is the entire ID of one task.
If no tasks match the prefix an UnknownPrefix exception will be raised.
"""
task = self[prefix]
if text.startswith('s/') or text.startswith('/'):
text = re.sub('^s?/', '', text).rstrip('/')
find, _, repl = text.partition('/')
text = re.sub(find, repl, task['text'])
task['text'] = text
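# Worked example (hypothetical task): for a task whose text is 'buy milk',
# edit_task(prefix, 's/milk/bread/') rewrites the text to 'buy bread', while
# a plain TEXT argument simply replaces the whole summary.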
def finish_task(self, prefix, track_time=False):
"""Mark the task with the given prefix as finished.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, if no tasks match it an UnknownPrefix exception will
be raised.
"""
task = self.tasks.pop(self[prefix]['id'])
if track_time:
task['finish_ts'] = str(int(time.time()))
self.done[task['id']] = task
def remove_task(self, prefix):
"""Remove the task from tasks list.
If more than one task matches the prefix an AmbiguousPrefix exception
will be raised, if no tasks match it an UnknownPrefix exception will
be raised.
"""
self.tasks.pop(self[prefix]['id'])
def print_list(self, kind='tasks', verbose=False, quiet=False, grep='',
track_time=False):
"""Print out a nicely formatted list of unfinished tasks."""
tasks = dict(getattr(self, kind).items())
label = 'prefix' if not verbose else 'id'
if not verbose:
for task_id, prefix in _prefixes(tasks).items():
tasks[task_id]['prefix'] = prefix
sorted_tasks = sorted(tasks.items())
plen = max(map(lambda t: len(t[label]), tasks.values())) if tasks else 0
if track_time:
tl = 'add_ts' if kind == 'tasks' else 'finish_ts'
for task in tasks.values():
task['time'] = _format_time(task[tl]) if tl in task else ''
tlen = max(len(t['time']) for t in tasks.values()) if tasks else 0
sorted_tasks = sorted(
sorted_tasks,
key=lambda i: int(i[1][tl]) if tl in i[1] else 0,
reverse=True)
for _, task in sorted_tasks:
if grep.lower() in task['text'].lower():
p = '%s - ' % task[label].ljust(plen) if not quiet else ''
if track_time and tlen:
p += '%s - ' % task['time'].ljust(tlen)
print(p + task['text'])
def write(self, delete_if_empty=False):
"""Flush the finished and unfinished tasks to the files on disk."""
filemap = (('tasks', self.name), ('done', '.%s.done' % self.name))
for kind, filename in filemap:
path = os.path.join(os.path.expanduser(self.taskdir), filename)
if os.path.isdir(path):
raise InvalidTaskfile
tasks = sorted(getattr(self, kind).values(), key=itemgetter('id'))
if tasks or not delete_if_empty:
with open(path, 'w') as tfile:
for taskline in _tasklines_from_tasks(tasks):
tfile.write(taskline)
elif not tasks and os.path.isfile(path):
os.remove(path)
def _build_parser():
"""Return a parser for the command-line interface."""
usage = "Usage: %prog [-t DIR] [-l LIST] [options] [TEXT]"
parser = OptionParser(usage=usage)
actions = OptionGroup(parser, "Actions",
"If no actions are specified the TEXT will be added as a new task.")
actions.add_option("-e", "--edit", dest="edit", default="",
help="edit TASK to contain TEXT", metavar="TASK")
actions.add_option("-f", "--finish", dest="finish",
help="mark TASK as finished", metavar="TASK")
actions.add_option("-r", "--remove", dest="remove",
help="Remove TASK from list", metavar="TASK")
parser.add_option_group(actions)
config = OptionGroup(parser, "Configuration Options")
config.add_option("-l", "--list", dest="name", default="tasks",
help="work on LIST", metavar="LIST")
config.add_option("-t", "--task-dir", dest="taskdir", default="",
help="work on the lists in DIR", metavar="DIR")
config.add_option("-d", "--delete-if-empty",
action="store_true", dest="delete", default=False,
help="delete the task file if it becomes empty")
config.add_option("--track-time", action="store_true", dest="track_time",
default=False, help="save and display time of adding tasks")
parser.add_option_group(config)
output = OptionGroup(parser, "Output Options")
output.add_option("-g", "--grep", dest="grep", default='',
help="print only tasks that contain WORD", metavar="WORD")
output.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="print more detailed output (full task ids, etc)")
output.add_option("-q", "--quiet",
action="store_true", dest="quiet", default=False,
help="print less detailed output (no task ids, etc)")
output.add_option("--done",
action="store_true", dest="done", default=False,
help="list done tasks instead of unfinished ones")
parser.add_option_group(output)
return parser
def _main():
"""Run the command-line interface."""
(options, args) = _build_parser().parse_args()
td = TaskDict(taskdir=options.taskdir, name=options.name)
text = ' '.join(args).strip()
try:
if options.finish:
td.finish_task(options.finish, track_time=options.track_time)
td.write(options.delete)
elif options.remove:
td.remove_task(options.remove)
td.write(options.delete)
elif options.edit:
td.edit_task(options.edit, text)
td.write(options.delete)
elif text:
td.add_task(text, track_time=options.track_time)
td.write(options.delete)
else:
kind = 'tasks' if not options.done else 'done'
td.print_list(kind=kind, verbose=options.verbose, quiet=options.quiet,
grep=options.grep, track_time=options.track_time)
except AmbiguousPrefix:
e = sys.exc_info()[1]
sys.stderr.write('The ID "%s" matches more than one task.\n' % e.prefix)
except UnknownPrefix:
e = sys.exc_info()[1]
sys.stderr.write('The ID "%s" does not match any task.\n' % e.prefix)
if __name__ == '__main__':
_main()
|
|
""" Roosa - A RESTful Ontology Server and Applications """
__author__ = "Jaakko Salonen, Juha Nurmi"
__copyright__ = "Copyright 2012, Jaakko Salonen, Juha Nurmi"
__version__ = "0.2.0"
__license__ = "MIT"
__status__ = "Prototype"
from flask import Flask, Blueprint, url_for, render_template, Response, make_response, request
from flask_rest import RESTResource
from rdflib import Graph, Namespace, plugin, query
from rdflib.serializer import Serializer
from curie import uri2curie, curie2uri
import os
import xml.etree.ElementTree # If I don't load this here I get error: AttributeError: 'module' object has no attribute 'ElementTree'
import rdflib
import json
import mimetypes
# Register RDFLib plugins
plugin.register('sparql', query.Processor, 'rdfextras.sparql.processor', 'Processor')
plugin.register('sparql', query.Result, 'rdfextras.sparql.query', 'SPARQLQueryResult')
app = Flask(__name__)
app.REST_API_URL = 'http://localhost/api/resources/'
app.g = Graph()
app.debug = True
app.nss = \
dict(
dc=Namespace("http://purl.org/dc/elements/1.1/"),
rdf=Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#"),
owl=Namespace("http://www.w3.org/2002/07/owl#"),
vin=Namespace("http://www.w3.org/TR/2003/PR-owl-guide-20031209/wine#")
)
app.g.parse('data/wine.rdf')
def serve_file(filepath):
with open('./public/'+filepath, "r") as f:
(mimetype_prefix, mimetype_suffix) = mimetypes.guess_type(filepath)
text = f.read()
return Response(text, mimetype=mimetype_prefix)
@app.route('/<path:filepath>')
def public(filepath):
try:
return serve_file(filepath)
except IOError:
try:
return serve_file(filepath+'index.html')
except IOError:
current = ''
parts = filepath.split('/')
for part in parts:
current += part+'/'
try:
print "serve_file(%s)" % current
return serve_file(current)
except IOError:
pass
try:
print "serve_file(%sindex.html)" % current
return serve_file(current+'index.html')
except IOError:
pass
return "Not Found", 404
@app.route('/')
def index():
return public('index.html')
@app.route("/graph/<apicall>")
def graph(apicall):
f = open('./client/index.html', "r")
text = f.read()
f.close()
return text
"""
@app.route("/browser/")
@app.route("/browser/<uri>")
def browse_uri(uri=None):
resource = {}
resources = {}
if uri:
instance_uri = curie2uri(uri, app.nss)
search = app.g.triples((instance_uri, None, None))
resource['data'] = [ (uri2curie(p, app.nss), uri2curie(o, app.nss)) for s, p, o in search ]
search = app.g.triples((None, None, instance_uri))
resource['backlinks'] = [ (uri2curie(s, app.nss), uri2curie(p, app.nss)) for s, p, o in search ]
else:
# URI not given -> display index
for s, p, o in app.g:
curie = uri2curie(s, app.nss)
if resources.has_key(curie):
resources[curie]['triples'] += 1
else:
resources[curie] = {'triples': 1}
return render_template('browser.html', uri=uri,
resource=resource, resources=resources)
"""
@app.route('/api/namespaces/')
def api_namespaces_index(method=['GET']):
return Response(json.dumps(app.nss), mimetype='application/json')
@app.route('/api/resources/')
def api_resources_index(method=['GET']):
# Get list of all mentioned URIs
uris = []
for s, p, o in app.g.triples((None, None, None)):
if type(s) is rdflib.URIRef and s not in uris:
uris.append(s)
if type(o) is rdflib.URIRef and o not in uris:
uris.append(o)
return Response(json.dumps(uris), mimetype='application/json')
@app.route('/api/resources/<path:curie_or_uri>')
def api_resources(curie_or_uri=None, methods=['GET']):
if request.method == 'GET':
return api_resources_get(curie_or_uri)
else:
raise Exception("Invalid method")
def api_resources_get(curie_or_uri):
# Result resource
result = Graph()
# Resolve full URI
uri = curie2uri(curie_or_uri, app.nss)
print(uri)
# <instance_uri> ?predicate ?object
search = app.g.triples((uri, None, None))
for t in search:
result.add(t)
# ?subject ?predicate <instance_uri>
search = app.g.triples((None, None, uri))
for t in search:
result.add(t)
# Nothing found? -> return 404 not found
if len(result) == 0:
return "Not Found", 404
# Return
return Response(result.serialize(format='json-ld', indent=4), mimetype='application/json')
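# Illustrative request (the CURIE is hypothetical): a resource can be fetched
# by CURIE or full URI; the CURIE is expanded against app.nss and every triple
# with that URI as subject or object is returned as JSON-LD, e.g.
#
#   curl http://localhost:8080/api/resources/vin:ChateauMorgonBeaujolais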
"""
# Blueprint for API
api = Blueprint("api", __name__, url_prefix="/api")
class InstanceHandler(object):
def get(self, instance_id):
print(instance_id)
# Result resource
result_resource = {}
result_graph = Graph()
# Resolve full URI
instance_uri = curie2uri(instance_id, app.nss)
# <instance_uri> ?predicate ?object
search = app.g.triples((instance_uri, None, None))
result_resource['data'] = [
(uri2curie(p, app.nss), uri2curie(o, app.nss)) for s, p, o in search
]
# JSON-LD: <instance_uri> ?predicate ?object
search = app.g.triples((instance_uri, None, None))
for t in search:
result_graph.add(t)
# ?subject ?predicate <instance_uri>
search = app.g.triples((None, None, instance_uri))
result_resource['backlinks'] = [
(uri2curie(s, app.nss), uri2curie(p, app.nss)) for s, p, o in search
]
# JSON-LD: ?subject ?predicate <instance_uri>
search = app.g.triples((None, None, instance_uri))
for t in search:
result_graph.add(t)
# Nothing found? -> return 404 not found
if (not result_resource['data'] and
not result_resource['backlinks']):
return 404, "Not Found"
# Return result resource
#return 200, result_resource
# Return JSON-LD
return 200, json.loads(result_graph.serialize(format='json-ld', indent=4))
def add(self, instance_id):
return self.update(instance_id, update=False)
def update(self, instance_id, update=True):
# Resolve full URI
instance_uri = curie2uri(instance_id, app.nss)
triples = []
# Process data: JSON
if request.headers['content-type'] == 'application/json':
# parse
try:
data_json = json.loads(request.data)
except ValueError:
# Bad data
return 400, "Bad Request"
# Triples in
for p, o in data_json['data']:
p_uri = curie2uri(p, app.nss)
o_uri = curie2uri(o, app.nss)
triples.append( (instance_uri, p_uri, o_uri) )
# Triples out
for o, p in data_json['backlinks']:
p_uri = curie2uri(p, app.nss)
o_uri = curie2uri(o, app.nss)
triples.append( (o_uri, p_uri, instance_uri) )
else:
# Unknown data type -> error
return 400, "Bad Request"
# Remove old triples
#if update:
# app.g.remove((instance_uri, None, None))
# app.g.remove((None, None, instance_uri))
# Add triples to graph
for s, p, o in triples:
app.g.add((s, p, o))
# Response
return 200, "OK"
def delete(self, instance_id):
# Resolve full URI
instance_uri = curie2uri(instance_id, app.nss)
# Not found?
# TODO: refactor
triples = app.g.triples((None, None, instance_uri))
count = 0
for t in triples:
count += 1
if count == 0:
# Not found
return 404, "Not Found"
# Remove triples where this URI is the subject
app.g.remove((instance_uri, None, None))
# Remove triples where this URI is the object
app.g.remove((None, None, instance_uri))
# Success
return 200, "Deleted"
instances_resource = RESTResource(
name="instance",
route="/resources",
app=api,
actions=["add", "update", "delete", "get"],
handler=InstanceHandler())
# Register blueprints
app.register_blueprint(api)
"""
if __name__ == "__main__":
# Run app in debug mode
app.run(port=8080, debug=app.debug, host='0.0.0.0')
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import unittest, json
import importlib
from frappe.modules import load_doctype_module, get_module_name
from frappe.utils import cstr
def main(app=None, module=None, doctype=None, verbose=False, tests=(), force=False):
frappe.flags.print_messages = verbose
frappe.flags.in_test = True
if not frappe.db:
frappe.connect()
# if not frappe.conf.get("db_name").startswith("test_"):
# raise Exception, 'db_name must start with "test_"'
# workaround! since there is no separate test db
frappe.clear_cache()
set_test_email_config()
if verbose:
print 'Running "before_tests" hooks'
for fn in frappe.get_hooks("before_tests", app_name=app):
frappe.get_attr(fn)()
if doctype:
ret = run_tests_for_doctype(doctype, verbose=verbose, tests=tests, force=force)
elif module:
ret = run_tests_for_module(module, verbose=verbose, tests=tests)
else:
ret = run_all_tests(app, verbose)
frappe.db.commit()
# workaround! since there is no separate test db
frappe.clear_cache()
return ret
def set_test_email_config():
frappe.conf.update({
"auto_email_id": "[email protected]",
"mail_server": "smtp.example.com",
"mail_login": "[email protected]",
"mail_password": "test",
"admin_password": "admin"
})
def run_all_tests(app=None, verbose=False):
import os
apps = [app] if app else frappe.get_installed_apps()
test_suite = unittest.TestSuite()
for app in apps:
for path, folders, files in os.walk(frappe.get_pymodule_path(app)):
for dontwalk in ('locals', '.git', 'public'):
if dontwalk in folders:
folders.remove(dontwalk)
# print path
for filename in files:
filename = cstr(filename)
if filename.startswith("test_") and filename.endswith(".py"):
# print filename[:-3]
_add_test(path, filename, verbose, test_suite=test_suite)
return unittest.TextTestRunner(verbosity=1+(verbose and 1 or 0)).run(test_suite)
def run_tests_for_doctype(doctype, verbose=False, tests=(), force=False):
module = frappe.db.get_value("DocType", doctype, "module")
test_module = get_module_name(doctype, module, "test_")
if force:
for name in frappe.db.sql_list("select name from `tab%s`" % doctype):
frappe.delete_doc(doctype, name, force=True)
make_test_records(doctype, verbose=verbose, force=force)
module = frappe.get_module(test_module)
return _run_unittest(module, verbose=verbose, tests=tests)
def run_tests_for_module(module, verbose=False, tests=()):
module = importlib.import_module(module)
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype, verbose=verbose)
return _run_unittest(module=module, verbose=verbose, tests=tests)
def _run_unittest(module, verbose=False, tests=()):
test_suite = unittest.TestSuite()
module_test_cases = unittest.TestLoader().loadTestsFromModule(module)
if tests:
for each in module_test_cases:
for test_case in each.__dict__["_tests"]:
if test_case.__dict__["_testMethodName"] in tests:
test_suite.addTest(test_case)
else:
test_suite.addTest(module_test_cases)
return unittest.TextTestRunner(verbosity=1+(verbose and 1 or 0)).run(test_suite)
def _add_test(path, filename, verbose, test_suite=None):
import os, imp
if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
# in /doctype/doctype/boilerplate/
return
if not test_suite:
test_suite = unittest.TestSuite()
if os.path.basename(os.path.dirname(path))=="doctype":
txt_file = os.path.join(path, filename[5:].replace(".py", ".json"))
with open(txt_file, 'r') as f:
doc = json.loads(f.read())
doctype = doc["name"]
make_test_records(doctype, verbose)
module = imp.load_source(filename[:-3], os.path.join(path, filename))
test_suite.addTest(unittest.TestLoader().loadTestsFromModule(module))
def make_test_records(doctype, verbose=0, force=False):
if not frappe.db:
frappe.connect()
for options in get_dependencies(doctype):
if options == "[Select]":
continue
if options not in frappe.local.test_objects:
frappe.local.test_objects[options] = []
make_test_records(options, verbose, force)
make_test_records_for_doctype(options, verbose, force)
def get_modules(doctype):
module = frappe.db.get_value("DocType", doctype, "module")
try:
test_module = load_doctype_module(doctype, module, "test_")
if test_module:
reload(test_module)
except ImportError:
test_module = None
return module, test_module
def get_dependencies(doctype):
module, test_module = get_modules(doctype)
meta = frappe.get_meta(doctype)
link_fields = meta.get_link_fields()
for df in meta.get_table_fields():
link_fields.extend(frappe.get_meta(df.options).get_link_fields())
options_list = [df.options for df in link_fields] + [doctype]
if hasattr(test_module, "test_dependencies"):
options_list += test_module.test_dependencies
options_list = list(set(options_list))
if hasattr(test_module, "test_ignore"):
for doctype_name in test_module.test_ignore:
if doctype_name in options_list:
options_list.remove(doctype_name)
return options_list
def make_test_records_for_doctype(doctype, verbose=0, force=False):
module, test_module = get_modules(doctype)
if verbose:
print "Making for " + doctype
if hasattr(test_module, "_make_test_records"):
frappe.local.test_objects[doctype] += test_module._make_test_records(verbose)
elif hasattr(test_module, "test_records"):
frappe.local.test_objects[doctype] += make_test_objects(doctype, test_module.test_records, verbose)
else:
test_records = frappe.get_test_records(doctype)
if test_records:
frappe.local.test_objects[doctype] += make_test_objects(doctype, test_records, verbose)
elif verbose:
print_mandatory_fields(doctype)
def make_test_objects(doctype, test_records, verbose=None):
records = []
if not frappe.get_meta(doctype).issingle:
existing = frappe.get_all(doctype, filters={"name":("like", "_T-" + doctype + "-%")})
if existing:
return [d.name for d in existing]
existing = frappe.get_all(doctype, filters={"name":("like", "_Test " + doctype + "%")})
if existing:
return [d.name for d in existing]
for doc in test_records:
if not doc.get("doctype"):
doc["doctype"] = doctype
d = frappe.copy_doc(doc)
if doc.get('name'):
d.name = doc.get('name')
if frappe.local.test_objects.get(d.doctype):
# do not create test records, if already exists
return []
if d.meta.get_field("naming_series"):
if not d.naming_series:
d.naming_series = "_T-" + d.doctype + "-"
# submit if docstatus is set to 1 for test record
docstatus = d.docstatus
d.docstatus = 0
d.run_method("before_test_insert")
try:
d.insert()
if docstatus == 1:
d.submit()
except frappe.NameError:
pass
records.append(d.name)
frappe.db.commit()
return records
def print_mandatory_fields(doctype):
print "Please setup make_test_records for: " + doctype
print "-" * 60
meta = frappe.get_meta(doctype)
print "Autoname: " + (meta.autoname or "")
print "Mandatory Fields: "
for d in meta.get("fields", {"reqd":1}):
print d.parent + ":" + d.fieldname + " | " + d.fieldtype + " | " + (d.options or "")
print
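# Illustrative shape of a doctype test module that the runner above looks for
# (doctype names are hypothetical); only the attributes actually present are
# used:
#
#   test_dependencies = ["Customer"]   # extra doctypes to make records for first
#   test_ignore = ["Sales Invoice"]    # linked doctypes to skip
#   test_records = [{"doctype": "ToDo", "description": "_Test ToDo 1"}]
#
#   class TestToDo(unittest.TestCase):
#       def test_insert(self):
#           ...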
|
|
from __future__ import print_function
import unittest
import numpy as np
import pyqg
class PyqgModelTester(unittest.TestCase):
def setUp(self):
# need to eliminate beta and U for tests
self.m = pyqg.QGModel(beta=0., U1=0., U2=0., filterfac=0.)
# the maximum wavelengths to use in tests
# if we go to higher wavelengths, we don't get machine precision
self.kwavemax = int(self.m.nx/8)
self.lwavemax = int(self.m.ny/8)
def test_fft2(self, rtol=1e-15):
"""Check whether pyqg fft functions produce the expected results."""
# define a field with a known Fourier transform
# if I go higher than a factor of 8, the tests start failing
for kwave in range(1, self.kwavemax):
for lwave in range(1, self.lwavemax):
k = 2*np.pi*kwave/self.m.L
l = 2*np.pi*lwave/self.m.W
# check whether these are the correct wavenumbers
np.testing.assert_allclose(self.m.kk[kwave], k, rtol,
err_msg='Incorrect wavenumber (kwave=%g)' % kwave)
np.testing.assert_allclose(self.m.ll[lwave], l, rtol,
err_msg='Incorrect wavenumber (lwave=%g)' % lwave)
np.testing.assert_allclose(self.m.ll[-lwave], -l, rtol,
err_msg='Incorrect wavenumber (lwave=%g)' % lwave)
q1 = np.cos(k * self.m.x )
q2 = np.sin(l * self.m.y)
# assign it to the PV - FFT should also get taken at this point
self.m.set_q1q2(q1, q2)
# amplitude of RFFT
qhamp = np.real(self.m.qh * self.m.qh.conj())
# expected amplitude of RFFT
amp = (self.m.nx/2)**2 * self.m.ny**2
# only have one Fourier component for k-axis
np.testing.assert_allclose(qhamp[0,0,kwave], amp, rtol,
err_msg='Incorrect wave amplitude from FFT (kwave=%g)' % kwave)
# two symmetric pairs for l-axis
np.testing.assert_allclose(qhamp[1,lwave,0], amp, rtol,
err_msg='Incorrect wave amplitude from FFT (lwave=%g)' % lwave)
np.testing.assert_allclose(qhamp[1,-lwave,0], amp, rtol,
err_msg='Incorrect wave amplitude from FFT (lwave=%g)' % lwave)
# now mask those components
qhamp_mask = np.ma.masked_array(qhamp, np.zeros_like(qhamp))
qhamp_mask.mask[0,0,kwave] = 1
qhamp_mask.mask[1,lwave,0] = 1
qhamp_mask.mask[1,-lwave,0] = 1
# and make sure everything else is zero
np.testing.assert_allclose(qhamp_mask.filled(0.), 0.,
rtol=0., atol=rtol,
err_msg='Incorrect wave amplitude from FFT')
def test_inversion_barotropic(self, rtol=1e-13):
"""Check whether inverting a barotropic PV gives desired result.
Can't get it to work with rtol < 1e-13 """
# for barotropic, $q = \nabla^2 \psi$
# $\hat{q} = -(k^2 + l^2) \hat \psi$
#
# velocity: u = -dpsi/dy, v = dpsi/dx
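# Worked derivation for the fields checked below (per wavenumber pair):
#   q   =  cos(k x) + sin(l y)
#   psi = -k**-2 * cos(k x) - l**-2 * sin(l y)   (so that nabla^2 psi = q)
#   u   = -dpsi/dy =  l**-1 * cos(l y)
#   v   =  dpsi/dx =  k**-1 * sin(k x)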
for kwave in range(1, self.kwavemax):
for lwave in range(1, self.lwavemax):
k = 2*np.pi*kwave/self.m.L
l = 2*np.pi*lwave/self.m.W
q = np.cos(k * self.m.x ) + np.sin(l * self.m.y)
psi = -k**-2 * np.cos(k * self.m.x ) - l**-2 * np.sin(l * self.m.y)
u = l**-1 * np.cos(l * self.m.y)
v = k**-1 * np.sin(k * self.m.x)
self.m.set_q1q2(q, q)
self.m._invert()
for nz in range(self.m.nz):
np.testing.assert_allclose(self.m.u[nz], u, rtol,
err_msg='Incorrect velocity from barotropic pv inversion')
np.testing.assert_allclose(self.m.v[nz], v, rtol,
err_msg='Incorrect velocity from barotropic pv inversion')
def test_inversion_baroclinic(self, rtol=1e-13):
"""Check whether inverting a baroclinic PV gives desired result."""
# need to think about how to implement this
pass
def test_change_inversion_matrix(self):
"""Make sure we can change the inversion matrix after kernel has been
initialized."""
a_new = np.random.rand(self.m.nz, self.m.nz, self.m.nl, self.m.nk)
self.m.a = a_new
np.testing.assert_allclose(a_new, self.m.a)
def test_advection(self, rtol=1e-14):
"""Check whether calculating advection tendency gives the descired result."""
# sin(2 a) = 2 sin(a) cos(a)
for kwave in range(1, self.kwavemax):
for lwave in range(1, self.lwavemax):
k = 2*np.pi*kwave/self.m.L
l = 2*np.pi*lwave/self.m.W
q1 = np.cos(k * self.m.x )
q2 = np.sin(l * self.m.y )
self.m.set_q1q2(q1, q2)
# manually set velocity
u1 = np.sin(k * self.m.x)
v1 = np.zeros_like(self.m.y)
u2 = np.zeros_like(self.m.x)
v2 = np.cos(l * self.m.y)
self.m.u[0] = u1
self.m.v[0] = v1
self.m.u[1] = u2
self.m.v[1] = v2
self.m.set_U1U2(0.,0.)
# calculate tendency
#self.m._advection_tendency()
self.m._do_advection()
dqhdt_adv = self.m.dqhdt
# expected amplitude of RFFT
amp = (self.m.nx/2)**2 * self.m.ny**2
tabs = np.real(dqhdt_adv * dqhdt_adv.conj())
# these tests pass, but what about the factor of two?
np.testing.assert_allclose(tabs[0,0,2*kwave], k**2 * amp, rtol,
err_msg='Incorrect advection tendency k (%g,%g)' % (lwave,kwave))
np.testing.assert_allclose(tabs[1,2*lwave,0], l**2 * amp, rtol,
err_msg='Incorrect advection tendency +l (%g,%g)' % (lwave,kwave))
np.testing.assert_allclose(tabs[1,-2*lwave,0], l**2 * amp, rtol,
err_msg='Incorrect advection tendency -l (%g,%g)' % (lwave,kwave))
# now mask those components
tabs_mask = np.ma.masked_array(tabs, np.zeros_like(tabs))
tabs_mask.mask[0,0,2*kwave] = 1
tabs_mask.mask[1,2*lwave,0] = 1
tabs_mask.mask[1,-2*lwave,0] = 1
# and make sure everything else is zero
if np.any(np.isnan(tabs_mask.filled(0.))):
print("Found NaNs")
np.testing.assert_allclose(tabs_mask.filled(0.), 0.,
rtol=0., atol=rtol,
err_msg='Incorrect advection tendency (%g,%g)' % (lwave,kwave))
def test_friction(self, rtol=1e-15):
"""Check whether calculating advection tendency gives the expected result."""
# sin(2 a) = 2 sin(a) cos(a)
for kwave in range(1, self.kwavemax):
for lwave in range(1, self.lwavemax):
k = 2*np.pi*kwave/self.m.L
l = 2*np.pi*lwave/self.m.W
q1 = np.cos(k * self.m.x )
q2 = np.sin(l * self.m.y )
self.m.set_q1q2(q1, q2)
self.m._invert()
# make sure tendency is zero
self.m.dqhdt[:] = 0.
self.m._do_friction()
# from code
#self.dqhdt[-1] += self.rek * self.wv2 * self.ph[-1]
expected = self.m.rek * self.m.wv2 * self.m.ph[-1]
np.testing.assert_allclose(self.m.dqhdt[-1], expected, rtol,
err_msg='Ekman friction was wrong.')
np.testing.assert_allclose(self.m.dqhdt[:-1], 0., rtol,
err_msg='Nonzero friction found in upper layers.')
def test_timestepping(self, rtol=1e-15):
"""Make sure timstepping works properly."""
# set initial conditions to zero
self.m.set_q(np.zeros_like(self.m.q))
# create a random tendency
dqhdt = np.random.rand(*self.m.dqhdt.shape) + 1j*np.random.rand(*self.m.dqhdt.shape)
self.m.dqhdt[:] = dqhdt
# hack filter to be constant
#self.m.filtr = 1.
# make sure we are at the zero timestep
self.assertEqual(self.m.tc, 0)
# step forward first time (should use forward Euler)
self.m._forward_timestep()
np.testing.assert_allclose(self.m.qh, 1*self.m.dt*dqhdt,
err_msg='First timestep incorrect')
# step forward second time (should use AB2)
self.m._forward_timestep()
np.testing.assert_allclose(self.m.qh, 2*self.m.dt*dqhdt,
err_msg='Second timestep incorrect')
# step forward third time (should use AB3)
self.m._forward_timestep()
np.testing.assert_allclose(self.m.qh, 3*self.m.dt*dqhdt,
err_msg='Third timestep incorrect')
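# Why n * dt * dqhdt is expected: with a constant tendency F, forward Euler
# gives qh_1 = dt*F; AB2 gives qh_2 = qh_1 + dt*(3/2*F - 1/2*F) = 2*dt*F; and
# AB3 gives qh_3 = qh_2 + dt*(23/12*F - 16/12*F + 5/12*F) = 3*dt*F, so every
# scheme advances by exactly dt*F per step when the tendency never changes.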
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import threading
import unittest
from xmanager.xm import core
from xmanager.xm import job_blocks
from xmanager.xm import testing
from xmanager.xm import utils
class TestError(RuntimeError):
"""Exception which can be used in tests below."""
async def failing_job_generator(work_unit: core.WorkUnit):
raise TestError
class ApplyArgsTest(unittest.TestCase):
def test_wrong_job_args(self):
with self.assertRaises(ValueError):
core._apply_args(
job_blocks.Job(
job_blocks.Executable(name=''), testing.TestExecutor()),
{'abra': 'kadabra'})
def test_wrong_job_group_args(self):
with self.assertRaises(ValueError):
core._apply_args(
job_blocks.JobGroup(
learner=job_blocks.Job(
job_blocks.Executable(name=''), testing.TestExecutor())),
{'eval': {
'args': {
'batch_size': 32
}
}})
class ExperimentTest(unittest.TestCase):
def test_single_job_launch(self):
experiment = testing.TestExperiment()
with experiment:
job = job_blocks.Job(
testing.TestExecutable(),
testing.TestExecutor(),
args={},
name='name')
experiment.add(job)
self.assertEqual(experiment.launched_jobs, [job])
def test_job_group_launch(self):
experiment = testing.TestExperiment()
with experiment:
foo_job = job_blocks.Job(
testing.TestExecutable(),
testing.TestExecutor(),
args={'foo': 1},
name='1')
bar_job = job_blocks.Job(
testing.TestExecutable(),
testing.TestExecutor(),
args={'bar': 2},
name='2')
experiment.add(job_blocks.JobGroup(foo=foo_job, bar=bar_job))
self.assertEqual(experiment.launched_jobs, [foo_job, bar_job])
def test_job_generator_launch(self):
experiment = testing.TestExperiment()
with experiment:
job = job_blocks.Job(
testing.TestExecutable(),
testing.TestExecutor(),
args={},
name='name')
async def job_generator(work_unit: core.WorkUnit, use_magic: bool):
self.assertEqual(use_magic, True)
work_unit.add(job)
experiment.add(job_generator, args={'use_magic': True})
self.assertEqual(experiment.launched_jobs, [job])
self.assertEqual(experiment.launched_jobs_args, [{'use_magic': True}])
def test_job_generator_raises(self):
experiment = testing.TestExperiment()
with self.assertRaises(TestError):
with experiment:
experiment.add(failing_job_generator)
def test_non_async_job_generator_raises_user_friendly_exception(self):
with self.assertRaisesRegex(ValueError, '.* generator must be an async .*'):
with testing.TestExperiment() as experiment:
def job_generator(work_unit: core.WorkUnit):
del work_unit
experiment.add(job_generator)
def test_auxiliary_unit_job(self):
experiment = testing.TestExperiment()
with experiment:
job = job_blocks.Job(
testing.TestExecutable(),
testing.TestExecutor(),
args={},
name='name')
experiment.add(core.AuxiliaryUnitJob(job, termination_delay_secs=600))
self.assertEqual(len(experiment.auxiliary_units), 1)
def test_auxiliary_unit_job_generator(self):
experiment = testing.TestExperiment()
with experiment:
async def make_job(aux_unit: core.ExperimentUnit):
aux_unit.add(
job_blocks.Job(
testing.TestExecutable(),
testing.TestExecutor(),
args={},
name='name'))
experiment.add(
core.AuxiliaryUnitJob(make_job, termination_delay_secs=600))
self.assertEqual(len(experiment.auxiliary_units), 1)
def test_launch_with_args(self):
experiment = testing.TestExperiment()
with experiment:
experiment.add(
job_blocks.JobGroup(
foo=job_blocks.Job(
testing.TestExecutable(),
testing.TestExecutor(),
args={
'x': 1,
'y': 2
},
env_vars={'EDITOR': 'vi'}),
bar=job_blocks.Job(
testing.TestExecutable(),
testing.TestExecutor(),
args=['--bar=1'])),
args={
'foo': {
'args': {
'x': 3,
'z': 4
},
'env_vars': {
'TURBO': 'ON'
}
},
'bar': {
'args': ['--spacebar']
},
})
self.assertEqual(
experiment.launched_jobs[0].args,
job_blocks.SequentialArgs.from_collection({
'x': 3,
'y': 2,
'z': 4
}),
)
self.assertEqual(experiment.launched_jobs[0].env_vars, {
'TURBO': 'ON',
'EDITOR': 'vi'
})
self.assertEqual(
experiment.launched_jobs[1].args,
job_blocks.SequentialArgs.from_collection(['--bar=1', '--spacebar']),
)
def test_add_runs_asynchronously(self):
generator_called = threading.Event()
with testing.TestExperiment() as experiment:
async def job_generator(work_unit: core.WorkUnit):
del work_unit
generator_called.set()
experiment.add(job_generator)
# Validate that job_generator is executed in a parallel thread.
self.assertTrue(generator_called.wait(timeout=5))
@utils.run_in_asyncio_loop
async def test_loop_is_reused_in_coro_context(self):
loop = asyncio.get_event_loop()
async with testing.TestExperiment() as experiment:
async def job_generator(work_unit: core.WorkUnit):
del work_unit
self.assertEqual(asyncio.get_event_loop(), loop)
experiment.add(job_generator)
@utils.run_in_asyncio_loop
async def test_sync_with_cant_be_used_in_coro_context(self):
# `async with` works.
async with testing.TestExperiment():
pass
with self.assertRaises(RuntimeError):
# But `with` raises an exception.
with testing.TestExperiment():
pass
@utils.run_in_asyncio_loop
async def test_work_unit_wait_until_complete(self):
experiment = testing.TestExperiment()
async with experiment:
experiment.add(
job_blocks.Job(
testing.TestExecutable(), testing.TestExecutor(), args={}))
await experiment.work_units[0].wait_until_complete()
@utils.run_in_asyncio_loop
async def test_work_unit_wait_until_complete_exception(self):
experiment = testing.TestExperiment()
with self.assertRaises(TestError):
async with experiment:
experiment.add(failing_job_generator)
with self.assertRaises(core.ExperimentUnitError):
await experiment.work_units[0].wait_until_complete()
@utils.run_in_asyncio_loop
async def test_get_full_job_name(self):
async def generator(work_unit):
self.assertEqual(work_unit.get_full_job_name('name'), '1_1_name')
async with testing.TestExperiment() as experiment:
experiment.add(generator)
if __name__ == '__main__':
unittest.main()
|
|
from datetime import timedelta
import json
from flask import Flask, request, make_response
from mock import patch, MagicMock
from cpucoolerchart import crawler
from cpucoolerchart._compat import to_native
from cpucoolerchart.crawler import update_data
from cpucoolerchart.extensions import db, cache
from cpucoolerchart.views import crossdomain
import cpucoolerchart.views
from .conftest import app, read_file, fill_data
class TestViews(object):
def setup(self):
self.app = app()
self.app.testing = True
self.client = self.app.test_client()
self.ctx = self.app.app_context()
self.ctx.push()
db.drop_all()
db.create_all()
def teardown(self):
db.session.close()
db.drop_all()
db.get_engine(self.app).dispose()
self.ctx.pop()
def test_crossdomain(self):
app = Flask('__test__')
app.config.update({
'ACCESS_CONTROL_ALLOW_ORIGIN': 'http://foo.bar'
})
client = app.test_client()
@app.route('/', methods=('GET', 'PUT', 'OPTIONS'))
@crossdomain()
def index():
return 'Hello, world!'
@app.route('/foo', methods=('GET', 'OPTIONS'))
@crossdomain(
origin=('http://foo.bar', 'http://foo2.bar'),
methods=('GET',),
headers=('X-FOO', 'X-BAR'),
max_age=timedelta(hours=10),
automatic_options=False,
attach_to_all=False,
)
def foo():
if request.method == 'OPTIONS':
return make_response('.', 200, {'Allow': 'GET, OPTIONS, HEAD'})
else:
return 'foo'
resp = client.get('/')
assert resp.headers['Access-Control-Allow-Origin'] == 'http://foo.bar'
resp = client.options('/')
assert resp.headers['Access-Control-Allow-Origin'] == 'http://foo.bar'
assert (sorted(resp.headers['Access-Control-Allow-Methods']
.split(', ')) ==
['GET', 'HEAD', 'OPTIONS', 'PUT'])
resp = client.options('/foo')
assert resp.data == b'.'
assert resp.headers['Allow'] == 'GET, OPTIONS, HEAD'
assert (resp.headers['Access-Control-Allow-Origin'] ==
'http://foo.bar, http://foo2.bar')
assert resp.headers['Access-Control-Allow-Methods'] == 'GET'
assert resp.headers['Access-Control-Allow-Headers'] == 'X-FOO, X-BAR'
assert resp.headers['Access-Control-Max-Age'] == '36000'
resp = client.get('/foo')
assert resp.data == b'foo'
assert 'Access-Control-Allow-Origin' not in resp.headers
def test_view_cache(self):
r = self.client.get('/makers')
assert r.status_code == 200
data = json.loads(to_native(r.data))
assert data == {"count": 0, "items": []}
fill_data()
r = self.client.get('/makers')
assert r.status_code == 200
data = json.loads(to_native(r.data))
assert data == {"count": 0, "items": []}
cache.clear()
r = self.client.get('/makers')
assert r.status_code == 200
data = json.loads(to_native(r.data))
assert data == {
"count": 2,
"items": [
{"id": 1, "name": "Intel"},
{"id": 2, "name": "CoolerMaster"},
]
}
def test_view_func_heatsinks(self):
fill_data()
r = self.client.get('/heatsinks')
assert r.status_code == 200
data = json.loads(to_native(r.data))
assert data == {
"count": 1,
"items": [{
"id": 1,
"maker_id": 1,
"name": "Stock",
"heatsink_type": "flower",
"width": None,
"depth": None,
"height": None,
"weight": None,
"danawa_id": None,
"price": None,
"shop_count": None,
"first_seen": None,
"image_url": None,
}]
}
def test_view_func_fan_configs(self):
fill_data()
r = self.client.get('/fan-configs')
assert r.status_code == 200
data = json.loads(to_native(r.data))
assert data == {
"count": 1,
"items": [{
"id": 1,
"heatsink_id": 1,
"fan_count": 1,
"fan_size": 92,
"fan_thickness": 15,
}]
}
def test_view_func_measurements(self):
fill_data()
r = self.client.get('/measurements')
assert r.status_code == 200
data = json.loads(to_native(r.data))
assert data == {
"count": 1,
"items": [{
"id": 1,
"fan_config_id": 1,
"noise": 35,
"noise_actual_min": None,
"noise_actual_max": None,
"power": 150,
"rpm_min": None,
"rpm_max": None,
"cpu_temp_delta": 66.4,
"power_temp_delta": None,
}]
}
def test_view_func_all(self):
fill_data()
r = self.client.get('/all')
assert r.status_code == 200
assert r.data + b'\n' == read_file('mock.csv')
@patch('cpucoolerchart.views.update_queue')
@patch('cpucoolerchart.views.is_update_needed', autospec=True)
def test_view_func_update(self, is_update_needed, update_queue):
is_update_needed.return_value = True
cpucoolerchart.views.heroku = MagicMock()
from_key = cpucoolerchart.views.heroku.from_key
self.app.config['USE_QUEUE'] = False
r = self.client.post('/update')
assert (json.loads(to_native(r.data))['msg'] ==
'the app is not configured to update data via HTTP')
assert r.status_code == 404
self.app.debug = False
self.app.config['USE_QUEUE'] = True
r = self.client.post('/update')
assert json.loads(to_native(r.data))['msg'] == 'process started'
assert r.status_code == 202
update_queue.enqueue_call.assert_called_with(update_data, result_ttl=0)
update_queue.enqueue_call.reset_mock()
r = self.client.post('/update')
assert json.loads(to_native(r.data))['msg'] == 'too many requests'
assert r.status_code == 429
self.app.debug = True
self.app.config['START_WORKER_NODE'] = 'heroku'
self.app.config['HEROKU_API_KEY'] = '12345678'
self.app.config['HEROKU_APP_NAME'] = 'foobar'
r = self.client.post('/update')
assert json.loads(to_native(r.data))['msg'] == 'process started'
assert r.status_code == 202
update_queue.enqueue_call.assert_called_with(update_data, result_ttl=0)
update_queue.enqueue_call.reset_mock()
from_key.assert_called_with('12345678')
from_key.reset_mock()
is_update_needed.return_value = False
r = self.client.post('/update')
assert json.loads(to_native(r.data))['msg'] == 'already up to date'
assert r.status_code == 202
assert update_queue.enqueue_call.call_count == 0
assert from_key.call_count == 0
|
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class servicegroupbindings(base_resource) :
""" Configuration for servicegroupbind resource. """
def __init__(self) :
self._servicegroupname = ""
self._ipaddress = ""
self._port = 0
self._state = ""
self._svrstate = ""
self._vservername = ""
self.___count = 0
@property
def servicegroupname(self) :
"""The name of the service.<br/>Minimum length = 1.
"""
try :
return self._servicegroupname
except Exception as e:
raise e
@servicegroupname.setter
def servicegroupname(self, servicegroupname) :
"""The name of the service.<br/>Minimum length = 1
"""
try :
self._servicegroupname = servicegroupname
except Exception as e:
raise e
@property
def ipaddress(self) :
"""The IP address of the vserver.
"""
try :
return self._ipaddress
except Exception as e:
raise e
@property
def port(self) :
"""The port of the vserver.<br/>Range 1 - 65535.
"""
try :
return self._port
except Exception as e:
raise e
@property
def state(self) :
"""The state of the service group.<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._state
except Exception as e:
raise e
@property
def svrstate(self) :
"""The state of the vserver.<br/>Possible values = UP, DOWN, UNKNOWN, BUSY, OUT OF SERVICE, GOING OUT OF SERVICE, DOWN WHEN GOING OUT OF SERVICE, NS_EMPTY_STR, Unknown, DISABLED.
"""
try :
return self._svrstate
except Exception as e:
raise e
@property
def vservername(self) :
"""The name of the vserver.
"""
try :
return self._vservername
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(servicegroupbindings_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.servicegroupbindings
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.servicegroupname) :
return str(self.servicegroupname)
return None
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
""" Use this API to fetch all the servicegroupbindings resources that are configured on netscaler.
"""
try :
if type(name) != cls :
if type(name) is not list :
obj = servicegroupbindings()
obj.servicegroupname = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [servicegroupbindings() for _ in range(len(name))]
obj = [servicegroupbindings() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = servicegroupbindings()
obj[i].servicegroupname = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_, obj) :
""" Use this API to fetch filtered set of servicegroupbindings resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
option_ = options()
option_.filter = filter_
option_.args = nitro_util.object_to_string_withoutquotes(obj)
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client, obj) :
""" Use this API to count the servicegroupbindings resources configured on NetScaler.
"""
try :
option_ = options()
option_.count = True
option_.args = nitro_util.object_to_string_withoutquotes(obj)
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_, obj) :
""" Use this API to count filtered the set of servicegroupbindings resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
option_ = options()
option_.count = True
option_.filter = filter_
option_.args = nitro_util.object_to_string_withoutquotes(obj)
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class State:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Svrstate:
UP = "UP"
DOWN = "DOWN"
UNKNOWN = "UNKNOWN"
BUSY = "BUSY"
OUT_OF_SERVICE = "OUT OF SERVICE"
GOING_OUT_OF_SERVICE = "GOING OUT OF SERVICE"
DOWN_WHEN_GOING_OUT_OF_SERVICE = "DOWN WHEN GOING OUT OF SERVICE"
NS_EMPTY_STR = "NS_EMPTY_STR"
Unknown = "Unknown"
DISABLED = "DISABLED"
class servicegroupbindings_response(base_response) :
def __init__(self, length=1) :
self.servicegroupbindings = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.servicegroupbindings = [servicegroupbindings() for _ in range(length)]
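# A minimal usage sketch (illustrative only, not exercised by this module): it assumes a
# reachable NetScaler and the standard nitro_service client from this SDK; the address,
# credentials and service group name below are placeholders.
def _example_fetch_servicegroupbindings():
	from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
	client = nitro_service("10.0.0.1", "HTTP")  # placeholder NetScaler address
	client.login("nsroot", "nsroot")  # placeholder credentials
	bindings = servicegroupbindings.get(client, "sg-web")
	for binding in bindings:
		print("%s %s:%s %s" % (binding.servicegroupname, binding.ipaddress,
			binding.port, binding.svrstate))
	client.logout()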
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple transfer learning with an Inception v3 architecture model which
displays summaries in TensorBoard.
This example shows how to take an Inception v3 architecture model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector for each image. We
train a softmax layer on top of this representation. Assuming the softmax layer
contains N labels, this corresponds to learning N + 2048*N model parameters
for the learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
bazel build third_party/tensorflow/examples/image_retraining:retrain && \
bazel-bin/third_party/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import glob
import hashlib
import os.path
import random
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
FLAGS = tf.app.flags.FLAGS
# Input and output file flags.
tf.app.flags.DEFINE_string('image_dir', '',
"""Path to folders of labeled images.""")
tf.app.flags.DEFINE_string('output_graph', '/tmp/output_graph.pb',
"""Where to save the trained graph.""")
tf.app.flags.DEFINE_string('output_labels', '/tmp/output_labels.txt',
"""Where to save the trained graph's labels.""")
tf.app.flags.DEFINE_string('summaries_dir', '/tmp/retrain_logs',
"""Where to save summary logs for TensorBoard.""")
# Details of the training configuration.
tf.app.flags.DEFINE_integer('how_many_training_steps', 4000,
"""How many training steps to run before ending.""")
tf.app.flags.DEFINE_float('learning_rate', 0.01,
"""How large a learning rate to use when training.""")
tf.app.flags.DEFINE_integer(
'testing_percentage', 10,
"""What percentage of images to use as a test set.""")
tf.app.flags.DEFINE_integer(
'validation_percentage', 10,
"""What percentage of images to use as a validation set.""")
tf.app.flags.DEFINE_integer('eval_step_interval', 10,
"""How often to evaluate the training results.""")
tf.app.flags.DEFINE_integer('train_batch_size', 100,
"""How many images to train on at a time.""")
tf.app.flags.DEFINE_integer('test_batch_size', 500,
"""How many images to test on at a time. This"""
""" test set is only used infrequently to verify"""
""" the overall accuracy of the model.""")
tf.app.flags.DEFINE_integer(
'validation_batch_size', 100,
"""How many images to use in an evaluation batch. This validation set is"""
""" used much more often than the test set, and is an early indicator of"""
""" how accurate the model is during training.""")
# File-system cache locations.
tf.app.flags.DEFINE_string('model_dir', '/tmp/imagenet',
"""Path to classify_image_graph_def.pb, """
"""imagenet_synset_to_human_label_map.txt, and """
"""imagenet_2012_challenge_label_map_proto.pbtxt.""")
tf.app.flags.DEFINE_string(
'bottleneck_dir', '/tmp/bottleneck',
"""Path to cache bottleneck layer values as files.""")
tf.app.flags.DEFINE_string('final_tensor_name', 'final_result',
"""The name of the output classification layer in"""
""" the retrained graph.""")
# Controls the distortions used during training.
tf.app.flags.DEFINE_boolean(
'flip_left_right', False,
"""Whether to randomly flip half of the training images horizontally.""")
tf.app.flags.DEFINE_integer(
'random_crop', 0,
"""A percentage determining how much of a margin to randomly crop off the"""
""" training images.""")
tf.app.flags.DEFINE_integer(
'random_scale', 0,
"""A percentage determining how much to randomly scale up the size of the"""
""" training images by.""")
tf.app.flags.DEFINE_integer(
'random_brightness', 0,
"""A percentage determining how much to randomly multiply the training"""
""" image input pixels up or down by.""")
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
BOTTLENECK_TENSOR_SIZE = 2048
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
def create_image_lists(image_dir, testing_percentage, validation_percentage):
"""Builds a list of training images from the file system.
Analyzes the sub folders in the image directory, splits them into stable
training, testing, and validation sets, and returns a data structure
describing the lists of images for each label and their paths.
Args:
image_dir: String path to a folder containing subfolders of images.
testing_percentage: Integer percentage of the images to reserve for tests.
validation_percentage: Integer percentage of images reserved for validation.
Returns:
A dictionary containing an entry for each label subfolder, with images split
into training, testing, and validation sets within each label.
"""
if not gfile.Exists(image_dir):
print("Image directory '" + image_dir + "' not found.")
return None
result = {}
sub_dirs = [x[0] for x in os.walk(image_dir)]
# The root directory comes first, so skip it.
is_root_dir = True
for sub_dir in sub_dirs:
if is_root_dir:
is_root_dir = False
continue
extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
file_list = []
dir_name = os.path.basename(sub_dir)
if dir_name == image_dir:
continue
print("Looking for images in '" + dir_name + "'")
for extension in extensions:
file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
file_list.extend(glob.glob(file_glob))
if not file_list:
print('No files found')
continue
if len(file_list) < 20:
print('WARNING: Folder has less than 20 images, which may cause issues.')
label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
training_images = []
testing_images = []
validation_images = []
for file_name in file_list:
base_name = os.path.basename(file_name)
# We want to ignore anything after '_nohash_' in the file name when
# deciding which set to put an image in, so that the data set creator has
# a way of grouping photos that are close variations of each other. For
# example, this is used in the plant disease data set to group multiple
# pictures of the same leaf.
hash_name = re.sub(r'_nohash_.*$', '', file_name)
# This looks a bit magical, but we need to decide whether this file should
# go into the training, testing, or validation sets, and we want to keep
# existing files in the same set even if more files are subsequently
# added.
# To do that, we need a stable way of deciding based on just the file name
# itself, so we do a hash of that and then use that to generate a
# probability value that we use to assign it.
hash_name_hashed = hashlib.sha1(hash_name.encode('utf-8')).hexdigest()
percentage_hash = (int(hash_name_hashed, 16) % (65536)) * (100 / 65535.0)
if percentage_hash < validation_percentage:
validation_images.append(base_name)
elif percentage_hash < (testing_percentage + validation_percentage):
testing_images.append(base_name)
else:
training_images.append(base_name)
result[label_name] = {
'dir': dir_name,
'training': training_images,
'testing': testing_images,
'validation': validation_images,
}
return result
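# A standalone sketch of the deterministic split used above (the default 10%/10%
# validation/testing percentages are assumed); it is never called and only mirrors the
# hashing logic so the set assignment can be checked for a single file name.
def _example_stable_split(file_name, validation_percentage=10, testing_percentage=10):
  """Returns 'validation', 'testing' or 'training' for file_name."""
  hash_name = re.sub(r'_nohash_.*$', '', file_name)
  hash_name_hashed = hashlib.sha1(hash_name.encode('utf-8')).hexdigest()
  percentage_hash = (int(hash_name_hashed, 16) % 65536) * (100 / 65535.0)
  if percentage_hash < validation_percentage:
    return 'validation'
  if percentage_hash < (testing_percentage + validation_percentage):
    return 'testing'
  return 'training'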
def get_image_path(image_lists, label_name, index, image_dir, category):
""""Returns a path to an image for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Int offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
if label_name not in image_lists:
tf.logging.fatal('Label does not exist %s.', label_name)
label_lists = image_lists[label_name]
if category not in label_lists:
tf.logging.fatal('Category does not exist %s.', category)
category_list = label_lists[category]
if not category_list:
tf.logging.fatal('Category has no images - %s.', category)
mod_index = index % len(category_list)
base_name = category_list[mod_index]
sub_dir = label_lists['dir']
full_path = os.path.join(image_dir, sub_dir, base_name)
return full_path
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
category):
""""Returns a path to a bottleneck file for a label at the given index.
Args:
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be moduloed by the
available number of images for the label, so it can be arbitrarily large.
bottleneck_dir: Folder string holding cached files of bottleneck values.
category: Name string of set to pull images from - training, testing, or
validation.
Returns:
File system path string to an image that meets the requested parameters.
"""
return get_image_path(image_lists, label_name, index, bottleneck_dir,
category) + '.txt'
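# Illustration with made-up values: for bottleneck_dir='/tmp/bottleneck', a 'daisy' label
# folder and image 'photo1.jpg', the cache file is '/tmp/bottleneck/daisy/photo1.jpg.txt',
# holding the 2048 bottleneck floats as a single comma-separated line (written and read
# by get_or_create_bottleneck() below).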
def create_inception_graph():
""""Creates a graph from saved GraphDef file and returns a Graph object.
Returns:
Graph holding the trained Inception network, and various tensors we'll be
manipulating.
"""
with tf.Session() as sess:
model_filename = os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb')
with gfile.FastGFile(model_filename, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (
tf.import_graph_def(graph_def, name='', return_elements=[
BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,
RESIZED_INPUT_TENSOR_NAME]))
return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
bottleneck_tensor):
"""Runs inference on an image to extract the 'bottleneck' summary layer.
Args:
sess: Current active TensorFlow Session.
image_data: Numpy array of image data.
image_data_tensor: Input data layer in the graph.
bottleneck_tensor: Layer before the final softmax.
Returns:
Numpy array of bottleneck values.
"""
bottleneck_values = sess.run(
bottleneck_tensor,
{image_data_tensor: image_data})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def maybe_download_and_extract():
"""Download and extract model tar file.
If the pretrained model we're using doesn't already exist, this function
downloads it from the TensorFlow.org website and unpacks it into a directory.
"""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' %
(filename,
float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL,
filepath,
_progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def ensure_dir_exists(dir_name):
"""Makes sure the folder exists on disk.
Args:
dir_name: Path string to the folder we want to create.
"""
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
category, bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor):
"""Retrieves or calculates bottleneck values for an image.
If a cached version of the bottleneck data exists on-disk, return that,
otherwise calculate the data and save it to disk for future use.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
label_name: Label string we want to get an image for.
index: Integer offset of the image we want. This will be modulo-ed by the
available number of images for the label, so it can be arbitrarily large.
image_dir: Root folder string of the subfolders containing the training
images.
category: Name string of which set to pull images from - training, testing,
or validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: The tensor to feed loaded jpeg data into.
bottleneck_tensor: The output tensor for the bottleneck values.
Returns:
Numpy array of values produced by the bottleneck layer for the image.
"""
label_lists = image_lists[label_name]
sub_dir = label_lists['dir']
sub_dir_path = os.path.join(bottleneck_dir, sub_dir)
ensure_dir_exists(sub_dir_path)
bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
bottleneck_dir, category)
if not os.path.exists(bottleneck_path):
print('Creating bottleneck at ' + bottleneck_path)
image_path = get_image_path(image_lists, label_name, index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
image_data = gfile.FastGFile(image_path, 'rb').read()
bottleneck_values = run_bottleneck_on_image(sess, image_data,
jpeg_data_tensor,
bottleneck_tensor)
bottleneck_string = ','.join(str(x) for x in bottleneck_values)
with open(bottleneck_path, 'w') as bottleneck_file:
bottleneck_file.write(bottleneck_string)
with open(bottleneck_path, 'r') as bottleneck_file:
bottleneck_string = bottleneck_file.read()
bottleneck_values = [float(x) for x in bottleneck_string.split(',')]
return bottleneck_values
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: Dictionary of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
bottleneck_tensor: The penultimate output layer of the graph.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(sess, image_lists, label_name, index,
image_dir, category, bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
print(str(how_many_bottlenecks) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
bottleneck_dir, image_dir, jpeg_data_tensor,
bottleneck_tensor):
"""Retrieves bottleneck values for cached images.
If no distortions are being applied, this function can retrieve the cached
bottleneck values directly from disk for images. It picks a random set of
images from the specified category.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The number of bottleneck values to return.
category: Name string of which set to pull from - training, testing, or
validation.
bottleneck_dir: Folder string holding cached files of bottleneck values.
image_dir: Root folder string of the subfolders containing the training
images.
jpeg_data_tensor: The layer to feed jpeg image data into.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(65536)
bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,
image_index, image_dir, category,
bottleneck_dir, jpeg_data_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def get_random_distorted_bottlenecks(
sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
distorted_image, resized_input_tensor, bottleneck_tensor):
"""Retrieves bottleneck values for training images, after distortions.
If we're training with distortions like crops, scales, or flips, we have to
recalculate the full model for every image, and so we can't use cached
bottleneck values. Instead we find random images for the requested category,
run them through the distortion graph, and then the full graph to get the
bottleneck results for each.
Args:
sess: Current TensorFlow Session.
image_lists: Dictionary of training images for each label.
how_many: The integer number of bottleneck values to return.
category: Name string of which set of images to fetch - training, testing,
or validation.
image_dir: Root folder string of the subfolders containing the training
images.
input_jpeg_tensor: The input layer we feed the image data to.
distorted_image: The output node of the distortion graph.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The bottleneck output layer of the CNN graph.
Returns:
List of bottleneck arrays and their corresponding ground truths.
"""
class_count = len(image_lists.keys())
bottlenecks = []
ground_truths = []
for unused_i in range(how_many):
label_index = random.randrange(class_count)
label_name = list(image_lists.keys())[label_index]
image_index = random.randrange(65536)
image_path = get_image_path(image_lists, label_name, image_index, image_dir,
category)
if not gfile.Exists(image_path):
tf.logging.fatal('File does not exist %s', image_path)
jpeg_data = gfile.FastGFile(image_path, 'rb').read()
# Note that we materialize the distorted_image_data as a numpy array before
# running inference on the image. This involves 2 memory copies and
# might be optimized in other implementations.
distorted_image_data = sess.run(distorted_image,
{input_jpeg_tensor: jpeg_data})
bottleneck = run_bottleneck_on_image(sess, distorted_image_data,
resized_input_tensor,
bottleneck_tensor)
ground_truth = np.zeros(class_count, dtype=np.float32)
ground_truth[label_index] = 1.0
bottlenecks.append(bottleneck)
ground_truths.append(ground_truth)
return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
random_brightness):
"""Whether any distortions are enabled, from the input flags.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
Boolean value indicating whether any distortions should be applied.
"""
return (flip_left_right or (random_crop != 0) or (random_scale != 0) or
(random_brightness != 0))
def add_input_distortions(flip_left_right, random_crop, random_scale,
random_brightness):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
Returns:
The jpeg input layer and the distorted result tensor.
"""
jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)
decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
minval=1.0,
maxval=resize_scale)
scale_value = tf.mul(margin_scale_value, resize_scale_value)
precrop_width = tf.mul(scale_value, MODEL_INPUT_WIDTH)
precrop_height = tf.mul(scale_value, MODEL_INPUT_HEIGHT)
precrop_shape = tf.pack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,
MODEL_INPUT_DEPTH])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(tensor_shape.scalar(),
minval=brightness_min,
maxval=brightness_max)
brightened_image = tf.mul(flipped_image, brightness_value)
distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
return jpeg_data, distort_result
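# A worked example of the distortion parameters (values are hypothetical): with
# --random_crop=10 and --random_scale=20, margin_scale is 1.1 and resize_scale_value is
# drawn from [1.0, 1.2), so the decoded image is first resized to between 1.1x and
# roughly 1.32x of the 299x299 model input and then randomly cropped back to 299x299.
# --random_brightness=25 would multiply the pixel values by a factor drawn from
# [0.75, 1.25).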
def variable_summaries(var, name):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.scalar_summary('mean/' + name, mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.scalar_summary('stddev/' + name, stddev)
tf.scalar_summary('max/' + name, tf.reduce_max(var))
tf.scalar_summary('min/' + name, tf.reduce_min(var))
tf.histogram_summary(name, var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):
"""Adds a new softmax and fully-connected layer for training.
We need to retrain the top layer to identify our new classes, so this function
adds the right operations to the graph, along with some variables to hold the
weights, and then sets up all the gradients for the backward pass.
The set up for the softmax and fully-connected layers is based on:
https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html
Args:
class_count: Integer of how many categories of things we're trying to
recognize.
final_tensor_name: Name string for the new final node that produces results.
bottleneck_tensor: The output of the main CNN graph.
Returns:
The tensors for the training and cross entropy results, and tensors for the
bottleneck input and ground truth input.
"""
with tf.name_scope('input'):
bottleneck_input = tf.placeholder_with_default(
bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],
name='BottleneckInputPlaceholder')
ground_truth_input = tf.placeholder(tf.float32,
[None, class_count],
name='GroundTruthInput')
# Organizing the following ops as `final_training_ops` so they're easier
# to see in TensorBoard
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
layer_weights = tf.Variable(tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001), name='final_weights')
variable_summaries(layer_weights, layer_name + '/weights')
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
variable_summaries(layer_biases, layer_name + '/biases')
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
tf.histogram_summary(layer_name + '/pre_activations', logits)
final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
tf.histogram_summary(final_tensor_name + '/activations', final_tensor)
with tf.name_scope('cross_entropy'):
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
logits, ground_truth_input)
with tf.name_scope('total'):
cross_entropy_mean = tf.reduce_mean(cross_entropy)
tf.scalar_summary('cross entropy', cross_entropy_mean)
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
cross_entropy_mean)
return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Nothing.
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(tf.argmax(result_tensor, 1), \
tf.argmax(ground_truth_tensor, 1))
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.scalar_summary('accuracy', evaluation_step)
return evaluation_step
def main(_):
# Set up the directory we'll write summaries to for TensorBoard
if tf.gfile.Exists(FLAGS.summaries_dir):
tf.gfile.DeleteRecursively(FLAGS.summaries_dir)
tf.gfile.MakeDirs(FLAGS.summaries_dir)
# Set up the pre-trained graph.
maybe_download_and_extract()
graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (
create_inception_graph())
# Look at the folder structure, and create lists of all the images.
image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
FLAGS.validation_percentage)
class_count = len(image_lists.keys())
if class_count == 0:
print('No valid folders of images found at ' + FLAGS.image_dir)
return -1
if class_count == 1:
print('Only one valid folder of images found at ' + FLAGS.image_dir +
' - multiple classes are needed for classification.')
return -1
# See if the command-line flags mean we're applying any distortions.
do_distort_images = should_distort_images(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
sess = tf.Session()
if do_distort_images:
# We will be applying distortions, so setup the operations we'll need.
distorted_jpeg_data_tensor, distorted_image_tensor = add_input_distortions(
FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
FLAGS.random_brightness)
else:
# We'll make sure we've calculated the 'bottleneck' image summaries and
# cached them on disk.
cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir,
jpeg_data_tensor, bottleneck_tensor)
# Add the new layer that we'll be training.
(train_step, cross_entropy, bottleneck_input, ground_truth_input,
final_tensor) = add_final_training_ops(len(image_lists.keys()),
FLAGS.final_tensor_name,
bottleneck_tensor)
# Create the operations we need to evaluate the accuracy of our new layer.
evaluation_step = add_evaluation_step(final_tensor, ground_truth_input)
# Merge all the summaries and write them out to /tmp/retrain_logs (by default)
merged = tf.merge_all_summaries()
train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',
sess.graph)
validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/validation')
# Set up all our weights to their initial default values.
init = tf.initialize_all_variables()
sess.run(init)
# Run the training for as many cycles as requested on the command line.
for i in range(FLAGS.how_many_training_steps):
# Get a batch of input bottleneck values, either calculated fresh every time
# with distortions applied, or from the cache stored on disk.
if do_distort_images:
train_bottlenecks, train_ground_truth = get_random_distorted_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.image_dir, distorted_jpeg_data_tensor,
distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
else:
train_bottlenecks, train_ground_truth = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.train_batch_size, 'training',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor)
# Feed the bottlenecks and ground truth into the graph, and run a training
# step. Capture training summaries for TensorBoard with the `merged` op.
train_summary, _ = sess.run([merged, train_step],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
train_writer.add_summary(train_summary, i)
# Every so often, print out how well the graph is training.
is_last_step = (i + 1 == FLAGS.how_many_training_steps)
if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
train_accuracy, cross_entropy_value = sess.run(
[evaluation_step, cross_entropy],
feed_dict={bottleneck_input: train_bottlenecks,
ground_truth_input: train_ground_truth})
print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,
train_accuracy * 100))
print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,
cross_entropy_value))
validation_bottlenecks, validation_ground_truth = (
get_random_cached_bottlenecks(
sess, image_lists, FLAGS.validation_batch_size, 'validation',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor))
# Run a validation step and capture training summaries for TensorBoard
# with the `merged` op.
validation_summary, validation_accuracy = sess.run(
[merged, evaluation_step],
feed_dict={bottleneck_input: validation_bottlenecks,
ground_truth_input: validation_ground_truth})
validation_writer.add_summary(validation_summary, i)
print('%s: Step %d: Validation accuracy = %.1f%%' %
(datetime.now(), i, validation_accuracy * 100))
# We've completed all our training, so run a final test evaluation on
# some new images we haven't used before.
test_bottlenecks, test_ground_truth = get_random_cached_bottlenecks(
sess, image_lists, FLAGS.test_batch_size, 'testing',
FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
bottleneck_tensor)
test_accuracy = sess.run(
evaluation_step,
feed_dict={bottleneck_input: test_bottlenecks,
ground_truth_input: test_ground_truth})
print('Final test accuracy = %.1f%%' % (test_accuracy * 100))
# Write out the trained graph and labels with the weights stored as constants.
output_graph_def = graph_util.convert_variables_to_constants(
sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:
f.write(output_graph_def.SerializeToString())
with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
tf.app.run()
|
|
#!/usr/bin/env python3
import unittest
from unittest.mock import patch
import sys
from io import StringIO
import asyncio
import json
import WDL
from scripts import parse_wdl_workflow
# Test cases
class TestReadAndParseInput(unittest.TestCase):
def test_read_stdin_result_type(self):
with patch("sys.stdin", StringIO(test_wdl)):
loop = asyncio.get_event_loop()
result = loop.run_until_complete(parse_wdl_workflow.read_stdin("stdin", "", ""))
self.assertIsInstance(result, WDL.ReadSourceResult)
self.assertEqual(result.source_text, test_wdl)
self.assertEqual(result.abspath, "stdin")
def test_read_stdin_result_parsing(self):
with patch("sys.stdin", StringIO(test_wdl)):
doc = WDL.load("stdin", read_source=parse_wdl_workflow.read_stdin)
assert doc.workflow
self.assertEqual(len(doc.tasks), 3)
self.assertEqual(len(doc.workflow.inputs), 4)
self.assertEqual(len(doc.workflow.body), 4)
self.assertIsInstance(doc.workflow.body[0], WDL.Tree.Call)
self.assertIsInstance(doc.workflow.body[1], WDL.Tree.Conditional)
self.assertIsInstance(doc.workflow.body[2], WDL.Tree.Decl)
class TestJSONOutput(unittest.TestCase):
def setUp(self):
with patch("sys.stdin", StringIO(test_wdl)), patch("sys.stdout", new_callable=StringIO):
parse_wdl_workflow.main()
output = sys.stdout.getvalue()
self.json = json.loads(output)
self.task_names = frozenset(["RunValidateInput", "RunBowtie2_bowtie2_human_out", "RunGsnapFilter"])
self.workflow_inputs = ["docker_image_id", "fastqs_0", "fastqs_1", "host_genome"]
def test_workflow_inputs(self):
input_types = ["String", "File", "File", "String"]
json_inputs = self.json["inputs"]
self.assertEqual(len(json_inputs), 4)
for var, var_type in zip(self.workflow_inputs, input_types):
self.assertIn(var, json_inputs)
json_type = json_inputs[var]
self.assertEqual(var_type, json_type)
def test_task_names(self):
for task in self.json["task_names"]:
self.assertIn(task, self.task_names)
def test_task_info(self):
# Make sure inputs not from previous tasks have the WorkflowInput prefix
task_info = self.json["task_inputs"]
run_validate_inputs = task_info["RunValidateInput"]
for prefixed_var in run_validate_inputs:
self.assertIn(".", prefixed_var)
prefix, var = prefixed_var.split(".")
self.assertEqual(prefix, "WorkflowInput")
self.assertIn(var, self.workflow_inputs)
# Test the rest of the inputs - either they're in the stage input,
# or one of the outputs
valid_prefixes = set(["WorkflowInput"]).union(self.task_names)
valid_outputs = self.json["outputs"].values()
for task_name, task_inputs in task_info.items():
self.assertIn(task_name, self.task_names)
for prefixed_var in task_inputs:
self.assertIn(".", prefixed_var)
prefix, var = prefixed_var.split(".")
self.assertIn(prefix, valid_prefixes)
if prefix == "WorkflowInput":
self.assertIn(var, self.workflow_inputs)
else:
self.assertIn(prefixed_var, valid_outputs)
def test_file_basenames_and_outputs(self):
basenames = self.json["basenames"]
valid_outputs = self.json["outputs"].values()
self.assertEqual(len(basenames), len(valid_outputs))
for key, value in basenames.items():
self.assertIn(key, valid_outputs)
self.assertIn(".", key)
self.assertIn(".", value)
task = key.split(".")[0]
self.assertNotIn(task, value)
# Test document
test_wdl = """
version 1.0
task RunValidateInput {
input {
String docker_image_id
Array[File] fastqs
String host_genome
}
command<<<
idseq-dag-run-step --workflow-name host_filter \
--step-module idseq_dag.steps.run_validate_input \
--step-class PipelineStepRunValidateInput \
>>>
output {
File validate_input_summary_json = "validate_input_summary.json"
File valid_input1_fastq = "valid_input1.fastq"
File? valid_input2_fastq = "valid_input2.fastq"
File? output_read_count = "validate_input_out.count"
File? input_read_count = "fastqs.count"
}
runtime {
docker: docker_image_id
}
}
task RunBowtie2_bowtie2_human_out {
input {
String docker_image_id
Array[File] unmapped_human_fa
}
command<<<
idseq-dag-run-step --workflow-name host_filter \
--step-module idseq_dag.steps.run_bowtie2 \
--step-class PipelineStepRunBowtie2 \
>>>
output {
File bowtie2_human_1_fa = "bowtie2_human_1.fa"
File? bowtie2_human_2_fa = "bowtie2_human_2.fa"
File? bowtie2_human_merged_fa = "bowtie2_human_merged.fa"
File? output_read_count = "bowtie2_human_out.count"
}
runtime {
docker: docker_image_id
}
}
task RunGsnapFilter {
input {
String docker_image_id
Array[File] subsampled_fa
}
command<<<
idseq-dag-run-step --workflow-name host_filter \
--step-module idseq_dag.steps.run_gsnap_filter \
--step-class PipelineStepRunGsnapFilter \
>>>
output {
File gsnap_filter_1_fa = "gsnap_filter_1.fa"
File? gsnap_filter_2_fa = "gsnap_filter_2.fa"
File? gsnap_filter_merged_fa = "gsnap_filter_merged.fa"
File? output_read_count = "gsnap_filter_out.count"
}
runtime {
docker: docker_image_id
}
}
workflow idseq_host_filter {
input {
String docker_image_id
File fastqs_0
File? fastqs_1
String host_genome
}
call RunValidateInput {
input:
docker_image_id = docker_image_id,
fastqs = select_all([fastqs_0, fastqs_1]),
host_genome = host_genome,
}
if (host_genome != "human") {
call RunBowtie2_bowtie2_human_out {
input:
docker_image_id = docker_image_id,
unmapped_human_fa = select_all([RunValidateInput.valid_input1_fastq, RunValidateInput.valid_input2_fastq]),
}
}
Array[File] gsnap_filter_input = if (host_genome == "human")
then select_all([RunValidateInput.valid_input1_fastq, RunValidateInput.valid_input2_fastq])
else select_all([RunBowtie2_bowtie2_human_out.bowtie2_human_1_fa, RunBowtie2_bowtie2_human_out.bowtie2_human_2_fa, RunBowtie2_bowtie2_human_out.bowtie2_human_merged_fa])
call RunGsnapFilter {
input:
docker_image_id = docker_image_id,
subsampled_fa = gsnap_filter_input,
}
output {
File validate_input_out_validate_input_summary_json = RunValidateInput.validate_input_summary_json
File validate_input_out_valid_input1_fastq = RunValidateInput.valid_input1_fastq
File? validate_input_out_valid_input2_fastq = RunValidateInput.valid_input2_fastq
File? validate_input_out_count = RunValidateInput.output_read_count
File? bowtie2_human_out_bowtie2_human_1_fa = RunBowtie2_bowtie2_human_out.bowtie2_human_1_fa
File? bowtie2_human_out_bowtie2_human_2_fa = RunBowtie2_bowtie2_human_out.bowtie2_human_2_fa
File? bowtie2_human_out_bowtie2_human_merged_fa = RunBowtie2_bowtie2_human_out.bowtie2_human_merged_fa
File? bowtie2_human_out_count = RunBowtie2_bowtie2_human_out.output_read_count
File gsnap_filter_out_gsnap_filter_1_fa = RunGsnapFilter.gsnap_filter_1_fa
File? gsnap_filter_out_gsnap_filter_2_fa = RunGsnapFilter.gsnap_filter_2_fa
File? gsnap_filter_out_gsnap_filter_merged_fa = RunGsnapFilter.gsnap_filter_merged_fa
File? gsnap_filter_out_count = RunGsnapFilter.output_read_count
File? input_read_count = RunValidateInput.input_read_count
}
}
""" # noqa
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import render_template
from sqlalchemy.dialects.postgresql import JSONB
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.core.db.sqlalchemy.custom.utcdatetime import UTCDateTime
from indico.core.logger import Logger
from indico.util.date_time import now_utc
from indico.util.enum import IndicoEnum
from indico.util.string import format_repr
class InvalidTransactionStatus(Exception):
pass
class InvalidManualTransactionAction(Exception):
pass
class InvalidTransactionAction(Exception):
pass
class IgnoredTransactionAction(Exception):
pass
class DoublePaymentTransaction(Exception):
pass
class TransactionAction(int, IndicoEnum):
complete = 1
cancel = 2
pending = 3
reject = 4
class TransactionStatus(int, IndicoEnum):
#: payment attempt succeeded
successful = 1
#: payment cancelled manually
cancelled = 2
#: payment attempt failed
failed = 3
#: payment on hold pending approval of merchant
pending = 4
#: payment rejected after being pending
rejected = 5
class TransactionStatusTransition:
initial_statuses = [TransactionStatus.cancelled, TransactionStatus.failed, TransactionStatus.rejected]
@classmethod
def next(cls, transaction, action, provider=None):
manual = provider is None
if not transaction or transaction.status in cls.initial_statuses:
return cls._next_from_initial(action, manual)
elif transaction.status == TransactionStatus.successful:
return cls._next_from_successful(action, manual)
elif transaction.status == TransactionStatus.pending:
return cls._next_from_pending(action, manual)
else:
raise InvalidTransactionStatus(f"Invalid transaction status code '{transaction.status}'")
@staticmethod
def _next_from_initial(action, manual=False):
if manual:
if action == TransactionAction.complete:
return TransactionStatus.successful
elif action == TransactionAction.cancel:
raise IgnoredTransactionAction("Ignored cancel action on initial status")
else:
raise InvalidManualTransactionAction(action)
elif action == TransactionAction.complete:
return TransactionStatus.successful
elif action == TransactionAction.pending:
return TransactionStatus.pending
elif action == TransactionAction.reject:
raise IgnoredTransactionAction("Ignored reject action on initial status")
else:
raise InvalidTransactionAction(action)
@staticmethod
def _next_from_successful(action, manual=False):
if manual:
if action == TransactionAction.complete:
raise IgnoredTransactionAction("Ignored complete action on successful status")
elif action == TransactionAction.cancel:
return TransactionStatus.cancelled
else:
raise InvalidManualTransactionAction(action)
elif action == TransactionAction.complete:
raise DoublePaymentTransaction
elif action == TransactionAction.pending:
raise IgnoredTransactionAction("Ignored pending action on successful status")
elif action == TransactionAction.reject:
raise IgnoredTransactionAction("Ignored reject action on successful status")
else:
raise InvalidTransactionAction(action)
@staticmethod
def _next_from_pending(action, manual=False):
if manual:
if action == TransactionAction.complete:
raise IgnoredTransactionAction("Ignored complete action on pending status")
elif action == TransactionAction.cancel:
return TransactionStatus.cancelled
else:
raise InvalidManualTransactionAction(action)
elif action == TransactionAction.complete:
return TransactionStatus.successful
elif action == TransactionAction.pending:
raise IgnoredTransactionAction("Ignored pending action on pending status")
elif action == TransactionAction.reject:
return TransactionStatus.rejected
else:
raise InvalidTransactionAction(action)
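# A few illustrative transitions implied by the table above, assuming a non-manual
# action (i.e. `provider` names a payment plugin):
#   no previous transaction + complete -> TransactionStatus.successful
#   pending                 + reject   -> TransactionStatus.rejected
#   successful              + complete -> DoublePaymentTransaction is raised, which
#                                         PaymentTransaction.create_next() below records
#                                         as another successful payment.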
class PaymentTransaction(db.Model):
"""Payment transactions."""
__tablename__ = 'payment_transactions'
__table_args__ = (db.CheckConstraint('amount > 0', 'positive_amount'),
{'schema': 'events'})
#: Entry ID
id = db.Column(
db.Integer,
primary_key=True
)
#: ID of the associated registration
registration_id = db.Column(
db.Integer,
db.ForeignKey('event_registration.registrations.id'),
index=True,
nullable=False
)
#: a :class:`TransactionStatus`
status = db.Column(
PyIntEnum(TransactionStatus),
nullable=False
)
#: the base amount the user needs to pay (without payment-specific fees)
amount = db.Column(
db.Numeric(11, 2), # max. 999999999.99
nullable=False
)
#: the currency of the payment (ISO string, e.g. EUR or USD)
currency = db.Column(
db.String,
nullable=False
)
#: the provider of the payment (e.g. manual, PayPal etc.)
provider = db.Column(
db.String,
nullable=False,
default='_manual'
)
#: the date and time the transaction was recorded
timestamp = db.Column(
UTCDateTime,
default=now_utc,
nullable=False
)
#: plugin-specific data of the payment
data = db.Column(
JSONB,
nullable=False
)
#: The associated registration
registration = db.relationship(
'Registration',
lazy=True,
foreign_keys=[registration_id],
backref=db.backref(
'transactions',
cascade='all, delete-orphan',
lazy=True
)
)
@property
def plugin(self):
from indico.modules.events.payment.util import get_payment_plugins
return get_payment_plugins().get(self.provider)
@property
def is_manual(self):
return self.provider == '_manual'
def __repr__(self):
# in case of a new object we might not have the default status set
status = TransactionStatus(self.status).name if self.status is not None else None
return format_repr(self, 'id', 'registration_id', 'provider', 'amount', 'currency', 'timestamp', status=status)
def render_details(self):
"""Render the transaction details."""
if self.is_manual:
return render_template('events/payment/transaction_details_manual.html', transaction=self, plugin=None)
plugin = self.plugin
if plugin is None:
return f'[plugin not loaded: {self.provider}]'
with plugin.plugin_context():
return plugin.render_transaction_details(self)
@classmethod
def create_next(cls, registration, amount, currency, action, provider=None, data=None):
previous_transaction = registration.transaction
new_transaction = PaymentTransaction(amount=amount, currency=currency,
provider=provider, data=data)
try:
next_status = TransactionStatusTransition.next(previous_transaction, action, provider)
except InvalidTransactionStatus as e:
Logger.get('payment').exception("%s (data received: %r)", e, data)
return None
except InvalidManualTransactionAction as e:
Logger.get('payment').exception("Invalid manual action code '%s' on initial status (data received: %r)",
e, data)
return None
except InvalidTransactionAction as e:
Logger.get('payment').exception("Invalid action code '%s' on initial status (data received: %r)", e, data)
return None
except IgnoredTransactionAction as e:
Logger.get('payment').warning("%s (data received: %r)", e, data)
return None
except DoublePaymentTransaction:
next_status = TransactionStatus.successful
Logger.get('payment').info("Received successful payment for an already paid registration")
registration.transaction = new_transaction
new_transaction.status = next_status
return new_transaction
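# A minimal sketch of how a payment plugin might record a completed payment; the
# registration object and the argument values are placeholders:
#
#     transaction = PaymentTransaction.create_next(
#         registration, amount=25, currency='EUR',
#         action=TransactionAction.complete, provider='paypal', data=request_data)
#
# create_next() only builds the transaction and links it to the registration;
# persisting it is left to the caller.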
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFDecorator-aware replacements for the inspect module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import inspect as _inspect
import six
from tensorflow.python.util import tf_decorator
ArgSpec = _inspect.ArgSpec
if hasattr(_inspect, 'FullArgSpec'):
FullArgSpec = _inspect.FullArgSpec # pylint: disable=invalid-name
else:
FullArgSpec = namedtuple('FullArgSpec', [
'args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults',
'annotations'
])
def _convert_maybe_argspec_to_fullargspec(argspec):
if isinstance(argspec, FullArgSpec):
return argspec
return FullArgSpec(
args=argspec.args,
varargs=argspec.varargs,
varkw=argspec.keywords,
defaults=argspec.defaults,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
if hasattr(_inspect, 'getfullargspec'):
_getfullargspec = _inspect.getfullargspec # pylint: disable=invalid-name
def _getargspec(target):
"""A python3 version of getargspec.
Calls `getfullargspec` and assigns args, varargs,
varkw, and defaults to a python 2/3 compatible `ArgSpec`.
The parameter name 'varkw' is changed to 'keywords' to fit the
`ArgSpec` struct.
Args:
target: the target object to inspect.
Returns:
An ArgSpec with args, varargs, keywords, and defaults parameters
from FullArgSpec.
"""
fullargspecs = getfullargspec(target)
argspecs = ArgSpec(
args=fullargspecs.args,
varargs=fullargspecs.varargs,
keywords=fullargspecs.varkw,
defaults=fullargspecs.defaults)
return argspecs
else:
_getargspec = _inspect.getargspec
def _getfullargspec(target):
"""A python2 version of getfullargspec.
Args:
target: the target object to inspect.
Returns:
A FullArgSpec with empty kwonlyargs, kwonlydefaults and annotations.
"""
return _convert_maybe_argspec_to_fullargspec(getargspec(target))
def currentframe():
"""TFDecorator-aware replacement for inspect.currentframe."""
return _inspect.stack()[1][0]
def getargspec(obj):
"""TFDecorator-aware replacement for `inspect.getargspec`.
Note: `getfullargspec` is recommended as the python 2/3 compatible
replacement for this function.
Args:
obj: A function, partial function, or callable object, possibly decorated.
Returns:
The `ArgSpec` that describes the signature of the outermost decorator that
changes the callable's signature, or the `ArgSpec` that describes
the object if not decorated.
Raises:
ValueError: When callable's signature can not be expressed with
ArgSpec.
TypeError: For objects of unsupported types.
"""
if isinstance(obj, functools.partial):
return _get_argspec_for_partial(obj)
decorators, target = tf_decorator.unwrap(obj)
spec = next((d.decorator_argspec
for d in decorators
if d.decorator_argspec is not None), None)
if spec:
return spec
try:
# Python3 will handle most callables here (not partial).
return _getargspec(target)
except TypeError:
pass
if isinstance(target, type):
try:
return _getargspec(target.__init__)
except TypeError:
pass
try:
return _getargspec(target.__new__)
except TypeError:
pass
# The `type(target)` ensures that if a class is received we don't return
# the signature of it's __call__ method.
return _getargspec(type(target).__call__)
def _get_argspec_for_partial(obj):
"""Implements `getargspec` for `functools.partial` objects.
Args:
obj: The `functools.partial` object.
Returns:
An `inspect.ArgSpec`
Raises:
ValueError: When callable's signature can not be expressed with
ArgSpec.
"""
# When the callable is a functools.partial object, we construct its ArgSpec
# with the following strategy:
# - If the partial supplies values for positional arguments (i.e. obj.args),
#   then the final ArgSpec doesn't contain those positional arguments.
# - If the partial supplies values for keyword arguments (i.e. obj.keywords),
#   then we merge them with the wrapped target's defaults. Default values from
#   the partial take precedence over those from the wrapped target.
#
# However, there is a case where it is impossible to construct a valid
# ArgSpec. Python requires that arguments without default values be defined
# before those with default values. The ArgSpec structure is only valid when
# this presumption holds, because default values are expressed as a tuple of
# values without keywords and are always assumed to belong to the last K
# arguments, where K is the number of default values present.
#
# Since functools.partial can give default value to any argument, this
# presumption may no longer hold in some cases. For example:
#
# def func(m, n):
# return 2 * m + n
# partialed = functools.partial(func, m=1)
#
# This example results in m having a default value but not n. This is
# usually not allowed in Python and cannot be expressed correctly with
# ArgSpec.
#
# Thus, we must detect cases like this by finding the first argument with a
# default value and ensuring that all subsequent arguments also have default
# values. When this is not the case, a ValueError is raised (an illustrative
# sketch follows this function).
n_prune_args = len(obj.args)
partial_keywords = obj.keywords or {}
args, varargs, keywords, defaults = getargspec(obj.func)
# Pruning first n_prune_args arguments.
args = args[n_prune_args:]
# Partial function may give default value to any argument, therefore length
# of default value list must be len(args) to allow each argument to
# potentially be given a default value.
no_default = object()
all_defaults = [no_default] * len(args)
if defaults:
all_defaults[-len(defaults):] = defaults
# Fill in default values provided by partial function in all_defaults.
for kw, default in six.iteritems(partial_keywords):
idx = args.index(kw)
all_defaults[idx] = default
# Find first argument with default value set.
first_default = next(
(idx for idx, x in enumerate(all_defaults) if x is not no_default), None)
# If no default values are found, return ArgSpec with defaults=None.
if first_default is None:
return ArgSpec(args, varargs, keywords, None)
# Checks if all arguments have default value set after first one.
invalid_default_values = [
args[i] for i, j in enumerate(all_defaults)
if j is no_default and i > first_default
]
if invalid_default_values:
raise ValueError('Some arguments %s do not have default values, but they '
'are positioned after arguments with default values. '
'This cannot be expressed with ArgSpec.' % invalid_default_values)
return ArgSpec(args, varargs, keywords, tuple(all_defaults[first_default:]))
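# Illustrative sketch (not part of the original module): the partial-handling
# strategy described above, shown on a hypothetical helper. The function name
# `_example_partial_argspecs` and the sample callables are assumptions for
# demonstration only.
def _example_partial_argspecs():
    """Demonstrates how `getargspec` treats `functools.partial` objects."""
    def func(m, n):
        return 2 * m + n
    # A positional default is pruned from the resulting ArgSpec entirely.
    print(getargspec(functools.partial(func, 1)))    # ArgSpec(args=['n'], ..., defaults=None)
    # A keyword default on a trailing argument is representable as a default.
    print(getargspec(functools.partial(func, n=3)))  # defaults=(3,)
    # A default on `m` but not on `n` cannot be expressed as an ArgSpec.
    try:
        getargspec(functools.partial(func, m=1))
    except ValueError as e:
        print('ValueError:', e)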
def getfullargspec(obj):
"""TFDecorator-aware replacement for `inspect.getfullargspec`.
This wrapper emulates `inspect.getfullargspec` in Python 2.
Args:
obj: A callable, possibly decorated.
Returns:
The `FullArgSpec` that describes the signature of
the outermost decorator that changes the callable's signature. If the
callable is not decorated, `inspect.getfullargspec()` will be called
directly on the callable.
"""
decorators, target = tf_decorator.unwrap(obj)
for d in decorators:
if d.decorator_argspec is not None:
return _convert_maybe_argspec_to_fullargspec(d.decorator_argspec)
return _getfullargspec(target)
def getcallargs(func, *positional, **named):
"""TFDecorator-aware replacement for inspect.getcallargs.
Args:
func: A callable, possibly decorated
*positional: The positional arguments that would be passed to `func`.
**named: The named argument dictionary that would be passed to `func`.
Returns:
A dictionary mapping `func`'s named arguments to the values they would
receive if `func(*positional, **named)` were called.
`getcallargs` will use the argspec from the outermost decorator that provides
it. If no attached decorators modify argspec, the final unwrapped target's
argspec will be used.
"""
argspec = getfullargspec(func)
call_args = named.copy()
this = getattr(func, 'im_self', None) or getattr(func, '__self__', None)
if ismethod(func) and this:
positional = (this,) + positional
remaining_positionals = [arg for arg in argspec.args if arg not in call_args]
call_args.update(dict(zip(remaining_positionals, positional)))
default_count = 0 if not argspec.defaults else len(argspec.defaults)
if default_count:
for arg, value in zip(argspec.args[-default_count:], argspec.defaults):
if arg not in call_args:
call_args[arg] = value
return call_args
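# Illustrative sketch (not part of the original module): how `getcallargs`
# maps positional/named arguments and falls back to declared defaults. The
# helper name `_example_getcallargs` and the sample function are assumptions.
def _example_getcallargs():
    def greet(name, greeting='hello'):
        return '%s, %s' % (greeting, name)
    # Missing parameters pick up their declared defaults.
    print(getcallargs(greet, 'world'))                 # name='world', greeting='hello'
    # Explicitly named arguments override the defaults.
    print(getcallargs(greet, 'world', greeting='hi'))  # name='world', greeting='hi'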
def getframeinfo(*args, **kwargs):
return _inspect.getframeinfo(*args, **kwargs)
def getdoc(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getdoc.
Args:
object: An object, possibly decorated.
Returns:
The docstring associated with the object.
The outermost-decorated object is intended to have the most complete
documentation, so the decorated parameter is not unwrapped.
"""
return _inspect.getdoc(object)
def getfile(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getfile."""
unwrapped_object = tf_decorator.unwrap(object)[1]
# Workaround for the case where the object is a stack frame
# and only .pyc files are used. In this case, getfile
# might return an incorrect path, so we get the path from f_globals
# instead.
if (hasattr(unwrapped_object, 'f_globals') and
'__file__' in unwrapped_object.f_globals):
return unwrapped_object.f_globals['__file__']
return _inspect.getfile(unwrapped_object)
def getmembers(object, predicate=None): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getmembers."""
return _inspect.getmembers(object, predicate)
def getmodule(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getmodule."""
return _inspect.getmodule(object)
def getmro(cls):
"""TFDecorator-aware replacement for inspect.getmro."""
return _inspect.getmro(cls)
def getsource(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getsource."""
return _inspect.getsource(tf_decorator.unwrap(object)[1])
def getsourcefile(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getsourcefile."""
return _inspect.getsourcefile(tf_decorator.unwrap(object)[1])
def getsourcelines(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.getsourcelines."""
return _inspect.getsourcelines(tf_decorator.unwrap(object)[1])
def isbuiltin(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isbuiltin."""
return _inspect.isbuiltin(tf_decorator.unwrap(object)[1])
def isclass(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isclass."""
return _inspect.isclass(tf_decorator.unwrap(object)[1])
def isfunction(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isfunction."""
return _inspect.isfunction(tf_decorator.unwrap(object)[1])
def isframe(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.ismodule."""
return _inspect.isframe(tf_decorator.unwrap(object)[1])
def isgenerator(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isgenerator."""
return _inspect.isgenerator(tf_decorator.unwrap(object)[1])
def ismethod(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.ismethod."""
return _inspect.ismethod(tf_decorator.unwrap(object)[1])
def ismodule(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.ismodule."""
return _inspect.ismodule(tf_decorator.unwrap(object)[1])
def isroutine(object): # pylint: disable=redefined-builtin
"""TFDecorator-aware replacement for inspect.isroutine."""
return _inspect.isroutine(tf_decorator.unwrap(object)[1])
def stack(context=1):
"""TFDecorator-aware replacement for inspect.stack."""
return _inspect.stack(context)[1:]
def getsource_no_unwrap(obj):
"""Return source code for an object. Does not unwrap TFDecorators.
The source code is returned literally, including indentation for functions not
at the top level. This function is analogous to inspect.getsource, with one
key difference - it doesn't unwrap decorators. For simplicity, support for
some Python object types is dropped (tracebacks, frames, code objects).
Args:
obj: a class, method, or function object.
Returns:
source code as a string
"""
lines, lnum = _inspect.findsource(obj)
return ''.join(_inspect.getblock(lines[lnum:]))
|
|
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains the implementation of the netconf interface for the
physical router configuration manager.
"""
from lxml import etree
from ncclient import manager
import copy
import time
import datetime
class PhysicalRouterConfig(object):
# mapping from contrail family names to junos
_FAMILY_MAP = {
'route-target': '<route-target/>',
'inet-vpn': '<inet-vpn><unicast/></inet-vpn>',
'inet6-vpn': '<inet6-vpn><unicast/></inet6-vpn>',
'e-vpn': '<evpn><signaling/></evpn>'
}
def __init__(self, management_ip, user_creds, vendor, product, vnc_managed, logger=None):
self.management_ip = management_ip
self.user_creds = user_creds
self.vendor = vendor
self.product = product
self.vnc_managed = vnc_managed
self.reset_bgp_config()
self._logger = logger
self.commit_stats = {
'netconf_enabled':False,
'netconf_enabled_status':'',
'last_commit_time': '',
'last_commit_duration': '',
'commit_status_message': '',
'total_commits_sent_since_up': 0,
}
self.bgp_config_sent = False
# end __init__
def update(self, management_ip, user_creds, vendor, product, vnc_managed):
self.management_ip = management_ip
self.user_creds = user_creds
self.vendor = vendor
self.product = product
self.vnc_managed = vnc_managed
# end update
def get_commit_stats(self):
return self.commit_stats
#end get_commit_stats
def send_netconf(self, new_config, default_operation="merge",
operation="replace"):
if (self.vendor is None or self.product is None or
self.vendor.lower() != "juniper" or self.product.lower() != "mx"):
self._logger.info("auto configuraion of physical router is not supported \
on the configured vendor family, ip: %s, not pushing netconf message" % (self.management_ip))
self.commit_stats['netconf_enabled'] = False
self.commit_stats['netconf_enabled_status'] = "netconf configuraion is not supported on this vendor/product family"
return
if (self.vnc_managed is None or self.vnc_managed == False):
self._logger.info("vnc managed property must be set for a physical router to get auto \
configured, ip: %s, not pushing netconf message" % (self.management_ip))
self.commit_stats['netconf_enabled'] = False
self.commit_stats['netconf_enabled_status'] = "netconf auto configuraion is not enabled on this physical router"
return
self.commit_stats['netconf_enabled'] = True
self.commit_stats['netconf_enabled_status'] = ''
start_time = None
try:
with manager.connect(host=self.management_ip, port=22,
username=self.user_creds['username'],
password=self.user_creds['password'],
unknown_host_cb=lambda x, y: True) as m:
add_config = etree.Element(
"config",
nsmap={"xc": "urn:ietf:params:xml:ns:netconf:base:1.0"})
config = etree.SubElement(add_config, "configuration")
config_group = etree.SubElement(config, "groups", operation=operation)
contrail_group = etree.SubElement(config_group, "name")
contrail_group.text = "__contrail__"
if isinstance(new_config, list):
for nc in new_config:
config_group.append(nc)
else:
config_group.append(new_config)
if operation == "delete":
apply_groups = etree.SubElement(config, "apply-groups", operation=operation)
else:
apply_groups = etree.SubElement(config, "apply-groups")
apply_groups.text = "__contrail__"
self._logger.info("\nsend netconf message: %s\n" % (etree.tostring(add_config, pretty_print=True)))
m.edit_config(
target='candidate', config=etree.tostring(add_config),
test_option='test-then-set',
default_operation=default_operation)
self.commit_stats['total_commits_sent_since_up'] += 1
start_time = time.time()
m.commit()
end_time = time.time()
self.commit_stats['commit_status_message'] = 'success'
self.commit_stats['last_commit_time'] = datetime.datetime.fromtimestamp(end_time).strftime('%Y-%m-%d %H:%M:%S')
self.commit_stats['last_commit_duration'] = str(end_time - start_time)
except Exception as e:
if self._logger:
self._logger.error("Router %s: %s" % (self.management_ip,
e.message))
self.commit_stats['commit_status_message'] = 'failed to apply config, router response: ' + e.message
if start_time is not None:
self.commit_stats['last_commit_time'] = datetime.datetime.fromtimestamp(start_time).strftime('%Y-%m-%d %H:%M:%S')
self.commit_stats['last_commit_duration'] = str(time.time() - start_time)
# end send_netconf
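# For reference (illustrative only, not part of the original code): the payload
# built by send_netconf around the passed-in element(s) has roughly this shape,
# with whitespace added for readability:
#
#   <config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
#     <configuration>
#       <groups operation="replace">          <!-- or operation="delete" -->
#         <name>__contrail__</name>
#         ... new_config element(s) ...
#       </groups>
#       <apply-groups>__contrail__</apply-groups>
#     </configuration>
#   </config>
#
# The group is written to the candidate datastore with test-then-set and then
# confirmed via m.commit().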
def add_dynamic_tunnels(self, tunnel_source_ip, ip_fabric_nets, bgp_router_ips):
self.tunnel_config = etree.Element("routing-options")
dynamic_tunnels = etree.SubElement(self.tunnel_config, "dynamic-tunnels")
dynamic_tunnel = etree.SubElement(dynamic_tunnels, "dynamic-tunnel")
etree.SubElement(dynamic_tunnel, "name").text = "__contrail__"
etree.SubElement(dynamic_tunnel, "source-address").text = tunnel_source_ip
etree.SubElement(dynamic_tunnel, "gre")
if ip_fabric_nets is not None:
for subnet in ip_fabric_nets.get("subnet", []):
dest_network = etree.SubElement(dynamic_tunnel, "destination-networks")
etree.SubElement(dest_network, "name").text = subnet['ip_prefix'] + '/' + str(subnet['ip_prefix_len'])
for bgp_router_ip in bgp_router_ips:
dest_network = etree.SubElement(dynamic_tunnel, "destination-networks")
etree.SubElement(dest_network, "name").text = bgp_router_ip + '/32'
#end add_dynamic_tunnels
'''
ri_name: routing instance name to be configured on mx
import/export targets: routing instance import, export targets
prefixes: for l3 public vrf static routes, bug#1395938
gateways: for l2 evpn, bug#1395944
router_external: this indicates the routing instance configured is for
the public network
interfaces: logical interfaces to be part of vrf
fip_map: contrail instance ip to floating-ip map, used for snat & floating ip support
network_id : this is used for configuring irb interfaces
'''
def add_routing_instance(self, ri_name, import_targets, export_targets,
prefixes=[], gateways=[], router_external=False,
interfaces=[], vni=None, fip_map=None, network_id=None):
self.routing_instances[ri_name] = {'import_targets': import_targets,
'export_targets': export_targets,
'prefixes': prefixes,
'gateways': gateways,
'router_external': router_external,
'interfaces': interfaces,
'vni': vni,
'fip_map': fip_map}
ri_config = self.ri_config or etree.Element("routing-instances")
policy_config = self.policy_config or etree.Element("policy-options")
ri = etree.SubElement(ri_config, "instance")
etree.SubElement(ri, "name").text = ri_name
ri_opt = None
if router_external:
ri_opt = etree.SubElement(ri, "routing-options")
static_config = etree.SubElement(ri_opt, "static")
route_config = etree.SubElement(static_config, "route")
etree.SubElement(route_config, "name").text = "0.0.0.0/0"
etree.SubElement(route_config, "next-table").text = "inet.0"
#for both l2 and l3
etree.SubElement(ri, "vrf-import").text = ri_name + "-import"
etree.SubElement(ri, "vrf-export").text = ri_name + "-export"
if vni is None or router_external:
etree.SubElement(ri, "instance-type").text = "vrf"
etree.SubElement(ri, "vrf-table-label") #only for l3
if fip_map is None:
for interface in interfaces:
if_element = etree.SubElement(ri, "interface")
etree.SubElement(if_element, "name").text = interface
if ri_opt is None:
ri_opt = etree.SubElement(ri, "routing-options")
if prefixes and fip_map is None:
static_config = etree.SubElement(ri_opt, "static")
for prefix in prefixes:
route_config = etree.SubElement(static_config, "route")
etree.SubElement(route_config, "name").text = prefix
etree.SubElement(route_config, "discard")
auto_export = "<auto-export><family><inet><unicast/></inet></family></auto-export>"
ri_opt.append(etree.fromstring(auto_export))
else:
etree.SubElement(ri, "instance-type").text = "virtual-switch"
if fip_map is not None:
if ri_opt is None:
ri_opt = etree.SubElement(ri, "routing-options")
static_config = etree.SubElement(ri_opt, "static")
route_config = etree.SubElement(static_config, "route")
etree.SubElement(route_config, "name").text = "0.0.0.0/0"
etree.SubElement(route_config, "next-hop").text = interfaces[0]
if_element = etree.SubElement(ri, "interface")
etree.SubElement(if_element, "name").text = interfaces[0]
public_vrf_ips = {}
for pip in fip_map.values():
if pip["vrf_name"] not in public_vrf_ips:
public_vrf_ips[pip["vrf_name"]] = set()
public_vrf_ips[pip["vrf_name"]].add(pip["floating_ip"])
for public_vrf, fips in public_vrf_ips.items():
ri_public = etree.SubElement(ri_config, "instance")
etree.SubElement(ri_public, "name").text = public_vrf
ri_opt = etree.SubElement(ri_public, "routing-options")
static_config = etree.SubElement(ri_opt, "static")
if_element = etree.SubElement(ri_public, "interface")
etree.SubElement(if_element, "name").text = interfaces[1]
for fip in fips:
route_config = etree.SubElement(static_config, "route")
etree.SubElement(route_config, "name").text = fip + "/32"
etree.SubElement(route_config, "next-hop").text = interfaces[1]
# add policies for export route targets
ps = etree.SubElement(policy_config, "policy-statement")
etree.SubElement(ps, "name").text = ri_name + "-export"
term = etree.SubElement(ps, "term")
etree.SubElement(term, "name").text= "t1"
then = etree.SubElement(term, "then")
for route_target in export_targets:
comm = etree.SubElement(then, "community")
etree.SubElement(comm, "add")
etree.SubElement(comm, "community-name").text = route_target.replace(':', '_')
if fip_map is not None:
#for nat instance
etree.SubElement(then, "reject")
else:
etree.SubElement(then, "accept")
# add policies for import route targets
ps = etree.SubElement(policy_config, "policy-statement")
etree.SubElement(ps, "name").text = ri_name + "-import"
term = etree.SubElement(ps, "term")
etree.SubElement(term, "name").text= "t1"
from_ = etree.SubElement(term, "from")
for route_target in import_targets:
target_name = route_target.replace(':', '_')
etree.SubElement(from_, "community").text = target_name
then = etree.SubElement(term, "then")
etree.SubElement(then, "accept")
then = etree.SubElement(ps, "then")
etree.SubElement(then, "reject")
# add firewall config for public VRF
forwarding_options_config = self.forwarding_options_config
firewall_config = self.firewall_config
if router_external:
if self.forwarding_options_config is None:
forwarding_options_config = etree.Element("forwarding-options")
fo = etree.SubElement(forwarding_options_config, "family")
inet = etree.SubElement(fo, "inet")
f = etree.SubElement(inet, "filter")
etree.SubElement(f, "input").text = "redirect_to_public_vrf_filter"
firewall_config = self.firewall_config or etree.Element("firewall")
fc = etree.SubElement(firewall_config, "family")
inet = etree.SubElement(fc, "inet")
f = etree.SubElement(inet, "filter")
etree.SubElement(f, "name").text = "redirect_to_public_vrf_filter"
self.inet_forwarding_filter = f
term = etree.SubElement(f, "term")
etree.SubElement(term, "name").text= "default-term"
then_ = etree.SubElement(term, "then")
etree.SubElement(then_, "accept")
term = etree.Element("term")
etree.SubElement(term, "name").text= "term-" + ri_name[:59]
if prefixes:
from_ = etree.SubElement(term, "from")
for prefix in prefixes:
etree.SubElement(from_, "destination-address").text = prefix
then_ = etree.SubElement(term, "then")
etree.SubElement(then_, "routing-instance").text = ri_name
#insert after 'name' element but before the last term
self.inet_forwarding_filter.insert(1, term)
if fip_map is not None:
firewall_config = self.firewall_config or etree.Element("firewall")
fc = etree.SubElement(firewall_config, "family")
inet = etree.SubElement(fc, "inet")
f = etree.SubElement(inet, "filter")
etree.SubElement(f, "name").text = "redirect_to_" + ri_name[:46] + "_vrf"
term = etree.SubElement(f, "term")
etree.SubElement(term, "name").text= "term-" + ri_name[:59]
from_ = etree.SubElement(term, "from")
for fip_user_ip in fip_map.keys():
etree.SubElement(from_, "source-address").text = fip_user_ip
then_ = etree.SubElement(term, "then")
etree.SubElement(then_, "routing-instance").text = ri_name
term = etree.SubElement(f, "term")
etree.SubElement(term, "name").text= "default-term"
then_ = etree.SubElement(term, "then")
etree.SubElement(then_, "accept")
interfaces_config = self.interfaces_config or etree.Element("interfaces")
irb_intf = etree.SubElement(interfaces_config, "interface")
etree.SubElement(irb_intf, "name").text = "irb"
intf_unit = etree.SubElement(irb_intf, "unit")
etree.SubElement(intf_unit, "name").text = str(network_id)
family = etree.SubElement(intf_unit, "family")
inet = etree.SubElement(family, "inet")
f = etree.SubElement(inet, "filter")
iput = etree.SubElement(f, "input")
etree.SubElement(iput, "filter-name").text = "redirect_to_" + ri_name[:46] + "_vrf"
# add L2 EVPN and BD config
bd_config = None
interfaces_config = self.interfaces_config
proto_config = self.proto_config
if (router_external==False and vni is not None and
self.is_family_configured(self.bgp_params, "e-vpn")):
etree.SubElement(ri, "vtep-source-interface").text = "lo0.0"
bd_config = etree.SubElement(ri, "bridge-domains")
bd= etree.SubElement(bd_config, "domain")
etree.SubElement(bd, "name").text = "bd-" + str(vni)
etree.SubElement(bd, "vlan-id").text = 'none'
vxlan = etree.SubElement(bd, "vxlan")
etree.SubElement(vxlan, "vni").text = str(vni)
for interface in interfaces:
if_element = etree.SubElement(bd, "interface")
etree.SubElement(if_element, "name").text = interface
etree.SubElement(bd, "routing-interface").text = "irb." + str(network_id) #network_id is unique, hence irb
evpn_proto_config = etree.SubElement(ri, "protocols")
evpn = etree.SubElement(evpn_proto_config, "evpn")
etree.SubElement(evpn, "encapsulation").text = "vxlan"
etree.SubElement(evpn, "extended-vni-list").text = "all"
interfaces_config = self.interfaces_config or etree.Element("interfaces")
irb_intf = etree.SubElement(interfaces_config, "interface")
etree.SubElement(irb_intf, "name").text = "irb"
etree.SubElement(irb_intf, "gratuitous-arp-reply")
if gateways is not None:
intf_unit = etree.SubElement(irb_intf, "unit")
etree.SubElement(intf_unit, "name").text = str(network_id)
family = etree.SubElement(intf_unit, "family")
inet = etree.SubElement(family, "inet")
for gateway in gateways:
addr = etree.SubElement(inet, "address")
etree.SubElement(addr, "name").text = gateway
lo_intf = etree.SubElement(interfaces_config, "interface")
etree.SubElement(lo_intf, "name").text = "lo0"
intf_unit = etree.SubElement(lo_intf, "unit")
etree.SubElement(intf_unit, "name").text = "0"
family = etree.SubElement(intf_unit, "family")
inet = etree.SubElement(family, "inet")
addr = etree.SubElement(inet, "address")
etree.SubElement(addr, "name").text = self.bgp_params['address'] + "/32"
etree.SubElement(addr, "primary")
etree.SubElement(addr, "preferred")
for interface in interfaces:
intf = etree.SubElement(interfaces_config, "interface")
intfparts = interface.split(".")
etree.SubElement(intf, "name").text = intfparts[0]
etree.SubElement(intf, "encapsulation").text = "ethernet-bridge"
intf_unit = etree.SubElement(intf, "unit")
etree.SubElement(intf_unit, "name").text = intfparts[1]
family = etree.SubElement(intf_unit, "family")
etree.SubElement(family, "bridge")
proto_config = self.proto_config or etree.Element("protocols")
mpls = etree.SubElement(proto_config, "mpls")
intf = etree.SubElement(mpls, "interface")
etree.SubElement(intf, "name").text = "all"
#fip services config
services_config = self.services_config
if fip_map is not None:
services_config = self.services_config or etree.Element("services")
service_name = 'sv-' + ri_name
# mx limits service-set and nat-rule name length to a maximum of 63 chars
service_name = service_name[:23]
service_set = etree.SubElement(services_config, "service-set")
etree.SubElement(service_set, "name").text = service_name
nat_rule = etree.SubElement(service_set, "nat-rules")
etree.SubElement(nat_rule, "name").text = service_name + "-sn-rule"
nat_rule = etree.SubElement(service_set, "nat-rules")
etree.SubElement(nat_rule, "name").text = service_name + "-dn-rule"
next_hop_service = etree.SubElement(service_set, "next-hop-service")
etree.SubElement(next_hop_service , "inside-service-interface").text = interfaces[0]
etree.SubElement(next_hop_service , "outside-service-interface").text = interfaces[1]
nat = etree.SubElement(services_config, "nat")
snat_rule = etree.SubElement(nat, "rule")
etree.SubElement(snat_rule, "name").text = service_name + "-sn-rule"
etree.SubElement(snat_rule, "match-direction").text = "input"
dnat_rule = etree.SubElement(nat, "rule")
etree.SubElement(dnat_rule, "name").text = service_name + "-dn-rule"
etree.SubElement(dnat_rule, "match-direction").text = "output"
for pip, fip_vn in fip_map.items():
fip = fip_vn["floating_ip"]
term = etree.SubElement(snat_rule, "term")
etree.SubElement(term, "name").text = "term_" + pip.replace('.', '_')
from_ = etree.SubElement(term, "from")
src_addr = etree.SubElement(from_, "source-address")
etree.SubElement(src_addr, "name").text = pip + "/32" # private ip
then_ = etree.SubElement(term, "then")
translated = etree.SubElement(then_, "translated")
etree.SubElement(translated , "source-prefix").text = fip + "/32" # public ip
translation_type = etree.SubElement(translated, "translation-type")
etree.SubElement(translation_type, "basic-nat44")
term = etree.SubElement(dnat_rule, "term")
etree.SubElement(term, "name").text = "term_" + fip.replace('.', '_')
from_ = etree.SubElement(term, "from")
src_addr = etree.SubElement(from_, "destination-address")
etree.SubElement(src_addr, "name").text = fip + "/32" #public ip
then_ = etree.SubElement(term, "then")
translated = etree.SubElement(then_, "translated")
etree.SubElement(translated , "destination-prefix").text = pip + "/32" #source ip
translation_type = etree.SubElement(translated, "translation-type")
etree.SubElement(translation_type, "dnat-44")
interfaces_config = self.interfaces_config or etree.Element("interfaces")
si_intf = etree.SubElement(interfaces_config, "interface")
intfparts = interfaces[0].split(".")
etree.SubElement(si_intf, "name").text = intfparts[0]
intf_unit = etree.SubElement(si_intf, "unit")
etree.SubElement(intf_unit, "name").text = interfaces[0].split(".")[1]
family = etree.SubElement(intf_unit, "family")
etree.SubElement(family, "inet")
etree.SubElement(intf_unit, "service-domain").text = "inside"
intf_unit = etree.SubElement(si_intf, "unit")
etree.SubElement(intf_unit, "name").text = interfaces[1].split(".")[1]
family = etree.SubElement(intf_unit, "family")
etree.SubElement(family, "inet")
etree.SubElement(intf_unit, "service-domain").text = "outside"
self.forwarding_options_config = forwarding_options_config
self.firewall_config = firewall_config
self.policy_config = policy_config
self.proto_config = proto_config
self.interfaces_config = interfaces_config
self.services_config = services_config
self.route_targets |= import_targets | export_targets
self.ri_config = ri_config
# end add_routing_instance
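# For reference (illustrative only, not part of the original code): a typical
# call for an L3 VRF attached to a public network might look like the sketch
# below, where `pr` is a PhysicalRouterConfig instance and all values are
# hypothetical placeholders.
#
#   pr.add_routing_instance('__contrail__public-l3',
#                           import_targets=set(['target:64512:1']),
#                           export_targets=set(['target:64512:1']),
#                           prefixes=['203.0.113.0/24'],
#                           router_external=True,
#                           interfaces=['ge-0/0/0.100'],
#                           network_id=5)
#
# The accumulated per-feature elements (routing-instances, policy-options,
# firewall, interfaces, ...) are only pushed later by send_bgp_config().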
def set_global_routing_options(self, bgp_params):
if bgp_params['address'] is not None:
self.global_routing_options_config = etree.Element("routing-options")
etree.SubElement(self.global_routing_options_config, "router-id").text = bgp_params['address']
#end set_global_routing_options
def is_family_configured(self, params, family_name):
if params is None or params.get('address_families') is None:
return False
families = params['address_families'].get('family', [])
if family_name in families:
return True
return False
def _add_family_etree(self, parent, params):
if params.get('address_families') is None:
return
family_etree = etree.SubElement(parent, "family")
for family in params['address_families'].get('family', []):
if family in self._FAMILY_MAP:
family_subtree = etree.fromstring(self._FAMILY_MAP[family])
family_etree.append(family_subtree)
else:
etree.SubElement(family_etree, family)
# end _add_family_etree
def add_bgp_auth_config(self, bgp_config, bgp_params):
if bgp_params.get('auth_data') is None:
return
keys = bgp_params['auth_data'].get('key_items', [])
if len(keys) > 0:
etree.SubElement(bgp_config, "authentication-key").text = keys[0].get('key')
def add_bgp_hold_time_config(self, bgp_config, bgp_params):
if bgp_params.get('hold_time') is None:
return
etree.SubElement(bgp_config, "hold-time").text = str(bgp_params.get('hold_time'))
def set_bgp_config(self, params):
self.bgp_params = params
if (self.vnc_managed is None or self.vnc_managed == False):
if self.bgp_config_sent:
# user must have unset the vnc managed property, so temporarily set it
# to delete the existing config
self.vnc_managed = True
self.delete_bgp_config()
self.vnc_managed = False
return
return
# end set_bgp_config
def _get_bgp_config_xml(self, external=False):
if self.bgp_params is None:
return None
bgp_config = etree.Element("group", operation="replace")
if external:
etree.SubElement(bgp_config, "name").text = "__contrail_external__"
etree.SubElement(bgp_config, "type").text = "external"
else:
etree.SubElement(bgp_config, "name").text = "__contrail__"
etree.SubElement(bgp_config, "type").text = "internal"
etree.SubElement(bgp_config, "multihop")
local_address = etree.SubElement(bgp_config, "local-address")
local_address.text = self.bgp_params['address']
self._add_family_etree(bgp_config, self.bgp_params)
self.add_bgp_auth_config(bgp_config, self.bgp_params)
self.add_bgp_hold_time_config(bgp_config, self.bgp_params)
etree.SubElement(bgp_config, "keep").text = "all"
return bgp_config
# end _get_bgp_config_xml
def reset_bgp_config(self):
self.routing_instances = {}
self.bgp_params = None
self.ri_config = None
self.tunnel_config = None
self.interfaces_config = None
self.services_config = None
self.policy_config = None
self.firewall_config = None
self.inet_forwarding_filter = None
self.forwarding_options_config = None
self.global_routing_options_config = None
self.proto_config = None
self.route_targets = set()
self.bgp_peers = {}
self.external_peers = {}
# end reset_bgp_config
def delete_bgp_config(self):
if not self.bgp_config_sent:
return
self.reset_bgp_config()
self.send_netconf([], default_operation="none", operation="delete")
self.bgp_config_sent = False
# end delete_bgp_config
def add_bgp_peer(self, router, params, attr, external):
peer_data = {}
peer_data['params'] = params
peer_data['attr'] = attr
if external:
self.external_peers[router] = peer_data
else:
self.bgp_peers[router] = peer_data
self.send_bgp_config()
# end add_bgp_peer
def delete_bgp_peer(self, router):
if router in self.bgp_peers:
del self.bgp_peers[router]
elif router in self.external_peers:
del self.external_peers[router]
else:
return
self.send_bgp_config()
# end delete_bgp_peer
def _get_neighbor_config_xml(self, bgp_config, peers):
for peer, peer_data in peers.items():
params = peer_data.get('params', {})
attr = peer_data.get('attr', {})
nbr = etree.SubElement(bgp_config, "neighbor")
etree.SubElement(nbr, "name").text = peer
bgp_sessions = attr.get('session')
if bgp_sessions:
# for now assume only one session
session_attrs = bgp_sessions[0].get('attributes', [])
for session_attr in session_attrs:
# For now, only consider the attribute if bgp-router is
# not specified
if session_attr.get('bgp_router') is None:
self._add_family_etree(nbr, session_attr)
self.add_bgp_auth_config(nbr, session_attr)
break
if params.get('autonomous_system') is not None:
etree.SubElement(nbr, "peer-as").text = str(params.get('autonomous_system'))
# end _get_neighbor_config_xml
def send_bgp_config(self):
bgp_config = self._get_bgp_config_xml()
if bgp_config is None:
return
proto_config = etree.Element("protocols")
bgp = etree.SubElement(proto_config, "bgp")
bgp.append(bgp_config)
self._get_neighbor_config_xml(bgp_config, self.bgp_peers)
if self.external_peers is not None:
ext_grp_config = self._get_bgp_config_xml(True)
bgp.append(ext_grp_config)
self._get_neighbor_config_xml(ext_grp_config, self.external_peers)
routing_options_config = etree.Element("routing-options")
etree.SubElement(
routing_options_config,
"route-distinguisher-id").text = self.bgp_params['identifier']
etree.SubElement(routing_options_config, "autonomous-system").text = \
str(self.bgp_params.get('autonomous_system'))
config_list = [proto_config, routing_options_config]
if self.ri_config is not None:
config_list.append(self.ri_config)
for route_target in self.route_targets:
comm = etree.SubElement(self.policy_config, "community")
etree.SubElement(comm, 'name').text = route_target.replace(':', '_')
etree.SubElement(comm, 'members').text = route_target
if self.tunnel_config is not None:
config_list.append(self.tunnel_config)
if self.interfaces_config is not None:
config_list.append(self.interfaces_config)
if self.services_config is not None:
config_list.append(self.services_config)
if self.policy_config is not None:
config_list.append(self.policy_config)
if self.firewall_config is not None:
config_list.append(self.firewall_config)
if self.forwarding_options_config is not None:
config_list.append(self.forwarding_options_config)
if self.global_routing_options_config is not None:
config_list.append(self.global_routing_options_config)
if self.proto_config is not None:
config_list.append(self.proto_config)
self.send_netconf(config_list)
self.bgp_config_sent = True
# end send_bgp_config
# end PhysicalRouterConfig
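# Illustrative sketch (not part of the original module): a typical call
# sequence for PhysicalRouterConfig. The address, credentials and BGP
# parameters below are hypothetical placeholders, and the last step opens a
# real netconf session, so it only succeeds against a reachable Juniper MX.
def _example_push_bgp_config():
    import logging
    pr = PhysicalRouterConfig('192.0.2.1',
                              {'username': 'admin', 'password': 'secret'},
                              vendor='juniper', product='mx', vnc_managed=True,
                              logger=logging.getLogger(__name__))
    pr.set_bgp_config({'address': '192.0.2.1',
                       'identifier': '192.0.2.1',
                       'autonomous_system': 64512,
                       'address_families': {'family': ['inet-vpn', 'e-vpn']}})
    # add_bgp_peer() triggers send_bgp_config(), which assembles the protocols,
    # routing-options and (if present) routing-instances elements and pushes
    # them to the device via send_netconf().
    pr.add_bgp_peer('192.0.2.2', {'autonomous_system': 64512}, {}, external=False)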
|
|
importantDatabase = {
"floor_00": {
"nameOnWeb": "Ground Floor",
"room_00": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave01",
"name": "Device02",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_02": {
"inNode": "dataNodeSlave01",
"name": "Device05",
"nameModule": "Light",
"nameOnWeb": "Sliding Door",
"value": 0
}
},
"Sensor": {},
"nameOnWeb": "Garage"
},
"room_01": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave01",
"name": "Device04",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave01",
"name": "Sensor03",
"nameModule": "Humidity",
"nameOnWeb": "Humidity",
"value": 54.0
},
"Sensor_02": {
"inNode": "dataNodeSlave01",
"name": "Sensor04",
"nameModule": "Temperature",
"nameOnWeb": "Temperature",
"value": 24.0
}
},
"nameOnWeb": "Warehouse"
},
"room_02": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave01",
"name": "Device06",
"nameModule": "Television",
"nameOnWeb": "Television",
"value": 0
},
"Device_02": {
"inNode": "dataNodeSlave01",
"name": "Device08",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_03": {
"inNode": "dataNodeSlave01",
"name": "Device07",
"nameModule": "AirConditioners",
"nameOnWeb": "Air Conditioners",
"value": 0
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave01",
"name": "Sensor01",
"nameModule": "Temperature",
"nameOnWeb": "Temperature",
"value": 28.9
}
},
"nameOnWeb": "Servant Room"
},
"room_03": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave01",
"name": "Device03",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_02": {
"inNode": "dataNodeSlave01",
"name": "Device01",
"nameModule": "WashingMachine",
"nameOnWeb": "Washing Machine",
"value": 0
}
},
"Sensor": {},
"nameOnWeb": "WC"
}
},
"floor_01": {
"nameOnWeb": "First Floor",
"room_00": {
"Device": {
"Device_01": {
"inNode": "dataNodeMaster01",
"name": "Device11",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_02": {
"inNode": "dataNodeMaster01",
"name": "Device08",
"nameModule": "Television",
"nameOnWeb": "Television",
"value": 0
},
"Device_03": {
"inNode": "dataNodeMaster01",
"name": "Device09",
"nameModule": "AirConditioners",
"nameOnWeb": "Air Conditioners",
"value": 0
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeMaster01",
"name": "Sensor01",
"nameModule": "Temperature",
"nameOnWeb": "Temperature",
"value": 28.9
}
},
"nameOnWeb": "Living Room"
},
"room_01": {
"Device": {
"Device_01": {
"inNode": "dataNodeMaster01",
"name": "Device01",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_02": {
"inNode": "dataNodeMaster01",
"name": "Device10",
"nameModule": "Television",
"nameOnWeb": "Television",
"value": 0
}
},
"Sensor": {},
"nameOnWeb": "Dining Room"
},
"room_02": {
"Device": {
"Device_01": {
"inNode": "dataNodeMaster01",
"name": "Device04",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_02": {
"inNode": "dataNodeMaster01",
"name": "Device07",
"nameModule": "Refrigerator",
"nameOnWeb": "Refrigerator",
"value": 0
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave01",
"name": "Sensor05",
"nameModule": "GasSensor",
"nameOnWeb": "Gas Sensor",
"value": 0.0
}
},
"nameOnWeb": "Kitchen"
},
"room_03": {
"Device": {
"Device_01": {
"inNode": "dataNodeMaster01",
"name": "Device02",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_02": {
"inNode": "dataNodeMaster01",
"name": "Device06",
"nameModule": "AirConditioners",
"nameOnWeb": "Air Conditioners",
"value": 0
},
"Device_03": {
"inNode": "dataNodeMaster01",
"name": "Device03",
"nameModule": "Television",
"nameOnWeb": "Television",
"value": 0
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeMaster01",
"name": "Sensor02",
"nameModule": "Temperature",
"nameOnWeb": "Temperature",
"value": 29.1
}
},
"nameOnWeb": "Master's Room"
},
"room_04": {
"Device": {
"Device_01": {
"inNode": "dataNodeMaster01",
"name": "Device05",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
}
},
"Sensor": {},
"nameOnWeb": "WC"
}
},
"floor_02": {
"nameOnWeb": "Second Floor",
"room_00": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave02",
"name": "Device02",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
}
},
"Sensor": {},
"nameOnWeb": "Altar room"
},
"room_01": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave02",
"name": "Device03",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 1
},
"Device_02": {
"inNode": "dataNodeSlave02",
"name": "Device06",
"nameModule": "AirConditioners",
"nameOnWeb": "Air Conditioners",
"value": 0
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave02",
"name": "Sensor01",
"nameModule": "Temperature",
"nameOnWeb": "Temperature",
"value": 28.9
}
},
"nameOnWeb": "Children's room"
},
"room_02": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave02",
"name": "Device04",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
},
"Device_02": {
"inNode": "dataNodeSlave02",
"name": "Device01",
"nameModule": "AirConditioners",
"nameOnWeb": "Air Conditioners",
"value": 0
}
},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave02",
"name": "Sensor02",
"nameModule": "Temperature",
"nameOnWeb": "Temperature",
"value": 29.1
}
},
"nameOnWeb": "Guest's room"
},
"room_03": {
"Device": {
"Device_01": {
"inNode": "dataNodeSlave02",
"name": "Device05",
"nameModule": "Light",
"nameOnWeb": "Light",
"value": 0
}
},
"Sensor": {},
"nameOnWeb": "WC room"
},
"room_04": {
"Device": {},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave02",
"name": "Sensor07",
"nameModule": "Humidity",
"nameOnWeb": "Soil Moisture",
"value": 75.0
}
},
"nameOnWeb": "Garden"
}
},
"floor_03": {
"nameOnWeb": "Third Floor",
"room_00": {
"Device": {},
"Sensor": {
"Sensor_01": {
"inNode": "dataNodeSlave02",
"name": "Sensor06",
"nameModule": "RainSensor",
"nameOnWeb": "Rain sensor",
"value": 6.0
}
},
"nameOnWeb": "OutSide"
}
}
}
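# Illustrative sketch (not part of the original data): one way to walk the
# nested floor -> room -> Device/Sensor structure above. The helper name
# `list_all_endpoints` is hypothetical.
def list_all_endpoints(db=importantDatabase):
    """Yield (floor, room, kind, key, entry) for every Device and Sensor."""
    for floor_key, floor in db.items():
        for room_key, room in floor.items():
            if room_key == "nameOnWeb":
                continue  # floor-level display name, not a room
            for kind in ("Device", "Sensor"):
                for entry_key, entry in room.get(kind, {}).items():
                    yield (floor["nameOnWeb"], room["nameOnWeb"], kind,
                           entry_key, entry)
# Example: print every light that is currently switched on.
# for floor, room, kind, key, entry in list_all_endpoints():
#     if entry["nameModule"] == "Light" and entry["value"]:
#         print(floor, room, entry["nameOnWeb"])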
|
|
"""Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <[email protected]>
# Balazs Kegl <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# Mathieu Blondel <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .isotonic import IsotonicRegression
from .metrics.classification import _check_binary_probabilistic_predictions
from .model_selection import check_cv
from .preprocessing import LabelBinarizer
from .svm import LinearSVC
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.fixes import signature
from .utils.validation import check_is_fitted
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case cv="prefit" is passed to __init__,
it is assumed that base_estimator has already been
fitted and all data is used for calibration. Note that
data for fitting the classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' or 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples ``(<<1000)`` since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer, cross-validation generator, iterable or "prefit", optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used. If ``y`` is neither binary nor
multiclass, :class:`KFold` is used.
Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_ : list (len() equal to cv or 1 if cv == "prefit")
The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
# Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, y, classifier=True)
fit_parameters = signature(base_estimator.fit).parameters
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in fit_parameters):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv.split(X, y):
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
# classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
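# Illustrative sketch (not part of the original module): calibrating a prefit
# classifier on held-out data. The synthetic data and the helper name
# `_example_prefit_calibration` are assumptions for demonstration only.
def _example_prefit_calibration():
    rng = np.random.RandomState(0)
    X = rng.randn(200, 4)
    y = (X[:, 0] + X[:, 1] > 0).astype(int)
    # Fit the base classifier on one half, calibrate on the disjoint other half.
    base = LinearSVC(random_state=0).fit(X[:100], y[:100])
    calibrated = CalibratedClassifierCV(base, method='sigmoid', cv='prefit')
    calibrated.fit(X[100:], y[100:])
    return calibrated.predict_proba(X[100:110])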
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float64).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
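# Illustrative sketch (not part of the original module): _SigmoidCalibration
# learns a_, b_ such that P(y=1 | score) ~ 1 / (1 + exp(a_ * score + b_)).
# The synthetic scores and the helper name are assumptions for demonstration.
def _example_sigmoid_calibration():
    rng = np.random.RandomState(0)
    scores = rng.randn(500)
    # Larger scores are more likely to be positive, with some label noise.
    y = (scores + 0.5 * rng.randn(500) > 0).astype(int)
    sig = _SigmoidCalibration().fit(scores, y)
    # a_ is typically negative, so higher scores map to higher probabilities.
    return sig.a_, sig.b_, sig.predict(np.array([-2.0, 0.0, 2.0]))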
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
Whether y_prob needs to be normalized into the interval [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
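# Illustrative sketch (not part of the original module): calibration_curve on a
# tiny hand-made example; the helper name and values are assumptions.
def _example_calibration_curve():
    y_true = np.array([0, 0, 1, 1, 1, 0])
    y_prob = np.array([0.1, 0.4, 0.35, 0.8, 0.9, 0.6])
    # With n_bins=2, probabilities below 0.5 fall into the first bin.
    # First bin {0.1, 0.4, 0.35}: 1 positive of 3 -> prob_true[0] == 1/3.
    # Second bin {0.8, 0.9, 0.6}: 2 positives of 3 -> prob_true[1] == 2/3.
    prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=2)
    return prob_true, prob_pred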
|
|
import os, time, sys, traceback, weakref
import numpy as np
import threading
try:
import __builtin__ as builtins
import cPickle as pickle
except ImportError:
import builtins
import pickle
class ClosedError(Exception):
"""Raised when an event handler receives a request to close the connection
or discovers that the connection has been closed."""
pass
class NoResultError(Exception):
"""Raised when a request for the return value of a remote call fails
because the call has not yet returned."""
pass
class RemoteEventHandler(object):
"""
This class handles communication between two processes. One instance is present on
each process and listens for communication from the other process. This enables
(amongst other things) ObjectProxy instances to look up their attributes and call
their methods.
This class is responsible for carrying out actions on behalf of the remote process.
Each instance holds one end of a Connection which allows python
objects to be passed between processes.
For the most common operations, see _import(), close(), and transfer()
To handle and respond to incoming requests, RemoteEventHandler requires that its
processRequests method is called repeatedly (this is usually handled by the Process
classes defined in multiprocess.processes).
"""
handlers = {} ## maps {process ID : handler}. This allows unpickler to determine which process
## an object proxy belongs to
def __init__(self, connection, name, pid, debug=False):
self.debug = debug
self.conn = connection
self.name = name
self.results = {} ## reqId: (status, result); cache of request results received from the remote process
## status is either 'result' or 'error'
## if 'error', then result will be (exception, formatted exception)
## where exception may be None if it could not be passed through the Connection.
self.resultLock = threading.RLock()
self.proxies = {} ## maps {weakref(proxy): proxyId}; used to inform the remote process when a proxy has been deleted.
self.proxyLock = threading.RLock()
## attributes that affect the behavior of the proxy.
## See ObjectProxy._setProxyOptions for description
self.proxyOptions = {
'callSync': 'sync', ## 'sync', 'async', 'off'
'timeout': 10, ## float
'returnType': 'auto', ## 'proxy', 'value', 'auto'
'autoProxy': False, ## bool
'deferGetattr': False, ## True, False
'noProxyTypes': [ type(None), str, int, float, tuple, list, dict, LocalObjectProxy, ObjectProxy ],
}
self.optsLock = threading.RLock()
self.nextRequestId = 0
self.exited = False
# Mutexes to help prevent issues when multiple threads access the same RemoteEventHandler
self.processLock = threading.RLock()
self.sendLock = threading.RLock()
RemoteEventHandler.handlers[pid] = self ## register this handler as the one communicating with pid
@classmethod
def getHandler(cls, pid):
try:
return cls.handlers[pid]
except:
print(pid, cls.handlers)
raise
def debugMsg(self, msg):
if not self.debug:
return
print("[%d] %s" % (os.getpid(), str(msg)))
def getProxyOption(self, opt):
with self.optsLock:
return self.proxyOptions[opt]
def setProxyOptions(self, **kwds):
"""
Set the default behavior options for object proxies.
See ObjectProxy._setProxyOptions for more info.
"""
with self.optsLock:
self.proxyOptions.update(kwds)
def processRequests(self):
"""Process all pending requests from the pipe, return
after no more events are immediately available. (non-blocking)
Returns the number of events processed.
"""
with self.processLock:
if self.exited:
self.debugMsg(' processRequests: exited already; raise ClosedError.')
raise ClosedError()
numProcessed = 0
while self.conn.poll():
#try:
#poll = self.conn.poll()
#if not poll:
#break
#except IOError: # this can happen if the remote process dies.
## might it also happen in other circumstances?
#raise ClosedError()
try:
self.handleRequest()
numProcessed += 1
except ClosedError:
self.debugMsg('processRequests: got ClosedError from handleRequest; setting exited=True.')
self.exited = True
raise
#except IOError as err: ## let handleRequest take care of this.
#self.debugMsg(' got IOError from handleRequest; try again.')
#if err.errno == 4: ## interrupted system call; try again
#continue
#else:
#raise
except:
print("Error in process %s" % self.name)
sys.excepthook(*sys.exc_info())
if numProcessed > 0:
self.debugMsg('processRequests: finished %d requests' % numProcessed)
return numProcessed
def handleRequest(self):
"""Handle a single request from the remote process.
Blocks until a request is available."""
result = None
while True:
try:
## args, kwds are double-pickled to ensure this recv() call never fails
cmd, reqId, nByteMsgs, optStr = self.conn.recv()
break
except EOFError:
self.debugMsg(' handleRequest: got EOFError from recv; raise ClosedError.')
## remote process has shut down; end event loop
raise ClosedError()
except IOError as err:
if err.errno == 4: ## interrupted system call; try again
self.debugMsg(' handleRequest: got IOError 4 from recv; try again.')
continue
else:
self.debugMsg(' handleRequest: got IOError %d from recv (%s); raise ClosedError.' % (err.errno, err.strerror))
raise ClosedError()
self.debugMsg(" handleRequest: received %s %s" % (str(cmd), str(reqId)))
## read byte messages following the main request
byteData = []
if nByteMsgs > 0:
self.debugMsg(" handleRequest: reading %d byte messages" % nByteMsgs)
for i in range(nByteMsgs):
while True:
try:
byteData.append(self.conn.recv_bytes())
break
except EOFError:
self.debugMsg(" handleRequest: got EOF while reading byte messages; raise ClosedError.")
raise ClosedError()
except IOError as err:
if err.errno == 4:
self.debugMsg(" handleRequest: got IOError 4 while reading byte messages; try again.")
continue
else:
self.debugMsg(" handleRequest: got IOError while reading byte messages; raise ClosedError.")
raise ClosedError()
try:
if cmd == 'result' or cmd == 'error':
resultId = reqId
reqId = None ## prevents attempt to return information from this request
## (this is already a return from a previous request)
opts = pickle.loads(optStr)
self.debugMsg(" handleRequest: id=%s opts=%s" % (str(reqId), str(opts)))
#print os.getpid(), "received request:", cmd, reqId, opts
returnType = opts.get('returnType', 'auto')
if cmd == 'result':
with self.resultLock:
self.results[resultId] = ('result', opts['result'])
elif cmd == 'error':
with self.resultLock:
self.results[resultId] = ('error', (opts['exception'], opts['excString']))
elif cmd == 'getObjAttr':
result = getattr(opts['obj'], opts['attr'])
elif cmd == 'callObj':
obj = opts['obj']
fnargs = opts['args']
fnkwds = opts['kwds']
## If arrays were sent as byte messages, they must be re-inserted into the
## arguments
if len(byteData) > 0:
for i,arg in enumerate(fnargs):
if isinstance(arg, tuple) and len(arg) > 0 and arg[0] == '__byte_message__':
ind = arg[1]
dtype, shape = arg[2]
fnargs[i] = np.fromstring(byteData[ind], dtype=dtype).reshape(shape)
for k,arg in fnkwds.items():
if isinstance(arg, tuple) and len(arg) > 0 and arg[0] == '__byte_message__':
ind = arg[1]
dtype, shape = arg[2]
fnkwds[k] = np.fromstring(byteData[ind], dtype=dtype).reshape(shape)
if len(fnkwds) == 0: ## need to do this because some functions do not allow keyword arguments.
try:
result = obj(*fnargs)
except:
print("Failed to call object %s: %d, %s" % (obj, len(fnargs), fnargs[1:]))
raise
else:
result = obj(*fnargs, **fnkwds)
elif cmd == 'getObjValue':
result = opts['obj'] ## has already been unpickled into its local value
returnType = 'value'
elif cmd == 'transfer':
result = opts['obj']
returnType = 'proxy'
elif cmd == 'transferArray':
## read array data from next message:
result = np.fromstring(byteData[0], dtype=opts['dtype']).reshape(opts['shape'])
returnType = 'proxy'
elif cmd == 'import':
name = opts['module']
fromlist = opts.get('fromlist', [])
mod = builtins.__import__(name, fromlist=fromlist)
if len(fromlist) == 0:
parts = name.lstrip('.').split('.')
result = mod
for part in parts[1:]:
result = getattr(result, part)
else:
                    result = [getattr(mod, name) for name in fromlist]  ## return the named attributes as a list, matching the _import() docstring
elif cmd == 'del':
LocalObjectProxy.releaseProxyId(opts['proxyId'])
#del self.proxiedObjects[opts['objId']]
elif cmd == 'close':
if reqId is not None:
result = True
returnType = 'value'
exc = None
except:
exc = sys.exc_info()
if reqId is not None:
if exc is None:
self.debugMsg(" handleRequest: sending return value for %d: %s" % (reqId, str(result)))
#print "returnValue:", returnValue, result
if returnType == 'auto':
with self.optsLock:
noProxyTypes = self.proxyOptions['noProxyTypes']
result = self.autoProxy(result, noProxyTypes)
elif returnType == 'proxy':
result = LocalObjectProxy(result)
try:
self.replyResult(reqId, result)
except:
sys.excepthook(*sys.exc_info())
self.replyError(reqId, *sys.exc_info())
else:
self.debugMsg(" handleRequest: returning exception for %d" % reqId)
self.replyError(reqId, *exc)
elif exc is not None:
sys.excepthook(*exc)
if cmd == 'close':
if opts.get('noCleanup', False) is True:
os._exit(0) ## exit immediately, do not pass GO, do not collect $200.
## (more importantly, do not call any code that would
## normally be invoked at exit)
else:
raise ClosedError()
def replyResult(self, reqId, result):
self.send(request='result', reqId=reqId, callSync='off', opts=dict(result=result))
def replyError(self, reqId, *exc):
print("error: %s %s %s" % (self.name, str(reqId), str(exc[1])))
excStr = traceback.format_exception(*exc)
try:
self.send(request='error', reqId=reqId, callSync='off', opts=dict(exception=exc[1], excString=excStr))
except:
self.send(request='error', reqId=reqId, callSync='off', opts=dict(exception=None, excString=excStr))
def send(self, request, opts=None, reqId=None, callSync='sync', timeout=10, returnType=None, byteData=None, **kwds):
"""Send a request or return packet to the remote process.
Generally it is not necessary to call this method directly; it is for internal use.
(The docstring has information that is nevertheless useful to the programmer
as it describes the internal protocol used to communicate between processes)
========== ====================================================================
Arguments:
request String describing the type of request being sent (see below)
reqId Integer uniquely linking a result back to the request that generated
it. (most requests leave this blank)
callSync 'sync': return the actual result of the request
'async': return a Request object which can be used to look up the
result later
'off': return no result
timeout Time in seconds to wait for a response when callSync=='sync'
opts Extra arguments sent to the remote process that determine the way
the request will be handled (see below)
returnType 'proxy', 'value', or 'auto'
byteData If specified, this is a list of objects to be sent as byte messages
to the remote process.
This is used to send large arrays without the cost of pickling.
========== ====================================================================
Description of request strings and options allowed for each:
============= ============= ========================================================
request option description
------------- ------------- --------------------------------------------------------
getObjAttr Request the remote process return (proxy to) an
attribute of an object.
obj reference to object whose attribute should be
returned
attr string name of attribute to return
returnValue bool or 'auto' indicating whether to return a proxy or
the actual value.
callObj Request the remote process call a function or
method. If a request ID is given, then the call's
return value will be sent back (or information
about the error that occurred while running the
function)
obj the (reference to) object to call
args tuple of arguments to pass to callable
kwds dict of keyword arguments to pass to callable
returnValue bool or 'auto' indicating whether to return a proxy or
the actual value.
getObjValue Request the remote process return the value of
a proxied object (must be picklable)
obj reference to object whose value should be returned
transfer Copy an object to the remote process and request
it return a proxy for the new object.
obj The object to transfer.
import Request the remote process import new symbols
and return proxy(ies) to the imported objects
module the string name of the module to import
fromlist optional list of string names to import from module
del Inform the remote process that a proxy has been
released (thus the remote process may be able to
release the original object)
proxyId id of proxy which is no longer referenced by
remote host
close Instruct the remote process to stop its event loop
and exit. Optionally, this request may return a
confirmation.
result Inform the remote process that its request has
been processed
result return value of a request
error Inform the remote process that its request failed
exception the Exception that was raised (or None if the
exception could not be pickled)
excString string-formatted version of the exception and
traceback
        ============= ============= ========================================================
"""
if self.exited:
self.debugMsg(' send: exited already; raise ClosedError.')
raise ClosedError()
with self.sendLock:
#if len(kwds) > 0:
#print "Warning: send() ignored args:", kwds
if opts is None:
opts = {}
assert callSync in ['off', 'sync', 'async'], 'callSync must be one of "off", "sync", or "async"'
if reqId is None:
if callSync != 'off': ## requested return value; use the next available request ID
reqId = self.nextRequestId
self.nextRequestId += 1
else:
## If requestId is provided, this _must_ be a response to a previously received request.
assert request in ['result', 'error']
if returnType is not None:
opts['returnType'] = returnType
#print os.getpid(), "send request:", request, reqId, opts
## double-pickle args to ensure that at least status and request ID get through
try:
optStr = pickle.dumps(opts)
except:
print("==== Error pickling this object: ====")
print(opts)
print("=======================================")
raise
nByteMsgs = 0
if byteData is not None:
nByteMsgs = len(byteData)
## Send primary request
request = (request, reqId, nByteMsgs, optStr)
self.debugMsg('send request: cmd=%s nByteMsgs=%d id=%s opts=%s' % (str(request[0]), nByteMsgs, str(reqId), str(opts)))
self.conn.send(request)
## follow up by sending byte messages
if byteData is not None:
for obj in byteData: ## Remote process _must_ be prepared to read the same number of byte messages!
self.conn.send_bytes(obj)
self.debugMsg(' sent %d byte messages' % len(byteData))
self.debugMsg(' call sync: %s' % callSync)
if callSync == 'off':
return
req = Request(self, reqId, description=str(request), timeout=timeout)
if callSync == 'async':
return req
if callSync == 'sync':
try:
return req.result()
except NoResultError:
return req
def close(self, callSync='off', noCleanup=False, **kwds):
try:
self.send(request='close', opts=dict(noCleanup=noCleanup), callSync=callSync, **kwds)
self.exited = True
except ClosedError:
pass
def getResult(self, reqId):
## raises NoResultError if the result is not available yet
#print self.results.keys(), os.getpid()
with self.resultLock:
haveResult = reqId in self.results
if not haveResult:
try:
self.processRequests()
except ClosedError: ## even if remote connection has closed, we may have
## received new data during this call to processRequests()
pass
with self.resultLock:
if reqId not in self.results:
raise NoResultError()
status, result = self.results.pop(reqId)
if status == 'result':
return result
elif status == 'error':
#print ''.join(result)
exc, excStr = result
if exc is not None:
print("===== Remote process raised exception on request: =====")
print(''.join(excStr))
print("===== Local Traceback to request follows: =====")
raise exc
else:
print(''.join(excStr))
raise Exception("Error getting result. See above for exception from remote process.")
else:
raise Exception("Internal error.")
def _import(self, mod, **kwds):
"""
Request the remote process import a module (or symbols from a module)
and return the proxied results. Uses built-in __import__() function, but
adds a bit more processing:
_import('module') => returns module
_import('module.submodule') => returns submodule
(note this differs from behavior of __import__)
_import('module', fromlist=[name1, name2, ...]) => returns [module.name1, module.name2, ...]
(this also differs from behavior of __import__)
"""
return self.send(request='import', callSync='sync', opts=dict(module=mod), **kwds)
def getObjAttr(self, obj, attr, **kwds):
return self.send(request='getObjAttr', opts=dict(obj=obj, attr=attr), **kwds)
def getObjValue(self, obj, **kwds):
return self.send(request='getObjValue', opts=dict(obj=obj), **kwds)
def callObj(self, obj, args, kwds, **opts):
opts = opts.copy()
args = list(args)
## Decide whether to send arguments by value or by proxy
with self.optsLock:
noProxyTypes = opts.pop('noProxyTypes', None)
if noProxyTypes is None:
noProxyTypes = self.proxyOptions['noProxyTypes']
autoProxy = opts.pop('autoProxy', self.proxyOptions['autoProxy'])
if autoProxy is True:
args = [self.autoProxy(v, noProxyTypes) for v in args]
            for k, v in kwds.items():  ## proxy keyword-argument values in place as well
                kwds[k] = self.autoProxy(v, noProxyTypes)
byteMsgs = []
## If there are arrays in the arguments, send those as byte messages.
## We do this because pickling arrays is too expensive.
for i,arg in enumerate(args):
if arg.__class__ == np.ndarray:
args[i] = ("__byte_message__", len(byteMsgs), (arg.dtype, arg.shape))
byteMsgs.append(arg)
for k,v in kwds.items():
if v.__class__ == np.ndarray:
kwds[k] = ("__byte_message__", len(byteMsgs), (v.dtype, v.shape))
byteMsgs.append(v)
return self.send(request='callObj', opts=dict(obj=obj, args=args, kwds=kwds), byteData=byteMsgs, **opts)
def registerProxy(self, proxy):
with self.proxyLock:
ref = weakref.ref(proxy, self.deleteProxy)
self.proxies[ref] = proxy._proxyId
def deleteProxy(self, ref):
with self.proxyLock:
proxyId = self.proxies.pop(ref)
try:
self.send(request='del', opts=dict(proxyId=proxyId), callSync='off')
except IOError: ## if remote process has closed down, there is no need to send delete requests anymore
pass
def transfer(self, obj, **kwds):
"""
Transfer an object by value to the remote host (the object must be picklable)
and return a proxy for the new remote object.
"""
if obj.__class__ is np.ndarray:
opts = {'dtype': obj.dtype, 'shape': obj.shape}
return self.send(request='transferArray', opts=opts, byteData=[obj], **kwds)
else:
return self.send(request='transfer', opts=dict(obj=obj), **kwds)
def autoProxy(self, obj, noProxyTypes):
## Return object wrapped in LocalObjectProxy _unless_ its type is in noProxyTypes.
for typ in noProxyTypes:
if isinstance(obj, typ):
return obj
return LocalObjectProxy(obj)
class Request(object):
"""
Request objects are returned when calling an ObjectProxy in asynchronous mode
or if a synchronous call has timed out. Use hasResult() to ask whether
the result of the call has been returned yet. Use result() to get
the returned value.
"""
def __init__(self, process, reqId, description=None, timeout=10):
self.proc = process
self.description = description
self.reqId = reqId
self.gotResult = False
self._result = None
self.timeout = timeout
def result(self, block=True, timeout=None):
"""
Return the result for this request.
If block is True, wait until the result has arrived or *timeout* seconds passes.
If the timeout is reached, raise NoResultError. (use timeout=None to disable)
If block is False, raise NoResultError immediately if the result has not arrived yet.
If the process's connection has closed before the result arrives, raise ClosedError.
"""
if self.gotResult:
return self._result
if timeout is None:
timeout = self.timeout
if block:
start = time.time()
while not self.hasResult():
if self.proc.exited:
raise ClosedError()
time.sleep(0.005)
if timeout >= 0 and time.time() - start > timeout:
print("Request timed out: %s" % self.description)
import traceback
traceback.print_stack()
raise NoResultError()
return self._result
else:
self._result = self.proc.getResult(self.reqId) ## raises NoResultError if result is not available yet
self.gotResult = True
return self._result
def hasResult(self):
"""Returns True if the result for this request has arrived."""
try:
self.result(block=False)
except NoResultError:
pass
return self.gotResult
class LocalObjectProxy(object):
"""
    Used for wrapping local objects to ensure that they are sent by proxy to a remote host.
Note that 'proxy' is just a shorter alias for LocalObjectProxy.
For example::
data = [1,2,3,4,5]
remotePlot.plot(data) ## by default, lists are pickled and sent by value
remotePlot.plot(proxy(data)) ## force the object to be sent by proxy
"""
nextProxyId = 0
proxiedObjects = {} ## maps {proxyId: object}
@classmethod
def registerObject(cls, obj):
## assign it a unique ID so we can keep a reference to the local object
pid = cls.nextProxyId
cls.nextProxyId += 1
cls.proxiedObjects[pid] = obj
#print "register:", cls.proxiedObjects
return pid
@classmethod
def lookupProxyId(cls, pid):
return cls.proxiedObjects[pid]
@classmethod
def releaseProxyId(cls, pid):
del cls.proxiedObjects[pid]
#print "release:", cls.proxiedObjects
def __init__(self, obj, **opts):
"""
Create a 'local' proxy object that, when sent to a remote host,
        will appear as a normal ObjectProxy for *obj*.
Any extra keyword arguments are passed to proxy._setProxyOptions()
on the remote side.
"""
self.processId = os.getpid()
#self.objectId = id(obj)
self.typeStr = repr(obj)
#self.handler = handler
self.obj = obj
self.opts = opts
def __reduce__(self):
## a proxy is being pickled and sent to a remote process.
## every time this happens, a new proxy will be generated in the remote process,
## so we keep a new ID so we can track when each is released.
pid = LocalObjectProxy.registerObject(self.obj)
return (unpickleObjectProxy, (self.processId, pid, self.typeStr, None, self.opts))
## alias
proxy = LocalObjectProxy
def unpickleObjectProxy(processId, proxyId, typeStr, attributes=None, opts=None):
if processId == os.getpid():
obj = LocalObjectProxy.lookupProxyId(proxyId)
if attributes is not None:
for attr in attributes:
obj = getattr(obj, attr)
return obj
else:
proxy = ObjectProxy(processId, proxyId=proxyId, typeStr=typeStr)
if opts is not None:
proxy._setProxyOptions(**opts)
return proxy
class ObjectProxy(object):
"""
Proxy to an object stored by the remote process. Proxies are created
by calling Process._import(), Process.transfer(), or by requesting/calling
attributes on existing proxy objects.
For the most part, this object can be used exactly as if it
were a local object::
rsys = proc._import('sys') # returns proxy to sys module on remote process
rsys.stdout # proxy to remote sys.stdout
rsys.stdout.write # proxy to remote sys.stdout.write
rsys.stdout.write('hello') # calls sys.stdout.write('hello') on remote machine
# and returns the result (None)
When calling a proxy to a remote function, the call can be made synchronous
(result of call is returned immediately), asynchronous (result is returned later),
or return can be disabled entirely::
ros = proc._import('os')
## synchronous call; result is returned immediately
pid = ros.getpid()
## asynchronous call
request = ros.getpid(_callSync='async')
while not request.hasResult():
time.sleep(0.01)
pid = request.result()
## disable return when we know it isn't needed
rsys.stdout.write('hello', _callSync='off')
Additionally, values returned from a remote function call are automatically
returned either by value (must be picklable) or by proxy.
This behavior can be forced::
rnp = proc._import('numpy')
arrProxy = rnp.array([1,2,3,4], _returnType='proxy')
arrValue = rnp.array([1,2,3,4], _returnType='value')
The default callSync and returnType behaviors (as well as others) can be set
for each proxy individually using ObjectProxy._setProxyOptions() or globally using
proc.setProxyOptions().
"""
def __init__(self, processId, proxyId, typeStr='', parent=None):
object.__init__(self)
## can't set attributes directly because setattr is overridden.
self.__dict__['_processId'] = processId
self.__dict__['_typeStr'] = typeStr
self.__dict__['_proxyId'] = proxyId
self.__dict__['_attributes'] = ()
## attributes that affect the behavior of the proxy.
## in all cases, a value of None causes the proxy to ask
## its parent event handler to make the decision
self.__dict__['_proxyOptions'] = {
'callSync': None, ## 'sync', 'async', None
'timeout': None, ## float, None
'returnType': None, ## 'proxy', 'value', 'auto', None
'deferGetattr': None, ## True, False, None
'noProxyTypes': None, ## list of types to send by value instead of by proxy
}
self.__dict__['_handler'] = RemoteEventHandler.getHandler(processId)
self.__dict__['_handler'].registerProxy(self) ## handler will watch proxy; inform remote process when the proxy is deleted.
def _setProxyOptions(self, **kwds):
"""
Change the behavior of this proxy. For all options, a value of None
will cause the proxy to instead use the default behavior defined
by its parent Process.
Options are:
============= =============================================================
callSync 'sync', 'async', 'off', or None.
If 'async', then calling methods will return a Request object
which can be used to inquire later about the result of the
method call.
If 'sync', then calling a method
will block until the remote process has returned its result
or the timeout has elapsed (in this case, a Request object
is returned instead).
If 'off', then the remote process is instructed _not_ to
reply and the method call will return None immediately.
returnType 'auto', 'proxy', 'value', or None.
If 'proxy', then the value returned when calling a method
will be a proxy to the object on the remote process.
If 'value', then attempt to pickle the returned object and
send it back.
If 'auto', then the decision is made by consulting the
'noProxyTypes' option.
autoProxy bool or None. If True, arguments to __call__ are
automatically converted to proxy unless their type is
listed in noProxyTypes (see below). If False, arguments
are left untouched. Use proxy(obj) to manually convert
arguments before sending.
timeout float or None. Length of time to wait during synchronous
requests before returning a Request object instead.
deferGetattr True, False, or None.
If False, all attribute requests will be sent to the remote
process immediately and will block until a response is
received (or timeout has elapsed).
If True, requesting an attribute from the proxy returns a
new proxy immediately. The remote process is _not_ contacted
to make this request. This is faster, but it is possible to
request an attribute that does not exist on the proxied
object. In this case, AttributeError will not be raised
until an attempt is made to look up the attribute on the
remote process.
noProxyTypes List of object types that should _not_ be proxied when
sent to the remote process.
============= =============================================================
"""
self._proxyOptions.update(kwds)
def _getValue(self):
"""
Return the value of the proxied object
(the remote object must be picklable)
"""
return self._handler.getObjValue(self)
def _getProxyOption(self, opt):
val = self._proxyOptions[opt]
if val is None:
return self._handler.getProxyOption(opt)
return val
def _getProxyOptions(self):
return dict([(k, self._getProxyOption(k)) for k in self._proxyOptions])
def __reduce__(self):
return (unpickleObjectProxy, (self._processId, self._proxyId, self._typeStr, self._attributes))
def __repr__(self):
#objRepr = self.__getattr__('__repr__')(callSync='value')
return "<ObjectProxy for process %d, object 0x%x: %s >" % (self._processId, self._proxyId, self._typeStr)
def __getattr__(self, attr, **kwds):
"""
Calls __getattr__ on the remote object and returns the attribute
by value or by proxy depending on the options set (see
ObjectProxy._setProxyOptions and RemoteEventHandler.setProxyOptions)
If the option 'deferGetattr' is True for this proxy, then a new proxy object
is returned _without_ asking the remote object whether the named attribute exists.
This can save time when making multiple chained attribute requests,
but may also defer a possible AttributeError until later, making
them more difficult to debug.
"""
opts = self._getProxyOptions()
for k in opts:
if '_'+k in kwds:
opts[k] = kwds.pop('_'+k)
if opts['deferGetattr'] is True:
return self._deferredAttr(attr)
else:
#opts = self._getProxyOptions()
return self._handler.getObjAttr(self, attr, **opts)
def _deferredAttr(self, attr):
return DeferredObjectProxy(self, attr)
def __call__(self, *args, **kwds):
"""
Attempts to call the proxied object from the remote process.
Accepts extra keyword arguments:
_callSync 'off', 'sync', or 'async'
_returnType 'value', 'proxy', or 'auto'
If the remote call raises an exception on the remote process,
it will be re-raised on the local process.
"""
opts = self._getProxyOptions()
for k in opts:
if '_'+k in kwds:
opts[k] = kwds.pop('_'+k)
return self._handler.callObj(obj=self, args=args, kwds=kwds, **opts)
## Explicitly proxy special methods. Is there a better way to do this??
def _getSpecialAttr(self, attr):
## this just gives us an easy way to change the behavior of the special methods
return self._deferredAttr(attr)
def __getitem__(self, *args):
return self._getSpecialAttr('__getitem__')(*args)
def __setitem__(self, *args):
return self._getSpecialAttr('__setitem__')(*args, _callSync='off')
def __setattr__(self, *args):
return self._getSpecialAttr('__setattr__')(*args, _callSync='off')
def __str__(self, *args):
return self._getSpecialAttr('__str__')(*args, _returnType='value')
def __len__(self, *args):
return self._getSpecialAttr('__len__')(*args)
def __add__(self, *args):
return self._getSpecialAttr('__add__')(*args)
def __sub__(self, *args):
return self._getSpecialAttr('__sub__')(*args)
def __div__(self, *args):
return self._getSpecialAttr('__div__')(*args)
def __truediv__(self, *args):
return self._getSpecialAttr('__truediv__')(*args)
def __floordiv__(self, *args):
return self._getSpecialAttr('__floordiv__')(*args)
def __mul__(self, *args):
return self._getSpecialAttr('__mul__')(*args)
def __pow__(self, *args):
return self._getSpecialAttr('__pow__')(*args)
def __iadd__(self, *args):
return self._getSpecialAttr('__iadd__')(*args, _callSync='off')
def __isub__(self, *args):
return self._getSpecialAttr('__isub__')(*args, _callSync='off')
def __idiv__(self, *args):
return self._getSpecialAttr('__idiv__')(*args, _callSync='off')
def __itruediv__(self, *args):
return self._getSpecialAttr('__itruediv__')(*args, _callSync='off')
def __ifloordiv__(self, *args):
return self._getSpecialAttr('__ifloordiv__')(*args, _callSync='off')
def __imul__(self, *args):
return self._getSpecialAttr('__imul__')(*args, _callSync='off')
def __ipow__(self, *args):
return self._getSpecialAttr('__ipow__')(*args, _callSync='off')
def __rshift__(self, *args):
return self._getSpecialAttr('__rshift__')(*args)
def __lshift__(self, *args):
return self._getSpecialAttr('__lshift__')(*args)
def __irshift__(self, *args):
return self._getSpecialAttr('__irshift__')(*args, _callSync='off')
def __ilshift__(self, *args):
return self._getSpecialAttr('__ilshift__')(*args, _callSync='off')
def __eq__(self, *args):
return self._getSpecialAttr('__eq__')(*args)
def __ne__(self, *args):
return self._getSpecialAttr('__ne__')(*args)
def __lt__(self, *args):
return self._getSpecialAttr('__lt__')(*args)
def __gt__(self, *args):
return self._getSpecialAttr('__gt__')(*args)
def __le__(self, *args):
return self._getSpecialAttr('__le__')(*args)
def __ge__(self, *args):
return self._getSpecialAttr('__ge__')(*args)
def __and__(self, *args):
return self._getSpecialAttr('__and__')(*args)
def __or__(self, *args):
return self._getSpecialAttr('__or__')(*args)
def __xor__(self, *args):
return self._getSpecialAttr('__xor__')(*args)
def __iand__(self, *args):
return self._getSpecialAttr('__iand__')(*args, _callSync='off')
def __ior__(self, *args):
return self._getSpecialAttr('__ior__')(*args, _callSync='off')
def __ixor__(self, *args):
return self._getSpecialAttr('__ixor__')(*args, _callSync='off')
def __mod__(self, *args):
return self._getSpecialAttr('__mod__')(*args)
def __radd__(self, *args):
return self._getSpecialAttr('__radd__')(*args)
def __rsub__(self, *args):
return self._getSpecialAttr('__rsub__')(*args)
def __rdiv__(self, *args):
return self._getSpecialAttr('__rdiv__')(*args)
def __rfloordiv__(self, *args):
return self._getSpecialAttr('__rfloordiv__')(*args)
def __rtruediv__(self, *args):
return self._getSpecialAttr('__rtruediv__')(*args)
def __rmul__(self, *args):
return self._getSpecialAttr('__rmul__')(*args)
def __rpow__(self, *args):
return self._getSpecialAttr('__rpow__')(*args)
def __rrshift__(self, *args):
return self._getSpecialAttr('__rrshift__')(*args)
def __rlshift__(self, *args):
return self._getSpecialAttr('__rlshift__')(*args)
def __rand__(self, *args):
return self._getSpecialAttr('__rand__')(*args)
def __ror__(self, *args):
return self._getSpecialAttr('__ror__')(*args)
def __rxor__(self, *args):
        return self._getSpecialAttr('__rxor__')(*args)
def __rmod__(self, *args):
return self._getSpecialAttr('__rmod__')(*args)
def __hash__(self):
## Required for python3 since __eq__ is defined.
return id(self)
class DeferredObjectProxy(ObjectProxy):
"""
This class represents an attribute (or sub-attribute) of a proxied object.
It is used to speed up attribute requests. Take the following scenario::
rsys = proc._import('sys')
rsys.stdout.write('hello')
For this simple example, a total of 4 synchronous requests are made to
the remote process:
1) import sys
2) getattr(sys, 'stdout')
3) getattr(stdout, 'write')
4) write('hello')
This takes a lot longer than running the equivalent code locally. To
speed things up, we can 'defer' the two attribute lookups so they are
    only carried out when necessary::
rsys = proc._import('sys')
rsys._setProxyOptions(deferGetattr=True)
rsys.stdout.write('hello')
    This example only makes two requests to the remote process; the two
    attribute lookups return DeferredObjectProxy instances immediately,
    without contacting the remote process. When the call to write() is made,
    all attribute requests are processed at the same time.
Note that if the attributes requested do not exist on the remote object,
making the call to write() will raise an AttributeError.
"""
def __init__(self, parentProxy, attribute):
## can't set attributes directly because setattr is overridden.
for k in ['_processId', '_typeStr', '_proxyId', '_handler']:
self.__dict__[k] = getattr(parentProxy, k)
self.__dict__['_parent'] = parentProxy ## make sure parent stays alive
self.__dict__['_attributes'] = parentProxy._attributes + (attribute,)
self.__dict__['_proxyOptions'] = parentProxy._proxyOptions.copy()
def __repr__(self):
return ObjectProxy.__repr__(self) + '.' + '.'.join(self._attributes)
def _undefer(self):
"""
Return a non-deferred ObjectProxy referencing the same object
"""
return self._parent.__getattr__(self._attributes[-1], _deferGetattr=False)
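# A minimal usage sketch tying the proxy machinery above together. Assumption:
# `proc` is a Process-like object from the companion multiprocess.processes
# module exposing _import(), as described in the ObjectProxy docstring.
def _example_object_proxy(proc):
    rsys = proc._import('sys')                      # ObjectProxy to the remote sys module
    rsys._setProxyOptions(deferGetattr=True)        # attribute lookups return DeferredObjectProxy
    rsys.stdout.write('hello\n', _callSync='off')   # fire-and-forget remote call
    ros = proc._import('os')
    req = ros.getpid(_callSync='async')             # returns a Request object
    while not req.hasResult():
        time.sleep(0.01)
    return req.result()                             # pid of the remote process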
|
|
import os
import time
import numpy as np
import os.path as osp
from baselines import logger
from collections import deque
from baselines.common import explained_variance, set_global_seeds
from baselines.common.policies import build_policy
try:
from mpi4py import MPI
except ImportError:
MPI = None
from baselines.ppo2.runner import Runner
def constfn(val):
def f(_):
return val
return f
def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
save_interval=0, load_path=None, model_fn=None, **network_kwargs):
'''
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network: policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
See common/models.py/lstm for more details on using recurrent nets in policies
env: baselines.common.vec_env.VecEnv environment. Needs to be vectorized for parallel environment simulation.
The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.
nsteps: int number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
total_timesteps: int number of timesteps (i.e. number of actions taken in the environment)
ent_coef: float policy entropy coefficient in the optimization objective
lr: float or function learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
training and 0 is the end of the training.
vf_coef: float value function loss coefficient in the optimization objective
max_grad_norm: float or None gradient norm clipping coefficient
gamma: float discounting factor
lam: float advantage estimation discounting factor (lambda in the paper)
    log_interval: int number of updates between logging events
nminibatches: int number of training minibatches per update. For recurrent policies,
                  should be less than or equal to the number of environments run in parallel.
noptepochs: int number of training epochs per update
cliprange: float or function clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
    save_interval: int number of updates between model checkpoint saves
load_path: str path to load the model from
**network_kwargs: keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
set_global_seeds(seed)
if isinstance(lr, float): lr = constfn(lr)
else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
policy = build_policy(env, network, **network_kwargs)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
# Instantiate the model object (that creates act_model and train_model)
if model_fn is None:
from baselines.ppo2.model import Model
model_fn = Model
model = model_fn(policy=policy, ob_space=ob_space, ac_space=ac_space, nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm)
if load_path is not None:
model.load(load_path)
# Instantiate the runner object
runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
if eval_env is not None:
eval_runner = Runner(env = eval_env, model = model, nsteps = nsteps, gamma = gamma, lam= lam)
epinfobuf = deque(maxlen=100)
if eval_env is not None:
eval_epinfobuf = deque(maxlen=100)
# Start total timer
tfirststart = time.time()
nupdates = total_timesteps//nbatch
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
# Start timer
tstart = time.time()
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# Calculate the cliprange
cliprangenow = cliprange(frac)
# Get minibatch
obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run() #pylint: disable=E0632
if eval_env is not None:
eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run() #pylint: disable=E0632
epinfobuf.extend(epinfos)
if eval_env is not None:
eval_epinfobuf.extend(eval_epinfos)
# Here what we're going to do is for each minibatch calculate the loss and append it.
mblossvals = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mblossvals.append(model.train(lrnow, cliprangenow, *slices))
else: # recurrent version
assert nenvs % nminibatches == 0
envsperbatch = nenvs // nminibatches
envinds = np.arange(nenvs)
flatinds = np.arange(nenvs * nsteps).reshape(nenvs, nsteps)
envsperbatch = nbatch_train // nsteps
for _ in range(noptepochs):
np.random.shuffle(envinds)
for start in range(0, nenvs, envsperbatch):
end = start + envsperbatch
mbenvinds = envinds[start:end]
mbflatinds = flatinds[mbenvinds].ravel()
slices = (arr[mbflatinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mbstates = states[mbenvinds]
mblossvals.append(model.train(lrnow, cliprangenow, *slices, mbstates))
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
# End timer
tnow = time.time()
# Calculate the fps (frame per second)
fps = int(nbatch / (tnow - tstart))
if update % log_interval == 0 or update == 1:
            # Check whether the value function is a good predictor of the returns (ev close to 1)
            # or worse than predicting nothing at all (ev <= 0)
ev = explained_variance(values, returns)
logger.logkv("serial_timesteps", update*nsteps)
logger.logkv("nupdates", update)
logger.logkv("total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
if eval_env is not None:
logger.logkv('eval_eprewmean', safemean([epinfo['r'] for epinfo in eval_epinfobuf]) )
logger.logkv('eval_eplenmean', safemean([epinfo['l'] for epinfo in eval_epinfobuf]) )
logger.logkv('time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model.loss_names):
logger.logkv(lossname, lossval)
if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
logger.dumpkvs()
if save_interval and (update % save_interval == 0 or update == 1) and logger.get_dir() and (MPI is None or MPI.COMM_WORLD.Get_rank() == 0):
checkdir = osp.join(logger.get_dir(), 'checkpoints')
os.makedirs(checkdir, exist_ok=True)
savepath = osp.join(checkdir, '%.5i'%update)
print('Saving to', savepath)
model.save(savepath)
try:
env.save(savepath)
except AttributeError:
pass
return model
# Avoid a division error when calculating the mean (if epinfos is empty this returns np.nan instead of raising an error)
def safemean(xs):
return np.nan if len(xs) == 0 else np.mean(xs)
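# A minimal usage sketch for learn(). Assumptions: gym and a CartPole-style
# environment are available, DummyVecEnv is used as suggested in the docstring
# above, and the hyperparameters are illustrative rather than tuned.
def _example_ppo2_learn():
    import gym
    from baselines.common.vec_env import DummyVecEnv
    env = DummyVecEnv([lambda: gym.make("CartPole-v1")])
    model = learn(network="mlp", env=env, total_timesteps=10000,
                  nsteps=128, nminibatches=4, noptepochs=4, lr=3e-4,
                  cliprange=0.2, log_interval=1)
    return model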
|
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Generator of histology report
"""
import logging
logger = logging.getLogger(__name__)
import sys
import os.path
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "../extern/dicom2fem/src"))
import argparse
import numpy as np
import scipy.ndimage
import sys
if sys.version_info.major == 3:
xrange = range
from . import tree
# import datareader
# from . import tb_vtk
# from datetime import datetime
# import collections
UNDERDEBUG = 8
class TBVolume(tree.TubeSkeletonBuilder):
"""
This generator is called by generateTree() function as a general form.
Other similar generator is used for generating LAR outputs.
"""
def __init__(self,
**kwargs
):
# super(tree.FiberSkeletBuilder, self).__init__()
tree.TubeSkeletonBuilder.__init__(self)
self.init(**kwargs)
def init(self, tube_skeleton=None, shape=None, voxelsize_mm=None,
             background_intensity=20, dtype=int, intensity_profile=None):
self.tube_skeleton = tube_skeleton
if shape is None:
shape = [100, 100, 100]
if voxelsize_mm is None:
voxelsize_mm = [1., 1., 1.]
        self.shape = np.asarray(shape, dtype=int)
self.data3d = (np.ones(shape, dtype=dtype) * background_intensity).astype(dtype=dtype)
self.voxelsize_mm = voxelsize_mm
logger.debug(f"volume shape: {self.shape} vxsz: {self.voxelsize_mm}")
if intensity_profile is not None:
self.intensity_profile = intensity_profile
else:
# self.intensity_profile = {1:200, 0.6: 100}
self.intensity_profile = {1: 200}
logger.debug(f"intensity profile: {self.intensity_profile}")
# self.intensity_profile = incollections.OrderedDict(sorted(intensity_profile, reverse=True))
self._cylinders_params = []
self._temp_intensity = 10
self.finish_progress_callback = None
# self.output_intensity = 200
def add_cylinder(self, p1m, p2m, rad, id):
"""
        Draw a single segment (cylinder) into the 3D data.
"""
self._cylinders_params.append([p1m, p2m, rad, id])
def _add_cylinder(self, p1m, p2m, rad, id):
logger.debug(f"volume add cylinder {p1m}, {p2m}, {rad}")
        cyl_data3d = np.ones(self.shape, dtype=bool)
        # first and last point, in pixels
p1 = [p1m[0] / self.voxelsize_mm[0], p1m[1] /
self.voxelsize_mm[1], p1m[2] / self.voxelsize_mm[2]]
p2 = [p2m[0] / self.voxelsize_mm[0], p2m[1] /
self.voxelsize_mm[1], p2m[2] / self.voxelsize_mm[2]]
logger.debug(
"p1_px: " + str(p1[0]) + " " + str(p1[1]) + " " + str(p1[2]))
logger.debug(
"p2_px: " + str(p2[0]) + " " + str(p2[1]) + " " + str(p2[2]))
logger.debug( "radius_mm:" + str(rad))
        # distances between the first and last point (per axis)
pdiff = [abs(p1[0] - p2[0]), abs(p1[1] - p2[1]), abs(p1[2] - p2[2])]
        # generate sample points along the segment axis:
        # two line points per "pixel" of the longest axis (sampling theorem)
        num_points = int(max(pdiff) * 2)
zvalues = np.linspace(p1[0], p2[0], num_points)
yvalues = np.linspace(p1[1], p2[1], num_points)
xvalues = np.linspace(p1[2], p2[2], num_points)
# drawing a line
no_index_error_occured = True
for i in range(0, len(xvalues)):
# TODO make something with indexes out of requested area
try:
cyl_data3d[int(zvalues[i])][int(yvalues[i])][int(xvalues[i])] = 0
except IndexError:
if no_index_error_occured:
import traceback
traceback.print_exc()
logger.warning("Cylinder drawing out of bounds. Other same type warnings are suppressed.")
no_index_error_occured = False
except:
import traceback
traceback.print_exc()
logger.debug("except in drawing line")
logger.warning("Cylinder drawing problem")
# import ipdb; ipdb.set_trace() # noqa BREAKPOINT
        # cut down the 3D volume used for the distance computation (smaller ==
        # a lot faster)
cut_up = max(
0, round(min(p1[0], p2[0]) - (rad / min(self.voxelsize_mm)) - 2))
        # the extra 2 voxels are a safety margin so the cylinder is not accidentally clipped
cut_down = min(self.shape[0], round(
max(p1[0], p2[0]) + (rad / min(self.voxelsize_mm)) + 2))
cut_yu = max(
0, round(min(p1[1], p2[1]) - (rad / min(self.voxelsize_mm)) - 2))
cut_yd = min(self.shape[1], round(
max(p1[1], p2[1]) + (rad / min(self.voxelsize_mm)) + 2))
cut_xl = max(
0, round(min(p1[2], p2[2]) - (rad / min(self.voxelsize_mm)) - 2))
cut_xr = min(self.shape[2], round(
max(p1[2], p2[2]) + (rad / min(self.voxelsize_mm)) + 2))
logger.debug("cutter_px: z_up-" + str(cut_up) + " z_down-" + str(cut_down) + " y_up-" + str(
cut_yu) + " y_down-" + str(cut_yd) + " x_left-" + str(cut_xl) + " x_right-" + str(cut_xr))
cyl_data3d_cut = cyl_data3d[
int(cut_up):int(cut_down),
int(cut_yu):int(cut_yd),
int(cut_xl):int(cut_xr)]
# calculating distances
        # this consumes the vast majority of the time (about 1.2 s for a
        # 200^3 volume, which is why the data is cropped first)
lineDst = scipy.ndimage.distance_transform_edt(
cyl_data3d_cut, self.voxelsize_mm)
        # copy the cropped region back into the full data volume
for z in xrange(0, len(cyl_data3d_cut)):
for y in xrange(0, len(cyl_data3d_cut[z])):
for x in xrange(0, len(cyl_data3d_cut[z][y])):
if lineDst[z][y][x] <= rad:
iX = int(z + cut_up)
iY = int(y + cut_yu)
iZ = int(x + cut_xl)
self.data3d[iX][iY][iZ] = self._temp_intensity
def get_output(self):
return self.data3d
def finish(self):
"""
        :param self.finish_progress_callback: callable(self, progress) with progress ranging from 0.0 to 1.0
:return:
"""
progress_step = 1.0 / (len(self.intensity_profile) * len(self._cylinders_params))
progress = 0.0
for radk in sorted(self.intensity_profile, reverse=True):
radk_intensity = self.intensity_profile[radk]
for cyl in self._cylinders_params:
self._add_cylinder(cyl[0], cyl[1], cyl[2] * radk, cyl[3])
if self.finish_progress_callback is not None:
self.finish_progress_callback(self, progress)
progress += progress_step
self.data3d[self.data3d == self._temp_intensity] = radk_intensity
# import ipdb; ipdb.set_trace()
if self.finish_progress_callback is not None:
self.finish_progress_callback(self, 1.0)
def save(self, outputfile, filetype='pklz'):
import io3d
import io3d.misc
import numpy as np
data = {
'data3d': self.data3d.astype(np.uint8), # * self.output_intensity,
'voxelsize_mm': self.voxelsize_mm,
# 'segmentation': np.zeros_like(self.data3d, dtype=np.int8)
}
# data3d = np.zeros([10,10,10])
# segmentation = np.zeros([10,10,10])
#
# data3d [2:7,:3:5, :6] = 100
# datap = {
# "data3d": data3d,
# "segmentation": segmentation,
# "voxelsize_mm": [1,1,1]
# }
# io3d.write(datap, "file1.pklz")
# import ipdb; ipdb.set_trace()
io3d.write(data, outputfile)
# io3d.misc.obj_to_file(data, outputfile, filetype=filetype)
# dw = datawriter.DataWriter()
# dw.Write3DData(self.data3d, outputfile, filetype)
def show(self):
import sed3 as se
pyed = se.sed3(self.data3d)
pyed.show()
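# A minimal usage sketch for TBVolume. Assumption: queuing cylinders manually
# via add_cylinder() is enough for illustration, so no tube_skeleton from
# tree.TubeSkeletonBuilder is built here; coordinates and radii are in mm.
def _example_tbvolume():
    tbv = TBVolume(shape=[60, 60, 60], voxelsize_mm=[1.0, 1.0, 1.0],
                   background_intensity=20)
    tbv.add_cylinder([10.0, 15.0, 15.0], [45.0, 40.0, 40.0], rad=3.0, id=0)
    tbv.finish()             # rasterize queued cylinders per intensity_profile
    return tbv.get_output()  # 3D numpy array containing the drawn cylinder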
|
|
#!/usr/bin/env python3
__version__ = "0.1"
__description__ = "A simple DNS lookup utility, similar to 'dig'"
# Basic usage examples
# iterative: python3 dns-query.py wordpress.com -s 192.228.79.201 -q A -nr
# recursive: python3 dns-query.py wordpress.com -q A -r
import argparse
import socket
import sys
import struct
import random
from enum import Enum, unique
from ipaddress import IPv4Address, IPv6Address
def exit_error(msg):
print("ERROR: " + msg)
sys.exit(1)
def is_ip_valid(ip, family=socket.AF_INET):
try:
socket.inet_pton(family, ip)
return True
except socket.error:
return False
def bit_get(number, i):
""" Get the n bit of the number """
return (number >> i) & 1
# for qtype and type for messages
# https://tools.ietf.org/html/rfc1035#section-3.2.2
@unique
class RecordType(Enum):
A = 1 # ipv4 address record
NS = 2 # name server record
CNAME = 5 # canonical name record
SOA = 6 # start of zone authority record
MX = 15 # mail exchange record
AAAA = 28 # ipv6 address record
@unique # https://tools.ietf.org/html/rfc1035#section-3.2.4
class ClassType(Enum):
IN = 1 # the internet
    # the others are not used
NONE = 254
ANY = 255
@unique # for qr
class QueryType(Enum):
QUESTION = 0
RESPONSE = 1
@unique # for opcode
class OperationCode(Enum):
QUERY = 0 # standard query
IQUERY = 1 # inverse query, obsolete
STATUS = 2 # server status request
# 3 is reserved, not used
NOTIFY = 4 # used by primary servers to tell secondary servers that a zone has changed
UPDATE = 5 # used to implement dynamic DNS
@unique # for rcode
class ResponseCode(Enum):
NO_ERROR = 0 # no error occurred
FORMAT_ERROR = 1 # The name server was unable to interpret the query
SERVER_FAILURE = 2 # The name server was unable to process this query due to a problem with the name server
NAME_ERROR = 3 # only for authoritative name server, signifies that the domain name referenced in the query does not exist
NOT_IMPLEMENTED = 4 # The name server does not support the requested kind of query
REFUSED = 5 # The name server refuses to perform the specified operation for policy reasons.
# + others not used for this app
class DNSQuestion:
# See https://tools.ietf.org/html/rfc1035#section-4.1.2
header_format = struct.Struct("!2H") # after the QNAME
def __init__(self, qname="", qtype=RecordType.A, qclass=ClassType.IN):
self._qtype = None
self._qclass = None
self.qname = qname # variable up to 255 bytes
self.qtype = qtype # 2 bytes, see RecordType
self.qclass = qclass # 2 bytes
@property
def qtype(self):
return self._qtype
@qtype.setter
def qtype(self, value):
self._qtype = RecordType(value)
@property
def qclass(self):
return self._qclass
@qclass.setter
def qclass(self, value):
self._qclass = ClassType(value)
def to_bytes(self):
"""
Pack a question
:return: bytes
"""
return DNSMessage.name_to_bytes(self.qname) + DNSQuestion.header_format.pack(self.qtype.value, self.qclass.value)
@staticmethod
def from_bytes(message_bytes, offset):
"""
Unpack a question from packed bytes
:param message_bytes: bytes
:param offset: int
:return: tuple(DNSQuestion, int)
"""
qname, offset = DNSMessage.name_from_bytes(message_bytes, offset)
qtype, qclass = DNSQuestion.header_format.unpack_from(message_bytes, offset)
return DNSQuestion(qname, qtype, qclass), offset + DNSQuestion.header_format.size
def to_str(self):
return "%s%s%s%s%s" % (self.qname, "\t" * 4, self.qclass.name, "\t", self.qtype.name)
def __repr__(self):
return "<DNSQuestion qname=%s, qtype=%s, qclass=%s>" % (self.qname, self.qtype.name, self.qclass.name)
class RDataMX:
# See https://tools.ietf.org/html/rfc1035#section-3.3.9
header_format = struct.Struct("!H")
def __init__(self, preference=0, exchange=""):
self.preference = preference # 2 bytes
self.exchange = exchange # variable length
@staticmethod
def from_bytes(message_bytes, offset):
"""
Unpack a rdata for a MX type
:param message_bytes: bytes
:param offset: int
:return: tuple(RDataMX, int)
"""
preference, = RDataMX.header_format.unpack_from(message_bytes, offset)
offset += RDataMX.header_format.size
exchange, offset = DNSMessage.name_from_bytes(message_bytes, offset)
return RDataMX(preference, exchange), offset
def __repr__(self):
return "<RDataMX preference=%d, exchange=%s>" % (self.preference, self.exchange)
class RDataSOA:
# See https://tools.ietf.org/html/rfc1035#section-3.3.13
header_format = struct.Struct("!LLLLL")
def __init__(self, mname="", rname="", serial=0, refresh=0, retry=0, expire=0, minimum=0):
self.mname = mname # variable length
self.rname = rname # variable length
self.serial = serial # 4 bytes
self.refresh = refresh # 4 bytes
self.retry = retry # 4 bytes
self.expire = expire # 4 bytes
self.minimum = minimum # 4 bytes
@staticmethod
def from_bytes(message_bytes, offset):
"""
Unpack a rdata for a SOA type
:param message_bytes: bytes
:param offset: int
:return: tuple(RDataSOA, int)
"""
mname, offset = DNSMessage.name_from_bytes(message_bytes, offset)
rname, offset = DNSMessage.name_from_bytes(message_bytes, offset)
serial, refresh, retry, expire, minimum = RDataSOA.header_format.unpack_from(message_bytes, offset)
return RDataSOA(mname, rname, serial, refresh, retry, expire, minimum), offset + RDataSOA.header_format.size
def __repr__(self):
return "<RDataSOA mname=%s, rname=%s, serial=%d, refresh=%d, retry=%d, expire=%d, minimum=%d>" % \
(self.mname, self.rname, self.serial, self.refresh, self.retry, self.expire, self.minimum)
class DNSResponse:
# See https://tools.ietf.org/html/rfc1035#section-3.2.1
header_format = struct.Struct("!2HLH") # after the NAME
def __init__(self):
self.name = "" # variable up to 255 bytes
self._type = None # 2 bytes, see RecordType, specifies meaning of the RDATA
self._classd = None # 2 bytes
self.ttl = 0 # 4 bytes
self.rdlength = 0 # 2 bytes
self.rdata = None # variable
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = RecordType(value)
@property
def classd(self):
return self._classd
@classd.setter
def classd(self, value):
self._classd = ClassType(value)
@staticmethod
def from_bytes(message_bytes, offset):
"""
Unpack a response
:param message_bytes: bytes
:param offset: int
:return: tuple(DNSResponse, int)
"""
response = DNSResponse()
response.name, offset = DNSMessage.name_from_bytes(message_bytes, offset)
response.type, response.classd, response.ttl, response.rdlength = \
DNSResponse.header_format.unpack_from(message_bytes, offset)
offset += DNSResponse.header_format.size
if response.type is RecordType.A: # https://tools.ietf.org/html/rfc1035#section-3.4.1
if response.rdlength != 4:
exit_error("Length for record type A is not 4")
response.rdata = str(IPv4Address(message_bytes[offset:offset + response.rdlength]))
offset += response.rdlength
elif response.type is RecordType.AAAA:
if response.rdlength != 16:
exit_error("Length for record type AAAA is not 16")
response.rdata = str(IPv6Address(message_bytes[offset:offset + response.rdlength]))
offset += response.rdlength
elif response.type is RecordType.NS or response.type is RecordType.CNAME:
# https://tools.ietf.org/html/rfc1035#section-3.3.11 or https://tools.ietf.org/html/rfc1035#section-3.3.1
response.rdata, offset = DNSMessage.name_from_bytes(message_bytes, offset)
elif response.type is RecordType.MX:
response.rdata, offset = RDataMX.from_bytes(message_bytes, offset)
elif response.type is RecordType.SOA:
response.rdata, offset = RDataSOA.from_bytes(message_bytes, offset)
else:
print("TODO: " + response.type.name)
return response, offset
def to_str(self):
base_return = "%s%s%d%s%s%s%s" % (self.name, "\t" * 3, self.ttl, "\t", self.classd.name, "\t", self.type.name)
if self.type is RecordType.MX:
base_return += "%s%d%s%s" % ("\t", self.rdata.preference, "\t\t", self.rdata.exchange)
elif self.type is RecordType.SOA:
base_return += "\tmname=%s, rname=%s, serial=%d, refresh=%d,\n%s retry=%d, expire=%d, minimum=%d" % \
(self.rdata.mname, self.rdata.rname, self.rdata.serial, self.rdata.refresh, "\t" * 7,
self.rdata.retry, self.rdata.expire, self.rdata.minimum)
else:
base_return += "%s%s" % ("\t", self.rdata)
return base_return
def __repr__(self):
return "<DNSResponse name=%s, type=%s, class=%s, ttl=%d, rdlength=%d, rdata=%s>" % \
(self.name, self.type.name, self.classd.name, self.ttl, self.rdlength, self.rdata)
class DNSMessage:
# All bytes are unsigned
# https://tools.ietf.org/html/rfc1035#section-4.1.1
header_format = struct.Struct("!6H")
    def __init__(self, identifier=None, is_query=True, recursive=True):
self._qr = None
self._opcode = None
self._rcode = None
# HEADER
# 2 bytes
        self.identifier = identifier if identifier is not None else random.getrandbits(16)  # fresh id per message (a default argument would be evaluated only once)
# 1 byte
self.qr = QueryType.QUESTION if is_query else QueryType.RESPONSE # 1 bit, query or response
self.opcode = OperationCode.QUERY # 4 bits, the kind of query message
self.aa = 0 # 1 bit, authoritative answer
self.tc = 0 # 1 bit, truncation, message not truncated
self.rd = 1 if recursive else 0 # 1 bit, recursion desired
# 1 byte
self.ra = 0 # 1 bit, recursion available
self.z = 0 # 1 bit, reserved for future use
self.ad = 0 # 1 bit, authentication data
self.cd = 0 # 1 bit, checking disabled
self.rcode = ResponseCode.NO_ERROR # 4 bit, response code
self.question_count = 0 # 2 bytes, question count in the question section
self.answer_count = 0 # 2 bytes, answer count in the answer section
self.authority_count = 0 # 2 bytes, name server record count in the authority section
self.additional_count = 0 # 2 bytes, additional count in the additional section
# BODY
self.questions = []
self.answers = []
self.authority = []
self.additional = []
@property
def qr(self):
return self._qr
@qr.setter
def qr(self, value):
self._qr = QueryType(value)
@property
def opcode(self):
return self._opcode
@opcode.setter
def opcode(self, value):
self._opcode = OperationCode(value)
@property
def rcode(self):
return self._rcode
@rcode.setter
def rcode(self, value):
self._rcode = ResponseCode(value)
def add_question(self, question_name, question_type=RecordType.A, question_class=ClassType.IN):
self.questions.append(DNSQuestion(question_name, question_type, question_class))
self.question_count += 1
return self
@staticmethod
def name_to_bytes(name):
"""
Convert a name to a packed bytes form
See: https://tools.ietf.org/html/rfc1035#section-3.1 and https://tools.ietf.org/html/rfc1035#section-2.3.4
:param name: a python string usually representing a web address
:return: bytes
"""
name_packed = bytes()
length_total = 0
for part in [part.encode("ascii") for part in name.split(".")]:
length_part = len(part)
length_total += length_part + 1 # 1 byte for the length
if length_part > 63:
exit_error("Labels can be maximum of 63 bytes")
name_packed += struct.pack("!B%ds" % length_part, length_part, part)
name_packed += struct.pack("!B", 0) # terminate name with 0
if length_total > 255:
exit_error("Names can not be more than 255 bytes")
return name_packed
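# For example, name_to_bytes("www.example.com") produces
# b"\x03www\x07example\x03com\x00": each label is prefixed with its one-byte
# length and the whole name is terminated by a zero byte.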
@staticmethod
def name_from_bytes(message_bytes, offset):
"""
Convert from packed bytes to a readable string
See: https://tools.ietf.org/html/rfc1035#section-4.1.4
:param message_bytes: bytes
:param offset: int
:return: tuple(str, int)
"""
labels = []
length_total = len(message_bytes)
while True:
length, = struct.unpack_from("!B", message_bytes, offset)
# is pointer, has 11 prefix
if (length & 0xC0) == 0xC0:
# construct pointer, ignore the first 2 bits
pointer_raw, = struct.unpack_from("!H", message_bytes, offset)
pointer = pointer_raw & 0x3FFF # 0b0011111111111111
offset += 2
if pointer >= length_total:
exit_error("DNS name pointer out of packet range")
# find pointer, return the offset from this point, we do not care where the pointer is
prefix = ".".join(labels)
if prefix: # does have labels before, so we can add a dot, example ns3<dot here>google.com
prefix += "."
return prefix + DNSMessage.name_from_bytes(message_bytes, pointer)[0], offset
# normal string
offset += 1
if not length: # found end of string
return ".".join(labels), offset
label, = struct.unpack_from("!%ds" % length, message_bytes, offset)
labels.append(label.decode("ascii"))
offset += length
def to_bytes(self):
""":return: bytes"""
# set flags, use 0xF which is 1111 as a bit mask
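# Flags word layout (16 bits, most significant first), matching the shifts below:
#   QR(1) OPCODE(4) AA(1) TC(1) RD(1) RA(1) Z(1) AD(1) CD(1) RCODE(4)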
flags = (self.qr.value << 15) | ((self.opcode.value & 0xF) << 11) | (self.aa << 10) | (self.tc << 9) | (self.rd << 8) | \
(self.ra << 7) | (self.z << 6) | (self.ad << 5) | (self.cd << 4) | self.rcode.value
# set header
data = self.header_format.pack(self.identifier, flags, self.question_count, self.answer_count,
self.authority_count, self.additional_count)
# set questions
for q in self.questions:
data += q.to_bytes()
return data
@staticmethod
def from_bytes(message_bytes):
"""
:param message_bytes: bytes
:return: DNSMessage
"""
if not isinstance(message_bytes, bytes):
raise Exception("Instance is not of type bytes")
offset, message = 0, DNSMessage()
# set header
message.identifier, flags, message.question_count, message.answer_count, message.authority_count, message.additional_count = \
message.header_format.unpack(message_bytes[offset:message.header_format.size])
message.qr, message.opcode, message.aa = bit_get(flags, 15), (flags & (0xF << 11)) >> 11, bit_get(flags, 10)
message.tc, message.rd, message.ra = bit_get(flags, 9), bit_get(flags, 8), bit_get(flags, 7)
message.z, message.ad, message.cd, message.rcode = bit_get(flags, 6), bit_get(flags, 5), bit_get(flags, 4), flags & 0xF
offset += message.header_format.size
# set questions
for _ in range(message.question_count):
q, offset = DNSQuestion.from_bytes(message_bytes, offset)
message.questions.append(q)
# set answers
for _ in range(message.answer_count):
response, offset = DNSResponse.from_bytes(message_bytes, offset)
message.answers.append(response)
# set authority
for _ in range(message.authority_count):
response, offset = DNSResponse.from_bytes(message_bytes, offset)
message.authority.append(response)
# set additional
for _ in range(message.additional_count):
response, offset = DNSResponse.from_bytes(message_bytes, offset)
message.additional.append(response)
return message
def header_to_str(self):
return "id = %d\n" \
"QR = %s, OPCODE = %s, AA = %d, TC = %d, RD = %d, RA = %d, Z = %d, AD = %d, CD = %d, RCODE = %s\n" \
"questions = %d, answers = %d\n" \
"authority = %d, additional = %d\n" \
% (self.identifier, self.qr.name, self.opcode.name, self.aa, self.tc, self.rd, self.ra, self.z, self.ad, self.cd,
self.rcode.name, self.question_count, self.answer_count, self.authority_count, self.additional_count)
def to_str(self):
str_repr = ["<<< HEADER >>>\n" + self.header_to_str()]
# add each section to a string
def add_to_str(count, list_count, section_name, add_header=True):
if not count:
return
assert count == len(list_count)
str_repr.append("\n<<< %s SECTION >>>" % section_name)
if add_header:
base_return = "Host%sTTL" % ("\t" * 4)
t = list_count[0].type
if t is RecordType.MX:
base_return += "%sPreference%sExchange" % ("\t" * 3, "\t")
str_repr.append(base_return)
for e in list_count:
str_repr.append(e.to_str())
add_to_str(self.question_count, self.questions, "QUESTION", False)
add_to_str(self.answer_count, self.answers, "ANSWER")
add_to_str(self.authority_count, self.authority, "AUTHORITY")
add_to_str(self.additional_count, self.additional, "ADDITIONAL")
return "\n".join(str_repr) + "\n"
if __name__ == "__main__":
supported_questions = RecordType._member_map_
cmd_parser = argparse.ArgumentParser(description="Simple DNS query app")
cmd_parser.add_argument("hostname", help="Hostname too look up")
cmd_parser.add_argument("--server", "-s", default="8.8.8.8", help="DNS server ip")
cmd_parser.add_argument("--port", "-p", default=53, type=int, help="DNS server port")
cmd_parser.add_argument("--recursive", "-r", dest="is_recursive", action="store_true", help="Use recursive querying, default")
cmd_parser.add_argument("--no-recursive", "-nr", dest="is_recursive", action="store_false",
help="Do not use recursive querying, use iterative")
cmd_parser.add_argument("--question", "-q", default="A", type=str,
help="The type of record returned. Supported are " + ", ".join(supported_questions.keys()))
cmd_parser.set_defaults(is_recursive=True)
args = cmd_parser.parse_args()
# set and validate
hostname = args.hostname
if not is_ip_valid(args.server):
exit_error("DNS server ip is not valid")
if args.question not in supported_questions:
exit_error("Question type is not supported. Supported questions are: " + ", ".join(supported_questions.keys()))
# create udp socket
s = None
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
except socket.error:
exit_error("Failed to create socket")
# find name server ip in additional section
def find_ns_ip(ns, additional):
for a in additional:
if (a.type is RecordType.A or a.type is RecordType.AAAA) and a.name == ns:
return a.rdata
return None
def send_dns_query(server_ip, server_port, question, is_recursive):
delimiter = "-" * 40
print("DNS server = %s:%d\nLooking up: %s" % (server_ip, server_port, hostname), end='\n' * 2)
try:
# send
send_message = DNSMessage(recursive=is_recursive).add_question(hostname, supported_questions[question])
send_data = send_message.to_bytes()
s.sendto(send_data, (server_ip, server_port))
print(delimiter + " SEND " + delimiter)
print(send_message.to_str())
# receive
receive_data, receive_address = s.recvfrom(1024)
receive_message = DNSMessage.from_bytes(receive_data)
print(delimiter + " RESPONSE " + delimiter)
print(receive_message.to_str())
if receive_message.tc:
exit_error("Value is truncated, use TCP")
# use iterative because the response is not recursive or we desire it to be iterative
if (not is_recursive or not receive_message.ra) and not receive_message.answers:
if not receive_message.authority and not receive_message.additional:
exit_error("Can not make iterative querying because there is no data in authority or additional")
print("\n".join(["-" * 150] * 2))
# loop over authority section and try to find suitable dns server with response
for r in receive_message.authority:
if r.type is RecordType.NS:
found_ip = find_ns_ip(r.rdata, receive_message.additional)
if found_ip is not None:
if send_dns_query(found_ip, 53, question, False):
return True
return False
return True
except socket.error:
exit_error("Failed to send message socket")
# first level
send_dns_query(args.server, args.port, args.question, args.is_recursive)
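# Example invocations (assuming this script is saved as, e.g., dns_query.py --
# the filename is illustrative, not part of the module):
#   python dns_query.py example.com --server 8.8.8.8 --question MX
#   python dns_query.py example.com -nr -q NS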
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Cluster Resolvers for Cloud TPUs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
from six.moves import urllib
from six.moves.urllib.error import URLError
from six.moves.urllib.request import Request
from six.moves.urllib.request import urlopen
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import ClusterResolver
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import format_master_url
from tensorflow.python.distribute.cluster_resolver.cluster_resolver import get_accelerator_devices
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
_GOOGLE_API_CLIENT_INSTALLED = True
try:
from googleapiclient import discovery # pylint: disable=g-import-not-at-top
from oauth2client.client import GoogleCredentials # pylint: disable=g-import-not-at-top
except ImportError:
_GOOGLE_API_CLIENT_INSTALLED = False
_GKE_ENV_VARIABLE = 'KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS'
_ENDPOINTS_SEPARATOR = ','
_DEFAULT_ENV_VARIABLE = 'TPU_NAME'
_DISCOVERY_SERVICE_URL_ENV_VARIABLE = 'TPU_API_DISCOVERY_URL'
_TPU_DEVICE_REGEX = re.compile(
r'.*task:(?P<host_id>\d+)/.*device:TPU:(?P<core_id>\d+)$')
_TPU_CONN_RETRIES = 120
DeviceDetails = collections.namedtuple(
'DeviceDetails', ['device_map', 'total_cores'])
@tf_export('distribute.cluster_resolver.TPUClusterResolver')
class TPUClusterResolver(ClusterResolver):
"""Cluster Resolver for Google Cloud TPUs.
This is an implementation of cluster resolvers for the Google Cloud TPU
service. As Cloud TPUs are in alpha, you will need to specify an API definition
file for this to consume, in addition to a list of Cloud TPUs in your Google
Cloud Platform project.
"""
def _tpuService(self):
"""Creates a new Cloud TPU API object.
This works around an issue where the underlying HTTP connection sometimes
times out when the script has been running for too long. Other methods in
this object call this method to get a new API object whenever they need
to communicate with the Cloud API.
Returns:
A Google Cloud TPU API object.
"""
if self._service:
return self._service
credentials = self._credentials
if credentials is None or credentials == 'default':
credentials = GoogleCredentials.get_application_default()
if self._discovery_url:
return discovery.build(
'tpu', 'v1alpha1',
credentials=credentials,
discoveryServiceUrl=self._discovery_url)
else:
return discovery.build(
'tpu', 'v1alpha1',
credentials=credentials)
def _requestComputeMetadata(self, path):
req = Request('http://metadata/computeMetadata/v1/%s' % path,
headers={'Metadata-Flavor': 'Google'})
resp = urlopen(req)
return compat.as_bytes(resp.read())
def _shouldResolve(self):
if isinstance(self._should_resolve_override, bool):
return self._should_resolve_override
if (self._tpu == compat.as_bytes('') or
self._tpu == compat.as_bytes('local') or
self._tpu.startswith(compat.as_bytes('/bns')) or
self._tpu.startswith(compat.as_bytes('localhost:')) or
self._tpu.startswith(compat.as_bytes('grpc://')) or
self._tpu.startswith(compat.as_bytes('uptc://'))):
return False
return True
@staticmethod
def _get_device_dict_and_cores(devices):
"""Returns a dict of hosts to cores and total cores given devices names.
Returns a namedtuple with two attributes:
device_map: A map of host_ids to a list of core_ids.
total_cores: The total number of cores within the TPU system.
Args:
devices: A list of devices returned by session.list_devices()
"""
device_map = collections.defaultdict(list)
num_cores = 0
for device in devices:
match = _TPU_DEVICE_REGEX.match(device.name)
if match:
host_id = match.group('host_id')
core_id = match.group('core_id')
device_map[host_id].append(core_id)
num_cores += 1
return DeviceDetails(device_map, num_cores)
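# For example, a device name such as '/job:worker/replica:0/task:3/device:TPU:1'
# matches _TPU_DEVICE_REGEX with host_id='3' and core_id='1', so it is counted
# towards host 3's list of cores.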
@staticmethod
def _verify_and_return_same_core_count(device_dict):
"""Verifies that every device in device_dict has the same # of cores."""
num_cores_per_host_set = (
{len(core_ids) for core_ids in device_dict.values()})
if len(num_cores_per_host_set) != 1:
raise RuntimeError('The number of TPU cores per host is not the same. This '
'should never happen. Devices: {}'.format(device_dict))
return num_cores_per_host_set.pop()
@staticmethod
def _inGke():
"""When running in GKE, the environment variable will be set."""
return _GKE_ENV_VARIABLE in os.environ
@staticmethod
def _gkeEndpoints():
return os.environ[_GKE_ENV_VARIABLE]
@staticmethod
def _envVarFallback():
if _DEFAULT_ENV_VARIABLE in os.environ:
return os.environ[_DEFAULT_ENV_VARIABLE]
return None
@staticmethod
def _environmentDiscoveryUrl():
return os.environ.get(_DISCOVERY_SERVICE_URL_ENV_VARIABLE)
@staticmethod
def _isRunningInGCE():
"""Checks for GCE presence by attempting to query the metadata service."""
try:
req = Request('http://metadata.google.internal/computeMetadata/v1',
headers={'Metadata-Flavor': 'Google'})
resp = urllib.request.urlopen(req, timeout=1)
info = resp.info()
if 'Metadata-Flavor' in info and info['Metadata-Flavor'] == 'Google':
return True
except URLError:
pass
return False
def __init__(self,
tpu=None,
zone=None,
project=None,
job_name='worker',
coordinator_name=None,
coordinator_address=None,
credentials='default',
service=None,
discovery_url=None):
"""Creates a new TPUClusterResolver object.
The ClusterResolver will then use the parameters to query the Cloud TPU APIs
for the IP addresses and ports of each Cloud TPU listed.
Args:
tpu: A string corresponding to the TPU to use. If the string is the empty
string, the string 'local', or a string that begins with 'grpc://' or
'/bns', then it is assumed to not correspond with a Cloud TPU and will
instead be passed as the session master and no ClusterSpec propagation
will be done. In the future, this may also support a list of strings
when multiple Cloud TPUs are used.
zone: Zone where the TPUs are located. If omitted or empty, we will assume
that the zone of the TPU is the same as the zone of the GCE VM, which we
will try to discover from the GCE metadata service.
project: Name of the GCP project containing Cloud TPUs. If omitted or
empty, we will try to discover the project name of the GCE VM from the
GCE metadata service.
job_name: Name of the TensorFlow job the TPUs belong to.
coordinator_name: The name to use for the coordinator. Set to None if the
coordinator should not be included in the computed ClusterSpec.
coordinator_address: The address of the coordinator (typically an ip:port
pair). If set to None, a TF server will be started. If coordinator_name
is None, a TF server will not be started even if coordinator_address is
None.
credentials: GCE Credentials. If None, then we use default credentials
from the oauth2client
service: The GCE API object returned by the googleapiclient.discovery
function. If you specify a custom service object, then the credentials
parameter will be ignored.
discovery_url: A URL template that points to the location of
the discovery service. It should have two parameters {api} and
{apiVersion} that when filled in produce an absolute URL to the
discovery document for that service. The environment variable
'TPU_API_DISCOVERY_URL' will override this.
Raises:
ImportError: If the googleapiclient is not installed.
ValueError: If no TPUs are specified.
RuntimeError: If an empty TPU name is specified and this is running in a
Google Cloud environment.
"""
if isinstance(tpu, list):
if not tpu:
raise ValueError('At least one TPU must be specified.')
if len(tpu) != 1:
raise NotImplementedError(
'Using multiple TPUs in a single session is not yet implemented')
tpu = tpu[0]
in_gke = self._inGke()
# When using GKE with Cloud TPUs, the env variable will be set.
if tpu is None:
if in_gke:
tpu = self._gkeEndpoints()
else:
tpu = self._envVarFallback()
if tpu is None:
raise ValueError('Please provide a TPU Name to connect to.')
self._tpu = compat.as_bytes(tpu) # self._tpu is always bytes
# If we are running in Cloud and don't specify a TPU name
if self._isRunningInGCE() and not self._tpu:
raise RuntimeError('You need to specify a TPU Name if you are running in '
'the Google Cloud environment.')
# By default the task_type is 'worker' and the task_id is 0 (which is the
# first worker in the task).
self.task_type = job_name
self.task_id = 0
if tpu.startswith('grpc://'):
# Cloud environment, where we are using GRPC to communicate to TPUs.
self._environment = ''
elif tpu == 'local' or not tpu:
# Google environment, where the TPU is attached to the host.
self._environment = 'google'
elif tpu.startswith('/bns') or tpu.startswith('uptc://'):
# Google environment, where we reach the TPU through BNS.
self._environment = 'google'
# If TPU is in the Google environment or exists locally, we don't use any
# RPC layer.
if tpu.startswith('/bns') or tpu.startswith(
'uptc://') or tpu == 'local' or not tpu:
self.rpc_layer = None
else:
self.rpc_layer = 'grpc'
# Setting this overrides the return value of self._shouldResolve()
self._should_resolve_override = None
# We strip out the protocol if it is included, and override the
# shouldResolve function to never resolve. We are adding the protocol back
# in later in self.master().
if self.rpc_layer is not None and tpu.startswith(self.rpc_layer + '://'):
tpu = tpu[len(self.rpc_layer + '://'):]
self._tpu = compat.as_bytes(tpu) # self._tpu is always bytes
self._should_resolve_override = False
# Whether we should actually attempt to contact Cloud APIs
should_resolve = self._shouldResolve()
# We error out if we are in a non-Cloud environment which cannot talk to the
# Cloud APIs using the standard class and a special object is not passed in.
self._service = service
if (self._service is None and should_resolve and
not _GOOGLE_API_CLIENT_INSTALLED):
raise ImportError('googleapiclient and oauth2client must be installed '
'before using the TPU cluster resolver. Execute: '
'`pip install --upgrade google-api-python-client` '
'and `pip install --upgrade oauth2client` to '
'install with pip.')
# We save user-passed credentials, unless the user didn't pass in anything.
self._credentials = credentials
if (credentials == 'default' and should_resolve and
_GOOGLE_API_CLIENT_INSTALLED):
self._credentials = None
# Automatically detect project and zone if unspecified.
if not project and should_resolve:
project = compat.as_str(
self._requestComputeMetadata('project/project-id'))
if not zone and should_resolve:
zone_path = compat.as_str(self._requestComputeMetadata('instance/zone'))
zone = zone_path.split('/')[-1]
self._project = project
self._zone = zone
self._discovery_url = self._environmentDiscoveryUrl() or discovery_url
self._coordinator_name = coordinator_name
if (coordinator_name and not coordinator_address and
(should_resolve or in_gke)):
self._start_local_server()
else:
self._coordinator_address = coordinator_address
def master(self, task_type=None, task_id=None, rpc_layer=None):
"""Get the Master string to be used for the session.
In the normal case, this returns the grpc path (grpc://1.2.3.4:8470) of
the first instance in the ClusterSpec returned by the cluster_spec function.
If a non-TPU name is used when constructing a TPUClusterResolver, that will
be returned instead (e.g. if the tpu argument's value when constructing
this TPUClusterResolver was 'grpc://10.240.1.2:8470',
'grpc://10.240.1.2:8470' will be returned).
Args:
task_type: (Optional, string) The type of the TensorFlow task of the
master.
task_id: (Optional, integer) The index of the TensorFlow task of the
master.
rpc_layer: (Optional, string) The RPC protocol TensorFlow should use to
communicate with TPUs.
Returns:
string, the connection string to use when creating a session.
Raises:
ValueError: If none of the TPUs specified exists.
"""
if self._shouldResolve():
# We are going to communicate with the Cloud TPU APIs to get a Cluster.
cluster_spec = self.cluster_spec()
if task_type is not None and task_id is not None:
# task_type and task_id is from the function parameter
master = cluster_spec.task_address(task_type, task_id)
elif self.task_type is not None and self.task_id is not None:
# task_type and task_id is from the object
master = cluster_spec.task_address(self.task_type, self.task_id)
else:
# by default we take the first item in the cluster with the right name
job_tasks = cluster_spec.job_tasks(self.task_type)
if not job_tasks:
raise ValueError('No TPUs with the specified names exist.')
master = job_tasks[0]
else:
if isinstance(self._tpu, (bytes, bytearray)):
master = compat.as_text(self._tpu).split(_ENDPOINTS_SEPARATOR)[0]
else:
master = self._tpu.split(_ENDPOINTS_SEPARATOR)[0]
return format_master_url(master, rpc_layer or self.rpc_layer)
def get_master(self):
return self.master()
def get_job_name(self):
if (self._shouldResolve() or
self._isRunningInGCE()):
return self.task_type
def cluster_spec(self):
"""Returns a ClusterSpec object based on the latest TPU information.
We retrieve the information from the GCE APIs every time this method is
called.
Returns:
A ClusterSpec containing host information returned from Cloud TPUs.
Raises:
RuntimeError: If the provided TPU is not healthy.
"""
############################################################################
# There are 5 potential cases this code must handle:
# 1. [Normal case.] We should resolve the TPU name to a set of tasks, and
# a. Create a ClusterSpec that includes the coordinator job
# b. Create a ClusterSpec without the coordinator job.
# 2. [GKE / No API Access.] We should not resolve the TPU name to a set of
# tasks and
# a. Create a ClusterSpec with the coordinator
# b. Create a ClusterSpec without the coordinator
# 3. [Other (legacy non-gRPC).] We should return an empty ClusterSpec.
############################################################################
if self._shouldResolve():
# Case 1.
full_name = 'projects/%s/locations/%s/nodes/%s' % (
self._project, self._zone, compat.as_text(self._tpu))
service = self._tpuService()
request = service.projects().locations().nodes().get(name=full_name)
response = request.execute()
if 'state' in response and response['state'] != 'READY':
raise RuntimeError('TPU "%s" is not yet ready; state: "%s"' %
(compat.as_text(self._tpu), response['state']))
if 'health' in response and response['health'] != 'HEALTHY':
raise RuntimeError('TPU "%s" is unhealthy: "%s"' %
(compat.as_text(self._tpu), response['health']))
if 'networkEndpoints' in response:
worker_list = [
'%s:%s' % (endpoint['ipAddress'], endpoint['port'])
for endpoint in response['networkEndpoints']
]
else:
# Fall back to the deprecated response format
instance_url = '%s:%s' % (response['ipAddress'], response['port'])
worker_list = [instance_url]
cluster_spec = {self.task_type: worker_list}
else:
if self.rpc_layer is None:
# Case 3.
return None
# Case 2.
tpus = []
for tpu in compat.as_text(self._tpu).split(_ENDPOINTS_SEPARATOR):
# We are working around the fact that GKE environment variable that is
# supplied to us has the protocol string embedded in it, but we want
# to strip it out for the ClusterSpec.
if (self.rpc_layer is not None and
tpu.startswith(self.rpc_layer + '://')):
tpus.append(tpu[len(self.rpc_layer + '://'):])
else:
tpus.append(tpu)
cluster_spec = {self.task_type: tpus}
if self._coordinator_address:
# {1, 2}.a
cluster_spec[self._coordinator_name] = [self._coordinator_address]
return server_lib.ClusterSpec(cluster_spec)
def num_accelerators(self,
task_type=None,
task_id=None,
accelerator_type='TPU',
config_proto=None):
"""Returns the number of TPU cores per worker.
Connects to the master and lists all the devices present in the master,
and counts them up. Also verifies that the device counts per host in the
cluster are the same before returning the number of TPU cores per host.
Args:
task_type: Unused.
task_id: Unused.
accelerator_type: Unused.
config_proto: Used to create a connection to a TPU master in order to
retrieve the system metadata.
Raises:
RuntimeError: If we cannot talk to a TPU worker after retrying or if the
number of TPU devices per host is different.
"""
retry_count = 1
# TODO(b/120564445): Replace with standard library for retries.
while True:
try:
device_details = TPUClusterResolver._get_device_dict_and_cores(
get_accelerator_devices(self.master(), config_proto=config_proto))
break
except errors.DeadlineExceededError:
error_message = ('Failed to connect to master. The TPU might not be '
'ready (e.g. still scheduling) or the master '
'address is incorrect: got (%s)' % self.master())
if retry_count <= _TPU_CONN_RETRIES:
logging.warning(error_message)
logging.warning('Retrying (%d/%d)...', retry_count, _TPU_CONN_RETRIES)
retry_count += 1
else:
raise RuntimeError(error_message)
if device_details.total_cores:
return TPUClusterResolver._verify_and_return_same_core_count(
device_details.device_map)
return 0
@property
def environment(self):
"""Returns the current environment which TensorFlow is running in."""
return self._environment
def _start_local_server(self):
address = compat.as_text(self._requestComputeMetadata(
'instance/network-interfaces/0/ip'))
self._server = server_lib.Server(
{
'local': ['0.0.0.0:0']
}, protocol='grpc', config=None, start=True)
# self._server.target is of the form: grpc://ipaddress:port
target = compat.as_bytes(self._server.target)
splits = target.split(compat.as_bytes(':'))
assert len(splits) == 3, self._server.target
assert splits[0] == compat.as_bytes('grpc'), self._server.target
self._coordinator_port = compat.as_text(splits[2])
self._coordinator_address = '%s:%s' % (
address, compat.as_text(self._coordinator_port))
def __deepcopy__(self, memo):
# TODO(b/73668574): Remove this once RunConfig avoids performing deepcopy.
return self
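# Typical usage sketch (names and values are illustrative, not taken from this file):
#   resolver = TPUClusterResolver(tpu='my-tpu-name', zone='us-central1-b', project='my-project')
#   cluster_spec = resolver.cluster_spec()
#   master = resolver.master()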
|
|
import os
import tempfile
import cgi
import httplib
import urllib, urllib2
import base64
from functools import wraps
from string import Template
from wsgiref.util import request_uri
from amara.lib.iri import *
from akara import logger
def status_response(code):
return '%i %s'%(code, httplib.responses[code])
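# e.g. status_response(404) -> '404 Not Found', the status line expected by WSGI start_response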
class iterwrapper:
"""
Wraps the response body iterator from the application to meet WSGI
requirements.
"""
def __init__(self, wrapped, responder):
"""
wrapped - the iterator coming from the application
responder - a callable for any processing of a
response body chunk before passing it on to the server.
"""
self._wrapped = iter(wrapped)
self._responder = responder(self._wrapped)
if hasattr(wrapped, 'close'):
self.close = self._wrapped.close
def __iter__(self):
return self
def next(self):
return self._responder.next()
def geturl(environ, relative=''):
"""
Constructs a portable URL for your application. If relative is omitted or '',
just return the current base URL. Otherwise resolve the relative portion against
the present base and return the resulting URL.
(Inspired by url functions in CherryPy and Pylons, and Ian Bicking's code in PEP 333)
If you have a proxy that forwards the HOST but not the original HTTP request path
you might have to set akara.proxy-base in environ (e.g. through .ini) See
http://wiki.xml3k.org/Akara/Configuration
"""
#Manually set proxy base URI for non-well-behaved proxies, such as Apache < 1.3.33,
#Or for cases where the proxy is not mounted at the root of a host, and thus the original
#request path info is lost
if environ.get('akara.proxy-base'):
url = environ['akara.proxy-base']
if relative: url = absolutize(relative, url)  # absolutize comes from amara.lib.iri
return url
url = environ['wsgi.url_scheme']+'://'
#Apache 1.3.33 and later mod_proxy uses X-Forwarded-Host
if environ.get('HTTP_X_FORWARDED_HOST'):
url += environ['HTTP_X_FORWARDED_HOST']
#Lighttpd uses X-Host
elif environ.get('HTTP_X_HOST'):
url += environ['HTTP_X_HOST']
elif environ.get('HTTP_HOST'):
url += environ['HTTP_HOST']
else:
url += environ['SERVER_NAME']
if environ['wsgi.url_scheme'] == 'https':
if environ['SERVER_PORT'] != '443':
url += ':' + environ['SERVER_PORT']
else:
if environ['SERVER_PORT'] != '80':
url += ':' + environ['SERVER_PORT']
#Can't use the more strict Uri.PercentEncode because it would quote the '/'
url += urllib.quote(environ.get('SCRIPT_NAME', '').rstrip('/')) + '/'
if relative: url = absolutize(relative, url)
return url
def guess_self_uri(environ):
return absolutize(environ['SCRIPT_NAME'].rstrip('/'), request_uri(environ, include_query=False))
def find_peer_service(environ, peer_id):
'''
Find a peer service endpoint, by ID, mounted on this same Akara instance
Must be called from a running akara service, and it is highly recommended to call
at the top of service functions, or at least before the request environ has been manipulated
'''
from amara.lib.iri import absolutize, join
from akara import request
from akara.registry import _current_registry
serverbase = guess_self_uri(environ)
for (path, s) in _current_registry._registered_services.iteritems():
if s.ident == peer_id:
return join(serverbase, '..', path)
return None
def http_method_handler(method):
'''
A decorator maker to flag a function as suitable for a given HTTP method
'''
def wrap(f):
#@wraps(f)
#def wrapper(*args, **kwargs):
# return f()
f.method = method
return f
return wrap
class wsgibase(object):
def __init__(self):
self._method_handlers = {}
if not hasattr(self, 'dispatch'):
self.dispatch = self.dispatch_by_lookup
#if not hasattr(self, 'dispatch'):
# self.dispatch = self.dispatch_by_lookup if hasattr(self, '_methods') else self.dispatch_simply
for obj in ( getattr(self, name) for name in dir(self) ):
method = getattr(obj, 'method', None)
if method:
self._method_handlers[method] = obj
return
def __call__(self, environ, start_response):
self.environ = environ
self.start_response = start_response
return self
def __iter__(self):
func = self.dispatch()
if func is None:
response_headers = [('Content-type','text/plain')]
self.start_response(status_response(httplib.METHOD_NOT_ALLOWED), response_headers)
yield 'HTTP method Not Allowed'
else:
yield func()
def dispatch_simply(self):
func = 'do_%s' % self.environ['REQUEST_METHOD']
if not hasattr(self, func):
return None
else:
return func
def dispatch_by_lookup(self):
return self._method_handlers.get(self.environ['REQUEST_METHOD'])
def parse_fields(self):
s = self.environ['wsgi.input'].read(int(self.environ['CONTENT_LENGTH']))
return cgi.parse_qs(s)
def extract_auth(environ):
'''
Extract auth creds (HTTP basic only, for now) from the incoming request and return the
(username, password) pair, or None if no credentials were supplied
environ - The usual WSGI structure. Note: if you are using simple_service,
in Akara services available as akara.request.environ, or perhaps passed right
into the handler
'''
#Useful: http://www.voidspace.org.uk/python/articles/authentication.shtml
auth = environ.get('HTTP_AUTHORIZATION')
if not auth: return None
scheme, data = auth.split(None, 1)
if scheme.lower() != 'basic':
raise RuntimeError('Unsupported HTTP auth scheme: %s'%scheme)
username, password = data.decode('base64').split(':', 1)
return username, password
def copy_auth(environ, top, realm=None):
'''
Get auth creds (HTTP basic only, for now) from the incoming request and return an
HTTP auth handler for urllib2. This handler allows you to "forward" this auth to
remote services
environ - The usual WSGI structure. Note: if you are using simple_service,
in Akara services available as akara.request.environ, or perhaps passed right
into the handler
top - top URL to be used for this auth.
'''
#Useful: http://www.voidspace.org.uk/python/articles/authentication.shtml
creds = extract_auth(environ)
if creds:
username, password = creds
else:
return None
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
# HTTPPasswordMgr top must omit any URL components before the host (i.e. no scheme and no auth info in the authority section)
#(scheme, authority, path, query, fragment) = split_uri_ref(top)
#auth, host, port = split_authority(authority)
#auth_top_url = (host + ':' + port if port else host) + path
#print >> sys.stderr, 'Auth creds: %s:%s (%s)'%(username, password, auth_top_url)
logger.debug('Auth creds: %s:%s (%s)'%(username, password, top))
# Not setting the realm for now, so use None
#password_mgr.add_password(None, auth_top_url, username, password)
password_mgr.add_password(None, top, username, password)
#password_handler = urllib2.HTTPDigestAuthHandler(password_mgr)
password_handler = urllib2.HTTPBasicAuthHandler(password_mgr)
return password_handler
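# Minimal usage sketch (variable names here are illustrative): the returned handler
# can be installed into a urllib2 opener so forwarded requests reuse the caller's creds:
#   handler = copy_auth(environ, top)
#   if handler:
#       opener = urllib2.build_opener(handler)
#       resp = opener.open(top)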
def header_credentials(username, password, headers=None):
'''
httplib2's simple HTTP auth support is great, but it doesn't recognize every case
in which auth is needed, sometimes because of compliance issues on the remote site*
Also, there are unusual cases where you want to always send the auth header,
without first waiting for a 401 challenge.
This function helps with these issues by unconditionally setting up httplib2 headers
for Basic authentication
>>> username = '[email protected]'
>>> password = 'password'
>>> H = httplib2.Http()
>>> auth_headers = header_credentials(username, password)
>>> response, content = H.request(url, 'GET', headers=auth_headers)
* For an example of such issues: http://pyre.posterous.com/accessing-posterous-api-in-python
'''
credentials = "Basic " + base64.b64encode("%s:%s"%(username, password))
if headers:
headers.update({ 'Authorization': credentials })
else:
headers = { 'Authorization': credentials }
return headers
CHUNKLEN = 4096
def read_http_body_to_temp(environ, start_response):
'''
Handle the reading of a file from an HTTP message body (file pointer from wsgi.input)
in chunks to a temporary file
Returns the file path of the resulting temp file
'''
clen = int(environ.get('CONTENT_LENGTH') or 0)  # avoid int(None) when the header is missing
if not clen:
raise ContentLengthRequiredError()
http_body = environ['wsgi.input']
temp = tempfile.mkstemp(suffix=".dat")
while clen != 0:
chunk_len = min(CHUNKLEN, clen)
data = http_body.read(chunk_len)
if data:
#assert chunk_len == os.write(temp[0], data)
written = os.write(temp[0], data)
#print >> sys.stderr, "Bytes written to file in this chunk", written
clen -= len(data)
else:
clen = 0
os.fsync(temp[0]) #is this needed with the close below?
os.close(temp[0])
return temp[1]
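# Note: the caller owns the returned temp file path and is responsible for removing it
# (e.g. with os.remove) once the body has been processed.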
#Convert WSGI environ headers to a plain header list (e.g. for forwarding request headers)
#Copied from webob.
_key2header = {
'CONTENT_TYPE': 'Content-Type',
'CONTENT_LENGTH': 'Content-Length',
'HTTP_CONTENT_TYPE': 'Content_Type',
'HTTP_CONTENT_LENGTH': 'Content_Length',
}
#Skipping User-Agent is actually Moin-specific, since Moin seems to react to different UAs, and e.g. gives 403 errors in response to Curl's UA
_skip_headers = [
'HTTP_HOST',
'HTTP_ACCEPT',
'HTTP_USER_AGENT',
]
def _trans_key(key, exclude=[]):
if not isinstance(key, basestring):
return None
elif key in _key2header:
#Do NOT copy these special headers (change from Webob)
return None
#return _key2header[key]
elif key in _skip_headers or key in exclude:
return None
elif key.startswith('HTTP_'):
return key[5:].replace('_', '-').title()
else:
return None
def copy_headers(environ,exclude=[]):
header_list = []
for k, v in environ.iteritems():
pure_header = _trans_key(k,exclude)
if pure_header:
#FIXME: does this account for dupe headers in the inbound WSGI?
header_list.append((pure_header, v))
return header_list
def copy_headers_to_dict(environ, exclude=[]):
headers = {}
for k, v in environ.iteritems():
pure_header = _trans_key(k,exclude)
if pure_header:
#FIXME: does this account for dupe headers in the inbound WSGI?
headers[pure_header] = v
return headers
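# For example, a WSGI key such as 'HTTP_ACCEPT_LANGUAGE' is copied through as
# 'Accept-Language', while 'HTTP_HOST', 'HTTP_ACCEPT', 'HTTP_USER_AGENT' and the
# content headers listed above are deliberately dropped.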
#
# ======================================================================
# Exceptions
# ======================================================================
# Base exception used to indicate errors. Rather than replicating tons
# of error handling code, these errors are raised instead. A top-level
# exception handler catches them and then generates some kind of
# appropriate HTTP response. Positional arguments (if any)
# are just passed to the Exception base as before. Keyword arguments
# are saved in a local dictionary. They will be used to pass parameters
# to the Template strings used when generating error messages.
class HttpError(Exception):
def __init__(self,*args,**kwargs):
Exception.__init__(self,*args)
self.parms = kwargs
class BadTargetError(HttpError): pass
class HTTPAuthorizationError(HttpError): pass
class MoinAuthorizationError(HttpError): pass
class UnexpectedResponseError(HttpError): pass
class MoinMustAuthenticateError(HttpError): pass
class MoinNotFoundError(HttpError): pass
class ContentLengthRequiredError(HttpError): pass
class GenericClientError(HttpError): pass
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pylons import tmpl_context as c
from datetime import datetime
import urllib2
import mock
from ming.orm.ormsession import ThreadLocalORMSession
from ming.orm import session
from ming import schema
from nose.tools import (
raises,
assert_equal,
assert_in,
assert_true,
assert_false,
)
from forgetracker.model import Ticket, TicketAttachment
from forgetracker.tests.unit import TrackerTestWithModel
from forgetracker.import_support import ResettableStream
from allura.model import Feed, Post, User
from allura.lib import helpers as h
from allura.tests import decorators as td
class TestTicketModel(TrackerTestWithModel):
def test_that_label_counts_are_local_to_tool(self):
"""Test that label queries return only artifacts from the specified
tool.
"""
# create a ticket in two different tools, with the same label
from allura.tests import decorators as td
@td.with_tool('test', 'Tickets', 'bugs', username='test-user')
def _test_ticket():
return Ticket(ticket_num=1, summary="ticket1", labels=["mylabel"])
@td.with_tool('test', 'Tickets', 'bugs2', username='test-user')
def _test_ticket2():
return Ticket(ticket_num=2, summary="ticket2", labels=["mylabel"])
# create and save the tickets
t1 = _test_ticket()
t2 = _test_ticket2()
ThreadLocalORMSession.flush_all()
# test label query results
label_count1 = t1.artifacts_labeled_with(
"mylabel", t1.app_config).count()
label_count2 = t2.artifacts_labeled_with(
"mylabel", t2.app_config).count()
assert 1 == label_count1 == label_count2
def test_that_it_has_ordered_custom_fields(self):
custom_fields = dict(my_field='my value')
Ticket(summary='my ticket', custom_fields=custom_fields, ticket_num=3)
ThreadLocalORMSession.flush_all()
ticket = Ticket.query.get(summary='my ticket')
assert ticket.custom_fields == dict(my_field='my value')
@raises(schema.Invalid)
def test_ticket_num_required(self):
Ticket(summary='my ticket')
def test_ticket_num_required2(self):
t = Ticket(summary='my ticket', ticket_num=12)
try:
t.ticket_num = None
except schema.Invalid:
pass
else:
raise AssertionError('Expected schema.Invalid to be thrown')
def test_activity_extras(self):
t = Ticket(summary='my ticket', ticket_num=12)
assert_in('allura_id', t.activity_extras)
assert_equal(t.activity_extras['summary'], t.summary)
def test_has_activity_access(self):
t = Ticket(summary='ticket', ticket_num=666)
assert_true(t.has_activity_access('read', c.user, 'activity'))
t.deleted = True
assert_false(t.has_activity_access('read', c.user, 'activity'))
def test_comment_has_activity_access(self):
t = Ticket(summary='ticket', ticket_num=666, deleted=True)
p = t.discussion_thread.add_post(text='test post')
assert_equal(p.status, 'ok')
assert_true(p.has_activity_access('read', c.user, 'activity'))
p.status = 'spam'
assert_false(p.has_activity_access('read', c.user, 'activity'))
p.status = 'pending'
assert_false(p.has_activity_access('read', c.user, 'activity'))
p.status = 'ok'
p.deleted = True
assert_false(p.has_activity_access('read', c.user, 'activity'))
def test_private_ticket(self):
from allura.model import ProjectRole
from allura.model import ACE, DENY_ALL
from allura.lib.security import Credentials, has_access
from allura.websetup import bootstrap
admin = c.user
creator = bootstrap.create_user('Not a Project Admin')
developer = bootstrap.create_user('Project Developer')
observer = bootstrap.create_user('Random Non-Project User')
anon = User(_id=None, username='*anonymous',
display_name='Anonymous')
t = Ticket(summary='my ticket', ticket_num=3,
reported_by_id=creator._id)
assert creator == t.reported_by
role_admin = ProjectRole.by_name('Admin')._id
role_developer = ProjectRole.by_name('Developer')._id
role_creator = ProjectRole.by_user(t.reported_by, upsert=True)._id
ProjectRole.by_user(
developer, upsert=True).roles.append(role_developer)
ThreadLocalORMSession.flush_all()
cred = Credentials.get().clear()
t.private = True
assert_equal(t.acl, [
ACE.allow(role_developer, 'save_searches'),
ACE.allow(role_developer, 'read'),
ACE.allow(role_developer, 'create'),
ACE.allow(role_developer, 'update'),
ACE.allow(role_developer, 'unmoderated_post'),
ACE.allow(role_developer, 'post'),
ACE.allow(role_developer, 'moderate'),
ACE.allow(role_developer, 'delete'),
ACE.allow(role_creator, 'read'),
ACE.allow(role_creator, 'post'),
ACE.allow(role_creator, 'create'),
ACE.allow(role_creator, 'unmoderated_post'),
DENY_ALL])
assert has_access(t, 'read', user=admin)()
assert has_access(t, 'create', user=admin)()
assert has_access(t, 'update', user=admin)()
assert has_access(t, 'read', user=creator)()
assert has_access(t, 'post', user=creator)()
assert has_access(t, 'unmoderated_post', user=creator)()
assert has_access(t, 'create', user=creator)()
assert not has_access(t, 'update', user=creator)()
assert has_access(t, 'read', user=developer)()
assert has_access(t, 'create', user=developer)()
assert has_access(t, 'update', user=developer)()
assert not has_access(t, 'read', user=observer)()
assert not has_access(t, 'create', user=observer)()
assert not has_access(t, 'update', user=observer)()
assert not has_access(t, 'read', user=anon)()
assert not has_access(t, 'create', user=anon)()
assert not has_access(t, 'update', user=anon)()
t.private = False
assert t.acl == []
assert has_access(t, 'read', user=admin)()
assert has_access(t, 'create', user=admin)()
assert has_access(t, 'update', user=admin)()
assert has_access(t, 'read', user=developer)()
assert has_access(t, 'create', user=developer)()
assert has_access(t, 'update', user=developer)()
assert has_access(t, 'read', user=creator)()
assert has_access(t, 'unmoderated_post', user=creator)()
assert has_access(t, 'create', user=creator)()
assert not has_access(t, 'update', user=creator)()
assert has_access(t, 'read', user=observer)()
assert has_access(t, 'read', user=anon)()
def test_feed(self):
t = Ticket(
app_config_id=c.app.config._id,
ticket_num=1,
summary='test ticket',
description='test description',
created_date=datetime(2012, 10, 29, 9, 57, 21, 465000))
assert_equal(t.created_date, datetime(2012, 10, 29, 9, 57, 21, 465000))
f = Feed.post(
t,
title=t.summary,
description=t.description,
pubdate=t.created_date)
assert_equal(f.pubdate, datetime(2012, 10, 29, 9, 57, 21, 465000))
assert_equal(f.title, 'test ticket')
assert_equal(f.description,
'<div class="markdown_content"><p>test description</p></div>')
@td.with_tool('test', 'Tickets', 'bugs', username='test-user')
@td.with_tool('test', 'Tickets', 'bugs2', username='test-user')
def test_ticket_move(self):
app1 = c.project.app_instance('bugs')
app2 = c.project.app_instance('bugs2')
with h.push_context(c.project._id, app_config_id=app1.config._id):
ticket = Ticket.new()
ticket.summary = 'test ticket'
ticket.description = 'test description'
ticket.assigned_to_id = User.by_username('test-user')._id
ticket.discussion_thread.add_post(text='test comment')
assert_equal(
Ticket.query.find({'app_config_id': app1.config._id}).count(), 1)
assert_equal(
Ticket.query.find({'app_config_id': app2.config._id}).count(), 0)
assert_equal(
Post.query.find(dict(thread_id=ticket.discussion_thread._id)).count(), 1)
t = ticket.move(app2.config)
assert_equal(
Ticket.query.find({'app_config_id': app1.config._id}).count(), 0)
assert_equal(
Ticket.query.find({'app_config_id': app2.config._id}).count(), 1)
assert_equal(t.summary, 'test ticket')
assert_equal(t.description, 'test description')
assert_equal(t.assigned_to.username, 'test-user')
assert_equal(t.url(), '/p/test/bugs2/1/')
post = Post.query.find(dict(thread_id=ticket.discussion_thread._id,
text={'$ne': 'test comment'})).first()
assert post is not None, 'No comment about ticket moving'
message = 'Ticket moved from /p/test/bugs/1/'
assert_equal(post.text, message)
post = Post.query.find(dict(text='test comment')).first()
assert_equal(post.thread.discussion_id, app2.config.discussion_id)
assert_equal(post.thread.app_config_id, app2.config._id)
assert_equal(post.app_config_id, app2.config._id)
@td.with_tool('test', 'Tickets', 'bugs', username='test-user')
@td.with_tool('test', 'Tickets', 'bugs2', username='test-user')
def test_ticket_move_with_different_custom_fields(self):
app1 = c.project.app_instance('bugs')
app2 = c.project.app_instance('bugs2')
app1.globals.custom_fields.extend([
{'name': '_test', 'type': 'string', 'label': 'Test field'},
{'name': '_test2', 'type': 'string', 'label': 'Test field 2'}])
app2.globals.custom_fields.append(
{'name': '_test', 'type': 'string', 'label': 'Test field'})
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
with h.push_context(c.project._id, app_config_id=app1.config._id):
ticket = Ticket.new()
ticket.summary = 'test ticket'
ticket.description = 'test description'
ticket.custom_fields['_test'] = 'test val'
ticket.custom_fields['_test2'] = 'test val 2'
t = ticket.move(app2.config)
assert_equal(t.summary, 'test ticket')
assert_equal(t.description, 'test description')
assert_equal(t.custom_fields['_test'], 'test val')
post = Post.query.find(
dict(thread_id=ticket.discussion_thread._id)).first()
assert post is not None, 'No comment about ticket moving'
message = 'Ticket moved from /p/test/bugs/1/'
message += '\n\nCan\'t be converted:\n'
message += '\n- **_test2**: test val 2'
assert_equal(post.text, message)
@td.with_tool('test', 'Tickets', 'bugs', username='test-user')
@td.with_tool('test', 'Tickets', 'bugs2', username='test-user')
def test_ticket_move_with_users_not_in_project(self):
app1 = c.project.app_instance('bugs')
app2 = c.project.app_instance('bugs2')
app1.globals.custom_fields.extend([
{'name': '_user_field', 'type': 'user', 'label': 'User field'},
{'name': '_user_field_2', 'type': 'user', 'label': 'User field 2'}])
app2.globals.custom_fields.extend([
{'name': '_user_field', 'type': 'user', 'label': 'User field'},
{'name': '_user_field_2', 'type': 'user', 'label': 'User field 2'}])
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
from allura.websetup import bootstrap
bootstrap.create_user('test-user-0')
with h.push_context(c.project._id, app_config_id=app1.config._id):
ticket = Ticket.new()
ticket.summary = 'test ticket'
ticket.description = 'test description'
ticket.custom_fields['_user_field'] = 'test-user' # in project
# not in project
ticket.custom_fields['_user_field_2'] = 'test-user-0'
# not in project
ticket.assigned_to_id = User.by_username('test-user-0')._id
t = ticket.move(app2.config)
assert_equal(t.assigned_to_id, None)
assert_equal(t.custom_fields['_user_field'], 'test-user')
assert_equal(t.custom_fields['_user_field_2'], '')
post = Post.query.find(
dict(thread_id=ticket.discussion_thread._id)).first()
assert post is not None, 'No comment about ticket moving'
message = 'Ticket moved from /p/test/bugs/1/'
message += '\n\nCan\'t be converted:\n'
message += '\n- **_user_field_2**: test-user-0 (user not in project)'
message += '\n- **assigned_to**: test-user-0 (user not in project)'
assert_equal(post.text, message)
@td.with_tool('test', 'Tickets', 'bugs', username='test-user')
def test_attach_with_resettable_stream(self):
with h.push_context(c.project._id, app_config_id=c.app.config._id):
ticket = Ticket.new()
ticket.summary = 'test ticket'
ticket.description = 'test description'
assert_equal(len(ticket.attachments), 0)
f = urllib2.urlopen('file://%s' % __file__)
TicketAttachment.save_attachment(
'test_ticket_model.py', ResettableStream(f),
artifact_id=ticket._id)
ThreadLocalORMSession.flush_all()
# need to refetch since attachments are cached
session(ticket).expunge(ticket)
ticket = Ticket.query.get(_id=ticket._id)
assert_equal(len(ticket.attachments), 1)
assert_equal(ticket.attachments[0].filename, 'test_ticket_model.py')
def test_json_parents(self):
ticket = Ticket.new()
json_keys = ticket.__json__().keys()
assert_in('related_artifacts', json_keys) # from Artifact
assert_in('votes_up', json_keys) # VotableArtifact
assert_in('ticket_num', json_keys) # Ticket
assert ticket.__json__()['assigned_to'] is None
@mock.patch('forgetracker.model.ticket.tsearch')
@mock.patch.object(Ticket, 'paged_search')
@mock.patch.object(Ticket, 'paged_query')
def test_paged_query_or_search(self, query, search, tsearch):
app_cfg, user = mock.Mock(), mock.Mock()
mongo_query = 'mongo query'
solr_query = 'solr query'
kw = {'kw1': 'test1', 'kw2': 'test2'}
filter = None
Ticket.paged_query_or_search(app_cfg, user, mongo_query, solr_query, filter, **kw)
query.assert_called_once_with(app_cfg, user, mongo_query, sort=None, limit=None, page=0, **kw)
assert_equal(tsearch.query_filter_choices.call_count, 1)
assert_equal(tsearch.query_filter_choices.call_args[0][0], 'solr query')
assert_equal(search.call_count, 0)
query.reset_mock(), search.reset_mock(), tsearch.reset_mock()
filter = {'status': 'unread'}
Ticket.paged_query_or_search(app_cfg, user, mongo_query, solr_query, filter, **kw)
search.assert_called_once_with(app_cfg, user, solr_query, filter=filter, sort=None, limit=None, page=0, **kw)
assert_equal(query.call_count, 0)
assert_equal(tsearch.query_filter_choices.call_count, 0)
def test_index(self):
idx = Ticket(ticket_num=2, summary="ticket2", labels=["mylabel", "other"]).index()
assert_equal(idx['summary_t'], 'ticket2')
assert_equal(idx['labels_t'], 'mylabel other')
assert_equal(idx['reported_by_s'], 'test-user')
assert_equal(idx['assigned_to_s'], None) # must exist at least
|
|
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import datetime
import json
import mock
import unittest
from keystoneauth1 import plugin
from keystoneauth1 import loading
from keystoneauth1 import exceptions
from swiftclient import authv1
class TestDataNoAccount(object):
options = dict(
auth_url='http://saio:8080/auth/v1.0',
username='test:tester',
password='testing')
storage_url = 'http://saio:8080/v1/AUTH_test'
expected_endpoint = storage_url
token = 'token'
class TestDataWithAccount(object):
options = dict(
auth_url='http://saio:8080/auth/v1.0',
username='test2:tester2',
project_name='SOME_other_account',
password='testing2')
storage_url = 'http://saio:8080/v1/AUTH_test2'
expected_endpoint = 'http://saio:8080/v1/SOME_other_account'
token = 'other_token'
class TestPluginLoading(TestDataNoAccount, unittest.TestCase):
def test_can_load(self):
loader = loading.get_plugin_loader('v1password')
self.assertIsInstance(loader, authv1.PasswordLoader)
auth_plugin = loader.load_from_options(**self.options)
self.assertIsInstance(auth_plugin, authv1.PasswordPlugin)
self.assertEqual(self.options['auth_url'], auth_plugin.auth_url)
self.assertEqual(self.options['username'], auth_plugin.user)
self.assertEqual(self.options.get('project_name'), auth_plugin.account)
self.assertEqual(self.options['password'], auth_plugin.key)
def test_get_state(self):
auth_plugin = authv1.PasswordPlugin(**self.options)
self.assertIsNone(auth_plugin.get_auth_state())
with mock.patch('swiftclient.authv1.time.time', return_value=1234.56):
auth_plugin.auth_ref = authv1.AccessInfoV1(
self.options['auth_url'],
self.storage_url,
self.options.get('project_name'),
self.options['username'],
self.token,
60)
expected = json.dumps({
'auth_url': self.options['auth_url'],
'username': self.options['username'],
'account': self.options.get('project_name'),
'issued': 1234.56,
'storage_url': self.storage_url,
'auth_token': self.token,
'expires': 1234.56 + 60,
}, sort_keys=True)
self.assertEqual(expected, auth_plugin.auth_ref.get_state())
self.assertEqual(expected, auth_plugin.get_auth_state())
def test_set_state(self):
auth_plugin = authv1.PasswordPlugin(**self.options)
self.assertIsNone(auth_plugin.auth_ref)
auth_plugin.auth_ref = object()
auth_plugin.set_auth_state(None)
self.assertIsNone(auth_plugin.get_auth_state())
state = json.dumps({
'auth_url': self.options['auth_url'],
'username': self.options['username'],
'account': self.options.get('project_name'),
'issued': 1234.56,
'storage_url': self.storage_url,
'auth_token': self.token,
'expires': None,
}, sort_keys=True)
auth_plugin.set_auth_state(state)
self.assertIsInstance(auth_plugin.auth_ref, authv1.AccessInfoV1)
self.assertEqual(self.options['username'],
auth_plugin.auth_ref.username)
self.assertEqual(self.options['auth_url'],
auth_plugin.auth_ref.auth_url)
self.assertEqual(self.storage_url, auth_plugin.auth_ref.storage_url)
self.assertEqual(self.options.get('project_name'), auth_plugin.account)
self.assertEqual(self.token, auth_plugin.auth_ref.auth_token)
self.assertEqual(1234.56, auth_plugin.auth_ref._issued)
self.assertIs(datetime.datetime, type(auth_plugin.auth_ref.issued))
self.assertIsNone(auth_plugin.auth_ref._expires)
self.assertIsNone(auth_plugin.auth_ref.expires)
class TestPluginLoadingWithAccount(TestDataWithAccount, TestPluginLoading):
pass
class TestPlugin(TestDataNoAccount, unittest.TestCase):
def setUp(self):
self.mock_session = mock.MagicMock()
self.mock_response = self.mock_session.get.return_value
self.mock_response.status_code = 200
self.mock_response.headers = {
'X-Auth-Token': self.token,
'X-Storage-Url': self.storage_url,
}
def test_get_access(self):
auth_plugin = authv1.PasswordPlugin(**self.options)
with mock.patch('swiftclient.authv1.time.time', return_value=1234.56):
access = auth_plugin.get_access(self.mock_session)
self.assertEqual(self.mock_session.get.mock_calls, [mock.call(
self.options['auth_url'], authenticated=False, log=False, headers={
'X-Auth-User': self.options['username'],
'X-Auth-Key': self.options['password'],
})])
self.assertEqual(self.options['username'], access.username)
# `openstack token issue` requires a user_id property
self.assertEqual(self.options['username'], access.user_id)
self.assertEqual(self.storage_url, access.storage_url)
self.assertEqual(self.token, access.auth_token)
self.assertEqual(1234.56, access._issued)
self.assertIs(datetime.datetime, type(auth_plugin.auth_ref.issued))
self.assertIsNone(access.expires)
# `openstack catalog list/show` require a catalog property
catalog = access.service_catalog.catalog
self.assertEqual('swift', catalog[0].get('name'))
self.assertEqual('object-store', catalog[0].get('type'))
self.assertIn('endpoints', catalog[0])
self.assertIn(self.storage_url, [
e.get('publicURL') for e in catalog[0]['endpoints']])
def test_get_access_with_expiry(self):
auth_plugin = authv1.PasswordPlugin(**self.options)
self.mock_response.headers['X-Auth-Token-Expires'] = '78.9'
with mock.patch('swiftclient.authv1.time.time',
return_value=1234.56) as mock_time:
access = auth_plugin.get_access(self.mock_session)
self.assertEqual(1234.56 + 78.9, access._expires)
self.assertIs(datetime.datetime,
type(auth_plugin.auth_ref.expires))
self.assertIs(True, access.will_expire_soon(90))
self.assertIs(False, access.will_expire_soon(60))
self.assertEqual(3, len(mock_time.mock_calls))
def test_get_access_bad_expiry(self):
auth_plugin = authv1.PasswordPlugin(**self.options)
self.mock_response.headers['X-Auth-Token-Expires'] = 'foo'
access = auth_plugin.get_access(self.mock_session)
self.assertIsNone(access.expires)
self.assertIs(False, access.will_expire_soon(60))
self.assertIs(False, access.will_expire_soon(1e20))
def test_get_access_bad_status(self):
auth_plugin = authv1.PasswordPlugin(**self.options)
self.mock_response.status_code = 401
self.assertRaises(exceptions.InvalidResponse,
auth_plugin.get_access, self.mock_session)
def test_get_access_missing_token(self):
auth_plugin = authv1.PasswordPlugin(**self.options)
self.mock_response.headers.pop('X-Auth-Token')
self.assertRaises(exceptions.InvalidResponse,
auth_plugin.get_access, self.mock_session)
def test_get_access_accepts_storage_token(self):
auth_plugin = authv1.PasswordPlugin(**self.options)
self.mock_response.headers.pop('X-Auth-Token')
self.mock_response.headers['X-Storage-Token'] = 'yet another token'
access = auth_plugin.get_access(self.mock_session)
self.assertEqual('yet another token', access.auth_token)
def test_get_access_missing_url(self):
auth_plugin = authv1.PasswordPlugin(**self.options)
self.mock_response.headers.pop('X-Storage-Url')
self.assertRaises(exceptions.InvalidResponse,
auth_plugin.get_access, self.mock_session)
def test_get_endpoint(self):
auth_plugin = authv1.PasswordPlugin(**self.options)
object_store_endpoint = auth_plugin.get_endpoint(
self.mock_session, service_type='object-store')
self.assertEqual(object_store_endpoint, self.expected_endpoint)
auth_endpoint = auth_plugin.get_endpoint(
self.mock_session, interface=plugin.AUTH_INTERFACE)
self.assertEqual(auth_endpoint, self.options['auth_url'])
with self.assertRaises(exceptions.EndpointNotFound) as exc_mgr:
auth_plugin.get_endpoint(self.mock_session)
self.assertEqual('public endpoint for None service not found',
str(exc_mgr.exception))
with self.assertRaises(exceptions.EndpointNotFound) as exc_mgr:
auth_plugin.get_endpoint(
self.mock_session, service_type='identity', region_name='DFW')
self.assertEqual(
'public endpoint for identity service in DFW region not found',
str(exc_mgr.exception))
with self.assertRaises(exceptions.EndpointNotFound) as exc_mgr:
auth_plugin.get_endpoint(
self.mock_session, service_type='image', service_name='glance')
self.assertEqual(
'public endpoint for image service named glance not found',
str(exc_mgr.exception))
with self.assertRaises(exceptions.EndpointNotFound) as exc_mgr:
auth_plugin.get_endpoint(
self.mock_session, service_type='compute', service_name='nova',
region_name='IAD')
self.assertEqual('public endpoint for compute service named nova in '
'IAD region not found', str(exc_mgr.exception))
class TestPluginWithAccount(TestDataWithAccount, TestPlugin):
pass
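# Illustrative sketch only (not part of the test suite above and never
# invoked): it shows how the 'v1password' plugin exercised by these tests
# might be loaded and queried outside of unit tests. The credential and
# endpoint values are placeholders, and the mocked session stands in for a
# real keystoneauth1 session so the sketch has no network side effects.
def _example_v1password_usage():  # pragma: no cover
    loader = loading.get_plugin_loader('v1password')
    auth = loader.load_from_options(
        auth_url='http://saio:8080/auth/v1.0',
        username='test:tester',
        password='testing')
    # Fake the auth response the same way TestPlugin.setUp does.
    session = mock.MagicMock()
    session.get.return_value.status_code = 200
    session.get.return_value.headers = {
        'X-Auth-Token': 'token',
        'X-Storage-Url': 'http://saio:8080/v1/AUTH_test',
    }
    # Resolves to the storage URL advertised by the (mocked) auth response.
    return auth.get_endpoint(session, service_type='object-store')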
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.test.utils import override_settings
from django.urls import reverse
from django.utils.http import urlencode
from django.utils.http import urlunquote
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.backups \
import tables as backup_tables
from openstack_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:project:backups:index')
class VolumeBackupsViewTests(test.TestCase):
@test.create_mocks({api.cinder: ('volume_list',
'volume_backup_list_paged')})
def _test_backups_index_paginated(self, marker, sort_dir, backups, url,
has_more, has_prev):
self.mock_volume_backup_list_paged.return_value = [backups,
has_more, has_prev]
self.mock_volume_list.return_value = self.cinder_volumes.list()
res = self.client.get(urlunquote(url))
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'horizon/common/_data_table_view.html')
self.mock_volume_backup_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=marker, sort_dir=sort_dir,
paginate=True)
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest())
return res
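# Note on the helper above: the tests drive it with ``marker`` set to the id
# of the first backup expected on the requested page, ``sort_dir`` set to
# "desc" when paging forward and "asc" when paging back, and
# ``has_more``/``has_prev`` as the flags the mocked volume_backup_list_paged
# call reports for the next/previous page links.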
@override_settings(API_RESULT_PAGE_SIZE=1)
def test_backups_index_paginated(self):
backups = self.cinder_volume_backups.list()
size = settings.API_RESULT_PAGE_SIZE
base_url = INDEX_URL
next = backup_tables.BackupsTable._meta.pagination_param
# get first page
expected_backups = backups[:size]
res = self._test_backups_index_paginated(
marker=None, sort_dir="desc", backups=expected_backups,
url=base_url, has_more=True, has_prev=False)
result = res.context['volume_backups_table'].data
self.assertItemsEqual(result, expected_backups)
# get second page
expected_backups = backups[size:2 * size]
marker = expected_backups[0].id
url = base_url + "?%s=%s" % (next, marker)
res = self._test_backups_index_paginated(
marker=marker, sort_dir="desc", backups=expected_backups, url=url,
has_more=True, has_prev=True)
result = res.context['volume_backups_table'].data
self.assertItemsEqual(result, expected_backups)
# get last page
expected_backups = backups[-size:]
marker = expected_backups[0].id
url = base_url + "?%s=%s" % (next, marker)
res = self._test_backups_index_paginated(
marker=marker, sort_dir="desc", backups=expected_backups, url=url,
has_more=False, has_prev=True)
result = res.context['volume_backups_table'].data
self.assertItemsEqual(result, expected_backups)
@override_settings(API_RESULT_PAGE_SIZE=1)
def test_backups_index_paginated_prev_page(self):
backups = self.cinder_volume_backups.list()
size = settings.API_RESULT_PAGE_SIZE
base_url = INDEX_URL
prev = backup_tables.BackupsTable._meta.prev_pagination_param
# prev from some page
expected_backups = backups[size:2 * size]
marker = expected_backups[0].id
url = base_url + "?%s=%s" % (prev, marker)
res = self._test_backups_index_paginated(
marker=marker, sort_dir="asc", backups=expected_backups, url=url,
has_more=True, has_prev=True)
result = res.context['volume_backups_table'].data
self.assertItemsEqual(result, expected_backups)
# back to first page
expected_backups = backups[:size]
marker = expected_backups[0].id
url = base_url + "?%s=%s" % (prev, marker)
res = self._test_backups_index_paginated(
marker=marker, sort_dir="asc", backups=expected_backups, url=url,
has_more=True, has_prev=False)
result = res.context['volume_backups_table'].data
self.assertItemsEqual(result, expected_backups)
@test.create_mocks({api.cinder: ('volume_backup_create',
'volume_get')})
def test_create_backup_available(self):
volume = self.volumes.first()
backup = self.cinder_volume_backups.first()
self.mock_volume_get.return_value = volume
self.mock_volume_backup_create.return_value = backup
formData = {'method': 'CreateBackupForm',
'tenant_id': self.tenant.id,
'volume_id': volume.id,
'container_name': backup.container_name,
'name': backup.name,
'description': backup.description}
url = reverse('horizon:project:volumes:create_backup',
args=[volume.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(error=0, warning=0)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_backup_create.assert_called_once_with(
test.IsHttpRequest(),
volume.id,
backup.container_name,
backup.name,
backup.description,
force=False)
@test.create_mocks({api.cinder: ('volume_backup_create',
'volume_get')})
def test_create_backup_in_use(self):
# The second volume in the cinder test volume data is in-use
volume = self.volumes.list()[1]
backup = self.cinder_volume_backups.first()
self.mock_volume_get.return_value = volume
self.mock_volume_backup_create.return_value = backup
formData = {'method': 'CreateBackupForm',
'tenant_id': self.tenant.id,
'volume_id': volume.id,
'container_name': backup.container_name,
'name': backup.name,
'description': backup.description}
url = reverse('horizon:project:volumes:create_backup',
args=[volume.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(error=0, warning=0)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_volume_get.assert_called_once_with(test.IsHttpRequest(),
volume.id)
self.mock_volume_backup_create.assert_called_once_with(
test.IsHttpRequest(),
volume.id,
backup.container_name,
backup.name,
backup.description,
force=True)
@test.create_mocks({api.cinder: ('volume_list',
'volume_backup_list_paged',
'volume_backup_delete')})
def test_delete_volume_backup(self):
vol_backups = self.cinder_volume_backups.list()
volumes = self.cinder_volumes.list()
backup = self.cinder_volume_backups.first()
self.mock_volume_backup_list_paged.return_value = [vol_backups,
False, False]
self.mock_volume_list.return_value = volumes
self.mock_volume_backup_delete.return_value = None
formData = {'action':
'volume_backups__delete__%s' % backup.id}
res = self.client.post(INDEX_URL, formData)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.assertMessageCount(success=1)
self.mock_volume_backup_list_paged.assert_called_once_with(
test.IsHttpRequest(), marker=None, sort_dir='desc',
paginate=True)
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest())
self.mock_volume_backup_delete.assert_called_once_with(
test.IsHttpRequest(), backup.id)
@test.create_mocks({api.cinder: ('volume_backup_get',
'volume_get')})
def test_volume_backup_detail_get(self):
backup = self.cinder_volume_backups.first()
volume = self.cinder_volumes.get(id=backup.volume_id)
self.mock_volume_backup_get.return_value = backup
self.mock_volume_get.return_value = volume
url = reverse('horizon:project:backups:detail',
args=[backup.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertEqual(res.context['backup'].id, backup.id)
self.mock_volume_backup_get.assert_called_once_with(
test.IsHttpRequest(), backup.id)
self.mock_volume_get.assert_called_once_with(
test.IsHttpRequest(), backup.volume_id)
@test.create_mocks({api.cinder: ('volume_backup_get',)})
def test_volume_backup_detail_get_with_exception(self):
# Test to verify redirect if get volume backup fails
backup = self.cinder_volume_backups.first()
self.mock_volume_backup_get.side_effect = self.exceptions.cinder
url = reverse('horizon:project:backups:detail',
args=[backup.id])
res = self.client.get(url)
self.assertNoFormErrors(res)
self.assertMessageCount(error=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
self.mock_volume_backup_get.assert_called_once_with(
test.IsHttpRequest(), backup.id)
@test.create_mocks({api.cinder: ('volume_backup_get',
'volume_get')})
def test_volume_backup_detail_with_missing_volume(self):
# Test to check page still loads even if volume is deleted
backup = self.cinder_volume_backups.first()
self.mock_volume_backup_get.return_value = backup
self.mock_volume_get.side_effect = self.exceptions.cinder
url = reverse('horizon:project:backups:detail',
args=[backup.id])
res = self.client.get(url)
self.assertTemplateUsed(res, 'horizon/common/_detail.html')
self.assertEqual(res.context['backup'].id, backup.id)
self.mock_volume_backup_get.assert_called_once_with(
test.IsHttpRequest(), backup.id)
self.mock_volume_get.assert_called_once_with(
test.IsHttpRequest(), backup.volume_id)
@test.create_mocks({api.cinder: ('volume_list',
'volume_backup_restore')})
def test_restore_backup(self):
mock_backup = self.cinder_volume_backups.first()
volumes = self.cinder_volumes.list()
expected_volumes = [vol for vol in volumes
if vol.status == 'available']
self.mock_volume_list.return_value = expected_volumes
self.mock_volume_backup_restore.return_value = mock_backup
formData = {'method': 'RestoreBackupForm',
'backup_id': mock_backup.id,
'backup_name': mock_backup.name,
'volume_id': mock_backup.volume_id}
url = reverse('horizon:project:backups:restore',
args=[mock_backup.id])
url += '?%s' % urlencode({'backup_name': mock_backup.name,
'volume_id': mock_backup.volume_id})
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
self.assertRedirectsNoFollow(res,
reverse('horizon:project:volumes:index'))
self.mock_volume_list.assert_called_once_with(test.IsHttpRequest(),
{'status': 'available'})
self.mock_volume_backup_restore.assert_called_once_with(
test.IsHttpRequest(), mock_backup.id, mock_backup.volume_id)
|
|
from django.conf import settings
from django.db import connection, router, transaction, connections
from django.db.backends import util
from django.db.models import signals, get_model
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.related import RelatedObject
from django.db.models.query import QuerySet
from django.db.models.query_utils import QueryWrapper
from django.db.models.deletion import CASCADE
from django.utils.encoding import smart_unicode
from django.utils.translation import (ugettext_lazy as _, string_concat,
ungettext, ugettext)
from django.utils.functional import curry
from django.core import exceptions
from django import forms
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
except AttributeError:
# If it doesn't have a split it's actually a model class
app_label = relation._meta.app_label
model_name = relation._meta.object_name
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_model returns None, it means that the related
# model isn't loaded yet, so we need to pend the relation until the class
# is prepared.
model = get_model(app_label, model_name, False)
if model:
operation(field, model, cls)
else:
key = (app_label, model_name)
value = (cls, field, operation)
pending_lookups.setdefault(key, []).append(value)
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
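# Illustrative note (hypothetical model names, not used anywhere below): with
# the machinery above, a definition such as
#
#     class Tag(Model):
#         parent = ForeignKey("self", null=True)        # recursive form
#         article = ForeignKey("Article")               # same-app form
#         author = ForeignKey("accounts.Author")        # "app.Model" form
#
# queues each unresolved string in pending_lookups keyed by
# (app_label, model_name); when class_prepared later fires for the named
# model, do_pending_lookups() pops the entry and runs the stored operation
# (for related fields, resolve_related_class() defined in RelatedField below).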
#HACK
class RelatedField(object):
def contribute_to_class(self, cls, name):
sup = super(RelatedField, self)
# Store the opts for related_query_name()
self.opts = cls._meta
if hasattr(sup, 'contribute_to_class'):
sup.contribute_to_class(cls, name)
if not cls._meta.abstract and self.rel.related_name:
self.rel.related_name = self.rel.related_name % {
'class': cls.__name__.lower(),
'app_label': cls._meta.app_label.lower(),
}
other = self.rel.to
if isinstance(other, basestring) or other._meta.pk is None:
def resolve_related_class(field, model, cls):
field.rel.to = model
field.do_related_class(model, cls)
add_lazy_relation(cls, self, other, resolve_related_class)
else:
self.do_related_class(other, cls)
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
if self.verbose_name is None:
self.verbose_name = self.rel.to._meta.verbose_name
self.rel.field_name = self.rel.field_name or self.rel.to._meta.pk.name
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
self.related = RelatedObject(other, cls, self)
if not cls._meta.abstract:
self.contribute_to_related_class(other, self.related)
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
# FIXME: lt and gt are explicitly allowed to make
# get_(next/prev)_by_date work; other lookups are not allowed since that
# gets messy pretty quick. This is a good candidate for some refactoring
# in the future.
if lookup_type in ['exact', 'gt', 'lt', 'gte', 'lte']:
return self._pk_trace(value, 'get_prep_lookup', lookup_type)
if lookup_type in ('range', 'in'):
return [self._pk_trace(v, 'get_prep_lookup', lookup_type) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Related Field has invalid lookup: %s" % lookup_type)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
# FIXME: lt and gt are explicitly allowed to make
# get_(next/prev)_by_date work; other lookups are not allowed since that
# gets messy pretty quick. This is a good candidate for some refactoring
# in the future.
if lookup_type in ['exact', 'gt', 'lt', 'gte', 'lte']:
return [self._pk_trace(value, 'get_db_prep_lookup', lookup_type,
connection=connection, prepared=prepared)]
if lookup_type in ('range', 'in'):
return [self._pk_trace(v, 'get_db_prep_lookup', lookup_type,
connection=connection, prepared=prepared)
for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Related Field has invalid lookup: %s" % lookup_type)
def _pk_trace(self, value, prep_func, lookup_type, **kwargs):
# Value may be a primary key, or an object held in a relation.
# If it is an object, then we need to get the primary key value for
# that object. In certain conditions (especially one-to-one relations),
# the primary key may itself be an object - so we need to keep drilling
# down until we hit a value that can be used for a comparison.
v = value
try:
while True:
v = getattr(v, v._meta.pk.name)
except AttributeError:
pass
except exceptions.ObjectDoesNotExist:
v = None
field = self
while field.rel:
if hasattr(field.rel, 'field_name'):
field = field.rel.to._meta.get_field(field.rel.field_name)
else:
field = field.rel.to._meta.pk
if lookup_type in ('range', 'in'):
v = [v]
v = getattr(field, prep_func)(lookup_type, v, **kwargs)
if isinstance(v, list):
v = v[0]
return v
def related_query_name(self):
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_name or self.opts.object_name.lower()
class SingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class pointed to by a related field.
# In the example "place.restaurant", the restaurant attribute is a
# SingleRelatedObjectDescriptor instance.
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_name)
except AttributeError:
params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
db = router.db_for_read(self.related.model, instance=instance)
rel_obj = self.related.model._base_manager.using(db).get(**params)
setattr(instance, self.cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError("%s must be accessed via instance" % self.related.opts.object_name)
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and not self.related.field.null:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.related.get_accessor_name()))
elif value is not None and not isinstance(value, self.related.model):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.related.get_accessor_name(), self.related.opts.object_name))
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
(value, instance._state.db, value._state.db))
# Set the value of the related field to the value of the related object's related field
setattr(value, self.related.field.attname, getattr(instance, self.related.field.rel.get_related_field().attname))
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
def __init__(self, field_with_rel):
self.field = field_with_rel
def __get__(self, instance, instance_type=None):
if instance is None:
return self
cache_name = self.field.get_cache_name()
try:
return getattr(instance, cache_name)
except AttributeError:
val = getattr(instance, self.field.attname)
if val is None:
# If NULL is an allowed value, return it.
if self.field.null:
return None
raise self.field.rel.to.DoesNotExist
other_field = self.field.rel.get_related_field()
if other_field.rel:
params = {'%s__pk' % self.field.rel.field_name: val}
else:
params = {'%s__exact' % self.field.rel.field_name: val}
# If the related manager indicates that it should be used for
# related fields, respect that.
rel_mgr = self.field.rel.to._default_manager
db = router.db_for_read(self.field.rel.to, instance=instance)
if getattr(rel_mgr, 'use_for_related_fields', False):
rel_obj = rel_mgr.using(db).get(**params)
else:
rel_obj = QuerySet(self.field.rel.to).using(db).get(**params)
setattr(instance, cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError("%s must be accessed via instance" % self._field.name)
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and not self.field.null:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name))
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.field.name, self.field.rel.to._meta.object_name))
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
(value, instance._state.db, value._state.db))
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.field.get_cache_name(), None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related:
cache_name = self.field.related.get_cache_name()
try:
delattr(related, cache_name)
except AttributeError:
pass
# Set the value of the related field
try:
val = getattr(value, self.field.rel.get_related_field().attname)
except AttributeError:
val = None
setattr(instance, self.field.attname, val)
# Since we already know what the related object is, seed the related
# object cache now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.field.get_cache_name(), value)
class ForeignRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ForeignKey pointed at them by
# some other model. In the example "poll.choice_set", the choice_set
# attribute is a ForeignRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
return self
return self.create_manager(instance,
self.related.model._default_manager.__class__)
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
manager = self.__get__(instance)
# If the foreign key can support nulls, then completely clear the related set.
# Otherwise, just move the named objects into the set.
if self.related.field.null:
manager.clear()
manager.add(*value)
def delete_manager(self, instance):
"""
Returns a queryset based on the related model's base manager (rather
than the default manager, as returned by __get__). Used by
Model.delete().
"""
return self.create_manager(instance,
self.related.model._base_manager.__class__)
def create_manager(self, instance, superclass):
"""
Creates the managers used by other methods (__get__() and delete()).
"""
rel_field = self.related.field
rel_model = self.related.model
class RelatedManager(superclass):
def get_query_set(self):
db = self._db or router.db_for_read(rel_model, instance=instance)
return superclass.get_query_set(self).using(db).filter(**(self.core_filters))
def add(self, *objs):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
setattr(obj, rel_field.name, instance)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs.update({rel_field.name: instance})
db = router.db_for_write(rel_model, instance=instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
# Update kwargs with the related object that this
# ForeignRelatedObjectsDescriptor knows about.
kwargs.update({rel_field.name: instance})
db = router.db_for_write(rel_model, instance=instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel_field.null:
def remove(self, *objs):
val = getattr(instance, rel_field.rel.get_related_field().attname)
for obj in objs:
# Is obj actually part of this descriptor set?
if getattr(obj, rel_field.attname) == val:
setattr(obj, rel_field.name, None)
obj.save()
else:
raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, instance))
remove.alters_data = True
def clear(self):
for obj in self.all():
setattr(obj, rel_field.name, None)
obj.save()
clear.alters_data = True
manager = RelatedManager()
attname = rel_field.rel.get_related_field().name
manager.core_filters = {'%s__%s' % (rel_field.name, attname):
getattr(instance, attname)}
manager.model = self.related.model
return manager
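# Illustrative note for the descriptor above, using the poll/choice example
# from its comments: ``poll.choice_set`` returns a RelatedManager whose
# core_filters restrict the queryset to rows whose foreign key matches
# ``poll``; add()/create() set that foreign key on the child and save it,
# while remove() and clear() (only offered when the FK is nullable) null it
# out instead of deleting the child rows.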
def create_many_related_manager(superclass, rel=False):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
through = rel.through
class ManyRelatedManager(superclass):
def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
join_table=None, source_field_name=None, target_field_name=None,
reverse=False):
super(ManyRelatedManager, self).__init__()
self.core_filters = core_filters
self.model = model
self.symmetrical = symmetrical
self.instance = instance
self.source_field_name = source_field_name
self.target_field_name = target_field_name
self.through = through
self._pk_val = self.instance.pk
self.reverse = reverse
if self._pk_val is None:
raise ValueError("%r instance needs to have a primary key value before a many-to-many relationship can be used." % instance.__class__.__name__)
def get_query_set(self):
db = self._db or router.db_for_read(self.instance.__class__, instance=self.instance)
return superclass.get_query_set(self).using(db)._next_is_sticky().filter(**(self.core_filters))
# If the ManyToMany relation has an intermediary model,
# the add and remove methods do not exist.
if rel.through._meta.auto_created:
def add(self, *objs):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
self._remove_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
if self.symmetrical:
self._remove_items(self.target_field_name, self.source_field_name, *objs)
remove.alters_data = True
def clear(self):
self._clear_items(self.source_field_name)
# If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
if self.symmetrical:
self._clear_items(self.target_field_name)
clear.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not rel.through._meta.auto_created:
opts = through._meta
raise AttributeError("Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = \
super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
# join_table: name of the m2m link table
# source_field_name: the PK fieldname in join_table for the source object
# target_field_name: the PK fieldname in join_table for the target object
# *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError('Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db))
new_ids.add(obj.pk)
elif isinstance(obj, Model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
vals = vals.filter(**{
source_field_name: self._pk_val,
'%s__in' % target_field_name: new_ids,
})
new_ids = new_ids - set(vals)
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
# Add the ones that aren't there already
for obj_id in new_ids:
self.through._default_manager.using(db).create(**{
'%s_id' % source_field_name: self._pk_val,
'%s_id' % target_field_name: obj_id,
})
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
# source_field_name: the PK fieldname in join_table for the source object
# target_field_name: the PK fieldname in join_table for the target object
# *objs - objects to remove
# If there aren't any objects, there is nothing to do.
if objs:
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
old_ids.add(obj.pk)
else:
old_ids.add(obj)
# Work out what DB we're operating on
db = router.db_for_write(self.through, instance=self.instance)
# Send a signal to the other end if need be.
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are deleting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
# Remove the specified objects from the join table
self.through._default_manager.using(db).filter(**{
source_field_name: self._pk_val,
'%s__in' % target_field_name: old_ids
}).delete()
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are deleting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
def _clear_items(self, source_field_name):
db = router.db_for_write(self.through, instance=self.instance)
# source_col_name: the PK colname in join_table for the source object
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are clearing the
# duplicate data rows for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
self.through._default_manager.using(db).filter(**{
source_field_name: self._pk_val
}).delete()
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are clearing the
# duplicate data rows for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
return ManyRelatedManager
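# Illustrative note for the factory above: each mutating method wraps its
# join-table writes in a pair of m2m_changed signals -- "pre_add"/"post_add",
# "pre_remove"/"post_remove", "pre_clear"/"post_clear" -- and skips the
# signal for the mirror write that a symmetrical self-referencing relation
# performs, as the inline comments note; add() and remove() themselves are
# only attached when the through model is auto-created.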
class ManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField pointed at them by
# some other model (rather than having a ManyToManyField themselves).
# In the example "publication.article_set", the article_set attribute is a
# ManyRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
return self
# Dynamically create a class that subclasses the related
# model's default manager.
rel_model = self.related.model
superclass = rel_model._default_manager.__class__
RelatedManager = create_many_related_manager(superclass, self.related.field.rel)
manager = RelatedManager(
model=rel_model,
core_filters={'%s__pk' % self.related.field.name: instance._get_pk_val()},
instance=instance,
symmetrical=False,
source_field_name=self.related.field.m2m_reverse_field_name(),
target_field_name=self.related.field.m2m_field_name(),
reverse=True
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
if not self.related.field.rel.through._meta.auto_created:
opts = self.related.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField defined in their
# model (rather than having another model pointed *at* them).
# In the example "article.publications", the publications attribute is a
# ReverseManyRelatedObjectsDescriptor instance.
def __init__(self, m2m_field):
self.field = m2m_field
def _through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.field.rel.through
through = property(_through)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
# Dynamically create a class that subclasses the related
# model's default manager.
rel_model = self.field.rel.to
superclass = rel_model._default_manager.__class__
RelatedManager = create_many_related_manager(superclass, self.field.rel)
manager = RelatedManager(
model=rel_model,
core_filters={'%s__pk' % self.field.related_query_name(): instance._get_pk_val()},
instance=instance,
symmetrical=self.field.rel.symmetrical,
source_field_name=self.field.m2m_field_name(),
target_field_name=self.field.m2m_reverse_field_name(),
reverse=False
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
if not self.field.rel.through._meta.auto_created:
opts = self.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
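# Illustrative note for the two m2m descriptors above: assigning a list to
# either side (e.g. ``article.publications = [...]`` in the example from the
# comments) is implemented as clear() followed by add(*value), so the join
# table is rewritten rather than diffed, and the assignment is refused when a
# manually specified intermediary ("through") model is in use.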
class ManyToOneRel(object):
def __init__(self, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
self.to, self.field_name = to, field_name
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.multiple = True
self.parent_link = parent_link
self.on_delete = on_delete
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name and self.related_name[-1] == '+'
def get_related_field(self):
"""
Returns the Field in the 'to' object to which this relationship is
tied.
"""
data = self.to._meta.get_field_by_name(self.field_name)
if not data[2]:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return data[0]
class OneToOneRel(ManyToOneRel):
def __init__(self, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None):
super(OneToOneRel, self).__init__(to, field_name,
related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete
)
self.multiple = False
class ManyToManyRel(object):
def __init__(self, to, related_name=None, limit_choices_to=None,
symmetrical=True, through=None):
self.to = to
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.symmetrical = symmetrical
self.multiple = True
self.through = through
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name and self.related_name[-1] == '+'
def get_related_field(self):
"""
Returns the field in the 'to' object to which this relationship is tied
(this is always the primary key on the target model). Provided for
symmetry with ManyToOneRel.
"""
return self.to._meta.pk
class ForeignKey(RelatedField, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('Model %(model)s with pk %(pk)r does not exist.')
}
description = _("Foreign Key (type determined by related field)")
def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
try:
to_name = to._meta.object_name.lower()
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
# For backwards compatibility purposes, we need to *try* and set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
if 'db_index' not in kwargs:
kwargs['db_index'] = True
kwargs['rel'] = rel_class(to, to_field,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
Field.__init__(self, **kwargs)
def validate(self, value, model_instance):
if self.rel.parent_link:
return
super(ForeignKey, self).validate(value, model_instance)
if value is None:
return
using = router.db_for_read(model_instance.__class__, instance=model_instance)
qs = self.rel.to._default_manager.using(using).filter(
**{self.rel.field_name: value}
)
qs = qs.complex_filter(self.rel.limit_choices_to)
if not qs.exists():
raise exceptions.ValidationError(self.error_messages['invalid'] % {
'model': self.rel.to._meta.verbose_name, 'pk': value})
def get_attname(self):
return '%s_id' % self.name
def get_validator_unique_lookup_type(self):
return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.rel.to):
return getattr(field_default, self.rel.get_related_field().attname)
return field_default
def get_db_prep_save(self, value, connection):
if value == '' or value is None:
return None
else:
return self.rel.get_related_field().get_db_prep_save(value,
connection=connections[router.db_for_read(self.rel.to)])
def value_to_string(self, obj):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# we have to check that the length of choices is *2*, not 1,
# because SelectFields always have an initial "blank" value.
if not self.blank and self.choices:
choice_list = self.get_choices_default()
if len(choice_list) == 2:
return smart_unicode(choice_list[1][0])
return Field.value_to_string(self, obj)
def contribute_to_class(self, cls, name):
super(ForeignKey, self).contribute_to_class(cls, name)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "o2m")
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# don't get a related descriptor.
if not self.rel.is_hidden():
setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
if self.rel.field_name is None:
self.rel.field_name = cls._meta.pk.name
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
'to_field_name': self.rel.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
def db_type(self, connection):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
# However, if the database needs similar types for key fields, the only
# thing we can do is make AutoField an IntegerField.
rel_field = self.rel.get_related_field()
return rel_field.related_db_type(connection=connections[router.db_for_read(rel_field.model)])
class OneToOneField(ForeignKey):
"""
A OneToOneField is essentially the same as a ForeignKey, with the exception
that always carries a "unique" constraint with it and the reverse relation
always returns the object pointed to (since there will only ever be one),
rather than returning a list.
"""
description = _("One-to-one relationship")
def __init__(self, to, to_field=None, **kwargs):
kwargs['unique'] = True
super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(),
SingleRelatedObjectDescriptor(related))
def formfield(self, **kwargs):
if self.rel.parent_link:
return None
return super(OneToOneField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.rel.to):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
managed = True
if isinstance(field.rel.to, basestring) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
to_model = field.rel.to
to = to_model.split('.')[-1]
def set_managed(field, model, cls):
field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
add_lazy_relation(klass, field, to_model, set_managed)
elif isinstance(field.rel.to, basestring):
to = klass._meta.object_name
to_model = klass
managed = klass._meta.managed
else:
to = field.rel.to._meta.object_name
to_model = field.rel.to
managed = klass._meta.managed or to_model._meta.managed
name = '%s_%s' % (klass._meta.object_name, field.name)
if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
from_ = 'from_%s' % to.lower()
to = 'to_%s' % to.lower()
else:
from_ = klass._meta.object_name.lower()
to = to.lower()
meta = type('Meta', (object,), {
'db_table': field._get_m2m_db_table(klass._meta),
'managed': managed,
'auto_created': klass,
'app_label': klass._meta.app_label,
'unique_together': (from_, to),
'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
})
# Construct and return the new class.
return type(name, (models.Model,), {
'Meta': meta,
'__module__': klass.__module__,
from_: models.ForeignKey(klass, related_name='%s+' % name),
to: models.ForeignKey(to_model, related_name='%s+' % name)
})
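# Illustrative note for the factory above (hypothetical models): for
# ``class Article: publications = ManyToManyField(Publication)`` the
# auto-created through model is named Article_publications and carries two
# foreign keys, ``article`` and ``publication`` (``from_*``/``to_*`` names
# when the relation points back at the same model), with unique_together on
# that pair and the db_table supplied by _get_m2m_db_table().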
class ManyToManyField(RelatedField, Field):
description = _("Many-to-many relationship")
def __init__(self, to, **kwargs):
try:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = ManyToManyRel(to,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', to==RECURSIVE_RELATIONSHIP_CONSTANT),
through=kwargs.pop('through', None))
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
Field.__init__(self, **kwargs)
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
self.help_text = string_concat(self.help_text, ' ', msg)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"Function that can be curried to provide the m2m table name for this relation"
if self.rel.through is not None:
return self.rel.through._meta.db_table
elif self.db_table:
return self.db_table
else:
return util.truncate_name('%s_%s' % (opts.db_table, self.name),
connection.ops.max_name_length())
def _get_m2m_attr(self, related, attr):
"Function that can be curried to provide the source accessor or DB column name for the m2m table"
cache_attr = '_m2m_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
for f in self.rel.through._meta.fields:
if hasattr(f,'rel') and f.rel and f.rel.to == related.model:
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"Function that can be curried to provide the related accessor or DB column name for the m2m table"
cache_attr = '_m2m_reverse_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
for f in self.rel.through._meta.fields:
if hasattr(f,'rel') and f.rel and f.rel.to == related.parent_model:
if related.model == related.parent_model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
else:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_unicode(data)
def contribute_to_class(self, cls, name):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
self.rel.related_name = "%s_rel_+" % name
super(ManyToManyField, self).contribute_to_class(cls, name)
# The intermediate m2m model is not auto created if:
# 1) There is a manually specified intermediate, or
# 2) The class owning the m2m field is abstract.
if not self.rel.through and not cls._meta.abstract:
self.rel.through = create_many_to_many_intermediary_model(self, cls)
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
# Set up the accessor for the m2m table name for the relation
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
# Populate some necessary rel arguments so that cross-app relations
# work correctly.
if isinstance(self.rel.through, basestring):
def resolve_through_model(field, model, cls):
field.rel.through = model
add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "m2m")
def contribute_to_related_class(self, cls, related):
# Internal M2Ms (i.e., those with a related name ending with '+')
# don't get a related descriptor.
if not self.rel.is_hidden():
setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
# Set up the accessors for the column names on the m2m table
self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelMultipleChoiceField,
'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to)
}
defaults.update(kwargs)
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
initial = defaults['initial']
if callable(initial):
initial = initial()
defaults['initial'] = [i._get_pk_val() for i in initial]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
|
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import scipy.linalg
from functools import reduce
from pyscf import gto
from pyscf import lib
from pyscf import scf
from pyscf import fci
from pyscf import ci
from pyscf.ci import gcisd
from pyscf.ci import ucisd
from pyscf import ao2mo
class KnownValues(unittest.TestCase):
def test_contract(self):
'''cross check with UCISD'''
mol = gto.M()
mol.nelectron = 6
nocc, nvir = mol.nelectron//2, 4
nmo = nocc + nvir
nmo_pair = nmo*(nmo+1)//2
mf = scf.UHF(mol)
numpy.random.seed(12)
mf._eri = numpy.random.random(nmo_pair*(nmo_pair+1)//2) * .2
mf.mo_coeff = numpy.random.random((2,nmo,nmo))
mf.mo_energy = [numpy.arange(0., nmo)]*2
mf.mo_occ = numpy.zeros((2,nmo))
mf.mo_occ[:,:nocc] = 1
h1 = numpy.random.random((nmo,nmo)) * .1
h1 = h1 + h1.T + numpy.diag(numpy.arange(nmo))
mf.get_hcore = lambda *args: h1
mf1 = scf.addons.convert_to_ghf(mf)
mf1.get_hcore = lambda *args: scipy.linalg.block_diag(h1, h1)
gci = ci.GCISD(mf1)
c2 = numpy.random.random((nocc*2,nocc*2,nvir*2,nvir*2)) * .1 - .1
c2 = c2 - c2.transpose(0,1,3,2)
c2 = c2 - c2.transpose(1,0,2,3)
c1 = numpy.random.random((nocc*2,nvir*2)) * .1
c0 = .5
civec = gci.amplitudes_to_cisdvec(c0, c1, c2)
civecref = gci.contract(civec, gci.ao2mo())
c0ref, c1ref, c2ref = gci.cisdvec_to_amplitudes(civecref)
c1ref = gci.spin2spatial(c1ref)
c2ref = gci.spin2spatial(c2ref)
c1 = gci.spin2spatial(c1)
c2 = gci.spin2spatial(c2)
myci = ci.UCISD(mf)
civec = myci.amplitudes_to_cisdvec(c0, c1, c2)
cinew = myci.contract(civec, myci.ao2mo())
c0new, c1new, c2new = myci.cisdvec_to_amplitudes(cinew)
self.assertAlmostEqual(abs(c0new -c0ref ).max(), 0, 12)
self.assertAlmostEqual(abs(c1new[0]-c1ref[0]).max(), 0, 12)
self.assertAlmostEqual(abs(c1new[1]-c1ref[1]).max(), 0, 12)
self.assertAlmostEqual(abs(c2new[0]-c2ref[0]).max(), 0, 12)
self.assertAlmostEqual(abs(c2new[1]-c2ref[1]).max(), 0, 12)
self.assertAlmostEqual(abs(c2new[2]-c2ref[2]).max(), 0, 12)
self.assertAlmostEqual(lib.finger(cinew), -102.17887236599671, 9)
def test_from_fcivec(self):
numpy.random.seed(12)
nocc = 3
nvir = 5
nmo = nocc + nvir
orbspin = numpy.zeros(nmo*2, dtype=int)
orbspin[1::2] = 1
c1a = numpy.random.random((nocc,nvir))
c1b = numpy.random.random((nocc,nvir))
c2aa = numpy.random.random((nocc,nocc,nvir,nvir))
c2bb = numpy.random.random((nocc,nocc,nvir,nvir))
c2ab = numpy.random.random((nocc,nocc,nvir,nvir))
c2ab = c2ab + c2ab.transpose(1,0,3,2)
c1 = gcisd.spatial2spin((c1a, c1b), orbspin)
c2 = gcisd.spatial2spin((c2aa, c2ab, c2bb), orbspin)
cisdvec = gcisd.amplitudes_to_cisdvec(1., c1, c2)
fcivec = gcisd.to_fcivec(cisdvec, nocc*2, orbspin)
cisdvec1 = gcisd.from_fcivec(fcivec, nocc*2, orbspin)
self.assertAlmostEqual(abs(cisdvec-cisdvec1).max(), 0, 12)
ci1 = gcisd.to_fcivec(cisdvec1, nocc*2, orbspin)
self.assertAlmostEqual(abs(fcivec-ci1).max(), 0, 12)
vec1 = gcisd.from_ucisdvec(ucisd.amplitudes_to_cisdvec(1, (c1a,c1b), (c2aa,c2ab,c2bb)),
nocc*2, orbspin)
self.assertAlmostEqual(abs(cisdvec - vec1).max(), 0, 12)
c1 = gcisd.spatial2spin((c1a, c1a), orbspin)
c2aa = c2ab - c2ab.transpose(1,0,2,3)
c2 = gcisd.spatial2spin((c2aa, c2ab, c2aa), orbspin)
cisdvec = gcisd.amplitudes_to_cisdvec(1., c1, c2)
vec1 = gcisd.from_rcisdvec(ci.cisd.amplitudes_to_cisdvec(1, c1a, c2ab), nocc*2, orbspin)
self.assertTrue(numpy.all(cisdvec == vec1))
def test_h4(self):
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0.,-1. , 1. )],
]
mol.charge = 2
mol.spin = 2
mol.basis = '3-21g'
mol.build()
mf = scf.GHF(mol).run(conv_tol=1e-14)
myci = ci.GCISD(mf)
myci.kernel()
self.assertAlmostEqual(myci.e_tot, -0.86423570617209888, 8)
mf = scf.RHF(mol).run(conv_tol=1e-14)
myci = ci.GCISD(mf)
myci.kernel()
self.assertAlmostEqual(myci.e_tot, -0.86423570617209888, 8)
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0.,-1. , 1. )],
]
mol.charge = 2
mol.spin = 0
mol.basis = '3-21g'
mol.build()
mf = scf.UHF(mol).run(conv_tol=1e-14)
myci = ci.GCISD(mf)
myci.kernel()
self.assertAlmostEqual(myci.e_tot, -0.86423570617209888, 8)
mf = scf.UHF(mol).run(conv_tol=1e-14)
gmf = scf.addons.convert_to_ghf(mf)
ehf0 = mf.e_tot - mol.energy_nuc()
myci = gcisd.GCISD(gmf)
eris = myci.ao2mo()
ecisd = myci.kernel(eris=eris)[0]
eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0])
eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1])
eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0],
mf.mo_coeff[1], mf.mo_coeff[1]])
h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
efci, fcivec = fci.direct_uhf.kernel((h1a,h1b), (eri_aa,eri_ab,eri_bb),
h1a.shape[0], mol.nelec)
self.assertAlmostEqual(myci.e_tot-mol.energy_nuc(), efci, 9)
dm1ref, dm2ref = fci.direct_uhf.make_rdm12s(fcivec, h1a.shape[0], mol.nelec)
nmo = myci.nmo
rdm1 = myci.make_rdm1(myci.ci, nmo, mol.nelectron)
rdm2 = myci.make_rdm2(myci.ci, nmo, mol.nelectron)
idxa = eris.orbspin == 0
idxb = eris.orbspin == 1
self.assertAlmostEqual(abs(dm1ref[0] - rdm1[idxa][:,idxa]).max(), 0, 6)
self.assertAlmostEqual(abs(dm1ref[1] - rdm1[idxb][:,idxb]).max(), 0, 6)
self.assertAlmostEqual(abs(dm2ref[0] - rdm2[idxa][:,idxa][:,:,idxa][:,:,:,idxa]).max(), 0, 6)
self.assertAlmostEqual(abs(dm2ref[1] - rdm2[idxa][:,idxa][:,:,idxb][:,:,:,idxb]).max(), 0, 6)
self.assertAlmostEqual(abs(dm2ref[2] - rdm2[idxb][:,idxb][:,:,idxb][:,:,:,idxb]).max(), 0, 6)
def test_h4_a(self):
'''Compare to FCI'''
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0.,-1. , 1. )],
]
mol.charge = -2
mol.spin = 2
mol.basis = '3-21g'
mol.build()
mf = scf.UHF(mol).run(conv_tol=1e-14)
ehf0 = mf.e_tot - mol.energy_nuc()
gmf = scf.addons.convert_to_ghf(mf)
myci = ci.GCISD(gmf)
eris = myci.ao2mo()
numpy.random.seed(12)
nocca, noccb = mol.nelec
nmo = mol.nao_nr()
nvira = nmo - nocca
nvirb = nmo - noccb
#cisdvec = myci.get_init_guess(eris)[1]
c1a = .1 * numpy.random.random((nocca,nvira))
c1b = .1 * numpy.random.random((noccb,nvirb))
c2aa = .1 * numpy.random.random((nocca,nocca,nvira,nvira))
c2bb = .1 * numpy.random.random((noccb,noccb,nvirb,nvirb))
c2ab = .1 * numpy.random.random((nocca,noccb,nvira,nvirb))
c1 = myci.spatial2spin((c1a, c1b))
c2 = myci.spatial2spin((c2aa, c2ab, c2bb))
cisdvec = myci.amplitudes_to_cisdvec(1., c1, c2)
self.assertEqual(cisdvec.size, myci.vector_size())
hcisd0 = myci.contract(cisdvec, eris)
eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0])
eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1])
eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0],
mf.mo_coeff[1], mf.mo_coeff[1]])
h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
h2e = fci.direct_uhf.absorb_h1e((h1a,h1b), (eri_aa,eri_ab,eri_bb),
h1a.shape[0], mol.nelec, .5)
fcivec = myci.to_fcivec(cisdvec, mol.nelectron, eris.orbspin)
hci1 = fci.direct_uhf.contract_2e(h2e, fcivec, h1a.shape[0], mol.nelec)
hci1 -= ehf0 * fcivec
hcisd1 = myci.from_fcivec(hci1, mol.nelectron, eris.orbspin)
self.assertAlmostEqual(abs(hcisd1-hcisd0).max(), 0, 9)
hdiag0 = myci.make_diagonal(eris)
hdiag0 = myci.to_fcivec(hdiag0, mol.nelectron, eris.orbspin).ravel()
hdiag0 = myci.from_fcivec(hdiag0, mol.nelectron, eris.orbspin).ravel()
hdiag1 = fci.direct_uhf.make_hdiag((h1a,h1b), (eri_aa,eri_ab,eri_bb),
h1a.shape[0], mol.nelec)
hdiag1 = myci.from_fcivec(hdiag1, mol.nelectron, eris.orbspin).ravel()
self.assertAlmostEqual(abs(abs(hdiag0)-abs(hdiag1)).max(), 0, 9)
ecisd = myci.kernel()[0]
efci = fci.direct_uhf.kernel((h1a,h1b), (eri_aa,eri_ab,eri_bb),
h1a.shape[0], mol.nelec)[0]
self.assertAlmostEqual(ecisd, -0.037067274690894436, 9)
self.assertTrue(myci.e_tot-mol.energy_nuc() - efci < 0.002)
def test_rdm_h4(self):
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.spin = 2
mol.basis = 'sto-3g'
mol.build()
mf = scf.RHF(mol).run(conv_tol=1e-14)
myci = ci.GCISD(mf)
eris = myci.ao2mo()
ecisd, civec = myci.kernel(eris=eris)
self.assertAlmostEqual(ecisd, -0.035165114624046617, 8)
nmo = eris.mo_coeff.shape[1]
rdm1 = myci.make_rdm1(civec, nmo, mol.nelectron)
rdm2 = myci.make_rdm2(civec, nmo, mol.nelectron)
mo = eris.mo_coeff[:7] + eris.mo_coeff[7:]
eri = ao2mo.kernel(mf._eri, mo, compact=False).reshape([nmo]*4)
eri[eris.orbspin[:,None]!=eris.orbspin,:,:] = 0
eri[:,:,eris.orbspin[:,None]!=eris.orbspin] = 0
h1e = reduce(numpy.dot, (mo.T, mf.get_hcore(), mo))
h1e[eris.orbspin[:,None]!=eris.orbspin] = 0
e2 = (numpy.einsum('ij,ji', h1e, rdm1) +
numpy.einsum('ijkl,ijkl', eri, rdm2) * .5)
e2 += mol.energy_nuc()
self.assertAlmostEqual(myci.e_tot, e2, 9)
dm1 = numpy.einsum('ijkk->ji', rdm2)/(mol.nelectron-1)
self.assertAlmostEqual(abs(rdm1 - dm1).max(), 0, 9)
def test_rdm_real(self):
mol = gto.M()
mol.verbose = 0
nocc = 6
nvir = 10
mf = scf.GHF(mol)
nmo = nocc + nvir
npair = nmo*(nmo//2+1)//4
numpy.random.seed(12)
mf._eri = numpy.random.random(npair*(npair+1)//2)*.3
hcore = numpy.random.random((nmo,nmo)) * .5
hcore = hcore + hcore.T + numpy.diag(range(nmo))*2
mf.get_hcore = lambda *args: hcore
mf.get_ovlp = lambda *args: numpy.eye(nmo)
mf.mo_coeff = numpy.eye(nmo)
mf.mo_occ = numpy.zeros(nmo)
mf.mo_occ[:nocc] = 1
dm1 = mf.make_rdm1()
mf.e_tot = mf.energy_elec()[0]
myci = gcisd.GCISD(mf).run()
dm1 = myci.make_rdm1()
dm2 = myci.make_rdm2()
nao = nmo // 2
mo_a = mf.mo_coeff[:nao]
mo_b = mf.mo_coeff[nao:]
eri = ao2mo.kernel(mf._eri, mo_a)
eri += ao2mo.kernel(mf._eri, mo_b)
eri1 = ao2mo.kernel(mf._eri, (mo_a,mo_a,mo_b,mo_b))
eri += eri1
eri += eri1.T
eri = ao2mo.restore(1, eri, nmo)
h1 = reduce(numpy.dot, (mf.mo_coeff.T.conj(), hcore, mf.mo_coeff))
e1 = numpy.einsum('ij,ji', h1, dm1)
e1+= numpy.einsum('ijkl,ijkl', eri, dm2) * .5
self.assertAlmostEqual(e1, myci.e_tot, 7)
self.assertAlmostEqual(abs(dm2-dm2.transpose(1,0,3,2).conj()).max(), 0, 9)
self.assertAlmostEqual(abs(dm2-dm2.transpose(2,3,0,1) ).max(), 0, 9)
self.assertAlmostEqual(abs(dm2+dm2.transpose(2,1,0,3) ).max(), 0, 9)
self.assertAlmostEqual(abs(dm2+dm2.transpose(0,3,2,1) ).max(), 0, 9)
def test_rdm_complex(self):
mol = gto.M()
mol.verbose = 0
nocc = 4
nvir = 6
mf = scf.GHF(mol)
nmo = nocc + nvir
numpy.random.seed(1)
eri = (numpy.random.random((nmo,nmo,nmo,nmo)) +
numpy.random.random((nmo,nmo,nmo,nmo))* 1j - (.5+.5j))
eri = eri + eri.transpose(1,0,3,2).conj()
eri = eri + eri.transpose(2,3,0,1)
eri *= .1
def get_jk(mol, dm, *args,**kwargs):
vj = numpy.einsum('ijkl,lk->ij', eri, dm)
vk = numpy.einsum('ijkl,jk->il', eri, dm)
return vj, vk
def get_veff(mol, dm, *args, **kwargs):
vj, vk = get_jk(mol, dm)
return vj - vk
def ao2mofn(mos):
return eri
mf.get_jk = get_jk
mf.get_veff = get_veff
hcore = numpy.random.random((nmo,nmo)) * .2 + numpy.random.random((nmo,nmo))* .2j
hcore = hcore + hcore.T.conj() + numpy.diag(range(nmo))*2
mf.get_hcore = lambda *args: hcore
mf.get_ovlp = lambda *args: numpy.eye(nmo)
orbspin = numpy.zeros(nmo, dtype=int)
orbspin[1::2] = 1
mf.mo_coeff = lib.tag_array(numpy.eye(nmo) + 0j, orbspin=orbspin)
mf.mo_occ = numpy.zeros(nmo)
mf.mo_occ[:nocc] = 1
mf.e_tot = mf.energy_elec(mf.make_rdm1(), hcore)[0]
myci = gcisd.GCISD(mf)
eris = gcisd.gccsd._make_eris_incore(myci, mf.mo_coeff, ao2mofn)
myci.ao2mo = lambda *args, **kwargs: eris
myci.kernel(eris=eris)
dm1 = myci.make_rdm1()
dm2 = myci.make_rdm2()
e1 = numpy.einsum('ij,ji', hcore, dm1)
e1+= numpy.einsum('ijkl,ijkl', eri, dm2) * .5
self.assertAlmostEqual(e1, myci.e_tot, 7)
self.assertAlmostEqual(abs(dm2-dm2.transpose(1,0,3,2).conj()).max(), 0, 9)
self.assertAlmostEqual(abs(dm2-dm2.transpose(2,3,0,1) ).max(), 0, 9)
self.assertAlmostEqual(abs(dm2+dm2.transpose(2,1,0,3) ).max(), 0, 9)
self.assertAlmostEqual(abs(dm2+dm2.transpose(0,3,2,1) ).max(), 0, 9)
def test_rdm_vs_ucisd(self):
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.verbose = 5
mol.output = '/dev/null'
mol.basis = '631g'
mol.spin = 2
mol.build()
mf = scf.UHF(mol).run()
myuci = ucisd.UCISD(mf)
myuci.frozen = 1
myuci.kernel()
udm1 = myuci.make_rdm1()
udm2 = myuci.make_rdm2()
mf = scf.addons.convert_to_ghf(mf)
mygci = gcisd.GCISD(mf)
mygci.frozen = 2
mygci.kernel()
dm1 = mygci.make_rdm1()
dm2 = mygci.make_rdm2()
nao = mol.nao_nr()
mo_a = mf.mo_coeff[:nao]
mo_b = mf.mo_coeff[nao:]
nmo = mo_a.shape[1]
eri = ao2mo.kernel(mf._eri, mo_a+mo_b, compact=False).reshape([nmo]*4)
orbspin = mf.mo_coeff.orbspin
sym_forbid = (orbspin[:,None] != orbspin)
eri[sym_forbid,:,:] = 0
eri[:,:,sym_forbid] = 0
hcore = scf.RHF(mol).get_hcore()
h1 = reduce(numpy.dot, (mo_a.T.conj(), hcore, mo_a))
h1+= reduce(numpy.dot, (mo_b.T.conj(), hcore, mo_b))
e1 = numpy.einsum('ij,ji', h1, dm1)
e1+= numpy.einsum('ijkl,ijkl', eri, dm2) * .5
e1+= mol.energy_nuc()
self.assertAlmostEqual(e1, mygci.e_tot, 7)
idxa = numpy.where(orbspin == 0)[0]
idxb = numpy.where(orbspin == 1)[0]
self.assertAlmostEqual(abs(dm1[idxa[:,None],idxa] - udm1[0]).max(), 0, 5)
self.assertAlmostEqual(abs(dm1[idxb[:,None],idxb] - udm1[1]).max(), 0, 5)
self.assertAlmostEqual(abs(dm2[idxa[:,None,None,None],idxa[:,None,None],idxa[:,None],idxa] - udm2[0]).max(), 0, 5)
self.assertAlmostEqual(abs(dm2[idxa[:,None,None,None],idxa[:,None,None],idxb[:,None],idxb] - udm2[1]).max(), 0, 5)
self.assertAlmostEqual(abs(dm2[idxb[:,None,None,None],idxb[:,None,None],idxb[:,None],idxb] - udm2[2]).max(), 0, 5)
c0, c1, c2 = myuci.cisdvec_to_amplitudes(myuci.ci)
ut1 = [0] * 2
ut2 = [0] * 3
ut0 = c0 + .2j
ut1[0] = c1[0] + numpy.cos(c1[0]) * .2j
ut1[1] = c1[1] + numpy.cos(c1[1]) * .2j
ut2[0] = c2[0] + numpy.sin(c2[0]) * .8j
ut2[1] = c2[1] + numpy.sin(c2[1]) * .8j
ut2[2] = c2[2] + numpy.sin(c2[2]) * .8j
civec = myuci.amplitudes_to_cisdvec(ut0, ut1, ut2)
udm1 = myuci.make_rdm1(civec)
udm2 = myuci.make_rdm2(civec)
gt1 = mygci.spatial2spin(ut1)
gt2 = mygci.spatial2spin(ut2)
civec = mygci.amplitudes_to_cisdvec(ut0, gt1, gt2)
gdm1 = mygci.make_rdm1(civec)
gdm2 = mygci.make_rdm2(civec)
self.assertAlmostEqual(abs(gdm1[idxa[:,None],idxa] - udm1[0]).max(), 0, 9)
self.assertAlmostEqual(abs(gdm1[idxb[:,None],idxb] - udm1[1]).max(), 0, 9)
self.assertAlmostEqual(abs(gdm2[idxa[:,None,None,None],idxa[:,None,None],idxa[:,None],idxa] - udm2[0]).max(), 0, 9)
self.assertAlmostEqual(abs(gdm2[idxa[:,None,None,None],idxa[:,None,None],idxb[:,None],idxb] - udm2[1]).max(), 0, 9)
self.assertAlmostEqual(abs(gdm2[idxb[:,None,None,None],idxb[:,None,None],idxb[:,None],idxb] - udm2[2]).max(), 0, 9)
def test_rdm_vs_rcisd(self):
mol = gto.Mole()
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.verbose = 5
mol.output = '/dev/null'
mol.basis = '631g'
mol.build()
mf = scf.RHF(mol).run()
myrci = ci.cisd.CISD(mf).run()
rdm1 = myrci.make_rdm1()
rdm2 = myrci.make_rdm2()
mf = scf.addons.convert_to_ghf(mf)
mygci = gcisd.GCISD(mf).run()
dm1 = mygci.make_rdm1()
dm2 = mygci.make_rdm2()
nao = mol.nao_nr()
mo_a = mf.mo_coeff[:nao]
mo_b = mf.mo_coeff[nao:]
nmo = mo_a.shape[1]
eri = ao2mo.kernel(mf._eri, mo_a+mo_b, compact=False).reshape([nmo]*4)
orbspin = mf.mo_coeff.orbspin
sym_forbid = (orbspin[:,None] != orbspin)
eri[sym_forbid,:,:] = 0
eri[:,:,sym_forbid] = 0
hcore = scf.RHF(mol).get_hcore()
h1 = reduce(numpy.dot, (mo_a.T.conj(), hcore, mo_a))
h1+= reduce(numpy.dot, (mo_b.T.conj(), hcore, mo_b))
e1 = numpy.einsum('ij,ji', h1, dm1)
e1+= numpy.einsum('ijkl,ijkl', eri, dm2) * .5
e1+= mol.energy_nuc()
self.assertAlmostEqual(e1, mygci.e_tot, 7)
idxa = numpy.where(orbspin == 0)[0]
idxb = numpy.where(orbspin == 1)[0]
trdm1 = dm1[idxa[:,None],idxa]
trdm1+= dm1[idxb[:,None],idxb]
trdm2 = dm2[idxa[:,None,None,None],idxa[:,None,None],idxa[:,None],idxa]
trdm2+= dm2[idxb[:,None,None,None],idxb[:,None,None],idxb[:,None],idxb]
dm2ab = dm2[idxa[:,None,None,None],idxa[:,None,None],idxb[:,None],idxb]
trdm2+= dm2ab
trdm2+= dm2ab.transpose(2,3,0,1)
self.assertAlmostEqual(abs(trdm1 - rdm1).max(), 0, 5)
self.assertAlmostEqual(abs(trdm2 - rdm2).max(), 0, 5)
c0, c1, c2 = myrci.cisdvec_to_amplitudes(myrci.ci)
rt0 = c0 + .2j
rt1 = c1 + numpy.cos(c1) * .2j
rt2 = c2 + numpy.sin(c2) * .8j
civec = myrci.amplitudes_to_cisdvec(rt0, rt1, rt2)
rdm1 = myrci.make_rdm1(civec)
rdm2 = myrci.make_rdm2(civec)
gt1 = mygci.spatial2spin(rt1)
gt2 = mygci.spatial2spin(rt2)
civec = mygci.amplitudes_to_cisdvec(rt0, gt1, gt2)
gdm1 = mygci.make_rdm1(civec)
gdm2 = mygci.make_rdm2(civec)
orbspin = mf.mo_coeff.orbspin
idxa = numpy.where(orbspin == 0)[0]
idxb = numpy.where(orbspin == 1)[0]
trdm1 = gdm1[idxa[:,None],idxa]
trdm1+= gdm1[idxb[:,None],idxb]
trdm2 = gdm2[idxa[:,None,None,None],idxa[:,None,None],idxa[:,None],idxa]
trdm2+= gdm2[idxb[:,None,None,None],idxb[:,None,None],idxb[:,None],idxb]
dm2ab = gdm2[idxa[:,None,None,None],idxa[:,None,None],idxb[:,None],idxb]
trdm2+= dm2ab
trdm2+= dm2ab.transpose(2,3,0,1)
self.assertAlmostEqual(abs(trdm1 - rdm1).max(), 0, 9)
self.assertAlmostEqual(abs(trdm2 - rdm2).max(), 0, 9)
def test_ao_direct(self):
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['O', ( 0., 0. , 0. )],
['H', ( 0., -0.757, 0.587)],
['H', ( 0., 0.757 , 0.587)],]
mol.spin = 2
mol.basis = 'ccpvdz'
mol.build()
mf = scf.RHF(mol).run(conv_tol=1e-14)
myci = ci.GCISD(mf)
myci.max_memory = .1
myci.frozen = [2,3,4,5]
myci.direct = True
ecisd, civec = myci.kernel()
self.assertAlmostEqual(ecisd, -0.048829195509732602, 8)
def test_trans_rdm1(self):
numpy.random.seed(1)
norb = 4
nocc = 2
nvir = norb - nocc
c2 = numpy.random.random((nocc,nocc,nvir,nvir))
c2 = c2 + c2.transpose(1,0,3,2)
cibra = numpy.hstack((numpy.random.random(1+nocc*nvir), c2.ravel()))
c2 = numpy.random.random((nocc,nocc,nvir,nvir))
c2 = c2 + c2.transpose(1,0,3,2)
ciket = numpy.hstack((numpy.random.random(1+nocc*nvir), c2.ravel()))
cibra /= ci.cisd.dot(cibra, cibra, norb, nocc)**.5
ciket /= ci.cisd.dot(ciket, ciket, norb, nocc)**.5
fcibra = ci.cisd.to_fcivec(cibra, norb, nocc*2)
fciket = ci.cisd.to_fcivec(ciket, norb, nocc*2)
fcidm1 = fci.direct_spin1.trans_rdm1s(fcibra, fciket, norb, nocc*2)
myci1 = ci.GCISD(scf.GHF(gto.M()))
myci1.nmo = norb = 8
myci1.nocc = nocc = 4
orbspin = numpy.zeros(norb, dtype=int)
orbspin[1::2] = 1
myci1.mo_coeff = lib.tag_array(numpy.eye(norb), orbspin=orbspin)
myci1.mo_occ = numpy.zeros(norb)
myci1.mo_occ[:nocc] = 1
cibra = myci1.from_rcisdvec(cibra, (nocc//2,nocc//2), orbspin)
ciket = myci1.from_rcisdvec(ciket)
cidm1 = myci1.trans_rdm1(cibra, ciket, norb, nocc)
self.assertAlmostEqual(abs(cidm1[0::2,0::2] - fcidm1[0]).max(), 0, 12)
self.assertAlmostEqual(abs(cidm1[1::2,1::2] - fcidm1[1]).max(), 0, 12)
cibra = myci1.to_ucisdvec(cibra, orbspin)
ciket = myci1.to_ucisdvec(ciket)
myci2 = ci.UCISD(scf.UHF(gto.M()))
cidm1 = myci2.trans_rdm1(cibra, ciket, (norb//2,norb//2), (nocc//2,nocc//2))
self.assertAlmostEqual(abs(cidm1[0] - fcidm1[0]).max(), 0, 12)
self.assertAlmostEqual(abs(cidm1[1] - fcidm1[1]).max(), 0, 12)
def test_multi_roots(self):
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0.,-1. , 1. )],
]
mol.basis = '3-21g'
mol.build()
mf = scf.GHF(mol).run()
myci = ci.GCISD(mf)
myci.nroots = 3
myci.run()
self.assertAlmostEqual(myci.e_tot[2], -1.9802158893844912, 8)
def test_trans_rdm_with_frozen(self):
mol = gto.M(atom='''
O 0. 0. .0
H 0. -0.757 0.587
H 0. 0.757 0.587''', basis='sto3g')
mf = scf.convert_to_ghf(scf.RHF(mol).run())
orbspin = mf.mo_coeff.orbspin
idxa = numpy.where(orbspin == 0)[0]
idxb = numpy.where(orbspin == 1)[0]
nmo_1c = mf.mo_coeff.shape[1]//2
def check_frozen(frozen):
myci = ci.GCISD(mf)
myci.frozen = frozen
myci.nroots = 3
myci.kernel()
nocc = myci.nocc
nmo = myci.nmo
nfroz = len(frozen)
try:
ket_id = 1
fciket = gcisd.to_fcivec(myci.ci[ket_id], mol.nelectron, orbspin, myci.frozen)
except RuntimeError:
ket_id = 2
fciket = gcisd.to_fcivec(myci.ci[ket_id], mol.nelectron, orbspin, myci.frozen)
# spin-forbidden transition
cidm1 = myci.trans_rdm1(myci.ci[0], myci.ci[1], nmo, nocc)
self.assertAlmostEqual(abs(cidm1[idxa[:,None],idxa]).max(), 0, 7)
self.assertAlmostEqual(abs(cidm1[idxb[:,None],idxb]).max(), 0, 7)
cibra = (myci.ci[0] + myci.ci[ket_id]) * numpy.sqrt(.5)
fcibra = gcisd.to_fcivec(cibra, mol.nelectron, orbspin, myci.frozen)
fcidm1 = fci.direct_spin1.trans_rdm1s(fcibra, fciket, nmo_1c, mol.nelectron)
cidm1 = myci.trans_rdm1(cibra, myci.ci[ket_id], nmo, nocc)
self.assertAlmostEqual(abs(fcidm1[0]-cidm1[idxa[:,None],idxa]).max(), 0, 12)
self.assertAlmostEqual(abs(fcidm1[1]-cidm1[idxb[:,None],idxb]).max(), 0, 12)
check_frozen([10])
check_frozen([10,3])
if __name__ == "__main__":
print("Full Tests for GCISD")
unittest.main()
|
|
#!/usr/bin/env python3
# Copyright 2020 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Tests for the tokens module."""
import datetime
import io
import logging
from pathlib import Path
import tempfile
from typing import Iterator
import unittest
from pw_tokenizer import tokens
from pw_tokenizer.tokens import default_hash, _LOG
CSV_DATABASE = '''\
00000000,2019-06-10,""
141c35d5, ,"The answer: ""%s"""
2db1515f, ,"%u%d%02x%X%hu%hhu%d%ld%lu%lld%llu%c%c%c"
2e668cd6,2019-06-11,"Jello, world!"
31631781, ,"%d"
61fd1e26, ,"%ld"
68ab92da, ,"%s there are %x (%.2f) of them%c"
7b940e2a, ,"Hello %s! %hd %e"
851beeb6, ,"%u %d"
881436a0, ,"The answer is: %s"
ad002c97, ,"%llx"
b3653e13,2019-06-12,"Jello!"
b912567b, ,"%x%lld%1.2f%s"
cc6d3131,2020-01-01,"Jello?"
e13b0f94, ,"%llu"
e65aefef,2019-06-10,"Won't fit : %s%d"
'''
# The date 2019-06-10 is 07E3-06-0A in hex. In database order, it's 0A 06 E3 07.
BINARY_DATABASE = (
b'TOKENS\x00\x00\x10\x00\x00\x00\0\0\0\0' # header (0x10 entries)
b'\x00\x00\x00\x00\x0a\x06\xe3\x07' # 0x01
b'\xd5\x35\x1c\x14\xff\xff\xff\xff' # 0x02
b'\x5f\x51\xb1\x2d\xff\xff\xff\xff' # 0x03
b'\xd6\x8c\x66\x2e\x0b\x06\xe3\x07' # 0x04
b'\x81\x17\x63\x31\xff\xff\xff\xff' # 0x05
b'\x26\x1e\xfd\x61\xff\xff\xff\xff' # 0x06
b'\xda\x92\xab\x68\xff\xff\xff\xff' # 0x07
b'\x2a\x0e\x94\x7b\xff\xff\xff\xff' # 0x08
b'\xb6\xee\x1b\x85\xff\xff\xff\xff' # 0x09
b'\xa0\x36\x14\x88\xff\xff\xff\xff' # 0x0a
b'\x97\x2c\x00\xad\xff\xff\xff\xff' # 0x0b
b'\x13\x3e\x65\xb3\x0c\x06\xe3\x07' # 0x0c
b'\x7b\x56\x12\xb9\xff\xff\xff\xff' # 0x0d
b'\x31\x31\x6d\xcc\x01\x01\xe4\x07' # 0x0e
b'\x94\x0f\x3b\xe1\xff\xff\xff\xff' # 0x0f
b'\xef\xef\x5a\xe6\x0a\x06\xe3\x07' # 0x10
b'\x00'
b'The answer: "%s"\x00'
b'%u%d%02x%X%hu%hhu%d%ld%lu%lld%llu%c%c%c\x00'
b'Jello, world!\x00'
b'%d\x00'
b'%ld\x00'
b'%s there are %x (%.2f) of them%c\x00'
b'Hello %s! %hd %e\x00'
b'%u %d\x00'
b'The answer is: %s\x00'
b'%llx\x00'
b'Jello!\x00'
b'%x%lld%1.2f%s\x00'
b'Jello?\x00'
b'%llu\x00'
b'Won\'t fit : %s%d\x00')
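# A small sanity check of the date encoding noted above (illustration only, not
# part of the tokens API): the date 2019-06-10, i.e. 0x07E3060A, is stored
# little-endian, which yields the bytes 0A 06 E3 07 seen in the entries above.
assert (0x07E3060A).to_bytes(4, 'little') == b'\x0a\x06\xe3\x07'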
INVALID_CSV = """\
1,,"Whoa there!"
2,this is totally invalid,"Whoa there!"
3,,"This one's OK"
,,"Also broken"
5,1845-2-2,"I'm %s fine"
6,"Missing fields"
"""
def read_db_from_csv(csv_str: str) -> tokens.Database:
with io.StringIO(csv_str) as csv_db:
return tokens.Database(tokens.parse_csv(csv_db))
def _entries(*strings: str) -> Iterator[tokens.TokenizedStringEntry]:
for string in strings:
yield tokens.TokenizedStringEntry(default_hash(string), string)
class TokenDatabaseTest(unittest.TestCase):
"""Tests the token database class."""
def test_csv(self):
db = read_db_from_csv(CSV_DATABASE)
self.assertEqual(str(db), CSV_DATABASE)
db = read_db_from_csv('')
self.assertEqual(str(db), '')
def test_csv_formatting(self):
db = read_db_from_csv('')
self.assertEqual(str(db), '')
db = read_db_from_csv('abc123,2048-4-1,Fake string\n')
self.assertEqual(str(db), '00abc123,2048-04-01,"Fake string"\n')
db = read_db_from_csv('1,1990-01-01,"Quotes"""\n'
'0,1990-02-01,"Commas,"",,"\n')
self.assertEqual(str(db), ('00000000,1990-02-01,"Commas,"",,"\n'
'00000001,1990-01-01,"Quotes"""\n'))
def test_bad_csv(self):
with self.assertLogs(_LOG, logging.ERROR) as logs:
db = read_db_from_csv(INVALID_CSV)
self.assertGreaterEqual(len(logs.output), 3)
self.assertEqual(len(db.token_to_entries), 3)
self.assertEqual(db.token_to_entries[1][0].string, 'Whoa there!')
self.assertFalse(db.token_to_entries[2])
self.assertEqual(db.token_to_entries[3][0].string, "This one's OK")
self.assertFalse(db.token_to_entries[4])
self.assertEqual(db.token_to_entries[5][0].string, "I'm %s fine")
self.assertFalse(db.token_to_entries[6])
def test_lookup(self):
db = read_db_from_csv(CSV_DATABASE)
self.assertEqual(db.token_to_entries[0x9999], [])
matches = db.token_to_entries[0x2e668cd6]
self.assertEqual(len(matches), 1)
jello = matches[0]
self.assertEqual(jello.token, 0x2e668cd6)
self.assertEqual(jello.string, 'Jello, world!')
self.assertEqual(jello.date_removed, datetime.datetime(2019, 6, 11))
matches = db.token_to_entries[0xe13b0f94]
self.assertEqual(len(matches), 1)
llu = matches[0]
self.assertEqual(llu.token, 0xe13b0f94)
self.assertEqual(llu.string, '%llu')
self.assertIsNone(llu.date_removed)
answer, = db.token_to_entries[0x141c35d5]
self.assertEqual(answer.string, 'The answer: "%s"')
def test_collisions(self):
hash_1 = tokens.pw_tokenizer_65599_hash('o000', 96)
hash_2 = tokens.pw_tokenizer_65599_hash('0Q1Q', 96)
self.assertEqual(hash_1, hash_2)
db = tokens.Database.from_strings(['o000', '0Q1Q'])
self.assertEqual(len(db.token_to_entries[hash_1]), 2)
self.assertCountEqual(
[entry.string for entry in db.token_to_entries[hash_1]],
['o000', '0Q1Q'])
def test_purge(self):
db = read_db_from_csv(CSV_DATABASE)
original_length = len(db.token_to_entries)
self.assertEqual(db.token_to_entries[0][0].string, '')
self.assertEqual(db.token_to_entries[0x31631781][0].string, '%d')
self.assertEqual(db.token_to_entries[0x2e668cd6][0].string,
'Jello, world!')
self.assertEqual(db.token_to_entries[0xb3653e13][0].string, 'Jello!')
self.assertEqual(db.token_to_entries[0xcc6d3131][0].string, 'Jello?')
self.assertEqual(db.token_to_entries[0xe65aefef][0].string,
"Won't fit : %s%d")
db.purge(datetime.datetime(2019, 6, 11))
self.assertLess(len(db.token_to_entries), original_length)
self.assertFalse(db.token_to_entries[0])
self.assertEqual(db.token_to_entries[0x31631781][0].string, '%d')
self.assertFalse(db.token_to_entries[0x2e668cd6])
self.assertEqual(db.token_to_entries[0xb3653e13][0].string, 'Jello!')
self.assertEqual(db.token_to_entries[0xcc6d3131][0].string, 'Jello?')
self.assertFalse(db.token_to_entries[0xe65aefef])
def test_merge(self):
"""Tests the tokens.Database merge method."""
db = tokens.Database()
# Test basic merging into an empty database.
db.merge(
tokens.Database([
tokens.TokenizedStringEntry(
1, 'one', date_removed=datetime.datetime.min),
tokens.TokenizedStringEntry(
2, 'two', date_removed=datetime.datetime.min),
]))
self.assertEqual({str(e) for e in db.entries()}, {'one', 'two'})
self.assertEqual(db.token_to_entries[1][0].date_removed,
datetime.datetime.min)
self.assertEqual(db.token_to_entries[2][0].date_removed,
datetime.datetime.min)
# Test merging in an entry with a removal date.
db.merge(
tokens.Database([
tokens.TokenizedStringEntry(3, 'three'),
tokens.TokenizedStringEntry(
4, 'four', date_removed=datetime.datetime.min),
]))
self.assertEqual({str(e)
for e in db.entries()},
{'one', 'two', 'three', 'four'})
self.assertIsNone(db.token_to_entries[3][0].date_removed)
self.assertEqual(db.token_to_entries[4][0].date_removed,
datetime.datetime.min)
# Test merging in one entry.
db.merge(tokens.Database([
tokens.TokenizedStringEntry(5, 'five'),
]))
self.assertEqual({str(e)
for e in db.entries()},
{'one', 'two', 'three', 'four', 'five'})
self.assertEqual(db.token_to_entries[4][0].date_removed,
datetime.datetime.min)
self.assertIsNone(db.token_to_entries[5][0].date_removed)
# Merge in repeated entries with different removal dates.
db.merge(
tokens.Database([
tokens.TokenizedStringEntry(
4, 'four', date_removed=datetime.datetime.max),
tokens.TokenizedStringEntry(
5, 'five', date_removed=datetime.datetime.max),
]))
self.assertEqual(len(db.entries()), 5)
self.assertEqual({str(e)
for e in db.entries()},
{'one', 'two', 'three', 'four', 'five'})
self.assertEqual(db.token_to_entries[4][0].date_removed,
datetime.datetime.max)
self.assertIsNone(db.token_to_entries[5][0].date_removed)
# Merge in the same repeated entries now without removal dates.
db.merge(
tokens.Database([
tokens.TokenizedStringEntry(4, 'four'),
tokens.TokenizedStringEntry(5, 'five')
]))
self.assertEqual(len(db.entries()), 5)
self.assertEqual({str(e)
for e in db.entries()},
{'one', 'two', 'three', 'four', 'five'})
self.assertIsNone(db.token_to_entries[4][0].date_removed)
self.assertIsNone(db.token_to_entries[5][0].date_removed)
# Merge in an empty database.
db.merge(tokens.Database([]))
self.assertEqual({str(e)
for e in db.entries()},
{'one', 'two', 'three', 'four', 'five'})
def test_merge_multiple_databases_in_one_call(self):
"""Tests the merge and merged methods with multiple databases."""
db = tokens.Database.merged(
tokens.Database([
tokens.TokenizedStringEntry(1,
'one',
date_removed=datetime.datetime.max)
]),
tokens.Database([
tokens.TokenizedStringEntry(2,
'two',
date_removed=datetime.datetime.min)
]),
tokens.Database([
tokens.TokenizedStringEntry(1,
'one',
date_removed=datetime.datetime.min)
]))
self.assertEqual({str(e) for e in db.entries()}, {'one', 'two'})
db.merge(
tokens.Database([
tokens.TokenizedStringEntry(4,
'four',
date_removed=datetime.datetime.max)
]),
tokens.Database([
tokens.TokenizedStringEntry(2,
'two',
date_removed=datetime.datetime.max)
]),
tokens.Database([
tokens.TokenizedStringEntry(3,
'three',
date_removed=datetime.datetime.min)
]))
self.assertEqual({str(e)
for e in db.entries()},
{'one', 'two', 'three', 'four'})
def test_entry_counts(self):
self.assertEqual(len(CSV_DATABASE.splitlines()), 16)
db = read_db_from_csv(CSV_DATABASE)
self.assertEqual(len(db.entries()), 16)
self.assertEqual(len(db.token_to_entries), 16)
# Add two strings with the same hash.
db.add(_entries('o000', '0Q1Q'))
self.assertEqual(len(db.entries()), 18)
self.assertEqual(len(db.token_to_entries), 17)
def test_mark_removed(self):
"""Tests that date_removed field is set by mark_removed."""
db = tokens.Database.from_strings(
['MILK', 'apples', 'oranges', 'CHEESE', 'pears'])
self.assertTrue(
all(entry.date_removed is None for entry in db.entries()))
date_1 = datetime.datetime(1, 2, 3)
db.mark_removed(_entries('apples', 'oranges', 'pears'), date_1)
self.assertEqual(
db.token_to_entries[default_hash('MILK')][0].date_removed, date_1)
self.assertEqual(
db.token_to_entries[default_hash('CHEESE')][0].date_removed,
date_1)
now = datetime.datetime.now()
db.mark_removed(_entries('MILK', 'CHEESE', 'pears'))
# New strings are not added or re-added in mark_removed().
self.assertGreaterEqual(
db.token_to_entries[default_hash('MILK')][0].date_removed, date_1)
self.assertGreaterEqual(
db.token_to_entries[default_hash('CHEESE')][0].date_removed,
date_1)
# These strings were removed.
self.assertGreaterEqual(
db.token_to_entries[default_hash('apples')][0].date_removed, now)
self.assertGreaterEqual(
db.token_to_entries[default_hash('oranges')][0].date_removed, now)
self.assertIsNone(
db.token_to_entries[default_hash('pears')][0].date_removed)
def test_add(self):
db = tokens.Database()
db.add(_entries('MILK', 'apples'))
self.assertEqual({e.string for e in db.entries()}, {'MILK', 'apples'})
db.add(_entries('oranges', 'CHEESE', 'pears'))
self.assertEqual(len(db.entries()), 5)
db.add(_entries('MILK', 'apples', 'only this one is new'))
self.assertEqual(len(db.entries()), 6)
db.add(_entries('MILK'))
self.assertEqual({e.string
for e in db.entries()}, {
'MILK', 'apples', 'oranges', 'CHEESE', 'pears',
'only this one is new'
})
def test_binary_format_write(self):
db = read_db_from_csv(CSV_DATABASE)
with io.BytesIO() as fd:
tokens.write_binary(db, fd)
binary_db = fd.getvalue()
self.assertEqual(BINARY_DATABASE, binary_db)
def test_binary_format_parse(self):
with io.BytesIO(BINARY_DATABASE) as binary_db:
db = tokens.Database(tokens.parse_binary(binary_db))
self.assertEqual(str(db), CSV_DATABASE)
class TestDatabaseFile(unittest.TestCase):
"""Tests the DatabaseFile class."""
def setUp(self):
file = tempfile.NamedTemporaryFile(delete=False)
file.close()
self._path = Path(file.name)
def tearDown(self):
self._path.unlink()
def test_update_csv_file(self):
self._path.write_text(CSV_DATABASE)
db = tokens.DatabaseFile(self._path)
self.assertEqual(str(db), CSV_DATABASE)
db.add([tokens.TokenizedStringEntry(0xffffffff, 'New entry!')])
db.write_to_file()
self.assertEqual(self._path.read_text(),
CSV_DATABASE + 'ffffffff, ,"New entry!"\n')
def test_csv_file_too_short_raises_exception(self):
self._path.write_text('1234')
with self.assertRaises(tokens.DatabaseFormatError):
tokens.DatabaseFile(self._path)
def test_csv_invalid_format_raises_exception(self):
self._path.write_text('MK34567890')
with self.assertRaises(tokens.DatabaseFormatError):
tokens.DatabaseFile(self._path)
def test_csv_not_utf8(self):
self._path.write_bytes(b'\x80' * 20)
with self.assertRaises(tokens.DatabaseFormatError):
tokens.DatabaseFile(self._path)
class TestFilter(unittest.TestCase):
"""Tests the filtering functionality."""
def setUp(self):
self.db = tokens.Database([
tokens.TokenizedStringEntry(1, 'Luke'),
tokens.TokenizedStringEntry(2, 'Leia'),
tokens.TokenizedStringEntry(2, 'Darth Vader'),
tokens.TokenizedStringEntry(2, 'Emperor Palpatine'),
tokens.TokenizedStringEntry(3, 'Han'),
tokens.TokenizedStringEntry(4, 'Chewbacca'),
tokens.TokenizedStringEntry(5, 'Darth Maul'),
tokens.TokenizedStringEntry(6, 'Han Solo'),
])
def test_filter_include_single_regex(self):
self.db.filter(include=[' ']) # anything with a space
self.assertEqual(
set(e.string for e in self.db.entries()),
{'Darth Vader', 'Emperor Palpatine', 'Darth Maul', 'Han Solo'})
def test_filter_include_multiple_regexes(self):
self.db.filter(include=['Darth', 'cc', '^Han$'])
self.assertEqual(set(e.string for e in self.db.entries()),
{'Darth Vader', 'Darth Maul', 'Han', 'Chewbacca'})
def test_filter_include_no_matches(self):
self.db.filter(include=['Gandalf'])
self.assertFalse(self.db.entries())
def test_filter_exclude_single_regex(self):
self.db.filter(exclude=['^[^L]'])
self.assertEqual(set(e.string for e in self.db.entries()),
{'Luke', 'Leia'})
def test_filter_exclude_multiple_regexes(self):
self.db.filter(exclude=[' ', 'Han', 'Chewbacca'])
self.assertEqual(set(e.string for e in self.db.entries()),
{'Luke', 'Leia'})
def test_filter_exclude_no_matches(self):
self.db.filter(exclude=['.*'])
self.assertFalse(self.db.entries())
def test_filter_include_and_exclude(self):
self.db.filter(include=[' '], exclude=['Darth', 'Emperor'])
self.assertEqual(set(e.string for e in self.db.entries()),
{'Han Solo'})
def test_filter_neither_include_nor_exclude(self):
self.db.filter()
self.assertEqual(
set(e.string for e in self.db.entries()), {
'Luke', 'Leia', 'Darth Vader', 'Emperor Palpatine', 'Han',
'Chewbacca', 'Darth Maul', 'Han Solo'
})
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using multiple GPU's with synchronous updates.
Accuracy:
cifar10_multi_gpu_train.py achieves ~86% accuracy after 100K steps (256
epochs of data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
--------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
2 Tesla K20m | 0.13-0.20 | ~84% at 30K steps (2.5 hours)
3 Tesla K20m | 0.13-0.18 | ~84% at 30K steps
4 Tesla K20m | ~0.10 | ~84% at 30K steps
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def tower_loss(scope):
"""Calculate the total loss on a single tower running the CIFAR model.
Args:
scope: unique prefix string identifying the CIFAR tower, e.g. 'tower_0'
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build inference Graph.
logits = cifar10.inference(images)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
_ = cifar10.loss(logits, labels)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection('losses', scope)
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(loss_name +' (raw)', l)
tf.scalar_summary(loss_name, loss_averages.average(l))
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
return total_loss
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
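# Minimal sketch of the regrouping done by zip(*tower_grads) above (placeholder
# strings stand in for (gradient, variable) tensors; illustration only, not
# used by train()): each grad_and_vars holds one variable's gradients from
# every tower.
_towers = [[('g0_t0', 'v0'), ('g1_t0', 'v1')],  # gradients from tower 0
           [('g0_t1', 'v0'), ('g1_t1', 'v1')]]  # gradients from tower 1
assert list(zip(*_towers)) == [(('g0_t0', 'v0'), ('g0_t1', 'v0')),
                               (('g1_t0', 'v1'), ('g1_t1', 'v1'))]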
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
FLAGS.batch_size)
decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,
global_step,
decay_steps,
cifar10.LEARNING_RATE_DECAY_FACTOR,
staircase=True)
# Create an optimizer that performs gradient descent.
opt = tf.train.GradientDescentOptimizer(lr)
# Calculate the gradients for each model tower.
tower_grads = []
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the CIFAR model. This function
# constructs the entire CIFAR model but shares the variables across
# all towers.
loss = tower_loss(scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Add a summary to track the learning rate.
summaries.append(tf.scalar_summary('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(
tf.histogram_summary(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.histogram_summary(var.op.name, var))
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
# Group all updates into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
# Create a saver.
saver = tf.train.Saver(tf.all_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.merge_summary(summaries)
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
|
|
from math import isfinite
from typing import Any, Dict, NamedTuple
from graphql import graphql_sync
from graphql.error import GraphQLError
from graphql.language import ValueNode
from graphql.pyutils import inspect
from graphql.type import (
GraphQLArgument,
GraphQLField,
GraphQLFloat,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
)
from graphql.utilities import value_from_ast_untyped
# this test is not (yet) part of GraphQL.js, see
# https://github.com/graphql/graphql-js/issues/2657
class Money(NamedTuple):
amount: float
currency: str
def is_finite(value: Any) -> bool:
"""Return true if a value is a finite number."""
return (isinstance(value, int) and not isinstance(value, bool)) or (
isinstance(value, float) and isfinite(value)
)
def serialize_money(output_value: Any) -> Dict[str, float]:
if not isinstance(output_value, Money):
raise GraphQLError("Cannot serialize money value: " + inspect(output_value))
return output_value._asdict()
def parse_money_value(input_value: Any) -> Money:
if not isinstance(input_value, Money):
raise GraphQLError("Cannot parse money value: " + inspect(input_value))
return input_value
def parse_money_literal(value_node: ValueNode, variables=None) -> Money:
money = value_from_ast_untyped(value_node, variables)
if variables is not None and (
# variables are not set when checked with ValuesOfCorrectTypeRule
not money
or not is_finite(money.get("amount"))
or not isinstance(money.get("currency"), str)
):
raise GraphQLError("Cannot parse literal money value: " + inspect(money))
return Money(**money)
MoneyScalar = GraphQLScalarType(
name="Money",
serialize=serialize_money,
parse_value=parse_money_value,
parse_literal=parse_money_literal,
)
def resolve_balance(root, _info):
return root
def resolve_to_euros(_root, _info, money):
amount = money.amount
currency = money.currency
if not amount or currency == "EUR":
return amount
if currency == "DM":
return amount * 0.5
raise ValueError("Cannot convert to euros: " + inspect(money))
schema = GraphQLSchema(
query=GraphQLObjectType(
name="RootQueryType",
fields={
"balance": GraphQLField(MoneyScalar, resolve=resolve_balance),
"toEuros": GraphQLField(
GraphQLFloat,
args={"money": GraphQLArgument(MoneyScalar)},
resolve=resolve_to_euros,
),
},
)
)
def describe_custom_scalar():
def serialize():
source = """
{
balance
}
"""
result = graphql_sync(schema, source, root_value=Money(42, "DM"))
assert result == ({"balance": {"amount": 42, "currency": "DM"}}, None)
def serialize_with_error():
source = """
{
balance
}
"""
result = graphql_sync(schema, source, root_value=21)
assert result == (
{"balance": None},
[
{
"message": "Cannot serialize money value: 21",
"locations": [(3, 15)],
"path": ["balance"],
}
],
)
def parse_value():
source = """
query Money($money: Money!) {
toEuros(money: $money)
}
"""
result = graphql_sync(
schema, source, variable_values={"money": Money(24, "EUR")}
)
assert result == ({"toEuros": 24}, None)
result = graphql_sync(
schema, source, variable_values={"money": Money(42, "DM")}
)
assert result == ({"toEuros": 21}, None)
def parse_value_with_error():
source = """
query Money($money: Money!) {
toEuros(money: $money)
}
"""
result = graphql_sync(
schema, source, variable_values={"money": Money(42, "USD")}
)
assert result == (
{"toEuros": None},
[
{
"message": "Cannot convert to euros: (42, 'USD')",
"locations": [(3, 15)],
}
],
)
result = graphql_sync(schema, source, variable_values={"money": 21})
assert result == (
None,
[
{
"message": "Variable '$money' got invalid value 21;"
" Cannot parse money value: 21",
"locations": [(2, 25)],
}
],
)
def parse_literal():
source = """
query Money($amount: Float!, $currency: String!) {
toEuros(money: {amount: $amount, currency: $currency})
}
"""
variable_values = {"amount": 42, "currency": "DM"}
result = graphql_sync(schema, source, variable_values=variable_values)
assert result == ({"toEuros": 21}, None)
def parse_literal_with_errors():
source = """
query Money($amount: String!, $currency: Float!) {
toEuros(money: {amount: $amount, currency: $currency})
}
"""
variable_values = {"amount": "DM", "currency": 42}
result = graphql_sync(schema, source, variable_values=variable_values)
assert result == (
{"toEuros": None},
[
{
"message": "Argument 'money' has invalid value"
" {amount: $amount, currency: $currency}.",
"locations": [(3, 30)],
},
],
)
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import webob
from senlin.api.common import version_request as vr
from senlin.api.common import wsgi
from senlin.api.middleware import version_negotiation as vn
from senlin.api.openstack import versions as os_ver
from senlin.common import exception
from senlin.tests.unit.common import base
class VersionController(object):
pass
class VersionNegotiationMiddlewareTest(base.SenlinTestCase):
def _version_controller_factory(self, conf):
return VersionController()
def test_match_version_string(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({})
major = 1
minor = 0
match = version_negotiation._match_version_string(
'v{0}.{1}'.format(major, minor), request)
self.assertTrue(match)
self.assertEqual(major, request.environ['api.major'])
self.assertEqual(minor, request.environ['api.minor'])
def test_not_match_version_string(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({})
match = version_negotiation._match_version_string("invalid", request)
self.assertFalse(match)
def test_return_version_controller_when_request_path_is_version(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({'PATH_INFO': 'versions'})
response = version_negotiation.process_request(request)
self.assertIsInstance(response, VersionController)
def test_return_version_controller_when_request_path_is_empty(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({'PATH_INFO': '/'})
response = version_negotiation.process_request(request)
self.assertIsInstance(response, VersionController)
def test_request_path_contains_valid_version(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
major = 1
minor = 0
request = webob.Request({'PATH_INFO':
'v{0}.{1}/resource'.format(major, minor)})
response = version_negotiation.process_request(request)
self.assertIsNone(response)
self.assertEqual(major, request.environ['api.major'])
self.assertEqual(minor, request.environ['api.minor'])
def test_removes_version_from_request_path(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
expected_path = 'resource'
request = webob.Request({'PATH_INFO': 'v1.0/%s' % expected_path})
response = version_negotiation.process_request(request)
self.assertIsNone(response)
self.assertEqual(expected_path, request.path_info_peek())
def test_request_path_contains_unknown_version(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({'PATH_INFO': 'v2.0/resource'})
response = version_negotiation.process_request(request)
self.assertIsInstance(response, VersionController)
def test_accept_header_contains_valid_version(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
major = 1
minor = 0
request = webob.Request({'PATH_INFO': 'resource'})
request.headers['Accept'] = 'application/vnd.openstack.clustering-v1.0'
response = version_negotiation.process_request(request)
self.assertIsNone(response)
self.assertEqual(major, request.environ['api.major'])
self.assertEqual(minor, request.environ['api.minor'])
def test_accept_header_contains_unknown_version(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({'PATH_INFO': 'resource'})
request.headers['Accept'] = 'application/vnd.openstack.clustering-v2.0'
response = version_negotiation.process_request(request)
self.assertIsInstance(response, VersionController)
request.headers['Accept'] = 'application/vnd.openstack.clustering-vab'
response = version_negotiation.process_request(request)
self.assertIsNone(response)
def test_no_URI_version_accept_header_contains_invalid_MIME_type(self):
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
request = webob.Request({'PATH_INFO': 'resource'})
request.headers['Accept'] = 'application/invalidMIMEType'
response = version_negotiation.process_request(request)
self.assertIsInstance(response, webob.exc.HTTPNotFound)
request.headers['Accept'] = ''
response = version_negotiation.process_request(request)
self.assertIsInstance(response, webob.exc.HTTPNotFound)
def test_check_version_request(self):
request = webob.Request({'PATH_INFO': 'resource'})
request.headers[wsgi.API_VERSION_KEY] = 'cluster 1.0,compute 2.0'
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
version_negotiation.check_version_request(request)
self.assertIsNotNone(request.version_request)
expected = vr.APIVersionRequest('1.0')
self.assertEqual(expected, request.version_request)
def test_check_version_request_default(self):
request = webob.Request({'PATH_INFO': 'resource'})
request.headers[wsgi.API_VERSION_KEY] = 'compute 2.0'
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
version_negotiation.check_version_request(request)
self.assertIsNotNone(request.version_request)
expected = vr.APIVersionRequest(wsgi.DEFAULT_API_VERSION)
self.assertEqual(expected, request.version_request)
def test_check_version_request_invalid_format(self):
request = webob.Request({'PATH_INFO': 'resource'})
request.headers[wsgi.API_VERSION_KEY] = 'cluster 2.03'
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
ex = self.assertRaises(webob.exc.HTTPBadRequest,
version_negotiation.check_version_request,
request)
self.assertEqual("API Version String (2.03) is of invalid format. It "
"must be of format 'major.minor'.",
six.text_type(ex))
def test_check_version_request_invalid_version(self):
request = webob.Request({'PATH_INFO': 'resource'})
request.headers[wsgi.API_VERSION_KEY] = 'cluster 2.3'
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
ex = self.assertRaises(exception.InvalidGlobalAPIVersion,
version_negotiation.check_version_request,
request)
expected = ("Version 2.3 is not supported by the API. Minimum is "
"%(min_ver)s and maximum is %(max_ver)s." %
{'min_ver': str(os_ver.min_api_version()),
'max_ver': str(os_ver.max_api_version())})
self.assertEqual(expected, six.text_type(ex))
def test_check_version_request_latest(self):
request = webob.Request({'PATH_INFO': 'resource'})
request.headers[wsgi.API_VERSION_KEY] = 'cluster Latest'
version_negotiation = vn.VersionNegotiationFilter(
self._version_controller_factory, None, None)
version_negotiation.check_version_request(request)
self.assertIsNotNone(request.version_request)
expected = os_ver.max_api_version()
self.assertEqual(expected, request.version_request)
|
|
"""functions used to construct different architectures
Several Functions have been borrowed and modified from https://github.com/openai/pixel-cnn
"""
import tensorflow as tf
import numpy as np
FLAGS = tf.app.flags.FLAGS
def int_shape(x):
return list(map(int, x.get_shape()))
def concat_elu(x):
""" like concatenated ReLU (http://arxiv.org/abs/1603.05201), but then with ELU """
axis = len(x.get_shape())-1
return tf.nn.elu(tf.concat([x, -x], axis))
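# Note: concat_elu concatenates x with -x before applying the ELU, so its
# output has twice as many channels as its input (e.g. [batch, h, w, 16]
# becomes [batch, h, w, 32]); layers placed after a concat_elu nonlinearity
# therefore see a doubled channel count.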
def set_nonlinearity(name):
if name == 'concat_elu':
return concat_elu
elif name == 'elu':
return tf.nn.elu
elif name == 'concat_relu':
return tf.nn.crelu
elif name == 'relu':
return tf.nn.relu
else:
    raise ValueError('nonlinearity ' + name + ' is not supported')
def _activation_summary(x):
tensor_name = x.op.name
with tf.device('/cpu:0'):
tf.summary.histogram(tensor_name + '/activations', x)
tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))
def _variable(name, shape, initializer):
var = tf.get_variable(name, shape, initializer=initializer)
_activation_summary(var)
return var
def mobius_pad(inputs, padding):
# pads edges in a mobius way (code could be cleaner)
# pad axis 1
top = inputs[:,-1:]
bottom = inputs[:,:1]
if padding[0] == "zeros":
top = tf.zeros_like(top)
bottom = tf.zeros_like(bottom)
inputs = tf.concat([top, inputs, bottom], axis=1)
# pad axis 2
left = inputs[:,:,-1:]
right = inputs[:,:,:1]
if padding[1] == "zeros":
left = tf.zeros_like(left)
right = tf.zeros_like(right)
inputs = tf.concat([left, inputs, right], axis=2)
# pad axis 3
if len(padding) == 3:
z_in = inputs[:,:,:,-1:]
z_out = inputs[:,:,:,:1]
if padding[1] == "zeros":
z_in = tf.zeros_like(z_in)
z_out = tf.zeros_like(z_out)
inputs = tf.concat([z_in, inputs, z_out], axis=3)
return inputs
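# Illustrative note on mobius_pad (not executed): with "mobius" padding each
# spatial axis is wrapped around by one element, so a row [a, b, c] becomes
# [c, a, b, c, a]; with "zeros" padding the wrapped edges are replaced by
# zeros instead, giving [0, a, b, c, 0]. Either way the tensor grows by two
# elements along every padded axis before the VALID convolutions below
# consume them.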
def simple_conv_2d(x, k):
"""A simplified 2D convolution operation"""
y = tf.nn.conv2d(x, k, [1, 1, 1, 1], padding='VALID')
return y
def simple_conv_3d(x, k):
"""A simplified 3D convolution operation"""
y = tf.nn.conv3d(x, k, [1, 1, 1, 1, 1], padding='VALID')
return y
def conv_layer(inputs, kernel_size, stride, num_features, padding, idx, nonlinearity=None):
with tf.variable_scope('{0}_conv'.format(idx)) as scope:
input_channels = int(inputs.get_shape()[-1])
# determine dim
length_input = len(inputs.get_shape()) - 2
if length_input not in [2, 3]:
print("conv layer does not support non 2d or 3d inputs")
exit()
# make variables
weights = _variable('weights', shape=length_input*[kernel_size] + [input_channels,num_features],initializer=tf.contrib.layers.xavier_initializer_conv2d())
biases = _variable('biases',[num_features],initializer=tf.contrib.layers.xavier_initializer_conv2d())
# pad it mobius
inputs = mobius_pad(inputs, padding)
if length_input == 2:
conv = tf.nn.conv2d(inputs, weights, strides=[1, stride, stride, 1], padding='VALID')
elif length_input == 3:
conv = tf.nn.conv3d(inputs, weights, strides=[1, stride, stride, stride, 1], padding='VALID')
conv = tf.nn.bias_add(conv, biases)
if nonlinearity is not None:
conv = nonlinearity(conv)
return conv
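# Size bookkeeping for conv_layer: the one-element pad from mobius_pad plus a
# VALID convolution means a 3x3 kernel with stride 1 preserves the spatial
# size, while a 4x4 kernel with stride 2 halves it (for even spatial sizes);
# these are the two configurations used by res_block further down.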
def simple_trans_conv_2d(x, k):
"""A simplified 2D trans convolution operation"""
output_shape = tf.stack([tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], tf.shape(k)[2]])
y = tf.nn.conv2d_transpose(x, k, output_shape, [1, 1, 1, 1], padding='SAME')
y = tf.reshape(y, [int(x.get_shape()[0]), int(x.get_shape()[1]), int(x.get_shape()[2]), int(k.get_shape()[2])])
return y
def simple_trans_conv_3d(x, k):
"""A simplified 3D trans convolution operation"""
output_shape = tf.stack([tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], tf.shape(x)[3], tf.shape(k)[3]])
y = tf.nn.conv3d_transpose(x, k, output_shape, [1, 1, 1, 1, 1], padding='SAME')
y = tf.reshape(y, [int(x.get_shape()[0]), int(x.get_shape()[1]), int(x.get_shape()[2]), int(x.get_shape()[3]), int(k.get_shape()[3])])
return y
def transpose_conv_layer(inputs, kernel_size, stride, num_features, padding, idx, nonlinearity=None):
with tf.variable_scope('{0}_trans_conv'.format(idx)) as scope:
input_channels = int(inputs.get_shape()[-1])
# determine dim
length_input = len(inputs.get_shape()) - 2
batch_size = tf.shape(inputs)[0]
if length_input not in [2, 3]:
print("transpose conv layer does not support non 2d or 3d inputs")
exit()
# make variables
weights = _variable('weights', shape=length_input*[kernel_size] + [num_features,input_channels],initializer=tf.contrib.layers.xavier_initializer_conv2d())
biases = _variable('biases',[num_features],initializer=tf.contrib.layers.xavier_initializer_conv2d())
# pad it mobius
inputs_pad = mobius_pad(inputs, padding)
if length_input == 2:
output_shape = tf.stack([tf.shape(inputs_pad)[0], tf.shape(inputs_pad)[1]*stride, tf.shape(inputs_pad)[2]*stride, num_features])
conv = tf.nn.conv2d_transpose(inputs_pad, weights, output_shape, strides=[1,stride,stride,1], padding='SAME')
conv = conv[:,2:-2,2:-2]
elif length_input == 3:
output_shape = tf.stack([tf.shape(inputs)[0], tf.shape(inputs_pad)[1]*stride, tf.shape(inputs_pad)[2]*stride, tf.shape(inputs_pad)[3]*stride, num_features])
conv = tf.nn.conv3d_transpose(inputs_pad, weights, output_shape, strides=[1,stride,stride,stride,1], padding='SAME')
conv = conv[:,2:-2,2:-2,2:-2]
conv_biased = tf.nn.bias_add(conv, biases)
if nonlinearity is not None:
conv_biased = nonlinearity(conv_biased)
#reshape (transpose conv causes output to have ? size)
shape = int_shape(inputs)
if length_input == 2:
conv_biased = tf.reshape(conv_biased, [shape[0], shape[1]*stride, shape[2]*stride, num_features])
if length_input == 3:
conv_biased = tf.reshape(conv_biased, [shape[0], shape[1]*stride, shape[2]*stride, shape[3]*stride, num_features])
return conv_biased
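# Note on the fixed [2:-2] crop above: it is only correct when this layer is
# used as a stride-2 upsampler, where the one-element mobius pad on each side
# expands to two extra output elements per side. Calling it with a different
# stride would trim the wrong amount.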
def fc_layer(inputs, hiddens, idx, nonlinearity=None, flat = False):
with tf.variable_scope('{0}_fc'.format(idx)) as scope:
input_shape = inputs.get_shape().as_list()
if flat:
dim = input_shape[1]*input_shape[2]*input_shape[3]
inputs_processed = tf.reshape(inputs, [-1,dim])
else:
dim = input_shape[1]
inputs_processed = inputs
weights = _variable('weights', shape=[dim,hiddens],initializer=tf.contrib.layers.xavier_initializer())
biases = _variable('biases', [hiddens], initializer=tf.contrib.layers.xavier_initializer())
output_biased = tf.add(tf.matmul(inputs_processed,weights),biases,name=str(idx)+'_fc')
if nonlinearity is not None:
      output_biased = nonlinearity(output_biased)
return output_biased
def nin(x, num_units, idx):
""" a network in network layer (1x1 CONV) """
s = int_shape(x)
x = tf.reshape(x, [np.prod(s[:-1]),s[-1]])
x = fc_layer(x, num_units, idx)
return tf.reshape(x, s[:-1]+[num_units])
def _phase_shift(I, r):
bsize, a, b, c = I.get_shape().as_list()
bsize = tf.shape(I)[0] # Handling Dimension(None) type for undefined batch dim
X = tf.reshape(I, (bsize, a, b, r, r))
  X = tf.transpose(X, (0, 1, 2, 4, 3))  # bsize, a, b, r, r
  X = tf.split(X, a, 1)  # a, [bsize, b, r, r]
  X = tf.concat([tf.squeeze(x) for x in X], 2)  # bsize, b, a*r, r
  X = tf.split(X, b, 1)  # b, [bsize, a*r, r]
  X = tf.concat([tf.squeeze(x) for x in X], 2)  # bsize, a*r, b*r
return tf.reshape(X, (bsize, a*r, b*r, 1))
def PS(X, r, depth):
  Xc = tf.split(X, depth, 3)
  X = tf.concat([_phase_shift(x, r) for x in Xc], 3)
return X
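# _phase_shift/PS implement the subpixel ("pixel shuffle") upsampling of
# Shi et al. (https://arxiv.org/abs/1609.05158): a tensor of shape
# [batch, a, b, depth * r * r] is rearranged, one group of r*r channels at a
# time, into [batch, a*r, b*r, depth], trading channel depth for spatial
# resolution without any learned parameters.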
def trim_tensor(tensor, pos, width, trim_type):
tensor_shape = int_shape(tensor)
tensor_length = len(tensor_shape)
if tensor_length == 4:
if (pos-width < 0) or (pos+width+1 > max(tensor_shape[0],tensor_shape[1])):
print("this should probably never be called")
return tensor
elif trim_type == "point":
tensor = tensor[:,pos-width:pos+width+1,pos-width:pos+width+1]
elif trim_type == "line":
tensor = tensor[:,pos-width:pos+width+1]
elif trim_type == "plane":
print("can not extract a plane from a plane")
elif tensor_length == 5:
if (pos-width < 0) or (pos+width+1 > max(tensor_shape[0],tensor_shape[1],tensor_shape[2])):
return tensor
elif trim_type == "point":
tensor = tensor[:,pos-width:pos+width+1,pos-width:pos+width+1,pos-width:pos+width+1]
elif trim_type == "line":
tensor = tensor[:,pos-width:pos+width+1,pos-width:pos+width+1]
elif trim_type == "plane":
tensor = tensor[:,pos-width:pos+width+1]
else:
print("tensor size not supported")
exit()
return tensor
def res_block(x, a=None, filter_size=16, nonlinearity=concat_elu, keep_p=1.0, stride=1, gated=False, padding=["mobius", "mobius"], name="resnet", begin_nonlinearity=True):
# determine if 2d or 3d trans conv is needed
length_input = len(x.get_shape())
orig_x = x
if begin_nonlinearity:
x = nonlinearity(x)
if stride == 1:
x = conv_layer(x, 3, stride, filter_size, padding, name + '_conv_1')
elif stride == 2:
x = conv_layer(x, 4, stride, filter_size, padding, name + '_conv_1')
else:
print("stride > 2 is not supported")
exit()
if a is not None:
shape_a = int_shape(a)
shape_x_1 = int_shape(x)
if length_input == 4:
a = tf.pad(
a, [[0, 0], [0, shape_x_1[1]-shape_a[1]], [0, shape_x_1[2]-shape_a[2]],
[0, 0]])
elif length_input == 5:
a = tf.pad(
a, [[0, 0], [0, shape_x_1[1]-shape_a[1]], [0, shape_x_1[2]-shape_a[2]], [0, shape_x_1[3]-shape_a[3]],
[0, 0]])
x += nin(nonlinearity(a), filter_size, name + '_nin')
x = nonlinearity(x)
if keep_p < 1.0:
x = tf.nn.dropout(x, keep_prob=keep_p)
if not gated:
x = conv_layer(x, 3, 1, filter_size, padding, name + '_conv_2')
else:
x = conv_layer(x, 3, 1, filter_size*2, padding, name + '_conv_2')
x_1, x_2 = tf.split(x,2,-1)
x = x_1 * tf.nn.sigmoid(x_2)
if int(orig_x.get_shape()[2]) > int(x.get_shape()[2]):
if length_input == 4:
orig_x = tf.nn.avg_pool(orig_x, [1,2,2,1], [1,2,2,1], padding='SAME')
elif length_input == 5:
orig_x = tf.nn.avg_pool3d(orig_x, [1,2,2,2,1], [1,2,2,2,1], padding='SAME')
# pad it
out_filter = filter_size
in_filter = int(orig_x.get_shape()[-1])
if out_filter > in_filter:
if length_input == 4:
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0], [0, 0],
[(out_filter-in_filter), 0]])
elif length_input == 5:
orig_x = tf.pad(
orig_x, [[0, 0], [0, 0], [0, 0], [0, 0],
[(out_filter-in_filter), 0]])
elif out_filter < in_filter:
orig_x = nin(orig_x, out_filter, name + '_nin_pad')
return orig_x + x
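# When gated=True, the second convolution in res_block produces twice the
# target number of filters and the result is split into (x_1, x_2) with
# x = x_1 * sigmoid(x_2), a GLU-style gate similar to the gated residual
# blocks in the openai/pixel-cnn code this file borrows from.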
"""
def res_block_lstm(x, hidden_state_1=None, hidden_state_2=None, keep_p=1.0, name="resnet_lstm"):
orig_x = x
filter_size = orig_x.get_shape()
with tf.variable_scope(name + "_conv_LSTM_1", initializer = tf.random_uniform_initializer(-0.01, 0.01)):
lstm_cell_1 = BasicConvLSTMCell.BasicConvLSTMCell([int(x.get_shape()[1]),int(x.get_shape()[2])], [3,3], filter_size)
if hidden_state_1 == None:
batch_size = x.get_shape()[0]
hidden_state_1 = lstm_cell_1.zero_state(batch_size, tf.float32)
x_1, hidden_state_1 = lstm_cell_1(x, hidden_state_1)
if keep_p < 1.0:
x_1 = tf.nn.dropout(x_1, keep_prob=keep_p)
with tf.variable_scope(name + "_conv_LSTM_2", initializer = tf.random_uniform_initializer(-0.01, 0.01)):
lstm_cell_2 = BasicConvLSTMCell.BasicConvLSTMCell([int(x_1.get_shape()[1]),int(x_1.get_shape()[2])], [3,3], filter_size)
if hidden_state_2 == None:
batch_size = x_1.get_shape()[0]
hidden_state_2 = lstm_cell_2.zero_state(batch_size, tf.float32)
x_2, hidden_state_2 = lstm_cell_2(x_1, hidden_state_2)
return orig_x + x_2, hidden_state_1, hidden_state_2
"""
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class ConcatOpTest(test.TestCase):
def testHStack(self):
with self.test_session(use_gpu=True):
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
c = array_ops.concat([p1, p2], 0)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:4, :], params[p1])
self.assertAllEqual(result[4:, :], params[p2])
def testVStack(self):
with self.test_session(use_gpu=True):
p1 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
p2 = array_ops.placeholder(dtypes.float32, shape=[4, 4])
c = array_ops.concat([p1, p2], 1)
params = {
p1: np.random.rand(4, 4).astype("f"),
p2: np.random.rand(4, 4).astype("f")
}
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:, :4], params[p1])
self.assertAllEqual(result[:, 4:], params[p2])
def testInt32GPU(self):
with self.test_session(use_gpu=True):
p1 = np.random.rand(2, 3).astype("i")
p2 = np.random.rand(2, 3).astype("i")
x1 = constant_op.constant(p1)
x2 = constant_op.constant(p2)
c = array_ops.concat([x1, x2], 0)
result = c.eval()
self.assertAllEqual(result[:2, :], p1)
self.assertAllEqual(result[2:, :], p2)
def testRefType(self):
with self.test_session(use_gpu=True):
p1 = np.random.rand(4, 4).astype("f")
p2 = np.random.rand(4, 4).astype("f")
v1 = variables.Variable(p1)
v2 = variables.Variable(p2)
c = array_ops.concat([v1, v2], 0)
variables.global_variables_initializer().run()
result = c.eval()
self.assertEqual(result.shape, c.get_shape())
self.assertAllEqual(result[:4, :], p1)
self.assertAllEqual(result[4:, :], p2)
def _testRandom(self, dtype):
# Random dims of rank 5
shape = np.random.randint(1, 5, size=5)
# Random number of tensors, but always > 1.
num_tensors = np.random.randint(2, 10)
# Random dim to concat on
concat_dim = np.random.randint(5)
params = {}
if dtype == dtypes.bfloat16:
dtype_feed = dtypes.float32
else:
dtype_feed = dtype
with self.test_session(use_gpu=True):
p = []
for i in np.arange(num_tensors):
input_shape = shape
input_shape[concat_dim] = np.random.randint(1, 5)
placeholder = array_ops.placeholder(dtype_feed, shape=input_shape)
p.append(placeholder)
t = dtype_feed.as_numpy_dtype
params[placeholder] = np.random.rand(*input_shape).astype(t)
if dtype != dtype_feed:
concat_inputs = [math_ops.cast(p_i, dtype) for p_i in p]
else:
concat_inputs = p
c = array_ops.concat(concat_inputs, concat_dim)
if dtype != dtype_feed:
c = math_ops.cast(c, dtype_feed)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
cur_offset = 0
for i in np.arange(num_tensors):
# The index into the result is the ':' along all dimensions
# except the concat_dim. slice(0, size) is used for ':', and
# a list of slices is used to index into result.
ind = [slice(0, params[p[i]].shape[j]) for j in np.arange(5)]
ind[concat_dim] = slice(cur_offset,
cur_offset + params[p[i]].shape[concat_dim])
cur_offset += params[p[i]].shape[concat_dim]
if dtype == dtype_feed:
self.assertAllEqual(result[ind], params[p[i]])
else:
self.assertAllClose(result[ind], params[p[i]], 0.01)
def testRandom(self):
self._testRandom(dtypes.bool)
self._testRandom(dtypes.float32)
self._testRandom(dtypes.int16)
self._testRandom(dtypes.int32)
self._testRandom(dtypes.int64)
self._testRandom(dtypes.bfloat16)
self._testRandom(dtypes.complex64)
self._testRandom(dtypes.complex128)
def testInvalidConcatDimTypeAndShape(self):
a = variables.Variable(constant_op.constant(1.0, shape=[1]))
b = variables.Variable(constant_op.constant(2.0, shape=[1]))
with self.assertRaises(ValueError):
array_ops.concat(b, a)
with self.assertRaises(TypeError):
array_ops.concat(1, 4.2)
with self.assertRaises(ValueError):
array_ops.concat(1, a)
with self.assertRaises(TypeError):
array_ops.concat([a, b], a)
with self.assertRaises(ValueError):
array_ops.concat([a, b], [3])
with self.assertRaises(ValueError):
array_ops.concat([], 0)
# An integer tensor for shape dim should throw no error.
array_ops.concat(1, constant_op.constant(0, shape=[]))
# A non-scalar tensor for shape should throw ValueError.
with self.assertRaises(ValueError):
array_ops.concat(1, constant_op.constant(0, shape=[1]))
def _testGradientsSimple(self, dtype):
# Test both positive and negative concat axis.
# -2 and 1 correspond to the same axis for 3-dimensional tensors.
for axis in [-2, 1]:
with self.test_session(use_gpu=True):
inp = []
inp_tensors = []
for x in [1, 2, 6]:
shape = [10, x, 2]
t = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
t += -1j * t
inp.append(t)
inp_tensors.append(
constant_op.constant(
t.flatten(),
shape=shape,
dtype=dtype))
c = array_ops.concat(inp_tensors, axis)
output_shape = [10, 9, 2]
grad_inp = np.random.rand(*output_shape).astype(dtype.as_numpy_dtype)
if dtype.is_complex:
grad_inp += -1j * grad_inp
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, axis)
result = concated_grad.eval()
self.assertAllEqual(result, grad_inp)
def testGradientsSimple(self):
self._testGradientsSimple(dtypes.float32)
self._testGradientsSimple(dtypes.complex64)
def testGradientsFirstDim(self):
with self.test_session(use_gpu=True):
inp = []
inp_tensors = []
for x in [1, 2, 6]:
shape = [x, 10, 2]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
t.flatten(),
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 0)
output_shape = [9, 10, 2]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, 0)
result = concated_grad.eval()
self.assertAllEqual(result, grad_inp)
def testGradientsLastDim(self):
# Test both positive and negative concat axis.
# -1 and 2 correspond to the same axis for 3-dimensional tensors.
for axis in [-1, 2]:
with self.test_session(use_gpu=True):
inp = []
inp_tensors = []
for x in [1, 2, 6]:
shape = [10, 2, x]
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(
t.flatten(),
shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, 2)
output_shape = [10, 2, 9]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, axis)
result = concated_grad.eval()
self.assertAllEqual(result, grad_inp)
def _RunAndVerifyGradientsRandom(self):
# Random dims of rank 5
input_shape = np.random.randint(1, 5, size=5)
# Random number of tensors
num_tensors = np.random.randint(12, 20)
# Random dim to concat on
concat_dim = np.random.randint(5)
concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
with self.test_session(use_gpu=True):
inp = []
inp_tensors = []
for x in concat_dim_sizes:
shape = input_shape
shape[concat_dim] = x
t = np.random.rand(*shape).astype("f")
inp.append(t)
inp_tensors.append(
constant_op.constant(t.flatten(), shape=shape,
dtype=dtypes.float32))
c = array_ops.concat(inp_tensors, concat_dim)
output_shape = input_shape
output_shape[concat_dim] = concat_dim_sizes.sum()
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, concat_dim)
result = concated_grad.eval()
self.assertAllEqual(result, grad_inp)
def testGradientsRandom(self):
for _ in range(5):
self._RunAndVerifyGradientsRandom()
def testGradientWithUnknownInputDim(self):
with self.test_session(use_gpu=True):
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = array_ops.concat([x, y], 2)
output_shape = [10, 2, 9]
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], [x, y], [grad_tensor])
concated_grad = array_ops.concat(grad, 2)
params = {
x: np.random.rand(10, 2, 3).astype("f"),
y: np.random.rand(10, 2, 6).astype("f")
}
result = concated_grad.eval(feed_dict=params)
self.assertAllEqual(result, grad_inp)
def testShapeError(self):
# Rank doesn't match.
with self.assertRaises(ValueError):
array_ops.concat(
[constant_op.constant(10.0, shape=[4, 4, 4, 4]),
constant_op.constant(20.0, shape=[4, 4, 4])
], 1)
# Dimensions don't match in a non-concat dim.
with self.assertRaises(ValueError):
array_ops.concat(
[constant_op.constant(10.0, shape=[1, 2, 1]),
constant_op.constant(20.0, shape=[3, 2, 1])
], 1)
# concat_dim out of range.
with self.assertRaises(ValueError):
array_ops.concat(
[constant_op.constant(10.0, shape=[4, 4, 4]),
constant_op.constant(20.0, shape=[4, 4, 4])
], 3)
# concat_dim out of range
with self.assertRaises(ValueError):
array_ops.concat(
[constant_op.constant(10.0, shape=[4, 4, 4]),
constant_op.constant(20.0, shape=[4, 4, 4])
], -4)
def testShapeWithUnknownConcatDim(self):
p1 = array_ops.placeholder(dtypes.float32)
c1 = constant_op.constant(10.0, shape=[4, 4, 4, 4])
p2 = array_ops.placeholder(dtypes.float32)
c2 = constant_op.constant(20.0, shape=[4, 4, 4, 4])
dim = array_ops.placeholder(dtypes.int32)
concat = array_ops.concat([p1, c1, p2, c2], dim)
self.assertEqual(4, concat.get_shape().ndims)
# All dimensions unknown.
concat2 = array_ops.concat([p1, p2], dim)
self.assertEqual(None, concat2.get_shape())
# Rank doesn't match.
c3 = constant_op.constant(30.0, shape=[4, 4, 4])
with self.assertRaises(ValueError):
array_ops.concat([p1, c1, p2, c3], dim)
def testZeroSize(self):
# Verify that concat doesn't crash and burn for zero size inputs
np.random.seed(7)
with self.test_session(use_gpu=True) as sess:
for shape0 in (), (2,):
axis = len(shape0)
for shape1 in (), (3,):
for n0 in 0, 1, 2:
for n1 in 0, 1, 2:
x0 = np.random.randn(*(shape0 + (n0,) + shape1))
x1 = np.random.randn(*(shape0 + (n1,) + shape1))
correct = np.concatenate([x0, x1], axis=axis)
# TODO(irving): Make tf.concat handle map, then drop list().
xs = list(map(constant_op.constant, [x0, x1]))
c = array_ops.concat(xs, axis)
self.assertAllEqual(c.eval(), correct)
# Check gradients
dc = np.random.randn(*c.get_shape().as_list())
dxs = sess.run(gradients_impl.gradients(c, xs, dc))
self.assertAllEqual(dc, np.concatenate(dxs, axis=axis))
def testTensorConcatDim0Grad(self):
x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
output_shape = [44, 7, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.test_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
output = array_ops.concat(xs, 0)
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
def testTensorConcatDim1Grad(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [20, 11, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.test_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
output = array_ops.concat(xs, 1)
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
def testIndexedSlicesConcatDim0Grad(self):
x_shapes = [[20, 7, 3], [10, 7, 3], [14, 7, 3]]
output_shape = [4, 7, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.test_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
x_concat = array_ops.concat(xs, 0)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
def testIndexedSlicesConcatDim1Grad(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [4, 11, 3]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.test_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
x_concat = array_ops.concat(xs, 1)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
def testIndexedSlicesConcatDim2Grad(self):
x_shapes = [[20, 7, 3], [20, 7, 1], [20, 7, 2]]
output_shape = [4, 7, 6]
x_vals = [
np.random.random_sample(x_shape).astype(np.float64)
for x_shape in x_shapes
]
with self.test_session():
xs = [constant_op.constant(x_val) for x_val in x_vals]
x_concat = array_ops.concat(xs, 2)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape)
self.assertLess(err, 1e-11)
def testIndexedSlicesConcatDim1Grad_UnknownInputDim(self):
x_shapes = [[20, 7, 3], [20, 3, 3], [20, 1, 3]]
output_shape = [4, 11, 3]
with self.test_session():
x_1 = array_ops.placeholder(dtypes.float64)
x_2 = array_ops.placeholder(dtypes.float64)
x_3 = array_ops.placeholder(dtypes.float64)
xs = [x_1, x_2, x_3]
x_concat = array_ops.concat(xs, 1)
output = array_ops.gather(x_concat, [1, 2, 0, 5])
params = {
x_1: np.random.random_sample(x_shapes[0]).astype(np.float64),
x_2: np.random.random_sample(x_shapes[1]).astype(np.float64),
x_3: np.random.random_sample(x_shapes[2]).astype(np.float64)
}
err = gradient_checker.compute_gradient_error(xs, x_shapes, output,
output_shape,
extra_feed_dict=params)
self.assertLess(err, 1e-11)
def testConcatTuple(self):
c1 = np.random.rand(4, 4)
c2 = np.random.rand(4, 4)
with self.test_session():
concat_list_t = array_ops.concat([c1, c2], 0)
concat_tuple_t = array_ops.concat((c1, c2), 0)
self.assertAllEqual(concat_list_t.eval(), concat_tuple_t.eval())
def testConcatNoScalars(self):
with self.test_session():
scalar = constant_op.constant(7)
dim = array_ops.placeholder(dtypes.int32)
with self.assertRaisesRegexp(
ValueError, r"Can't concatenate scalars \(use tf\.stack instead\)"):
array_ops.concat([scalar, scalar, scalar], dim)
  # This is important because the GPU implementation could fail if
  # shared memory is not large enough for all the inputs.
def testConcatLargeNumberOfTensors(self):
with self.test_session(use_gpu=True):
for concat_dim in range(2):
params = {}
p = []
shape = np.array([7, 13])
if test.is_gpu_available():
num_tensors = 5000
else:
num_tensors = 500
for i in np.arange(num_tensors):
input_shape = shape
placeholder = array_ops.placeholder(dtypes.float32, shape=input_shape)
p.append(placeholder)
params[placeholder] = np.random.rand(*input_shape).astype(np.float32)
concat_inputs = p
c = array_ops.concat(concat_inputs, concat_dim)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
cur_offset = 0
for i in np.arange(num_tensors):
# The index into the result is the ':' along all dimensions
# except the concat_dim. slice(0, size) is used for ':', and
# a list of slices is used to index into result.
index = [slice(0, params[p[i]].shape[j]) for j in np.arange(2)]
index[concat_dim] = slice(cur_offset,
cur_offset + params[p[i]].shape[concat_dim])
cur_offset += params[p[i]].shape[concat_dim]
self.assertAllEqual(result[index], params[p[i]])
def testConcatEmpty(self):
with self.test_session(use_gpu=True):
t1 = []
t2 = []
output = gen_array_ops._concat_v2([t1, t2], 0).eval()
self.assertFalse(output) # Checks that output is empty
def testConcatInvalidAxis(self):
with self.assertRaises(ValueError):
with self.test_session(use_gpu=True):
t1 = [1]
t2 = [2]
gen_array_ops._concat_v2([t1, t2], 1).eval()
def testConcatNegativeAxis(self):
with self.test_session(use_gpu=True):
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
c = gen_array_ops._concat_v2([t1, t2], -2)
self.assertEqual([4, 3], c.get_shape().as_list())
output = c.eval()
self.assertAllEqual([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]],
output)
c = gen_array_ops._concat_v2([t1, t2], -1)
self.assertEqual([2, 6], c.get_shape().as_list())
output = c.eval()
self.assertAllEqual([[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]], output)
def _testGradientsForAxis(
self, inp_tensors, axis, output_shape, feed_dict=None):
with self.test_session():
c = array_ops.concat(inp_tensors, axis)
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, axis)
result = concated_grad.eval(feed_dict=feed_dict)
self.assertAllEqual(result, grad_inp)
def _testIndexedSlicesGradientsForAxis(
self, inp_tensors, axis, output_shape, gather_indexes, feed_dict=None):
with self.test_session():
c = array_ops.gather(
array_ops.concat(inp_tensors, axis), gather_indexes)
grad_inp = np.random.rand(*output_shape).astype("f")
grad_tensor = constant_op.constant(
grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.gather(
array_ops.concat(grad, axis), gather_indexes)
result = concated_grad.eval(feed_dict=feed_dict)
self.assertAllEqual(result, grad_inp)
def testGradientsNegativeAxis(self):
x1 = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
x2 = [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]
inp_tensors = [constant_op.constant(x1, shape=(2, 3), dtype=dtypes.float32),
constant_op.constant(x2, shape=(2, 3), dtype=dtypes.float32)]
# Test concat gradient with axis == -2
self._testGradientsForAxis(inp_tensors, -2, output_shape=[4, 3])
# Test concat gradient with unknown-shape tensors.
x1_placeholder = array_ops.placeholder(dtypes.float32)
x2_placeholder = array_ops.placeholder(dtypes.float32)
inp_tensors_placeholders = [x1_placeholder, x2_placeholder]
feed_dict = {x1_placeholder: x1, x2_placeholder: x2}
self._testGradientsForAxis(
inp_tensors_placeholders, -1, output_shape=[2, 6], feed_dict=feed_dict)
# Test IndexedSlices concat gradient.
self._testIndexedSlicesGradientsForAxis(
inp_tensors, -2, output_shape=[2, 3], gather_indexes=[2, 0])
# We don't support calculating IndexedSlices concat gradient for
# negative indexes when rank is not known.
with self.assertRaises(ValueError):
self._testIndexedSlicesGradientsForAxis(
inp_tensors_placeholders, -2, output_shape=[2, 3],
gather_indexes=[2, 0], feed_dict=feed_dict)
class ConcatOffsetTest(test.TestCase):
def testBasic(self):
with self.test_session(use_gpu=True) as sess:
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
ans = sess.run(off)
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
def testNotVector(self):
with self.test_session() as sess:
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([[2, 3, 5]], dtypes.int32)
s1 = constant_op.constant([[2, 7, 5]], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"should be a vector"):
sess.run(off)
def testConcatDimOutOfRange(self):
with self.test_session() as sess:
cdim = constant_op.constant(4, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"Concat dim is out of range: 4 vs. 3"):
sess.run(off)
def testDimMismatch(self):
with self.test_session() as sess:
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5, 10], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
r"should contain 3 elem"):
sess.run(off)
def testSizeMismatch(self):
with self.test_session() as sess:
cdim = constant_op.constant(1, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 10], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1])
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
r"All dimensions except 1 must match. Input 1 has shape \[2 7 10\] "
r"and doesn't match input 0 with shape \[2 3 5\]."):
sess.run(off)
def testNegativeDim(self):
with self.test_session(use_gpu=True) as sess:
cdim = constant_op.constant(-2, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([2, 7, 5], dtypes.int32)
s2 = constant_op.constant([2, 20, 5], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
ans = sess.run(off)
self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
cdim = constant_op.constant(-3, dtypes.int32)
s0 = constant_op.constant([2, 3, 5], dtypes.int32)
s1 = constant_op.constant([1, 3, 5], dtypes.int32)
s2 = constant_op.constant([3, 3, 5], dtypes.int32)
off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
ans = sess.run(off)
self.assertAllEqual(ans, [[0, 0, 0], [2, 0, 0], [3, 0, 0]])
if __name__ == "__main__":
test.main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP68 implementation."""
from test_framework.test_framework import IoPTestFramework
from test_framework.util import *
from test_framework.blocktools import *
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "64: non-BIP68-final"
class BIP68Test(IoPTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [[], ["-acceptnonstdtxn=0"]]
def run_test(self):
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
# Generate some coins
self.nodes[0].generate(110)
self.log.info("Running test disable flag")
self.test_disable_flag()
self.log.info("Running test sequence-lock-confirmed-inputs")
self.test_sequence_lock_confirmed_inputs()
self.log.info("Running test sequence-lock-unconfirmed-inputs")
self.test_sequence_lock_unconfirmed_inputs()
self.log.info("Running test BIP68 not consensus before versionbits activation")
self.test_bip68_not_consensus()
self.log.info("Activating BIP68 (and 112/113)")
self.activateCSV()
self.log.info("Verifying nVersion=2 transactions are standard.")
self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).")
self.test_version2_relay()
self.log.info("Passed")
    # Test that BIP68 is not in effect if tx version is 1, or if
    # the sequence-lock disable bit (the most significant sequence bit) is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 IOP
utxos = self.nodes[0].listunspent(0, 0)
assert(len(utxos) > 0)
utxo = utxos[0]
tx1 = CTransaction()
value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, CScript([b'a']))]
tx1_signed = self.nodes[0].signrawtransaction(ToHex(tx1))["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value-self.relayfee*COIN), CScript([b'a']))]
tx2.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2))
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
while len(self.nodes[0].listunspent()) < 200:
import random
random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for i in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
rawtx = self.nodes[0].signrawtransaction(ToHex(tx))["hex"]
if (using_sequence_locks and not should_pass):
# This transaction should be rejected
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx)
else:
# This raw transaction should be accepted
self.nodes[0].sendrawtransaction(rawtx)
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
# Store height so we can easily reset the chain at the end of the test
cur_height = self.nodes[0].getblockcount()
# Create a mempool tx.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Anyone-can-spend mempool tx.
# Sequence lock of 0 should pass.
tx2 = CTransaction()
tx2.nVersion = 2
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2_raw)
# Create a spend of the 0th output of orig_tx with a sequence lock
# of 1, and test what happens when submitting.
# orig_tx.vout[0] must be an anyone-can-spend output
def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
sequence_value = 1
if not use_height_lock:
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx = CTransaction()
tx.nVersion = 2
tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee*COIN), CScript([b'a']))]
tx.rehash()
if (orig_tx.hash in node.getrawmempool()):
# sendrawtransaction should fail if the tx is in the mempool
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx))
else:
# sendrawtransaction should succeed if the tx is not in the mempool
node.sendrawtransaction(ToHex(tx))
return tx
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Now mine some blocks, but make sure tx2 doesn't get mined.
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN))
cur_time = int(time.time())
for i in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
assert(tx2.hash in self.nodes[0].getrawmempool())
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Mine tx2, and then try again
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(self.relayfee*COIN))
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
self.nodes[0].generate(1)
assert(tx2.hash not in self.nodes[0].getrawmempool())
# Now that tx2 is not in the mempool, a sequence locked spend should
# succeed
tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
assert(tx3.hash in self.nodes[0].getrawmempool())
self.nodes[0].generate(1)
assert(tx3.hash not in self.nodes[0].getrawmempool())
# One more test, this time using height locks
tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx4.hash in self.nodes[0].getrawmempool())
# Now try combining confirmed and unconfirmed inputs
tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
assert(tx5.hash not in self.nodes[0].getrawmempool())
utxos = self.nodes[0].listunspent()
tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransaction(ToHex(tx5))["hex"]
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5)
# Test mempool-BIP68 consistency after reorg
#
# State of the transactions in the last blocks:
# ... -> [ tx2 ] -> [ tx3 ]
# tip-1 tip
# And currently tx4 is in the mempool.
#
# If we invalidate the tip, tx3 should get added to the mempool, causing
# tx4 to be removed (fails sequence-lock).
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
assert(tx4.hash not in self.nodes[0].getrawmempool())
assert(tx3.hash in self.nodes[0].getrawmempool())
# Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
height = self.nodes[0].getblockcount()
for i in range(2):
block = create_block(tip, create_coinbase(height), cur_time)
block.nVersion = 3
block.rehash()
block.solve()
tip = block.sha256
height += 1
self.nodes[0].submitblock(ToHex(block))
cur_time += 1
mempool = self.nodes[0].getrawmempool()
assert(tx3.hash not in mempool)
assert(tx2.hash in mempool)
# Reset the chain and get rid of the mocktimed-blocks
self.nodes[0].setmocktime(0)
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks, prior to
# versionbits activation. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
assert(get_bip9_status(self.nodes[0], 'csv')['status'] != 'active')
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Make an anyone-can-spend transaction
tx2 = CTransaction()
tx2.nVersion = 1
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
# sign tx2
tx2_raw = self.nodes[0].signrawtransaction(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Now make an invalid spend of tx2 according to BIP68
sequence_value = 100 # 100 block relative locktime
tx3 = CTransaction()
tx3.nVersion = 2
tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx3.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3))
# make a block that violates bip68; ensure that the tip updates
tip = int(self.nodes[0].getbestblockhash(), 16)
block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1))
block.nVersion = 3
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.nodes[0].submitblock(ToHex(block))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
# activation should happen at block height 432 (3 periods)
# getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block.
min_activation_height = 432
height = self.nodes[0].getblockcount()
assert_greater_than(min_activation_height - height, 2)
self.nodes[0].generate(min_activation_height - height - 2)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "locked_in")
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "active")
sync_blocks(self.nodes)
# Use self.nodes[1] to test that version 2 transactions are standard.
def test_version2_relay(self):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransaction(ToHex(tx))["hex"]
tx_id = self.nodes[1].sendrawtransaction(tx_signed)
if __name__ == '__main__':
BIP68Test().main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Momentum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class MomentumOptimizerTest(test.TestCase):
def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum):
var = var + accum * lr * momentum
accum = accum * momentum + g
var = var - lr * accum
var = var - accum * lr * momentum
return var, accum
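  # The four assignments above net out to TensorFlow's Nesterov momentum
  # update:
  #   accum <- momentum * accum + grad
  #   var   <- var - lr * (grad + momentum * accum)
  # which is what apply_gradients computes when use_nesterov=True, so these
  # numpy values can be compared element-wise against the optimizer's
  # variables after every step.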
def doTestBasic(self, use_resource=False, use_callable_params=False):
for i, dtype in enumerate([dtypes.half, dtypes.float32, dtypes.float64]):
if use_resource:
var0 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtype, name="var0_%d" % i)
var1 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtype, name="var1_%d" % i)
else:
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
learning_rate = lambda: 2.0
momentum = lambda: 0.9
if not use_callable_params:
learning_rate = learning_rate()
momentum = momentum()
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=learning_rate, momentum=momentum)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
if context.in_graph_mode():
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], self.evaluate(var0))
self.assertAllClose([3.0, 4.0], self.evaluate(var1))
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
if context.in_graph_mode():
self.assertFalse(slot0 in variables.trainable_variables())
self.assertFalse(slot1 in variables.trainable_variables())
      # Step 1: the momentum accumulators were 0, so we should see a normal
      # update: v -= grad * learning_rate
if context.in_graph_mode():
self.evaluate(mom_update)
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]),
self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]),
self.evaluate(var1))
# Step 2: the momentum accumulators contain the previous update.
if context.in_graph_mode():
self.evaluate(mom_update)
else:
mom_opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]),
self.evaluate(slot0))
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
self.evaluate(slot1))
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), self.evaluate(var0))
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), self.evaluate(var1))
def testBasic(self):
with self.test_session():
self.doTestBasic(use_resource=False)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testResourceBasic(self):
self.doTestBasic(use_resource=True)
def testBasicCallableParams(self):
with context.eager_mode():
self.doTestBasic(use_resource=True, use_callable_params=True)
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testVariablesAcrossGraphs(self):
optimizer = momentum_lib.MomentumOptimizer(0.01, 0.5)
with ops.Graph().as_default():
var0 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtypes.float32, name="var0")
var1 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtypes.float32, name="var1")
if context.in_eager_mode():
loss = lambda: math_ops.reduce_sum(var0 + var1)
else:
loss = math_ops.reduce_sum(var0 + var1)
optimizer.minimize(loss)
optimizer_variables = optimizer.variables()
self.assertStartsWith(optimizer_variables[0].name, "var0")
self.assertStartsWith(optimizer_variables[1].name, "var1")
self.assertEquals(2, len(optimizer_variables))
with ops.Graph().as_default():
var2 = resource_variable_ops.ResourceVariable(
[1.0, 2.0], dtype=dtypes.float32, name="var2")
var3 = resource_variable_ops.ResourceVariable(
[3.0, 4.0], dtype=dtypes.float32, name="var3")
if context.in_eager_mode():
loss = lambda: math_ops.reduce_sum(var2 + var3)
else:
loss = math_ops.reduce_sum(var2 + var3)
optimizer.minimize(loss)
optimizer_variables = optimizer.variables()
self.assertStartsWith(optimizer_variables[0].name, "var2")
self.assertStartsWith(optimizer_variables[1].name, "var3")
self.assertEquals(2, len(optimizer_variables))
def testNesterovMomentum(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
cost = 5 * var0 * var0 + 3 * var1
global_step = variables.Variable(
array_ops.zeros([], dtypes.int64), name="global_step")
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9, use_nesterov=True)
opt_op = mom_op.minimize(cost, global_step, [var0, var1])
variables.global_variables_initializer().run()
for t in range(1, 5):
opt_op.run()
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
self.assertAllClose(var0_np, var0.eval())
self.assertAllClose(var1_np, var1.eval())
def testSparseNesterovMomentum(self):
for dtype in [dtypes.float32, dtypes.float64]:
with self.test_session():
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
grads = []
for t in range(1, 5):
grads.append(var0_np * 10)
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
accum0_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
accum1_np = np.array([0.0, 0.0], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
loss = 5 * var0 * var0 + 3 * var1
mom_op = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9, use_nesterov=True)
x_feed = array_ops.placeholder(dtype)
y_feed = ops.IndexedSlices(
x_feed, constant_op.constant([0, 1]), constant_op.constant([2]))
grads_and_vars = [(y_feed, var0), (constant_op.constant(
[3.0, 3.0], dtype=dtype), var1)]
opt_update = mom_op.apply_gradients(grads_and_vars)
variables.global_variables_initializer().run()
for t in range(1, 5):
opt_update.run(feed_dict={x_feed: grads[t - 1]})
var0_np, accum0_np = self._update_nesterov_momentum_numpy(
var0_np, accum0_np, var0_np * 10, 2.0, 0.9)
var1_np, accum1_np = self._update_nesterov_momentum_numpy(var1_np,
accum1_np,
3, 2.0, 0.9)
self.assertAllClose(var0_np, var0.eval())
self.assertAllClose(var1_np, var1.eval())
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testMinimizeSparseResourceVariable(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype)
# pylint: disable=cell-var-from-loop
def loss():
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x)
return pred * pred
# pylint: enable=cell-var-from-loop
opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
sgd_op = opt.minimize(loss)
self.evaluate(variables.global_variables_initializer())
# Run 1 step of sgd
self.evaluate(sgd_op)
# Validate updated params
self.assertAllCloseAccordingToType([[-111, -138]], self.evaluate(var0))
@test_util.run_in_graph_and_eager_modes(reset_test=True)
def testMinimizeWith2DIndiciesForEmbeddingLookup(self):
var0 = resource_variable_ops.ResourceVariable(array_ops.ones([2, 2]))
def loss():
return math_ops.reduce_sum(embedding_ops.embedding_lookup(var0, [[1]]))
opt = momentum_lib.MomentumOptimizer(learning_rate=1.0, momentum=0.0)
sgd_op = opt.minimize(loss)
self.evaluate(variables.global_variables_initializer())
self.evaluate(sgd_op)
self.assertAllCloseAccordingToType([[1, 1], [0, 0]], self.evaluate(var0))
def testTensorLearningRateAndMomentum(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=constant_op.constant(2.0),
momentum=constant_op.constant(0.9))
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
self.assertFalse(slot0 in variables.trainable_variables())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
self.assertFalse(slot1 in variables.trainable_variables())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval())
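# Illustrative sketch (not part of the original test): the plain momentum
# recurrence that the hand-computed expectations above encode. With lr=2.0 and
# momentum=0.9, two steps from var0=1.0 with grad=0.1 give
# 1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0) = 0.42, matching the assertion.
def _momentum_step_sketch(var, accum, grad, lr, momentum):
    """One hypothetical dense momentum step: accum <- m * accum + g, var <- var - lr * accum."""
    accum = momentum * accum + grad
    var = var - lr * accum
    return var, accum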
def _dbParamsMom01(self):
"""Return dist-belief momentum values.
The return values were generated by the dist-belief momentum unittest,
running with a learning rate of 0.1 and a momentum of 0.1.
These values record how a parameter vector of size 10, initialized with 0.0,
gets updated with 10 consecutive momentum steps. It uses random gradients.
Returns:
db_grad: The gradients to apply
db_out: The parameters after the momentum update.
"""
db_grad = [[]] * 10
db_out = [[]] * 10
# pylint: disable=line-too-long
db_grad[0] = [
0.00096264342, 0.17914793, 0.93945462, 0.41396621, 0.53037018,
0.93197989, 0.78648776, 0.50036013, 0.55345792, 0.96722615
]
db_out[0] = [
-9.6264346e-05, -0.017914793, -0.093945466, -0.041396622, -0.053037018,
-0.093197994, -0.078648776, -0.050036013, -0.055345792, -0.096722618
]
db_grad[1] = [
0.17075552, 0.88821375, 0.20873757, 0.25236958, 0.57578111, 0.15312378,
0.5513742, 0.94687688, 0.16012503, 0.22159521
]
db_out[1] = [
-0.017181443, -0.10852765, -0.12421377, -0.070773244, -0.11591884,
-0.11783017, -0.14165108, -0.14972731, -0.076892875, -0.1285544
]
db_grad[2] = [
0.35077485, 0.47304362, 0.44412705, 0.44368884, 0.078527533, 0.81223965,
0.31168157, 0.43203235, 0.16792089, 0.24644311
]
db_out[2] = [
-0.053967446, -0.1648933, -0.1716533, -0.1180798, -0.13005978,
-0.20151734, -0.17911947, -0.20289968, -0.095839672, -0.15638189
]
db_grad[3] = [
0.9694621, 0.75035888, 0.28171822, 0.83813518, 0.53807181, 0.3728098,
0.81454384, 0.03848977, 0.89759839, 0.93665648
]
db_out[3] = [
-0.15459226, -0.24556576, -0.20456907, -0.20662397, -0.18528105,
-0.24716705, -0.2643207, -0.21206589, -0.18749419, -0.2528303
]
db_grad[4] = [
0.38578293, 0.8536852, 0.88722926, 0.66276771, 0.13678469, 0.94036359,
0.69107032, 0.81897682, 0.5433259, 0.67860287
]
db_out[4] = [
-0.20323303, -0.33900154, -0.29658359, -0.28175515, -0.20448165,
-0.34576839, -0.34194785, -0.29488021, -0.25099224, -0.33033544
]
db_grad[5] = [
0.27885768, 0.76100707, 0.24625534, 0.81354135, 0.18959245, 0.48038563,
0.84163809, 0.41172323, 0.83259648, 0.44941229
]
db_out[5] = [
-0.23598288, -0.42444581, -0.33041057, -0.3706224, -0.22536094,
-0.40366709, -0.43387437, -0.34433398, -0.34060168, -0.38302717
]
db_grad[6] = [
0.27233034, 0.056316052, 0.5039115, 0.24105175, 0.35697976, 0.75913221,
0.73577434, 0.16014607, 0.57500273, 0.071136251
]
db_out[6] = [
-0.26649091, -0.43862185, -0.38418442, -0.40361428, -0.26314685,
-0.48537019, -0.51664448, -0.36529395, -0.40706289, -0.39540997
]
db_grad[7] = [
0.58697265, 0.2494842, 0.08106143, 0.39954534, 0.15892942, 0.12683646,
0.74053431, 0.16033, 0.66625422, 0.73515922
]
db_out[7] = [
-0.32823896, -0.46498787, -0.39766794, -0.446868, -0.28281838,
-0.50622416, -0.59897494, -0.38342294, -0.48033443, -0.47016418
]
db_grad[8] = [
0.8215279, 0.41994119, 0.95172721, 0.68000203, 0.79439718, 0.43384039,
0.55561525, 0.22567581, 0.93331909, 0.29438227
]
db_out[8] = [
-0.41656655, -0.50961858, -0.49418902, -0.51919359, -0.36422527,
-0.55169362, -0.6627695, -0.40780342, -0.58099347, -0.50707781
]
db_grad[9] = [
0.68297005, 0.67758518, 0.1748755, 0.13266537, 0.70697063, 0.055731893,
0.68593478, 0.50580865, 0.12602448, 0.093537711
]
db_out[9] = [
-0.49369633, -0.58184016, -0.52132869, -0.5396927, -0.44306302,
-0.56181377, -0.73774242, -0.46082234, -0.60366184, -0.52012295
]
# pylint: enable=line-too-long
return db_grad, db_out
def testLikeDistBeliefMom01(self):
with self.test_session():
db_grad, db_out = self._dbParamsMom01()
num_samples = len(db_grad)
var0 = variables.Variable([0.0] * num_samples)
grads0 = constant_op.constant([0.0] * num_samples)
mom_opt = momentum_lib.MomentumOptimizer(learning_rate=0.1, momentum=0.1)
mom_update = mom_opt.apply_gradients(zip([grads0], [var0]))
variables.global_variables_initializer().run()
for i in xrange(num_samples):
mom_update.run(feed_dict={grads0: db_grad[i]})
self.assertAllClose(np.array(db_out[i]), var0.eval())
def testSparse(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable(array_ops.zeros([4, 2], dtype=dtype))
var1 = variables.Variable(constant_op.constant(1.0, dtype, [4, 2]))
grads0 = ops.IndexedSlices(
constant_op.constant(
[[.1, .1]], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([4, 2]))
grads1 = ops.IndexedSlices(
constant_op.constant(
[[.01, .01], [.01, .01]], dtype=dtype),
constant_op.constant([2, 3]),
constant_op.constant([4, 2]))
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Check we have slots
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([0, 0], var0.eval()[0])
self.assertAllClose([0, 0], var0.eval()[1])
self.assertAllClose([1, 1], var1.eval()[2])
# Step 1: the momentum accumulators are 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0, 0]), slot0.eval()[0])
self.assertAllCloseAccordingToType(np.array([.1, .1]), slot0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([.01, .01]), slot1.eval()[2])
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(np.array([0, 0]), var0.eval()[0])
self.assertAllCloseAccordingToType(
np.array([-(0.1 * 2.0), -(0.1 * 2.0)]), var0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.01 * 2.0), 1.0 - (0.01 * 2.0)]), var1.eval()[2])
# Step 2: the momentum accumulators contain the previous update.
mom_update.run()
# Check that the momentum accumulators have been updated.
self.assertAllClose(np.array([0, 0]), slot0.eval()[0])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]),
slot1.eval()[2])
# Check that the parameters have been updated.
self.assertAllClose(np.array([0, 0]), var0.eval()[0])
self.assertAllCloseAccordingToType(
np.array([
-(0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), -(0.1 * 2.0) - (
(0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval()[1])
self.assertAllCloseAccordingToType(
np.array([
0.98 - ((0.9 * 0.01 + 0.01) * 2.0), 0.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval()[2])
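# Illustrative sketch (not part of the original test): the row-wise behaviour
# the sparse assertions above depend on. An IndexedSlices gradient only touches
# the rows named in its indices, so row 0 of var0 stays at zero while row 1
# follows the same momentum recurrence as the dense case.
def _sparse_momentum_step_sketch(var, accum, grad_rows, indices, lr, momentum):
    """Apply one hypothetical momentum step to the selected rows of a 2-D numpy variable."""
    for idx, grad in zip(indices, grad_rows):
        accum[idx] = momentum * accum[idx] + grad
        var[idx] = var[idx] - lr * accum[idx]
    return var, accum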
def testSharing(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
mom_opt = momentum_lib.MomentumOptimizer(
learning_rate=2.0, momentum=0.9)
mom_update1 = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
mom_update2 = mom_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
self.assertEqual(["momentum"], mom_opt.get_slot_names())
slot0 = mom_opt.get_slot(var0, "momentum")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = mom_opt.get_slot(var1, "momentum")
self.assertEquals(slot1.get_shape(), var1.get_shape())
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Step 1: the momentum accumulators were 0. So we should see a normal
# update: v -= grad * learning_rate
mom_update1.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval())
self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval())
# Step 2: the second momentum accumulators contain the previous update.
mom_update2.run()
# Check that the momentum accumulators have been updated.
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval())
# Check that the parameters have been updated.
self.assertAllCloseAccordingToType(
np.array([
1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0),
2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0)
]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([
2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - (
(0.9 * 0.01 + 0.01) * 2.0)
]), var1.eval())
if __name__ == "__main__":
test.main()
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of an S3-like storage server based on local files.
Useful to test features that will eventually run on S3, or if you want to
run something locally that was once running on S3.
This server doesn't support all the features of S3, but it works with the
standard S3 client for the most basic semantics. To use the standard
S3 client with this module:
c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
is_secure=False)
c.create_bucket("mybucket")
c.put("mybucket", "mykey", "a value")
print c.get("mybucket", "mykey").body
"""
import bisect
import datetime
import hashlib
import os
import os.path
import urllib
from tornado import escape
from tornado import httpserver
from tornado import ioloop
from tornado import web
def start(port, root_directory="/tmp/s3", bucket_depth=0):
"""Starts the mock S3 server on the given port at the given path."""
application = S3Application(root_directory, bucket_depth)
http_server = httpserver.HTTPServer(application)
http_server.listen(port)
ioloop.IOLoop.current().start()
class S3Application(web.Application):
"""Implementation of an S3-like storage server based on local files.
If bucket depth is given, we break files up into multiple directories
to prevent hitting file system limits on the number of files in each
directory. 1 means one level of directories, 2 means two levels, etc.
"""
def __init__(self, root_directory, bucket_depth=0):
web.Application.__init__(self, [
(r"/", RootHandler),
(r"/([^/]+)/(.+)", ObjectHandler),
(r"/([^/]+)/", BucketHandler),
])
self.directory = os.path.abspath(root_directory)
if not os.path.exists(self.directory):
os.makedirs(self.directory)
self.bucket_depth = bucket_depth
class BaseRequestHandler(web.RequestHandler):
SUPPORTED_METHODS = ("PUT", "GET", "DELETE")
def render_xml(self, value):
assert isinstance(value, dict) and len(value) == 1
self.set_header("Content-Type", "application/xml; charset=UTF-8")
name = value.keys()[0]
parts = []
parts.append('<' + escape.utf8(name) +
' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
self._render_parts(value.values()[0], parts)
parts.append('</' + escape.utf8(name) + '>')
self.finish('<?xml version="1.0" encoding="UTF-8"?>\n' +
''.join(parts))
def _render_parts(self, value, parts=[]):
if isinstance(value, (unicode, bytes)):
parts.append(escape.xhtml_escape(value))
elif isinstance(value, int) or isinstance(value, long):
parts.append(str(value))
elif isinstance(value, datetime.datetime):
parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
elif isinstance(value, dict):
for name, subvalue in value.iteritems():
if not isinstance(subvalue, list):
subvalue = [subvalue]
for subsubvalue in subvalue:
parts.append('<' + escape.utf8(name) + '>')
self._render_parts(subsubvalue, parts)
parts.append('</' + escape.utf8(name) + '>')
else:
raise Exception("Unknown S3 value type %r" % value)
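# Worked example (not part of the original server): the single-key dict shape
# that render_xml/_render_parts above serialize. Nested dicts and lists become
# nested elements, so for instance
#   {"ListAllMyBucketsResult": {"Buckets": {"Bucket": [{"Name": "mybucket"}]}}}
# renders roughly as
#   <ListAllMyBucketsResult xmlns="...">
#     <Buckets><Bucket><Name>mybucket</Name></Bucket></Buckets>
#   </ListAllMyBucketsResult>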
def _object_path(self, bucket, object_name):
if self.application.bucket_depth < 1:
return os.path.abspath(os.path.join(
self.application.directory, bucket, object_name))
hash = hashlib.md5(object_name).hexdigest()
path = os.path.abspath(os.path.join(
self.application.directory, bucket))
for i in range(self.application.bucket_depth):
path = os.path.join(path, hash[:2 * (i + 1)])
return os.path.join(path, object_name)
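# Illustrative sketch (not part of the original server): how bucket_depth maps a
# key onto nested hash-prefix directories, mirroring _object_path above. With a
# depth of 2, a key whose md5 hex digest starts with "abcd" is stored under
# <root>/<bucket>/ab/abcd/<key>. The .encode() call is only needed on Python 3.
def _object_path_sketch(root, bucket, object_name, bucket_depth=2):
    digest = hashlib.md5(object_name.encode("utf-8")).hexdigest()
    path = os.path.join(root, bucket)
    for i in range(bucket_depth):
        path = os.path.join(path, digest[:2 * (i + 1)])
    return os.path.join(path, object_name)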
class RootHandler(BaseRequestHandler):
def get(self):
names = os.listdir(self.application.directory)
buckets = []
for name in names:
path = os.path.join(self.application.directory, name)
info = os.stat(path)
buckets.append({
"Name": name,
"CreationDate": datetime.datetime.utcfromtimestamp(
info.st_ctime),
})
self.render_xml({"ListAllMyBucketsResult": {
"Buckets": {"Bucket": buckets},
}})
class BucketHandler(BaseRequestHandler):
def get(self, bucket_name):
prefix = self.get_argument("prefix", u"")
marker = self.get_argument("marker", u"")
max_keys = int(self.get_argument("max-keys", 50000))
path = os.path.abspath(os.path.join(self.application.directory,
bucket_name))
terse = int(self.get_argument("terse", 0))
if not path.startswith(self.application.directory) or \
not os.path.isdir(path):
raise web.HTTPError(404)
object_names = []
for root, dirs, files in os.walk(path):
for file_name in files:
object_names.append(os.path.join(root, file_name))
skip = len(path) + 1
for i in range(self.application.bucket_depth):
skip += 2 * (i + 1) + 1
object_names = [n[skip:] for n in object_names]
object_names.sort()
contents = []
start_pos = 0
if marker:
start_pos = bisect.bisect_right(object_names, marker, start_pos)
if prefix:
start_pos = bisect.bisect_left(object_names, prefix, start_pos)
truncated = False
for object_name in object_names[start_pos:]:
if not object_name.startswith(prefix):
break
if len(contents) >= max_keys:
truncated = True
break
object_path = self._object_path(bucket_name, object_name)
c = {"Key": object_name}
if not terse:
info = os.stat(object_path)
c.update({
"LastModified": datetime.datetime.utcfromtimestamp(
info.st_mtime),
"Size": info.st_size,
})
contents.append(c)
marker = object_name
self.render_xml({"ListBucketResult": {
"Name": bucket_name,
"Prefix": prefix,
"Marker": marker,
"MaxKeys": max_keys,
"IsTruncated": truncated,
"Contents": contents,
}})
def put(self, bucket_name):
path = os.path.abspath(os.path.join(
self.application.directory, bucket_name))
if not path.startswith(self.application.directory) or \
os.path.exists(path):
raise web.HTTPError(403)
os.makedirs(path)
self.finish()
def delete(self, bucket_name):
path = os.path.abspath(os.path.join(
self.application.directory, bucket_name))
if not path.startswith(self.application.directory) or \
not os.path.isdir(path):
raise web.HTTPError(404)
if len(os.listdir(path)) > 0:
raise web.HTTPError(403)
os.rmdir(path)
self.set_status(204)
self.finish()
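# Illustrative sketch (not part of the original server): the marker/prefix
# pagination performed in BucketHandler.get above. bisect_right skips every key
# up to and including the marker, bisect_left then jumps to the first key with
# the requested prefix, and the scan stops once max_keys entries are collected.
def _paginate_keys_sketch(sorted_keys, prefix="", marker="", max_keys=1000):
    start = 0
    if marker:
        start = bisect.bisect_right(sorted_keys, marker, start)
    if prefix:
        start = bisect.bisect_left(sorted_keys, prefix, start)
    page, truncated = [], False
    for key in sorted_keys[start:]:
        if not key.startswith(prefix):
            break
        if len(page) >= max_keys:
            truncated = True
            break
        page.append(key)
    return page, truncated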
class ObjectHandler(BaseRequestHandler):
def get(self, bucket, object_name):
object_name = urllib.unquote(object_name)
path = self._object_path(bucket, object_name)
if not path.startswith(self.application.directory) or \
not os.path.isfile(path):
raise web.HTTPError(404)
info = os.stat(path)
self.set_header("Content-Type", "application/unknown")
self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp(
info.st_mtime))
object_file = open(path, "rb")
try:
self.finish(object_file.read())
finally:
object_file.close()
def put(self, bucket, object_name):
object_name = urllib.unquote(object_name)
bucket_dir = os.path.abspath(os.path.join(
self.application.directory, bucket))
if not bucket_dir.startswith(self.application.directory) or \
not os.path.isdir(bucket_dir):
raise web.HTTPError(404)
path = self._object_path(bucket, object_name)
if not path.startswith(bucket_dir) or os.path.isdir(path):
raise web.HTTPError(403)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
object_file = open(path, "w")
object_file.write(self.request.body)
object_file.close()
self.finish()
def delete(self, bucket, object_name):
object_name = urllib.unquote(object_name)
path = self._object_path(bucket, object_name)
if not path.startswith(self.application.directory) or \
not os.path.isfile(path):
raise web.HTTPError(404)
os.unlink(path)
self.set_status(204)
self.finish()
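# Minimal launch sketch (not part of the original module): how this mock server
# might be started directly. The port, root directory and bucket depth below are
# arbitrary example values.
if __name__ == "__main__":
    start(8888, root_directory="/tmp/s3", bucket_depth=0)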
import os
import collections
import tornado.web
from tornado.escape import url_escape
import uuid
from framework.dependency_management.dependency_resolver import ServiceLocator
from framework.lib.exceptions import InvalidTargetReference, \
InvalidParameterType
from framework.lib.general import cprint
from framework.interface import custom_handlers
class Redirect(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
def get(self):
self.redirect(self.reverse_url('home_ui_url'))
class Home(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
def get(self):
self.render('home.html',
auto_updater_api_url=self.reverse_url('auto_updater_api_url'),
)
class TransactionLog(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
@tornado.web.asynchronous
def get(self, target_id=None, transaction_id=None):
if not target_id:
raise tornado.web.HTTPError(405)
if transaction_id:
self.render("transaction.html",
transaction_api_url=self.reverse_url('transactions_api_url', target_id, transaction_id),
transaction_log_url=self.reverse_url('transaction_log_url', target_id, None),
transaction_replay_url=self.reverse_url('transaction_replay_url', target_id, transaction_id),
forward_zap_url=self.reverse_url('forward_zap_url', target_id, transaction_id)
)
else:
self.render("transaction_log.html",
transactions_api_url=self.reverse_url('transactions_api_url', target_id, None),
transactions_search_api_url=self.reverse_url('transactions_search_api_url', target_id),
transaction_log_url=self.reverse_url('transaction_log_url', target_id, None),
zest_console_url=self.reverse_url('zest_console_url', target_id)
)
class HTTPSessions(custom_handlers.UIRequestHandler):
""" HTTPSessions handles the user sessions. """
SUPPORTED_METHODS = ['GET']
@tornado.web.asynchronous
def get(self, target_id=None):
if not target_id:
raise tornado.web.HTTPError(405)
self.render("sessions_manager.html",
sessions_api_url=self.reverse_url('sessions_api_url', target_id),
)
class ReplayRequest(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
@tornado.web.asynchronous
def get(self, target_id=None, transaction_id=None):
if not target_id or not transaction_id:
raise tornado.web.HTTPError(405)
else:
self.render("replay_request.html",
transaction_api_url=self.reverse_url('transactions_api_url', target_id, transaction_id),
transaction_replay_api_url=self.reverse_url('transaction_replay_api_url', target_id, transaction_id)
)
class ZestScriptConsoleHandler(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
@tornado.web.asynchronous
def get(self, target_id=None):
if not target_id:
raise tornado.web.HTTPError(405)
else:
self.render("zest_console.html",
zest_console_api_url=self.reverse_url('zest_console_api_url', target_id),
zest_recording=self.get_component("zest").IsRecording(),
zest_target_heading=(self.get_component("zest").GetTargetConfig(target_id))['HOST_AND_PORT']
)
class UrlLog(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
@tornado.web.asynchronous
def get(self, target_id=None):
if not target_id:
raise tornado.web.HTTPError(405)
self.render("url_log.html",
urls_api_url=self.reverse_url('urls_api_url', target_id),
urls_search_api_url=self.reverse_url('urls_search_api_url', target_id),
transaction_log_url=self.reverse_url('transaction_log_url', target_id, None)
)
class TargetManager(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
@tornado.web.asynchronous
def get(self, target_id=None):
if not target_id:
self.render("target_manager.html",
owtf_sessions_api_url=self.reverse_url('owtf_sessions_api_url', None, None),
targets_api_url=self.reverse_url('targets_api_url', None),
targets_search_api_url=self.reverse_url('targets_search_api_url'),
targets_ui_url=self.reverse_url('targets_ui_url', None),
plugins_api_url=self.reverse_url('plugins_api_url', None, None, None),
worklist_api_url=self.reverse_url('worklist_api_url', None, None)
)
else:
adv_filter_data = self.get_component("plugin_output").GetUnique(target_id=int(target_id))
adv_filter_data["mapping"] = self.get_component("mapping_db").GetMappingTypes()
self.render("target.html",
target_api_url=self.reverse_url('targets_api_url', target_id),
targets_ui_url=self.reverse_url('targets_ui_url', None),
poutput_ui_url=self.reverse_url('poutput_ui_url', target_id),
adv_filter_data=adv_filter_data,
plugins_api_url=self.reverse_url('plugins_api_url', None, None, None),
worklist_api_url=self.reverse_url('worklist_api_url', None, None),
transaction_log_url=self.reverse_url('transaction_log_url', target_id, None),
url_log_url=self.reverse_url('url_log_url', target_id),
sessions_ui_url=self.reverse_url('sessions_ui_url', target_id),
)
class PlugnHack(custom_handlers.UIRequestHandler):
"""
PlugnHack handles the requests used to integrate OWTF with the
Firefox browser via the Plug-n-Hack add-on.
For more information about Mozilla Plug-n-Hack standard visit:
https://blog.mozilla.org/security/2013/08/22/plug-n-hack/
"""
SUPPORTED_METHODS = ['GET']
@tornado.web.asynchronous
def get(self, extension=""):
"""
pnh is an abbreviation for Plug-n-Hack
URL in default case = http://127.0.0.1:8009/ui/plugnhack/
Templates folder is framework/interface/templates/pnh
For Plug-n-Hack, the following files are used:
===================================================
| File Name | Relative path |
===================================================
| Provider file | /ui/plugnhack/ |
---------------------------------------------------
| Manifest file | /ui/plugnhack/manifest.json |
---------------------------------------------------
| Commands file | /ui/plugnhack/service.json |
---------------------------------------------------
| PAC file | /ui/plugnhack/proxy.pac |
---------------------------------------------------
| CA Cert | /ui/plugnhack/ca.crt |
---------------------------------------------------
"""
root_url = self.request.protocol + "://" + self.request.host  # URL of the UI server, e.g. http://127.0.0.1:8009
command_url = os.path.join(root_url, "")  # URL for use in service.json, e.g. http://127.0.0.1:8009/
pnh_url = os.path.join(root_url, "ui/plugnhack")  # URL for use in manifest.json, e.g. http://127.0.0.1:8009/ui/plugnhack
# URL for use in manifest.json; the Plug-n-Hack probe will send messages to
# http://<INBOUND_PROXY_IP>:<INBOUND_PROXY_PORT>/plugnhack, e.g. http://127.0.0.1:8008/plugnhack
probe_url = "http://" + self.get_component("db_config").Get('INBOUND_PROXY_IP') + \
    ":" + self.get_component("db_config").Get('INBOUND_PROXY_PORT')
# Obtain path to PlugnHack template files
# PLUGNHACK_TEMPLATES_DIR is defined in /framework/config/framework_config.cfg
pnh_folder = os.path.join(self.get_component("config").FrameworkConfigGet('PLUGNHACK_TEMPLATES_DIR'),"")
self.application.ca_cert = os.path.expanduser(self.get_component("db_config").Get('CA_CERT')) # CA certificate
# Use the UUID module to generate a key that is substituted for 'api_key' in
# the 'probe' descriptor section of 'manifest.json'.
# Its use is temporary, until Bhadarwaj implements 'API key generation'.
api_key = uuid.uuid4().hex
if extension == "": # In this case plugnhack.html is rendered and {{ manifest_url }} is replaced with 'manifest_url' value
manifest_url = pnh_url + "/manifest.json"
# Set response status code to 200 'OK'
self.set_status(200)
# Set response header 'Content-Type'
self.set_header("Content-Type","text/html")
# Set response header 'Etag', it will not appear in response,
# we don't need web-cache validation
self.set_header("Etag","")
# Set response header 'Server', it will not appear in response
self.set_header("Server","")
# Set response header 'Date', it will not appear in response
self.set_header("Date","")
# Set response header 'Cache-Control', it will not appear,
# we don't need caching for Plugnhack
self.add_header("Cache-Control","no-cache")
# Set response header 'Pragma', it will not appear in response
self.add_header("Pragma","no-cache")
# Set response headers for CORS, it allows many resources on a
# web page to be requested from another domain outside the domain
# the resource originated from. This mechanism is used in OWASP ZAP.
self.add_header("Access-Control-Allow-Origin","*")
self.add_header("Access-Control-Allow-Headers","OWTF-Header")
self.add_header("Access-Control-Allow-Methods","GET,POST,OPTIONS")
self.render(pnh_folder + "plugnhack.html",
manifest_url=manifest_url,
plugnhack_ui_url=self.reverse_url('plugnhack_ui_url')
)
elif extension == "manifest.json": # In this case {{ pnh_url }} in manifest.json are replaced with 'pnh_url' value
# Set response status code to 200 'OK'
self.set_status(200)
# Set response header 'Content-Type'
self.set_header("Content-Type","application/json")
# Set response header 'Etag', it will not appear in response,
# we don't need web-cache validation
self.set_header("Etag","")
# Set response header 'Server', it will not appear in response
self.set_header("Server","")
# Set response header 'Date', it will not appear in response
self.set_header("Date","")
# Set response header 'Cache-Control', it will not appear,
# we don't need caching for Plugnhack
self.add_header("Cache-Control","no-cache")
# Set response header 'Pragma', it will not appear in response
self.add_header("Pragma","no-cache")
# Set response headers for CORS, it allows many resources on a
# web page to be requested from another domain outside the domain
# the resource originated from. This mechanism is used in OWASP ZAP.
# Without this Plug-n-Hack cannot send messages and error:
# 'Cross-Origin Request Blocked: The Same Origin Policy disallows reading
# the remote resource at' will be present in browser console
self.add_header("Access-Control-Allow-Origin","*")
self.add_header("Access-Control-Allow-Headers","OWTF-Header")
self.add_header("Access-Control-Allow-Methods","GET,POST,OPTIONS")
self.render(pnh_folder + "manifest.json",
pnh_url=pnh_url,
probe_url=probe_url,
api_key=api_key,
plugnhack_ui_url=self.reverse_url('plugnhack_ui_url')
)
elif extension == "service.json": # In this case {{ root_url }} in service.json are replaced with 'root_url' value
# Set response status code to 200 'OK'
self.set_status(200)
# Set response header 'Content-Type'
self.set_header("Content-Type","application/json")
# Set response header 'Etag', it will not appear in response,
# we don't need web-cache validation
self.set_header("Etag","")
# Set response header 'Server', it will not appear in response
self.set_header("Server","")
# Set response header 'Date', it will not appear in response
self.set_header("Date","")
# Set response header 'Cache-Control', it will not appear,
# we don't need caching for Plugnhack
self.add_header("Cache-Control","no-cache")
# Set response header 'Pragma', it will not appear in response
self.add_header("Pragma","no-cache")
# Set response headers for CORS, it allows many resources on a
# web page to be requested from another domain outside the domain
# the resource originated from. This mechanism is used in OWASP ZAP.
self.add_header("Access-Control-Allow-Origin","*")
self.add_header("Access-Control-Allow-Headers","OWTF-Header")
self.add_header("Access-Control-Allow-Methods","GET,POST,OPTIONS")
self.render(pnh_folder + "service.json",
root_url=command_url,
plugnhack_ui_url=self.reverse_url('plugnhack_ui_url')
)
elif extension == "proxy.pac": # In this case {{ proxy_details }} in proxy.pac is replaced with 'proxy_details' value
proxy_details = self.get_component("db_config").Get('INBOUND_PROXY_IP') + ":" + self.get_component("db_config").Get('INBOUND_PROXY_PORT') # OWTF proxy 127.0.0.1:8008
# Set response status code to 200 'OK'
self.set_status(200)
# Set response header 'Content-Type'
self.set_header("Content-Type","text/plain")
# Set response header 'Etag', it will not appear in response,
# we don't need web-cache validation
self.set_header("Etag","")
# Set response header 'Server', it will not appear in response
self.set_header("Server","")
# Set response header 'Date', it will not appear in response
self.set_header("Date","")
# Set response header 'Cache-Control', it will not appear,
# we don't need caching for Plugnhack
self.add_header("Cache-Control","no-cache")
# Set response header 'Pragma', it will not appear in response
self.add_header("Pragma","no-cache")
# Set response headers for CORS, it allows many resources on a
# web page to be requested from another domain outside the domain
# the resource originated from. This mechanism is used in OWASP ZAP.
self.add_header("Access-Control-Allow-Origin","*")
self.add_header("Access-Control-Allow-Headers","OWTF-Header")
self.add_header("Access-Control-Allow-Methods","GET,POST,OPTIONS")
self.render(pnh_folder + "proxy.pac",
proxy_details=proxy_details,
plugnhack_ui_url=self.reverse_url('plugnhack_ui_url')
)
elif extension == "ca.crt":
# Set response status code to 200 'OK'
self.set_status(200)
# Set response header 'Content-Type'
self.set_header("Content-Type","application/pkix-cert")
# Set response header 'Etag', it will not appear in response,
# we don't need web-cache validation
self.set_header("Etag","")
# Set response header 'Server', it will not appear in response
self.set_header("Server","")
# Set response header 'Date', it will not appear in response
self.set_header("Date","")
# Set response header 'Cache-Control', it will not appear,
# we don't need caching for Plugnhack
self.add_header("Cache-Control","no-cache")
# Set response header 'Pragma', it will not appear in response
self.add_header("Pragma","no-cache")
# Set response headers for CORS, it allows many resources on a
# web page to be requested from another domain outside the domain
# the resource originated from. This mechanism is used in OWASP ZAP.
self.add_header("Access-Control-Allow-Origin","*")
self.add_header("Access-Control-Allow-Headers","OWTF-Header")
self.add_header("Access-Control-Allow-Methods","GET,POST,OPTIONS")
self.render(self.application.ca_cert,
plugnhack_ui_url=self.reverse_url('plugnhack_ui_url')
)
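# Illustrative refactor sketch (not part of the original handler): the five
# branches above repeat the same status/header/CORS boilerplate, so a helper
# along these lines could set it once per response, with only the Content-Type
# differing. The helper name and its use here are hypothetical.
def _set_pnh_response_headers_sketch(handler, content_type):
    handler.set_status(200)
    handler.set_header("Content-Type", content_type)
    # Blank out headers we do not want to advertise and disable caching.
    for header in ("Etag", "Server", "Date"):
        handler.set_header(header, "")
    handler.add_header("Cache-Control", "no-cache")
    handler.add_header("Pragma", "no-cache")
    # CORS headers so the Plug-n-Hack probe can reach OWTF from another origin.
    handler.add_header("Access-Control-Allow-Origin", "*")
    handler.add_header("Access-Control-Allow-Headers", "OWTF-Header")
    handler.add_header("Access-Control-Allow-Methods", "GET,POST,OPTIONS")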
class PluginOutput(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
def get(self, target_id=None):
if not target_id:
raise tornado.web.HTTPError(400)
try:
filter_data = dict(self.request.arguments) # IMPORTANT!!
plugin_outputs = self.get_component("plugin_output").GetAll(
filter_data,
target_id=target_id)
# Group the plugin outputs to make them easier to use in the template
grouped_plugin_outputs = {}
for poutput in plugin_outputs:
if grouped_plugin_outputs.get(poutput['plugin_code']) is None:
# No problem of overwriting
grouped_plugin_outputs[poutput['plugin_code']] = []
grouped_plugin_outputs[poutput['plugin_code']].append(poutput)
# An ordered mapping is needed for ease of use in the templates
grouped_plugin_outputs = collections.OrderedDict(
sorted(grouped_plugin_outputs.items()))
# Get mappings
if self.get_argument("mapping", None):
mappings = self.get_component("mapping_db").GetMappings(self.get_argument("mapping", None))
else:
mappings = None
# Get test groups as well, for names and info links
test_groups = {}
for test_group in self.get_component("db_plugin").GetAllTestGroups():
test_group["mapped_code"] = test_group["code"]
test_group["mapped_descrip"] = test_group["descrip"]
if mappings:
try:
test_group["mapped_code"] = mappings[test_group['code']][0]
test_group["mapped_descrip"] = mappings[test_group['code']][1]
except KeyError:
pass
test_groups[test_group['code']] = test_group
self.render("plugin_report.html",
grouped_plugin_outputs=grouped_plugin_outputs,
test_groups=test_groups,
poutput_api_url=self.reverse_url('poutput_api_url', target_id, None, None, None),
transaction_log_url=self.reverse_url('transaction_log_url', target_id, None),
url_log_url=self.reverse_url('url_log_url', target_id),
# html=(self.application.Core.DB.Vulnexp.GetExplanation(owtf_code))
)
except InvalidTargetReference as e:
raise tornado.web.HTTPError(400)
except InvalidParameterType as e:
raise tornado.web.HTTPError(400)
class WorkerManager(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
@tornado.web.asynchronous
def get(self, worker_id=None):
config = ServiceLocator.get_component("config")
output_files_server = "%s://%s" % (
self.request.protocol,
self.request.host.replace(
config.FrameworkConfigGet("UI_SERVER_PORT"),
config.FrameworkConfigGet("FILE_SERVER_PORT")))
if not worker_id:
self.render("manager_interface.html",
worklist_api_url=self.reverse_url('worklist_api_url', None, None),
workers_api_url=output_files_server+self.reverse_url('workers_api_url', None, None),
targets_api_url=self.reverse_url('targets_api_url', None),
targets_ui_url=self.reverse_url('targets_ui_url', None),
)
else:
self.render("worker_interface.html",
worker_api_url=self.reverse_url('workers_api_url', worker_id, None),
targets_api_url=self.reverse_url('targets_api_url', None),
targets_ui_url=self.reverse_url('targets_ui_url', None)
)
class WorklistManager(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
def get(self):
self.render(
"worklist_manager.html",
worklist_api_url=self.reverse_url('worklist_api_url', None, None),
worklist_search_api_url=self.reverse_url('worklist_search_api_url'),
targets_ui_url=self.reverse_url('targets_ui_url', None),
)
class Help(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
def get(self):
self.render("help.html")
class ConfigurationManager(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
def get(self):
self.render(
"config_manager.html",
configuration_api_url=self.reverse_url('configuration_api_url')
)
class FileRedirectHandler(custom_handlers.UIRequestHandler):
SUPPORTED_METHODS = ['GET']
def get(self, file_url):
config = ServiceLocator.get_component("config")
output_files_server = "%s://%s/" % (
self.request.protocol,
self.request.host.replace(
config.FrameworkConfigGet("UI_SERVER_PORT"),
config.FrameworkConfigGet("FILE_SERVER_PORT")))
redirect_file_url = output_files_server + url_escape(file_url, plus=False)
self.redirect(redirect_file_url, permanent=True)
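# Worked example (not part of the original handlers): the UI-to-file-server URL
# rewrite used by WorkerManager and FileRedirectHandler above simply swaps the
# configured port inside the request host. The port values below are
# hypothetical placeholders for UI_SERVER_PORT and FILE_SERVER_PORT.
def _output_files_server_sketch(protocol, host, ui_port, file_port):
    return "%s://%s" % (protocol, host.replace(ui_port, file_port))
# e.g. _output_files_server_sketch("http", "127.0.0.1:8009", "8009", "8010")
# returns "http://127.0.0.1:8010"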
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of device_utils.py (mostly DeviceUtils).
"""
# pylint: disable=C0321
# pylint: disable=W0212
# pylint: disable=W0613
import collections
import datetime
import logging
import os
import re
import signal
import sys
import unittest
from pylib import android_commands
from pylib import cmd_helper
from pylib import constants
from pylib.device import adb_wrapper
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.device import intent
from pylib.utils import mock_calls
# RunCommand from third_party/android_testrunner/run_command.py is mocked
# below, so its path needs to be in sys.path.
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'android_testrunner'))
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'pymock'))
import mock # pylint: disable=F0401
class DeviceUtilsInitTest(unittest.TestCase):
def testInitWithStr(self):
serial_as_str = str('0123456789abcdef')
d = device_utils.DeviceUtils('0123456789abcdef')
self.assertEqual(serial_as_str, d.adb.GetDeviceSerial())
def testInitWithUnicode(self):
serial_as_unicode = unicode('fedcba9876543210')
d = device_utils.DeviceUtils(serial_as_unicode)
self.assertEqual(serial_as_unicode, d.adb.GetDeviceSerial())
def testInitWithAdbWrapper(self):
serial = '123456789abcdef0'
a = adb_wrapper.AdbWrapper(serial)
d = device_utils.DeviceUtils(a)
self.assertEqual(serial, d.adb.GetDeviceSerial())
def testInitWithAndroidCommands(self):
serial = '0fedcba987654321'
a = android_commands.AndroidCommands(device=serial)
d = device_utils.DeviceUtils(a)
self.assertEqual(serial, d.adb.GetDeviceSerial())
def testInitWithMissing_fails(self):
with self.assertRaises(ValueError):
device_utils.DeviceUtils(None)
with self.assertRaises(ValueError):
device_utils.DeviceUtils('')
class DeviceUtilsGetAVDsTest(mock_calls.TestCase):
def testGetAVDs(self):
with self.assertCall(
mock.call.pylib.cmd_helper.GetCmdOutput([mock.ANY, 'list', 'avd']),
'Available Android Virtual Devices:\n'
' Name: my_android5.0\n'
' Path: /some/path/to/.android/avd/my_android5.0.avd\n'
' Target: Android 5.0 (API level 21)\n'
' Tag/ABI: default/x86\n'
' Skin: WVGA800\n'):
self.assertEquals(['my_android5.0'],
device_utils.GetAVDs())
class DeviceUtilsRestartServerTest(mock_calls.TestCase):
@mock.patch('time.sleep', mock.Mock())
def testRestartServer_succeeds(self):
with self.assertCalls(
mock.call.pylib.device.adb_wrapper.AdbWrapper.KillServer(),
(mock.call.pylib.cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb']),
(1, '')),
mock.call.pylib.device.adb_wrapper.AdbWrapper.StartServer(),
(mock.call.pylib.cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb']),
(1, '')),
(mock.call.pylib.cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb']),
(0, '123\n'))):
device_utils.RestartServer()
class MockTempFile(object):
def __init__(self, name='/tmp/some/file'):
self.file = mock.MagicMock(spec=file)
self.file.name = name
self.file.name_quoted = cmd_helper.SingleQuote(name)
def __enter__(self):
return self.file
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class _PatchedFunction(object):
def __init__(self, patched=None, mocked=None):
self.patched = patched
self.mocked = mocked
def _AdbWrapperMock(test_serial):
adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
adb.__str__ = mock.Mock(return_value=test_serial)
adb.GetDeviceSerial.return_value = test_serial
return adb
class DeviceUtilsTest(mock_calls.TestCase):
def setUp(self):
self.adb = _AdbWrapperMock('0123456789abcdef')
self.device = device_utils.DeviceUtils(
self.adb, default_timeout=10, default_retries=0)
self.watchMethodCalls(self.call.adb, ignore=['GetDeviceSerial'])
def ShellError(self, output=None, status=1):
def action(cmd, *args, **kwargs):
raise device_errors.AdbShellCommandFailedError(
cmd, output, status, str(self.device))
if output is None:
output = 'Permission denied\n'
return action
def TimeoutError(self, msg=None):
if msg is None:
msg = 'Operation timed out'
return mock.Mock(side_effect=device_errors.CommandTimeoutError(
msg, str(self.device)))
def CommandError(self, msg=None):
if msg is None:
msg = 'Command failed'
return mock.Mock(side_effect=device_errors.CommandFailedError(
msg, str(self.device)))
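# Illustrative sketch (not part of the original tests): what the assertCall /
# assertCalls pattern used below expresses, phrased with the plain mock library
# imported at the top of this file. Each (call, value) pair means "expect this
# exact adb call and make it return this value"; mock_calls also enforces the
# call ordering, which this stripped-down version does not attempt.
def _plain_mock_equivalent_sketch():
    adb = mock.Mock()
    adb.Shell.return_value = 'foo\n'               # stand-in for (call.adb.Shell(...), 'foo\n')
    assert adb.Shell('ls /root') == 'foo\n'        # the code under test issues the call
    adb.Shell.assert_called_once_with('ls /root')  # and the test verifies it happened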
class DeviceUtilsIsOnlineTest(DeviceUtilsTest):
def testIsOnline_true(self):
with self.assertCall(self.call.adb.GetState(), 'device'):
self.assertTrue(self.device.IsOnline())
def testIsOnline_false(self):
with self.assertCall(self.call.adb.GetState(), 'offline'):
self.assertFalse(self.device.IsOnline())
def testIsOnline_error(self):
with self.assertCall(self.call.adb.GetState(), self.CommandError()):
self.assertFalse(self.device.IsOnline())
class DeviceUtilsHasRootTest(DeviceUtilsTest):
def testHasRoot_true(self):
with self.assertCall(self.call.adb.Shell('ls /root'), 'foo\n'):
self.assertTrue(self.device.HasRoot())
def testHasRoot_false(self):
with self.assertCall(self.call.adb.Shell('ls /root'), self.ShellError()):
self.assertFalse(self.device.HasRoot())
class DeviceUtilsEnableRootTest(DeviceUtilsTest):
def testEnableRoot_succeeds(self):
with self.assertCalls(
(self.call.device.IsUserBuild(), False),
self.call.adb.Root(),
self.call.adb.WaitForDevice()):
self.device.EnableRoot()
def testEnableRoot_userBuild(self):
with self.assertCalls(
(self.call.device.IsUserBuild(), True)):
with self.assertRaises(device_errors.CommandFailedError):
self.device.EnableRoot()
def testEnableRoot_rootFails(self):
with self.assertCalls(
(self.call.device.IsUserBuild(), False),
(self.call.adb.Root(), self.CommandError())):
with self.assertRaises(device_errors.CommandFailedError):
self.device.EnableRoot()
class DeviceUtilsIsUserBuildTest(DeviceUtilsTest):
def testIsUserBuild_yes(self):
with self.assertCall(
self.call.device.GetProp('ro.build.type', cache=True), 'user'):
self.assertTrue(self.device.IsUserBuild())
def testIsUserBuild_no(self):
with self.assertCall(
self.call.device.GetProp('ro.build.type', cache=True), 'userdebug'):
self.assertFalse(self.device.IsUserBuild())
class DeviceUtilsGetExternalStoragePathTest(DeviceUtilsTest):
def testGetExternalStoragePath_succeeds(self):
with self.assertCall(
self.call.adb.Shell('echo $EXTERNAL_STORAGE'), '/fake/storage/path\n'):
self.assertEquals('/fake/storage/path',
self.device.GetExternalStoragePath())
def testGetExternalStoragePath_fails(self):
with self.assertCall(self.call.adb.Shell('echo $EXTERNAL_STORAGE'), '\n'):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetExternalStoragePath()
class DeviceUtilsGetApplicationPathTest(DeviceUtilsTest):
def testGetApplicationPath_exists(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '19\n'),
(self.call.adb.Shell('pm path android'),
'package:/path/to/android.apk\n')):
self.assertEquals('/path/to/android.apk',
self.device.GetApplicationPath('android'))
def testGetApplicationPath_notExists(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '19\n'),
(self.call.adb.Shell('pm path not.installed.app'), '')):
self.assertEquals(None,
self.device.GetApplicationPath('not.installed.app'))
def testGetApplicationPath_fails(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '19\n'),
(self.call.adb.Shell('pm path android'),
self.CommandError('ERROR. Is package manager running?\n'))):
with self.assertRaises(device_errors.CommandFailedError):
self.device.GetApplicationPath('android')
@mock.patch('time.sleep', mock.Mock())
class DeviceUtilsWaitUntilFullyBootedTest(DeviceUtilsTest):
def testWaitUntilFullyBooted_succeedsNoWifi(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device.GetApplicationPath('android'),
'package:/some/fake/path'),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), '1')):
self.device.WaitUntilFullyBooted(wifi=False)
def testWaitUntilFullyBooted_succeedsWithWifi(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device.GetApplicationPath('android'),
'package:/some/fake/path'),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), '1'),
# wifi_enabled
(self.call.adb.Shell('dumpsys wifi'),
'stuff\nWi-Fi is enabled\nmore stuff\n')):
self.device.WaitUntilFullyBooted(wifi=True)
def testWaitUntilFullyBooted_sdCardReadyFails_noPath(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), self.CommandError())):
with self.assertRaises(device_errors.CommandFailedError):
self.device.WaitUntilFullyBooted(wifi=False)
def testWaitUntilFullyBooted_sdCardReadyFails_notExists(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), self.ShellError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), self.ShellError()),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'),
self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=False)
def testWaitUntilFullyBooted_devicePmFails(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device.GetApplicationPath('android'), self.CommandError()),
# pm_ready
(self.call.device.GetApplicationPath('android'), self.CommandError()),
# pm_ready
(self.call.device.GetApplicationPath('android'), self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=False)
def testWaitUntilFullyBooted_bootFails(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device.GetApplicationPath('android'),
'package:/some/fake/path'),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), '0'),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), '0'),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=False)
def testWaitUntilFullyBooted_wifiFails(self):
with self.assertCalls(
self.call.adb.WaitForDevice(),
# sd_card_ready
(self.call.device.GetExternalStoragePath(), '/fake/storage/path'),
(self.call.adb.Shell('test -d /fake/storage/path'), ''),
# pm_ready
(self.call.device.GetApplicationPath('android'),
'package:/some/fake/path'),
# boot_completed
(self.call.device.GetProp('sys.boot_completed'), '1'),
# wifi_enabled
(self.call.adb.Shell('dumpsys wifi'), 'stuff\nmore stuff\n'),
# wifi_enabled
(self.call.adb.Shell('dumpsys wifi'), 'stuff\nmore stuff\n'),
# wifi_enabled
(self.call.adb.Shell('dumpsys wifi'), self.TimeoutError())):
with self.assertRaises(device_errors.CommandTimeoutError):
self.device.WaitUntilFullyBooted(wifi=True)
@mock.patch('time.sleep', mock.Mock())
class DeviceUtilsRebootTest(DeviceUtilsTest):
def testReboot_nonBlocking(self):
with self.assertCalls(
self.call.adb.Reboot(),
(self.call.device.IsOnline(), True),
(self.call.device.IsOnline(), False)):
self.device.Reboot(block=False)
def testReboot_blocking(self):
with self.assertCalls(
self.call.adb.Reboot(),
(self.call.device.IsOnline(), True),
(self.call.device.IsOnline(), False),
self.call.device.WaitUntilFullyBooted(wifi=False)):
self.device.Reboot(block=True)
def testReboot_blockUntilWifi(self):
with self.assertCalls(
self.call.adb.Reboot(),
(self.call.device.IsOnline(), True),
(self.call.device.IsOnline(), False),
self.call.device.WaitUntilFullyBooted(wifi=True)):
self.device.Reboot(block=True, wifi=True)
class DeviceUtilsInstallTest(DeviceUtilsTest):
def testInstall_noPriorInstall(self):
with self.assertCalls(
(mock.call.pylib.utils.apk_helper.GetPackageName('/fake/test/app.apk'),
'this.is.a.test.package'),
(self.call.device.GetApplicationPath('this.is.a.test.package'), None),
self.call.adb.Install('/fake/test/app.apk', reinstall=False)):
self.device.Install('/fake/test/app.apk', retries=0)
def testInstall_differentPriorInstall(self):
with self.assertCalls(
(mock.call.pylib.utils.apk_helper.GetPackageName('/fake/test/app.apk'),
'this.is.a.test.package'),
(self.call.device.GetApplicationPath('this.is.a.test.package'),
'/fake/data/app/this.is.a.test.package.apk'),
(self.call.device._GetChangedFilesImpl(
'/fake/test/app.apk', '/fake/data/app/this.is.a.test.package.apk'),
[('/fake/test/app.apk', '/fake/data/app/this.is.a.test.package.apk')]),
self.call.adb.Uninstall('this.is.a.test.package'),
self.call.adb.Install('/fake/test/app.apk', reinstall=False)):
self.device.Install('/fake/test/app.apk', retries=0)
def testInstall_differentPriorInstall_reinstall(self):
with self.assertCalls(
(mock.call.pylib.utils.apk_helper.GetPackageName('/fake/test/app.apk'),
'this.is.a.test.package'),
(self.call.device.GetApplicationPath('this.is.a.test.package'),
'/fake/data/app/this.is.a.test.package.apk'),
(self.call.device._GetChangedFilesImpl(
'/fake/test/app.apk', '/fake/data/app/this.is.a.test.package.apk'),
[('/fake/test/app.apk', '/fake/data/app/this.is.a.test.package.apk')]),
self.call.adb.Install('/fake/test/app.apk', reinstall=True)):
self.device.Install('/fake/test/app.apk', reinstall=True, retries=0)
def testInstall_identicalPriorInstall(self):
with self.assertCalls(
(mock.call.pylib.utils.apk_helper.GetPackageName('/fake/test/app.apk'),
'this.is.a.test.package'),
(self.call.device.GetApplicationPath('this.is.a.test.package'),
'/fake/data/app/this.is.a.test.package.apk'),
(self.call.device._GetChangedFilesImpl(
'/fake/test/app.apk', '/fake/data/app/this.is.a.test.package.apk'),
[])):
self.device.Install('/fake/test/app.apk', retries=0)
def testInstall_fails(self):
with self.assertCalls(
(mock.call.pylib.utils.apk_helper.GetPackageName('/fake/test/app.apk'),
'this.is.a.test.package'),
(self.call.device.GetApplicationPath('this.is.a.test.package'), None),
(self.call.adb.Install('/fake/test/app.apk', reinstall=False),
self.CommandError('Failure\r\n'))):
with self.assertRaises(device_errors.CommandFailedError):
self.device.Install('/fake/test/app.apk', retries=0)
class DeviceUtilsRunShellCommandTest(DeviceUtilsTest):
def setUp(self):
super(DeviceUtilsRunShellCommandTest, self).setUp()
self.device.NeedsSU = mock.Mock(return_value=False)
def testRunShellCommand_commandAsList(self):
with self.assertCall(self.call.adb.Shell('pm list packages'), ''):
self.device.RunShellCommand(['pm', 'list', 'packages'])
def testRunShellCommand_commandAsListQuoted(self):
with self.assertCall(self.call.adb.Shell("echo 'hello world' '$10'"), ''):
self.device.RunShellCommand(['echo', 'hello world', '$10'])
def testRunShellCommand_commandAsString(self):
with self.assertCall(self.call.adb.Shell('echo "$VAR"'), ''):
self.device.RunShellCommand('echo "$VAR"')
def testNewRunShellImpl_withEnv(self):
with self.assertCall(
self.call.adb.Shell('VAR=some_string echo "$VAR"'), ''):
self.device.RunShellCommand('echo "$VAR"', env={'VAR': 'some_string'})
def testNewRunShellImpl_withEnvQuoted(self):
with self.assertCall(
self.call.adb.Shell('PATH="$PATH:/other/path" run_this'), ''):
self.device.RunShellCommand('run_this', env={'PATH': '$PATH:/other/path'})
def testNewRunShellImpl_withEnv_failure(self):
with self.assertRaises(KeyError):
self.device.RunShellCommand('some_cmd', env={'INVALID NAME': 'value'})
def testNewRunShellImpl_withCwd(self):
with self.assertCall(self.call.adb.Shell('cd /some/test/path && ls'), ''):
self.device.RunShellCommand('ls', cwd='/some/test/path')
def testNewRunShellImpl_withCwdQuoted(self):
with self.assertCall(
self.call.adb.Shell("cd '/some test/path with/spaces' && ls"), ''):
self.device.RunShellCommand('ls', cwd='/some test/path with/spaces')
def testRunShellCommand_withHugeCmd(self):
payload = 'hi! ' * 1024
expected_cmd = "echo '%s'" % payload
with self.assertCalls(
(mock.call.pylib.utils.device_temp_file.DeviceTempFile(
self.adb, suffix='.sh'), MockTempFile('/sdcard/temp-123.sh')),
self.call.device._WriteFileWithPush('/sdcard/temp-123.sh', expected_cmd),
(self.call.adb.Shell('sh /sdcard/temp-123.sh'), payload + '\n')):
self.assertEquals([payload],
self.device.RunShellCommand(['echo', payload]))
def testRunShellCommand_withHugeCmdAndSU(self):
payload = 'hi! ' * 1024
expected_cmd = """su -c sh -c 'echo '"'"'%s'"'"''""" % payload
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(mock.call.pylib.utils.device_temp_file.DeviceTempFile(
self.adb, suffix='.sh'), MockTempFile('/sdcard/temp-123.sh')),
self.call.device._WriteFileWithPush('/sdcard/temp-123.sh', expected_cmd),
(self.call.adb.Shell('sh /sdcard/temp-123.sh'), payload + '\n')):
self.assertEquals(
[payload],
self.device.RunShellCommand(['echo', payload], as_root=True))
def testRunShellCommand_withSu(self):
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.adb.Shell("su -c sh -c 'setprop service.adb.root 0'"), '')):
self.device.RunShellCommand('setprop service.adb.root 0', as_root=True)
def testRunShellCommand_manyLines(self):
cmd = 'ls /some/path'
with self.assertCall(self.call.adb.Shell(cmd), 'file1\nfile2\nfile3\n'):
self.assertEquals(['file1', 'file2', 'file3'],
self.device.RunShellCommand(cmd))
def testRunShellCommand_singleLine_success(self):
cmd = 'echo $VALUE'
with self.assertCall(self.call.adb.Shell(cmd), 'some value\n'):
self.assertEquals('some value',
self.device.RunShellCommand(cmd, single_line=True))
def testRunShellCommand_singleLine_successEmptyLine(self):
cmd = 'echo $VALUE'
with self.assertCall(self.call.adb.Shell(cmd), '\n'):
self.assertEquals('',
self.device.RunShellCommand(cmd, single_line=True))
def testRunShellCommand_singleLine_successWithoutEndLine(self):
cmd = 'echo -n $VALUE'
with self.assertCall(self.call.adb.Shell(cmd), 'some value'):
self.assertEquals('some value',
self.device.RunShellCommand(cmd, single_line=True))
def testRunShellCommand_singleLine_successNoOutput(self):
cmd = 'echo -n $VALUE'
with self.assertCall(self.call.adb.Shell(cmd), ''):
self.assertEquals('',
self.device.RunShellCommand(cmd, single_line=True))
def testRunShellCommand_singleLine_failTooManyLines(self):
cmd = 'echo $VALUE'
with self.assertCall(self.call.adb.Shell(cmd),
'some value\nanother value\n'):
with self.assertRaises(device_errors.CommandFailedError):
self.device.RunShellCommand(cmd, single_line=True)
def testRunShellCommand_checkReturn_success(self):
cmd = 'echo $ANDROID_DATA'
output = '/data\n'
with self.assertCall(self.call.adb.Shell(cmd), output):
self.assertEquals([output.rstrip()],
self.device.RunShellCommand(cmd, check_return=True))
def testRunShellCommand_checkReturn_failure(self):
cmd = 'ls /root'
output = 'opendir failed, Permission denied\n'
with self.assertCall(self.call.adb.Shell(cmd), self.ShellError(output)):
with self.assertRaises(device_errors.AdbCommandFailedError):
self.device.RunShellCommand(cmd, check_return=True)
def testRunShellCommand_checkReturn_disabled(self):
cmd = 'ls /root'
output = 'opendir failed, Permission denied\n'
with self.assertCall(self.call.adb.Shell(cmd), self.ShellError(output)):
self.assertEquals([output.rstrip()],
self.device.RunShellCommand(cmd, check_return=False))
class DeviceUtilsGetDevicePieWrapper(DeviceUtilsTest):
def testGetDevicePieWrapper_jb(self):
with self.assertCall(
self.call.device.build_version_sdk(),
constants.ANDROID_SDK_VERSION_CODES.JELLY_BEAN):
self.assertEqual('', self.device.GetDevicePieWrapper())
def testGetDevicePieWrapper_ics(self):
with self.assertCalls(
(self.call.device.build_version_sdk(),
constants.ANDROID_SDK_VERSION_CODES.ICE_CREAM_SANDWICH),
(mock.call.pylib.constants.GetOutDirectory(), '/foo/bar'),
(mock.call.os.path.exists(mock.ANY), True),
(self.call.adb.Push(mock.ANY, mock.ANY), '')):
self.assertNotEqual('', self.device.GetDevicePieWrapper())
@mock.patch('time.sleep', mock.Mock())
class DeviceUtilsKillAllTest(DeviceUtilsTest):
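  """Tests for DeviceUtils.KillAll."""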
def testKillAll_noMatchingProcesses(self):
with self.assertCall(self.call.adb.Shell('ps'),
'USER PID PPID VSIZE RSS WCHAN PC NAME\n'):
with self.assertRaises(device_errors.CommandFailedError):
self.device.KillAll('test_process')
def testKillAll_nonblocking(self):
with self.assertCalls(
(self.call.adb.Shell('ps'),
'USER PID PPID VSIZE RSS WCHAN PC NAME\n'
'u0_a1 1234 174 123456 54321 ffffffff 456789ab some.process\n'),
(self.call.adb.Shell('kill -9 1234'), '')):
self.assertEquals(1,
self.device.KillAll('some.process', blocking=False))
def testKillAll_blocking(self):
with self.assertCalls(
(self.call.adb.Shell('ps'),
'USER PID PPID VSIZE RSS WCHAN PC NAME\n'
'u0_a1 1234 174 123456 54321 ffffffff 456789ab some.process\n'),
(self.call.adb.Shell('kill -9 1234'), ''),
(self.call.adb.Shell('ps'),
'USER PID PPID VSIZE RSS WCHAN PC NAME\n'
'u0_a1 1234 174 123456 54321 ffffffff 456789ab some.process\n'),
(self.call.adb.Shell('ps'),
'USER PID PPID VSIZE RSS WCHAN PC NAME\n')):
self.assertEquals(1,
self.device.KillAll('some.process', blocking=True))
def testKillAll_root(self):
with self.assertCalls(
(self.call.adb.Shell('ps'),
'USER PID PPID VSIZE RSS WCHAN PC NAME\n'
'u0_a1 1234 174 123456 54321 ffffffff 456789ab some.process\n'),
(self.call.device.NeedsSU(), True),
(self.call.adb.Shell("su -c sh -c 'kill -9 1234'"), '')):
self.assertEquals(1,
self.device.KillAll('some.process', as_root=True))
def testKillAll_sigterm(self):
with self.assertCalls(
(self.call.adb.Shell('ps'),
'USER PID PPID VSIZE RSS WCHAN PC NAME\n'
'u0_a1 1234 174 123456 54321 ffffffff 456789ab some.process\n'),
(self.call.adb.Shell('kill -15 1234'), '')):
self.assertEquals(1,
self.device.KillAll('some.process', signum=signal.SIGTERM))
class DeviceUtilsStartActivityTest(DeviceUtilsTest):
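  """Tests for DeviceUtils.StartActivity."""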
def testStartActivity_actionOnly(self):
test_intent = intent.Intent(action='android.intent.action.VIEW')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_success(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_failure(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main'),
'Error: Failed to start test activity'):
with self.assertRaises(device_errors.CommandFailedError):
self.device.StartActivity(test_intent)
def testStartActivity_blocking(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'-W '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent, blocking=True)
def testStartActivity_withCategory(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
category='android.intent.category.HOME')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-c android.intent.category.HOME '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withMultipleCategories(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
category=['android.intent.category.HOME',
'android.intent.category.BROWSABLE'])
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-c android.intent.category.HOME '
'-c android.intent.category.BROWSABLE '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withData(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
data='http://www.google.com/')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-d http://www.google.com/ '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withStringExtra(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
extras={'foo': 'test'})
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main '
'--es foo test'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withBoolExtra(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
extras={'foo': True})
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main '
'--ez foo True'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withIntExtra(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
extras={'foo': 123})
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main '
'--ei foo 123'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
def testStartActivity_withTraceFile(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'--start-profiler test_trace_file.out '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent,
trace_file_name='test_trace_file.out')
def testStartActivity_withForceStop(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main')
with self.assertCall(
self.call.adb.Shell('am start '
'-S '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent, force_stop=True)
def testStartActivity_withFlags(self):
test_intent = intent.Intent(action='android.intent.action.VIEW',
package='this.is.a.test.package',
activity='.Main',
flags='0x10000000')
with self.assertCall(
self.call.adb.Shell('am start '
'-a android.intent.action.VIEW '
'-n this.is.a.test.package/.Main '
'-f 0x10000000'),
'Starting: Intent { act=android.intent.action.VIEW }'):
self.device.StartActivity(test_intent)
class DeviceUtilsStartInstrumentationTest(DeviceUtilsTest):
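  """Tests for DeviceUtils.StartInstrumentation."""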
def testStartInstrumentation_nothing(self):
with self.assertCalls(
self.call.device.RunShellCommand(
['am', 'instrument', 'test.package/.TestInstrumentation'],
check_return=True)):
self.device.StartInstrumentation(
'test.package/.TestInstrumentation',
finish=False, raw=False, extras=None)
def testStartInstrumentation_finish(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['am', 'instrument', '-w', 'test.package/.TestInstrumentation'],
check_return=True),
['OK (1 test)'])):
output = self.device.StartInstrumentation(
'test.package/.TestInstrumentation',
finish=True, raw=False, extras=None)
self.assertEquals(['OK (1 test)'], output)
def testStartInstrumentation_raw(self):
with self.assertCalls(
self.call.device.RunShellCommand(
['am', 'instrument', '-r', 'test.package/.TestInstrumentation'],
check_return=True)):
self.device.StartInstrumentation(
'test.package/.TestInstrumentation',
finish=False, raw=True, extras=None)
def testStartInstrumentation_extras(self):
with self.assertCalls(
self.call.device.RunShellCommand(
['am', 'instrument', '-e', 'foo', 'Foo', '-e', 'bar', 'Bar',
'test.package/.TestInstrumentation'],
check_return=True)):
self.device.StartInstrumentation(
'test.package/.TestInstrumentation',
finish=False, raw=False, extras={'foo': 'Foo', 'bar': 'Bar'})
class DeviceUtilsBroadcastIntentTest(DeviceUtilsTest):
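  """Tests for DeviceUtils.BroadcastIntent."""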
def testBroadcastIntent_noExtras(self):
test_intent = intent.Intent(action='test.package.with.an.INTENT')
with self.assertCall(
self.call.adb.Shell('am broadcast -a test.package.with.an.INTENT'),
'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
self.device.BroadcastIntent(test_intent)
def testBroadcastIntent_withExtra(self):
test_intent = intent.Intent(action='test.package.with.an.INTENT',
extras={'foo': 'bar value'})
with self.assertCall(
self.call.adb.Shell(
"am broadcast -a test.package.with.an.INTENT --es foo 'bar value'"),
'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
self.device.BroadcastIntent(test_intent)
def testBroadcastIntent_withExtra_noValue(self):
test_intent = intent.Intent(action='test.package.with.an.INTENT',
extras={'foo': None})
with self.assertCall(
self.call.adb.Shell(
'am broadcast -a test.package.with.an.INTENT --esn foo'),
'Broadcasting: Intent { act=test.package.with.an.INTENT } '):
self.device.BroadcastIntent(test_intent)
class DeviceUtilsGoHomeTest(DeviceUtilsTest):
def testGoHome(self):
with self.assertCall(
self.call.adb.Shell('am start -W -a android.intent.action.MAIN '
'-c android.intent.category.HOME'),
'Starting: Intent { act=android.intent.action.MAIN }\r\n'):
self.device.GoHome()
class DeviceUtilsForceStopTest(DeviceUtilsTest):
def testForceStop(self):
with self.assertCall(
self.call.adb.Shell('am force-stop this.is.a.test.package'),
''):
self.device.ForceStop('this.is.a.test.package')
class DeviceUtilsClearApplicationStateTest(DeviceUtilsTest):
def testClearApplicationState_packageDoesntExist(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '17\n'),
(self.call.device.GetApplicationPath('this.package.does.not.exist'),
None)):
self.device.ClearApplicationState('this.package.does.not.exist')
def testClearApplicationState_packageDoesntExistOnAndroidJBMR2OrAbove(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '18\n'),
(self.call.adb.Shell('pm clear this.package.does.not.exist'),
'Failed\r\n')):
self.device.ClearApplicationState('this.package.does.not.exist')
def testClearApplicationState_packageExists(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '17\n'),
(self.call.device.GetApplicationPath('this.package.exists'),
'/data/app/this.package.exists.apk'),
(self.call.adb.Shell('pm clear this.package.exists'),
'Success\r\n')):
self.device.ClearApplicationState('this.package.exists')
def testClearApplicationState_packageExistsOnAndroidJBMR2OrAbove(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.version.sdk'), '18\n'),
(self.call.adb.Shell('pm clear this.package.exists'),
'Success\r\n')):
self.device.ClearApplicationState('this.package.exists')
class DeviceUtilsSendKeyEventTest(DeviceUtilsTest):
def testSendKeyEvent(self):
with self.assertCall(self.call.adb.Shell('input keyevent 66'), ''):
self.device.SendKeyEvent(66)
class DeviceUtilsPushChangedFilesIndividuallyTest(DeviceUtilsTest):
def testPushChangedFilesIndividually_empty(self):
test_files = []
with self.assertCalls():
self.device._PushChangedFilesIndividually(test_files)
def testPushChangedFilesIndividually_single(self):
test_files = [('/test/host/path', '/test/device/path')]
with self.assertCalls(self.call.adb.Push(*test_files[0])):
self.device._PushChangedFilesIndividually(test_files)
def testPushChangedFilesIndividually_multiple(self):
test_files = [
('/test/host/path/file1', '/test/device/path/file1'),
('/test/host/path/file2', '/test/device/path/file2')]
with self.assertCalls(
self.call.adb.Push(*test_files[0]),
self.call.adb.Push(*test_files[1])):
self.device._PushChangedFilesIndividually(test_files)
class DeviceUtilsPushChangedFilesZippedTest(DeviceUtilsTest):
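  """Tests for DeviceUtils._PushChangedFilesZipped."""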
def testPushChangedFilesZipped_empty(self):
test_files = []
with self.assertCalls():
self.device._PushChangedFilesZipped(test_files)
def _testPushChangedFilesZipped_spec(self, test_files):
mock_zip_temp = mock.mock_open()
mock_zip_temp.return_value.name = '/test/temp/file/tmp.zip'
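    # Expected flow: zip the files in a host-side subprocess, push the archive
    # to the device's external storage, unzip it as root, then delete the zip.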
with self.assertCalls(
(mock.call.tempfile.NamedTemporaryFile(suffix='.zip'), mock_zip_temp),
(mock.call.multiprocessing.Process(
target=device_utils.DeviceUtils._CreateDeviceZip,
args=('/test/temp/file/tmp.zip', test_files)), mock.Mock()),
(self.call.device.GetExternalStoragePath(),
'/test/device/external_dir'),
self.call.adb.Push(
'/test/temp/file/tmp.zip', '/test/device/external_dir/tmp.zip'),
self.call.device.RunShellCommand(
['unzip', '/test/device/external_dir/tmp.zip'],
as_root=True,
env={'PATH': '/data/local/tmp/bin:$PATH'},
check_return=True),
(self.call.device.IsOnline(), True),
self.call.device.RunShellCommand(
['rm', '/test/device/external_dir/tmp.zip'], check_return=True)):
self.device._PushChangedFilesZipped(test_files)
def testPushChangedFilesZipped_single(self):
self._testPushChangedFilesZipped_spec(
[('/test/host/path/file1', '/test/device/path/file1')])
def testPushChangedFilesZipped_multiple(self):
self._testPushChangedFilesZipped_spec(
[('/test/host/path/file1', '/test/device/path/file1'),
('/test/host/path/file2', '/test/device/path/file2')])
class DeviceUtilsFileExistsTest(DeviceUtilsTest):
def testFileExists_usingTest_fileExists(self):
with self.assertCall(
self.call.device.RunShellCommand(
['test', '-e', '/path/file.exists'], check_return=True), ''):
self.assertTrue(self.device.FileExists('/path/file.exists'))
def testFileExists_usingTest_fileDoesntExist(self):
with self.assertCall(
self.call.device.RunShellCommand(
['test', '-e', '/does/not/exist'], check_return=True),
self.ShellError('', 1)):
self.assertFalse(self.device.FileExists('/does/not/exist'))
class DeviceUtilsPullFileTest(DeviceUtilsTest):
def testPullFile_existsOnDevice(self):
with mock.patch('os.path.exists', return_value=True):
with self.assertCall(
self.call.adb.Pull('/data/app/test.file.exists',
'/test/file/host/path')):
self.device.PullFile('/data/app/test.file.exists',
'/test/file/host/path')
def testPullFile_doesntExistOnDevice(self):
with mock.patch('os.path.exists', return_value=True):
with self.assertCall(
self.call.adb.Pull('/data/app/test.file.does.not.exist',
'/test/file/host/path'),
self.CommandError('remote object does not exist')):
with self.assertRaises(device_errors.CommandFailedError):
self.device.PullFile('/data/app/test.file.does.not.exist',
'/test/file/host/path')
class DeviceUtilsReadFileTest(DeviceUtilsTest):
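  """Tests for DeviceUtils.ReadFile and DeviceUtils._ReadFileWithPull."""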
def testReadFileWithPull_success(self):
tmp_host_dir = '/tmp/dir/on.host/'
tmp_host = MockTempFile('/tmp/dir/on.host/tmp_ReadFileWithPull')
tmp_host.file.read.return_value = 'some interesting contents'
with self.assertCalls(
(mock.call.tempfile.mkdtemp(), tmp_host_dir),
(self.call.adb.Pull('/path/to/device/file', mock.ANY)),
(mock.call.__builtin__.open(mock.ANY, 'r'), tmp_host),
(mock.call.os.path.exists(tmp_host_dir), True),
(mock.call.shutil.rmtree(tmp_host_dir), None)):
self.assertEquals('some interesting contents',
self.device._ReadFileWithPull('/path/to/device/file'))
tmp_host.file.read.assert_called_once_with()
def testReadFileWithPull_rejected(self):
tmp_host_dir = '/tmp/dir/on.host/'
with self.assertCalls(
(mock.call.tempfile.mkdtemp(), tmp_host_dir),
(self.call.adb.Pull('/path/to/device/file', mock.ANY),
self.CommandError()),
(mock.call.os.path.exists(tmp_host_dir), True),
(mock.call.shutil.rmtree(tmp_host_dir), None)):
with self.assertRaises(device_errors.CommandFailedError):
self.device._ReadFileWithPull('/path/to/device/file')
def testReadFile_exists(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['ls', '-l', '/read/this/test/file'],
as_root=False, check_return=True),
['-rw-rw---- root foo 256 1970-01-01 00:00 file']),
(self.call.device.RunShellCommand(
['cat', '/read/this/test/file'], as_root=False, check_return=True),
['this is a test file'])):
self.assertEqual('this is a test file\n',
self.device.ReadFile('/read/this/test/file'))
def testReadFile_doesNotExist(self):
with self.assertCall(
self.call.device.RunShellCommand(
['ls', '-l', '/this/file/does.not.exist'],
as_root=False, check_return=True),
self.CommandError('File does not exist')):
with self.assertRaises(device_errors.CommandFailedError):
self.device.ReadFile('/this/file/does.not.exist')
def testReadFile_withSU(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['ls', '-l', '/this/file/can.be.read.with.su'],
as_root=True, check_return=True),
['-rw------- root root 256 1970-01-01 00:00 can.be.read.with.su']),
(self.call.device.RunShellCommand(
['cat', '/this/file/can.be.read.with.su'],
as_root=True, check_return=True),
['this is a test file', 'read with su'])):
self.assertEqual(
'this is a test file\nread with su\n',
self.device.ReadFile('/this/file/can.be.read.with.su',
as_root=True))
def testReadFile_withPull(self):
contents = 'a' * 123456
with self.assertCalls(
(self.call.device.RunShellCommand(
['ls', '-l', '/read/this/big/test/file'],
as_root=False, check_return=True),
['-rw-rw---- root foo 123456 1970-01-01 00:00 file']),
(self.call.device._ReadFileWithPull('/read/this/big/test/file'),
contents)):
self.assertEqual(
contents, self.device.ReadFile('/read/this/big/test/file'))
def testReadFile_withPullAndSU(self):
contents = 'b' * 123456
with self.assertCalls(
(self.call.device.RunShellCommand(
['ls', '-l', '/this/big/file/can.be.read.with.su'],
as_root=True, check_return=True),
['-rw------- root root 123456 1970-01-01 00:00 can.be.read.with.su']),
(self.call.device.NeedsSU(), True),
(mock.call.pylib.utils.device_temp_file.DeviceTempFile(self.adb),
MockTempFile('/sdcard/tmp/on.device')),
self.call.device.RunShellCommand(
['cp', '/this/big/file/can.be.read.with.su',
'/sdcard/tmp/on.device'],
as_root=True, check_return=True),
(self.call.device._ReadFileWithPull('/sdcard/tmp/on.device'),
contents)):
self.assertEqual(
contents,
self.device.ReadFile('/this/big/file/can.be.read.with.su',
as_root=True))
class DeviceUtilsWriteFileTest(DeviceUtilsTest):
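  """Tests for DeviceUtils.WriteFile and DeviceUtils._WriteFileWithPush."""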
def testWriteFileWithPush_success(self):
tmp_host = MockTempFile('/tmp/file/on.host')
contents = 'some interesting contents'
with self.assertCalls(
(mock.call.tempfile.NamedTemporaryFile(), tmp_host),
self.call.adb.Push('/tmp/file/on.host', '/path/to/device/file')):
self.device._WriteFileWithPush('/path/to/device/file', contents)
tmp_host.file.write.assert_called_once_with(contents)
def testWriteFileWithPush_rejected(self):
tmp_host = MockTempFile('/tmp/file/on.host')
contents = 'some interesting contents'
with self.assertCalls(
(mock.call.tempfile.NamedTemporaryFile(), tmp_host),
(self.call.adb.Push('/tmp/file/on.host', '/path/to/device/file'),
self.CommandError())):
with self.assertRaises(device_errors.CommandFailedError):
self.device._WriteFileWithPush('/path/to/device/file', contents)
def testWriteFile_withPush(self):
contents = 'some large contents ' * 26 # 20 * 26 = 520 chars
with self.assertCalls(
self.call.device._WriteFileWithPush('/path/to/device/file', contents)):
self.device.WriteFile('/path/to/device/file', contents)
def testWriteFile_withPushForced(self):
contents = 'tiny contents'
with self.assertCalls(
self.call.device._WriteFileWithPush('/path/to/device/file', contents)):
self.device.WriteFile('/path/to/device/file', contents, force_push=True)
def testWriteFile_withPushAndSU(self):
contents = 'some large contents ' * 26 # 20 * 26 = 520 chars
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(mock.call.pylib.utils.device_temp_file.DeviceTempFile(self.adb),
MockTempFile('/sdcard/tmp/on.device')),
self.call.device._WriteFileWithPush('/sdcard/tmp/on.device', contents),
self.call.device.RunShellCommand(
['cp', '/sdcard/tmp/on.device', '/path/to/device/file'],
as_root=True, check_return=True)):
self.device.WriteFile('/path/to/device/file', contents, as_root=True)
def testWriteFile_withEcho(self):
with self.assertCall(self.call.adb.Shell(
"echo -n the.contents > /test/file/to.write"), ''):
self.device.WriteFile('/test/file/to.write', 'the.contents')
def testWriteFile_withEchoAndQuotes(self):
with self.assertCall(self.call.adb.Shell(
"echo -n 'the contents' > '/test/file/to write'"), ''):
self.device.WriteFile('/test/file/to write', 'the contents')
def testWriteFile_withEchoAndSU(self):
with self.assertCalls(
(self.call.device.NeedsSU(), True),
(self.call.adb.Shell("su -c sh -c 'echo -n contents > /test/file'"),
'')):
self.device.WriteFile('/test/file', 'contents', as_root=True)
class DeviceUtilsLsTest(DeviceUtilsTest):
def testLs_directory(self):
result = [('.', adb_wrapper.DeviceStat(16889, 4096, 1417436123)),
('..', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
('testfile.txt', adb_wrapper.DeviceStat(33206, 3, 1417436122))]
with self.assertCalls(
(self.call.adb.Ls('/data/local/tmp'), result)):
self.assertEquals(result,
self.device.Ls('/data/local/tmp'))
def testLs_nothing(self):
with self.assertCalls(
(self.call.adb.Ls('/data/local/tmp/testfile.txt'), [])):
self.assertEquals([],
self.device.Ls('/data/local/tmp/testfile.txt'))
class DeviceUtilsStatTest(DeviceUtilsTest):
def testStat_file(self):
result = [('.', adb_wrapper.DeviceStat(16889, 4096, 1417436123)),
('..', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
('testfile.txt', adb_wrapper.DeviceStat(33206, 3, 1417436122))]
with self.assertCalls(
(self.call.adb.Ls('/data/local/tmp'), result)):
self.assertEquals(adb_wrapper.DeviceStat(33206, 3, 1417436122),
self.device.Stat('/data/local/tmp/testfile.txt'))
def testStat_directory(self):
result = [('.', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
('..', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
('tmp', adb_wrapper.DeviceStat(16889, 4096, 1417436123))]
with self.assertCalls(
(self.call.adb.Ls('/data/local'), result)):
self.assertEquals(adb_wrapper.DeviceStat(16889, 4096, 1417436123),
self.device.Stat('/data/local/tmp'))
def testStat_doesNotExist(self):
result = [('.', adb_wrapper.DeviceStat(16889, 4096, 1417436123)),
('..', adb_wrapper.DeviceStat(16873, 4096, 12382237)),
('testfile.txt', adb_wrapper.DeviceStat(33206, 3, 1417436122))]
with self.assertCalls(
(self.call.adb.Ls('/data/local/tmp'), result)):
with self.assertRaises(device_errors.CommandFailedError):
self.device.Stat('/data/local/tmp/does.not.exist.txt')
class DeviceUtilsSetJavaAssertsTest(DeviceUtilsTest):
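  """Tests for DeviceUtils.SetJavaAsserts."""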
def testSetJavaAsserts_enable(self):
with self.assertCalls(
(self.call.device.ReadFile(constants.DEVICE_LOCAL_PROPERTIES_PATH),
'some.example.prop=with an example value\n'
'some.other.prop=value_ok\n'),
self.call.device.WriteFile(
constants.DEVICE_LOCAL_PROPERTIES_PATH,
'some.example.prop=with an example value\n'
'some.other.prop=value_ok\n'
'dalvik.vm.enableassertions=all\n'),
(self.call.device.GetProp('dalvik.vm.enableassertions'), ''),
self.call.device.SetProp('dalvik.vm.enableassertions', 'all')):
self.assertTrue(self.device.SetJavaAsserts(True))
def testSetJavaAsserts_disable(self):
with self.assertCalls(
(self.call.device.ReadFile(constants.DEVICE_LOCAL_PROPERTIES_PATH),
'some.example.prop=with an example value\n'
'dalvik.vm.enableassertions=all\n'
'some.other.prop=value_ok\n'),
self.call.device.WriteFile(
constants.DEVICE_LOCAL_PROPERTIES_PATH,
'some.example.prop=with an example value\n'
'some.other.prop=value_ok\n'),
(self.call.device.GetProp('dalvik.vm.enableassertions'), 'all'),
self.call.device.SetProp('dalvik.vm.enableassertions', '')):
self.assertTrue(self.device.SetJavaAsserts(False))
def testSetJavaAsserts_alreadyEnabled(self):
with self.assertCalls(
(self.call.device.ReadFile(constants.DEVICE_LOCAL_PROPERTIES_PATH),
'some.example.prop=with an example value\n'
'dalvik.vm.enableassertions=all\n'
'some.other.prop=value_ok\n'),
(self.call.device.GetProp('dalvik.vm.enableassertions'), 'all')):
self.assertFalse(self.device.SetJavaAsserts(True))
class DeviceUtilsGetPropTest(DeviceUtilsTest):
def testGetProp_exists(self):
with self.assertCall(
self.call.adb.Shell('getprop test.property'), 'property_value\n'):
self.assertEqual('property_value',
self.device.GetProp('test.property'))
def testGetProp_doesNotExist(self):
with self.assertCall(
self.call.adb.Shell('getprop property.does.not.exist'), '\n'):
self.assertEqual('', self.device.GetProp('property.does.not.exist'))
def testGetProp_cachedRoProp(self):
with self.assertCall(
self.call.adb.Shell('getprop ro.build.type'), 'userdebug\n'):
self.assertEqual('userdebug',
self.device.GetProp('ro.build.type', cache=True))
self.assertEqual('userdebug',
self.device.GetProp('ro.build.type', cache=True))
def testGetProp_retryAndCache(self):
with self.assertCalls(
(self.call.adb.Shell('getprop ro.build.type'), self.ShellError()),
(self.call.adb.Shell('getprop ro.build.type'), self.ShellError()),
(self.call.adb.Shell('getprop ro.build.type'), 'userdebug\n')):
self.assertEqual('userdebug',
self.device.GetProp('ro.build.type',
cache=True, retries=3))
self.assertEqual('userdebug',
self.device.GetProp('ro.build.type',
cache=True, retries=3))
class DeviceUtilsSetPropTest(DeviceUtilsTest):
def testSetProp(self):
with self.assertCall(
self.call.adb.Shell("setprop test.property 'test value'"), ''):
self.device.SetProp('test.property', 'test value')
def testSetProp_check_succeeds(self):
with self.assertCalls(
(self.call.adb.Shell('setprop test.property new_value'), ''),
(self.call.adb.Shell('getprop test.property'), 'new_value')):
self.device.SetProp('test.property', 'new_value', check=True)
def testSetProp_check_fails(self):
with self.assertCalls(
(self.call.adb.Shell('setprop test.property new_value'), ''),
(self.call.adb.Shell('getprop test.property'), 'old_value')):
with self.assertRaises(device_errors.CommandFailedError):
self.device.SetProp('test.property', 'new_value', check=True)
class DeviceUtilsGetPidsTest(DeviceUtilsTest):
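  """Tests for DeviceUtils.GetPids."""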
def testGetPids_noMatches(self):
with self.assertCall(self.call.adb.Shell('ps'),
'USER PID PPID VSIZE RSS WCHAN PC NAME\n'
'user 1000 100 1024 1024 ffffffff 00000000 no.match\n'):
self.assertEqual({}, self.device.GetPids('does.not.match'))
def testGetPids_oneMatch(self):
with self.assertCall(self.call.adb.Shell('ps'),
'USER PID PPID VSIZE RSS WCHAN PC NAME\n'
'user 1000 100 1024 1024 ffffffff 00000000 not.a.match\n'
'user 1001 100 1024 1024 ffffffff 00000000 one.match\n'):
self.assertEqual({'one.match': '1001'}, self.device.GetPids('one.match'))
  def testGetPids_multipleMatches(self):
with self.assertCall(self.call.adb.Shell('ps'),
'USER PID PPID VSIZE RSS WCHAN PC NAME\n'
'user 1000 100 1024 1024 ffffffff 00000000 not\n'
'user 1001 100 1024 1024 ffffffff 00000000 one.match\n'
'user 1002 100 1024 1024 ffffffff 00000000 two.match\n'
'user 1003 100 1024 1024 ffffffff 00000000 three.match\n'):
self.assertEqual(
{'one.match': '1001', 'two.match': '1002', 'three.match': '1003'},
self.device.GetPids('match'))
def testGetPids_exactMatch(self):
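    # GetPids matches process names by substring, so the non-exact match is
    # expected in the result as well.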
with self.assertCall(self.call.adb.Shell('ps'),
'USER PID PPID VSIZE RSS WCHAN PC NAME\n'
'user 1000 100 1024 1024 ffffffff 00000000 not.exact.match\n'
'user 1234 100 1024 1024 ffffffff 00000000 exact.match\n'):
self.assertEqual(
{'not.exact.match': '1000', 'exact.match': '1234'},
self.device.GetPids('exact.match'))
class DeviceUtilsTakeScreenshotTest(DeviceUtilsTest):
def testTakeScreenshot_fileNameProvided(self):
with self.assertCalls(
(mock.call.pylib.utils.device_temp_file.DeviceTempFile(
self.adb, suffix='.png'),
MockTempFile('/tmp/path/temp-123.png')),
(self.call.adb.Shell('/system/bin/screencap -p /tmp/path/temp-123.png'),
''),
self.call.device.PullFile('/tmp/path/temp-123.png',
'/test/host/screenshot.png')):
self.device.TakeScreenshot('/test/host/screenshot.png')
class DeviceUtilsGetMemoryUsageForPidTest(DeviceUtilsTest):
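  """Tests for DeviceUtils.GetMemoryUsageForPid."""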
def setUp(self):
super(DeviceUtilsGetMemoryUsageForPidTest, self).setUp()
def testGetMemoryUsageForPid_validPid(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['showmap', '1234'], as_root=True, check_return=True),
['100 101 102 103 104 105 106 107 TOTAL']),
(self.call.device.ReadFile('/proc/1234/status', as_root=True),
'VmHWM: 1024 kB\n')):
self.assertEqual(
{
'Size': 100,
'Rss': 101,
'Pss': 102,
'Shared_Clean': 103,
'Shared_Dirty': 104,
'Private_Clean': 105,
'Private_Dirty': 106,
'VmHWM': 1024
},
self.device.GetMemoryUsageForPid(1234))
def testGetMemoryUsageForPid_noSmaps(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['showmap', '4321'], as_root=True, check_return=True),
['cannot open /proc/4321/smaps: No such file or directory']),
(self.call.device.ReadFile('/proc/4321/status', as_root=True),
'VmHWM: 1024 kb\n')):
self.assertEquals({'VmHWM': 1024}, self.device.GetMemoryUsageForPid(4321))
def testGetMemoryUsageForPid_noStatus(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
['showmap', '4321'], as_root=True, check_return=True),
['100 101 102 103 104 105 106 107 TOTAL']),
(self.call.device.ReadFile('/proc/4321/status', as_root=True),
self.CommandError())):
self.assertEquals(
{
'Size': 100,
'Rss': 101,
'Pss': 102,
'Shared_Clean': 103,
'Shared_Dirty': 104,
'Private_Clean': 105,
'Private_Dirty': 106,
},
self.device.GetMemoryUsageForPid(4321))
class DeviceUtilsGetBatteryInfoTest(DeviceUtilsTest):
def testGetBatteryInfo_normal(self):
with self.assertCall(
self.call.device.RunShellCommand(
['dumpsys', 'battery'], check_return=True),
[
'Current Battery Service state:',
' AC powered: false',
' USB powered: true',
' level: 100',
' temperature: 321',
]):
self.assertEquals(
{
'AC powered': 'false',
'USB powered': 'true',
'level': '100',
'temperature': '321',
},
self.device.GetBatteryInfo())
def testGetBatteryInfo_nothing(self):
with self.assertCall(
self.call.device.RunShellCommand(
['dumpsys', 'battery'], check_return=True), []):
self.assertEquals({}, self.device.GetBatteryInfo())
class DeviceUtilsGetChargingTest(DeviceUtilsTest):
def testGetCharging_usb(self):
with self.assertCall(
self.call.device.GetBatteryInfo(), {'USB powered': 'true'}):
self.assertTrue(self.device.GetCharging())
def testGetCharging_usbFalse(self):
with self.assertCall(
self.call.device.GetBatteryInfo(), {'USB powered': 'false'}):
self.assertFalse(self.device.GetCharging())
def testGetCharging_ac(self):
with self.assertCall(
self.call.device.GetBatteryInfo(), {'AC powered': 'true'}):
self.assertTrue(self.device.GetCharging())
def testGetCharging_wireless(self):
with self.assertCall(
self.call.device.GetBatteryInfo(), {'Wireless powered': 'true'}):
self.assertTrue(self.device.GetCharging())
def testGetCharging_unknown(self):
with self.assertCall(
self.call.device.GetBatteryInfo(), {'level': '42'}):
self.assertFalse(self.device.GetCharging())
class DeviceUtilsSetChargingTest(DeviceUtilsTest):
@mock.patch('time.sleep', mock.Mock())
def testSetCharging_enabled(self):
with self.assertCalls(
(self.call.device.FileExists(mock.ANY), True),
(self.call.device.RunShellCommand(mock.ANY, check_return=True), []),
(self.call.device.GetCharging(), False),
(self.call.device.RunShellCommand(mock.ANY, check_return=True), []),
(self.call.device.GetCharging(), True)):
self.device.SetCharging(True)
def testSetCharging_alreadyEnabled(self):
with self.assertCalls(
(self.call.device.FileExists(mock.ANY), True),
(self.call.device.RunShellCommand(mock.ANY, check_return=True), []),
(self.call.device.GetCharging(), True)):
self.device.SetCharging(True)
@mock.patch('time.sleep', mock.Mock())
def testSetCharging_disabled(self):
with self.assertCalls(
(self.call.device.FileExists(mock.ANY), True),
(self.call.device.RunShellCommand(mock.ANY, check_return=True), []),
(self.call.device.GetCharging(), True),
(self.call.device.RunShellCommand(mock.ANY, check_return=True), []),
(self.call.device.GetCharging(), False)):
self.device.SetCharging(False)
class DeviceUtilsSetBatteryMeasurementTest(DeviceUtilsTest):
def testBatteryMeasurement(self):
with self.assertCalls(
(self.call.device.RunShellCommand(
mock.ANY, retries=0, single_line=True,
timeout=10, check_return=True), '22'),
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '--reset'], check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'batterystats', '--charged', '--checkin'],
check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'usb', '0'], check_return=True), []),
(self.call.device.GetCharging(), False),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'set', 'usb', '1'], check_return=True), []),
(self.call.device.RunShellCommand(
['dumpsys', 'battery', 'reset'], check_return=True), []),
(self.call.device.GetCharging(), True)):
with self.device.BatteryMeasurement():
pass
class DeviceUtilsStrTest(DeviceUtilsTest):
def testStr_returnsSerial(self):
with self.assertCalls(
(self.call.adb.GetDeviceSerial(), '0123456789abcdef')):
self.assertEqual('0123456789abcdef', str(self.device))
class DeviceUtilsParallelTest(mock_calls.TestCase):
def testParallel_default(self):
test_serials = ['0123456789abcdef', 'fedcba9876543210']
with self.assertCall(
mock.call.pylib.device.adb_wrapper.AdbWrapper.GetDevices(),
[_AdbWrapperMock(serial) for serial in test_serials]):
parallel_devices = device_utils.DeviceUtils.parallel()
for serial, device in zip(test_serials, parallel_devices.pGet(None)):
self.assertTrue(
isinstance(device, device_utils.DeviceUtils)
and serial == str(device),
'Expected a DeviceUtils object with serial %s' % serial)
def testParallel_noDevices(self):
with self.assertCall(
mock.call.pylib.device.adb_wrapper.AdbWrapper.GetDevices(), []):
with self.assertRaises(device_errors.NoDevicesError):
device_utils.DeviceUtils.parallel()
class DeviceUtilsClientCache(DeviceUtilsTest):
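  """Tests for the per-client caches returned by DeviceUtils.GetClientCache."""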
def testClientCache_twoCaches(self):
self.device._cache['test'] = 0
client_cache_one = self.device.GetClientCache('ClientOne')
client_cache_one['test'] = 1
client_cache_two = self.device.GetClientCache('ClientTwo')
client_cache_two['test'] = 2
self.assertEqual(self.device._cache, {'test': 0})
self.assertEqual(client_cache_one, {'test': 1})
self.assertEqual(client_cache_two, {'test': 2})
self.device._ClearCache()
self.assertEqual(self.device._cache, {})
self.assertEqual(client_cache_one, {})
self.assertEqual(client_cache_two, {})
def testClientCache_multipleInstances(self):
client_cache_one = self.device.GetClientCache('ClientOne')
client_cache_one['test'] = 1
client_cache_two = self.device.GetClientCache('ClientOne')
self.assertEqual(client_cache_one, {'test': 1})
self.assertEqual(client_cache_two, {'test': 1})
self.device._ClearCache()
self.assertEqual(client_cache_one, {})
self.assertEqual(client_cache_two, {})
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
|
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-13 23:50
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
migrations.CreateModel(
name='LocalityGISSelections',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('localitylabel', models.CharField(blank=True, db_column='localitylabel', max_length=255, null=True, verbose_name='locality label')),
('sourcefc', models.CharField(blank=True, db_column='sourcefc', max_length=255, null=True, verbose_name='source fc')),
],
options={
'db_table': 'localitygisselections',
'managed': True,
'verbose_name_plural': 'Locality GIS Selections',
},
),
migrations.CreateModel(
name='LocalityPlaceResourceEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
],
options={
'db_table': 'localityplaceresourceevent',
'managed': True,
'verbose_name_plural': 'Localities - Place-Resources',
},
),
migrations.CreateModel(
name='MediaCitationEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('relationshipdescription', models.CharField(blank=True, db_column='relationshipdescription', max_length=255, null=True, verbose_name='excerpt/description')),
('pages', models.CharField(blank=True, db_column='pages', max_length=255, null=True)),
],
options={
'db_table': 'mediacitationevents',
'verbose_name_plural': 'Media - Sources',
'verbose_name': 'Medium - Source',
'managed': True,
},
),
migrations.CreateModel(
name='PlaceAltIndigenousName',
fields=[
('altindigenousnameid', models.AutoField(db_column='altindigenousnameid', primary_key=True, serialize=False)),
('altindigenousname', models.CharField(blank=True, db_column='altindigenousname', max_length=255, null=True, verbose_name='alternate name')),
],
options={
'db_table': 'placealtindigenousname',
'verbose_name': 'Place - Alternate Name',
'managed': True,
'verbose_name_plural': 'Places - Alternate Names',
},
),
migrations.CreateModel(
name='PlaceGISSelections',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('placelabel', models.CharField(blank=True, db_column='placelabel', max_length=255, null=True, verbose_name='label')),
('sourcefc', models.CharField(blank=True, db_column='sourcefc', max_length=255, null=True, verbose_name='source fc')),
],
options={
'db_table': 'placegisselections',
'managed': True,
'verbose_name_plural': 'Place GIS Selections',
},
),
migrations.CreateModel(
name='PlacesCitationEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('relationshipdescription', models.CharField(blank=True, db_column='relationshipdescription', max_length=255, null=True, verbose_name='excerpt/description')),
('pages', models.CharField(blank=True, db_column='pages', max_length=255, null=True)),
],
options={
'db_table': 'placescitationevents',
'verbose_name_plural': 'Places - Sources',
'verbose_name': 'Place - Source',
'managed': True,
},
),
migrations.CreateModel(
name='PlacesMediaEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('relationshipdescription', models.CharField(blank=True, db_column='relationshipdescription', max_length=255, null=True, verbose_name='relationship description')),
('pages', models.CharField(blank=True, db_column='pages', max_length=50, null=True)),
],
options={
'db_table': 'placesmediaevents',
'verbose_name_plural': 'Places - Media',
'verbose_name': 'Place - Medium',
'managed': True,
},
),
migrations.CreateModel(
name='PlacesResourceCitationEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('relationshipdescription', models.CharField(blank=True, db_column='relationshipdescription', max_length=255, null=True, verbose_name='excerpt/description')),
('pages', models.CharField(blank=True, db_column='pages', max_length=255, null=True)),
],
options={
'db_table': 'placesresourcecitationevents',
'managed': True,
'verbose_name_plural': 'Place-Resources - Sources',
},
),
migrations.CreateModel(
name='PlacesResourceEvents',
fields=[
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('placeresourceid', models.AutoField(db_column='placeresourceid', primary_key=True, serialize=False)),
('relationshipdescription', models.CharField(blank=True, db_column='relationshipdescription', max_length=255, null=True, verbose_name='excerpt')),
('barterresource', models.BooleanField(db_column='barterresource', default=False, verbose_name='barter resource?')),
('january', models.BooleanField(db_column='january', default=False)),
('february', models.BooleanField(db_column='february', default=False)),
('march', models.BooleanField(db_column='march', default=False)),
('april', models.BooleanField(db_column='april', default=False)),
('may', models.BooleanField(db_column='may', default=False)),
('june', models.BooleanField(db_column='june', default=False)),
('july', models.BooleanField(db_column='july', default=False)),
('august', models.BooleanField(db_column='august', default=False)),
('september', models.BooleanField(db_column='september', default=False)),
('october', models.BooleanField(db_column='october', default=False)),
('november', models.BooleanField(db_column='november', default=False)),
('december', models.BooleanField(db_column='december', default=False)),
('year', models.IntegerField(blank=True, db_column='year', null=True)),
('islocked', models.BooleanField(db_column='islocked', default=False, verbose_name='locked?')),
],
options={
'db_table': 'placesresourceevents',
'verbose_name': 'Place - Resource',
'managed': True,
'verbose_name_plural': 'Places - Resources',
},
),
migrations.CreateModel(
name='PlacesResourceMediaEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('relationshipdescription', models.CharField(blank=True, db_column='relationshipdescription', max_length=255, null=True, verbose_name='relationship description')),
('pages', models.CharField(blank=True, db_column='pages', max_length=50, null=True)),
],
options={
'db_table': 'placesresourcemediaevents',
'managed': True,
'verbose_name_plural': 'Place-Resources - Media',
},
),
migrations.CreateModel(
name='ResourceActivityCitationEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('relationshipdescription', models.CharField(blank=True, db_column='relationshipdescription', max_length=255, null=True, verbose_name='excerpt/description')),
('pages', models.CharField(blank=True, db_column='pages', max_length=255, null=True)),
],
options={
'db_table': 'resourceactivitycitationevents',
'managed': True,
'verbose_name_plural': 'Activity - Sources',
},
),
migrations.CreateModel(
name='ResourceActivityMediaEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('relationshipdescription', models.CharField(blank=True, db_column='relationshipdescription', max_length=255, null=True, verbose_name='relationship description')),
('pages', models.CharField(blank=True, db_column='pages', max_length=50, null=True)),
],
options={
'db_table': 'resourceactivitymediaevents',
'managed': True,
'verbose_name_plural': 'Activity - Media',
},
),
migrations.CreateModel(
name='ResourceAltIndigenousName',
fields=[
('altindigenousnameid', models.AutoField(db_column='altindigenousnameid', primary_key=True, serialize=False)),
('altindigenousname', models.CharField(blank=True, db_column='altindigenousname', max_length=255, null=True, verbose_name='alt name')),
],
options={
'db_table': 'resourcealtindigenousname',
'managed': True,
'verbose_name_plural': 'Resource Alternative Names',
},
),
migrations.CreateModel(
name='ResourceResourceEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('relationshipdescription', models.CharField(blank=True, db_column='relationshipdescription', max_length=255, null=True, verbose_name='relationship description')),
],
options={
'db_table': 'resourceresourceevents',
'managed': True,
'verbose_name_plural': 'Resources - Resources',
},
),
migrations.CreateModel(
name='ResourcesCitationEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('relationshipdescription', models.CharField(blank=True, db_column='relationshipdescription', max_length=255, null=True, verbose_name='excerpt/description')),
('pages', models.CharField(blank=True, db_column='pages', max_length=255, null=True)),
],
options={
'db_table': 'resourcescitationevents',
'verbose_name_plural': 'Resources - Sources',
'verbose_name': 'Resource - Source',
'managed': True,
},
),
migrations.CreateModel(
name='ResourcesMediaEvents',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('enteredbyname', models.CharField(blank=True, db_column='enteredbyname', max_length=25, null=True, verbose_name='entered by name')),
('enteredbytribe', models.CharField(blank=True, db_column='enteredbytribe', max_length=100, null=True, verbose_name='entered by tribe')),
('enteredbytitle', models.CharField(blank=True, db_column='enteredbytitle', max_length=100, null=True, verbose_name='entered by title')),
('enteredbydate', models.DateTimeField(auto_now_add=True, db_column='enteredbydate', null=True, verbose_name='entered by date')),
('modifiedbyname', models.CharField(blank=True, db_column='modifiedbyname', max_length=25, null=True, verbose_name='modified by name')),
('modifiedbytitle', models.CharField(blank=True, db_column='modifiedbytitle', max_length=100, null=True, verbose_name='modified by title')),
('modifiedbytribe', models.CharField(blank=True, db_column='modifiedbytribe', max_length=100, null=True, verbose_name='modified by tribe')),
('modifiedbydate', models.DateTimeField(auto_now=True, db_column='modifiedbydate', null=True, verbose_name='modified by date')),
('relationshipdescription', models.CharField(blank=True, db_column='relationshipdescription', max_length=255, null=True, verbose_name='relationship description')),
('pages', models.CharField(blank=True, db_column='pages', max_length=50, null=True)),
],
options={
'db_table': 'resourcesmediaevents',
'verbose_name_plural': 'Resources - Media',
'verbose_name': 'Resource - Medium',
'managed': True,
},
),
]
|
|
import sys
from pingdomlib.analysis import PingdomAnalysis
checktypes = ['http', 'httpcustom', 'tcp', 'ping', 'dns', 'udp', 'smtp',
'pop3', 'imap']
legacy_notification_parameters = ['notifyagainevery', 'notifywhenbackup',
'sendnotificationwhendown', 'sendtoandroid',
'sendtoemail', 'sendtoiphone', 'sendtosms',
'sendtotwitter']
class PingdomCheck(object):
"""Class representing a check in pingdom
Attributes:
* id -- Check identifier
* name -- Check name
* type -- Check type
* lasterrortime -- Timestamp of last error (if any). Format is UNIX
timestamp
* lasttesttime -- Timestamp of last test (if any). Format is UNIX
timestamp
* lastresponsetime -- Response time (in milliseconds) of last test
* status -- Current status of check
* resolution -- How often should the check be tested. In minutes
* hostname -- Target host
* created -- Creation time. Format is UNIX timestamp
        * contactids -- Identifiers of contacts who should receive alerts
* sendtoemail -- Send alerts as email
* sendtosms -- Send alerts as SMS
* sendtotwitter -- Send alerts through Twitter
* sendtoiphone -- Send alerts to iPhone
* sendtoandroid -- Send alerts to Android
* sendnotificationwhendown -- Send notification when down this many
times
* notifyagainevery -- Notify again every n result
* notifywhenbackup -- Notify when back up again
* use_legacy_notifications -- Use the old notifications instead of BeepManager
* probe_filters -- What region should the probe check from
"""
_detail_keys = ['name', 'resolution', 'sendtoemail', 'sendtosms',
'sendtotwitter', 'sendtoiphone', 'paused', 'contactids',
'sendnotificationwhendown', 'notifyagainevery',
'notifywhenbackup', 'created', 'type', 'hostname',
'status', 'lasterrortime', 'lasttesttime',
'use_legacy_notifications', 'lastresponsetime', 'probe_filters',]
def __init__(self, instantiator, checkinfo=dict()):
self.pingdom = instantiator
self.__addDetails__(checkinfo)
def __getattr__(self, attr):
# Pull variables from pingdom if unset
if attr in self._detail_keys:
self.getDetails()
return getattr(self, attr)
else:
raise AttributeError("'PingdomCheck' object has no attribute '%s'"
% attr)
def __setattr__(self, key, value):
# Autopush changes to attributes
if key in ['paused', 'resolution', 'contactids', 'sendtoemail',
'sendtosms', 'sendtotwitter', 'sendtoiphone',
'sendnotificationwhendown', 'notifyagainevery',
'notifywhenbackup', 'created', 'hostname', 'status',
'lasterrortime', 'lasttesttime', 'url', 'encryption',
'port', 'auth', 'shouldcontain', 'shouldnotcontain',
'postdata', 'additionalurls', 'stringtosend',
'stringtoexpect', 'expectedip', 'nameserver',
'use_legacy_notifications', 'host', 'alert_policy',
'autoresolve', 'probe_filters']:
if self.pingdom.pushChanges:
self.modify(**{key: value})
else:
object.__setattr__(self, key, value)
object.__setattr__(self, key, value)
def __str__(self):
return "<PingdomCheck (%s)%s is '%s'>" % (self.id, self.name,
self.status)
def getAnalyses(self, **kwargs):
"""Returns a list of the latest root cause analysis results for a
specified check.
Optional Parameters:
* limit -- Limits the number of returned results to the
specified quantity.
Type: Integer
Default: 100
* offset -- Offset for listing. (Requires limit.)
Type: Integer
Default: 0
* time_from -- Return only results with timestamp of first test greater
or equal to this value. Format is UNIX timestamp.
Type: Integer
Default: 0
* time_to -- Return only results with timestamp of first test less or
equal to this value. Format is UNIX timestamp.
Type: Integer
Default: Current Time
Returned structure:
[
{
'id' : <Integer> Analysis id
'timefirsttest' : <Integer> Time of test that initiated the
confirmation test
            'timeconfirmtest' : <Integer> Time of the confirmation test
                                that performed the error analysis
},
...
]
"""
# 'from' is a reserved word, use time_from instead
if kwargs.get('time_from'):
kwargs['from'] = kwargs.get('time_from')
del kwargs['time_from']
if kwargs.get('time_to'):
kwargs['to'] = kwargs.get('time_to')
del kwargs['time_to']
# Warn user about unhandled kwargs
for key in kwargs:
if key not in ['limit', 'offset', 'from', 'to']:
sys.stderr.write('%s not a valid argument for analysis()\n'
% key)
response = self.pingdom.request('GET', 'analysis/%s' % self.id,
kwargs)
return [PingdomAnalysis(self, x) for x in response.json()['analysis']]
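    # Hedged example of the time-window mapping above (timestamps illustrative):
    #
    #   analyses = check.getAnalyses(limit=5, time_from=1577836800)
    #
    # 'time_from'/'time_to' are remapped to the API's reserved 'from'/'to' keys.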
def __addDetails__(self, checkinfo):
"""Fills attributes from a dictionary, uses special handling for the
'type' key"""
# Auto-load instance attributes from passed in dictionary
for key in checkinfo:
if key == 'type':
if checkinfo[key] in checktypes:
self.type = checkinfo[key]
else:
# Take key from type dict, convert to string for type
self.type = checkinfo[key].iterkeys().next()
# Take value from type dict, store to member of new attrib
object.__setattr__(self, self.type,
checkinfo[key].itervalues().next())
else:
# Store other key value pairs as attributes
object.__setattr__(self, key, checkinfo[key])
# back-fill missing keys (if any)
missing_keys = list(set(self._detail_keys) - set(checkinfo.keys()))
for key in missing_keys:
object.__setattr__(self, key, None)
if 'status' in checkinfo and checkinfo['status'] == 'paused':
object.__setattr__(self, 'paused', True)
else:
object.__setattr__(self, 'paused', False)
def getDetails(self):
"""Update check details, returns dictionary of details"""
response = self.pingdom.request('GET', 'checks/%s' % self.id)
self.__addDetails__(response.json()['check'])
return response.json()['check']
def modify(self, **kwargs):
"""Modify settings for a check. The provided settings will overwrite
previous values. Settings not provided will stay the same as before
the update. To clear an existing value, provide an empty value.
Please note that you cannot change the type of a check once it has
been created.
General parameters:
* name -- Check name
Type: String
* host - Target host
Type: String
* paused -- Check should be paused
Type: Boolean
* resolution -- Check resolution time (in minutes)
Type: Integer [1, 5, 15, 30, 60]
* contactids -- Comma separated list of contact IDs
Type: String
* sendtoemail -- Send alerts as email
Type: Boolean
* sendtosms -- Send alerts as SMS
Type: Boolean
* sendtotwitter -- Send alerts through Twitter
Type: Boolean
* sendtoiphone -- Send alerts to iPhone
Type: Boolean
* sendtoandroid -- Send alerts to Android
Type: Boolean
* sendnotificationwhendown -- Send notification when check is down
the given number of times
Type: Integer
* notifyagainevery -- Set how many results to wait for in between
notices
Type: Integer
* notifywhenbackup -- Notify when back up again
Type: Boolean
* use_legacy_notifications -- Use old notifications instead of BeepManager
Type: Boolean
* probe_filters -- Can be one of region: NA, region: EU, region: APAC
Type: String
HTTP check options:
* url -- Target path on server
Type: String
* encryption -- Use SSL/TLS
Type: Boolean
* port -- Target server port
Type: Integer
* auth -- Username and password for HTTP authentication
Example: user:password
Type: String
* shouldcontain -- Target site should contain this string.
Cannot be combined with 'shouldnotcontain'
Type: String
* shouldnotcontain -- Target site should not contain this string.
Cannot be combined with 'shouldcontain'
Type: String
* postdata -- Data that should be posted to the web page,
for example submission data for a sign-up or login form.
The data needs to be formatted in the same way as a web browser
would send it to the web server
Type: String
* requestheader<NAME> -- Custom HTTP header, replace <NAME> with
desired header name. Header in form: Header:Value
Type: String
HTTPCustom check options:
* url -- Target path on server
Type: String
* encryption -- Use SSL/TLS
Type: Boolean
* port -- Target server port
Type: Integer
* auth -- Username and password for HTTP authentication
Example: user:password
Type: String
            * additionalurls -- Colon-separated list of additional URLs with
hostname included
Type: String
TCP check options:
* port -- Target server port
Type: Integer
* stringtosend -- String to send
Type: String
* stringtoexpect -- String to expect in response
Type: String
DNS check options:
* expectedip -- Expected IP
Type: String
* nameserver -- Nameserver to check
Type: String
UDP check options:
* port -- Target server port
Type: Integer
* stringtosend -- String to send
Type: String
* stringtoexpect -- String to expect in response
Type: String
SMTP check options:
* port -- Target server port
Type: Integer
* auth -- Username and password for target SMTP authentication.
Example: user:password
Type: String
* stringtoexpect -- String to expect in response
Type: String
* encryption -- Use connection encryption
Type: Boolean
POP3 check options:
* port -- Target server port
Type: Integer
* stringtoexpect -- String to expect in response
Type: String
* encryption -- Use connection encryption
Type: Boolean
IMAP check options:
* port -- Target server port
Type: Integer
* stringtoexpect -- String to expect in response
Type: String
* encryption -- Use connection encryption
Type: Boolean
"""
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['paused', 'resolution', 'contactids', 'sendtoemail',
'sendtosms', 'sendtotwitter', 'sendtoiphone',
'sendnotificationwhendown', 'notifyagainevery',
'notifywhenbackup', 'created', 'type', 'hostname',
'status', 'lasterrortime', 'lasttesttime', 'url',
'encryption', 'port', 'auth', 'shouldcontain',
'shouldnotcontain', 'postdata', 'additionalurls',
'stringtosend', 'stringtoexpect', 'expectedip',
'nameserver', 'use_legacy_notifications', 'host',
'alert_policy', 'autoresolve', 'probe_filters']:
sys.stderr.write("'%s'" % key + ' is not a valid argument of' +
'<PingdomCheck>.modify()\n')
# If one of the legacy parameters is used, it is required to set the legacy flag.
# https://github.com/KennethWilke/PingdomLib/issues/12
if any([k for k in kwargs if k in legacy_notification_parameters]):
if "use_legacy_notifications" in kwargs and kwargs["use_legacy_notifications"] != True:
raise Exception("Cannot set legacy parameter when use_legacy_notifications is not True")
kwargs["use_legacy_notifications"] = True
response = self.pingdom.request("PUT", 'checks/%s' % self.id, kwargs)
return response.json()['message']
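    # Hedged usage sketch for modify() (values illustrative):
    #
    #   check.modify(paused=True, resolution=5)
    #   check.modify(sendtoemail=True)   # a legacy parameter: the guard above
    #                                    # forces use_legacy_notifications=True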
def delete(self):
"""Deletes the check from pingdom, CANNOT BE REVERSED!
Returns status message of operation"""
response = self.pingdom.request("DELETE", "checks/%s" % self.id)
return response.json()['message']
def averages(self, **kwargs):
"""Get the average time / uptime value for a specified check and time
period.
Optional parameters:
* time_from -- Start time of period. Format is UNIX timestamp
Type: Integer
Default: 0
* time_to -- End time of period. Format is UNIX timestamp
Type: Integer
Default: Current time
* probes -- Filter to only use results from a list of probes.
Format is a comma separated list of probe identifiers
Type: String
Default: All probes
* includeuptime -- Include uptime information
Type: Boolean
Default: False
* bycountry -- Split response times into country groups
Type: Boolean
Default: False
* byprobe -- Split response times into probe groups
Type: Boolean
Default: False
Returned structure:
{
'responsetime' :
{
'to' : <Integer> Start time of period
'from' : <Integer> End time of period
'avgresponse' : <Integer> Total average response time in
milliseconds
},
< More can be included with optional parameters >
}
"""
# 'from' is a reserved word, use time_from instead
if kwargs.get('time_from'):
kwargs['from'] = kwargs.get('time_from')
del kwargs['time_from']
if kwargs.get('time_to'):
kwargs['to'] = kwargs.get('time_to')
del kwargs['time_to']
# Warn user about unhandled parameters
for key in kwargs:
if key not in ['from', 'to', 'probes', 'includeuptime',
'bycountry', 'byprobe']:
sys.stderr.write("'%s'" % key + ' is not a valid argument of' +
                                 '<PingdomCheck>.averages()\n')
response = self.pingdom.request('GET', 'summary.average/%s' % self.id,
kwargs)
return response.json()['summary']
def hoursofday(self, **kwargs):
"""Returns the average response time for each hour of the day (0-23)
for a specific check over a selected time period. I.e. it shows you
what an average day looks like during that time period.
Optional parameters:
* time_from -- Start time of period. Format is UNIX timestamp
Type: Integer
Default: One week earlier than 'to'
* time_to -- End time of period. Format is UNIX timestamp
Type: Integer
Default: Current time
* probes -- Filter to only use results from a list of probes.
Format is a comma separated list of probe identifiers
Type: String
Default: All probes
* uselocaltime -- If true, use the user's local time zone for
results (from and to parameters should still be specified in
UTC). If false, use UTC for results.
Type: Boolean
Default: False
Returned structure:
[
{
'hour' : <Integer> Hour of day (0-23). Please note that
                        if data is missing for an individual hour, its
                        entry will not be included in the result.
'avgresponse': <Integer> Average response time(in milliseconds)
for this hour of the day
},
...
]
"""
# 'from' is a reserved word, use time_from instead
if kwargs.get('time_from'):
kwargs['from'] = kwargs.get('time_from')
del kwargs['time_from']
if kwargs.get('time_to'):
kwargs['to'] = kwargs.get('time_to')
del kwargs['time_to']
        # Warn user about unhandled parameters
for key in kwargs:
if key not in ['from', 'to', 'probes', 'uselocaltime']:
sys.stderr.write("'%s'" % key + ' is not a valid argument of' +
                                 '<PingdomCheck>.hoursofday()\n')
response = self.pingdom.request('GET', 'summary.hoursofday/%s' %
self.id, kwargs)
return response.json()['hoursofday']
def outages(self, **kwargs):
"""Get a list of status changes for a specified check and time period.
        If order is specified as descending, the list is ordered by newest
first. (Default is ordered by oldest first.)
Optional Parameters:
* time_from -- Start time of period. Format is UNIX timestamp
Type: Integer
Default: One week earlier than 'to'
* time_to -- End time of period. Format is UNIX timestamp
Type: Integer
Default: Current time
* order -- Sorting order of outages. Ascending or descending
Type: String ['asc', 'desc']
Default: asc
Returned structure:
[
{
'status' : <String> Interval status
'timefrom' : <Integer> Interval start. Format is UNIX timestamp
'timeto' : <Integer> Interval end. Format is UNIX timestamp
},
...
]
"""
# 'from' is a reserved word, use time_from instead
if kwargs.get('time_from'):
kwargs['from'] = kwargs.get('time_from')
del kwargs['time_from']
if kwargs.get('time_to'):
kwargs['to'] = kwargs.get('time_to')
del kwargs['time_to']
        # Warn user about unhandled parameters
for key in kwargs:
if key not in ['from', 'to', 'order']:
sys.stderr.write("'%s'" % key + ' is not a valid argument of' +
                                 '<PingdomCheck>.outages()\n')
response = self.pingdom.request('GET', 'summary.outage/%s' % self.id,
kwargs)
return response.json()['summary']['states']
def performance(self, **kwargs):
"""For a given interval in time, return a list of sub intervals with
the given resolution. Useful for generating graphs. A sub interval
        may be a week, a day or an hour depending on the chosen resolution
Optional Parameters:
* time_from -- Start time of period. Format is UNIX timestamp
Type: Integer
Default: 10 intervals earlier than 'to'
* time_to -- End time of period. Format is UNIX timestamp
Type: Integer
Default: Current time
            * resolution -- Interval size
Type: String ['hour', 'day', 'week']
Default: hour
* includeuptime -- Include uptime information
Type: Boolean
Default: False
* probes -- Filter to only use results from a list of probes.
Format is a comma separated list of probe identifiers. Can not
be used if includeuptime is set to true. Also note that this
can cause intervals to be omitted, since there may be no
results from the desired probes in them.
Type: String
Default: All probes
* order -- Sorting order of sub intervals. Ascending or descending.
Type: String ['asc', 'desc']
Default: asc
Returned structure:
{
<RESOLUTION> :
[
{
'starttime' : <Integer> Hour interval start. Format UNIX
timestamp
'avgresponse' : <Integer> Average response time for this
interval in milliseconds
'uptime' : <Integer> Total uptime for this interval in
seconds
'downtime' : <Integer> Total downtime for this interval
in seconds
'unmonitored' : <Integer> Total unmonitored time for this
interval in seconds
},
...
]
}
"""
# 'from' is a reserved word, use time_from instead
if kwargs.get('time_from'):
kwargs['from'] = kwargs.get('time_from')
del kwargs['time_from']
if kwargs.get('time_to'):
kwargs['to'] = kwargs.get('time_to')
del kwargs['time_to']
        # Warn user about unhandled parameters
for key in kwargs:
if key not in ['from', 'to', 'resolution', 'includeuptime',
'probes', 'order']:
sys.stderr.write("'%s'" % key + ' is not a valid argument of' +
                                 '<PingdomCheck>.performance()\n')
response = self.pingdom.request('GET', 'summary.performance/%s' %
self.id, kwargs)
return response.json()['summary']
def probes(self, fromtime, totime=None):
"""Get a list of probes that performed tests for a specified check
during a specified period."""
args = {'from': fromtime}
if totime:
args['to'] = totime
response = self.pingdom.request('GET', 'summary.probes/%s' % self.id,
args)
return response.json()['probes']
def results(self, **kwargs):
"""Return a list of raw test results for a specified check
Optional Parameters:
* time_from -- Start time of period. Format is UNIX timestamp
Type: Integer
Default: 1 day prior to 'to'
* time_to -- End time of period. Format is UNIX timestamp
Type: Integer
Default: Current time
* probes -- Filter to only show results from a list of probes.
Format is a comma separated list of probe identifiers
Type: String
Default: All probes
* status -- Filter to only show results with specified statuses.
Format is a comma separated list of (down, up, unconfirmed,
unknown)
Type: String
Default: All statuses
* limit -- Number of results to show
Type: Integer (max 1000)
Default: 1000
* offset -- Number of results to skip
Type: Integer (max 43200)
Default: 0
* includeanalysis -- Attach available root cause analysis
identifiers to corresponding results
Type: Boolean
Default: False
* maxresponse -- Maximum response time (ms). If set, specified
interval must not be larger than 31 days.
Type: Integer
Default: None
* minresponse -- Minimum response time (ms). If set, specified
interval must not be larger than 31 days.
Type: Integer
Default: None
Returned structure:
{
'results' :
[
{
'probeid' : <Integer> Probe identifier
'time' : <Integer> Time when test was performed.
Format is UNIX timestamp
'status' : <String> Result status ['up', 'down',
'unconfirmed_down', 'unknown']
'responsetime' : <Integer> Response time in milliseconds
Will be 0 if no response was received
'statusdesc' : <String> Short status description
'statusdesclong' : <String> Long status description
'analysisid' : <Integer> Analysis identifier
},
...
],
'activeprobes' : <Integer List> Probe identifiers in result set
}
"""
# 'from' is a reserved word, use time_from instead
if kwargs.get('time_from'):
kwargs['from'] = kwargs.get('time_from')
del kwargs['time_from']
if kwargs.get('time_to'):
kwargs['to'] = kwargs.get('time_to')
del kwargs['time_to']
        # Warn user about unhandled parameters
for key in kwargs:
if key not in ['from', 'to', 'probes', 'status', 'limit', 'offset',
'includeanalysis', 'maxresponse', 'minresponse']:
sys.stderr.write("'%s'" % key + ' is not a valid argument of' +
                                 '<PingdomCheck>.results()\n')
response = self.pingdom.request('GET', 'results/%s' % self.id, kwargs)
return response.json()
def publishPublicReport(self):
"""Activate public report for this check.
Returns status message"""
response = self.pingdom.request('PUT', 'reports.public/%s' % self.id)
return response.json()['message']
def removePublicReport(self):
"""Deactivate public report for this check.
Returns status message"""
response = self.pingdom.request('DELETE',
'reports.public/%s' % self.id)
return response.json()['message']
|
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import threading
import logging
from concurrent import futures
import grpc
from google.protobuf import empty_pb2, timestamp_pb2
from fedlearner.common import common_pb2 as common_pb
from fedlearner.common import data_join_service_pb2 as dj_pb
from fedlearner.common import data_join_service_pb2_grpc as dj_grpc
from fedlearner.common.db_client import DBClient
from fedlearner.proxy.channel import make_insecure_channel, ChannelType
from fedlearner.data_join.routine_worker import RoutineWorker
from fedlearner.data_join.raw_data_manifest_manager import (
RawDataManifestManager
)
from fedlearner.data_join.common import (retrieve_data_source,
commit_data_source)
class MasterFSM(object):
INVALID_PEER_FSM_STATE = {}
INVALID_PEER_FSM_STATE[common_pb.DataSourceState.Init] = set(
[common_pb.DataSourceState.Failed,
common_pb.DataSourceState.Ready,
common_pb.DataSourceState.Finished]
)
INVALID_PEER_FSM_STATE[common_pb.DataSourceState.Processing] = set(
[common_pb.DataSourceState.Failed,
common_pb.DataSourceState.Finished]
)
INVALID_PEER_FSM_STATE[common_pb.DataSourceState.Ready] = set(
[common_pb.DataSourceState.Failed,
common_pb.DataSourceState.Init]
)
INVALID_PEER_FSM_STATE[common_pb.DataSourceState.Finished] = set(
[common_pb.DataSourceState.Failed,
common_pb.DataSourceState.Init,
common_pb.DataSourceState.Processing]
)
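    # The table above lists, for each local state, the peer states that are
    # inconsistent with it. For example, a local Init combined with a peer that
    # already reports Ready or Finished means the peer advanced past a stage this
    # side never confirmed, so _fallback_failed_state() aborts to Failed.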
def __init__(self, peer_client, data_source_name, kvstore, batch_mode):
self._lock = threading.Lock()
self._peer_client = peer_client
self._data_source_name = data_source_name
self._kvstore = kvstore
self._batch_mode = batch_mode
self._init_fsm_action()
self._data_source = None
self._sync_data_source()
self._reset_batch_mode()
self._raw_data_manifest_manager = RawDataManifestManager(
kvstore, self._data_source, batch_mode
)
self._data_source_meta = self._data_source.data_source_meta
if self._data_source.role == common_pb.FLRole.Leader:
self._role_repr = "leader"
else:
self._role_repr = "follower"
self._fsm_worker = None
self._started = False
def get_mainifest_manager(self):
return self._raw_data_manifest_manager
def get_data_source(self):
with self._lock:
return self._sync_data_source()
def set_failed(self):
return self.set_state(common_pb.DataSourceState.Failed, None)
def set_state(self, new_state, origin_state=None):
with self._lock:
try:
data_source = self._sync_data_source()
if data_source.state == new_state:
return True
if origin_state is None or data_source.state == origin_state:
data_source.state = new_state
self._update_data_source(data_source)
return True
new_data_source = self._sync_data_source()
logging.warning("DataSource: %s failed to set to state: "
"%d, origin state mismatch(%d != %d)",
self._data_source_name, new_state,
origin_state, new_data_source.state)
return False
except Exception as e: # pylint: disable=broad-except
logging.warning("Faile to set state to %d with exception %s",
new_state, e)
return False
return True
def start_fsm_worker(self):
with self._lock:
if not self._started:
assert self._fsm_worker is None, \
"fsm_woker must be None if FSM is not started"
self._started = True
self._fsm_worker = RoutineWorker(
'{}_fsm_worker'.format(self._data_source_name),
self._fsm_routine_fn,
self._fsm_routine_cond, 5
)
self._fsm_worker.start_routine()
def stop_fsm_worker(self):
tmp_worker = None
with self._lock:
if self._fsm_worker is not None:
tmp_worker = self._fsm_worker
self._fsm_worker = None
if tmp_worker is not None:
tmp_worker.stop_routine()
def _fsm_routine_fn(self):
peer_info = self._get_peer_data_source_status()
with self._lock:
data_source = self._sync_data_source()
state = data_source.state
if self._fallback_failed_state(peer_info):
logging.warning("%s at state %d, Peer at state %d "\
"state invalid! abort data source %s",
self._role_repr, state,
peer_info.state, self._data_source_name)
elif state not in self._fsm_driven_handle:
logging.error("%s at error state %d for data_source %s",
self._role_repr, state, self._data_source_name)
else:
state_changed = self._fsm_driven_handle[state](peer_info)
if state_changed:
new_state = self._sync_data_source().state
logging.warning("%s state changed from %d to %d",
self._role_repr, state, new_state)
state = new_state
if state in (common_pb.DataSourceState.Init,
common_pb.DataSourceState.Processing) and \
not self._batch_mode:
self._raw_data_manifest_manager.sub_new_raw_data()
def _fsm_routine_cond(self):
return True
def _sync_data_source(self):
if self._data_source is None:
self._data_source = \
retrieve_data_source(self._kvstore, self._data_source_name)
assert self._data_source is not None, \
"data source {} is not in kvstore".format(self._data_source_name)
return self._data_source
def _reset_batch_mode(self):
if self._batch_mode:
data_source = self._sync_data_source()
if data_source.state == common_pb.DataSourceState.UnKnown:
raise RuntimeError("Failed to reset batch mode since "\
"DataSource {} at UnKnown state"\
.format(self._data_source_name))
if data_source.state in (common_pb.DataSourceState.Init,
common_pb.DataSourceState.Processing):
logging.info("DataSouce %s at Init/Processing State. Don't "\
"need reset", self._data_source_name)
elif data_source.state == common_pb.DataSourceState.Ready:
logging.info("DataSouce %s at Ready. need reset to Processing "\
"state", self._data_source_name)
data_source.state = common_pb.DataSourceState.Processing
self._update_data_source(data_source)
else:
raise RuntimeError("Failed to reset batch mode since Data"\
"Source {} at Finished/Failed state, Peer"\
"may delete it"\
.format(self._data_source_name))
def _init_fsm_action(self):
self._fsm_driven_handle = {
common_pb.DataSourceState.UnKnown:
self._get_fsm_action('unknown'),
common_pb.DataSourceState.Init:
self._get_fsm_action('init'),
common_pb.DataSourceState.Processing:
self._get_fsm_action('processing'),
common_pb.DataSourceState.Ready:
self._get_fsm_action('ready'),
common_pb.DataSourceState.Finished:
self._get_fsm_action('finished'),
common_pb.DataSourceState.Failed:
self._get_fsm_action('failed')
}
def _get_fsm_action(self, action):
def _not_implement(useless):
            raise NotImplementedError('no FSM action implemented for this state')
name = '_fsm_{}_action'.format(action)
return getattr(self, name, _not_implement)
def _fsm_init_action(self, peer_info):
state_changed = False
if self._data_source.role == common_pb.FLRole.Leader:
if peer_info.state == common_pb.DataSourceState.Init:
state_changed = True
elif peer_info.state == common_pb.DataSourceState.Processing:
state_changed = True
if state_changed:
self._data_source.state = common_pb.DataSourceState.Processing
self._update_data_source(self._data_source)
return True
return False
def _fsm_processing_action(self, peer_info):
if self._all_partition_finished():
state_changed = False
if self._data_source.role == common_pb.FLRole.Leader:
if peer_info.state == common_pb.DataSourceState.Processing:
state_changed = True
elif peer_info.state == common_pb.DataSourceState.Ready:
state_changed = True
if state_changed:
self._data_source.state = common_pb.DataSourceState.Ready
self._update_data_source(self._data_source)
return True
return False
def _fsm_ready_action(self, peer_info):
if self._batch_mode:
logging.info("stop fsm from Ready to Finish since "\
"the data join master run in batch mode")
return False
state_changed = False
if self._data_source.role == common_pb.FLRole.Leader:
if peer_info.state == common_pb.DataSourceState.Ready:
state_changed = True
elif peer_info.state == common_pb.DataSourceState.Finished:
state_changed = True
if state_changed:
self._data_source.state = common_pb.DataSourceState.Finished
self._update_data_source(self._data_source)
return True
return False
def _fsm_finished_action(self, peer_info):
return False
def _fsm_failed_action(self, peer_info):
if peer_info.state != common_pb.DataSourceState.Failed:
request = dj_pb.DataSourceRequest(
data_source_meta=self._data_source_meta
)
self._peer_client.AbortDataSource(request)
return False
def _fallback_failed_state(self, peer_info):
state = self._data_source.state
if (state in self.INVALID_PEER_FSM_STATE and
peer_info.state in self.INVALID_PEER_FSM_STATE[state]):
self._data_source.state = common_pb.DataSourceState.Failed
self._update_data_source(self._data_source)
return True
return False
def _update_data_source(self, data_source):
self._data_source = None
try:
commit_data_source(self._kvstore, data_source)
except Exception as e:
logging.error("Failed to update data source: %s since "\
"exception: %s", self._data_source_name, e)
raise
self._data_source = data_source
logging.debug("Success update to update data source: %s.",
self._data_source_name)
def _get_peer_data_source_status(self):
request = dj_pb.DataSourceRequest(
data_source_meta=self._data_source_meta
)
return self._peer_client.GetDataSourceStatus(request)
def _all_partition_finished(self):
all_manifest = self._raw_data_manifest_manager.list_all_manifest()
assert len(all_manifest) == \
self._data_source.data_source_meta.partition_num, \
"manifest number should same with partition number"
for manifest in all_manifest.values():
if manifest.sync_example_id_rep.state != \
dj_pb.SyncExampleIdState.Synced or \
manifest.join_example_rep.state != \
dj_pb.JoinExampleState.Joined:
return False
return True
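# Handshake summary derived from the _fsm_*_action methods above: the leader
# advances Init -> Processing -> Ready -> Finished as soon as the peer reports
# the same state, while the follower advances only after the peer (leader) has
# already reached the next state; Processing -> Ready additionally requires
# _all_partition_finished(), and Ready -> Finished is skipped in batch mode.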
class DataJoinMaster(dj_grpc.DataJoinMasterServiceServicer):
def __init__(self, peer_client, data_source_name,
kvstore_type, options):
super(DataJoinMaster, self).__init__()
self._data_source_name = data_source_name
kvstore = DBClient(kvstore_type, options.use_mock_etcd)
self._options = options
self._fsm = MasterFSM(peer_client, data_source_name,
kvstore, self._options.batch_mode)
self._data_source_meta = \
self._fsm.get_data_source().data_source_meta
def GetDataSource(self, request, context):
return self._fsm.get_data_source()
def GetDataSourceStatus(self, request, context):
self._check_data_source_meta(request.data_source_meta, True)
data_source = self._fsm.get_data_source()
response = dj_pb.DataSourceStatus(
role=data_source.role,
state=data_source.state
)
return response
def AbortDataSource(self, request, context):
response = self._check_data_source_meta(request.data_source_meta)
if response.code == 0 and not self._fsm.set_failed():
response.code = -2
response.error_message = "failed to set failed state to fsm"
return response
def RequestJoinPartition(self, request, context):
response = dj_pb.RawDataResponse()
meta_status = self._check_data_source_meta(request.data_source_meta)
if meta_status.code != 0:
response.status.MergeFrom(meta_status)
return response
rank_status = self._check_rank_id(request.rank_id)
if rank_status.code != 0:
response.status.MergeFrom(rank_status)
return response
data_source = self._fsm.get_data_source()
if data_source.state != common_pb.DataSourceState.Processing:
response.status.code = -3
response.status.error_message = \
"data source is not at processing state"
else:
manifest_manager = self._fsm.get_mainifest_manager()
rank_id = request.rank_id
manifest = None
partition_id = None if request.partition_id < 0 \
else request.partition_id
if request.HasField('sync_example_id'):
manifest = manifest_manager.alloc_sync_exampld_id(
rank_id, partition_id
)
elif request.HasField('join_example'):
manifest = manifest_manager.alloc_join_example(
rank_id, partition_id
)
else:
response.status.code = -4
response.status.error_message = "request not support"
if response.status.code == 0:
if manifest is not None:
response.manifest.MergeFrom(manifest)
else:
assert partition_id is None, \
"only the request without appoint partition "\
"support response no manifest"
response.finished.MergeFrom(empty_pb2.Empty())
return response
def FinishJoinPartition(self, request, context):
response = self._check_data_source_meta(request.data_source_meta)
if response.code != 0:
return response
response = self._check_rank_id(request.rank_id)
if response.code != 0:
return response
data_source = self._fsm.get_data_source()
if data_source.state != common_pb.DataSourceState.Processing:
response.code = -2
response.error_message = "data source is not at processing state"
else:
rank_id = request.rank_id
partition_id = request.partition_id
manifest_manager = self._fsm.get_mainifest_manager()
if request.HasField('sync_example_id'):
manifest_manager.finish_sync_example_id(rank_id, partition_id)
elif request.HasField('join_example'):
manifest_manager.finish_join_example(rank_id, partition_id)
else:
response.code = -3
response.error_message = "request not support"
return response
def QueryRawDataManifest(self, request, context):
self._check_data_source_meta(request.data_source_meta, True)
manifest_manager = self._fsm.get_mainifest_manager()
manifest = manifest_manager.get_manifest(request.partition_id)
return manifest
def FinishRawData(self, request, context):
response = self._check_data_source_meta(request.data_source_meta)
if response.code == 0:
if self._options.batch_mode:
response.code = -2
response.error_message = "Forbid to finish raw data since "\
"master run in batch mode"
elif request.HasField('finish_raw_data'):
manifest_manager = self._fsm.get_mainifest_manager()
manifest_manager.finish_raw_data(request.partition_id)
else:
response.code = -3
response.error_message = \
"FinishRawData should has finish_raw_data"
return response
def AddRawData(self, request, context):
response = self._check_data_source_meta(request.data_source_meta)
if response.code == 0:
sub_dir = self._fsm.get_data_source().raw_data_sub_dir
if self._options.batch_mode:
response.code = -2
response.error_message = "Forbid to add raw data since "\
"master run in batch mode"
elif request.HasField('added_raw_data_metas'):
manifest_manager = self._fsm.get_mainifest_manager()
manifest_manager.add_raw_data(
request.partition_id,
request.added_raw_data_metas.raw_data_metas,
request.added_raw_data_metas.dedup
)
else:
response.code = -3
response.error_message = \
"AddRawData should has field next_process_index"
return response
def ForwardPeerDumpedIndex(self, request, context):
response = self._check_data_source_meta(request.data_source_meta)
if response.code == 0:
manifest_manager = self._fsm.get_mainifest_manager()
if request.HasField('peer_dumped_index'):
manifest_manager.forward_peer_dumped_index(
request.partition_id,
request.peer_dumped_index.peer_dumped_index
)
else:
response.code = -2
response.error_message = "ForwardPeerDumpedIndex should "\
"has field peer_dumped_index"
return response
def GetRawDataLatestTimeStamp(self, request, context):
response = dj_pb.RawDataResponse(
timestamp=timestamp_pb2.Timestamp(seconds=0)
)
meta_status = self._check_data_source_meta(request.data_source_meta)
if meta_status.code != 0:
response.status.MergeFrom(meta_status)
return response
manifest_manager = self._fsm.get_mainifest_manager()
ts = manifest_manager.get_raw_date_latest_timestamp(
request.partition_id
)
if ts is not None:
response.timestamp.MergeFrom(ts)
return response
def _check_data_source_meta(self, remote_meta, raise_exp=False):
if self._data_source_meta != remote_meta:
local_meta = self._data_source_meta
if local_meta.name != remote_meta.name:
logging.error("data_source_meta mismtach since name "\
"%s != %s", local_meta.name, remote_meta.name)
if local_meta.partition_num != remote_meta.partition_num:
logging.error("data_source_meta mismatch since partition "\
"num %d != %d", local_meta.partition_num,
remote_meta.partition_num)
if local_meta.start_time != remote_meta.start_time:
logging.error("data_source_meta mismatch since start_time "\
"%d != %d",
local_meta.start_time, remote_meta.start_time)
if local_meta.end_time != remote_meta.end_time:
logging.error("data_source_meta mismatch since end_time "\
"%d != %d",
local_meta.end_time, remote_meta.end_time)
if local_meta.negative_sampling_rate != \
remote_meta.negative_sampling_rate:
logging.error("data_source_meta mismatch since negative_"\
"sampling_rate %f != %f",
local_meta.negative_sampling_rate,
remote_meta.negative_sampling_rate)
if raise_exp:
raise RuntimeError("data source meta mismatch")
return common_pb.Status(
code=-1, error_message="data source meta mismatch"
)
return common_pb.Status(code=0)
def _check_rank_id(self, rank_id):
if rank_id < 0:
return common_pb.Status(
code=-2, error_message="invalid rank id"
)
return common_pb.Status(code=0)
def start_fsm(self):
self._fsm.start_fsm_worker()
def stop_fsm(self):
self._fsm.stop_fsm_worker()
class DataJoinMasterService(object):
def __init__(self, listen_port, peer_addr, data_source_name,
kvstore_type, options):
channel = make_insecure_channel(
peer_addr, ChannelType.REMOTE,
options=[('grpc.max_send_message_length', 2**31-1),
('grpc.max_receive_message_length', 2**31-1)]
)
peer_client = dj_grpc.DataJoinMasterServiceStub(channel)
self._data_source_name = data_source_name
self._listen_port = listen_port
self._server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
options=[('grpc.max_send_message_length', 2**31-1),
('grpc.max_receive_message_length', 2**31-1)])
self._data_join_master = DataJoinMaster(
peer_client, data_source_name, kvstore_type, options
)
dj_grpc.add_DataJoinMasterServiceServicer_to_server(
self._data_join_master, self._server
)
self._server.add_insecure_port('[::]:%d'%listen_port)
self._server_started = False
def start(self):
if not self._server_started:
self._server.start()
self._data_join_master.start_fsm()
self._server_started = True
logging.info("DataJoinMasterService for data_source: " \
"%s start on port[%d]", self._data_source_name,
self._listen_port)
def stop(self):
if self._server_started:
self._data_join_master.stop_fsm()
self._server.stop(None)
self._server_started = False
logging.info("DataJoinMasterService for data_source: %s "\
"stopped ", self._data_source_name)
def run(self):
self.start()
self._server.wait_for_termination()
self.stop()
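# A minimal launch sketch (assumptions: the kvstore named by `kvstore_type` is
# reachable and `master_options` is an options object exposing at least
# batch_mode and use_mock_etcd, as the constructors above require):
#
#   service = DataJoinMasterService(listen_port=50051,
#                                   peer_addr='peer-master.example:50051',
#                                   data_source_name='example_data_source',
#                                   kvstore_type='etcd',
#                                   options=master_options)
#   service.run()   # start(), block until termination, then stop()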
|
|
"""
plugins/Qoop.py
Author: Trey Stout
Date Added: Mon Mar 27 17:50:22 CST 2006
Bastard interface to zoto. Takes requests from a php page, and calls qoop.hit()
then returns some "xml" to qoop.
"""
## STD LIBS
from md5 import md5
from datetime import date, datetime
from xml.dom import minidom
from pprint import pprint, pformat
from math import floor
import time
## OUR LIBS
from AZTKAPI import AZTKAPI
from decorators import stack
import errors, aztk_config, validation
## 3RD PARTY LIBS
from twisted.internet.defer import Deferred, DeferredList
class Qoop(AZTKAPI):
enable_broker = False
enable_node = True
enable_web = True
enable_zapi = False
def start(self):
self.allowed_methods = [
'get_password_token',
'get_photos',
'get_albums',
'get_album_photos',
'get_user_info',
]
self.secret = self._cfg_qoop_secret
self.password = self._cfg_qoop_password
self.image_url = "http://%%s.%s/img/%%s/%%s" % aztk_config.setup.get('site', 'domain')
@stack
def invalid_token(self, failure):
return self.generate_error("Invalid or Expired Token [%s]" % failure.getErrorMessage())
@stack
def hit(self, method, args, vendor_secret, signature):
# make sure we recognize the called method
if method not in self.allowed_methods:
return self.generate_error("Method not recognized")
if isinstance(args, (list, tuple)):
args = dict(args)
# verify the arguments for this method
user_token = args.get('user_token', None)
backdoor_username = args.get('backdoor_username', None)
password_token = args.get('password_token', None)
album_id = args.get('album_id', None)
try:
per_page = int(args.get('per_page', 0))
except ValueError, ex:
return self.generate_error('"per_page" must be an integer, not %s [%s]' % (args['per_page'], type(args['page_number'])))
try:
page_number = int(args.get('page_number', 0))
except ValueError, ex:
return self.generate_error('"page_number" must be an integer, not %s [%s]' % (args['page_number'], type(args['page_number'])))
self.log.debug("method: %s" % method)
self.log.debug("backdoor_username: %s" % backdoor_username)
self.log.debug("album_id: %s" % album_id)
self.log.debug("per_page: %s" % per_page)
self.log.debug("page_number: %s" % page_number)
self.log.debug("user_token: %s" % user_token)
self.log.debug("password_token: %s" % password_token)
self.log.debug("vendor_secret: %s" % vendor_secret)
self.log.debug("vendor_password: %s" % self.password)
@stack
def check_access_level(auth_username):
d_fal = self.app.api.friends.get_access_level(backdoor_username, auth_username)
d_fal.addCallback(lambda fal: (auth_username, fal))
return d_fal
@stack
def run_call(user_thing, per_page, page_number):
self.log.debug("running call to method: %s" % method)
if isinstance(user_thing, tuple):
user, fal = user_thing
else:
user = user_thing
fal = 'private'
if method == "get_password_token":
self.log.debug("fetching a password token for %s" % user_token)
if md5("%s%s%s%s" % (method, user_token, self.secret, self.password)).hexdigest() != signature:
return self.generate_error("Signatures do not match for this method & user [%s]" % method)
return self.get_password_token(user_token)
elif method == "get_photos":
if per_page or page_number:
if backdoor_username:
if md5("%s%s%s%s%s%s" % (method, per_page, page_number, password_token, backdoor_username, self.password)).hexdigest() != signature:
return self.generate_error("Signatures do not match for this method & user")
else:
if md5("%s%s%s%s%s" % (method, per_page, page_number, password_token, self.password)).hexdigest() != signature:
return self.generate_error("Signatures do not match for this method & user")
else:
# set some default pagination to avoid signature calculation
self.log.debug("setting default pagination to page 1, 200 per page")
per_page = 200
page_number = 1
self.log.debug("done setting default pagination")
if backdoor_username:
if md5("%s%s%s%s" % (method, password_token, backdoor_username, self.password)).hexdigest() != signature:
return self.generate_error("Signatures do not match for this method & user")
else:
if md5("%s%s%s" % (method, password_token, self.password)).hexdigest() != signature:
return self.generate_error("Signatures do not match for this method & user")
return self.get_photos(user, password_token, per_page, page_number, backdoor_username, fal)
elif method == "get_albums":
if backdoor_username:
if md5("%s%s%s%s" % (method, password_token, backdoor_username, self.password)).hexdigest() != signature:
return self.generate_error("Signatures do not match for this method & user")
else:
if md5("%s%s%s" % (method, password_token, self.password)).hexdigest() != signature:
return self.generate_error("Signatures do not match for this method & user")
return self.get_albums(user, password_token, backdoor_username, fal)
elif method == "get_album_photos":
if backdoor_username:
if md5("%s%s%s%s%s" % (method, album_id, password_token, backdoor_username, self.password)).hexdigest() != signature:
return self.generate_error("Signatures do not match for this method & user")
else:
if md5("%s%s%s%s" % (method, album_id, password_token, self.password)).hexdigest() != signature:
return self.generate_error("Signatures do not match for this method & user")
return self.get_album_photos(user, password_token, album_id, backdoor_username, fal)
elif method == "get_user_info":
if md5("%s%s%s" % (method, password_token, self.password)).hexdigest() != signature:
return self.generate_error("Signatures do not match for this method & user")
return self.get_user_info(user, password_token)
if password_token:
# they sent a password token, let's find the user it belongs to
d = self.get_user_by_password_token(password_token)
if backdoor_username:
d.addCallback(check_access_level)
d.addCallback(run_call, per_page, page_number)
d.addErrback(self.invalid_token)
return d
else:
if method == "get_password_token":
return run_call(None, per_page, page_number)
else:
return self.generate_error("No password token was supplied")
@stack
def add_password_token(self, username):
""" It's lame, but so is Qoop """
username = validation.username(username)
# make a new key, or update the expiration of an existing one
data = {
"username": username,
"user_token": md5("%s%s" % (username, time.time())).hexdigest(),
"password_token": md5("%s%s" % (md5("%s%s" % (username, time.time())), time.time())).hexdigest()
}
d = self.app.db.query("""
insert into
qoop_tokens (
username,
user_token,
password_token,
expires
) values (
%(username)s,
%(user_token)s,
%(password_token)s,
DATE_ADD(now(), INTERVAL 2 HOUR)
)
on duplicate key
update
expires = DATE_ADD(now(), INTERVAL 2 HOUR)
""", data, database='cluster')
# first see if there is already a key we can use
def find_token(void):
d2 = self.app.db.query("""
select
user_token
from
qoop_tokens
where
username = %s
and
expires > now()
""", (username,), database='cluster')
d2.addCallback(result_mods.return_1st_cell)
return d2
d.addCallback(self.collect_garbage)
d.addCallback(find_token)
return d
@stack
def get_password_token(self, user_token):
"""
Using a valid user_token identifier, go find the password_token for future reference
"""
@stack
def make_response(token):
self.log.debug("found %s for %s" % (token, user_token))
doc = minidom.Document()
answer = doc.createElement('answer')
answer.setAttribute('status', 'ok')
doc.appendChild(answer)
token_node = doc.createElement('password_token')
token_node.appendChild(doc.createTextNode(token))
answer.appendChild(token_node)
xml = doc.toprettyxml()
doc.unlink()
self.log.debug("return data is %s" % xml)
return xml
d = self.app.db.query("""
select
password_token
from
qoop_tokens
where
user_token = %s
and
expires > now()
""", (user_token,), database='cluster')
d.addCallback(result_mods.return_1st_cell)
d.addCallback(make_response)
d.addErrback(self.invalid_token)
return d
@stack
def get_user_by_password_token(self, password_token):
self.log.debug("finding user for %s" % password_token)
def check_results(rows):
self.log.debug("got %s rows" % len(rows))
try:
return rows[0][0]
except:
raise ValueError, "Invalid Token: %s" % password_token
d = self.app.db.query("""
select
username
from
qoop_tokens
where
password_token = %s
""", (password_token,), database="cluster")
d.addCallback(result_mods.return_1st_cell)
return d
@stack
def generate_error(self, error_text):
doc = minidom.Document()
answer = doc.createElement('answer')
answer.setAttribute('status', 'fail')
error = doc.createElement("error")
error.appendChild(doc.createTextNode(error_text))
doc.appendChild(answer)
answer.appendChild(error)
xml = doc.toprettyxml()
doc.unlink()
return xml
@stack
def get_photos(self, username, password_token, per_page, page_number, backdoor_username, fal):
if backdoor_username:
browse = backdoor_username
else:
browse = username
if per_page > 0:
# they want paginated results
offset = int(per_page) * (int(page_number) - 1) # they seem to start at page 1 instead of 0
limit = int(per_page)
else:
offset = 0
limit = 0
@stack
def uh_oh(failure):
return self.generate_error("Error getting images for %s [%s]" % (browse, failure.getErrorMessage()))
@stack
def make_response(stuff):
images, total = stuff
# figure the total number of pages for the algebra noobs at Qoop. WTF?
total_pages = int(floor(total / per_page))
if (total % per_page): total_pages+=1
doc = minidom.Document()
answer = doc.createElement('answer')
answer.setAttribute('status', 'ok')
doc.appendChild(answer)
photo_list = doc.createElement('photolist')
photo_list.setAttribute('total_photos', "%s" % total)
photo_list.setAttribute('page_number', str(page_number))
photo_list.setAttribute('per_page', str(per_page))
photo_list.setAttribute('total_pages', str(total_pages))
photo_list.setAttribute('owner_id', browse)
answer.appendChild(photo_list)
for img in images:
img_node = doc.createElement('photo')
img_node.setAttribute('id', "%s" % img['image_id'])
img_node.setAttribute('orig_format', 'jpg')
if img['date']: img_node.setAttribute('taken', img['date'].strftime("%Y-%m-%d"))
if img['date_uploaded']: img_node.setAttribute('upload', img['date_uploaded'].strftime("%Y-%m-%d"))
title = doc.createElement('title')
title.appendChild(doc.createTextNode(img['title']))
desc = doc.createElement('description')
desc.appendChild(doc.createTextNode(img['description']))
orig_link = doc.createElement('original')
if img.has_key('original_x'): orig_link.setAttribute('x', str(img['original_x']))
if img.has_key('original_y'): orig_link.setAttribute('y', str(img['original_y']))
orig_link.appendChild(doc.createTextNode(self.image_url % (browse, "original", img['image_id'])))
img_node.appendChild(title)
img_node.appendChild(desc)
img_node.appendChild(orig_link)
if img.has_key('rendered_sizes'):
size_nodes = {}
for k,v in img['rendered_sizes'].items():
size_nodes[k] = doc.createElement(k)
size_nodes[k].setAttribute('x', str(v['x']))
size_nodes[k].setAttribute('y', str(v['y']))
size_nodes[k].appendChild(doc.createTextNode(self.image_url % (browse, v['code'], img['image_id'])))
img_node.appendChild(size_nodes[k])
photo_list.appendChild(img_node)
xml = doc.toprettyxml()
doc.unlink()
return xml
lightbox = {
"order_by": "date_uploaded",
"order_dir": "desc"
}
@stack
def get_total(image_list):
lightbox['count_only'] = True
d_total = self.app.api.lightbox.get_images(browse, lightbox, 0, 0, fal)
d_total.addCallback(lambda total: (image_list, total))
return d_total
@stack
def get_dimensions(stuff):
image_list, total = stuff
@stack
def store_original_size((w,h), new_list, i):
new_list[i]['original_x'] = w
new_list[i]['original_y'] = h
return (w, h)
@stack
def store_size(size_tuple, new_list, i, size_code, tag_name, original_dimensions):
self.log.debug("storing size %s as %s for image %s [%s x %s]" % (size_code, tag_name, new_list[i]['image_id'], size_tuple[0], size_tuple[1]))
x, y = size_tuple
if new_list[i].has_key('rendered_sizes'):
new_list[i]["rendered_sizes"][tag_name] = {'x': x, 'y': y, 'code': size_code}
else:
new_list[i]["rendered_sizes"] = {tag_name: {'x': x, 'y': y, 'code': size_code}}
return original_dimensions
@stack
def get_fudged_sizes(original_dimensions, new_list, i):
self.log.debug("starting chain for fudged sizes on image %s" % new_list[i]['image_id'])
d_fudge = self.app.api.images.get_fudged_rendered_size(original_dimensions, '17')
d_fudge.addCallback(store_size, new_list, i, '17', 'small', original_dimensions)
d_fudge.addCallback(self.app.api.images.get_fudged_rendered_size, '24')
d_fudge.addCallback(store_size, new_list, i, '24', 'medium', original_dimensions)
d_fudge.addCallback(self.app.api.images.get_fudged_rendered_size, '45')
d_fudge.addCallback(store_size, new_list, i, '45', 'large', original_dimensions)
d_fudge.addCallback(self.app.api.images.get_fudged_rendered_size, '230')
d_fudge.addCallback(store_size, new_list, i, '230', 'square', original_dimensions)
return d_fudge
# make a copy of the list
new_list = image_list[:]
def_list = []
for i in range(len(image_list)):
d_temp = self.app.api.images.get_original_size(username, image_list[i]['image_id'])
d_temp.addCallback(store_original_size, new_list, i)
d_temp.addCallback(get_fudged_sizes, new_list, i)
def_list.append(d_temp)
dl = DeferredList(def_list)
dl.addCallback(lambda _: (new_list, total))
return dl
d = self.app.api.lightbox.get_images(browse, lightbox, limit, offset, fal)
d.addCallback(get_total)
d.addCallback(get_dimensions)
d.addCallback(make_response)
d.addErrback(uh_oh)
return d
@stack
def get_albums(self, username, password_token, backdoor_username, fal):
if backdoor_username:
browse = backdoor_username
else:
browse = username
@stack
def process(galleries, categories):
generics = []
""" combine the two lists for a qoop "album" list """
for g in galleries:
temp = {
'name': '%s (Gallery)' % g['title'],
'description': g['description'],
'id': "GAL%s" % g['gallery_name'],
'num_photos': g['cnt_images'],
}
generics.append(temp)
# for c in categories:
# if c['category_id'] < 0: continue # no system tags
# if c['parent_id'] == 0: continue # no who/what/when/where tags
# temp = {
# 'name': c['name'],
# 'description': c.get('description', ''),
# 'id': "TAG%s" % c['category_id'],
# 'num_photos': c['cnt_images'],
# }
# generics.append(temp)
doc = minidom.Document()
answer = doc.createElement('answer')
answer.setAttribute('status', 'ok')
doc.appendChild(answer)
for g in generics:
if int(g['num_photos']) < 1: continue
album = doc.createElement('album')
album.setAttribute('id', "%s" % g['id'])
album.setAttribute('num_photos', "%s" % g['num_photos'])
album.setAttribute('owner_id', browse)
title = doc.createElement('title')
title.appendChild(doc.createTextNode(g['name']))
desc = doc.createElement('description')
desc.appendChild(doc.createTextNode(g['description']))
album.appendChild(title)
album.appendChild(desc)
answer.appendChild(album)
xml = doc.toprettyxml()
doc.unlink()
return xml
@stack
def get_galleries(categories):
d2 = self.app.api.galleries.get_list(browse, 0, 0, 0)
d2.addCallback(process, categories)
return d2
d = self.app.api.category.get_real_list(browse, fal, 1, 'name', 'asc')
d.addCallback(get_galleries)
return d
@stack
def get_album_photos(self, username, password_token, album_id, backdoor_username, fal):
if backdoor_username:
browse = backdoor_username
else:
browse = username
@stack
def process_list(images):
""" make the xml list for this category """
doc = minidom.Document()
answer = doc.createElement('answer')
answer.setAttribute('status', 'ok')
doc.appendChild(answer)
album_list = doc.createElement('albumlist')
album_list.setAttribute('num_photos', str(len(images)))
album_list.setAttribute('id', str(album_id))
album_list.setAttribute('owner_id', browse)
answer.appendChild(album_list)
for i in images:
if isinstance(i, dict):
# gallery results come in as dicts
id = "%s-%s" % (i['image_id'], i['filter_hash'])
else:
id = str(i)
node = doc.createElement('photo')
node.setAttribute('id', id)
album_list.appendChild(node)
xml = doc.toprettyxml()
doc.unlink()
return xml
if album_id.startswith("TAG"):
# get category images
lightbox = {
'ids_only': True,
'category_id': album_id[3:],
'order_by': 'date_uploaded',
'order_dir': 'desc',
}
d = self.app.api.lightbox.get_images(browse, lightbox, 0, 0, fal)
d.addCallback(process_list)
return d
elif album_id.startswith("GAL"):
# get gallery images
gal_id = album_id[3:]
d = self.app.api.galleries.get_images(gal_id, 0, 0, [])
d.addCallback(process_list)
return d
else:
return self.generate_error("Invalid album_id")
def get_user_info(self, username, password_token):
if not username: return self.generate_error("no user found for %s" % password_token)
@stack
def make_response(info_dict):
doc = minidom.Document()
answer = doc.createElement('answer')
answer.setAttribute('status', 'ok')
doc.appendChild(answer)
user_node = doc.createElement('user_info')
user_node.setAttribute('owner_id', username)
real_name = doc.createElement('realname')
real_name.appendChild(doc.createTextNode(info_dict['display_name']))
user_name_node = doc.createElement('username')
user_name_node.appendChild(doc.createTextNode(username))
location = doc.createElement('location')
location.appendChild(doc.createTextNode(info_dict['country']))
avatar = doc.createElement('image_url')
avatar.appendChild(doc.createTextNode(self.image_url % (username, 18, info_dict['avatar'])))
user_node.appendChild(real_name)
user_node.appendChild(user_name_node)
user_node.appendChild(location)
user_node.appendChild(avatar)
answer.appendChild(user_node)
xml = doc.toprettyxml()
doc.unlink()
return xml
d = self.app.api.users.get_info(username, 0)
d.addCallback(make_response)
return d
@stack
def collect_garbage(self, void):
return self.app.db.query("delete from qoop_tokens where expires < now()", (), database='cluster')
|
|
"""
Filename: graph_tools.py
Author: Daisuke Oyama
Tools for dealing with a directed graph.
"""
import numpy as np
from scipy import sparse
from scipy.sparse import csgraph
from fractions import gcd
class DiGraph(object):
r"""
Class for a directed graph. It stores useful information about the
graph structure such as strong connectivity [1]_ and periodicity
[2]_.
Parameters
----------
adj_matrix : array_like(ndim=2)
Adjacency matrix representing a directed graph. Must be of shape
n x n.
weighted : bool, optional(default=False)
Whether to treat `adj_matrix` as a weighted adjacency matrix.
Attributes
----------
csgraph : scipy.sparse.csr_matrix
Compressed sparse representation of the digraph.
is_strongly_connected : bool
Indicate whether the digraph is strongly connected.
num_strongly_connected_components : int
The number of the strongly connected components.
strongly_connected_components : list(ndarray(int))
List of numpy arrays containing the strongly connected
components.
num_sink_strongly_connected_components : int
The number of the sink strongly connected components.
sink_strongly_connected_components : list(ndarray(int))
List of numpy arrays containing the sink strongly connected
components.
is_aperiodic : bool
Indicate whether the digraph is aperiodic.
period : int
The period of the digraph. Defined only for a strongly connected
digraph.
cyclic_components : list(ndarray(int))
List of numpy arrays containing the cyclic components.
References
----------
.. [1] `Strongly connected component
<http://en.wikipedia.org/wiki/Strongly_connected_component>`_,
Wikipedia.
.. [2] `Aperiodic graph
<http://en.wikipedia.org/wiki/Aperiodic_graph>`_, Wikipedia.
"""
def __init__(self, adj_matrix, weighted=False):
if weighted:
dtype = None
else:
dtype = bool
self.csgraph = sparse.csr_matrix(adj_matrix, dtype=dtype)
m, n = self.csgraph.shape
if n != m:
raise ValueError('input matrix must be square')
self.n = n # Number of nodes
self._num_scc = None
self._scc_proj = None
self._sink_scc_labels = None
self._period = None
def __repr__(self):
return self.__str__()
def __str__(self):
return "Directed Graph:\n - n(number of nodes): {n}".format(n=self.n)
def _find_scc(self):
"""
Set ``self._num_scc`` and ``self._scc_proj``
by calling ``scipy.sparse.csgraph.connected_components``:
* docs.scipy.org/doc/scipy/reference/sparse.csgraph.html
* github.com/scipy/scipy/blob/master/scipy/sparse/csgraph/_traversal.pyx
``self._scc_proj`` is an array of length `n` that assigns to each node
the label of the strongly connected component to which it belongs.
"""
# Find the strongly connected components
self._num_scc, self._scc_proj = \
csgraph.connected_components(self.csgraph, connection='strong')
@property
def num_strongly_connected_components(self):
if self._num_scc is None:
self._find_scc()
return self._num_scc
@property
def scc_proj(self):
if self._scc_proj is None:
self._find_scc()
return self._scc_proj
@property
def is_strongly_connected(self):
return (self.num_strongly_connected_components == 1)
def _condensation_lil(self):
"""
Return the sparse matrix representation of the condensation digraph
in lil format.
"""
condensation_lil = sparse.lil_matrix(
(self.num_strongly_connected_components,
self.num_strongly_connected_components), dtype=bool
)
scc_proj = self.scc_proj
for node_from, node_to in _csr_matrix_indices(self.csgraph):
scc_from, scc_to = scc_proj[node_from], scc_proj[node_to]
if scc_from != scc_to:
condensation_lil[scc_from, scc_to] = True
return condensation_lil
def _find_sink_scc(self):
"""
Set ``self._sink_scc_labels``, which is an array containing the labels of
the sink strongly connected components.
"""
condensation_lil = self._condensation_lil()
# A sink SCC is an SCC with no edge leading out to another SCC,
# i.e., those k's such that condensation_lil.rows[k] == []
self._sink_scc_labels = \
np.where(np.logical_not(condensation_lil.rows))[0]
@property
def sink_scc_labels(self):
if self._sink_scc_labels is None:
self._find_sink_scc()
return self._sink_scc_labels
@property
def num_sink_strongly_connected_components(self):
return len(self.sink_scc_labels)
@property
def strongly_connected_components(self):
if self.is_strongly_connected:
return [np.arange(self.n)]
else:
return [np.where(self.scc_proj == k)[0]
for k in range(self.num_strongly_connected_components)]
@property
def sink_strongly_connected_components(self):
if self.is_strongly_connected:
return [np.arange(self.n)]
else:
return [np.where(self.scc_proj == k)[0]
for k in self.sink_scc_labels.tolist()]
def _compute_period(self):
"""
Set ``self._period`` and ``self._cyclic_components_proj``.
Use the algorithm described in:
J. P. Jarvis and D. R. Shier,
"Graph-Theoretic Analysis of Finite Markov Chains," 1996.
"""
# Degenerate graph with a single node (which is strongly connected)
# csgraph.reconstruct_path would raise an exception
# github.com/scipy/scipy/issues/4018
if self.n == 1:
if self.csgraph[0, 0] == 0: # No edge: "trivial graph"
self._period = 1 # Any universally accepted definition?
self._cyclic_components_proj = np.zeros(self.n, dtype=int)
return None
else: # Self loop
self._period = 1
self._cyclic_components_proj = np.zeros(self.n, dtype=int)
return None
if not self.is_strongly_connected:
raise NotImplementedError(
'Not defined for a non strongly-connected digraph'
)
if np.any(self.csgraph.diagonal() > 0):
self._period = 1
self._cyclic_components_proj = np.zeros(self.n, dtype=int)
return None
# Construct a breadth-first search tree rooted at 0
node_order, predecessors = \
csgraph.breadth_first_order(self.csgraph, i_start=0)
bfs_tree_csr = \
csgraph.reconstruct_path(self.csgraph, predecessors)
# Edges not belonging to tree_csr
non_bfs_tree_csr = self.csgraph - bfs_tree_csr
non_bfs_tree_csr.eliminate_zeros()
# Distance to 0
level = np.zeros(self.n, dtype=int)
for i in range(1, self.n):
level[node_order[i]] = level[predecessors[node_order[i]]] + 1
# Determine the period
d = 0
for node_from, node_to in _csr_matrix_indices(non_bfs_tree_csr):
value = level[node_from] - level[node_to] + 1
d = gcd(d, value)
if d == 1:
self._period = 1
self._cyclic_components_proj = np.zeros(self.n, dtype=int)
return None
self._period = d
self._cyclic_components_proj = level % d
@property
def period(self):
if self._period is None:
self._compute_period()
return self._period
@property
def is_aperiodic(self):
return (self.period == 1)
@property
def cyclic_components(self):
if self.is_aperiodic:
return [np.arange(self.n)]
else:
return [np.where(self._cyclic_components_proj == k)[0]
for k in range(self.period)]
def _csr_matrix_indices(S):
"""
Generate the indices of nonzero entries of a csr_matrix S
"""
m, n = S.shape
for i in range(m):
for j in range(S.indptr[i], S.indptr[i+1]):
row_index, col_index = i, S.indices[j]
yield row_index, col_index
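# A minimal usage sketch (illustrative, not part of the original module): a
# directed 3-cycle is strongly connected with period 3, so it splits into
# three cyclic components.
if __name__ == '__main__':
    adj = np.array([[0, 1, 0],
                    [0, 0, 1],
                    [1, 0, 0]])
    g = DiGraph(adj)
    print(g.is_strongly_connected)              # True
    print(g.num_strongly_connected_components)  # 1
    print(g.period)                             # 3
    print(g.cyclic_components)                  # [array([0]), array([1]), array([2])]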
|
|
import logging
log = logging.getLogger(__name__)
import numpy
import time
from spacq.interface.resources import Resource
from spacq.interface.units import Quantity
from spacq.tool.box import Synchronized
from ..abstract_device import AbstractDevice, AbstractSubdevice
from ..tools import quantity_unwrapped, BinaryEncoder
"""
Custom voltage source
Control the output voltages on all the ports.
"""
class Port(AbstractSubdevice):
"""
An output port on the voltage source.
"""
# Since there is no way to determine whether calibration has completed,
# wait this long and hope for the best.
calibration_delay = 2 # s
@staticmethod
def format_for_dac(msg):
"""
Perform some formatting to make the device happy:
flip all the bits in the message
pad messages until their length in bytes is a multiple of 4
"""
log.debug('Formatting for DAC: {0}'.format(msg))
msg_encoded = BinaryEncoder.encode(msg)
# Flip each byte separately.
msg_flipped = [chr(~ord(x) & 0xff) for x in msg_encoded]
missing_bytes = (4 - len(msg_encoded) % 4) % 4
result = BinaryEncoder.decode(msg_flipped + ['\x00'] * missing_bytes)
log.debug('Formatted for DAC (padded with {0} bytes): {1}'.format(missing_bytes, result))
return result
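# Worked example (a sketch, assuming BinaryEncoder.encode simply turns the hex
# string into raw bytes): the 3-byte message '24 20a0' written by
# apply_settings() is complemented byte-by-byte to db df 5f and padded with one
# 0x00 byte to reach a 4-byte boundary; the exact string form of the result
# depends on BinaryEncoder.decode.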
def _setup(self):
AbstractSubdevice._setup(self)
# These values are used to tune the input values according to empirical error.
self.gain = 1.0
self.offset = 0.0
# Resources.
write_only = ['voltage']
for name in write_only:
self.resources[name] = Resource(self, None, name)
self.resources['voltage'].units = 'V'
@Synchronized()
def _connected(self):
AbstractSubdevice._connected(self)
if self.do_apply_settings:
self.apply_settings(calibrate=False)
def __init__(self, device, num, resolution=20, apply_settings=True, min_value=-10,
max_value=+10, adaptive_filtering=True, calibrate_connected=False,
fast_settling=True, freq=100, *args, **kwargs):
"""
Initialize the output port.
device: The VoltageSource to which this Port belongs.
num: The index of this port.
resolution: How many bits the output value contains.
apply_settings: Whether to automatically apply all the settings.
min_value: Smallest value the port can produce.
max_value: Largest value the port can produce.
adaptive_filtering: Enable adaptive filtering.
calibrate_connected: Do not disconnect output while calibrating.
fast_settling: Enable fast settling.
freq: Clock rate in kHz.
"""
AbstractSubdevice.__init__(self, device, *args, **kwargs)
if resolution not in [16, 20]:
raise ValueError('Unsupported resolution: {0}'.format(resolution))
self.num = num
self.resolution = resolution
self.do_apply_settings = apply_settings
self.min_value = min_value
self.max_value = max_value
self.adaptive_filtering = adaptive_filtering
self.calibrate_connected = calibrate_connected
self.fast_settling = fast_settling
self.freq = freq
def calculate_voltage(self, voltage):
"""
Determine the value corresponding to the given voltage.
"""
try:
voltage_adjusted = voltage * self.gain + self.offset
except TypeError:
raise ValueError('Voltage must be a number. Given: {0}'.format(voltage))
if voltage_adjusted < self.min_value or voltage_adjusted > self.max_value:
raise ValueError('Adjusted voltage must be within [{0}, {1}]. '
'Given: {2}; adjusted to: {3}.'.format(self.min_value,
self.max_value, voltage, voltage_adjusted))
max_converted = (1 << self.resolution) - 1
value_span = self.max_value - self.min_value
# Map [min_value, max_value] onto [0x0, 0xff...] depending on the resolution.
# First negate the voltage, so that flipping the bits later will make it correct.
return int(float(-voltage_adjusted + self.max_value) / value_span * max_converted)
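# Worked example (a sketch, assuming the defaults gain=1.0, offset=0.0,
# min_value=-10, max_value=+10 and 20-bit resolution, so max_converted = 0xfffff):
#   calculate_voltage(+10.0) -> int((-10 + 10) / 20.0 * 0xfffff) == 0
#   calculate_voltage(  0.0) -> int((  0 + 10) / 20.0 * 0xfffff) == 524287
#   calculate_voltage(-10.0) -> int(( 10 + 10) / 20.0 * 0xfffff) == 1048575
# The negation above means that the bit flip later applied by format_for_dac
# produces the code the DAC expects.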
@Synchronized()
def write_to_dac(self, message):
"""
Write a message to the DAC of the port.
Voodoo programming follows, thanks to:
NI's lack of support for anything non-Windows in this case
my lack of time & desire to properly reverse engineer the ni845x DLL
If the conversation does not go according to plan, bails out with an AssertionError!
"""
message_length = BinaryEncoder.length(message)
if message_length > 4:
raise ValueError('Message is longer than 4 bytes: {0}'.format(message))
message_formatted = self.format_for_dac(message)
# The reply always comes back with as many bits set to 1 as were sent.
expected_reply = self.format_for_dac('00' * message_length)
# Lots of assertions so we can bail ASAP to avoid crashing anything.
self.device.ask_encoded('0000 000c 0008 0100 0000 0000',
'0000 001c 0018 0100 0000 0002 0200 1000 0100 c001 0100 c000 0002 0000')
self.device.ask_encoded('0000 0010 000c 0113 0280 0000 0000 ff01',
'0000 000c 0008 0100 0000 0002')
self.device.ask_encoded('0000 0010 000c 0112 0280 0000 00ff ff00',
'0000 000c 0008 0100 0000 0002')
self.device.ask_encoded('0000 0010 000c 0111 0280 0000 00ff {0:02x} 00'.format(self.num),
'0000 000c 0008 0100 0000 0002')
self.device.ask_encoded('0000 000c 0008 0100 0000 0000',
'0000 001c 0018 0100 0000 0002 0200 1000 0100 c001 0100 c000 0002 0000')
self.device.ask_encoded('0000 0014 0010 0110 0260 0000 0000 {0:04x} 0700 0000'.format(self.freq),
'0000 000c 0008 0100 0000 0002')
self.device.ask_encoded('0000 0014 0010 0111 0260 0000 0003 {0:02x} 00 {1}'.format(message_length,
message_formatted),
'0000 0014 0010 0100 0000 0002 {0:02x} 00 0000 {1}'.format(message_length, expected_reply))
def apply_settings(self, calibrate=False):
"""
Apply the settings for the DAC on this port.
calibrate: Run self-calibration on this port as well.
Note: If self-calibrating, it is essential to wait the calibration_delay after this method returns.
"""
flags = ((not self.adaptive_filtering) << 15 |
self.calibrate_connected << 14 |
(not self.fast_settling) << 4)
if calibrate:
flags |= 0b01
# Write 16 bits to the top of the DIR: 0010 0100 xx10 0000 101x 00xx
self.write_to_dac('24 {0:04x}'.format(0x20a0 | flags))
@quantity_unwrapped('V')
def set_voltage(self, voltage):
"""
Set the voltage on this port, as a quantity in V.
"""
# Left-align the bits within the value:
# 20-bit: VVVV VVVV VVVV VVVV VVVV xxxx
# 16-bit: VVVV VVVV VVVV VVVV xxxx xxxx
# where the 'x's are don't-cares, so we just set them to 0 by shifting.
resulting_voltage = self.calculate_voltage(voltage) << (24 - self.resolution)
# Write 24 bits to the top of the DIR: 0100 0000 xxxx xxxx xxxx xxxx xxxx xxxx
self.write_to_dac('40 {0:06x}'.format(resulting_voltage))
voltage = property(fset=set_voltage)
@Synchronized()
def autotune(self, voltage_resource, min_value=None, max_value=None, final_value=0, set_result=True):
"""
Take some measured data and solve for the gain and offset.
voltage_resource: A resource which provides the realtime measured data for this port.
min_value: Smallest value to take into account.
max_value: Largest value to take into account.
final_value: Value to set port to after all measurements are taken.
set_result: Whether to apply the resulting gain and offset.
"""
self.device.status.append('Autotuning port {0}'.format(self.num))
try:
if min_value is None:
min_value = self.min_value
if max_value is None:
max_value = self.max_value
# Test with raw values.
old_gain, old_offset = self.gain, self.offset
self.gain, self.offset = 1, 0
if max_value < min_value:
raise ValueError('{0} > {1}'.format(min_value, max_value))
elif max_value == min_value:
num_points = 1
else:
num_points = 21
# Obtain data.
real = numpy.linspace(min_value, max_value, num_points)
measured = []
for x in real:
self.voltage = Quantity(x, 'V')
time.sleep(0.2)
measured.append(voltage_resource.value.value)
# Solve.
A = numpy.vstack([measured, numpy.ones(len(measured))]).T
gain, offset = numpy.linalg.lstsq(A, real)[0]
if set_result:
self.gain, self.offset = gain, offset
else:
self.gain, self.offset = old_gain, old_offset
# Set the voltage after the gain and offset, so that it is potentially more correct.
self.voltage = Quantity(final_value, 'V')
return (gain, offset)
finally:
self.device.status.pop()
class VoltageSource(AbstractDevice):
"""
Interface for the custom voltage source.
It uses several TI DAC1220 chips and an NI USB-8451 to interface with them over SPI.
"""
@property
def _gui_setup(self):
try:
from .gui.voltage_source import VoltageSourceSettingsDialog
return VoltageSourceSettingsDialog
except ImportError as e:
log.debug('Could not load GUI setup for device "{0}": {1}'.format(self.name, str(e)))
return None
def _setup(self):
AbstractDevice._setup(self)
self.ports = []
for num in xrange(16):
port = Port(self, num, **self.port_settings)
self.ports.append(port)
self.subdevices['port{0:02}'.format(num)] = port
def __init__(self, port_settings=None, *args, **kwargs):
"""
Initialize the voltage source and all its ports.
port_settings: A dictionary of values to give to each port upon creation.
"""
if port_settings is None:
self.port_settings = {}
else:
self.port_settings = port_settings
AbstractDevice.__init__(self, *args, **kwargs)
@Synchronized()
def ask_encoded(self, msg, assertion=None):
"""
Encode and write the message; then read and decode the answer.
"""
self.write(BinaryEncoder.encode(msg))
result = BinaryEncoder.decode(self.read_raw())
if assertion is not None:
# Ensure that extra formatting doesn't trigger an assertion failure.
formatted_assertion = BinaryEncoder.decode(BinaryEncoder.encode(assertion))
assert result == formatted_assertion, (
'Device in unknown state; expect general failure. '
'Asserted: {0}; observed: {1}.'.format(assertion, result))
return result
name = 'Voltage source'
implementation = VoltageSource
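# A minimal usage sketch (illustrative only; it assumes an already-connected
# VoltageSource instance `vsrc` and a measured-voltage Resource `meter`,
# neither of which is defined in this module):
#
#   port = vsrc.ports[0]
#   port.apply_settings(calibrate=True)
#   time.sleep(Port.calibration_delay)    # there is no "calibration done" signal
#   port.voltage = Quantity(1.5, 'V')     # drive port 0 to 1.5 V
#   gain, offset = port.autotune(meter)   # fit gain/offset from measurements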
|
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
# -*- test-case-name: flocker.node.test.test_docker -*-
"""
Docker API client.
"""
from __future__ import absolute_import
from time import sleep
from zope.interface import Interface, implementer
from docker import Client
from docker.errors import APIError
from docker.utils import create_host_config
from eliot import Message
from pyrsistent import field, PRecord, pset
from characteristic import with_cmp
from twisted.python.components import proxyForInterface
from twisted.python.filepath import FilePath
from twisted.internet.defer import succeed, fail
from twisted.internet.threads import deferToThread
from twisted.web.http import NOT_FOUND, INTERNAL_SERVER_ERROR
from ..control._model import (
RestartNever, RestartAlways, RestartOnFailure, pset_field, pvector_field)
class AlreadyExists(Exception):
"""A unit with the given name already exists."""
@with_cmp(["address"])
class AddressInUse(Exception):
"""
The listen address for an exposed port was in use and could not be bound.
"""
def __init__(self, address):
"""
:param tuple address: The conventional Python representation of the
address which could not be bound (eg, an (ipv4 address, port
number) pair for IPv4 addresses).
"""
Exception.__init__(self, address)
self.address = address
class Environment(PRecord):
"""
A collection of environment variables.
:ivar frozenset variables: A ``frozenset`` of tuples containing
key and value pairs representing the environment variables.
"""
variables = field(mandatory=True)
def to_dict(self):
"""
Convert to a dictionary suitable for serialising to JSON and then on to
the Docker API.
:return: ``dict`` mapping keys to values.
"""
return dict(self.variables)
class Volume(PRecord):
"""
A Docker volume.
:ivar FilePath node_path: The volume's path on the node's
filesystem.
:ivar FilePath container_path: The volume's path within the
container.
"""
node_path = field(mandatory=True, type=FilePath)
container_path = field(mandatory=True, type=FilePath)
class PortMap(PRecord):
"""
A record representing the mapping between a port exposed internally by a
docker container and the corresponding external port on the host.
:ivar int internal_port: The port number exposed by the container.
:ivar int external_port: The port number exposed by the host.
"""
internal_port = field(mandatory=True, type=int)
external_port = field(mandatory=True, type=int)
class Unit(PRecord):
"""
Information about a unit managed by Docker.
XXX "Unit" is geard terminology, and should be renamed. See
https://clusterhq.atlassian.net/browse/FLOC-819
:ivar unicode name: The name of the unit, which may not be the same as
the container name.
:ivar unicode container_name: The name of the container where the
application is running.
:ivar unicode activation_state: The state of the
container. ``u"active"`` indicates it is running, ``u"inactive"``
indicates it is not running. See
https://clusterhq.atlassian.net/browse/FLOC-187 about using
constants instead of strings and other improvements.
:ivar unicode container_image: The docker image name associated with this
container.
:ivar PSet ports: The ``PortMap`` instances which define how
connections to ports on the host are routed to ports exposed in
the container.
:ivar Environment environment: An ``Environment`` whose variables
will be supplied to the Docker container or ``None`` if there are no
environment variables for this container.
:ivar PSet volumes: ``Volume`` instances, the container's volumes.
:ivar int mem_limit: The number of bytes to which to limit the in-core
memory allocations of this unit. Or ``None`` to apply no limits. The
behavior when the limit is encountered depends on the container
execution driver but the likely behavior is for the container process
to be killed (and therefore the container to exit). Docker most likely
maps this value onto the cgroups ``memory.limit_in_bytes`` value.
:ivar int cpu_shares: The number of CPU shares to allocate to this unit.
Or ``None`` to let it have the default number of shares. Docker maps
this value onto the cgroups ``cpu.shares`` value (the default of which
is probably 1024).
:ivar IRestartPolicy restart_policy: The restart policy of the container.
:ivar command_line: Custom command to run using the image, a ``PVector``
of ``unicode``. ``None`` means use default.
"""
name = field(mandatory=True)
container_name = field(mandatory=True)
activation_state = field(mandatory=True)
container_image = field(mandatory=True, initial=None)
ports = pset_field(PortMap)
environment = field(mandatory=True, initial=None)
volumes = pset_field(Volume)
mem_limit = field(mandatory=True, initial=None)
cpu_shares = field(mandatory=True, initial=None)
restart_policy = field(mandatory=True, initial=RestartNever())
command_line = pvector_field(unicode, optional=True, initial=None)
class IDockerClient(Interface):
"""
A client for the Docker HTTP API.
Note the difference in semantics between the results of ``add()``
(firing does not indicate application started successfully)
vs. ``remove()`` (firing indicates application has finished shutting
down).
"""
def add(unit_name, image_name, ports=None, environment=None, volumes=(),
mem_limit=None, cpu_shares=None, restart_policy=RestartNever(),
command_line=None):
"""
Install and start a new unit.
Note that callers should not assume success indicates the unit has
finished starting up. In addition to the asynchronous nature of Docker,
even if the container is up and running, the application within it might
still be starting up, e.g. it may not have bound the external ports
yet. As a result the final success of application startup is out of
scope for this method.
:param unicode unit_name: The name of the unit to create.
:param unicode image_name: The Docker image to use for the unit.
:param list ports: A list of ``PortMap``\ s mapping ports exposed in
the container to ports exposed on the host. Default ``None`` means
that no port mappings will be configured for this unit. If a
``PortMap`` instance's ``external_port`` is set to ``0`` a free
port will automatically be assigned. The assigned port will be
reported for the container in the result of ``IDockerClient.list``.
:param Environment environment: Environment variables for the
container. Default ``None`` means that no environment variables
will be supplied to the unit.
:param volumes: A sequence of ``Volume`` instances to mount.
:param int mem_limit: The number of bytes to which to limit the in-core
memory allocations of the new unit. Or ``None`` to apply no
limits.
:param int cpu_shares: The number of CPU shares to allocate to the new
unit. Or ``None`` to let it have the default number of shares.
Docker maps this value onto the cgroups ``cpu.shares`` value (the
default of which is probably 1024).
:param IRestartPolicy restart_policy: The restart policy of the
container.
:param command_line: Custom command to run using the image, a sequence
of ``unicode``, or ``None`` to use default image command line.
:return: ``Deferred`` that fires on success, or errbacks with
:class:`AlreadyExists` if a unit by that name already exists.
"""
def exists(unit_name):
"""
Check whether the unit exists.
:param unicode unit_name: The name of the unit whose existence
we're checking.
:return: ``Deferred`` that fires with ``True`` if unit exists,
otherwise ``False``.
"""
def remove(unit_name):
"""
Stop and delete the given unit.
This can be done multiple times in a row for the same unit.
:param unicode unit_name: The name of the unit to stop.
:return: ``Deferred`` that fires once the unit has been stopped
and removed.
"""
def list():
"""
List all known units.
:return: ``Deferred`` firing with ``set`` of :class:`Unit`.
"""
@implementer(IDockerClient)
class FakeDockerClient(object):
"""
In-memory fake that simulates talking to a docker daemon.
The state of the simulated units is stored in memory.
:ivar dict _units: See ``units`` of ``__init__``\ .
:ivar pset _used_ports: A set of integers giving the port numbers which
will be considered in use. Attempts to add containers which use these
ports will fail.
"""
def __init__(self, units=None):
"""
:param dict units: A dictionary of canned ``Unit``\ s which will be
manipulated and returned by the methods of this
``FakeDockerClient``.
:type units: ``dict`` mapping `unit_name` to ``Unit``\ .
"""
if units is None:
units = {}
self._units = units
self._used_ports = pset()
def add(self, unit_name, image_name, ports=frozenset(), environment=None,
volumes=frozenset(), mem_limit=None, cpu_shares=None,
restart_policy=RestartNever(), command_line=None):
if unit_name in self._units:
return fail(AlreadyExists(unit_name))
for port in ports:
if port.external_port in self._used_ports:
raise AddressInUse(address=(b"0.0.0.0", port.external_port))
all_ports = set(range(2 ** 15, 2 ** 16))
assigned_ports = []
for port in ports:
if port.external_port == 0:
available_ports = pset(all_ports) - self._used_ports
assigned = next(iter(available_ports))
port = port.set(external_port=assigned)
assigned_ports.append(port)
self._used_ports = self._used_ports.add(port.external_port)
self._units[unit_name] = Unit(
name=unit_name,
container_name=unit_name,
container_image=image_name,
ports=frozenset(assigned_ports),
environment=environment,
volumes=frozenset(volumes),
activation_state=u'active',
mem_limit=mem_limit,
cpu_shares=cpu_shares,
restart_policy=restart_policy,
command_line=command_line,
)
return succeed(None)
def exists(self, unit_name):
return succeed(unit_name in self._units)
def remove(self, unit_name):
if unit_name in self._units:
del self._units[unit_name]
return succeed(None)
def list(self):
units = set(self._units.values())
return succeed(units)
# Basic namespace for Flocker containers:
BASE_NAMESPACE = u"flocker--"
class TimeoutClient(Client):
"""
A subclass of docker.Client that sets any infinite timeouts to the
provided ``long_timeout`` value.
This class is a temporary fix until docker-py is released with
PR #625 or similar. See https://github.com/docker/docker-py/pull/625
See Flocker JIRA Issue FLOC-2082
"""
def __init__(self, *args, **kw):
self.long_timeout = kw.pop('long_timeout', None)
Client.__init__(self, *args, **kw)
def _set_request_timeout(self, kwargs):
"""
Prepare the kwargs for an HTTP request by inserting the timeout
parameter, if not already present. If the timeout is infinite,
set it to the ``long_timeout`` parameter.
"""
kwargs = Client._set_request_timeout(self, kwargs)
if kwargs['timeout'] is None:
kwargs['timeout'] = self.long_timeout
return kwargs
@implementer(IDockerClient)
class DockerClient(object):
"""
Talk to the real Docker server directly.
Some operations can take a while (e.g. stopping a container), so we
use a thread pool. See https://clusterhq.atlassian.net/browse/FLOC-718
for using a custom thread pool.
:ivar unicode namespace: A namespace prefix to add to container names
so we don't clobber other applications interacting with Docker.
:ivar str base_url: URL for connection to the Docker server.
:ivar int long_timeout: Maximum time in seconds to wait for
long-running operations, particularly pulling an image.
"""
def __init__(
self, namespace=BASE_NAMESPACE, base_url=None,
long_timeout=600):
self.namespace = namespace
self._client = TimeoutClient(
version="1.15", base_url=base_url, long_timeout=long_timeout)
def _to_container_name(self, unit_name):
"""
Add the namespace to the container name.
:param unicode unit_name: The unit's name.
:return unicode: The container's name.
"""
return self.namespace + unit_name
def _parse_container_ports(self, data):
"""
Parse the ports from a data structure representing the Ports
configuration of a Docker container in the format returned by
``self._client.inspect_container`` and return a list containing
``PortMap`` instances mapped to the container and host exposed ports.
:param dict data: The data structure for the representation of
container and host port mappings in a single container.
This takes the form of the ``NetworkSettings.Ports`` portion
of a container's state and configuration as returned by inspecting
the container. This is a dictionary mapping container ports to a
list of host bindings, e.g.
"3306/tcp": [{"HostIp": "0.0.0.0","HostPort": "53306"},
{"HostIp": "0.0.0.0","HostPort": "53307"}]
:return list: A list that is either empty or contains ``PortMap``
instances.
"""
ports = []
for internal, hostmap in data.items():
internal_map = internal.split(u'/')
internal_port = internal_map[0]
internal_port = int(internal_port)
if hostmap:
for host in hostmap:
external_port = host[u"HostPort"]
external_port = int(external_port)
portmap = PortMap(internal_port=internal_port,
external_port=external_port)
ports.append(portmap)
return ports
def _parse_restart_policy(self, data):
"""
Parse the restart policy from the configuration of a Docker container
in the format returned by ``self._client.inspect_container`` and return
an ``IRestartPolicy``.
:param dict data: The data structure representing the restart policy of
a container, e.g.
{"Name": "policy-name", "MaximumRetryCount": 0}
:return IRestartPolicy: The model of the restart policy.
:raises ValueError: if an unknown policy is passed.
"""
POLICIES = {
u"": lambda data:
RestartNever(),
u"always": lambda data:
RestartAlways(),
u"on-failure": lambda data:
RestartOnFailure(
maximum_retry_count=data[u"MaximumRetryCount"] or None)
}
try:
# Docker will treat an unknown policy as "never".
# We error out here, in case new policies are added.
return POLICIES[data[u"Name"]](data)
except KeyError:
raise ValueError("Unknown restart policy: %r" % (data[u"Name"],))
def _serialize_restart_policy(self, restart_policy):
"""
Serialize the restart policy from an ``IRestartPolicy`` to the format
expected by the docker API.
:param IRestartPolicy restart_policy: The model of the restart policy.
:returns: A dictionary suitable to pass to docker
:raises ValueError: if an unknown policy is passed.
"""
SERIALIZERS = {
RestartNever: lambda policy:
{u"Name": u""},
RestartAlways: lambda policy:
{u"Name": u"always"},
RestartOnFailure: lambda policy:
{u"Name": u"on-failure",
u"MaximumRetryCount": policy.maximum_retry_count or 0},
}
try:
return SERIALIZERS[restart_policy.__class__](restart_policy)
except KeyError:
raise ValueError("Unknown restart policy: %r" % (restart_policy,))
def _image_not_found(self, apierror):
"""
Inspect a ``docker.errors.APIError`` to determine if it represents a
failure to start a container because the container's image wasn't
found.
:return: ``True`` if this is the case, ``False`` if the error has
another cause.
:rtype: ``bool``
"""
return apierror.response.status_code == NOT_FOUND
def _address_in_use(self, apierror):
"""
Inspect a ``docker.errors.APIError`` to determine if it represents a
failure to start a container because the container is configured to use
ports that are already in use on the system.
:return: If this is the reason, an exception to raise describing the
problem. Otherwise, ``None``.
"""
# Recognize an error (without newline) like:
#
# Cannot start container <name>: Error starting userland proxy:
# listen tcp <ip>:<port>: bind: address already in use
#
# Or (without newline) like:
#
# Cannot start container <name>: Bind for <ip>:<port> failed:
# port is already allocated
#
# because Docker can't make up its mind about which format to use.
parts = apierror.explanation.split(b": ")
if parts[-1] == b"address already in use":
ip, port = parts[-3].split()[-1].split(b":")
elif parts[-1] == b"port is already allocated":
ip, port = parts[-2].split()[2].split(b":")
else:
return None
return AddressInUse(address=(ip, int(port)))
def add(self, unit_name, image_name, ports=None, environment=None,
volumes=(), mem_limit=None, cpu_shares=None,
restart_policy=RestartNever(), command_line=None):
container_name = self._to_container_name(unit_name)
if environment is not None:
environment = environment.to_dict()
if ports is None:
ports = []
restart_policy_dict = self._serialize_restart_policy(restart_policy)
def _create():
binds = {
volume.node_path.path: {
'bind': volume.container_path.path,
'ro': False,
}
for volume in volumes
}
port_bindings = {
p.internal_port: p.external_port
for p in ports
}
host_config = create_host_config(
binds=binds,
port_bindings=port_bindings,
restart_policy=restart_policy_dict,
)
# We're likely to get e.g. pvector, so make sure we're passing
# in something JSON serializable:
command_line_values = command_line
if command_line_values is not None:
command_line_values = list(command_line_values)
self._client.create_container(
name=container_name,
image=image_name,
command=command_line_values,
environment=environment,
ports=[p.internal_port for p in ports],
mem_limit=mem_limit,
cpu_shares=cpu_shares,
host_config=host_config,
)
def _add():
try:
_create()
except APIError as e:
if self._image_not_found(e):
# Pull it and try again
self._client.pull(image_name)
_create()
else:
# Unrecognized, just raise it.
raise
# Just because we got a response doesn't mean Docker has
# actually updated any internal state yet! So if e.g. we did a
# stop on this container Docker might well complain it knows
# not the container of which we speak. To prevent this we poll
# until it does exist.
while not self._blocking_exists(container_name):
sleep(0.001)
continue
self._client.start(container_name)
d = deferToThread(_add)
def _extract_error(failure):
failure.trap(APIError)
code = failure.value.response.status_code
if code == 409:
raise AlreadyExists(unit_name)
in_use = self._address_in_use(failure.value)
if in_use is not None:
# We likely can't start the container because its
# configuration conflicts with something else happening on
# the system. Reflect this failure condition in a more
# easily recognized way.
raise in_use
return failure
d.addErrback(_extract_error)
return d
def _blocking_exists(self, container_name):
"""
Blocking API to check if container exists.
:param unicode container_name: The name of the container whose
existence we're checking.
:return: ``True`` if unit exists, otherwise ``False``.
"""
try:
self._client.inspect_container(container_name)
return True
except APIError:
return False
def exists(self, unit_name):
container_name = self._to_container_name(unit_name)
return deferToThread(self._blocking_exists, container_name)
def _blocking_container_runs(self, container_name):
"""
Blocking API to check if container is running.
:param unicode container_name: The name of the container whose
state we're checking.
:return: ``True`` if container is running, otherwise ``False``.
"""
result = self._client.inspect_container(container_name)
Message.new(
message_type="flocker:docker:container_state",
container=container_name,
state=result
).write()
return result['State']['Running']
def remove(self, unit_name):
container_name = self._to_container_name(unit_name)
def _remove():
while True:
# There is a race condition between a process dying and
# docker noticing that fact.
# https://github.com/docker/docker/issues/5165#issuecomment-65753753 # noqa
# We loop here to let docker notice that the process is dead.
# Docker will return NOT_MODIFIED (which isn't an error) in
# that case.
try:
Message.new(
message_type="flocker:docker:container_stop",
container=container_name
).write()
self._client.stop(container_name)
except APIError as e:
if e.response.status_code == NOT_FOUND:
# If the container doesn't exist, we swallow the error,
# since this method is supposed to be idempotent.
Message.new(
message_type="flocker:docker:container_not_found",
container=container_name
).write()
break
elif e.response.status_code == INTERNAL_SERVER_ERROR:
# Docker returns this if the process had died, but
# hasn't noticed it yet.
Message.new(
message_type="flocker:docker:container_stop_internal_error", # noqa
container=container_name
).write()
continue
else:
raise
else:
Message.new(
message_type="flocker:docker:container_stopped",
container=container_name
).write()
break
try:
# The ``docker.Client.stop`` method sometimes returns a
# 404 error, even though the container exists.
# See https://github.com/docker/docker/issues/13088
# Wait until the container has actually stopped running
# before attempting to remove it. Otherwise we are
# likely to see: 'docker.errors.APIError: 409 Client
# Error: Conflict ("Conflict, You cannot remove a
# running container. Stop the container before
# attempting removal or use -f")'
# This code should probably be removed once the above
# issue has been resolved. See [FLOC-1850]
while self._blocking_container_runs(container_name):
sleep(0.01)
Message.new(
message_type="flocker:docker:container_remove",
container=container_name
).write()
self._client.remove_container(container_name)
Message.new(
message_type="flocker:docker:container_removed",
container=container_name
).write()
except APIError as e:
# If the container doesn't exist, we swallow the error,
# since this method is supposed to be idempotent.
if e.response.status_code == NOT_FOUND:
Message.new(
message_type="flocker:docker:container_not_found",
container=container_name
).write()
return
raise
d = deferToThread(_remove)
return d
def list(self):
def _list():
result = set()
ids = [d[u"Id"] for d in
self._client.containers(quiet=True, all=True)]
for i in ids:
try:
data = self._client.inspect_container(i)
except APIError as e:
# The container ID returned by the list API call above may
# have been removed in another thread.
if e.response.status_code == NOT_FOUND:
continue
else:
raise
state = (u"active" if data[u"State"][u"Running"]
else u"inactive")
name = data[u"Name"]
# Since tags (e.g. "busybox") aren't stable, ensure we're
# looking at the actual image by using the hash:
image = data[u"Image"]
image_tag = data[u"Config"][u"Image"]
command = data[u"Config"][u"Cmd"]
try:
image_data = self._client.inspect_image(image)
except APIError as e:
if e.response.status_code == NOT_FOUND:
# Image has been deleted, so just fill in some
# stub data so we can return *something*. This
# should happen only for stopped containers so
# some inaccuracy is acceptable.
Message.new(
message_type="flocker:docker:image_not_found",
container=i, running=data[u"State"][u"Running"]
).write()
image_data = {u"Config": {u"Env": [], u"Cmd": []}}
else:
raise
if image_data[u"Config"][u"Cmd"] == command:
command = None
port_bindings = data[u"NetworkSettings"][u"Ports"]
if port_bindings is not None:
ports = self._parse_container_ports(port_bindings)
else:
ports = list()
volumes = []
binds = data[u"HostConfig"]['Binds']
if binds is not None:
for bind_config in binds:
parts = bind_config.split(':', 2)
node_path, container_path = parts[:2]
volumes.append(
Volume(container_path=FilePath(container_path),
node_path=FilePath(node_path))
)
if name.startswith(u"/" + self.namespace):
name = name[1 + len(self.namespace):]
else:
continue
# Retrieve environment variables for this container,
# disregarding any environment variables that are part
# of the image, rather than supplied in the configuration.
unit_environment = []
container_environment = data[u"Config"][u"Env"]
if image_data[u"Config"]["Env"] is None:
image_environment = []
else:
image_environment = image_data[u"Config"]["Env"]
if container_environment is not None:
for environment in container_environment:
if environment not in image_environment:
env_key, env_value = environment.split('=', 1)
unit_environment.append((env_key, env_value))
unit_environment = (
Environment(variables=frozenset(unit_environment))
if unit_environment else None
)
# Our Unit model counts None as the value for cpu_shares and
# mem_limit in containers without specified limits, however
# Docker returns the values in these cases as zero, so we
# manually convert.
cpu_shares = data[u"Config"][u"CpuShares"]
cpu_shares = None if cpu_shares == 0 else cpu_shares
mem_limit = data[u"Config"][u"Memory"]
mem_limit = None if mem_limit == 0 else mem_limit
restart_policy = self._parse_restart_policy(
data[u"HostConfig"][u"RestartPolicy"])
result.add(Unit(
name=name,
container_name=self._to_container_name(name),
activation_state=state,
container_image=image_tag,
ports=frozenset(ports),
volumes=frozenset(volumes),
environment=unit_environment,
mem_limit=mem_limit,
cpu_shares=cpu_shares,
restart_policy=restart_policy,
command_line=command)
)
return result
return deferToThread(_list)
class NamespacedDockerClient(proxyForInterface(IDockerClient, "_client")):
"""
A Docker client that only shows and creates containers in a given
namespace.
Unlike ``DockerClient``, whose namespace is there to prevent conflicts
with other Docker users, this class deals with Flocker's internal
concept of namespaces. I.e. if hypothetically Docker container names
supported path-based namespaces then ``DockerClient`` would look at
containers in ``/flocker/`` and this class would look at containers in
``/flocker/<namespace>/``.
"""
def __init__(self, namespace, base_url=None):
"""
:param unicode namespace: Namespace to restrict containers to.
"""
self._client = DockerClient(
namespace=BASE_NAMESPACE + namespace + u"--")
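# A minimal sketch of the IDockerClient contract using the in-memory fake
# (unit name, image and port numbers are made up for illustration):
if __name__ == '__main__':
    fake = FakeDockerClient()
    d = fake.add(
        u"webserver", u"busybox",
        ports=[PortMap(internal_port=80, external_port=0)],  # 0 -> auto-assign
    )
    d.addCallback(lambda _ignored: fake.list())

    def _show(units):
        for unit in units:
            print("{} {} {}".format(
                unit.name, unit.activation_state, list(unit.ports)))

    d.addCallback(_show)
    d.addCallback(lambda _ignored: fake.remove(u"webserver"))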
|
|
import json
import os
import subprocess
import uuid
import passlib.hash
import pytest
import gen
import gen.build_deploy.aws
import release
from dcos_installer import backend
from dcos_installer.config import Config, make_default_config_if_needed, to_config
os.environ["BOOTSTRAP_ID"] = "12345"
@pytest.fixture(scope='module')
def config():
if not os.path.exists('dcos-release.config.yaml'):
pytest.skip("Skipping because there is no configuration in dcos-release.config.yaml")
return release.load_config('dcos-release.config.yaml')
@pytest.fixture(scope='module')
def config_testing(config):
if 'testing' not in config:
pytest.skip("Skipped because there is no `testing` configuration in dcos-release.config.yaml")
return config['testing']
@pytest.fixture(scope='module')
def config_aws(config_testing):
if 'aws' not in config_testing:
pytest.skip("Skipped because there is no `testing.aws` configuration in dcos-release.config.yaml")
return config_testing['aws']
def test_password_hash():
"""Tests that the password hashing method creates de-cryptable hash
"""
password = 'DcosTestingPassword!@#'
# only reads from STDOUT
hash_pw = subprocess.check_output(['dcos_installer', '--hash-password', password])
print(hash_pw)
hash_pw = hash_pw.decode('ascii').strip('\n')
assert passlib.hash.sha512_crypt.verify(password, hash_pw), 'Hash does not match password'
def test_set_superuser_password(tmpdir):
"""Test that --set-superuser-hash works"""
with tmpdir.as_cwd():
tmpdir.join('genconf').ensure(dir=True)
# TODO(cmaloney): Add tests for the behavior around a non-existent config.yaml
# Set the password in a non-empty config.yaml which has no password set yet
make_default_config_if_needed('genconf/config.yaml')
assert 'superuser_password_hash' not in Config('genconf/config.yaml').config
# Set the password
create_fake_build_artifacts(tmpdir)
subprocess.check_call(['dcos_installer', '--set-superuser-password', 'foo'], cwd=str(tmpdir))
# Check that config.yaml has the password set
config = Config('genconf/config.yaml')
assert passlib.hash.sha512_crypt.verify('foo', config['superuser_password_hash'])
def test_generate_node_upgrade_script(tmpdir, monkeypatch):
upgrade_config = """
---
# The name of your DC/OS cluster. Visible in the DC/OS user interface.
cluster_name: 'DC/OS'
master_discovery: static
exhibitor_storage_backend: 'static'
resolvers:
- 8.8.8.8
- 8.8.4.4
ssh_port: 22
process_timeout: 10000
bootstrap_url: file:///opt/dcos_install_tmp
master_list: ['10.0.0.1', '10.0.0.2', '10.0.0.5']
"""
monkeypatch.setenv('BOOTSTRAP_VARIANT', '')
create_config(upgrade_config, tmpdir)
create_fake_build_artifacts(tmpdir)
output = subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script', 'fake'], cwd=str(tmpdir))
assert output.decode('utf-8').splitlines()[-1].split("Node upgrade script URL: ", 1)[1]\
.endswith("dcos_node_upgrade.sh")
try:
subprocess.check_output(['dcos_installer', '--generate-node-upgrade-script'], cwd=str(tmpdir))
except subprocess.CalledProcessError as e:
print(e.output)
assert e.output.decode('ascii') == "Must provide the version of the cluster upgrading from\n"
else:
raise Exception("Test passed, this should not pass without specifying a version number")
def test_version(monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'some-variant')
version_data = subprocess.check_output(['dcos_installer', '--version']).decode()
assert json.loads(version_data) == {
'version': '1.10.0-beta2',
'variant': 'some-variant'
}
def test_good_create_config_from_post(tmpdir):
"""
Test that it creates the config
"""
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
temp_ip_detect_path = workspace + '/ip-detect'
f = open(temp_ip_detect_path, "w")
f.write("#/bin/bash foo")
good_post_data = {
"agent_list": ["10.0.0.2"],
"master_list": ["10.0.0.1"],
"cluster_name": "Good Test",
"resolvers": ["4.4.4.4"],
"ip_detect_filename": temp_ip_detect_path
}
expected_good_messages = {}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=good_post_data,
config_path=temp_config_path)
assert messages == expected_good_messages
def test_bad_create_config_from_post(tmpdir):
# Create a temp config
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
bad_post_data = {
"agent_list": "foo",
"master_list": ["foo"],
}
expected_bad_messages = {
"agent_list": "Must be a JSON formatted list, but couldn't be parsed the given value `foo` as "
"one because of: Expecting value: line 1 column 1 (char 0)",
"master_list": 'Invalid IPv4 addresses in list: foo',
}
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=bad_post_data,
config_path=temp_config_path)
assert messages == expected_bad_messages
def test_do_validate_config(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
# Create a temp config
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
temp_config_path = str(genconf_dir.join('config.yaml'))
# Initialize with defaults
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
expected_output = {
'ip_detect_contents': 'ip-detect script `genconf/ip-detect` must exist',
'ssh_user': 'Must set ssh_user, no way to calculate value.',
'master_list': 'Must set master_list, no way to calculate value.',
'ssh_key_path': 'could not find ssh private key: genconf/ssh_key'
}
with tmpdir.as_cwd():
assert Config(config_path='genconf/config.yaml').do_validate(include_ssh=True) == expected_output
def test_get_config(tmpdir):
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
expected_data = {
'cluster_name': 'DC/OS',
'master_discovery': 'static',
'exhibitor_storage_backend': 'static',
'resolvers': ['8.8.8.8', '8.8.4.4'],
'ssh_port': 22,
'process_timeout': 10000,
'bootstrap_url': 'file:///opt/dcos_install_tmp'
}
make_default_config_if_needed(temp_config_path)
config = Config(temp_config_path)
assert expected_data == config.config
def test_determine_config_type(tmpdir):
# Ensure the default created config is of simple type
workspace = tmpdir.strpath
temp_config_path = workspace + '/config.yaml'
make_default_config_if_needed(temp_config_path)
got_output = backend.determine_config_type(config_path=temp_config_path)
expected_output = {
'message': '',
'type': 'minimal',
}
assert got_output == expected_output
def test_success():
mock_config = to_config({
'master_list': ['10.0.0.1', '10.0.0.2', '10.0.0.5'],
'agent_list': ['10.0.0.3', '10.0.0.4']
})
expected_output = {
"success": "http://10.0.0.1",
"master_count": 3,
"agent_count": 2
}
expected_output_bad = {
"success": "",
"master_count": 0,
"agent_count": 0
}
got_output, code = backend.success(mock_config)
mock_config.update({'master_list': '', 'agent_list': ''})
bad_out, bad_code = backend.success(mock_config)
assert got_output == expected_output
assert code == 200
assert bad_out == expected_output_bad
assert bad_code == 400
def test_accept_overrides_for_undefined_config_params(tmpdir):
temp_config_path = tmpdir.strpath + '/config.yaml'
param = ('fake_test_param_name', 'fake_test_param_value')
make_default_config_if_needed(temp_config_path)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
messages = backend.create_config_from_post(
post_data=dict([param]),
config_path=temp_config_path)
assert not messages, "unexpected validation error: {}".format(messages)
assert Config(config_path=temp_config_path)[param[0]] == param[1]
simple_full_config = """---
cluster_name: DC/OS
master_discovery: static
exhibitor_storage_backend: static
master_list:
- 127.0.0.1
bootstrap_url: http://example.com
"""
def test_do_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(simple_full_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_configure(config_path='genconf/config.yaml') == 0
aws_base_config = """---
# NOTE: These storage settings are not validated, so we don't need valid AWS /
# S3 credentials in this configuration.
aws_template_storage_bucket: psychic
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_region_name: us-west-2
aws_template_upload: false
"""
def test_do_aws_configure(tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
create_config(aws_base_config, tmpdir)
create_fake_build_artifacts(tmpdir)
with tmpdir.as_cwd():
assert backend.do_aws_cf_configure() == 0
valid_storage_config = """---
master_list:
- 127.0.0.1
aws_template_storage_access_key_id: {key_id}
aws_template_storage_bucket: {bucket}
aws_template_storage_bucket_path: mofo-the-gorilla
aws_template_storage_secret_access_key: {access_key}
aws_template_upload: true
"""
def test_do_aws_cf_configure_valid_storage_config(config_aws, tmpdir, monkeypatch):
bucket = str(uuid.uuid4())
config_str = valid_storage_config.format(
key_id=config_aws["access_key_id"],
bucket=bucket,
access_key=config_aws["secret_access_key"])
assert aws_cf_configure(bucket, config_str, config_aws, tmpdir, monkeypatch) == 0
# TODO: add an assertion that the config that was resolved inside do_aws_cf_configure
# ended up with the correct region where the above testing bucket was created.
def test_override_aws_template_storage_region_name(config_aws, tmpdir, monkeypatch):
bucket = str(uuid.uuid4())
config_str = valid_storage_config.format(
key_id=config_aws["access_key_id"],
bucket=bucket,
access_key=config_aws["secret_access_key"])
config_str += '\naws_template_storage_region_name: {}'.format(config_aws['region_name'])
assert aws_cf_configure(bucket, config_str, config_aws, tmpdir, monkeypatch) == 0
def aws_cf_configure(s3_bucket_name, config, config_aws, tmpdir, monkeypatch):
monkeypatch.setenv('BOOTSTRAP_VARIANT', 'test_variant')
session = gen.build_deploy.aws.get_test_session(config_aws)
s3 = session.resource('s3')
s3_bucket = s3.Bucket(s3_bucket_name)
s3_bucket.create(CreateBucketConfiguration={'LocationConstraint': config_aws['region_name']})
create_config(config, tmpdir)
create_fake_build_artifacts(tmpdir)
try:
with tmpdir.as_cwd():
return backend.do_aws_cf_configure()
finally:
objects = [{'Key': o.key} for o in s3_bucket.objects.all()]
s3_bucket.delete_objects(Delete={'Objects': objects})
s3_bucket.delete()
def create_config(config_str, tmpdir):
genconf_dir = tmpdir.join('genconf')
genconf_dir.ensure(dir=True)
config_path = genconf_dir.join('config.yaml')
config_path.write(config_str)
genconf_dir.join('ip-detect').write('#!/bin/bash\necho 127.0.0.1')
def create_fake_build_artifacts(tmpdir):
artifact_dir = tmpdir.join('artifacts/bootstrap')
artifact_dir.ensure(dir=True)
artifact_dir.join('12345.bootstrap.tar.xz').write('contents_of_bootstrap', ensure=True)
artifact_dir.join('12345.active.json').write('["package--version"]', ensure=True)
artifact_dir.join('test_variant.bootstrap.latest').write("12345")
tmpdir.join('artifacts/complete/test_variant.complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/complete/complete.latest.json').write(
'{"bootstrap": "12345", "packages": ["package--version"]}',
ensure=True,
)
tmpdir.join('artifacts/packages/package/package--version.tar.xz').write('contents_of_package', ensure=True)
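# For reference, create_fake_build_artifacts() above lays out the following
# tree under tmpdir (file contents are the placeholder strings written above,
# not real build output):
#
#   artifacts/bootstrap/12345.bootstrap.tar.xz
#   artifacts/bootstrap/12345.active.json
#   artifacts/bootstrap/test_variant.bootstrap.latest
#   artifacts/complete/test_variant.complete.latest.json
#   artifacts/complete/complete.latest.json
#   artifacts/packages/package/package--version.tar.xz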
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VAE base class."""
from __future__ import absolute_import
import functools
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp
from eim.models import base
tfk = tf.keras
tfkl = tf.keras.layers
tfpl = tfp.layers
tfd = tfp.distributions
deconv = functools.partial(tf.keras.layers.Conv2DTranspose, padding="SAME")
conv = functools.partial(tf.keras.layers.Conv2D, padding="SAME")
class VAE(base.ProbabilisticModel):
"""Variational autoencoder with continuous latent space."""
def __init__(self,
latent_dim,
data_dim,
decoder,
q,
proposal=None,
data_mean=None,
kl_weight=1.,
dtype=tf.float32):
"""Creates a VAE.
Args:
latent_dim: The size of the latent variable of the VAE.
data_dim: The size of the input data.
decoder: A callable that accepts a batch of latent samples and returns a
distribution over the data space of the VAE. The distribution should
support sample() and log_prob().
q: A callable that accepts a batch of data samples and returns a
distribution over the latent space of the VAE. The distribution should
support sample() and log_prob().
proposal: A distribution over the latent space of the VAE. The object must
support sample() and log_prob(). If not provided, defaults to Gaussian.
data_mean: Mean of the data used to center the input.
kl_weight: Weighting on the KL regularizer.
dtype: Type of the tensors.
"""
self.data_dim = data_dim
if data_mean is not None:
self.data_mean = data_mean
else:
self.data_mean = tf.zeros((), dtype=dtype)
self.decoder = decoder
self.q = q
self.kl_weight = kl_weight
self.dtype = dtype
if proposal is None:
self.proposal = base.get_independent_normal([latent_dim])
else:
self.proposal = proposal
def _log_prob(self, data, num_samples=1):
"""Compute a lower bound on the log likelihood."""
mean_centered_data = data - self.data_mean
# Tile by num_samples on the batch dimension.
tiled_mean_centered_data = tf.tile(mean_centered_data,
[num_samples] + [1] * len(self.data_dim))
tiled_data = tf.tile(data,
[num_samples] + [1] * len(self.data_dim))
# Construct approximate posterior and sample z.
q_z = self.q(tiled_mean_centered_data)
z = q_z.sample() # [num_samples * batch_size, data_dim]
log_q_z = q_z.log_prob(z) # [num_samples * batch_size]
    # Compute the proposal log-prob of z.  # [num_samples * batch_size]
try:
log_p_z = self.proposal.log_prob(z, log_q_data=log_q_z)
except TypeError:
log_p_z = self.proposal.log_prob(z)
# Compute the model logprob of the data
p_x_given_z = self.decoder(z)
# [num_samples * batch_size]
log_p_x_given_z = p_x_given_z.log_prob(tiled_data)
elbo = log_p_x_given_z + self.kl_weight * (log_p_z - log_q_z)
iwae = (tf.reduce_logsumexp(tf.reshape(elbo, [num_samples, -1]), axis=0)
- tf.log(tf.to_float(num_samples)))
return iwae
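  # For reference, the quantity returned above is the importance-weighted bound
  #   IWAE_K(x) = logsumexp_{k=1..K}[log p(x|z_k) + kl_weight*(log p(z_k) - log q(z_k|x))] - log K,
  # with z_k ~ q(.|x); for num_samples == 1 this is a single-sample estimate of the
  # (KL-weighted) ELBO.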
def sample(self, num_samples=1):
z = self.proposal.sample(num_samples)
p_x_given_z = self.decoder(z)
return tf.cast(p_x_given_z.sample(), self.dtype)
class GaussianVAE(VAE):
"""VAE with Gaussian generative distribution."""
def __init__(self,
latent_dim,
data_dim,
decoder_hidden_sizes,
q_hidden_sizes,
proposal=None,
data_mean=None,
decoder_nn_scale=True,
scale_min=1e-5,
dtype=tf.float32,
kl_weight=1.,
name="gaussian_vae"):
# Make the decoder with a Gaussian distribution
decoder_fn = functools.partial(
base.conditional_normal,
data_dim=data_dim,
hidden_sizes=decoder_hidden_sizes,
scale_min=scale_min,
nn_scale=decoder_nn_scale,
bias_init=data_mean,
truncate=False,
name="%s/decoder" % name)
q = functools.partial(
base.conditional_normal,
data_dim=[latent_dim],
hidden_sizes=q_hidden_sizes,
scale_min=scale_min,
name="%s/q" % name)
super(GaussianVAE, self).__init__(
latent_dim=latent_dim,
data_dim=data_dim,
decoder=decoder_fn,
q=q,
data_mean=data_mean,
proposal=proposal,
kl_weight=kl_weight,
dtype=dtype)
class BernoulliVAE(VAE):
"""VAE with Bernoulli generative distribution."""
def __init__(self,
latent_dim,
data_dim,
decoder_hidden_sizes,
q_hidden_sizes,
proposal=None,
data_mean=None,
scale_min=1e-5,
kl_weight=1.,
reparameterize_sample=False,
temperature=None,
dtype=tf.float32,
name="bernoulli_vae"):
    # Make the decoder with a Bernoulli distribution
decoder_fn = functools.partial(
base.conditional_bernoulli,
data_dim=data_dim,
hidden_sizes=decoder_hidden_sizes,
bias_init=data_mean,
dtype=dtype,
use_gst=reparameterize_sample,
temperature=temperature,
name="%s/decoder" % name)
q = functools.partial(
base.conditional_normal,
data_dim=[latent_dim],
hidden_sizes=q_hidden_sizes,
scale_min=scale_min,
name="%s/q" % name)
super(BernoulliVAE, self).__init__(
latent_dim=latent_dim,
data_dim=data_dim,
decoder=decoder_fn,
q=q,
data_mean=data_mean,
proposal=proposal,
kl_weight=kl_weight,
dtype=dtype)
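# Illustrative usage sketch (not part of the original module; the hyperparameters
# are made up, and base.ProbabilisticModel is assumed to expose the _log_prob
# bound through a public log_prob wrapper):
#
#   vae = BernoulliVAE(
#       latent_dim=50,
#       data_dim=[784],
#       decoder_hidden_sizes=[300, 300],
#       q_hidden_sizes=[300, 300])
#   bound = vae.log_prob(images, num_samples=10)  # [batch_size] IWAE bound
#   samples = vae.sample(num_samples=16)          # [16] + data_dim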
class HVAE(object):
"""2 stochastic layer VAE."""
def __init__(self,
latent_dim,
data_dim,
proposal=None,
data_mean=None,
kl_weight=1.,
dtype=tf.float32):
"""Create HVAE."""
self.latent_dim = latent_dim
self.data_dim = data_dim
if data_mean is not None:
self.data_mean = data_mean
else:
self.data_mean = 0.
self.kl_weight = kl_weight
self.dtype = dtype
if proposal is None:
self.proposal = base.get_independent_normal([latent_dim])
else:
self.proposal = proposal
self._build()
def _build(self):
pass
def log_prob(self, data, num_samples=1):
"""Computes log probability lower bound."""
tiled_data = tf.tile(data[None],
[num_samples, 1] + [1] * len(self.data_dim))
tiled_data_flat = tf.reshape(tiled_data, [-1] + self.data_dim)
# Construct approximate posterior and sample z.
q_z2_given_x = self.q_z2_given_x(data)
z2 = q_z2_given_x.sample(sample_shape=[num_samples]) # [S, B, ...]
z2_flat = tf.reshape(z2, [-1, self.latent_dim]) # [S*B, ...]
q_z1_given_x_z2 = self.q_z1_given_x_z2(tiled_data_flat, z2_flat)
z1 = q_z1_given_x_z2.sample()
log_q = self.kl_weight * (
tf.reshape(q_z2_given_x.log_prob(z2), [-1]) +
q_z1_given_x_z2.log_prob(z1))
log_p = (
self.kl_weight *
(self.proposal.log_prob(z2_flat) + self.p_z1_z2(z2_flat).log_prob(z1)) +
self.p_x_given_z1_z2(z1, z2_flat).log_prob(tiled_data_flat))
elbo = tf.reduce_logsumexp(tf.reshape(log_p - log_q, [num_samples, -1]),
axis=0) - tf.log(tf.to_float(num_samples))
return elbo
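  # The log-weight assembled above factorizes as
  #   log w = log p(x|z1,z2)
  #           + kl_weight * (log p(z2) + log p(z1|z2) - log q(z2|x) - log q(z1|x,z2)),
  # and the returned bound is logsumexp of log w over the num_samples draws minus
  # log num_samples.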
def sample(self, num_samples=1):
z2 = self.proposal.sample(num_samples)
z1 = self.p_z1_z2(z2).sample()
p_x = self.p_x_given_z1_z2(z1, z2)
return tf.cast(p_x.sample(), self.dtype)
class ConvBernoulliVAE(HVAE):
"""VAE with Bernoulli generative distribution."""
def __init__(self,
latent_dim,
data_dim,
proposal=None,
data_mean=None,
scale_min=1e-5,
kl_weight=1.,
dtype=tf.float32):
"""Create ConvBernoulliVAE."""
self.scale_min = scale_min
super(ConvBernoulliVAE, self).__init__(latent_dim,
data_dim,
proposal,
data_mean,
kl_weight,
dtype)
def _get_observation_layer(self):
bias_init = -tf.log(1. /
tf.clip_by_value(self.data_mean, 0.0001, 0.9999) - 1)
return [tfkl.Lambda(lambda t: t + bias_init),
tfkl.Flatten(),
tfpl.IndependentBernoulli(self.data_dim)]
def _build(self):
"""Creates the distributions for the VAE."""
def normal_layer_fn(t):
mu, raw_scale = tf.split(t, 2, axis=-1)
return tfd.Independent(
tfd.Normal(
loc=mu,
scale=tf.math.maximum(self.scale_min,
tf.math.softplus(raw_scale))))
# q(z2|x)
self.q_z2_given_x = tf.keras.Sequential([
tfkl.Lambda(lambda t: t - self.data_mean),
conv(32, 4, 2, activation="relu"),
conv(32, 4, 2, activation="relu"),
conv(32, 4, 2, activation="relu"),
tfkl.Flatten(),
tfkl.Dense(self.latent_dim * 2, activation=None),
tfpl.DistributionLambda(normal_layer_fn),
])
# q(z1|x,z2)
q_z1_x_fn = tf.keras.Sequential([
tfkl.Lambda(lambda t: t - self.data_mean),
conv(32, 4, 2, activation="relu"),
conv(32, 4, 2, activation="relu"),
conv(32, 4, 2, activation="relu"),
tfkl.Flatten()
])
q_z1_z2_fn = tfkl.Dense(512, activation="tanh")
q_z1_fn = tf.keras.Sequential([
tfkl.Dense(300, activation="tanh"),
tfkl.Dense(self.latent_dim * 2, activation=None),
tfpl.DistributionLambda(normal_layer_fn),
])
def q_z1_given_x_z2(x, z2):
x_out = q_z1_x_fn(x)
z2_out = q_z1_z2_fn(z2)
concat = tfkl.concatenate([x_out, z2_out])
return q_z1_fn(concat)
self.q_z1_given_x_z2 = q_z1_given_x_z2
# p(z_1|z_2)
self.p_z1_z2 = tf.keras.Sequential([
tfkl.Dense(300, activation="tanh"),
tfkl.Dense(300, activation="tanh"),
tfkl.Dense(self.latent_dim * 2, activation=None),
tfpl.DistributionLambda(normal_layer_fn),
])
# p(x|z1,z2)
p_x_z1 = tfkl.Dense(300, activation="tanh")
p_x_z2 = tfkl.Dense(300, activation="tanh")
p_x_z1_z2_fn = tf.keras.Sequential([
tfkl.Dense(512, activation="tanh"),
tfkl.Reshape((4, 4, 32)),
deconv(32, 4, 2, activation="relu"),
# Remove the extra row/column [7 x 7 x 32] after
tfkl.Lambda(lambda t: t[:, :-1, :-1, :]),
deconv(32, 4, 2, activation="relu"),
# In the LARS paper they say that RELU follows all conv layers, but I
# left it off here.
deconv(1, 4, 2, activation=None),] +
self._get_observation_layer())
def p_x_given_z1_z2(z1, z2):
z1_out = p_x_z1(z1)
z2_out = p_x_z2(z2)
concat = tfkl.concatenate([z1_out, z2_out])
return p_x_z1_z2_fn(concat)
# Note that the output will be [batch_size, 28, 28, 1]
# (trailing 1 dimensions)
self.p_x_given_z1_z2 = p_x_given_z1_z2
class ConvGaussianVAE(ConvBernoulliVAE):
"""VAE with Gaussian generative distribution."""
def __init__(self,
latent_dim,
data_dim,
proposal=None,
data_mean=None,
scale_min=1e-5,
scale_init=1.,
kl_weight=1.,
dtype=tf.float32,
name="ConvGaussianVAE"):
"""Create ConvGaussianVAE."""
self.scale_init = scale_init
self.name = name
super(ConvGaussianVAE, self).__init__(latent_dim,
data_dim,
proposal,
data_mean,
scale_min,
kl_weight,
dtype)
def _get_observation_layer(self):
return [tfpl.DistributionLambda(lambda t: tfd.Independent( # pylint: disable=g-long-lambda
tfd.Normal(loc=t, scale=self.decoder_scale[None])))]
def _build(self):
with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
raw_scale_init = np.log(np.exp(self.scale_init) - 1 + self.scale_min)
raw_scale = tf.get_variable(
name="raw_sigma",
shape=self.data_dim,
dtype=tf.float32,
initializer=tf.constant_initializer(raw_scale_init),
trainable=True)
self.decoder_scale = tf.math.maximum(self.scale_min,
tf.math.softplus(raw_scale))
super(ConvGaussianVAE, self)._build()
|
|
"""Perform GATK based filtering, perferring variant quality score recalibration.
Performs cutoff-based soft filtering when VQSR fails on smaller sets of variant calls.
"""
import os
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import config_utils
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.variation import vcfutils, vfilter
def run(call_file, ref_file, vrn_files, data):
"""Run filtering on the input call file, handling SNPs and indels separately.
"""
algs = [data["config"]["algorithm"]] * len(data.get("vrn_files", [1]))
if config_utils.use_vqsr(algs):
if vcfutils.is_gvcf_file(call_file):
raise ValueError("Cannot force gVCF output with joint calling using tools_on: [gvcf] and use VQSR. "
"Try using cutoff-based soft filtering with tools_off: [vqsr]")
snp_file, indel_file = vcfutils.split_snps_indels(call_file, ref_file, data["config"])
snp_filter_file = _variant_filtration(snp_file, ref_file, vrn_files, data, "SNP",
vfilter.gatk_snp_cutoff)
indel_filter_file = _variant_filtration(indel_file, ref_file, vrn_files, data, "INDEL",
vfilter.gatk_indel_cutoff)
orig_files = [snp_filter_file, indel_filter_file]
out_file = "%scombined.vcf.gz" % os.path.commonprefix(orig_files)
combined_file = vcfutils.combine_variant_files(orig_files, out_file, ref_file, data["config"])
return combined_file
else:
snp_filter = vfilter.gatk_snp_cutoff(call_file, data)
indel_filter = vfilter.gatk_indel_cutoff(snp_filter, data)
return indel_filter
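# In the VQSR branch above, SNPs and indels are recalibrated independently and the
# two filtered VCFs are re-combined into "<common prefix>combined.vcf.gz"; in the
# cutoff branch, the SNP and indel cutoff filters are simply applied in sequence.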
def _apply_vqsr(in_file, ref_file, recal_file, tranch_file,
sensitivity_cutoff, filter_type, data):
"""Apply VQSR based on the specified tranche, returning a filtered VCF file.
"""
base, ext = utils.splitext_plus(in_file)
out_file = "{base}-{filter}filter{ext}".format(base=base, ext=ext,
filter=filter_type)
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
broad_runner = broad.runner_from_config(data["config"])
gatk_type = broad_runner.gatk_type()
if gatk_type == "gatk4":
params = ["-T", "ApplyVQSR",
"--variant", in_file,
"--output", tx_out_file]
else:
params = ["-T", "ApplyRecalibration",
"--input", in_file,
"--out", tx_out_file]
params += ["-R", ref_file,
"--recal_file", recal_file,
"--tranches_file", tranch_file,
"--mode", filter_type]
resources = config_utils.get_resources("gatk_apply_recalibration", data["config"])
opts = resources.get("options", [])
if not opts:
opts += ["--ts_filter_level", sensitivity_cutoff]
params += opts
broad_runner.run_gatk(params)
return out_file
def _get_training_data(vrn_files):
"""Retrieve training data, returning an empty set of information if not available.
"""
out = {"SNP": [], "INDEL": []}
# SNPs
for name, train_info in [("train_hapmap", "known=false,training=true,truth=true,prior=15.0"),
("train_omni", "known=false,training=true,truth=true,prior=12.0"),
("train_1000g", "known=false,training=true,truth=false,prior=10.0"),
("dbsnp", "known=true,training=false,truth=false,prior=2.0")]:
if name not in vrn_files:
return {}
else:
out["SNP"].append((name.replace("train_", ""), train_info, vrn_files[name]))
# Indels
if "train_indels" in vrn_files:
out["INDEL"].append(("mills", "known=true,training=true,truth=true,prior=12.0",
vrn_files["train_indels"]))
else:
return {}
return out
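# The training resources are looked up by key; a complete mapping looks like this
# (paths are placeholders, not real bundle locations):
#   vrn_files = {
#       "train_hapmap": "/path/hapmap.vcf.gz",
#       "train_omni": "/path/omni.vcf.gz",
#       "train_1000g": "/path/1000g_snps.vcf.gz",
#       "dbsnp": "/path/dbsnp.vcf.gz",
#       "train_indels": "/path/mills_indels.vcf.gz",
#   }
# If any SNP resource or "train_indels" is missing, an empty dict is returned and
# VQSR is skipped in favor of cutoff-based filtering.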
def _have_training_data(vrn_files):
return len(_get_training_data(vrn_files)) > 0
def _get_vqsr_training(filter_type, vrn_files, gatk_type):
"""Return parameters for VQSR training, handling SNPs and Indels.
"""
params = []
for name, train_info, fname in _get_training_data(vrn_files)[filter_type]:
if gatk_type == "gatk4":
params.extend(["--resource", "%s,%s:%s" % (name, train_info, fname)])
else:
params.extend(["-resource:%s,VCF,%s" % (name, train_info), fname])
if filter_type == "INDEL":
params.extend(["--maxGaussians", "4"])
return params
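# For example, with gatk_type == "gatk4" and filter_type == "SNP" the first resource
# expands to:
#   ["--resource", "hapmap,known=false,training=true,truth=true,prior=15.0:/path/hapmap.vcf.gz"]
# whereas the GATK3 form is:
#   ["-resource:hapmap,VCF,known=false,training=true,truth=true,prior=15.0", "/path/hapmap.vcf.gz"]
# (paths shown are placeholders).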
def _get_vqsr_annotations(filter_type, data):
"""Retrieve appropriate annotations to use for VQSR based on filter type.
    Reported issues with MQ and the bwa-mem quality distribution result in intermittent
    VQSR failures:
http://gatkforums.broadinstitute.org/discussion/4425/variant-recalibration-failing
http://gatkforums.broadinstitute.org/discussion/4248/variantrecalibrator-removing-all-snps-from-the-training-set
"""
if filter_type == "SNP":
# MQ, MQRankSum
anns = ["QD", "FS", "ReadPosRankSum", "SOR"]
else:
assert filter_type == "INDEL"
# MQRankSum
anns = ["QD", "FS", "ReadPosRankSum", "SOR"]
if dd.get_coverage_interval(data) == "genome":
anns += ["DP"]
return anns
def _run_vqsr(in_file, ref_file, vrn_files, sensitivity_cutoff, filter_type, data):
"""Run variant quality score recalibration.
"""
cutoffs = ["100.0", "99.99", "99.98", "99.97", "99.96", "99.95", "99.94", "99.93", "99.92", "99.91",
"99.9", "99.8", "99.7", "99.6", "99.5", "99.0", "98.0", "90.0"]
if sensitivity_cutoff not in cutoffs:
cutoffs.append(sensitivity_cutoff)
cutoffs.sort()
broad_runner = broad.runner_from_config(data["config"])
gatk_type = broad_runner.gatk_type()
base = utils.splitext_plus(in_file)[0]
recal_file = ("%s-vqsrrecal.vcf.gz" % base) if gatk_type == "gatk4" else ("%s.recal" % base)
tranches_file = "%s.tranches" % base
plot_file = "%s-plots.R" % base
if not utils.file_exists(recal_file):
with file_transaction(data, recal_file, tranches_file, plot_file) as (tx_recal, tx_tranches, tx_plot_file):
params = ["-T", "VariantRecalibrator",
"-R", ref_file,
"--mode", filter_type,
"--tranches_file", tx_tranches,
"--rscript_file", tx_plot_file]
if gatk_type == "gatk4":
params += ["--variant", in_file, "--output", tx_recal]
else:
params += ["--input", in_file, "--recal_file", tx_recal]
params += _get_vqsr_training(filter_type, vrn_files, gatk_type)
resources = config_utils.get_resources("gatk_variant_recalibrator", data["config"])
opts = resources.get("options", [])
if not opts:
for cutoff in cutoffs:
opts += ["-tranche", str(cutoff)]
for a in _get_vqsr_annotations(filter_type, data):
opts += ["-an", a]
params += opts
cores = dd.get_cores(data)
memscale = {"magnitude": 0.9 * cores, "direction": "increase"} if cores > 1 else None
try:
broad_runner.new_resources("gatk-vqsr")
broad_runner.run_gatk(params, log_error=False, memscale=memscale, parallel_gc=True)
except: # Can fail to run if not enough values are present to train.
return None, None
if gatk_type == "gatk4":
vcfutils.bgzip_and_index(recal_file, data["config"])
return recal_file, tranches_file
# ## SNP and indel specific variant filtration
def _already_cutoff_filtered(in_file, filter_type):
"""Check if we have a pre-existing cutoff-based filter file from previous VQSR failure.
"""
filter_file = "%s-filter%s.vcf.gz" % (utils.splitext_plus(in_file)[0], filter_type)
return utils.file_exists(filter_file)
def _variant_filtration(in_file, ref_file, vrn_files, data, filter_type,
hard_filter_fn):
"""Filter SNP and indel variant calls using GATK best practice recommendations.
Use cutoff-based soft filters if configuration indicates too little data or
already finished a cutoff-based filtering step, otherwise try VQSR.
"""
# Algorithms multiplied by number of input files to check for large enough sample sizes
algs = [data["config"]["algorithm"]] * len(data.get("vrn_files", [1]))
if (not config_utils.use_vqsr(algs) or
_already_cutoff_filtered(in_file, filter_type)):
logger.info("Skipping VQSR, using cutoff-based filers: we don't have whole genome input data")
return hard_filter_fn(in_file, data)
elif not _have_training_data(vrn_files):
logger.info("Skipping VQSR, using cutoff-based filers: genome build does not have sufficient training data")
return hard_filter_fn(in_file, data)
else:
sensitivities = {"INDEL": "98.0", "SNP": "99.97"}
recal_file, tranches_file = _run_vqsr(in_file, ref_file, vrn_files,
sensitivities[filter_type], filter_type, data)
if recal_file is None: # VQSR failed
logger.info("VQSR failed due to lack of training data. Using cutoff-based soft filtering.")
return hard_filter_fn(in_file, data)
else:
return _apply_vqsr(in_file, ref_file, recal_file, tranches_file,
sensitivities[filter_type], filter_type, data)
|
|
#!/usr/bin/env python3
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Usage: gn_to_cmake.py <json_file_name>
gn gen out/config --ide=json --json-ide-script=../../gn/gn_to_cmake.py
or
gn gen out/config --ide=json
python gn/gn_to_cmake.py out/config/project.json
The first is recommended, as it will auto-update.
"""
from __future__ import print_function
import functools
import json
import posixpath
import string
import sys
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
def CMakeTargetEscape(a):
"""Escapes the string 'a' for use as a CMake target name.
CMP0037 in CMake 3.0 restricts target names to "^[A-Za-z0-9_.:+-]+$"
The ':' is only allowed for imported targets.
"""
def Escape(c):
if c in string.ascii_letters or c in string.digits or c in '_.+-':
return c
else:
return '__'
return ''.join([Escape(c) for c in a])
def SetVariable(out, variable_name, value):
"""Sets a CMake variable."""
out.write('set("')
out.write(CMakeStringEscape(variable_name))
out.write('" "')
out.write(CMakeStringEscape(value))
out.write('")\n')
def SetVariableList(out, variable_name, values):
"""Sets a CMake variable to a list."""
if not values:
return SetVariable(out, variable_name, "")
if len(values) == 1:
return SetVariable(out, variable_name, values[0])
out.write('list(APPEND "')
out.write(CMakeStringEscape(variable_name))
out.write('"\n "')
out.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
out.write('")\n')
def SetFilesProperty(output, variable, property_name, values, sep):
"""Given a set of source files, sets the given property on them."""
output.write('set_source_files_properties(')
WriteVariable(output, variable)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetCurrentTargetProperty(out, property_name, values, sep=''):
"""Given a target, sets the given property."""
out.write('set_target_properties("${target}" PROPERTIES ')
out.write(property_name)
out.write(' "')
for value in values:
out.write(CMakeStringEscape(value))
out.write(sep)
out.write('")\n')
def WriteVariable(output, variable_name, prepend=None):
if prepend:
output.write(prepend)
output.write('${')
output.write(variable_name)
output.write('}')
# See GetSourceFileType in gn
source_file_types = {
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.c': 'c',
'.s': 'asm',
'.S': 'asm',
'.asm': 'asm',
'.o': 'obj',
'.obj': 'obj',
}
class CMakeTargetType(object):
def __init__(self, command, modifier, property_modifier, is_linkable):
self.command = command
self.modifier = modifier
self.property_modifier = property_modifier
self.is_linkable = is_linkable
CMakeTargetType.custom = CMakeTargetType('add_custom_target', 'SOURCES',
None, False)
# See GetStringForOutputType in gn
cmake_target_types = {
'unknown': CMakeTargetType.custom,
'group': CMakeTargetType.custom,
'executable': CMakeTargetType('add_executable', None, 'RUNTIME', True),
'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY', True),
'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY', True),
'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE', False),
'source_set': CMakeTargetType('add_library', 'OBJECT', None, False),
'copy': CMakeTargetType.custom,
'action': CMakeTargetType.custom,
'action_foreach': CMakeTargetType.custom,
'bundle_data': CMakeTargetType.custom,
'create_bundle': CMakeTargetType.custom,
}
def FindFirstOf(s, a):
return min(s.find(i) for i in a if i in s)
def GetCMakeTargetName(gn_target_name):
# See <chromium>/src/tools/gn/label.cc#Resolve
# //base/test:test_support(//build/toolchain/win:msvc)
path_separator = FindFirstOf(gn_target_name, (':', '('))
location = None
name = None
toolchain = None
if not path_separator:
location = gn_target_name[2:]
else:
location = gn_target_name[2:path_separator]
toolchain_separator = gn_target_name.find('(', path_separator)
if toolchain_separator == -1:
name = gn_target_name[path_separator + 1:]
else:
if toolchain_separator > path_separator:
name = gn_target_name[path_separator + 1:toolchain_separator]
assert gn_target_name.endswith(')')
toolchain = gn_target_name[toolchain_separator + 1:-1]
assert location or name
cmake_target_name = None
if location.endswith('/' + name):
cmake_target_name = location
elif location:
cmake_target_name = location + '_' + name
else:
cmake_target_name = name
if toolchain:
cmake_target_name += '--' + toolchain
return CMakeTargetEscape(cmake_target_name)
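# For example, '//base/test:test_support' maps to the CMake target name
# 'base__test_test_support' ('/' is not an allowed target character and is escaped
# to '__'); a '(toolchain)' suffix, when present, is appended after '--' before
# escaping.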
class Project(object):
def __init__(self, project_json):
self.targets = project_json['targets']
build_settings = project_json['build_settings']
self.root_path = build_settings['root_path']
self.build_path = posixpath.join(self.root_path,
build_settings['build_dir'][2:])
self.object_source_deps = {}
def GetAbsolutePath(self, path):
if path.startswith("//"):
return self.root_path + "/" + path[2:]
else:
return path
def GetObjectSourceDependencies(self, gn_target_name, object_dependencies):
"""All OBJECT libraries whose sources have not been absorbed."""
if gn_target_name in self.object_source_deps:
object_dependencies.update(self.object_source_deps[gn_target_name])
return
target_deps = set()
dependencies = self.targets[gn_target_name].get('deps', [])
for dependency in dependencies:
dependency_type = self.targets[dependency].get('type', None)
if dependency_type == 'source_set':
target_deps.add(dependency)
if dependency_type not in gn_target_types_that_absorb_objects:
self.GetObjectSourceDependencies(dependency, target_deps)
self.object_source_deps[gn_target_name] = target_deps
object_dependencies.update(target_deps)
def GetObjectLibraryDependencies(self, gn_target_name, object_dependencies):
"""All OBJECT libraries whose libraries have not been absorbed."""
dependencies = self.targets[gn_target_name].get('deps', [])
for dependency in dependencies:
dependency_type = self.targets[dependency].get('type', None)
if dependency_type == 'source_set':
object_dependencies.add(dependency)
self.GetObjectLibraryDependencies(dependency, object_dependencies)
class Target(object):
def __init__(self, gn_target_name, project):
self.gn_name = gn_target_name
self.properties = project.targets[self.gn_name]
self.cmake_name = GetCMakeTargetName(self.gn_name)
self.gn_type = self.properties.get('type', None)
self.cmake_type = cmake_target_types.get(self.gn_type, None)
def WriteAction(out, target, project, sources, synthetic_dependencies):
outputs = []
output_directories = set()
for output in target.properties.get('outputs', []):
output_abs_path = project.GetAbsolutePath(output)
outputs.append(output_abs_path)
output_directory = posixpath.dirname(output_abs_path)
if output_directory:
output_directories.add(output_directory)
outputs_name = '${target}__output'
SetVariableList(out, outputs_name, outputs)
out.write('add_custom_command(OUTPUT ')
WriteVariable(out, outputs_name)
out.write('\n')
if output_directories:
out.write(' COMMAND ${CMAKE_COMMAND} -E make_directory "')
out.write('" "'.join([CMakeStringEscape(d) for d in output_directories]))
out.write('"\n')
script = target.properties['script']
arguments = target.properties['args']
out.write(' COMMAND python "')
out.write(CMakeStringEscape(project.GetAbsolutePath(script)))
out.write('"')
if arguments:
out.write('\n "')
out.write('"\n "'.join([CMakeStringEscape(a) for a in arguments]))
out.write('"')
out.write('\n')
out.write(' DEPENDS ')
for sources_type_name in sources.values():
WriteVariable(out, sources_type_name, ' ')
out.write('\n')
#TODO: CMake 3.7 is introducing DEPFILE
out.write(' WORKING_DIRECTORY "')
out.write(CMakeStringEscape(project.build_path))
out.write('"\n')
out.write(' COMMENT "Action: ${target}"\n')
out.write(' VERBATIM)\n')
synthetic_dependencies.add(outputs_name)
def ExpandPlaceholders(source, a):
source_dir, source_file_part = posixpath.split(source)
source_name_part, _ = posixpath.splitext(source_file_part)
#TODO: {{source_gen_dir}}, {{source_out_dir}}, {{response_file_name}}
return a.replace('{{source}}', source) \
.replace('{{source_file_part}}', source_file_part) \
.replace('{{source_name_part}}', source_name_part) \
.replace('{{source_dir}}', source_dir) \
.replace('{{source_root_relative_dir}}', source_dir)
def WriteActionForEach(out, target, project, sources, synthetic_dependencies):
all_outputs = target.properties.get('outputs', [])
inputs = target.properties.get('sources', [])
# TODO: consider expanding 'output_patterns' instead.
  outputs_per_input = len(all_outputs) // len(inputs)  # integer division; used as a slice index below
for count, source in enumerate(inputs):
source_abs_path = project.GetAbsolutePath(source)
outputs = []
output_directories = set()
for output in all_outputs[outputs_per_input * count:
outputs_per_input * (count+1)]:
output_abs_path = project.GetAbsolutePath(output)
outputs.append(output_abs_path)
output_directory = posixpath.dirname(output_abs_path)
if output_directory:
output_directories.add(output_directory)
outputs_name = '${target}__output_' + str(count)
SetVariableList(out, outputs_name, outputs)
out.write('add_custom_command(OUTPUT ')
WriteVariable(out, outputs_name)
out.write('\n')
if output_directories:
out.write(' COMMAND ${CMAKE_COMMAND} -E make_directory "')
out.write('" "'.join([CMakeStringEscape(d) for d in output_directories]))
out.write('"\n')
script = target.properties['script']
# TODO: need to expand {{xxx}} in arguments
arguments = target.properties['args']
out.write(' COMMAND python "')
out.write(CMakeStringEscape(project.GetAbsolutePath(script)))
out.write('"')
if arguments:
out.write('\n "')
expand = functools.partial(ExpandPlaceholders, source_abs_path)
out.write('"\n "'.join(
[CMakeStringEscape(expand(a)) for a in arguments]))
out.write('"')
out.write('\n')
out.write(' DEPENDS')
if 'input' in sources:
WriteVariable(out, sources['input'], ' ')
out.write(' "')
out.write(CMakeStringEscape(source_abs_path))
out.write('"\n')
#TODO: CMake 3.7 is introducing DEPFILE
out.write(' WORKING_DIRECTORY "')
out.write(CMakeStringEscape(project.build_path))
out.write('"\n')
out.write(' COMMENT "Action ${target} on ')
out.write(CMakeStringEscape(source_abs_path))
out.write('"\n')
out.write(' VERBATIM)\n')
synthetic_dependencies.add(outputs_name)
def WriteCopy(out, target, project, sources, synthetic_dependencies):
inputs = target.properties.get('sources', [])
raw_outputs = target.properties.get('outputs', [])
# TODO: consider expanding 'output_patterns' instead.
outputs = []
for output in raw_outputs:
output_abs_path = project.GetAbsolutePath(output)
outputs.append(output_abs_path)
outputs_name = '${target}__output'
SetVariableList(out, outputs_name, outputs)
out.write('add_custom_command(OUTPUT ')
WriteVariable(out, outputs_name)
out.write('\n')
for src, dst in zip(inputs, outputs):
out.write(' COMMAND ${CMAKE_COMMAND} -E copy "')
out.write(CMakeStringEscape(project.GetAbsolutePath(src)))
out.write('" "')
out.write(CMakeStringEscape(dst))
out.write('"\n')
out.write(' DEPENDS ')
for sources_type_name in sources.values():
WriteVariable(out, sources_type_name, ' ')
out.write('\n')
out.write(' WORKING_DIRECTORY "')
out.write(CMakeStringEscape(project.build_path))
out.write('"\n')
out.write(' COMMENT "Copy ${target}"\n')
out.write(' VERBATIM)\n')
synthetic_dependencies.add(outputs_name)
def WriteCompilerFlags(out, target, project, sources):
# Hack, set linker language to c if no c or cxx files present.
if not 'c' in sources and not 'cxx' in sources:
SetCurrentTargetProperty(out, 'LINKER_LANGUAGE', ['C'])
# Mark uncompiled sources as uncompiled.
if 'input' in sources:
SetFilesProperty(out, sources['input'], 'HEADER_FILE_ONLY', ('True',), '')
if 'other' in sources:
SetFilesProperty(out, sources['other'], 'HEADER_FILE_ONLY', ('True',), '')
# Mark object sources as linkable.
if 'obj' in sources:
SetFilesProperty(out, sources['obj'], 'EXTERNAL_OBJECT', ('True',), '')
# TODO: 'output_name', 'output_dir', 'output_extension'
# This includes using 'source_outputs' to direct compiler output.
# Includes
includes = target.properties.get('include_dirs', [])
if includes:
out.write('set_property(TARGET "${target}" ')
out.write('APPEND PROPERTY INCLUDE_DIRECTORIES')
for include_dir in includes:
out.write('\n "')
out.write(project.GetAbsolutePath(include_dir))
out.write('"')
out.write(')\n')
# Defines
defines = target.properties.get('defines', [])
if defines:
SetCurrentTargetProperty(out, 'COMPILE_DEFINITIONS', defines, ';')
# Compile flags
# "arflags", "asmflags", "cflags",
# "cflags_c", "clfags_cc", "cflags_objc", "clfags_objcc"
# CMake does not have per target lang compile flags.
# TODO: $<$<COMPILE_LANGUAGE:CXX>:cflags_cc style generator expression.
# http://public.kitware.com/Bug/view.php?id=14857
flags = []
flags.extend(target.properties.get('cflags', []))
cflags_asm = target.properties.get('asmflags', [])
cflags_c = target.properties.get('cflags_c', [])
cflags_cxx = target.properties.get('cflags_cc', [])
if 'c' in sources and not any(k in sources for k in ('asm', 'cxx')):
flags.extend(cflags_c)
elif 'cxx' in sources and not any(k in sources for k in ('asm', 'c')):
flags.extend(cflags_cxx)
else:
# TODO: This is broken, one cannot generally set properties on files,
# as other targets may require different properties on the same files.
if 'asm' in sources and cflags_asm:
SetFilesProperty(out, sources['asm'], 'COMPILE_FLAGS', cflags_asm, ' ')
if 'c' in sources and cflags_c:
SetFilesProperty(out, sources['c'], 'COMPILE_FLAGS', cflags_c, ' ')
if 'cxx' in sources and cflags_cxx:
SetFilesProperty(out, sources['cxx'], 'COMPILE_FLAGS', cflags_cxx, ' ')
if flags:
SetCurrentTargetProperty(out, 'COMPILE_FLAGS', flags, ' ')
# Linker flags
ldflags = target.properties.get('ldflags', [])
if ldflags:
SetCurrentTargetProperty(out, 'LINK_FLAGS', ldflags, ' ')
gn_target_types_that_absorb_objects = (
'executable',
'loadable_module',
'shared_library',
'static_library'
)
def WriteSourceVariables(out, target, project):
# gn separates the sheep from the goats based on file extensions.
  # A full separation is done here because of flag handling (see Compile flags).
source_types = {'cxx':[], 'c':[], 'asm':[],
'obj':[], 'obj_target':[], 'input':[], 'other':[]}
# TODO .def files on Windows
for source in target.properties.get('sources', []):
_, ext = posixpath.splitext(source)
source_abs_path = project.GetAbsolutePath(source)
source_types[source_file_types.get(ext, 'other')].append(source_abs_path)
for input_path in target.properties.get('inputs', []):
input_abs_path = project.GetAbsolutePath(input_path)
source_types['input'].append(input_abs_path)
# OBJECT library dependencies need to be listed as sources.
# Only executables and non-OBJECT libraries may reference an OBJECT library.
# https://gitlab.kitware.com/cmake/cmake/issues/14778
if target.gn_type in gn_target_types_that_absorb_objects:
object_dependencies = set()
project.GetObjectSourceDependencies(target.gn_name, object_dependencies)
for dependency in object_dependencies:
cmake_dependency_name = GetCMakeTargetName(dependency)
obj_target_sources = '$<TARGET_OBJECTS:' + cmake_dependency_name + '>'
source_types['obj_target'].append(obj_target_sources)
sources = {}
for source_type, sources_of_type in source_types.items():
if sources_of_type:
sources[source_type] = '${target}__' + source_type + '_srcs'
SetVariableList(out, sources[source_type], sources_of_type)
return sources
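# The returned dict maps source categories to the CMake list variables just
# emitted, e.g. for a C++ target with OBJECT-library deps roughly:
#   {'cxx': '${target}__cxx_srcs', 'obj_target': '${target}__obj_target_srcs'}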
def WriteTarget(out, target, project):
out.write('\n#')
out.write(target.gn_name)
out.write('\n')
if target.cmake_type is None:
print('Target {} has unknown target type {}, skipping.'.format(
target.gn_name, target.gn_type))
return
SetVariable(out, 'target', target.cmake_name)
sources = WriteSourceVariables(out, target, project)
synthetic_dependencies = set()
if target.gn_type == 'action':
WriteAction(out, target, project, sources, synthetic_dependencies)
if target.gn_type == 'action_foreach':
WriteActionForEach(out, target, project, sources, synthetic_dependencies)
if target.gn_type == 'copy':
WriteCopy(out, target, project, sources, synthetic_dependencies)
out.write(target.cmake_type.command)
out.write('("${target}"')
if target.cmake_type.modifier is not None:
out.write(' ')
out.write(target.cmake_type.modifier)
for sources_type_name in sources.values():
WriteVariable(out, sources_type_name, ' ')
if synthetic_dependencies:
out.write(' DEPENDS')
    for synthetic_dependency in synthetic_dependencies:
      WriteVariable(out, synthetic_dependency, ' ')
out.write(')\n')
if target.cmake_type.command != 'add_custom_target':
WriteCompilerFlags(out, target, project, sources)
libraries = set()
nonlibraries = set()
dependencies = set(target.properties.get('deps', []))
# Transitive OBJECT libraries are in sources.
# Those sources are dependent on the OBJECT library dependencies.
# Those sources cannot bring in library dependencies.
object_dependencies = set()
if target.gn_type != 'source_set':
project.GetObjectLibraryDependencies(target.gn_name, object_dependencies)
for object_dependency in object_dependencies:
dependencies.update(project.targets.get(object_dependency).get('deps', []))
for dependency in dependencies:
gn_dependency_type = project.targets.get(dependency, {}).get('type', None)
cmake_dependency_type = cmake_target_types.get(gn_dependency_type, None)
cmake_dependency_name = GetCMakeTargetName(dependency)
if cmake_dependency_type.command != 'add_library':
nonlibraries.add(cmake_dependency_name)
elif cmake_dependency_type.modifier != 'OBJECT':
if target.cmake_type.is_linkable:
libraries.add(cmake_dependency_name)
else:
nonlibraries.add(cmake_dependency_name)
# Non-library dependencies.
if nonlibraries:
out.write('add_dependencies("${target}"')
for nonlibrary in nonlibraries:
out.write('\n "')
out.write(nonlibrary)
out.write('"')
out.write(')\n')
# Non-OBJECT library dependencies.
external_libraries = target.properties.get('libs', [])
if target.cmake_type.is_linkable and (external_libraries or libraries):
library_dirs = target.properties.get('lib_dirs', [])
if library_dirs:
SetVariableList(out, '${target}__library_directories', library_dirs)
system_libraries = []
for external_library in external_libraries:
if '/' in external_library:
libraries.add(project.GetAbsolutePath(external_library))
else:
if external_library.endswith('.framework'):
external_library = external_library[:-len('.framework')]
system_library = 'library__' + external_library
if library_dirs:
system_library = system_library + '__for_${target}'
out.write('find_library("')
out.write(CMakeStringEscape(system_library))
out.write('" "')
out.write(CMakeStringEscape(external_library))
out.write('"')
if library_dirs:
out.write(' PATHS "')
WriteVariable(out, '${target}__library_directories')
out.write('"')
out.write(')\n')
system_libraries.append(system_library)
out.write('target_link_libraries("${target}"')
for library in libraries:
out.write('\n "')
out.write(CMakeStringEscape(library))
out.write('"')
for system_library in system_libraries:
WriteVariable(out, system_library, '\n "')
out.write('"')
out.write(')\n')
def WriteProject(project):
out = open(posixpath.join(project.build_path, 'CMakeLists.txt'), 'w+')
out.write('# Generated by gn_to_cmake.py.\n')
out.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
out.write('cmake_policy(VERSION 2.8.8)\n\n')
# Update the gn generated ninja build.
# If a build file has changed, this will update CMakeLists.ext if
# gn gen out/config --ide=json --json-ide-script=../../gn/gn_to_cmake.py
# style was used to create this config.
out.write('execute_process(COMMAND ninja -C "')
out.write(CMakeStringEscape(project.build_path))
out.write('" build.ninja)\n')
out.write('include(CMakeLists.ext)\n')
out.close()
out = open(posixpath.join(project.build_path, 'CMakeLists.ext'), 'w+')
out.write('# Generated by gn_to_cmake.py.\n')
out.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
out.write('cmake_policy(VERSION 2.8.8)\n')
# The following appears to be as-yet undocumented.
# http://public.kitware.com/Bug/view.php?id=8392
out.write('enable_language(ASM)\n\n')
# ASM-ATT does not support .S files.
# output.write('enable_language(ASM-ATT)\n')
# Current issues with automatic re-generation:
# The gn generated build.ninja target uses build.ninja.d
# but build.ninja.d does not contain the ide or gn.
# Currently the ide is not run if the project.json file is not changed
# but the ide needs to be run anyway if it has itself changed.
# This can be worked around by deleting the project.json file.
out.write('file(READ "')
gn_deps_file = posixpath.join(project.build_path, 'build.ninja.d')
out.write(CMakeStringEscape(gn_deps_file))
out.write('" "gn_deps_string" OFFSET ')
out.write(str(len('build.ninja: ')))
out.write(')\n')
# One would think this would need to worry about escaped spaces
# but gn doesn't escape spaces here (it generates invalid .d files).
out.write('string(REPLACE " " ";" "gn_deps" ${gn_deps_string})\n')
out.write('foreach("gn_dep" ${gn_deps})\n')
out.write(' configure_file(${gn_dep} "CMakeLists.devnull" COPYONLY)\n')
out.write('endforeach("gn_dep")\n')
for target_name in project.targets.keys():
out.write('\n')
WriteTarget(out, Target(target_name, project), project)
def main():
if len(sys.argv) != 2:
print('Usage: ' + sys.argv[0] + ' <json_file_name>')
exit(1)
json_path = sys.argv[1]
project = None
with open(json_path, 'r') as json_file:
project = json.loads(json_file.read())
WriteProject(Project(project))
if __name__ == "__main__":
main()
|
|
from __future__ import unicode_literals
import django_filters
from netaddr import IPNetwork
from netaddr.core import AddrFormatError
from django.db.models import Q
from dcim.models import Site, Device, Interface
from extras.filters import CustomFieldFilterSet
from tenancy.models import Tenant
from utilities.filters import NullableModelMultipleChoiceFilter, NumericInFilter
from .models import (
Aggregate, IPAddress, IPADDRESS_ROLE_CHOICES, IPADDRESS_STATUS_CHOICES, Prefix, PREFIX_STATUS_CHOICES, RIR, Role,
Service, VLAN, VLAN_STATUS_CHOICES, VLANGroup, VRF,
)
class VRFFilter(CustomFieldFilterSet, django_filters.FilterSet):
id__in = NumericInFilter(name='id', lookup_expr='in')
q = django_filters.CharFilter(
method='search',
label='Search',
)
tenant_id = NullableModelMultipleChoiceFilter(
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(
Q(name__icontains=value) |
Q(rd__icontains=value) |
Q(description__icontains=value)
)
class Meta:
model = VRF
fields = ['name', 'rd', 'enforce_unique']
class RIRFilter(django_filters.FilterSet):
id__in = NumericInFilter(name='id', lookup_expr='in')
class Meta:
model = RIR
fields = ['name', 'slug', 'is_private']
class AggregateFilter(CustomFieldFilterSet, django_filters.FilterSet):
id__in = NumericInFilter(name='id', lookup_expr='in')
q = django_filters.CharFilter(
method='search',
label='Search',
)
rir_id = django_filters.ModelMultipleChoiceFilter(
queryset=RIR.objects.all(),
label='RIR (ID)',
)
rir = django_filters.ModelMultipleChoiceFilter(
name='rir__slug',
queryset=RIR.objects.all(),
to_field_name='slug',
label='RIR (slug)',
)
class Meta:
model = Aggregate
fields = ['family', 'date_added']
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(description__icontains=value)
try:
prefix = str(IPNetwork(value.strip()).cidr)
qs_filter |= Q(prefix__net_contains_or_equals=prefix)
except (AddrFormatError, ValueError):
pass
return queryset.filter(qs_filter)
class RoleFilter(django_filters.FilterSet):
class Meta:
model = Role
fields = ['name', 'slug']
class PrefixFilter(CustomFieldFilterSet, django_filters.FilterSet):
id__in = NumericInFilter(name='id', lookup_expr='in')
q = django_filters.CharFilter(
method='search',
label='Search',
)
parent = django_filters.CharFilter(
method='search_by_parent',
label='Parent prefix',
)
mask_length = django_filters.NumberFilter(
method='filter_mask_length',
label='Mask length',
)
vrf_id = NullableModelMultipleChoiceFilter(
queryset=VRF.objects.all(),
label='VRF',
)
vrf = NullableModelMultipleChoiceFilter(
name='vrf',
queryset=VRF.objects.all(),
to_field_name='rd',
label='VRF (RD)',
)
tenant_id = NullableModelMultipleChoiceFilter(
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
site_id = NullableModelMultipleChoiceFilter(
queryset=Site.objects.all(),
label='Site (ID)',
)
site = NullableModelMultipleChoiceFilter(
name='site',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
vlan_id = NullableModelMultipleChoiceFilter(
queryset=VLAN.objects.all(),
label='VLAN (ID)',
)
vlan_vid = django_filters.NumberFilter(
name='vlan__vid',
label='VLAN number (1-4095)',
)
role_id = NullableModelMultipleChoiceFilter(
queryset=Role.objects.all(),
label='Role (ID)',
)
role = NullableModelMultipleChoiceFilter(
name='role',
queryset=Role.objects.all(),
to_field_name='slug',
label='Role (slug)',
)
status = django_filters.MultipleChoiceFilter(
choices=PREFIX_STATUS_CHOICES
)
class Meta:
model = Prefix
fields = ['family', 'is_pool']
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(description__icontains=value)
try:
prefix = str(IPNetwork(value.strip()).cidr)
qs_filter |= Q(prefix__net_contains_or_equals=prefix)
except (AddrFormatError, ValueError):
pass
return queryset.filter(qs_filter)
def search_by_parent(self, queryset, name, value):
value = value.strip()
if not value:
return queryset
try:
query = str(IPNetwork(value).cidr)
return queryset.filter(prefix__net_contained_or_equal=query)
except (AddrFormatError, ValueError):
return queryset.none()
def filter_mask_length(self, queryset, name, value):
if not value:
return queryset
return queryset.filter(prefix__net_mask_length=value)
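# Illustrative use of the filter set (standard django-filter API; values are made up):
#   PrefixFilter({'parent': '10.0.0.0/16', 'mask_length': 24},
#                queryset=Prefix.objects.all()).qs
# returns prefixes contained in 10.0.0.0/16 whose mask length is /24.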
class IPAddressFilter(CustomFieldFilterSet, django_filters.FilterSet):
id__in = NumericInFilter(name='id', lookup_expr='in')
q = django_filters.CharFilter(
method='search',
label='Search',
)
parent = django_filters.CharFilter(
method='search_by_parent',
label='Parent prefix',
)
mask_length = django_filters.NumberFilter(
method='filter_mask_length',
label='Mask length',
)
vrf_id = NullableModelMultipleChoiceFilter(
queryset=VRF.objects.all(),
label='VRF',
)
vrf = NullableModelMultipleChoiceFilter(
name='vrf',
queryset=VRF.objects.all(),
to_field_name='rd',
label='VRF (RD)',
)
tenant_id = NullableModelMultipleChoiceFilter(
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
device_id = django_filters.ModelMultipleChoiceFilter(
name='interface__device',
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='interface__device__name',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
interface_id = django_filters.ModelMultipleChoiceFilter(
queryset=Interface.objects.all(),
label='Interface (ID)',
)
status = django_filters.MultipleChoiceFilter(
choices=IPADDRESS_STATUS_CHOICES
)
role = django_filters.MultipleChoiceFilter(
choices=IPADDRESS_ROLE_CHOICES
)
class Meta:
model = IPAddress
fields = ['family']
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(description__icontains=value)
try:
ipaddress = str(IPNetwork(value.strip()))
qs_filter |= Q(address__net_host=ipaddress)
except (AddrFormatError, ValueError):
pass
return queryset.filter(qs_filter)
def search_by_parent(self, queryset, name, value):
value = value.strip()
if not value:
return queryset
try:
query = str(IPNetwork(value.strip()).cidr)
return queryset.filter(address__net_host_contained=query)
except (AddrFormatError, ValueError):
return queryset.none()
def filter_mask_length(self, queryset, name, value):
if not value:
return queryset
return queryset.filter(address__net_mask_length=value)
class VLANGroupFilter(django_filters.FilterSet):
site_id = NullableModelMultipleChoiceFilter(
queryset=Site.objects.all(),
label='Site (ID)',
)
site = NullableModelMultipleChoiceFilter(
name='site',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
class Meta:
model = VLANGroup
fields = ['name', 'slug']
class VLANFilter(CustomFieldFilterSet, django_filters.FilterSet):
id__in = NumericInFilter(name='id', lookup_expr='in')
q = django_filters.CharFilter(
method='search',
label='Search',
)
site_id = NullableModelMultipleChoiceFilter(
queryset=Site.objects.all(),
label='Site (ID)',
)
site = NullableModelMultipleChoiceFilter(
name='site',
queryset=Site.objects.all(),
to_field_name='slug',
label='Site (slug)',
)
group_id = NullableModelMultipleChoiceFilter(
queryset=VLANGroup.objects.all(),
label='Group (ID)',
)
group = NullableModelMultipleChoiceFilter(
name='group',
queryset=VLANGroup.objects.all(),
to_field_name='slug',
label='Group',
)
tenant_id = NullableModelMultipleChoiceFilter(
queryset=Tenant.objects.all(),
label='Tenant (ID)',
)
tenant = NullableModelMultipleChoiceFilter(
name='tenant',
queryset=Tenant.objects.all(),
to_field_name='slug',
label='Tenant (slug)',
)
role_id = NullableModelMultipleChoiceFilter(
queryset=Role.objects.all(),
label='Role (ID)',
)
role = NullableModelMultipleChoiceFilter(
name='role',
queryset=Role.objects.all(),
to_field_name='slug',
label='Role (slug)',
)
status = django_filters.MultipleChoiceFilter(
choices=VLAN_STATUS_CHOICES
)
class Meta:
model = VLAN
fields = ['vid', 'name']
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(name__icontains=value) | Q(description__icontains=value)
try:
qs_filter |= Q(vid=int(value.strip()))
except ValueError:
pass
return queryset.filter(qs_filter)
class ServiceFilter(django_filters.FilterSet):
device_id = django_filters.ModelMultipleChoiceFilter(
queryset=Device.objects.all(),
label='Device (ID)',
)
device = django_filters.ModelMultipleChoiceFilter(
name='device__name',
queryset=Device.objects.all(),
to_field_name='name',
label='Device (name)',
)
class Meta:
model = Service
fields = ['name', 'protocol', 'port']
|
|
from __future__ import division, absolute_import, print_function
import sys
from functools import reduce
import numpy as np
from numpy.ma import *
from numpy.core.numerictypes import float32
from numpy.ma.core import umath
from numpy.testing import *
pi = np.pi
def eq(v, w, msg=''):
result = allclose(v, w)
if not result:
print("""Not eq:%s
%s
----
%s""" % (msg, str(v), str(w)))
return result
class TestMa(TestCase):
def setUp(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
xm = array(x, mask=m1)
ym = array(y, mask=m2)
z = np.array([-.5, 0., .5, .8])
zm = array(z, mask=[0, 1, 0, 0])
xf = np.where(m1, 1e+20, x)
s = x.shape
xm.set_fill_value(1e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)
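    # self.d packs the shared fixtures: raw arrays x and y, the scalar a10, masks
    # m1/m2, the corresponding masked arrays xm/ym (xm's fill value set to 1e20),
    # a small array z with its masked version zm, xf (x with masked slots replaced
    # by 1e20), and the common shape s.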
def test_testBasic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertFalse(isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertEqual(shape(xm), s)
self.assertEqual(xm.shape, s)
self.assertEqual(xm.dtype, x.dtype)
self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
self.assertEqual(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
self.assertTrue(eq(xm, xf))
self.assertTrue(eq(filled(xm, 1.e20), xf))
self.assertTrue(eq(x, xm))
def test_testBasic2d(self):
# Test of basic array creation and properties in 2 dimensions.
for s in [(4, 3), (6, 2)]:
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.assertFalse(isMaskedArray(x))
self.assertTrue(isMaskedArray(xm))
self.assertEqual(shape(xm), s)
self.assertEqual(xm.shape, s)
self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
self.assertEqual(count(xm),
len(m1) - reduce(lambda x, y:x + y, m1))
self.assertTrue(eq(xm, xf))
self.assertTrue(eq(filled(xm, 1.e20), xf))
self.assertTrue(eq(x, xm))
self.setUp()
def test_testArithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
self.assertTrue(eq(a2d * a2d, a2d * a2dm))
self.assertTrue(eq(a2d + a2d, a2d + a2dm))
self.assertTrue(eq(a2d - a2d, a2d - a2dm))
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
self.assertTrue(eq(-x, -xm))
self.assertTrue(eq(x + y, xm + ym))
self.assertTrue(eq(x - y, xm - ym))
self.assertTrue(eq(x * y, xm * ym))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(x / y, xm / ym))
self.assertTrue(eq(a10 + y, a10 + ym))
self.assertTrue(eq(a10 - y, a10 - ym))
self.assertTrue(eq(a10 * y, a10 * ym))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(a10 / y, a10 / ym))
self.assertTrue(eq(x + a10, xm + a10))
self.assertTrue(eq(x - a10, xm - a10))
self.assertTrue(eq(x * a10, xm * a10))
self.assertTrue(eq(x / a10, xm / a10))
self.assertTrue(eq(x ** 2, xm ** 2))
self.assertTrue(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
self.assertTrue(eq(x ** y, xm ** ym))
self.assertTrue(eq(np.add(x, y), add(xm, ym)))
self.assertTrue(eq(np.subtract(x, y), subtract(xm, ym)))
self.assertTrue(eq(np.multiply(x, y), multiply(xm, ym)))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(np.divide(x, y), divide(xm, ym)))
def test_testMixedArithmetic(self):
na = np.array([1])
ma = array([1])
self.assertTrue(isinstance(na + ma, MaskedArray))
self.assertTrue(isinstance(ma + na, MaskedArray))
def test_testUfuncs1(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertTrue(eq(np.cos(x), cos(xm)))
self.assertTrue(eq(np.cosh(x), cosh(xm)))
self.assertTrue(eq(np.sin(x), sin(xm)))
self.assertTrue(eq(np.sinh(x), sinh(xm)))
self.assertTrue(eq(np.tan(x), tan(xm)))
self.assertTrue(eq(np.tanh(x), tanh(xm)))
with np.errstate(divide='ignore', invalid='ignore'):
self.assertTrue(eq(np.sqrt(abs(x)), sqrt(xm)))
self.assertTrue(eq(np.log(abs(x)), log(xm)))
self.assertTrue(eq(np.log10(abs(x)), log10(xm)))
self.assertTrue(eq(np.exp(x), exp(xm)))
self.assertTrue(eq(np.arcsin(z), arcsin(zm)))
self.assertTrue(eq(np.arccos(z), arccos(zm)))
self.assertTrue(eq(np.arctan(z), arctan(zm)))
self.assertTrue(eq(np.arctan2(x, y), arctan2(xm, ym)))
self.assertTrue(eq(np.absolute(x), absolute(xm)))
self.assertTrue(eq(np.equal(x, y), equal(xm, ym)))
self.assertTrue(eq(np.not_equal(x, y), not_equal(xm, ym)))
self.assertTrue(eq(np.less(x, y), less(xm, ym)))
self.assertTrue(eq(np.greater(x, y), greater(xm, ym)))
self.assertTrue(eq(np.less_equal(x, y), less_equal(xm, ym)))
self.assertTrue(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
self.assertTrue(eq(np.conjugate(x), conjugate(xm)))
self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, ym))))
self.assertTrue(eq(np.concatenate((x, y)), concatenate((x, y))))
self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, y))))
self.assertTrue(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
def test_xtestCount(self):
# Test count
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
if sys.version_info[0] >= 3:
self.assertTrue(isinstance(count(ott), np.integer))
else:
self.assertTrue(isinstance(count(ott), int))
self.assertEqual(3, count(ott))
self.assertEqual(1, count(1))
self.assertTrue(eq(0, array(1, mask=[1])))
ott = ott.reshape((2, 2))
assert_(isinstance(count(ott, 0), np.ndarray))
if sys.version_info[0] >= 3:
assert_(isinstance(count(ott), np.integer))
else:
assert_(isinstance(count(ott), int))
self.assertTrue(eq(3, count(ott)))
assert_(getmask(count(ott, 0)) is nomask)
self.assertTrue(eq([1, 2], count(ott, 0)))
def test_testMinMax(self):
# Test minimum and maximum.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
xr = np.ravel(x) # max doesn't work if shaped
xmr = ravel(xm)
# true because of careful selection of data
self.assertTrue(eq(max(xr), maximum(xmr)))
self.assertTrue(eq(min(xr), minimum(xmr)))
def test_testAddSumProd(self):
# Test add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
self.assertTrue(eq(4, sum(array(4), axis=0)))
self.assertTrue(eq(4, sum(array(4), axis=0)))
self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
self.assertTrue(eq(np.product(x, 0), product(x, 0)))
self.assertTrue(eq(np.product(filled(xm, 1), axis=0),
product(xm, axis=0)))
if len(s) > 1:
self.assertTrue(eq(np.concatenate((x, y), 1),
concatenate((xm, ym), 1)))
self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
self.assertTrue(eq(np.product(x, 1), product(x, 1)))
def test_testCI(self):
# Test of conversions and indexing
x1 = np.array([1, 2, 4, 3])
x2 = array(x1, mask=[1, 0, 0, 0])
x3 = array(x1, mask=[0, 1, 0, 1])
x4 = array(x1)
# test conversion to strings
junk, garbage = str(x2), repr(x2)
assert_(eq(np.sort(x1), sort(x2, fill_value=0)))
# tests of indexing
assert_(type(x2[1]) is type(x1[1]))
assert_(x1[1] == x2[1])
assert_(x2[0] is masked)
assert_(eq(x1[2], x2[2]))
assert_(eq(x1[2:5], x2[2:5]))
assert_(eq(x1[:], x2[:]))
assert_(eq(x1[1:], x3[1:]))
x1[2] = 9
x2[2] = 9
assert_(eq(x1, x2))
x1[1:3] = 99
x2[1:3] = 99
assert_(eq(x1, x2))
x2[1] = masked
assert_(eq(x1, x2))
x2[1:3] = masked
assert_(eq(x1, x2))
x2[:] = x1
x2[1] = masked
assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
assert_(allequal(x4, array([1, 2, 3, 4])))
x1 = np.arange(5) * 1.0
x2 = masked_values(x1, 3.0)
assert_(eq(x1, x2))
assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
assert_(eq(3.0, x2.fill_value))
x1 = array([1, 'hello', 2, 3], object)
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
self.assertEqual(type(s2), str)
self.assertEqual(type(s1), str)
self.assertEqual(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_testCopySize(self):
# Tests of some subtle points of copying and sizing.
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
self.assertTrue(m is m2)
m3 = make_mask(m, copy=1)
self.assertTrue(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
self.assertTrue(y1._data is not x1)
self.assertTrue(allequal(x1, y1._data))
self.assertTrue(y1.mask is m)
y1a = array(y1, copy=0)
self.assertTrue(y1a.mask is y1.mask)
y2 = array(x1, mask=m, copy=0)
self.assertTrue(y2.mask is m)
self.assertTrue(y2[2] is masked)
y2[2] = 9
self.assertTrue(y2[2] is not masked)
self.assertTrue(y2.mask is not m)
self.assertTrue(allequal(y2.mask, 0))
y3 = array(x1 * 1.0, mask=m)
self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
self.assertTrue(eq(concatenate([x4, x4]), y4))
self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
y5 = repeat(x4, (2, 2, 2, 2), axis=0)
self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
y6 = repeat(x4, 2, axis=0)
self.assertTrue(eq(y5, y6))
def test_testPut(self):
# Test of put
d = arange(5)
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
x[[1, 4]] = [10, 40]
self.assertTrue(x.mask is not m)
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is not masked)
self.assertTrue(eq(x, [0, 10, 2, -1, 40]))
x = array(d, mask=m)
x.put([0, 1, 2], [-1, 100, 200])
self.assertTrue(eq(x, [-1, 100, 200, 0, 0]))
self.assertTrue(x[3] is masked)
self.assertTrue(x[4] is masked)
def test_testMaPut(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]
i = np.nonzero(m)[0]
put(ym, i, zm)
assert_(all(take(ym, i, axis=0) == zm))
def test_testOddFeatures(self):
# Test of other odd features
x = arange(20)
x = x.reshape(4, 5)
x.flat[5] = 12
assert_(x[1, 0] == 12)
z = x + 10j * x
assert_(eq(z.real, x))
assert_(eq(z.imag, 10 * x))
assert_(eq((z * conjugate(z)).real, 101 * x * x))
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert_(str(x[3]) == str(masked))
c = x >= 8
assert_(count(where(c, masked, masked)) == 0)
assert_(shape(where(c, masked, masked)) == c.shape)
z = where(c, x, masked)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is masked)
assert_(z[7] is masked)
assert_(z[8] is not masked)
assert_(z[9] is not masked)
assert_(eq(x, z))
z = where(c, masked, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
z = masked_where(c, x)
assert_(z.dtype is x.dtype)
assert_(z[3] is masked)
assert_(z[4] is not masked)
assert_(z[7] is not masked)
assert_(z[8] is masked)
assert_(z[9] is masked)
assert_(eq(x, z))
x = array([1., 2., 3., 4., 5.])
c = array([1, 1, 1, 0, 0])
x[2] = masked
z = where(c, x, -x)
assert_(eq(z, [1., 2., 0., -4., -5]))
c[0] = masked
z = where(c, x, -x)
assert_(eq(z, [1., 2., 0., -4., -5]))
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
assert_(eq(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x, 2)))
assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
assert_(eq(masked_inside(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 1, 3).mask,
[1, 1, 1, 1, 0]))
assert_(eq(masked_outside(array(list(range(5)),
mask=[0, 1, 0, 0, 0]), 1, 3).mask,
[1, 1, 0, 0, 1]))
assert_(eq(masked_equal(array(list(range(5)),
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 0]))
assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
mask=[1, 0, 0, 0, 0]), 2).mask,
[1, 0, 1, 0, 1]))
assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
[99, 99, 3, 4, 5]))
atest = ones((10, 10, 10), dtype=float32)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest, atest)
assert_(eq(atest, ctest))
z = choose(c, (-x, x))
assert_(eq(z, [1., 2., 0., -4., -5]))
assert_(z[0] is masked)
assert_(z[1] is not masked)
assert_(z[2] is masked)
x = arange(6)
x[5] = masked
y = arange(6) * 10
y[2] = masked
c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
cm = c.filled(1)
z = where(c, x, y)
zm = where(cm, x, y)
assert_(eq(z, zm))
assert_(getmask(zm) is nomask)
assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
z = where(c, masked, 1)
assert_(eq(z, [99, 99, 99, 1, 1, 1]))
z = where(c, 1, masked)
assert_(eq(z, [99, 1, 1, 99, 99, 99]))
def test_testMinMax2(self):
        # Test of minimum, maximum.
assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))
assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert_(eq(minimum(x, y), where(less(x, y), x, y)))
assert_(eq(maximum(x, y), where(greater(x, y), x, y)))
assert_(minimum(x) == 0)
assert_(maximum(x) == 4)
def test_testTakeTransposeInnerOuter(self):
# Test of take, transpose, inner, outer products
x = arange(24)
y = np.arange(24)
x[5:6] = masked
x = x.reshape(2, 3, 4)
y = y.reshape(2, 3, 4)
assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
assert_(eq(np.inner(filled(x, 0), filled(y, 0)),
inner(x, y)))
assert_(eq(np.outer(filled(x, 0), filled(y, 0)),
outer(x, y)))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y, [0, 3, 4])
assert_(t[0] == 'abc')
assert_(t[1] == 2)
assert_(t[2] == 3)
def test_testInplace(self):
# Test of inplace operations and rich comparisons
y = arange(10)
x = arange(10)
xm = arange(10)
xm[2] = masked
x += 1
assert_(eq(x, y + 1))
xm += 1
assert_(eq(x, y + 1))
x = arange(10)
xm = arange(10)
xm[2] = masked
x -= 1
assert_(eq(x, y - 1))
xm -= 1
assert_(eq(xm, y - 1))
x = arange(10) * 1.0
xm = arange(10) * 1.0
xm[2] = masked
x *= 2.0
assert_(eq(x, y * 2))
xm *= 2.0
assert_(eq(xm, y * 2))
x = arange(10) * 2
xm = arange(10)
xm[2] = masked
x //= 2
assert_(eq(x, y))
xm //= 2
assert_(eq(x, y))
x = arange(10) * 1.0
xm = arange(10) * 1.0
xm[2] = masked
x /= 2.0
assert_(eq(x, y / 2.0))
xm /= arange(10)
assert_(eq(xm, ones((10,))))
x = arange(10).astype(float32)
xm = arange(10)
xm[2] = masked
x += 1.
assert_(eq(x, y + 1.))
def test_testPickle(self):
# Test of pickling
import pickle
x = arange(12)
x[4:10:2] = masked
x = x.reshape(4, 3)
s = pickle.dumps(x)
y = pickle.loads(s)
assert_(eq(x, y))
def test_testMasked(self):
# Test of masked element
xx = arange(6)
xx[1] = masked
self.assertTrue(str(masked) == '--')
self.assertTrue(xx[1] is masked)
self.assertEqual(filled(xx[1], 0), 0)
# don't know why these should raise an exception...
#self.assertRaises(Exception, lambda x,y: x+y, masked, masked)
#self.assertRaises(Exception, lambda x,y: x+y, masked, 2)
#self.assertRaises(Exception, lambda x,y: x+y, masked, xx)
#self.assertRaises(Exception, lambda x,y: x+y, xx, masked)
def test_testAverage1(self):
# Test of average.
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
self.assertTrue(eq(2.0, average(ott, axis=0)))
self.assertTrue(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
self.assertTrue(eq(2.0, result))
self.assertTrue(wts == 4.0)
ott[:] = masked
self.assertTrue(average(ott, axis=0) is masked)
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
ott = ott.reshape(2, 2)
ott[:, 1] = masked
self.assertTrue(eq(average(ott, axis=0), [2.0, 0.0]))
self.assertTrue(average(ott, axis=1)[0] is masked)
self.assertTrue(eq([2., 0.], average(ott, axis=0)))
result, wts = average(ott, axis=0, returned=1)
self.assertTrue(eq(wts, [1., 0.]))
def test_testAverage2(self):
# More tests of average.
w1 = [0, 1, 1, 1, 1, 0]
w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
x = arange(6)
self.assertTrue(allclose(average(x, axis=0), 2.5))
self.assertTrue(allclose(average(x, axis=0, weights=w1), 2.5))
y = array([arange(6), 2.0 * arange(6)])
self.assertTrue(allclose(average(y, None),
np.add.reduce(np.arange(6)) * 3. / 12.))
self.assertTrue(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
self.assertTrue(allclose(average(y, axis=1),
[average(x, axis=0), average(x, axis=0)*2.0]))
self.assertTrue(allclose(average(y, None, weights=w2), 20. / 6.))
self.assertTrue(allclose(average(y, axis=0, weights=w2),
[0., 1., 2., 3., 4., 10.]))
self.assertTrue(allclose(average(y, axis=1),
[average(x, axis=0), average(x, axis=0)*2.0]))
m1 = zeros(6)
m2 = [0, 0, 1, 1, 0, 0]
m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
m4 = ones(6)
m5 = [0, 1, 1, 1, 1, 1]
self.assertTrue(allclose(average(masked_array(x, m1), axis=0), 2.5))
self.assertTrue(allclose(average(masked_array(x, m2), axis=0), 2.5))
self.assertTrue(average(masked_array(x, m4), axis=0) is masked)
self.assertEqual(average(masked_array(x, m5), axis=0), 0.0)
self.assertEqual(count(average(masked_array(x, m4), axis=0)), 0)
z = masked_array(y, m3)
self.assertTrue(allclose(average(z, None), 20. / 6.))
self.assertTrue(allclose(average(z, axis=0),
[0., 1., 99., 99., 4.0, 7.5]))
self.assertTrue(allclose(average(z, axis=1), [2.5, 5.0]))
self.assertTrue(allclose(average(z, axis=0, weights=w2),
[0., 1., 99., 99., 4.0, 10.0]))
a = arange(6)
b = arange(6) * 3
r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)
self.assertEqual(shape(r1), shape(w1))
self.assertEqual(r1.shape, w1.shape)
r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)
self.assertEqual(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), returned=1)
self.assertEqual(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1)
self.assertTrue(shape(w2) == shape(r2))
a2d = array([[1, 2], [0, 4]], float)
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
a2da = average(a2d, axis=0)
self.assertTrue(eq(a2da, [0.5, 3.0]))
a2dma = average(a2dm, axis=0)
self.assertTrue(eq(a2dma, [1.0, 3.0]))
a2dma = average(a2dm, axis=None)
self.assertTrue(eq(a2dma, 7. / 3.))
a2dma = average(a2dm, axis=1)
self.assertTrue(eq(a2dma, [1.5, 4.0]))
def test_testToPython(self):
self.assertEqual(1, int(array(1)))
self.assertEqual(1.0, float(array(1)))
self.assertEqual(1, int(array([[[1]]])))
self.assertEqual(1.0, float(array([[1]])))
self.assertRaises(TypeError, float, array([1, 1]))
self.assertRaises(ValueError, bool, array([0, 1]))
self.assertRaises(ValueError, bool, array([0, 0], mask=[0, 1]))
def test_testScalarArithmetic(self):
xm = array(0, mask=1)
        # TODO FIXME: Find out why the following raises a warning in r8247
with np.errstate():
np.seterr(divide='ignore')
self.assertTrue((1 / array(0)).mask)
self.assertTrue((1 + xm).mask)
self.assertTrue((-xm).mask)
self.assertTrue((-xm).mask)
self.assertTrue(maximum(xm, xm).mask)
self.assertTrue(minimum(xm, xm).mask)
self.assertTrue(xm.filled().dtype is xm._data.dtype)
x = array(0, mask=0)
self.assertTrue(x.filled() == x._data)
self.assertEqual(str(xm), str(masked_print_option))
def test_testArrayMethods(self):
a = array([1, 3, 2])
self.assertTrue(eq(a.any(), a._data.any()))
self.assertTrue(eq(a.all(), a._data.all()))
self.assertTrue(eq(a.argmax(), a._data.argmax()))
self.assertTrue(eq(a.argmin(), a._data.argmin()))
self.assertTrue(eq(a.choose(0, 1, 2, 3, 4),
a._data.choose(0, 1, 2, 3, 4)))
self.assertTrue(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
self.assertTrue(eq(a.conj(), a._data.conj()))
self.assertTrue(eq(a.conjugate(), a._data.conjugate()))
m = array([[1, 2], [3, 4]])
self.assertTrue(eq(m.diagonal(), m._data.diagonal()))
self.assertTrue(eq(a.sum(), a._data.sum()))
self.assertTrue(eq(a.take([1, 2]), a._data.take([1, 2])))
self.assertTrue(eq(m.transpose(), m._data.transpose()))
def test_testArrayAttributes(self):
a = array([1, 3, 2])
self.assertEqual(a.ndim, 1)
def test_testAPI(self):
self.assertFalse([m for m in dir(np.ndarray)
if m not in dir(MaskedArray) and
not m.startswith('_')])
def test_testSingleElementSubscript(self):
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
self.assertEqual(a[0].shape, ())
self.assertEqual(b[0].shape, ())
self.assertEqual(b[1].shape, ())
class TestUfuncs(TestCase):
def setUp(self):
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
def test_testUfuncRegression(self):
f_invalid_ignore = [
'sqrt', 'arctanh', 'arcsin', 'arccos',
'arccosh', 'arctanh', 'log', 'log10', 'divide',
'true_divide', 'floor_divide', 'remainder', 'fmod']
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
# 'nonzero', 'around',
'floor', 'ceil',
# 'sometrue', 'alltrue',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor']:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(np.ma, f)
args = self.d[:uf.nin]
with np.errstate():
if f in f_invalid_ignore:
np.seterr(invalid='ignore')
if f in ['arctanh', 'log', 'log10']:
np.seterr(divide='ignore')
ur = uf(*args)
mr = mf(*args)
self.assertTrue(eq(ur.filled(0), mr.filled(0), f))
self.assertTrue(eqmask(ur.mask, mr.mask))
def test_reduce(self):
a = self.d[0]
self.assertFalse(alltrue(a, axis=0))
self.assertTrue(sometrue(a, axis=0))
self.assertEqual(sum(a[:3], axis=0), 0)
self.assertEqual(product(a, axis=0), 0)
def test_minmax(self):
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
self.assertEqual(amask.max(), a.max())
self.assertEqual(amask.min(), 5)
self.assertTrue((amask.max(0) == a.max(0)).all())
self.assertTrue((amask.min(0) == [5, 6, 7, 8]).all())
self.assertTrue(amask.max(1)[0].mask)
self.assertTrue(amask.min(1)[0].mask)
def test_nonzero(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = array([1, 0, 2, 0], mask=[0, 0, 1, 1])
self.assertTrue(eq(nonzero(x), [0]))
class TestArrayMethods(TestCase):
def setUp(self):
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6, 6)
XX = x.reshape(3, 2, 2, 3)
m = np.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x, mask=m)
mX = array(data=X, mask=m.reshape(X.shape))
mXX = array(data=XX, mask=m.reshape(XX.shape))
self.d = (x, X, XX, m, mx, mX, mXX)
#------------------------------------------------------
def test_trace(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXdiag = mX.diagonal()
self.assertEqual(mX.trace(), mX.diagonal().compressed().sum())
self.assertTrue(eq(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0)))
def test_clip(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
clipped = mx.clip(2, 8)
self.assertTrue(eq(clipped.mask, mx.mask))
self.assertTrue(eq(clipped._data, x.clip(2, 8)))
self.assertTrue(eq(clipped._data, mx._data.clip(2, 8)))
def test_ptp(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
(n, m) = X.shape
self.assertEqual(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, np.float_)
cols = np.zeros(m, np.float_)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
self.assertTrue(eq(mX.ptp(0), cols))
self.assertTrue(eq(mX.ptp(1), rows))
def test_swapaxes(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXswapped = mX.swapaxes(0, 1)
self.assertTrue(eq(mXswapped[-1], mX[:, -1]))
mXXswapped = mXX.swapaxes(0, 2)
self.assertEqual(mXXswapped.shape, (2, 2, 3, 3))
def test_cumprod(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXcp = mX.cumprod(0)
self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(0)))
mXcp = mX.cumprod(1)
self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(1)))
def test_cumsum(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXcp = mX.cumsum(0)
self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(0)))
mXcp = mX.cumsum(1)
self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(1)))
def test_varstd(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
self.assertTrue(eq(mX.var(axis=None), mX.compressed().var()))
self.assertTrue(eq(mX.std(axis=None), mX.compressed().std()))
self.assertTrue(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))
self.assertTrue(eq(mX.var().shape, X.var().shape))
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
for k in range(6):
self.assertTrue(eq(mXvar1[k], mX[k].compressed().var()))
self.assertTrue(eq(mXvar0[k], mX[:, k].compressed().var()))
self.assertTrue(eq(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std()))
def eqmask(m1, m2):
if m1 is nomask:
return m2 is nomask
if m2 is nomask:
return m1 is nomask
return (m1 == m2).all()
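# Illustrative sketch, not part of the original test module: eqmask treats
# nomask as equal only to nomask, and otherwise compares masks element-wise.
def _example_eqmask_usage():
    m = np.array([0, 1, 0])
    return (eqmask(nomask, nomask),   # True
            eqmask(nomask, m),        # False: only nomask equals nomask
            eqmask(m, m.copy()))      # True: element-wise comparison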
#def timingTest():
# for f in [testf, testinplace]:
# for n in [1000,10000,50000]:
# t = testta(n, f)
# t1 = testtb(n, f)
# t2 = testtc(n, f)
# print f.test_name
# print """\
#n = %7d
#numpy time (ms) %6.1f
#MA maskless ratio %6.1f
#MA masked ratio %6.1f
#""" % (n, t*1000.0, t1/t, t2/t)
#def testta(n, f):
# x=np.arange(n) + 1.0
# tn0 = time.time()
# z = f(x)
# return time.time() - tn0
#def testtb(n, f):
# x=arange(n) + 1.0
# tn0 = time.time()
# z = f(x)
# return time.time() - tn0
#def testtc(n, f):
# x=arange(n) + 1.0
# x[0] = masked
# tn0 = time.time()
# z = f(x)
# return time.time() - tn0
#def testf(x):
# for i in range(25):
# y = x **2 + 2.0 * x - 1.0
# w = x **2 + 1.0
# z = (y / w) ** 2
# return z
#testf.test_name = 'Simple arithmetic'
#def testinplace(x):
# for i in range(25):
# y = x**2
# y += 2.0*x
# y -= 1.0
# y /= x
# return y
#testinplace.test_name = 'Inplace operations'
if __name__ == "__main__":
run_module_suite()
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Run Hadoop MapReduce jobs using Hadoop Streaming. To run a job, you need
to subclass :py:class:`luigi.contrib.hadoop.JobTask` and implement the
``mapper`` and ``reducer`` methods. See :doc:`/example_top_artists` for
an example of how to run a Hadoop job.
"""
from __future__ import print_function
import abc
import datetime
import glob
import logging
import os
import pickle
import random
import re
import shutil
import signal
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import subprocess
import sys
import tempfile
import warnings
from hashlib import md5
from itertools import groupby
from luigi import six
from luigi import configuration
import luigi
import luigi.task
import luigi.contrib.hdfs
import luigi.s3
from luigi import mrrunner
if six.PY2:
from itertools import imap as map
try:
# See benchmark at https://gist.github.com/mvj3/02dca2bcc8b0ef1bbfb5
import ujson as json
except ImportError:
import json
logger = logging.getLogger('luigi-interface')
_attached_packages = []
TRACKING_RE = re.compile(r'(tracking url|the url to track the job):\s+(?P<url>.+)$')
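# Illustrative sketch, not part of the original module: TRACKING_RE is matched
# against lowercased stderr lines from the streaming client; the sample line
# below is hypothetical.
def _example_tracking_url(err_line='the url to track the job: http://resourcemanager:8088/proxy/application_1_0001/'):
    match = TRACKING_RE.search(err_line)
    return match.group('url') if match else None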
class hadoop(luigi.task.Config):
pool = luigi.Parameter(default=None,
                           description='Hadoop pool to use for Hadoop tasks. '
                                       'To specify pools per task, see '
'BaseHadoopJobTask.pool')
def attach(*packages):
"""
Attach a python package to hadoop map reduce tarballs to make those packages available
on the hadoop cluster.
"""
_attached_packages.extend(packages)
def dereference(f):
if os.path.islink(f):
# by joining with the dirname we are certain to get the absolute path
return dereference(os.path.join(os.path.dirname(f), os.readlink(f)))
else:
return f
def get_extra_files(extra_files):
result = []
for f in extra_files:
if isinstance(f, str):
src, dst = f, os.path.basename(f)
elif isinstance(f, tuple):
src, dst = f
else:
            raise Exception('extra_files entries must be strings or (src, dst) tuples: %r' % (f,))
if os.path.isdir(src):
src_prefix = os.path.join(src, '')
for base, dirs, files in os.walk(src):
for f in files:
f_src = os.path.join(base, f)
f_src_stripped = f_src[len(src_prefix):]
f_dst = os.path.join(dst, f_src_stripped)
result.append((f_src, f_dst))
else:
result.append((src, dst))
return result
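# Illustrative sketch, not part of the original module: plain strings keep
# their basename as the destination, (src, dst) tuples pass through unchanged,
# and directories are walked recursively. The paths below are hypothetical.
def _example_get_extra_files():
    return get_extra_files(['conf/client.cfg',
                            ('/abs/path/helper.py', 'lib/helper.py')])
    # -> [('conf/client.cfg', 'client.cfg'), ('/abs/path/helper.py', 'lib/helper.py')]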
def create_packages_archive(packages, filename):
"""
Create a tar archive which will contain the files for the packages listed in packages.
"""
import tarfile
tar = tarfile.open(filename, "w")
def add(src, dst):
logger.debug('adding to tar: %s -> %s', src, dst)
tar.add(src, dst)
def add_files_for_package(sub_package_path, root_package_path, root_package_name):
for root, dirs, files in os.walk(sub_package_path):
if '.svn' in dirs:
dirs.remove('.svn')
for f in files:
if not f.endswith(".pyc") and not f.startswith("."):
add(dereference(root + "/" + f), root.replace(root_package_path, root_package_name) + "/" + f)
for package in packages:
# Put a submodule's entire package in the archive. This is the
# magic that usually packages everything you need without
# having to attach packages/modules explicitly
if not getattr(package, "__path__", None) and '.' in package.__name__:
package = __import__(package.__name__.rpartition('.')[0], None, None, 'non_empty')
n = package.__name__.replace(".", "/")
if getattr(package, "__path__", None):
# TODO: (BUG) picking only the first path does not
# properly deal with namespaced packages in different
# directories
p = package.__path__[0]
if p.endswith('.egg') and os.path.isfile(p):
                raise Exception('egg files not supported!!!')
# Add the entire egg file
# p = p[:p.find('.egg') + 4]
# add(dereference(p), os.path.basename(p))
else:
# include __init__ files from parent projects
root = []
for parent in package.__name__.split('.')[0:-1]:
root.append(parent)
module_name = '.'.join(root)
directory = '/'.join(root)
add(dereference(__import__(module_name, None, None, 'non_empty').__path__[0] + "/__init__.py"),
directory + "/__init__.py")
add_files_for_package(p, p, n)
# include egg-info directories that are parallel:
for egg_info_path in glob.glob(p + '*.egg-info'):
logger.debug(
'Adding package metadata to archive for "%s" found at "%s"',
package.__name__,
egg_info_path
)
add_files_for_package(egg_info_path, p, n)
else:
f = package.__file__
if f.endswith("pyc"):
f = f[:-3] + "py"
if n.find(".") == -1:
add(dereference(f), os.path.basename(f))
else:
add(dereference(f), n + ".py")
tar.close()
def flatten(sequence):
"""
A simple generator which flattens a sequence.
Only one level is flattened.
.. code-block:: python
(1, (2, 3), 4) -> (1, 2, 3, 4)
"""
for item in sequence:
if hasattr(item, "__iter__") and not isinstance(item, str) and not isinstance(item, bytes):
for i in item:
yield i
else:
yield item
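# Illustrative sketch, not part of the original module: one level of nesting is
# unrolled, while strings and bytes are yielded as-is.
def _example_flatten_usage():
    return list(flatten((1, (2, 3), 'ab', 4)))  # -> [1, 2, 3, 'ab', 4]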
class HadoopRunContext(object):
def __init__(self):
self.job_id = None
self.application_id = None
def __enter__(self):
self.__old_signal = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self.kill_job)
return self
def kill_job(self, captured_signal=None, stack_frame=None):
if self.application_id:
logger.info('Job interrupted, killing application %s' % self.application_id)
subprocess.call(['yarn', 'application', '-kill', self.application_id])
elif self.job_id:
logger.info('Job interrupted, killing job %s', self.job_id)
subprocess.call(['mapred', 'job', '-kill', self.job_id])
if captured_signal is not None:
# adding 128 gives the exit code corresponding to a signal
sys.exit(128 + captured_signal)
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is KeyboardInterrupt:
self.kill_job()
signal.signal(signal.SIGTERM, self.__old_signal)
class HadoopJobError(RuntimeError):
def __init__(self, message, out=None, err=None):
super(HadoopJobError, self).__init__(message, out, err)
self.message = message
self.out = out
self.err = err
def run_and_track_hadoop_job(arglist, tracking_url_callback=None, env=None):
"""
Runs the job by invoking the command from the given arglist.
Finds tracking urls from the output and attempts to fetch errors using those urls if the job fails.
Throws HadoopJobError with information about the error
(including stdout and stderr from the process)
on failure and returns normally otherwise.
:param arglist:
:param tracking_url_callback:
:param env:
:return:
"""
logger.info('%s', subprocess.list2cmdline(arglist))
def write_luigi_history(arglist, history):
"""
Writes history to a file in the job's output directory in JSON format.
Currently just for tracking the job ID in a configuration where
no history is stored in the output directory by Hadoop.
"""
history_filename = configuration.get_config().get('core', 'history-filename', '')
if history_filename and '-output' in arglist:
output_dir = arglist[arglist.index('-output') + 1]
f = luigi.contrib.hdfs.HdfsTarget(os.path.join(output_dir, history_filename)).open('w')
f.write(json.dumps(history))
f.close()
def track_process(arglist, tracking_url_callback, env=None):
# Dump stdout to a temp file, poll stderr and log it
temp_stdout = tempfile.TemporaryFile('w+t')
proc = subprocess.Popen(arglist, stdout=temp_stdout, stderr=subprocess.PIPE, env=env, close_fds=True, universal_newlines=True)
# We parse the output to try to find the tracking URL.
# This URL is useful for fetching the logs of the job.
tracking_url = None
job_id = None
application_id = None
err_lines = []
with HadoopRunContext() as hadoop_context:
while proc.poll() is None:
err_line = proc.stderr.readline()
err_lines.append(err_line)
err_line = err_line.strip()
if err_line:
logger.info('%s', err_line)
err_line = err_line.lower()
tracking_url_match = TRACKING_RE.search(err_line)
if tracking_url_match:
tracking_url = tracking_url_match.group('url')
try:
tracking_url_callback(tracking_url)
except Exception as e:
logger.error("Error in tracking_url_callback, disabling! %s", e)
def tracking_url_callback(x):
return None
if err_line.find('running job') != -1:
# hadoop jar output
job_id = err_line.split('running job: ')[-1]
if err_line.find('submitted hadoop job:') != -1:
# scalding output
job_id = err_line.split('submitted hadoop job: ')[-1]
if err_line.find('submitted application ') != -1:
application_id = err_line.split('submitted application ')[-1]
hadoop_context.job_id = job_id
hadoop_context.application_id = application_id
# Read the rest + stdout
err = ''.join(err_lines + [an_err_line for an_err_line in proc.stderr])
temp_stdout.seek(0)
out = ''.join(temp_stdout.readlines())
if proc.returncode == 0:
write_luigi_history(arglist, {'job_id': job_id})
return (out, err)
# Try to fetch error logs if possible
message = 'Streaming job failed with exit code %d. ' % proc.returncode
if not tracking_url:
raise HadoopJobError(message + 'Also, no tracking url found.', out, err)
try:
task_failures = fetch_task_failures(tracking_url)
except Exception as e:
raise HadoopJobError(message + 'Additionally, an error occurred when fetching data from %s: %s' %
(tracking_url, e), out, err)
if not task_failures:
raise HadoopJobError(message + 'Also, could not fetch output from tasks.', out, err)
else:
raise HadoopJobError(message + 'Output from tasks below:\n%s' % task_failures, out, err)
if tracking_url_callback is None:
def tracking_url_callback(x): return None
return track_process(arglist, tracking_url_callback, env)
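# Illustrative sketch, not part of the original module: the optional callback
# receives the tracking URL as soon as it appears in the job's stderr; arglist
# is whatever `hadoop jar ...` command line the caller has assembled.
def _example_run_with_tracking_callback(arglist):
    def on_tracking_url(url):
        logger.info('Tracking url for this run: %s', url)
    return run_and_track_hadoop_job(arglist, tracking_url_callback=on_tracking_url)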
def fetch_task_failures(tracking_url):
"""
Uses mechanize to fetch the actual task logs from the task tracker.
This is highly opportunistic, and we might not succeed.
So we set a low timeout and hope it works.
If it does not, it's not the end of the world.
TODO: Yarn has a REST API that we should probably use instead:
http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html
"""
import mechanize
timeout = 3.0
failures_url = tracking_url.replace('jobdetails.jsp', 'jobfailures.jsp') + '&cause=failed'
logger.debug('Fetching data from %s', failures_url)
b = mechanize.Browser()
b.open(failures_url, timeout=timeout)
links = list(b.links(text_regex='Last 4KB')) # For some reason text_regex='All' doesn't work... no idea why
    links = random.sample(links, min(10, len(links)))  # Fetch a random subset of the failed tasks, so as not to be biased towards the early failures
error_text = []
for link in links:
task_url = link.url.replace('&start=-4097', '&start=-100000') # Increase the offset
logger.debug('Fetching data from %s', task_url)
b2 = mechanize.Browser()
try:
r = b2.open(task_url, timeout=timeout)
data = r.read()
except Exception as e:
logger.debug('Error fetching data from %s: %s', task_url, e)
continue
# Try to get the hex-encoded traceback back from the output
for exc in re.findall(r'luigi-exc-hex=[0-9a-f]+', data):
error_text.append('---------- %s:' % task_url)
error_text.append(exc.split('=')[-1].decode('hex'))
return '\n'.join(error_text)
class JobRunner(object):
run_job = NotImplemented
class HadoopJobRunner(JobRunner):
"""
Takes care of uploading & executing a Hadoop job using Hadoop streaming.
TODO: add code to support Elastic Mapreduce (using boto) and local execution.
"""
def __init__(self, streaming_jar, modules=None, streaming_args=None,
libjars=None, libjars_in_hdfs=None, jobconfs=None,
input_format=None, output_format=None,
end_job_with_atomic_move_dir=True):
def get(x, default):
            return default if x is None else x
self.streaming_jar = streaming_jar
self.modules = get(modules, [])
self.streaming_args = get(streaming_args, [])
self.libjars = get(libjars, [])
self.libjars_in_hdfs = get(libjars_in_hdfs, [])
self.jobconfs = get(jobconfs, {})
self.input_format = input_format
self.output_format = output_format
self.end_job_with_atomic_move_dir = end_job_with_atomic_move_dir
self.tmp_dir = False
def run_job(self, job, tracking_url_callback=None):
packages = [luigi] + self.modules + job.extra_modules() + list(_attached_packages)
# find the module containing the job
packages.append(__import__(job.__module__, None, None, 'dummy'))
        # find the path to our runner.py
runner_path = mrrunner.__file__
# assume source is next to compiled
if runner_path.endswith("pyc"):
runner_path = runner_path[:-3] + "py"
base_tmp_dir = configuration.get_config().get('core', 'tmp-dir', None)
if base_tmp_dir:
warnings.warn("The core.tmp-dir configuration item is"
" deprecated, please use the TMPDIR"
" environment variable if you wish"
" to control where luigi.contrib.hadoop may"
" create temporary files and directories.")
self.tmp_dir = os.path.join(base_tmp_dir, 'hadoop_job_%016x' % random.getrandbits(64))
os.makedirs(self.tmp_dir)
else:
self.tmp_dir = tempfile.mkdtemp()
logger.debug("Tmp dir: %s", self.tmp_dir)
# build arguments
config = configuration.get_config()
python_executable = config.get('hadoop', 'python-executable', 'python')
map_cmd = '{0} mrrunner.py map'.format(python_executable)
cmb_cmd = '{0} mrrunner.py combiner'.format(python_executable)
red_cmd = '{0} mrrunner.py reduce'.format(python_executable)
output_final = job.output().path
# atomic output: replace output with a temporary work directory
if self.end_job_with_atomic_move_dir:
if isinstance(job.output(), luigi.s3.S3FlagTarget):
raise TypeError("end_job_with_atomic_move_dir is not supported"
" for S3FlagTarget")
output_hadoop = '{output}-temp-{time}'.format(
output=output_final,
time=datetime.datetime.now().isoformat().replace(':', '-'))
else:
output_hadoop = output_final
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', self.streaming_jar]
# 'libjars' is a generic option, so place it first
libjars = [libjar for libjar in self.libjars]
for libjar in self.libjars_in_hdfs:
run_cmd = luigi.contrib.hdfs.load_hadoop_cmd() + ['fs', '-get', libjar, self.tmp_dir]
logger.debug(subprocess.list2cmdline(run_cmd))
subprocess.call(run_cmd)
libjars.append(os.path.join(self.tmp_dir, os.path.basename(libjar)))
if libjars:
arglist += ['-libjars', ','.join(libjars)]
# Add static files and directories
extra_files = get_extra_files(job.extra_files())
files = []
for src, dst in extra_files:
dst_tmp = '%s_%09d' % (dst.replace('/', '_'), random.randint(0, 999999999))
files += ['%s#%s' % (src, dst_tmp)]
            # -files doesn't support subdirectories, so we need to create the dst_tmp -> dst links ourselves
job.add_link(dst_tmp, dst)
if files:
arglist += ['-files', ','.join(files)]
jobconfs = job.jobconfs()
for k, v in six.iteritems(self.jobconfs):
jobconfs.append('%s=%s' % (k, v))
for conf in jobconfs:
arglist += ['-D', conf]
arglist += self.streaming_args
arglist += ['-mapper', map_cmd]
if job.combiner != NotImplemented:
arglist += ['-combiner', cmb_cmd]
if job.reducer != NotImplemented:
arglist += ['-reducer', red_cmd]
files = [runner_path, self.tmp_dir + '/packages.tar', self.tmp_dir + '/job-instance.pickle']
for f in files:
arglist += ['-file', f]
if self.output_format:
arglist += ['-outputformat', self.output_format]
if self.input_format:
arglist += ['-inputformat', self.input_format]
for target in luigi.task.flatten(job.input_hadoop()):
if not isinstance(target, luigi.contrib.hdfs.HdfsTarget) \
and not isinstance(target, luigi.s3.S3Target):
raise TypeError('target must be an HdfsTarget or S3Target')
arglist += ['-input', target.path]
if not isinstance(job.output(), luigi.contrib.hdfs.HdfsTarget) \
and not isinstance(job.output(), luigi.s3.S3FlagTarget):
raise TypeError('output must be an HdfsTarget or S3FlagTarget')
arglist += ['-output', output_hadoop]
# submit job
create_packages_archive(packages, self.tmp_dir + '/packages.tar')
job.dump(self.tmp_dir)
run_and_track_hadoop_job(arglist, tracking_url_callback=tracking_url_callback)
if self.end_job_with_atomic_move_dir:
luigi.contrib.hdfs.HdfsTarget(output_hadoop).move_dir(output_final)
self.finish()
def finish(self):
# FIXME: check for isdir?
if self.tmp_dir and os.path.exists(self.tmp_dir):
logger.debug('Removing directory %s', self.tmp_dir)
shutil.rmtree(self.tmp_dir)
def __del__(self):
self.finish()
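# Illustrative sketch, not part of the original module: a job_runner() override
# can build a HadoopJobRunner directly when the defaults are not enough. The
# jar path and jobconf below are hypothetical.
def _example_custom_hadoop_job_runner():
    return HadoopJobRunner(
        streaming_jar='/usr/lib/hadoop-mapreduce/hadoop-streaming.jar',
        jobconfs={'mapreduce.job.queuename': 'default'})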
class DefaultHadoopJobRunner(HadoopJobRunner):
"""
    The default job runner just reads the streaming jar path from the config and passes it to HadoopJobRunner.
"""
def __init__(self):
config = configuration.get_config()
streaming_jar = config.get('hadoop', 'streaming-jar')
super(DefaultHadoopJobRunner, self).__init__(streaming_jar=streaming_jar)
# TODO: add more configurable options
class LocalJobRunner(JobRunner):
"""
Will run the job locally.
This is useful for debugging and also unit testing. Tries to mimic Hadoop Streaming.
TODO: integrate with JobTask
"""
def __init__(self, samplelines=None):
self.samplelines = samplelines
def sample(self, input_stream, n, output):
for i, line in enumerate(input_stream):
if n is not None and i >= n:
break
output.write(line)
def group(self, input_stream):
output = StringIO()
lines = []
for i, line in enumerate(input_stream):
parts = line.rstrip('\n').split('\t')
blob = md5(str(i).encode('ascii')).hexdigest() # pseudo-random blob to make sure the input isn't sorted
lines.append((parts[:-1], blob, line))
for _, _, line in sorted(lines):
output.write(line)
output.seek(0)
return output
def run_job(self, job):
map_input = StringIO()
for i in luigi.task.flatten(job.input_hadoop()):
self.sample(i.open('r'), self.samplelines, map_input)
map_input.seek(0)
if job.reducer == NotImplemented:
# Map only job; no combiner, no reducer
map_output = job.output().open('w')
job.run_mapper(map_input, map_output)
map_output.close()
return
# run job now...
map_output = StringIO()
job.run_mapper(map_input, map_output)
map_output.seek(0)
if job.combiner == NotImplemented:
reduce_input = self.group(map_output)
else:
combine_input = self.group(map_output)
combine_output = StringIO()
job.run_combiner(combine_input, combine_output)
combine_output.seek(0)
reduce_input = self.group(combine_output)
reduce_output = job.output().open('w')
job.run_reducer(reduce_input, reduce_output)
reduce_output.close()
class BaseHadoopJobTask(luigi.Task):
pool = luigi.Parameter(default=None, significant=False, positional=False)
# This value can be set to change the default batching increment. Default is 1 for backwards compatibility.
batch_counter_default = 1
final_mapper = NotImplemented
final_combiner = NotImplemented
final_reducer = NotImplemented
mr_priority = NotImplemented
_counter_dict = {}
task_id = None
def _get_pool(self):
""" Protected method """
if self.pool:
return self.pool
if hadoop().pool:
return hadoop().pool
@abc.abstractmethod
def job_runner(self):
pass
def jobconfs(self):
jcs = []
jcs.append('mapred.job.name=%s' % self.task_id)
if self.mr_priority != NotImplemented:
jcs.append('mapred.job.priority=%s' % self.mr_priority())
pool = self._get_pool()
if pool is not None:
            # The same pool setting supports two schedulers: fair (the default) and capacity.
scheduler_type = configuration.get_config().get('hadoop', 'scheduler', 'fair')
if scheduler_type == 'fair':
jcs.append('mapred.fairscheduler.pool=%s' % pool)
elif scheduler_type == 'capacity':
jcs.append('mapred.job.queue.name=%s' % pool)
return jcs
def init_local(self):
"""
        Implement any work needed to set up internal data structures here.
You can add extra input using the requires_local/input_local methods.
Anything you set on the object will be pickled and available on the Hadoop nodes.
"""
pass
def init_hadoop(self):
pass
# available formats are "python" and "json".
data_interchange_format = "python"
def run(self, tracking_url_callback=None):
        # The nicest solution would be a lazy `cached_property`, but that
        # would pull in an extra dependency. A plain `property` is slow (it is
        # recomputed on every access), so we save them as attributes directly.
self.serialize = DataInterchange[self.data_interchange_format]['serialize']
self.internal_serialize = DataInterchange[self.data_interchange_format]['internal_serialize']
self.deserialize = DataInterchange[self.data_interchange_format]['deserialize']
self.init_local()
try:
self.job_runner().run_job(self, tracking_url_callback=tracking_url_callback)
except TypeError as ex:
            if 'unexpected keyword argument' not in str(ex):
raise
self.job_runner().run_job(self)
def requires_local(self):
"""
Default impl - override this method if you need any local input to be accessible in init().
"""
return []
def requires_hadoop(self):
return self.requires() # default impl
def input_local(self):
return luigi.task.getpaths(self.requires_local())
def input_hadoop(self):
return luigi.task.getpaths(self.requires_hadoop())
def deps(self):
# Overrides the default implementation
return luigi.task.flatten(self.requires_hadoop()) + luigi.task.flatten(self.requires_local())
def on_failure(self, exception):
if isinstance(exception, HadoopJobError):
return """Hadoop job failed with message: {message}
stdout:
{stdout}
stderr:
{stderr}
""".format(message=exception.message, stdout=exception.out, stderr=exception.err)
else:
return super(BaseHadoopJobTask, self).on_failure(exception)
DataInterchange = {
"python": {"serialize": str,
"internal_serialize": repr,
"deserialize": eval},
"json": {"serialize": json.dumps,
"internal_serialize": json.dumps,
"deserialize": json.loads}
}
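# Illustrative sketch, not part of the original module: the same codec table is
# used to serialize records between the map and reduce steps and to read them
# back. The record below is hypothetical.
def _example_data_interchange_roundtrip(fmt="python"):
    codec = DataInterchange[fmt]
    record = ('example_key', 42)
    wire = "\t".join(map(codec['internal_serialize'], record))
    return [codec['deserialize'](part) for part in wire.split("\t")]
    # -> ['example_key', 42] for both the "python" and "json" formats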
class JobTask(BaseHadoopJobTask):
n_reduce_tasks = 25
reducer = NotImplemented
def jobconfs(self):
jcs = super(JobTask, self).jobconfs()
if self.reducer == NotImplemented:
jcs.append('mapred.reduce.tasks=0')
else:
jcs.append('mapred.reduce.tasks=%s' % self.n_reduce_tasks)
return jcs
def init_mapper(self):
pass
def init_combiner(self):
pass
def init_reducer(self):
pass
def _setup_remote(self):
self._setup_links()
def job_runner(self):
# We recommend that you define a subclass, override this method and set up your own config
"""
Get the MapReduce runner for this job.
If all outputs are HdfsTargets, the DefaultHadoopJobRunner will be used.
Otherwise, the LocalJobRunner which streams all data through the local machine
will be used (great for testing).
"""
outputs = luigi.task.flatten(self.output())
for output in outputs:
if not isinstance(output, luigi.contrib.hdfs.HdfsTarget):
warnings.warn("Job is using one or more non-HdfsTarget outputs" +
" so it will be run in local mode")
return LocalJobRunner()
else:
return DefaultHadoopJobRunner()
def reader(self, input_stream):
"""
Reader is a method which iterates over input lines and outputs records.
The default implementation yields one argument containing the line for each line in the input."""
for line in input_stream:
yield line,
def writer(self, outputs, stdout, stderr=sys.stderr):
"""
Writer format is a method which iterates over the output records
from the reducer and formats them for output.
The default implementation outputs tab separated items.
"""
for output in outputs:
try:
output = flatten(output)
if self.data_interchange_format == "json":
                    # The records are already JSON strings; just drop any empty key or value parts.
output = filter(lambda x: x, output)
else:
                    # For the other formats, each item still needs to be serialized here.
output = map(self.serialize, output)
print("\t".join(output), file=stdout)
except:
print(output, file=stderr)
raise
def mapper(self, item):
"""
Re-define to process an input item (usually a line of input data).
Defaults to identity mapper that sends all lines to the same reducer.
"""
yield None, item
combiner = NotImplemented
def incr_counter(self, *args, **kwargs):
"""
Increments a Hadoop counter.
Since counters can be a bit slow to update, this batches the updates.
"""
threshold = kwargs.get("threshold", self.batch_counter_default)
if len(args) == 2:
# backwards compatibility with existing hadoop jobs
group_name, count = args
key = (group_name,)
else:
group, name, count = args
key = (group, name)
ct = self._counter_dict.get(key, 0)
ct += count
if ct >= threshold:
new_arg = list(key) + [ct]
self._incr_counter(*new_arg)
ct = 0
self._counter_dict[key] = ct
def _flush_batch_incr_counter(self):
"""
Increments any unflushed counter values.
"""
for key, count in six.iteritems(self._counter_dict):
if count == 0:
continue
args = list(key) + [count]
self._incr_counter(*args)
self._counter_dict[key] = 0
def _incr_counter(self, *args):
"""
Increments a Hadoop counter.
Note that this seems to be a bit slow, ~1 ms
Don't overuse this function by updating very frequently.
"""
if len(args) == 2:
# backwards compatibility with existing hadoop jobs
group_name, count = args
print('reporter:counter:%s,%s' % (group_name, count), file=sys.stderr)
else:
group, name, count = args
print('reporter:counter:%s,%s,%s' % (group, name, count), file=sys.stderr)
def extra_modules(self):
return [] # can be overridden in subclass
def extra_files(self):
"""
        Can be overridden in a subclass.
Each element is either a string, or a pair of two strings (src, dst).
* `src` can be a directory (in which case everything will be copied recursively).
* `dst` can include subdirectories (foo/bar/baz.txt etc)
Uses Hadoop's -files option so that the same file is reused across tasks.
"""
return []
def add_link(self, src, dst):
if not hasattr(self, '_links'):
self._links = []
self._links.append((src, dst))
def _setup_links(self):
if hasattr(self, '_links'):
missing = []
for src, dst in self._links:
d = os.path.dirname(dst)
if d:
try:
os.makedirs(d)
except OSError:
pass
if not os.path.exists(src):
missing.append(src)
continue
if not os.path.exists(dst):
# If the combiner runs, the file might already exist,
# so no reason to create the link again
os.link(src, dst)
if missing:
raise HadoopJobError(
'Missing files for distributed cache: ' +
', '.join(missing))
def dump(self, directory=''):
"""
Dump instance to file.
"""
file_name = os.path.join(directory, 'job-instance.pickle')
if self.__module__ == '__main__':
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
            d = d.replace(b'(c__main__', b'(c' + module_name.encode('ascii'))
open(file_name, "wb").write(d)
else:
pickle.dump(self, open(file_name, "wb"))
def _map_input(self, input_stream):
"""
Iterate over input and call the mapper for each item.
If the job has a parser defined, the return values from the parser will
be passed as arguments to the mapper.
If the input is coded output from a previous run,
        the arguments will be split into key and value.
"""
for record in self.reader(input_stream):
for output in self.mapper(*record):
yield output
if self.final_mapper != NotImplemented:
for output in self.final_mapper():
yield output
self._flush_batch_incr_counter()
def _reduce_input(self, inputs, reducer, final=NotImplemented):
"""
Iterate over input, collect values with the same key, and call the reducer for each unique key.
"""
for key, values in groupby(inputs, key=lambda x: self.internal_serialize(x[0])):
for output in reducer(self.deserialize(key), (v[1] for v in values)):
yield output
if final != NotImplemented:
for output in final():
yield output
self._flush_batch_incr_counter()
def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the mapper on the hadoop node.
"""
self.init_hadoop()
self.init_mapper()
outputs = self._map_input((line[:-1] for line in stdin))
if self.reducer == NotImplemented:
self.writer(outputs, stdout)
else:
self.internal_writer(outputs, stdout)
def run_reducer(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the reducer on the hadoop node.
"""
self.init_hadoop()
self.init_reducer()
outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)), self.reducer, self.final_reducer)
self.writer(outputs, stdout)
def run_combiner(self, stdin=sys.stdin, stdout=sys.stdout):
self.init_hadoop()
self.init_combiner()
outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)), self.combiner, self.final_combiner)
self.internal_writer(outputs, stdout)
def internal_reader(self, input_stream):
"""
        Reader which deserializes each part of a tab separated string
        (python ``eval`` or ``json.loads``, depending on the data interchange format).
        Yields a list of python objects.
"""
for input_line in input_stream:
yield list(map(self.deserialize, input_line.split("\t")))
def internal_writer(self, outputs, stdout):
"""
        Writer which outputs the internal serialization (python ``repr`` or JSON) of each item.
"""
for output in outputs:
print("\t".join(map(self.internal_serialize, output)), file=stdout)
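# Illustrative sketch, not part of the original module: the smallest useful
# JobTask subclass defines output(), mapper() and reducer(). The HDFS path is
# hypothetical, and requires() would normally point at tasks producing text files.
class _ExampleWordCount(JobTask):
    def requires(self):
        return []
    def output(self):
        return luigi.contrib.hdfs.HdfsTarget('/tmp/example-word-count')
    def mapper(self, line):
        for word in line.split():
            yield word, 1
    def reducer(self, key, values):
        yield key, sum(values)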
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Serves JSON for a graph.
This serves the JSON in the format consumed by Flot:
https://github.com/flot/flot/blob/master/API.md
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import copy
import datetime
import json
import logging
import math
import re
from google.appengine.ext import ndb
from dashboard import alerts
from dashboard import can_bisect
from dashboard import list_tests
from dashboard.common import datastore_hooks
from dashboard.common import request_handler
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
# Default number of points to fetch per test.
# This can be overridden by specifying num_points or start_rev and end_rev.
_DEFAULT_NUM_POINTS = 150
# If data for more than this many tests is requested for unselected tests,
# an empty response will be returned.
_MAX_UNSELECTED_TESTS = 55
# Dictionary mapping improvement directions constants to strings.
_BETTER_DICT = {
anomaly.UP: 'Higher',
anomaly.DOWN: 'Lower',
anomaly.UNKNOWN: '?',
}
class GraphJsonHandler(request_handler.RequestHandler):
"""Request handler for requests for graph data."""
def post(self):
"""Fetches and prepares data for a graph.
Request parameters:
graphs: A JSON serialization of a dict that contains the arguments
for GetGraphJson.
Outputs:
JSON serialization of data to be used for plotting a graph.
"""
self.response.headers.add_header('Access-Control-Allow-Origin', '*')
arguments = self._ParseRequestArguments()
if not arguments:
self.ReportError('Bad Graph JSON Request')
return
self.response.out.write(GetGraphJson(**arguments))
def _ParseRequestArguments(self):
"""Parses parameters from a request and checks for errors.
The post request is expected to pass one parameter, called 'graphs',
whose value is a JSON serialization of a dict of parameters.
Returns:
A dict of arguments that can be given to GetGraphJson, or None if
no valid dict of arguments can be constructed.
"""
graphs = self.request.get('graphs')
if graphs is None:
logging.error('No graph names specified')
return None
try:
graphs = json.loads(graphs)
except ValueError:
logging.error('Invalid JSON string for graphs')
return None
test_path_dict = graphs.get('test_path_dict')
test_path_list = graphs.get('test_path_list')
is_selected = graphs.get('is_selected')
if test_path_dict and test_path_list:
logging.error(
'Only one of test_path_dict and test_path_list may be specified')
return None
elif test_path_dict:
test_paths = _ResolveTestPathDict(test_path_dict, is_selected)
elif test_path_list:
test_paths = test_path_list
else:
logging.error(
'Exactly one of test_path_dict or test_path_list must be specified')
return None
arguments = {
'test_paths': test_paths,
'rev': _PositiveIntOrNone(graphs.get('rev')),
'num_points': (_PositiveIntOrNone(graphs.get('num_points'))
or _DEFAULT_NUM_POINTS),
'is_selected': is_selected,
'start_rev': _PositiveIntOrNone(graphs.get('start_rev')),
'end_rev': _PositiveIntOrNone(graphs.get('end_rev')),
}
return arguments
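# Illustrative sketch, not part of the original module: the 'graphs' POST
# parameter is a JSON dict such as the hypothetical one below, with either
# test_path_list or test_path_dict (but not both).
def _ExampleGraphsParameter():
  return json.dumps({
      'test_path_list': ['ChromiumPerf/linux/sunspider/Total'],
      'num_points': 150,
      'is_selected': True,
  })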
def _ResolveTestPathDict(test_path_dict, is_selected):
# TODO(eakuefner): These are old-style test path dicts which means that []
# doesn't mean 'no tests' but rather 'all tests'. Remove this hack.
if is_selected:
for test, selected in test_path_dict.items():
if selected == []:
test_path_dict[test] = 'all'
return list_tests.GetTestsForTestPathDict(
test_path_dict, bool(is_selected))['tests']
def GetGraphJson(
test_paths, rev=None, num_points=None,
is_selected=True, start_rev=None, end_rev=None):
"""Makes a JSON serialization of data for one chart with multiple series.
This function can return data for one chart (with multiple data series
plotted on it) with revisions on the x-axis, for a certain range of
revisions. The particular set of revisions to get data for can be specified
with the arguments rev, num_points, start_rev, and end_rev.
Args:
test_paths: A list of test paths.
rev: A revision number that the chart may be clamped relative to.
num_points: Number of points to plot.
is_selected: Whether this request is for selected or un-selected series.
start_rev: The lowest revision to get trace data for.
end_rev: The highest revision to get trace data for.
Returns:
JSON serialization of a dict with info that will be used to plot a chart.
"""
# If a particular test has a lot of children, then a request will be made
# for data for a lot of unselected series, which may be very slow and may
# time out. In this case, return nothing; see issue #1876.
if not is_selected and len(test_paths) > _MAX_UNSELECTED_TESTS:
return json.dumps({'data': {}, 'annotations': {}, 'error_bars': {}})
test_keys = list(map(utils.TestKey, test_paths))
test_entities = ndb.get_multi(test_keys)
test_entities = [t for t in test_entities if t is not None and t.has_rows]
# Filter out deprecated tests, but only if not all the tests are deprecated.
all_deprecated = all(t.deprecated for t in test_entities)
if not all_deprecated:
test_entities = [t for t in test_entities if not t.deprecated]
test_entities = [t for t in test_entities if t.has_rows]
revision_map = {}
num_points = num_points or _DEFAULT_NUM_POINTS
for test in test_entities:
_UpdateRevisionMap(revision_map, test, rev, num_points, start_rev, end_rev)
if not (start_rev and end_rev):
_ClampRevisionMap(revision_map, rev, num_points)
return _GetFlotJson(revision_map, test_entities)
def _PositiveIntOrNone(input_str):
"""Parses a string as a positive int if possible, otherwise returns None."""
if not input_str:
return None
try:
parsed = int(input_str)
except ValueError:
return None
if parsed < 0:
return None
return parsed
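# Illustrative sketch, not part of the original module: empty, non-numeric and
# negative inputs all map to None.
def _ExamplePositiveIntOrNone():
  return [_PositiveIntOrNone(s) for s in ('42', '', 'abc', '-3', None)]
  # -> [42, None, None, None, None]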
def _GetAnomalyAnnotationMap(test):
"""Gets a map of revision numbers to Anomaly entities."""
anomalies, _, _ = anomaly.Anomaly.QueryAsync(
test=test, limit=1000).get_result()
return dict((a.end_revision, a) for a in anomalies)
def _UpdateRevisionMap(revision_map, parent_test, rev, num_points,
start_rev=None, end_rev=None):
"""Updates a dict of revisions to data point information for one test.
Depending on which arguments are given, there are several ways that
this function can update the dict of revisions:
1. If start_rev and end_rev are given, then revisions in this range
are used. The num_points argument is ignored.
2. Otherwise, if rev is given, then revisions before and after the
specified revision are used.
3. Otherwise, the latest revisions are used.
Args:
revision_map: A dict mapping revision numbers to dicts of point info.
Each point info dict contains information from a Row entity.
parent_test: A TestMetadata entity with Row children.
rev: The middle revision in the revision map (could be None).
num_points: The number of points to include in the revision map.
start_rev: Start revision number (optional).
end_rev: End revision number (optional).
"""
anomaly_annotation_map = _GetAnomalyAnnotationMap(parent_test.key)
assert(datastore_hooks.IsUnalteredQueryPermitted() or
not parent_test.internal_only)
if start_rev and end_rev:
rows = graph_data.GetRowsForTestInRange(parent_test.key, start_rev, end_rev)
elif rev:
assert num_points
rows = graph_data.GetRowsForTestAroundRev(parent_test.key, rev, num_points)
else:
assert num_points
rows = graph_data.GetLatestRowsForTest(parent_test.key, num_points)
parent_test_key = parent_test.key.urlsafe()
for row in rows:
if row.revision not in revision_map:
revision_map[row.revision] = {}
revision_map[row.revision][parent_test_key] = _PointInfoDict(
row, anomaly_annotation_map)
def _PointInfoDict(row, anomaly_annotation_map):
"""Makes a dict of properties of one Row."""
point_info = {
'value': row.value,
}
tracing_uri = _GetTracingUri(row)
if tracing_uri:
point_info['a_tracing_uri'] = tracing_uri
if row.error is not None:
point_info['error'] = row.error
if anomaly_annotation_map.get(row.revision):
anomaly_entity = anomaly_annotation_map.get(row.revision)
point_info['g_anomaly'] = alerts.GetAnomalyDict(anomaly_entity)
row_dict = row.to_dict()
for name, val in row_dict.items():
if _IsMarkdownLink(val) and 'Buildbot stdio' in val:
logdog_link, status_page_link = _GetUpdatedBuildbotLinks(val)
if logdog_link:
val = logdog_link
# TODO(simonhatch): Remove this sometime in 2019.
# crbug.com/891424
      if status_page_link and 'a_build_uri' not in row_dict:
point_info['a_buildbot_status_page'] = status_page_link
if name.startswith('r_'):
point_info[name] = val
elif name == 'a_default_rev':
point_info['a_default_rev'] = val
elif name == 'timestamp':
point_info['timestamp'] = val
elif name.startswith('a_') and _IsMarkdownLink(val):
point_info[name] = val
return point_info
def _IsMarkdownLink(value):
"""Checks whether |value| is a markdown link."""
if not isinstance(value, str):
return False
return re.match(r'\[.+?\]\(.+?\)', value)
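# Illustrative sketch, not part of the original module: annotation values use
# the markdown [title](url) form; a bare URL does not match.
def _ExampleIsMarkdownLink():
  return (bool(_IsMarkdownLink('[Buildbot stdio](http://build/steps/stdio)')),
          bool(_IsMarkdownLink('http://build/steps/stdio')))
  # -> (True, False)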
def _GetUpdatedBuildbotLinks(old_stdio_link):
# Links take a markdown format, [title](url)
logdog_markdown = None
logdog_link = utils.GetLogdogLogUriFromStdioLink(old_stdio_link)
if logdog_link:
logdog_markdown = '[Buildbot stdio](%s)' % logdog_link
buildbot_status_markdown = None
buildbot_link = utils.GetBuildbotStatusPageUriFromStdioLink(
old_stdio_link)
if buildbot_link:
buildbot_status_markdown = '[Buildbot status page](%s)' % buildbot_link
return logdog_markdown, buildbot_status_markdown
def _CreateLinkProperty(name, label, url):
"""Returns a dict containing markdown link to show on dashboard."""
return {'a_' + name: '[%s](%s)' % (label, url)}
def _GetSeriesAnnotations(tests):
"""Makes a list of metadata about each series (i.e. each test).
Args:
tests: List of TestMetadata entities.
Returns:
A list of dicts of metadata about each series. One dict for each test.
"""
series_annotations = {}
for i, test in enumerate(tests):
series_annotations[i] = {
'name': test.test_name,
'path': test.test_path,
'units': test.units,
'better': _BETTER_DICT[test.improvement_direction],
'description': test.description,
'can_bisect': can_bisect.IsValidTestForBisect(test.test_path),
}
return series_annotations
def _ClampRevisionMap(revision_map, rev, num_points):
"""Clamps the results down to the requested number of points before/after rev.
Not all of the Tests have Rows for the exact same revisions. If one test has
gaps in the requested range, the query for points before/after rev will
extend outside the range, but the other tests with complete data will not
extend their query range. We only want the num_points/2 rows nearest rev
because the extended range didn't query all of the tests. See crbug.com/236718
Args:
revision_map: The dict with all found revisions. This will be modified.
rev: The requested revision.
num_points: The requested number of points to plot.
"""
revisions = sorted(revision_map.keys())
if len(revisions) <= num_points:
return
# Default to clamping to the last revision, then try to fill in better.
index = len(revisions) - 1
if rev is not None:
for i, r in enumerate(revisions):
if r >= rev:
index = i
break
rows_before = int(num_points / 2) if rev is not None else num_points
clamp_before = max(index - rows_before, 0)
rows_after = int(num_points / 2) if rev is not None else 0
clamp_after = index + rows_after + 1
for rev_to_delete in (
revisions[:clamp_before] + revisions[clamp_after:]):
del revision_map[rev_to_delete]
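# Illustrative sketch, not part of the original module: with six revisions,
# rev=103 and num_points=4, the map keeps the requested revision plus
# num_points/2 rows on each side of it.
def _ExampleClampRevisionMap():
  revision_map = {r: {} for r in (100, 101, 102, 103, 104, 105)}
  _ClampRevisionMap(revision_map, rev=103, num_points=4)
  return sorted(revision_map)  # -> [101, 102, 103, 104, 105]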
def _GetTracingUri(point):
"""Gets the URI string for tracing in cloud storage, if available.
Args:
point: A Row entity.
Returns:
    A URI string, or None if there is no trace available.
"""
if not hasattr(point, 'a_tracing_uri'):
return None
return point.a_tracing_uri
def _GetFlotJson(revision_map, tests):
"""Constructs JSON in the format expected by Flot.
Args:
revision_map: A dict which maps revision numbers to data point info.
tests: A list of TestMetadata entities.
Returns:
    JSON serialization of a dict with line data, annotations, and error range
    data. (This data may not be passed exactly as-is to the Flot plot function,
    but it will all be used when plotting.)
"""
# Each entry in the following dict is one Flot series object. The actual
# x-y values will be put into the 'data' properties for each object.
cols = {i: _FlotSeries(i, test) for i, test in enumerate(tests)}
flot_annotations = {}
flot_annotations['series'] = _GetSeriesAnnotations(tests)
# For each TestMetadata (which corresponds to a trace line), the shaded error
# region is specified by two series objects. For a demo, see:
# http://www.flotcharts.org/flot/examples/percentiles/index.html
error_bars = {x: [
{
'id': 'bottom_%d' % x,
'data': [],
'color': x,
'clickable': False,
'hoverable': False,
'lines': {
'show': True,
'lineWidth': 0,
'fill': 0.2,
},
'fillBetween': 'line_%d' % x,
},
{
'id': 'top_%d' % x,
'data': [],
'color': x,
'clickable': False,
'hoverable': False,
'lines': {
'show': True,
'lineWidth': 0,
'fill': 0.2,
},
'fillBetween': 'line_%d' % x,
}
] for x, _ in enumerate(tests)}
test_keys = [t.key.urlsafe() for t in tests]
for revision in sorted(revision_map.keys()):
for series_index, key in enumerate(test_keys):
point_info = revision_map[revision].get(key, None)
if not point_info:
continue
timestamp = point_info.get('timestamp')
if timestamp and isinstance(timestamp, datetime.datetime):
point_info['timestamp'] = utils.TimestampMilliseconds(timestamp)
# TODO(simonhatch): Need to filter out NaN values.
# https://github.com/catapult-project/catapult/issues/3474
point_list = [revision, point_info['value']]
if math.isnan(point_info['value']):
continue
if 'error' in point_info:
error = point_info['error']
error_bars[series_index][0]['data'].append(
[revision, point_info['value'] - error])
error_bars[series_index][1]['data'].append(
[revision, point_info['value'] + error])
cols[series_index]['data'].append(point_list)
data_index = len(cols[series_index]['data']) - 1
series_dict = flot_annotations.setdefault(series_index, {})
data_dict = copy.deepcopy(point_info)
del data_dict['value']
series_dict.setdefault(data_index, data_dict)
return json.dumps(
{
'data': cols,
'annotations': flot_annotations,
'error_bars': error_bars,
},
allow_nan=False)
def _FlotSeries(index, test):
return {
'data': [],
'color': index,
'id': 'line_%d' % index,
'testpath': test.test_path,
}
|
|
# Copyright 2012-2013 Ravello Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import sys
import time
import socket
import select
import struct
import errno
import textwrap
import copy
import functools
from testmill import (cache, console, keypair, util, ravello, error,
manifest, inflect)
from testmill.state import env
# Starting and waiting for applications and blueprints ..
vm_ordered_states = ['PUBLISHING', 'STOPPED', 'STARTING', 'STARTED']
bp_ordered_states = ['SAVING', 'DONE']
def combine_states(state1, state2, ordering):
"""Combine two states *state1* and *state2* into a single state.
The *ordering* parameter must be a sequence containing the valid states in
    some ordering. The combined state is then the minimum state according to this
ordering. If one of *state1* or *state2* is unknown, the combined state is
the unknown state.
    This function is useful for tracking the progress of objects that transition
    through one or more intermediary states into a single end state.
"""
try:
index1 = ordering.index(state1)
except ValueError:
return state1
try:
index2 = ordering.index(state2)
except ValueError:
return state2
return ordering[min(index1, index2)]
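# Illustrative sketch (not part of the original module): combine_states() is
# typically folded over the per-VM states, exactly as get_application_state()
# does below.
def _example_combine_states():
    combine = functools.partial(combine_states, ordering=vm_ordered_states)
    # The least advanced state wins ('STARTING' precedes 'STARTED').
    assert functools.reduce(combine, ['STARTED', 'STARTING']) == 'STARTING'
    # An unknown state short-circuits the reduction and is returned as-is.
    assert functools.reduce(combine, ['STARTED', 'ERROR']) == 'ERROR'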
def start_application(app):
"""Start up all stopped VMs in an application."""
for vm in app['vms']:
if vm['dynamicMetadata']['state'] == 'STOPPED':
env.api.start_vm(app, vm)
app = cache.get_application(app['id'], force_reload=True)
return app
def stop_application(app):
"""Stop all started VMs in an application."""
for vm in app['vms']:
if vm['dynamicMetadata']['state'] == 'STARTED':
env.api.stop_vm(app, vm)
app = cache.get_application(app['id'], force_reload=True)
return app
def get_application_state(app):
"""Return the state of an application.
The state is obtained by reducing the states of all the application VMs
using ``combine_states()``.
"""
vms = app.get('vms', [])
if not vms:
return 'DRAFT'
combine = functools.partial(combine_states, ordering=vm_ordered_states)
states = map(lambda vm: vm['dynamicMetadata']['state'] \
if vm.get('dynamicMetadata') else 'DRAFT', vms)
state = functools.reduce(combine, states)
return state
def get_blueprint_state(bp):
"""Return the state of a blueprint.
The state is obtained by reducing the states of all the blueprint VMs
using ``combine_states()``.
"""
vms = bp.get('vms', [])
if not vms:
return 'EMPTY'
combine = functools.partial(combine_states, ordering=bp_ordered_states)
states = map(lambda vm: vm['loadingStatus'], vms)
state = functools.reduce(combine, states)
return state
def new_application_name(template):
"""Return a new application name based on *template*."""
name = util.get_unused_name(template, cache.get_applications())
return name
def new_blueprint_name(template):
"""Return a new blueprint name based on *template*."""
name = util.get_unused_name(template, cache.get_blueprints())
return name
def wait_until_application_is_in_state(app, state, timeout=None,
poll_timeout=None):
"""Wait until an application is in a given state."""
if timeout is None:
timeout = 900
if poll_timeout is None:
poll_timeout = 10
end_time = time.time() + timeout
while True:
if time.time() > end_time:
break
poll_end_time = time.time() + poll_timeout
app = cache.get_application(app['id'], force_reload=True)
appstate = get_application_state(app)
if appstate == state:
return app
console.show_progress(appstate[0])
time.sleep(max(0, poll_end_time - time.time()))
error.raise_error("Application `{0}` did not reach state '{1}' within "
"{2} seconds.", app['name'], state, timeout)
return app
def wait_until_blueprint_is_in_state(bp, state, timeout=None,
poll_timeout=None):
"""Wait until a blueprint is in a given state."""
if timeout is None:
timeout = 300
if poll_timeout is None:
poll_timeout = 5
end_time = time.time() + timeout
while True:
if time.time() > end_time:
break
poll_end_time = time.time() + poll_timeout
bp = cache.get_blueprint(bp['id'], force_reload=True)
bpstate = get_blueprint_state(bp)
if bpstate == state:
return bp
time.sleep(max(0, poll_end_time - time.time()))
error.raise_error("Blueprint `{0}` did not reach state '{1}' within "
"{2} seconds.", bp['name'], state, timeout)
return bp
nb_connect_errors = set((errno.EINPROGRESS,))
if sys.platform.startswith('win'):
nb_connect_errors.add(errno.WSAEWOULDBLOCK)
def wait_until_application_accepts_ssh(app, vms, timeout=None,
poll_timeout=None):
"""Wait until an application is reachable by ssh.
An application is reachable by SSH if all the VMs that have a public key in
their userdata are connect()able on port 22.
"""
if timeout is None:
timeout = 300
if poll_timeout is None:
poll_timeout = 5
waitaddrs = set((vm['dynamicMetadata']['externalIp']
for vm in app['vms'] if vm['name'] in vms))
aliveaddrs = set()
end_time = time.time() + timeout
# For the intricate details on non-blocking connect()'s, see Stevens,
# UNIX network programming, volume 1, chapter 16.3 and following.
while True:
if time.time() > end_time:
break
waitfds = {}
for addr in waitaddrs:
sock = socket.socket()
sock.setblocking(False)
try:
sock.connect((addr, 22))
except socket.error as e:
if e.errno not in nb_connect_errors:
console.debug('connect(): errno {.errno}'.format(e))
continue
waitfds[sock.fileno()] = (sock, addr)
poll_end_time = time.time() + poll_timeout
while True:
timeout = poll_end_time - time.time()
if timeout < 0:
for fd in waitfds:
sock, _ = waitfds[fd]
sock.close()
break
try:
wfds = list(waitfds)
_, wfds, _ = select.select([], wfds, [], timeout)
except select.error as e:
if e.args[0] == errno.EINTR:
continue
console.debug('select(): errno {.errno}'.format(e))
raise
for fd in wfds:
assert fd in waitfds
sock, addr = waitfds[fd]
try:
err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
except socket.error as e:
err = e.errno
sock.close()
if not err:
aliveaddrs.add(addr)
waitaddrs.remove(addr)
del waitfds[fd]
if not waitfds:
break
if not waitaddrs:
return
console.show_progress('C') # 'C' = Connecting
time.sleep(max(0, poll_end_time - time.time()))
unreachable = set((vm['name'] for vm in app['vms']
if vm['dynamicMetadata']['externalIp'] in waitaddrs))
noun = inflect.plural_noun('VM', len(unreachable))
vmnames = '`{0}`'.format('`, `'.join(sorted(unreachable)))
error.raise_error('{0} `{1}` did not become reachable within {2} seconds.',
noun, vmnames, timeout)
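# Minimal sketch (not part of the original module): the non-blocking connect
# pattern used above, reduced to probing a single address on port 22. The real
# loop multiplexes many sockets through a single select() call.
def _example_probe_ssh(addr, timeout=5.0):
    sock = socket.socket()
    sock.setblocking(False)
    try:
        sock.connect((addr, 22))
    except socket.error as e:
        if e.errno not in nb_connect_errors:
            sock.close()
            return False
    # A non-blocking connect() completes in the background; select() marks the
    # socket writable once the handshake has finished, and SO_ERROR then tells
    # us whether it succeeded.
    _, writable, _ = select.select([], [sock], [], timeout)
    err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) if writable else None
    sock.close()
    return bool(writable) and err == 0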
vm_reuse_states = ['STARTED', 'STARTING', 'STOPPED', 'PUBLISHING']
def reuse_existing_application(appdef):
"""Try to re-use an existing application."""
candidates = []
pubkey = env.public_key
if appdef.get('blueprint'):
blueprint = cache.get_blueprint(name=appdef['blueprint'])
else:
blueprint = None
project = env.manifest['project']
for app in cache.get_applications():
parts = app['name'].split(':')
if len(parts) != 3:
continue
if parts[0] != project['name'] or parts[1] != appdef['name']:
continue
app = cache.get_application(app['id'])
vms = app.get('vms', [])
if not vms:
continue
state = get_application_state(app)
if state not in vm_reuse_states:
continue
if blueprint and blueprint['name'] != app.get('blueprintName'):
continue
vmsfound = []
for vmdef in appdef['vms']:
for vm in vms:
if vm['name'] == vmdef['name']:
break
if not blueprint:
image = cache.get_image(name=vmdef['image'])
if not image:
continue
if vm['shelfVmId'] != image['id']:
continue
userdata = vm.get('customVmConfigurationData', {})
keypair = userdata.get('keypair')
if keypair.get('id') != pubkey['id']:
continue
vmsfound.append(vmdef['name'])
if len(vmsfound) != len(appdef['vms']):
continue
candidates.append((state, app))
if not candidates:
return
candidates.sort(key=lambda x: vm_reuse_states.index(x[0]))
return candidates[0][1]
# Ravello OUI = 2C-C2-60
_ravello_base = 0x2cc260
def get_new_mac():
"""Allocate a new Mac address."""
if not hasattr(env, '_mac_base'):
start = _ravello_base
offset = struct.unpack('>i', '\x00' + os.urandom(3))[0]
env._mac_base = (start << 24) + offset
# Do not use a random Mac in the Ravello OUI range but instead use a
# random offset + sequential allocation. The range is too small for
# random Macs to have a small enough probability not to conflict.
mac = env._mac_base
env._mac_base += 1
# Why is a value of FF in the fourth byte considered invalid by the API?
    # As a workaround we wrap our 24-bit range a bit earlier.
    if (env._mac_base & 0xffffff) >= 0xfeffff:
        env._mac_base = _ravello_base << 24
parts = ['{0:02X}'.format((mac >> ((5-i)*8)) & 0xff) for i in range(6)]
mac = ':'.join(parts)
return mac
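# Illustrative sketch (hypothetical offset, not part of the original module):
# how a 48-bit integer in the Ravello OUI range is rendered as a MAC string by
# the byte-shifting expression above.
def _example_format_mac():
    value = (_ravello_base << 24) + 0x000102   # OUI 2C-C2-60 + offset 0x000102
    parts = ['{0:02X}'.format((value >> ((5-i)*8)) & 0xff) for i in range(6)]
    assert ':'.join(parts) == '2C:C2:60:00:01:02'
    return ':'.join(parts)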
def create_new_vm(vmdef):
image = cache.get_image(name=vmdef['image'])
image = copy.deepcopy(image)
vm = ravello.update_luids(image)
vm['name'] = vmdef['name']
vm['customVmConfigurationData'] = { 'keypair': env.public_key }
vm['hostname'] = [ vmdef['name'] ]
vm['numCpus'] = vmdef['smp']
vm['memorySize'] = { 'unit': 'MB', 'value': vmdef['memory'] }
vm.setdefault('suppliedServices', [])
for svcdef in vmdef.get('services', []):
if isinstance(svcdef, int):
port = str(svcdef)
svcdef = 'port-{0}'.format(svcdef)
else:
port = socket.getservbyname(svcdef)
svc = { 'globalService': True, 'id': ravello.random_luid(),
'ip': None, 'name': svcdef, 'portRange': port,
'protocol': 'ANY_OVER_TCP' }
vm['suppliedServices'].append({'baseService': svc})
# Set a fixed Mac. This way applications created from blueprints
# created from these VMs will have the same Mac.
# See also https://github.com/ravello/testmill/issues/15
conn = vm['networkConnections'][0]
conn['device']['mac'] = get_new_mac()
conn['device']['useAutomaticMac'] = False
return vm
def create_new_application(appdef, name_is_template=True):
"""Create a new application based on ``appdef``."""
if name_is_template:
project = env.manifest['project']
template = '{0}:{1}'.format(project['name'], appdef['name'])
name = util.get_unused_name(template, cache.get_applications())
else:
name = appdef['name']
app = { 'name': name }
bpname = appdef.get('blueprint')
if bpname:
blueprint = cache.get_blueprint(name=bpname)
app['blueprintId'] = blueprint['id']
else:
vms = []
for vmdef in appdef.get('vms', []):
vm = create_new_vm(vmdef)
vms.append(vm)
app['vms'] = vms
app = env.api.create_application(app)
app = cache.get_application(app['id']) # update cache
return app
def publish_application(app, cloud=None, region=None):
"""Publish the application ``app``."""
req = {}
if cloud is None:
cloud = 'AMAZON'
region = 'Virginia'
req = { 'prefferedCloud': cloud, # sic
'prefferedRegion': region }
env.api.publish_application(app, req)
app = cache.get_application(app['id'], force_reload=True)
return app
def remove_application(app):
"""Remove an application."""
env.api.remove_application(app)
cache.get_application(app['id'], force_reload=True)
def create_blueprint(bpname, app):
"""Create a new blueprint."""
bp = env.api.create_blueprint(bpname, app)
bp = cache.get_blueprint(bp['id'])
return bp
def remove_blueprint(bp):
"""Remove a blueprint."""
env.api.remove_blueprint(bp)
bp = cache.get_blueprint(bp['id'], force_reload=True) # expunge from cache
def appdef_from_app(app):
"""Turn an application back into ``appdef`` format."""
appdef = { 'id': app['id'] }
appdef['name'] = app['name']
appdef['description'] = app.get('description') or ''
appdef['state'] = get_application_state(app)
# Blueprints do not have cloud/regionName
if 'cloud' in app:
appdef['cloud'] = app['cloud']
if 'regionName' in app:
appdef['region'] = app['regionName']
vmdefs = appdef['vms'] = []
for vm in app.get('vms', []):
vmdef = { 'id': vm['id'] }
vmdef['name'] = vm['name']
vmdef['description'] = vm.get('description') or ''
vmdef['smp'] = vm['numCpus']
vmdef['memory'] = vm['memorySize']['value'] * \
(1024 if vm['memorySize']['unit'] == 'GB' else 1)
if vm.get('dynamicMetadata'):
# otherwise this is a blueprint or a draft app
vmdef['ip'] = vm['dynamicMetadata']['externalIp']
vmdef['state'] = vm['dynamicMetadata']['state']
svcdefs = vmdef['services'] = []
for svc in vm.get('suppliedServices', []):
svc = svc.get('baseService')
if not svc:
continue
svcdef = { 'name': svc['name'],
'port': int(svc['portRange'].split('-')[0]) }
svcdefs.append(svcdef)
vmdefs.append(vmdef)
return appdef
def create_or_reuse_application(appdef, force_new):
"""Create a new application or re-use a suitable existing one."""
app = None
if not force_new:
app = reuse_existing_application(appdef)
if app is not None:
state = get_application_state(app)
parts = app['name'].split(':')
console.info('Re-using {0} application `{1}:{2}`.',
state.lower(), parts[1], parts[2])
app = start_application(app)
if app is None:
app = create_new_application(appdef)
app = publish_application(app)
parts = app['name'].split(':')
console.info('Created new application `{1}:{2}`.', *parts)
console.info('Published to {0[cloud]}/{0[regionName]}.', app)
return app
def wait_for_application(app, vms, timeout=None):
"""Wait until an is UP and connectable over ssh."""
console.start_progressbar(textwrap.dedent("""\
Waiting until application is ready...
Progress: 'P' = Publishing, 'S' = Starting, 'C' = Connecting
===> """))
# XXX: At first boot cloud-init deploys our authorized keys file.
# This process can finish after ssh has started up. The images
# need to be fixed to ensure cloud-init has finished before ssh
# starts up.
state = get_application_state(app)
if timeout:
timeleft = max(120, timeout) # anything < 120 does not make sense
start = time.time()
else:
timeleft = None
if state == 'PUBLISHING':
if timeleft:
timeleft -= 30
# Fudge factor. When an application is first started up, ssh needs to
# create its ssh host keys. In theory this wait should not be necessary
        # as ssh binds to port 22 after creating the host keys. In practice,
        # something doesn't quite work out and it is needed. Needs more
# investigation to understand. For now, take the easy way..
extra_sleep = 30
else:
extra_sleep = 0
console.debug('State {0}, extra sleep {1}.', state, extra_sleep)
app = wait_until_application_is_in_state(app, 'STARTED', timeleft)
if timeleft:
timeleft = max(0, timeleft - (time.time()-start))
wait_until_application_accepts_ssh(app, vms, timeleft)
console.end_progressbar('DONE')
time.sleep(extra_sleep)
return app
def get_vm(app, vmname):
"""Return the VM ``vmname`` from application ``application``."""
for vm in app.get('vms', []):
if vm.get('name') == vmname:
return vm
def get_vms(app):
return app.get('vms', [])
def default_application(appname):
"""The default application loading function."""
parts = appname.split(':')
if len(parts) in (1, 2) and env.manifest is None:
error.raise_error('No manifest found ({0}).\n'
'Please specify the fully qualified app name.\n'
"Use 'ravtest ps -a' for a list.",
manifest.manifest_name())
if len(parts) in (1, 2):
project = env.manifest['project']['name']
console.info('Project name is `{0}`.', project)
defname = parts[0]
instance = parts[1] if len(parts) == 2 else None
elif len(parts) == 3:
project, defname, instance = parts
else:
error.raise_error('Illegal application name: `{0}`.', appname)
apps = cache.find_applications(project, defname, instance)
if len(apps) == 0:
error.raise_error('No instances of application `{0}` exist.',
defname)
elif len(apps) > 1:
error.raise_error('Multiple instances of `{0}` exist.\n'
'Use `ravtest ps` to list the instances and then\n'
'specify the application with its instance id.',
defname)
app = cache.get_application(apps[0]['id'])
return app
|
|
import copy
import intprim
import matplotlib.pyplot as plt
import matplotlib.animation
import numpy as np
import numpy.random
import sklearn.metrics
try:
import IPython.display
except:
pass
animation_plots = []
def create_2d_handwriting_data(num_trajectories, translation_mean, translation_std, noise_std, length_mean, length_std):
# A single example of a handwriting trajectory
xdata = np.array([
2.52147861, 2.68261873, 2.84009521, 2.99269205, 3.13926385,
3.27876056, 3.41025573, 3.5329778 , 3.64634321, 3.74998937,
3.8438048 , 3.92795314, 4.00288777, 4.0693539 , 4.12837543,
4.18122498, 4.22937664, 4.27444203, 4.31809201, 4.36196737,
4.40758299, 4.4562309 , 4.50888808, 4.56613502, 4.62809093,
4.69437067, 4.76406782, 4.83576665, 4.90758435, 4.97724312,
5.04216954, 5.099617 , 5.14680484, 5.18106677, 5.1999997 ,
5.20160394, 5.18440564, 5.14755368, 5.09088427, 5.01494897,
4.92100438, 4.8109641 , 4.68731662, 4.55301474, 4.41134412,
4.26577973, 4.11983926, 3.97694226, 3.84028296, 3.71272292,
3.59670796, 3.4942117 , 3.4067061 , 3.33515726, 3.28004369,
3.24139282, 3.21883106, 3.21164261, 3.21883242, 3.23918946,
3.27134723, 3.31383944, 3.36515007, 3.42375745, 3.48817336,
3.55697803, 3.62885243, 3.70260907, 3.77722187, 3.85185522,
3.92589153, 3.99895578, 4.07093474, 4.14198835, 4.21255021,
4.2833145 , 4.35520693, 4.42933819, 4.50693958, 4.5892814 ,
4.67757669, 4.7728736 , 4.87594169, 4.98715824, 5.10640159,
5.23295916, 5.36545793, 5.50182437, 5.63928031, 5.7743792 ,
5.90308534, 6.02089593, 6.12300271, 6.20448725, 6.26054043,
6.28669463, 6.27905489, 6.23451447, 6.15094057, 6.02731681
])
ydata = np.array([
2.60877965, 2.76485925, 2.91587601, 3.06074461, 3.19850088,
3.32832259, 3.44955237, 3.56172269, 3.66458245, 3.75812375,
3.84260667, 3.9185795 , 3.98689125, 4.04869382, 4.10543106,
4.1588132 , 4.21077576, 4.26342334, 4.31895999, 4.37960871,
4.44752397, 4.52470161, 4.61289081, 4.71351323, 4.82759375,
4.95570667, 5.09794052, 5.25388323, 5.42262803, 5.60279957,
5.79259769, 5.98985598, 6.19211079, 6.39667626, 6.60072087,
6.80134129, 6.99563046, 7.18073763, 7.35391969, 7.51258424,
7.6543261 , 7.77695956, 7.87854902, 7.95744025, 8.0122939 ,
8.0421214 , 8.0463223 , 8.0247204 , 7.97759496, 7.90570262,
7.81028529, 7.69306011, 7.55618819, 7.40222104, 7.23402506,
7.05468668, 6.86740265, 6.67536129, 6.48162182, 6.28899902,
6.09996034, 5.916542 , 5.74028898, 5.57222266, 5.41283782,
5.26212897, 5.11964415, 4.98456294, 4.85579367, 4.73208409,
4.61213865, 4.49473531, 4.37883468, 4.26367447, 4.14884334,
4.0343288 , 3.9205359 , 3.80827461, 3.69871613, 3.59332021,
3.49373739, 3.40169213, 3.31885379, 3.24670384, 3.18640788,
3.13870115, 3.10379544, 3.08131435, 3.07026211, 3.06902906,
3.07543489, 3.08680804, 3.10009753, 3.11201102, 3.11917145,
3.1182827 , 3.10629444, 3.08055594, 3.03894936, 2.97999426
])
new_data = []
basis_model = intprim.basis.GaussianModel(8, 0.1, ["X", "Y"])
# From this single example, create noisy demonstrations.
# Approximate the original data with a basis model so that we can sub/super sample it to create
# trajectories of different lengths while maintaining the same shape.
# Add 30 demonstrations which are generated from the writing sample
for demo in range(num_trajectories):
# Randomly generate a new length
demonstration_length = int(np.round(np.random.normal(length_mean, length_std)))
# Fit the single demonstration to the pre-defined basis model
domain = np.linspace(0, 1, xdata.shape[0], dtype = intprim.constants.DTYPE)
weights = basis_model.fit_basis_functions_linear_closed_form(domain, np.array([xdata, ydata]).T).T
# Resample a new trajectory from the basis model with the desired length
new_interaction = np.zeros((2, demonstration_length))
domain = np.linspace(0, 1, demonstration_length, dtype = intprim.constants.DTYPE)
for idx in range(demonstration_length):
new_interaction[:, idx] = basis_model.apply_coefficients(domain[idx], weights)
# Apply a random translation
new_interaction = (new_interaction.T + np.random.normal(translation_mean, translation_std)).T
new_interaction = np.random.normal(new_interaction, noise_std)
new_data.append(new_interaction)
return new_data
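# Illustrative sketch (hypothetical parameters, not part of the original demo):
# a typical call producing 30 noisy demonstrations of roughly 100 samples each,
# e.g.
#   training_trajectories = create_2d_handwriting_data(
#       num_trajectories=30, translation_mean=0.0, translation_std=0.25,
#       noise_std=0.02, length_mean=100, length_std=5)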
def train_model(primitive, training_trajectories):
for trajectory in training_trajectories:
primitive.compute_standardization(trajectory)
for trajectory in training_trajectories:
primitive.add_demonstration(trajectory)
return primitive
def get_phase_stats(training_trajectories):
phase_velocities = []
for trajectory in training_trajectories:
phase_velocities.append(1.0 / trajectory.shape[1])
return np.mean(phase_velocities), np.var(phase_velocities)
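# Illustrative sketch (hypothetical trajectory lengths, not part of the original
# demo): the phase velocity of a demonstration is simply 1 / length, so the
# statistics below come from trajectories of e.g. 50, 100 and 200 samples.
def _example_phase_stats():
    trajectories = [np.zeros((2, length)) for length in (50, 100, 200)]
    mean, var = get_phase_stats(trajectories)
    # mean = (1/50 + 1/100 + 1/200) / 3 = 0.035 / 3 ~= 0.0117
    return mean, var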
def get_observation_noise(basis_selector, basis_model, training_trajectories, bias):
for trajectory in training_trajectories:
basis_selector.add_demonstration(trajectory)
error = basis_selector.get_model_mse(basis_model, np.array(range(training_trajectories[0].shape[0])), 0.0, 1.0)
observation_noise = np.diag(error) * bias
observation_noise[0, 0] = 10000
return observation_noise
def animate_results(generated_data, observed_data, mean_data):
fig = plt.figure()
ax = plt.axes(xlim=(-5, 15), ylim=(-5, 15))
# plot_lines = [plt.plot([], [])[0] for _ in range(3)]
plot_lines = [
plt.plot([], [], "--", color = "#ff6a6a", label = "Generated", linewidth = 2.0)[0],
plt.plot([], [], color = "#6ba3ff", label = "Observed", linewidth = 2.0)[0],
plt.plot([], [], color = "#85d87f", label = "Mean")[0]
]
fig.suptitle('Probable trajectory')
def init():
plot_lines[0].set_data([], [])
plot_lines[1].set_data([], [])
plot_lines[2].set_data(mean_data[0], mean_data[1])
return plot_lines
def animate(i):
plot_lines[0].set_data(generated_data[i][0], generated_data[i][1])
plot_lines[1].set_data(observed_data[i][0], observed_data[i][1])
return plot_lines
anim = matplotlib.animation.FuncAnimation(fig, animate, init_func = init,
frames = len(generated_data), interval = 500, blit = True)
animation_plots.append(anim)
plt.legend(loc = "upper left")
plt.show()
def evaluate_trajectories(primitive, filter, test_trajectories, observation_noise, delay_prob = 0.0, delay_ratio = 0.0):
for test_trajectory in test_trajectories:
test_trajectory_partial = np.array(test_trajectory, copy = True)
test_trajectory_partial[0, :] = 0.0
new_filter = copy.deepcopy(filter)
primitive.set_filter(new_filter)
# all_gen_trajectories = []
# all_test_trajectories = []
mean_trajectory = primitive.get_mean_trajectory()
mean_mse = 0.0
phase_mae = 0.0
mse_count = 0
prev_observed_index = 0
for observed_index in range(8, test_trajectory.shape[1], 8):
gen_trajectory, phase, mean, var = primitive.generate_probable_trajectory_recursive(test_trajectory_partial[:, prev_observed_index:observed_index], observation_noise, np.array([1]), num_samples = test_trajectory_partial.shape[1] - observed_index)
mse = sklearn.metrics.mean_squared_error(test_trajectory[:, observed_index:], gen_trajectory)
mean_mse += mse
mse_count += 1
phase_mae += np.abs((float(observed_index) / test_trajectory.shape[1]) - phase)
if(delay_prob > 0.0 and np.random.binomial(1, delay_prob) == 1):
length = int(delay_ratio * test_trajectory.shape[1])
# Repeat the last observation for delay_ratio times.
delay_trajectory = np.tile(test_trajectory[:, observed_index - 1], (length, 1)).T
gen_trajectory, phase, mean, var = primitive.generate_probable_trajectory_recursive(delay_trajectory, observation_noise, np.array([1]), num_samples = test_trajectory_partial.shape[1] - observed_index)
mse = sklearn.metrics.mean_squared_error(test_trajectory[:, observed_index:], gen_trajectory)
mean_mse += mse
mse_count += 1
phase_mae += np.abs((float(observed_index) / test_trajectory.shape[1]) - phase)
# Plot the phase/phase velocity PDF for each time step? Want to show it for temporal non-linearity.
intprim.util.visualization.plot_partial_trajectory(gen_trajectory, test_trajectory[:, :observed_index], mean_trajectory)
# all_gen_trajectories.append(gen_trajectory)
# all_test_trajectories.append(test_trajectory[:, :observed_index])
prev_observed_index = observed_index
print("Mean DoF MSE: " + str(mean_mse / mse_count) + ". Phase MAE: " + str(phase_mae / mse_count))
# animate_results(all_gen_trajectories, all_test_trajectories, mean_trajectory)
|
|
# Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing various date parsing related utility functions.
All the functions for parsing dates in RFC3339 format have been optimized for performance since
they are part of a critical code path when ingesting Docker / Kubernetes logs.
Per micro benchmarks (benchmarks/micro/test_date_parsing.py), "udatetime" based implementations are
the fastest.
All the functions also support time formats in arbitrary timezones (aka non-UTC), but the
non-udatetime pure Python versions fall back on python-dateutil, which is rather slow. Luckily,
the common case is UTC, where we can skip that code path.
It's also worth noting that our custom implementations support some extended formats which are
not fully valid as per the RFC (e.g. a very long fractional part). Because of that, some
compatibility changes needed to be implemented for the udatetime implementation as well, and that
adds a small amount of overhead.
"""
from __future__ import absolute_import
if False: # NOSONAR
from typing import Optional
from typing import List
import re
import calendar
import datetime
# Work around a strptime race we see every now and then with the docker monitor run() method.
# That race would occur very rarely, since it depends on the order threads are started and when
# strptime is first called.
# See:
# 1. https://github.com/scalyr/scalyr-agent-2/pull/700#issuecomment-761676613
# 2. https://bugs.python.org/issue7980
import _strptime # NOQA
import six
from six.moves import map
try:
import udatetime
except ImportError:
# if udatetime is not available, we fall back to the second fastest approach for date parsing
# (string.split approach)
udatetime = None
try:
from dateutil.parser import isoparse # NOQA
except ImportError:
isoparse = None
if six.PY3:
# re.ASCII makes this regex only match ASCII digits which is tiny bit faster than the version
# without re.ASCII flag
RFC3339_STR_REGEX = re.compile(
r"(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})", re.ASCII
)
RFC3339_STR_NON_UTC_REGEX = re.compile(r"^.*[\+\-](\d{2}):(\d{2})$", re.ASCII)
else:
RFC3339_STR_REGEX = re.compile(r"(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2})")
RFC3339_STR_NON_UTC_REGEX = re.compile(r"^.*[\+\-](\d{2}):(\d{2})$")
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
TZ_UTC = UTC()
def _contains_non_utc_tz(string):
# type: (str) -> bool
"""
    Returns True if the provided RFC3339 string contains a non-UTC timezone.
"""
return bool(RFC3339_STR_NON_UTC_REGEX.match(string))
def _rfc3339_to_nanoseconds_since_epoch_string_split(string):
# type: (str) -> Optional[int]
"""
rfc3339_to_nanoseconds_since_epoch variation which utilizes string.split approach.
Returns nanoseconds from unix epoch from a rfc3339 formatted timestamp.
@param string: a date/time in rfc3339 format, e.g. 2015-08-03T09:12:43.143757463Z
@rtype int
"""
if _contains_non_utc_tz(string):
dt = _rfc3339_to_datetime_dateutil(string)
else:
# split the string in to main time and fractional component
parts = string.split(".")
# it's possible that the time does not have a fractional component
# e.g 2015-08-03T09:12:43Z, in this case 'parts' will only have a
# single element that should end in Z. Strip the Z if it exists
# so we can use the same format string for processing the main
# date+time regardless of whether the time has a fractional component.
if parts[0].endswith("Z"):
parts[0] = parts[0][:-1]
try:
dt = datetime.datetime(
*list(map(int, RFC3339_STR_REGEX.match(parts[0]).groups())) # type: ignore
)
except Exception:
return None
nano_seconds = (
calendar.timegm(
(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond)
)
* 1000000000
)
nanos = _get_fractional_nanos(value=string)
return nano_seconds + nanos
def _rfc3339_to_nanoseconds_since_epoch_dateutil(string):
# type: (str) -> Optional[int]
"""
Special version of rfc3339_to_nanoseconds_since_epoch which supports timezones and uses
dateutil library.
NOTE: python-dateutil is slow so using udatetime is preferred when timestamp is non-UTC.
"""
dt = _rfc3339_to_datetime_dateutil(string)
nano_seconds = (
calendar.timegm((dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second))
* 1000000000
)
nanos = _get_fractional_nanos(value=string)
return nano_seconds + nanos
def _rfc3339_to_nanoseconds_since_epoch_udatetime(string):
# type: (str) -> Optional[int]
"""
rfc3339_to_nanoseconds_since_epoch variation which utilizes udatetime library.
"""
original_string = string
string = _get_udatetime_safe_string(string)
try:
dt = udatetime.from_string(string)
except ValueError:
# For backward compatibility reasons with other functions we return None on edge cases
# (e.g. invalid format or similar). Not great.
return None
    # NOTE: udatetime supports tzinfo, but this function always returns non-timezone-aware
    # objects in UTC, so we perform the conversion here.
if dt.tzinfo and dt.tzinfo.offset != 0:
dt = dt.astimezone(TZ_UTC)
dt = dt.replace(tzinfo=None)
nano_seconds = (
calendar.timegm(
(dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond)
)
* 1000000000
)
nanos = _get_fractional_nanos(value=original_string)
return nano_seconds + nanos
def _rfc3339_to_datetime_string_split(string):
# type: (str) -> Optional[datetime.datetime]
"""
rfc3339_to_datetime variation which utilizes string.split approach.
Returns a date time from a rfc3339 formatted timestamp.
This doesn't do any complex testing and assumes the string is well formed and in UTC (e.g.
uses Z at the end rather than a time offset).
@param string: a date/time in rfc3339 format, e.g. 2015-08-03T09:12:43.143757463Z
@rtype datetime.datetime
"""
if _contains_non_utc_tz(string):
return _rfc3339_to_datetime_dateutil(string)
# split the string in to main time and fractional component
parts = string.split(".")
# it's possible that the time does not have a fractional component
# e.g 2015-08-03T09:12:43Z, in this case 'parts' will only have a
# single element that should end in Z. Strip the Z if it exists
# so we can use the same format string for processing the main
# date+time regardless of whether the time has a fractional component.
if parts[0].endswith("Z"):
parts[0] = parts[0][:-1]
# create a datetime object
try:
date_parts, time_parts = parts[0].split("T")
date_parts = date_parts.split("-") # type: ignore
time_parts = time_parts.split(":") # type: ignore
dt = datetime.datetime(
int(date_parts[0]),
int(date_parts[1]),
int(date_parts[2]),
int(time_parts[0]),
int(time_parts[1]),
int(time_parts[2]),
)
except Exception:
return None
dt = _add_fractional_part_to_dt(dt=dt, parts=parts)
return dt
def _rfc3339_to_datetime_dateutil(string):
# type: (str) -> Optional[datetime.datetime]
"""
Special version of rfc3339_to_datetime which supports timezones and uses dateutil library
underneath.
NOTE: Other functions which don't support timezones have been heavily optimized for performance
so using this function will have non trivial overhead.
"""
if not isoparse:
# Library not available, warning is already emitted on import (emitting it on each call
# could get very noisy)
return None
try:
return isoparse(string).astimezone(TZ_UTC).replace(tzinfo=None)
except Exception:
return None
def _rfc3339_to_datetime_udatetime(string):
# type: (str) -> Optional[datetime.datetime]
"""
rfc3339_to_datetime variation which utilizes udatetime library.
"""
# split the string in to main time and fractional component
parts = string.split(".")
string = _get_udatetime_safe_string(string)
try:
dt = udatetime.from_string(string)
except ValueError:
# For backward compatibility reasons with other functions we return None on edge cases
# (e.g. invalid format or similar). Not great.
return None
    # NOTE: udatetime supports tzinfo, but this function always returns non-timezone-aware
    # objects in UTC, so we perform the conversion here.
if dt.tzinfo and dt.tzinfo.offset != 0:
dt = dt.astimezone(TZ_UTC)
dt = dt.replace(tzinfo=None)
dt = _add_fractional_part_to_dt(dt=dt, parts=parts)
return dt
def _get_udatetime_safe_string(string):
# type: (str) -> str
"""
Function which returns RFC3339 string which can be safely passed to udatetime.
    udatetime doesn't support values with a very long fractional part, but our custom
    implementation does.
To work around that, we pass date + time + timezone string to udatetime to handle the parsing
and then handle fractional part (nanoseconds) ourselves.
"""
# split the string in to main time and fractional component
parts = string.split(".")
if len(parts) > 1:
if parts[1].endswith("Z"):
# UTC, string ends with Z
return parts[0]
elif "+" in parts[1] or "-" in parts[1]:
# Custom timezone (e.g. -08:00), we strip it and move it to the string which we parse
# to udatetime. This way udatetime handles time zone conversion and we handle
# nanoseconds manually (since udatetime doesn't support non valid dates with large
# fractional parts)
tz_str = parts[1][-6:]
return parts[0] + tz_str
return string
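# Illustrative sketch (not part of the original module): the two non-trivial
# inputs handled by _get_udatetime_safe_string() above.
def _example_udatetime_safe_strings():
    # type: () -> None
    assert (
        _get_udatetime_safe_string("2015-08-03T09:12:43.143757463Z")
        == "2015-08-03T09:12:43"
    )
    assert (
        _get_udatetime_safe_string("2015-08-03T09:12:43.143757463-08:00")
        == "2015-08-03T09:12:43-08:00"
    )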
def _add_fractional_part_to_dt(dt, parts):
# type: (datetime.datetime, List[str]) -> datetime.datetime
"""
Add fractional part (if any) to the provided datetime object.
"""
if len(parts) < 2:
# No fractional component
return dt
fractions = parts[1]
# strip the tzinfo
if fractions.endswith("Z"):
# in UTC, with Z at the end
fractions = fractions[:-1]
elif "-" not in fractions and "+" not in fractions:
# in UTC, without Z at the end (nothing to strip)
pass
else:
# Custom timezone offset, e.g. -08:00
fractions = fractions[:-6]
to_micros = 6 - len(fractions)
micro = int(int(fractions) * 10**to_micros)
dt = dt.replace(microsecond=micro)
return dt
def _get_fractional_nanos(value):
# type: (str) -> int
"""
    Return nanoseconds (if any) for the fractional part of the provided RFC3339 date string.
"""
parts = value.split(".")
if len(parts) < 2:
return 0
fractions = parts[1]
# strip the tzinfo
if fractions.endswith("Z"):
# in UTC, with Z at the end
fractions = fractions[:-1]
elif "-" not in fractions and "+" not in fractions:
# in UTC, without Z at the end (nothing to strip)
pass
else:
# Custom timezone offset, e.g. -08:00
fractions = fractions[:-6]
to_nanos = 9 - len(fractions)
nanos = int(int(fractions) * 10**to_nanos)
return nanos
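# Illustrative sketch (not part of the original module): how a 9 digit
# fractional part is scaled by the two helpers above.
def _example_fractional_scaling():
    # type: () -> None
    value = "2015-08-03T09:12:43.143757463Z"
    # 9 digits -> multiplied by 10 ** (9 - 9), i.e. used as-is for nanoseconds.
    assert _get_fractional_nanos(value) == 143757463
    # The same digits are truncated down to 6 for the datetime microsecond field.
    dt = datetime.datetime(2015, 8, 3, 9, 12, 43)
    assert _add_fractional_part_to_dt(dt, value.split(".")).microsecond == 143757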
if udatetime:
rfc3339_to_nanoseconds_since_epoch = _rfc3339_to_nanoseconds_since_epoch_udatetime
rfc3339_to_datetime = _rfc3339_to_datetime_udatetime
else:
rfc3339_to_nanoseconds_since_epoch = (
_rfc3339_to_nanoseconds_since_epoch_string_split
)
rfc3339_to_datetime = _rfc3339_to_datetime_string_split
|
|
################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
#!/usr/bin/env python
"""
SALTWEATHER updates the SDB database with weather information
from the ELS database.
+-------------------+------------------+------+-----+---------+-------+
| Field | Type | Null | Key | Default | Extra |
+-------------------+------------------+------+-----+---------+-------+
| Weather_Time | datetime | NO | PRI | | |
| NightInfo_Id | int(10) unsigned | YES | MUL | NULL | x |
| TemperatureInside | float | YES | | NULL | |
| Temperature2m | float | YES | | NULL | x |
| Temperature30m | float | YES | | NULL | x |
| WindSpeed | float unsigned | YES | | NULL | x |
| WindDirection | float unsigned | YES | | NULL | x |
| DewPointInside | float | YES | | NULL | |
| DewPointOutside | float | YES | | NULL | x |
| AirPressure | float unsigned | YES | | NULL | x |
| RelativeHumidty | float unsigned | YES | | NULL | |
| Rain | tinyint(1) | YES | | NULL | |
+-------------------+------------------+------+-----+---------+-------+
Author Version Date
-----------------------------------------------
S M Crawford (SAAO) 0.1 14 Aug 2013
UPDATES
------------------------------------------------
"""
# Ensure python 2.5 compatibility
from __future__ import with_statement
import string, datetime, time
import struct
import numpy as np
import saltsafeio as saltio
import saltsafemysql as saltmysql
from salterror import SaltError, SaltIOError
# -----------------------------------------------------------
# core routine
def saltweather(weathertime, timespan, elshost, elsname, elsuser, elspass,
sdbhost,sdbname,sdbuser, password):
print weathertime
#open the database
els=saltmysql.connectdb(elshost, elsname, elsuser, elspass)
sdb=saltmysql.connectdb(sdbhost,sdbname,sdbuser, password)
#determine the obsdate
obsdate=weathertime-datetime.timedelta(seconds=43200)
obsdate='%i%s%s' % (obsdate.year, string.zfill(obsdate.month,2), string.zfill(obsdate.day,2))
nid=saltmysql.getnightinfoid(sdb, obsdate)
print nid
#get the most recent weather data
airpressure, dewout, extrelhum, temp2m, temp30m, windspeed, winddir, rain= getweatherdata(els, weathertime, timespan)
dewin, relhum=getinsidedata(els, weathertime, timespan)
tempin=getacdata(els, weathertime, timespan)
#upload that to the sdb
upcmd="Weather_Time='%s', NightInfo_Id=%i" % (weathertime, nid)
if tempin is not None: upcmd+=',TemperatureInside=%4.2f' % tempin
if temp2m is not None: upcmd+=',Temperature2m=%4.2f' % temp2m
if temp30m is not None: upcmd+=',Temperature30m=%4.2f' % temp30m
if windspeed is not None: upcmd+=',WindSpeed=%5.2f' % windspeed
if winddir is not None: upcmd+=',WindDirection=%5.2f' % winddir
if dewin is not None: upcmd+=',DewPointInside=%3.2f' % dewin
if dewout is not None: upcmd+=',DewPointOutside=%3.2f' % dewout
if airpressure is not None: upcmd+=',AirPressure=%4.2f' % airpressure
if relhum is not None: upcmd+=',RelativeHumidty=%4.2f' % relhum
if extrelhum is not None: upcmd+=',ExternalRelativeHumidity=%4.2f' % extrelhum
if rain is not None: upcmd+=',Rain=%i' % rain
saltmysql.insert(sdb, upcmd, 'Weather')
print upcmd
return
def getacdata(els, weathertime, timespan):
"""Determien the AC temperature
weathertime should be SAST
"""
#mktime converts weather time into seconds from UNIX epoch
#7200 converts to UT
#2082852000 converts from Unix Epoch to Labview Epoch of 1 Jan 1904
etime=time.mktime(weathertime.timetuple())+2082852000-7200
stime=etime-timespan
    #now extract weather information from the els
sel_cmd='AVG(timestamp), AVG(actual_a_c_temperature)'
tab_cmd='bms_status'
log_cmd="timestamp>'%s' and timestamp<'%s'" % (stime, etime)
wea_rec=saltmysql.select(els, sel_cmd, tab_cmd, log_cmd)
if len(wea_rec)<1: return None
return wea_rec[0][1]
def getinsidedata(els, weathertime, timespan):
"""Creates the inside data from the els database
Weathertime should be in SAST
"""
#mktime converts weather time into seconds from UNIX epoch
#7200 converts to UT
#2082852000 converts from Unix Epoch to Labview Epoch of 1 Jan 1904
etime=time.mktime(weathertime.timetuple())+2082852000-7200
stime=etime-timespan
    #now extract weather information from the els
sel_cmd='AVG(timestamp), AVG(dew_point), AVG(rel_humidity)'
tab_cmd='bms_internal_conditions'
log_cmd="timestamp>'%s' and timestamp<'%s'" % (stime, etime)
wea_rec=saltmysql.select(els, sel_cmd, tab_cmd, log_cmd)
if len(wea_rec)<1: return None, None
return wea_rec[0][1], wea_rec[0][2]
def getweatherdata(els, weathertime, timespan):
"""Creates the weather table from the data in the els
"""
etime=time.mktime(weathertime.timetuple())+2082852000-7200
stime=etime-timespan
    #now extract weather information from the els
sel_cmd='timestamp, air_pressure, dewpoint, rel_humidity, wind_mag_30m, wind_dir_30m, wind_mag_10m, wind_dir_10m, temperatures, rain_detected'
tab_cmd='bms_external_conditions'
log_cmd="timestamp>'%s' and timestamp<'%s'" % (stime, etime)
wea_rec=saltmysql.select(els, sel_cmd, tab_cmd, log_cmd)
if len(wea_rec)<1: return 8*[None]
time_list=[]
air_arr=np.zeros(len(wea_rec))
dew_arr=np.zeros(len(wea_rec))
hum_arr=np.zeros(len(wea_rec))
w30_arr=np.zeros(len(wea_rec))
w30d_arr=np.zeros(len(wea_rec))
w10_arr=np.zeros(len(wea_rec))
w10d_arr=np.zeros(len(wea_rec))
rain_list=[]
t02_arr=np.zeros(len(wea_rec))
t05_arr=np.zeros(len(wea_rec))
t10_arr=np.zeros(len(wea_rec))
t15_arr=np.zeros(len(wea_rec))
t20_arr=np.zeros(len(wea_rec))
t25_arr=np.zeros(len(wea_rec))
t30_arr=np.zeros(len(wea_rec))
for i in range(len(wea_rec)):
time_list.append(str(wea_rec[i][0]))
air_arr[i]=wea_rec[i][1]
dew_arr[i]=wea_rec[i][2]
hum_arr[i]=wea_rec[i][3]
w30_arr[i]=wea_rec[i][4]
w30d_arr[i]=wea_rec[i][5]
w10_arr[i]=wea_rec[i][6]
w10d_arr[i]=wea_rec[i][7]
rain_list.append(wea_rec[i][9])
t_arr=converttemperature(wea_rec[i][8])
t02_arr[i]=t_arr[0]
t05_arr[i]=t_arr[1]
t10_arr[i]=t_arr[2]
t15_arr[i]=t_arr[3]
t20_arr[i]=t_arr[4]
t25_arr[i]=t_arr[5]
t30_arr[i]=t_arr[6]
#average the wind direction by taking the arctan of the average of the sin and cos
wdir=np.degrees(np.arctan2(np.sin(np.radians(w30d_arr)).mean(),np.cos(np.radians(w30d_arr)).mean()))
if wdir<0: wdir+=360
#determine if there was any rain during the period
rain=0
if 'T' in rain_list: rain=1
return air_arr.mean(), dew_arr.mean(), hum_arr.mean(), t02_arr.mean(), t30_arr.mean(), w30_arr.mean(), wdir, rain
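def winddir_circular_mean(winddir_deg):
    """Illustrative sketch (not part of the original task): the circular mean
    used in getweatherdata above, so that e.g. [350, 10] averages to ~0/360
    degrees rather than the arithmetic mean of 180.
    """
    w_arr=np.radians(np.asarray(winddir_deg, dtype=float))
    wdir=np.degrees(np.arctan2(np.sin(w_arr).mean(), np.cos(w_arr).mean()))
    if wdir<0: wdir+=360
    return wdir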
def converttemperature(tstruct, nelements=7):
t_arr=np.zeros(nelements)
for i in range(nelements):
t_arr[i]=float(struct.unpack('>d', tstruct[4+8*i:4+8*(i+1)])[0])
return t_arr
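def example_temperature_blob():
    """Illustrative sketch (hypothetical data, not part of the original task):
    the temperature blob decoded by converttemperature is a 4 byte prefix
    (skipped above) followed by seven big-endian doubles for the 2m, 5m, 10m,
    15m, 20m, 25m and 30m sensors.
    """
    values=[12.5, 12.1, 11.8, 11.5, 11.2, 10.9, 10.6]
    blob='\x00'*4 + ''.join([struct.pack('>d', v) for v in values])
    return converttemperature(blob)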
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as const
from neutron_lib.plugins.ml2 import api
class TestMechanismDriver(api.MechanismDriver):
"""Test mechanism driver for testing mechanism driver api."""
def initialize(self):
self.bound_ports = set()
def _check_network_context(self, context, original_expected):
assert(isinstance(context, api.NetworkContext))
assert(isinstance(context.current, dict))
assert(context.current['id'] is not None)
if original_expected:
assert(isinstance(context.original, dict))
assert(context.current['id'] == context.original['id'])
else:
assert(not context.original)
def create_network_precommit(self, context):
self._check_network_context(context, False)
def create_network_postcommit(self, context):
self._check_network_context(context, False)
def update_network_precommit(self, context):
self._check_network_context(context, True)
def update_network_postcommit(self, context):
self._check_network_context(context, True)
def delete_network_precommit(self, context):
self._check_network_context(context, False)
def delete_network_postcommit(self, context):
self._check_network_context(context, False)
def _check_subnet_context(self, context, original_expected):
assert(isinstance(context, api.SubnetContext))
assert(isinstance(context.current, dict))
assert(context.current['id'] is not None)
if original_expected:
assert(isinstance(context.original, dict))
assert(context.current['id'] == context.original['id'])
else:
assert(not context.original)
network_context = context.network
assert(isinstance(network_context, api.NetworkContext))
self._check_network_context(network_context, False)
def create_subnet_precommit(self, context):
self._check_subnet_context(context, False)
def create_subnet_postcommit(self, context):
self._check_subnet_context(context, False)
def update_subnet_precommit(self, context):
self._check_subnet_context(context, True)
def update_subnet_postcommit(self, context):
self._check_subnet_context(context, True)
def delete_subnet_precommit(self, context):
self._check_subnet_context(context, False)
def delete_subnet_postcommit(self, context):
self._check_subnet_context(context, False)
def _check_port_context(self, context, original_expected):
assert(isinstance(context, api.PortContext))
self._check_port_info(context.current, context.host,
context.vif_type, context.vif_details)
if context.vif_type in (portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED):
if (context.segments_to_bind and
context.segments_to_bind[0][api.NETWORK_TYPE] == 'vlan'):
# Partially bound.
self._check_bound(context.binding_levels,
context.top_bound_segment,
context.bottom_bound_segment)
else:
self._check_unbound(context.binding_levels,
context.top_bound_segment,
context.bottom_bound_segment)
assert((context.current['id'], context.host)
not in self.bound_ports)
else:
self._check_bound(context.binding_levels,
context.top_bound_segment,
context.bottom_bound_segment)
assert((context.current['id'], context.host) in self.bound_ports)
if original_expected:
self._check_port_info(context.original, context.original_host,
context.original_vif_type,
context.original_vif_details)
assert(context.current['id'] == context.original['id'])
if (context.original_vif_type in
(portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED)):
self._check_unbound(context.original_binding_levels,
context.original_top_bound_segment,
context.original_bottom_bound_segment)
else:
self._check_bound(context.original_binding_levels,
context.original_top_bound_segment,
context.original_bottom_bound_segment)
else:
assert(context.original is None)
assert(context.original_host is None)
assert(context.original_vif_type is None)
assert(context.original_vif_details is None)
assert(context.original_status is None)
self._check_unbound(context.original_binding_levels,
context.original_top_bound_segment,
context.original_bottom_bound_segment)
network_context = context.network
assert(isinstance(network_context, api.NetworkContext))
self._check_network_context(network_context, False)
def _check_port_info(self, port, host, vif_type, vif_details):
assert(isinstance(port, dict))
assert(port['id'] is not None)
assert(vif_type in (portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED,
portbindings.VIF_TYPE_DISTRIBUTED,
portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_BRIDGE))
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
assert(port[portbindings.HOST_ID] == '')
assert(port[portbindings.VIF_TYPE] ==
portbindings.VIF_TYPE_DISTRIBUTED)
assert(port[portbindings.VIF_DETAILS] == {})
else:
assert(port[portbindings.HOST_ID] == host)
assert(port[portbindings.VIF_TYPE] !=
portbindings.VIF_TYPE_DISTRIBUTED)
assert(port[portbindings.VIF_TYPE] == vif_type)
assert(isinstance(vif_details, dict))
assert(port[portbindings.VIF_DETAILS] == vif_details)
def _check_unbound(self, levels, top_segment, bottom_segment):
assert(levels is None)
assert(top_segment is None)
assert(bottom_segment is None)
def _check_bound(self, levels, top_segment, bottom_segment):
assert(isinstance(levels, list))
top_level = levels[0]
assert(isinstance(top_level, dict))
assert(isinstance(top_segment, dict))
assert(top_segment == top_level[api.BOUND_SEGMENT])
assert('test' == top_level[api.BOUND_DRIVER])
bottom_level = levels[-1]
assert(isinstance(bottom_level, dict))
assert(isinstance(bottom_segment, dict))
assert(bottom_segment == bottom_level[api.BOUND_SEGMENT])
assert('test' == bottom_level[api.BOUND_DRIVER])
def create_port_precommit(self, context):
self._check_port_context(context, False)
def create_port_postcommit(self, context):
self._check_port_context(context, False)
def update_port_precommit(self, context):
if ((context.original_top_bound_segment and
not context.top_bound_segment) or
(context.host == "host-fail")):
self.bound_ports.remove((context.original['id'],
context.original_host))
self._check_port_context(context, True)
def update_port_postcommit(self, context):
self._check_port_context(context, True)
def delete_port_precommit(self, context):
self._check_port_context(context, False)
def delete_port_postcommit(self, context):
self._check_port_context(context, False)
def bind_port(self, context):
self._check_port_context(context, False)
host = context.host
segment = context.segments_to_bind[0]
segment_id = segment[api.ID]
if host == "host-ovs-no_filter":
context.set_binding(segment_id, portbindings.VIF_TYPE_OVS,
{portbindings.CAP_PORT_FILTER: False})
self.bound_ports.add((context.current['id'], host))
elif host == "host-bridge-filter":
context.set_binding(segment_id, portbindings.VIF_TYPE_BRIDGE,
{portbindings.CAP_PORT_FILTER: True})
self.bound_ports.add((context.current['id'], host))
elif host == "host-ovs-filter-active":
context.set_binding(segment_id, portbindings.VIF_TYPE_OVS,
{portbindings.CAP_PORT_FILTER: True},
status=const.PORT_STATUS_ACTIVE)
self.bound_ports.add((context.current['id'], host))
elif host == "host-hierarchical":
segment_type = segment[api.NETWORK_TYPE]
if segment_type == 'local':
next_segment = context.allocate_dynamic_segment(
{api.NETWORK_TYPE: 'vlan',
api.PHYSICAL_NETWORK: 'physnet1'}
)
context.continue_binding(segment_id, [next_segment])
elif segment_type == 'vlan':
context.set_binding(segment_id,
portbindings.VIF_TYPE_OVS,
{portbindings.CAP_PORT_FILTER: False})
self.bound_ports.add((context.current['id'], host))
elif host == "host-fail":
context.set_binding(None,
portbindings.VIF_TYPE_BINDING_FAILED,
{portbindings.CAP_PORT_FILTER: False})
self.bound_ports.add((context.current['id'], host))
def filter_hosts_with_segment_access(
self, context, segments, candidate_hosts, agent_getter):
return set()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import re
import sys
from telemetry.core import exceptions
from telemetry.core.platform.tracing_agent import chrome_tracing_agent
from telemetry import decorators
from telemetry.internal.backends.chrome_inspector import devtools_http
from telemetry.internal.backends.chrome_inspector import inspector_backend
from telemetry.internal.backends.chrome_inspector import tracing_backend
from telemetry.timeline import trace_data as trace_data_module
class TabNotFoundError(exceptions.Error):
pass
def IsDevToolsAgentAvailable(port):
"""Returns True if a DevTools agent is available on the given port."""
devtools_http_instance = devtools_http.DevToolsHttp(port)
try:
    return _IsDevToolsAgentAvailable(devtools_http_instance)
finally:
devtools_http_instance.Disconnect()
# TODO(nednguyen): Find a more reliable way to check whether the devtool agent
# is still alive.
def _IsDevToolsAgentAvailable(devtools_http_instance):
try:
devtools_http_instance.Request('')
except devtools_http.DevToolsClientConnectionError:
return False
else:
return True
class DevToolsClientBackend(object):
"""An object that communicates with Chrome's devtools.
This class owns a map of InspectorBackends. It is responsible for creating
them and destroying them.
"""
def __init__(self, devtools_port, remote_devtools_port, app_backend):
"""Creates a new DevToolsClientBackend.
A DevTools agent must exist on the given devtools_port.
Args:
devtools_port: The port to use to connect to DevTools agent.
      remote_devtools_port: In some cases (e.g., an app running on an
          Android device), devtools_port is the forwarded port on the
host platform. We also need to know the remote_devtools_port
so that we can uniquely identify the DevTools agent.
app_backend: For the app that contains the DevTools agent.
"""
self._devtools_port = devtools_port
self._remote_devtools_port = remote_devtools_port
self._devtools_http = devtools_http.DevToolsHttp(devtools_port)
self._tracing_backend = None
self._app_backend = app_backend
self._devtools_context_map_backend = _DevToolsContextMapBackend(
self._app_backend, self)
chrome_tracing_agent.ChromeTracingAgent.RegisterDevToolsClient(
self, self._app_backend.platform_backend)
@property
def remote_port(self):
return self._remote_devtools_port
def IsAlive(self):
"""Whether the DevTools server is available and connectable."""
return _IsDevToolsAgentAvailable(self._devtools_http)
def Close(self):
if self._tracing_backend:
self._tracing_backend.Close()
self._tracing_backend = None
@decorators.Cache
def GetChromeBranchNumber(self):
# Detect version information.
resp = self._devtools_http.RequestJson('version')
if 'Protocol-Version' in resp:
if 'Browser' in resp:
branch_number_match = re.search(r'Chrome/\d+\.\d+\.(\d+)\.\d+',
resp['Browser'])
else:
branch_number_match = re.search(
r'Chrome/\d+\.\d+\.(\d+)\.\d+ (Mobile )?Safari',
resp['User-Agent'])
if branch_number_match:
branch_number = int(branch_number_match.group(1))
if branch_number:
return branch_number
# Branch number can't be determined, so fail any branch number checks.
return 0
def _ListInspectableContexts(self):
return self._devtools_http.RequestJson('')
def CreateNewTab(self, timeout):
"""Creates a new tab.
Raises:
devtools_http.DevToolsClientConnectionError
"""
self._devtools_http.Request('new', timeout=timeout)
def CloseTab(self, tab_id, timeout):
"""Closes the tab with the given id.
Raises:
devtools_http.DevToolsClientConnectionError
TabNotFoundError
"""
try:
return self._devtools_http.Request('close/%s' % tab_id,
timeout=timeout)
except devtools_http.DevToolsClientUrlError:
error = TabNotFoundError(
'Unable to close tab, tab id not found: %s' % tab_id)
raise error, None, sys.exc_info()[2]
def ActivateTab(self, tab_id, timeout):
"""Activates the tab with the given id.
Raises:
devtools_http.DevToolsClientConnectionError
TabNotFoundError
"""
try:
return self._devtools_http.Request('activate/%s' % tab_id,
timeout=timeout)
except devtools_http.DevToolsClientUrlError:
error = TabNotFoundError(
'Unable to activate tab, tab id not found: %s' % tab_id)
raise error, None, sys.exc_info()[2]
def GetUrl(self, tab_id):
"""Returns the URL of the tab with |tab_id|, as reported by devtools.
Raises:
devtools_http.DevToolsClientConnectionError
"""
for c in self._ListInspectableContexts():
if c['id'] == tab_id:
return c['url']
return None
def IsInspectable(self, tab_id):
"""Whether the tab with |tab_id| is inspectable, as reported by devtools.
Raises:
devtools_http.DevToolsClientConnectionError
"""
contexts = self._ListInspectableContexts()
return tab_id in [c['id'] for c in contexts]
def GetUpdatedInspectableContexts(self):
"""Returns an updated instance of _DevToolsContextMapBackend."""
contexts = self._ListInspectableContexts()
self._devtools_context_map_backend._Update(contexts)
return self._devtools_context_map_backend
def _CreateTracingBackendIfNeeded(self):
if not self._tracing_backend:
self._tracing_backend = tracing_backend.TracingBackend(
self._devtools_port)
def IsChromeTracingSupported(self):
self._CreateTracingBackendIfNeeded()
return self._tracing_backend.IsTracingSupported()
def StartChromeTracing(
self, trace_options, custom_categories=None, timeout=10):
"""
Args:
      trace_options: A tracing_options.TracingOptions instance.
custom_categories: An optional string containing a list of
comma separated categories that will be traced
instead of the default category set. Example: use
"webkit,cc,disabled-by-default-cc.debug" to trace only
those three event categories.
"""
assert trace_options and trace_options.enable_chrome_trace
self._CreateTracingBackendIfNeeded()
return self._tracing_backend.StartTracing(
trace_options, custom_categories, timeout)
def StopChromeTracing(self, trace_data_builder, timeout=30):
context_map = self.GetUpdatedInspectableContexts()
for context in context_map.contexts:
if context['type'] not in ['iframe', 'page', 'webview']:
continue
context_id = context['id']
backend = context_map.GetInspectorBackend(context_id)
success = backend.EvaluateJavaScript(
"console.time('" + backend.id + "');" +
"console.timeEnd('" + backend.id + "');" +
"console.time.toString().indexOf('[native code]') != -1;")
if not success:
raise Exception('Page stomped on console.time')
trace_data_builder.AddEventsTo(
trace_data_module.TAB_ID_PART, [backend.id])
assert self._tracing_backend
return self._tracing_backend.StopTracing(trace_data_builder, timeout)
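# Hedged usage sketch (illustrative only, not used elsewhere in this module):
# listing the URLs of inspectable pages through a constructed
# DevToolsClientBackend instance passed in as `devtools_client`.
def _ExampleListInspectablePageUrls(devtools_client):
  context_map = devtools_client.GetUpdatedInspectableContexts()
  return [c['url'] for c in context_map.contexts if c['type'] == 'page']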
class _DevToolsContextMapBackend(object):
def __init__(self, app_backend, devtools_client):
self._app_backend = app_backend
self._devtools_client = devtools_client
self._contexts = None
self._inspector_backends_dict = {}
@property
def contexts(self):
"""The most up to date contexts data.
Returned in the order returned by devtools agent."""
return self._contexts
def GetContextInfo(self, context_id):
for context in self._contexts:
if context['id'] == context_id:
return context
raise KeyError('Cannot find a context with id=%s' % context_id)
def GetInspectorBackend(self, context_id):
"""Gets an InspectorBackend instance for the given context_id.
This lazily creates InspectorBackend for the context_id if it does
not exist yet. Otherwise, it will return the cached instance."""
if context_id in self._inspector_backends_dict:
return self._inspector_backends_dict[context_id]
for context in self._contexts:
if context['id'] == context_id:
new_backend = inspector_backend.InspectorBackend(
self._app_backend.app, self._devtools_client, context)
self._inspector_backends_dict[context_id] = new_backend
return new_backend
raise KeyError('Cannot find a context with id=%s' % context_id)
def _Update(self, contexts):
# Remove InspectorBackend that is not in the current inspectable
# contexts list.
context_ids = [context['id'] for context in contexts]
for context_id in self._inspector_backends_dict.keys():
if context_id not in context_ids:
backend = self._inspector_backends_dict[context_id]
backend.Disconnect()
del self._inspector_backends_dict[context_id]
valid_contexts = []
for context in contexts:
# If the context does not have webSocketDebuggerUrl, skip it.
# If an InspectorBackend is already created for the tab,
# webSocketDebuggerUrl will be missing, and this is expected.
context_id = context['id']
if context_id not in self._inspector_backends_dict:
if 'webSocketDebuggerUrl' not in context:
logging.debug('webSocketDebuggerUrl missing, removing %s'
% context_id)
continue
valid_contexts.append(context)
self._contexts = valid_contexts
|
|
from __future__ import unicode_literals
import django.test
import django.test.utils
import django.test.client
from django.conf import settings
import django.contrib.auth.models
import django.core.mail
import logging
from test_models import (UnitTestCompany, UnitTestUser)
import test_admin
logging.disable(logging.CRITICAL)
MIDDLEWARE_CLASSES_NO_DEBUG_TOOLBAR = list(settings.MIDDLEWARE)
if 'debug_toolbar.middleware.DebugToolbarMiddleware' in MIDDLEWARE_CLASSES_NO_DEBUG_TOOLBAR:
MIDDLEWARE_CLASSES_NO_DEBUG_TOOLBAR.remove('debug_toolbar.middleware.DebugToolbarMiddleware')
INSTALLED_APPS_NO_DEBUG_TOOLBAR = list(settings.INSTALLED_APPS)
if 'debug_toolbar' in INSTALLED_APPS_NO_DEBUG_TOOLBAR:
INSTALLED_APPS_NO_DEBUG_TOOLBAR.remove('debug_toolbar')
@django.test.utils.override_settings(
MIDDLEWARE=MIDDLEWARE_CLASSES_NO_DEBUG_TOOLBAR,
INSTALLED_APPS=INSTALLED_APPS_NO_DEBUG_TOOLBAR,
AUTH_USER_MODEL='accountsplus.UnitTestUser',
ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class MasqueradeStartTestCase(django.test.TestCase):
urls = 'accountsplus.tests.test_urls'
@classmethod
def setUpTestData(cls):
company_1 = UnitTestCompany.objects.create(name='Example')
company_2 = UnitTestCompany.objects.create(name='Other Company')
superuser = UnitTestUser.objects.create_superuser(
email='[email protected]', password='password', first_name='Super', last_name='User')
superuser.company = company_1
superuser.save()
staffuser = UnitTestUser.objects.create_user(
email='[email protected]', password='password', first_name='Staff', last_name='User')
staffuser.is_staff = True
staffuser.company = company_1
staffuser.save()
regular_user = UnitTestUser.objects.create_user(
email='[email protected]', password='password', first_name='Regular', last_name='User')
regular_user.company = company_1
regular_user.save()
superuser = UnitTestUser.objects.create_superuser(
email='[email protected]', password='password', first_name='Super', last_name='User 2')
superuser.company = company_1
superuser.save()
group = django.contrib.auth.models.Group.objects.create(name='Masquerade')
permission_masquerade = django.contrib.auth.models.Permission.objects.get(codename='masquerade')
group.permissions.add(permission_masquerade)
def setUp(self):
self.group_masquerade = django.contrib.auth.models.Group.objects.get(name='Masquerade')
def test_user_masquerade_as_superuser(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/3/')
# test that the user that is now logged in is user 3
self.assertEqual(c.session['_auth_user_id'], '3')
# test that the user is a masquerading user, and that the user that is masquerading as user 3 is user 1 (superuser)
self.assertTrue(c.session['is_masquerading'])
self.assertEqual(c.session['masquerade_user_id'], 1)
self.assertTrue(c.session['masquerade_is_superuser'])
self.assertEqual(c.session['return_page'], 'admin:index')
def test_staff_masquerade_as_superuser(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/2/')
# test that the user that is now logged in is user 2
self.assertEqual(c.session['_auth_user_id'], '2')
        # test that the user is masquerading, and that the masquerading user is user 1 (superuser)
self.assertTrue(c.session['is_masquerading'])
self.assertEqual(c.session['masquerade_user_id'], 1)
self.assertTrue(c.session['masquerade_is_superuser'])
self.assertEqual(c.session['return_page'], 'admin:index')
def test_super_masquerade_as_superuser(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/4/')
# test that the user that still logged in is user 1
self.assertEqual(c.session['_auth_user_id'], '1')
# test that the user is not masquerading as another superuser
self.assertFalse('is_masquerading' in c.session)
self.assertFalse('masquerade_user_id' in c.session)
self.assertFalse('masquerade_is_superuser' in c.session)
self.assertFalse('return_page' in c.session)
def test_super_masquerade_as_self(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/1/')
# test that the user that still logged in is user 1
self.assertEqual(c.session['_auth_user_id'], '1')
# test that the user is not masquerading as themselves
self.assertFalse('is_masquerading' in c.session)
self.assertFalse('masquerade_user_id' in c.session)
self.assertFalse('masquerade_is_superuser' in c.session)
self.assertFalse('return_page' in c.session)
def test_user_masquerade_staff_user_no_perm(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/3/')
# test that the user that still logged in is user 2
self.assertEqual(c.session['_auth_user_id'], '2')
# test that the user is not able to masquerade
self.assertFalse('is_masquerading' in c.session)
self.assertFalse('masquerade_user_id' in c.session)
self.assertFalse('masquerade_is_superuser' in c.session)
self.assertFalse('return_page' in c.session)
def test_staff_masquerade_staff_user_no_perm(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/2/')
# test that the user that still logged in is user 2
self.assertEqual(c.session['_auth_user_id'], '2')
# test that the user is not able to masquerade
self.assertFalse('is_masquerading' in c.session)
self.assertFalse('masquerade_user_id' in c.session)
self.assertFalse('masquerade_is_superuser' in c.session)
self.assertFalse('return_page' in c.session)
def test_super_masquerade_staff_user_no_perm(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/1/')
# test that the user that still logged in is user 2
self.assertEqual(c.session['_auth_user_id'], '2')
# test that the user is not able to masquerade
self.assertFalse('is_masquerading' in c.session)
self.assertFalse('masquerade_user_id' in c.session)
self.assertFalse('masquerade_is_superuser' in c.session)
self.assertFalse('return_page' in c.session)
def test_user_masquerade_staff_user(self):
# give the user masquerade privileges
u = UnitTestUser.objects.get(pk=2)
u.groups.add(self.group_masquerade)
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/3/')
# test that the user that is now logged in is user 3
self.assertEqual(c.session['_auth_user_id'], '3')
        # test that the user is masquerading, and that the masquerading user is user 2 (staffuser)
self.assertTrue(c.session['is_masquerading'])
self.assertEqual(c.session['masquerade_user_id'], 2)
self.assertFalse(c.session['masquerade_is_superuser'])
self.assertEqual(c.session['return_page'], 'admin:index')
def test_staff_masquerade_staff_user(self):
# give the user masquerade privileges
u = UnitTestUser.objects.get(pk=2)
u.groups.add(self.group_masquerade)
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/2/')
# test that the user that is now logged in is user 2
self.assertEqual(c.session['_auth_user_id'], '2')
        # test that the user is masquerading, and that the masquerading user is user 2 (staffuser)
self.assertTrue(c.session['is_masquerading'])
self.assertEqual(c.session['masquerade_user_id'], 2)
self.assertFalse(c.session['masquerade_is_superuser'])
self.assertEqual(c.session['return_page'], 'admin:index')
def test_super_masquerade_staff_user(self):
# give the user masquerade privileges
u = UnitTestUser.objects.get(pk=2)
u.groups.add(self.group_masquerade)
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/1/')
# test that the user that still logged in is user 2
self.assertEqual(c.session['_auth_user_id'], '2')
# test that the user is not able to masquerade as superuser
self.assertFalse('is_masquerading' in c.session)
self.assertFalse('masquerade_user_id' in c.session)
self.assertFalse('masquerade_is_superuser' in c.session)
self.assertFalse('return_page' in c.session)
def test_user_masquerade_regular_user_no_perm(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/3/')
# test that the user that still logged in is user 3
self.assertEqual(c.session['_auth_user_id'], '3')
# test that the user is not able to masquerade as a regular user
self.assertFalse('is_masquerading' in c.session)
self.assertFalse('masquerade_user_id' in c.session)
self.assertFalse('masquerade_is_superuser' in c.session)
self.assertFalse('return_page' in c.session)
def test_staff_masquerade_regular_user_no_perm(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/2/')
# test that the user that still logged in is user 3
self.assertEqual(c.session['_auth_user_id'], '3')
# test that the user is not able to masquerade as a regular user
self.assertFalse('is_masquerading' in c.session)
self.assertFalse('masquerade_user_id' in c.session)
self.assertFalse('masquerade_is_superuser' in c.session)
self.assertFalse('return_page' in c.session)
def test_super_masquerade_regular_user_no_perm(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/1/')
# test that the user that still logged in is user 3
self.assertEqual(c.session['_auth_user_id'], '3')
# test that the user is not able to masquerade as a regular user
self.assertFalse('is_masquerading' in c.session)
self.assertFalse('masquerade_user_id' in c.session)
self.assertFalse('masquerade_is_superuser' in c.session)
self.assertFalse('return_page' in c.session)
def test_user_masquerade_regular_user(self):
# give the user masquerade privileges
u = UnitTestUser.objects.get(pk=3)
u.groups.add(self.group_masquerade)
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/3/')
# test that the user that still logged in is user 3
self.assertEqual(c.session['_auth_user_id'], '3')
# test that the user is not able to masquerade as a regular user (even with perms)
self.assertFalse('is_masquerading' in c.session)
self.assertFalse('masquerade_user_id' in c.session)
self.assertFalse('masquerade_is_superuser' in c.session)
self.assertFalse('return_page' in c.session)
def test_staff_masquerade_regular_user(self):
# give the user masquerade privileges
u = UnitTestUser.objects.get(pk=3)
u.groups.add(self.group_masquerade)
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/2/')
self.assertEqual(c.session['_auth_user_id'], '3')
# test that the user is not able to masquerade as a regular user (even with perms)
self.assertFalse('is_masquerading' in c.session)
self.assertFalse('masquerade_user_id' in c.session)
self.assertFalse('masquerade_is_superuser' in c.session)
self.assertFalse('return_page' in c.session)
def test_super_masquerade_regular_user(self):
# give the user masquerade privileges
u = UnitTestUser.objects.get(pk=3)
u.groups.add(self.group_masquerade)
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.get('/admin/masquerade/1/')
self.assertEqual(c.session['_auth_user_id'], '3')
# test that the user is not able to masquerade as a regular user (even with perms)
self.assertFalse('is_masquerading' in c.session)
self.assertFalse('masquerade_user_id' in c.session)
self.assertFalse('masquerade_is_superuser' in c.session)
self.assertFalse('return_page' in c.session)
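# Hedged helper sketch (not wired into the tests above): the repeated
# "no masquerade state" assertions in this class could be collapsed into a
# single helper along these lines.
def _assert_not_masquerading(test_case, client):
    for key in ('is_masquerading', 'masquerade_user_id',
                'masquerade_is_superuser', 'return_page'):
        test_case.assertNotIn(key, client.session)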
@django.test.utils.override_settings(
MIDDLEWARE=MIDDLEWARE_CLASSES_NO_DEBUG_TOOLBAR,
INSTALLED_APPS=INSTALLED_APPS_NO_DEBUG_TOOLBAR,
AUTH_USER_MODEL='accountsplus.UnitTestUser',
ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class PasswordResetActionTestCase(django.test.TestCase):
urls = 'accountsplus.tests.test_urls'
@classmethod
def setUpTestData(cls):
company_1 = UnitTestCompany.objects.create(name='Example')
company_2 = UnitTestCompany.objects.create(name='Other Company')
superuser = UnitTestUser.objects.create_superuser(
email='[email protected]', password='password', first_name='Super', last_name='User')
superuser.company = company_1
superuser.save()
staffuser = UnitTestUser.objects.create_user(
email='[email protected]', password='password', first_name='Staff', last_name='User')
staffuser.is_staff = True
staffuser.company = company_1
staffuser.save()
regular_user = UnitTestUser.objects.create_user(
email='[email protected]', password='password', first_name='Regular', last_name='User')
regular_user.company = company_1
regular_user.save()
group = django.contrib.auth.models.Group.objects.create(name='Masquerade')
permission_masquerade = django.contrib.auth.models.Permission.objects.get(codename='masquerade')
group.permissions.add(permission_masquerade)
def setUp(self):
group_change_user = django.contrib.auth.models.Group(name='Change User')
group_change_user.save()
        # give the Change User group the change_user permission
change_user_permission = django.contrib.auth.models.Permission.objects.get(codename='change_user')
        group_change_user.permissions.set([change_user_permission])
u = UnitTestUser.objects.get(pk=2)
u.groups.add(group_change_user)
def test_password_reset_action_admin_user(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.post('/admin/accountsplus/unittestuser/', data={'action': 'reset_passwords', '_selected_action': ['3', '2', ], })
self.assertRedirects(r, '/admin/accountsplus/unittestuser/')
# check that we have 2 emails queued up
self.assertEqual(2, len(django.core.mail.outbox))
self.assertEqual(django.core.mail.outbox[0].subject, 'Password reset on example.com')
self.assertEqual(django.core.mail.outbox[1].subject, 'Password reset on example.com')
def test_password_reset_action_staff_user_no_permission(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
# test that a staff user without change permission can't reset a password
r = c.post('/admin/accountsplus/unittestuser/', data={'action': 'reset_passwords', '_selected_action': ['3', '2', ], })
self.assertEqual(r.status_code, 403)
def test_password_reset_action_staff_user_with_permission(self):
c = django.test.client.Client()
# give the staffuser the permission to change users so that it can send a password reset
staffuser = UnitTestUser.objects.get(email='[email protected]')
staffuser.user_permissions.add(django.contrib.auth.models.Permission.objects.get(codename='change_unittestuser'))
# test that a staff user with change permission can reset a password
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.post('/admin/accountsplus/unittestuser/', data={'action': 'reset_passwords', '_selected_action': ['3', '2', ], })
self.assertRedirects(r, '/admin/accountsplus/unittestuser/')
# check that we have 2 emails queued up
self.assertEqual(2, len(django.core.mail.outbox))
self.assertEqual(django.core.mail.outbox[0].subject, 'Password reset on example.com')
self.assertEqual(django.core.mail.outbox[1].subject, 'Password reset on example.com')
def test_password_reset_action_regular_user(self):
c = django.test.client.Client()
self.assertTrue(c.login(email='[email protected]', password='password'))
r = c.post('/admin/accountsplus/unittestuser/', data={'action': 'reset_passwords', '_selected_action': ['3', '2', ], })
self.assertRedirects(r, '/admin/login/?next=/admin/accountsplus/unittestuser/')
|
|
# Purpose: Main Function - Training Models
# Info: Change the parameters at the top of the script to change how the agent interacts
# Developed as part of the Software Agents Course at City University
# Dev: Dan Dixey and Enrico Lopedoto
# Updated: 1/3/2016
import logging
import os
import sys
from time import time
import json
import numpy as np
import pylab
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
from Model.Helicopter import helicopter
from Model import World as W
from Model.Plotting import plotting_model
from Settings import *
import matplotlib
import matplotlib.pyplot as plt
from random import choice
matplotlib.style.use('ggplot')
################ Model Settings
case = 'case_five'
settings_ = case_lookup[case]
iterations, settings = get_indicies(settings_)
file_name="Track_1.npy"
################ Plot Settings
plot_settings = dict(print_up_to=-1,
end_range=list(range(settings['trials'] - 0,
settings['trials'] + 1)),
print_rate=1)
# Plotting Colors
colors = ['coral', 'green', 'red', 'cyan', 'magenta',
'yellow', 'blue', 'white', 'fuchsia', 'orangered', 'steelblue']
HeliWorld = W.helicopter_world(file_name=file_name)
# file_name=None - Loads a Randomly Generated Track
Helicopter1 = helicopter(world=HeliWorld, settings=settings)
st = time()
time_metrics = []
results = dict(time_chart=[],
final_location=[],
best_test=[],
q_plot=[],
model_names=[],
q_matrix=[],
paths=[],
returns=[])
t_array = [] # Storing Time to Complete
f_array = [] # Storing Final Locations
b_array = [] # Storing Full Control
a_array = []  # Storing matrix values
a = np.zeros(shape=(HeliWorld.track_height,
HeliWorld.track_width))
path = []
fig = plt.figure()
for value_iter in range(iterations):
if case != 'case_one':
rowz = 5
colz = 2
        indexz = value_iter + 1
        figsize = (15, 15)
else:
rowz = 1
colz = 1
        indexz = 1
        figsize = (15, 15)
if value_iter > 0:
settings = get_settings(dictionary=settings_,
ind=value_iter)
HeliWorld = W.helicopter_world(file_name=file_name)
Helicopter1 = helicopter(world=HeliWorld,
settings=settings)
a = np.zeros(shape=(HeliWorld.track_height,
HeliWorld.track_width))
t_array = [] # Storing Time to Complete
f_array = [] # Storing Final Locations
b_array = [] # Storing Full Control
        a_array = []  # Storing matrix values
logging.info('Changing Values: {}'.format(settings_['change_values']))
while HeliWorld.trials <= settings['trials']:
        # On the last trial give the model full control
if HeliWorld.trials == settings[
'trials'] or HeliWorld.trials in plot_settings['end_range']:
Helicopter1.ai.epsilon, settings['epsilon'] = 0, 0
# Print out logging metrics
if HeliWorld.trials % plot_settings[
'print_rate'] == 0 and HeliWorld.trials > 0:
rate = ((time() - st + 0.01) / HeliWorld.trials)
value = [HeliWorld.trials, rate]
time_metrics.append(value)
logging.info(
"Trials Completed: {} at {:.4f} seconds / trial".format(value[0], value[1]))
# Inner loop of episodes
while True:
output = Helicopter1.update()
if HeliWorld.trials == settings['trials']:
b_array.append(Helicopter1.current_location)
if not output:
f_array.append(
[HeliWorld.trials, Helicopter1.current_location[0]])
Helicopter1.reset()
rate = (time() - st + 0.01) / HeliWorld.trials
value = [HeliWorld.trials,
rate]
t_array.append(value)
if HeliWorld.trials <= plot_settings[
'print_up_to'] or HeliWorld.trials in plot_settings['end_range']:
results['paths'].append(path)
path = []
break
if HeliWorld.trials <= plot_settings[
'print_up_to'] or HeliWorld.trials in plot_settings['end_range']:
# Primary Title
rate = (time() - st + 0.01) / HeliWorld.trials
value = [HeliWorld.trials,
rate]
path.append(Helicopter1.current_location)
pos, array_masked = Helicopter1.return_q_view()
a[:, pos - 1] += array_masked
a_array.append([HeliWorld.trials,a.sum()])
HeliWorld.trials += 1
################################
plt.subplot(rowz, colz, indexz)
plt.title('Q Plot of Helicopter Path', fontsize=8)
plt.xlabel('Track Length', fontsize=8)
plt.ylabel('Track Width', fontsize=8)
my_axis = plt.gca()
my_axis.set_xlim(0, HeliWorld.track_width)
my_axis.set_ylim(0, HeliWorld.track_height)
im1 = plt.imshow(a,
cmap=plt.cm.jet,
interpolation='nearest')
plt.colorbar(im1, fraction=0.01, pad=0.01)
plt.show()
name = 'model_{}_case_{}_iter_{}'.format(
settings['model'],
case.split('_')[1],
value_iter)
# Record Results
    results['time_chart'].append(t_array)
results['final_location'].append(f_array)
results['best_test'].append(b_array)
results['q_plot'].append(a.tolist())
results['model_names'].append(settings)
results['returns'].append(a_array)
et = time()
################################
xlim_val = int(settings['trials'])
nb_action = int(settings['nb_actions'])
n_items = len(results['best_test'])
## Save all results to a JSON file
#f = open(
# os.path.join(
# os.getcwd(),
# 'Results',
# case,
# 'Model{}'.format(
# settings['model']) +
# '.json'),
# 'w').write(
# json.dumps(results))
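# Hedged sketch of the disabled JSON export above (kept as comments so the
# script's behaviour is unchanged): the same write can be done with a context
# manager, e.g.
#     out_dir = os.path.join(os.getcwd(), 'Results', case)
#     if not os.path.exists(out_dir):
#         os.makedirs(out_dir)
#     out_file = os.path.join(out_dir, 'Model{}.json'.format(settings['model']))
#     with open(out_file, 'w') as fp:
#         json.dump(results, fp)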
################################
model_plot = plotting_model()
model_plot.get_q_matrix(model_q=Helicopter1.ai.q,
nb_actions=settings['nb_actions'])
model_plot.plot_q_matrix('Q-Matrix - {}'.format(name))
q_data = model_plot.get_details()
results['q_matrix'].append(q_data)
plt.show()
################################
fig = plt.figure(figsize=figsize)
plt.title('Real-time Plot of Helicopter Path', fontsize=10)
plt.xlabel('Track Length', fontsize=8)
plt.ylabel('Track Width', fontsize=8)
my_axis = plt.gca()
my_axis.set_xlim(0, HeliWorld.track_width)
my_axis.set_ylim(0, HeliWorld.track_height)
im1 = plt.imshow(HeliWorld.track,
cmap=plt.cm.jet,
interpolation='nearest',
vmin=-1,
vmax=8)
plt.colorbar(im1, fraction=0.01, pad=0.01)
# For each set of results in dictionary
for i in range(n_items):
x, y = [], []
for each_item in results['best_test'][i]:
x.append(each_item[0])
y.append(each_item[1])
# Plot Scatter
plt.scatter(x=x,
y=y,
s=np.pi * (1 * 1) ** 2,
c=colors[i])
plt.show()
################################
fig = plt.figure(figsize=figsize)
selection = choice(range(len(results['best_test'])))
plt.title('Final Q Matrix', fontsize=10)
plt.xlabel('Track Length', fontsize=8)
plt.ylabel('Track Width', fontsize=8)
my_axis = plt.gca()
my_axis.set_xlim(0, HeliWorld.track_width)
my_axis.set_ylim(0, HeliWorld.track_height)
q_data = np.array(results['q_plot'][selection])
im1 = plt.imshow(q_data,
cmap=plt.cm.jet,
interpolation='nearest')
plt.colorbar(im1, fraction=0.01, pad=0.01)
plt.show()
################################
par=np.arange(0.1, 1.1, 0.1)
fig = plt.figure(figsize=figsize)
plt.title('Completion Chart - Time per Trial', fontsize=10)
plt.xlabel('Trial Numbers', fontsize=8)
plt.ylabel('LOG(Seconds Per Trial)', fontsize=8)
my_axis = plt.gca()
my_axis.set_xlim(0, xlim_val)
# For each set of results in dictionary
for i in range(n_items):
x, y = [], []
for each_item in results['time_chart'][i]:
x.append(each_item[0])
y.append(each_item[1])
# Plot Scatter
plt.scatter(x=x,
y=np.log(y),
s=np.pi * (1 * 1) ** 2,
c=colors[i],
label=par[i])
plt.legend(title="Parameters")
plt.show()
################################
par=np.arange(0.1, 1.1, 0.1)
fig = plt.figure(figsize=figsize)
plt.title('Reward Chart - Tot Q Values per Trial', fontsize=10)
plt.xlabel('Trial Numbers', fontsize=8)
plt.ylabel('LOG(Q Values)', fontsize=8)
my_axis = plt.gca()
my_axis.set_xlim(0, xlim_val)
# For each set of results in dictionary
for i in range(n_items):
x, y = [], []
for each_item in results['returns'][i]:
x.append(each_item[0])
y.append(each_item[1])
# Plot Scatter
plt.scatter(x=x,
y=np.log(y),
s=np.pi * (1 * 1) ** 2,
c=colors[i],
label=par[i])
plt.legend(title="Parameters")
plt.show()
################################
#fig = plt.figure(figsize=figsize)
#plt.title('Learning Chart - Averaged Trial Plot', fontsize=10)
#plt.xlabel('Trial Numbers', fontsize=8)
#plt.ylabel('End Location', fontsize=8)
#for i in range(n_items):
# x, y = [], []
# for each_item in results['final_location'][i]:
# x.append(each_item[0])
# y.append(each_item[1])
# y = y
# plt.plot(x, y, linewidth=0.5, c=colors[i])
#
#title_text = '|| Case - {} | Number of Trials - {} | Model - {} | Number of Actions - {} ||\n\
# || TRACK | Width - {} | Height - {} ||'.format(case,
# xlim_val,
# settings['model'],
# nb_action,
# HeliWorld.track_width,
# HeliWorld.track_height)
#fig.suptitle(title_text)
#plt.show()
################################
fig = plt.figure(figsize=figsize)
plt.hist(results['q_matrix'][0]['data'], bins=50)
plt.title("Q-Value Distribution - Min={} to Max={}".format(
results['q_matrix'][0]['min'], results['q_matrix'][0]['max']))
plt.xlabel("Value")
plt.ylabel("Frequency")
fig.suptitle('Q Matrix')
plt.show()
|
|
"""
Django settings for django_get_started project.
"""
import sys
from os import path
from redcap import Project, RedcapError
from twilio.access_token import AccessToken, IpMessagingGrant
from twilio.rest.ip_messaging import TwilioIpMessagingClient
from twilio.rest import TwilioRestClient
from azure.storage.blob import BlockBlobService
from azure.storage import CloudStorageAccount
from pyfcm import FCMNotification
from django.utils.translation import ugettext_lazy as _
PROJECT_ROOT = path.dirname(path.abspath(path.dirname(__file__)))
# API KEY is actually our cloud messaging SERVER KEY... they misnamed it?
FCM_API_KEY = "AAAAZ4czPsc:APA91bGapJWFGh7h97L7_TO9TV6UB9vqjeA1rMxATMwDTvleJr9hvn5cB9Dppz7y_Sa4mmYD6UfePK0FOriwphvyJmEM-_MJLwkkas21uFRZgflqbk_f367uqwcWyAQ6AThRDSe_275_"
FCM_SERVICE = FCMNotification(api_key=FCM_API_KEY)
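# Hedged usage sketch (defined but never called at import time): pushing a
# notification through the FCM_SERVICE configured above. The registration id
# and message contents are placeholders.
def _example_send_push_notification(registration_id):
    # notify_single_device is part of the pyfcm FCMNotification API.
    return FCM_SERVICE.notify_single_device(
        registration_id=registration_id,
        message_title="PalliAssist",
        message_body="Example notification body",
    )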
"""
URL = "https://hcbredcap.com.br/api/"
USER_TOKEN = "F2C5AEE8A2594B0A9E442EE91C56CC7A"
MEDICATION_TOKEN = "2D58F93CB3B9C8C2FD00E64BD12080A3"
ESAS_TOKEN = "7048C161E7BE6A8B86F5100D5FDA7C20"
PAIN_TOKEN = "B91EDA097393C04D5F2C1526E1F7CD37"
PATIENT_TOKEN = "A1E9884F5A90E5270385D131B66659CE"
DOCTOR_TOKEN = "8400B14C28F7E6C644E0ADB5AE5F4628"
REDCAP_USER_PROJECT = Project(URL, USER_TOKEN)
REDCAP_MEDICATION_PROJECT = Project(URL, MEDICATION_TOKEN)
REDCAP_ESAS_PROJECT = Project(URL, ESAS_TOKEN)
REDCAP_PAIN_PROJECT = Project(URL, PAIN_TOKEN)
REDCAP_PATIENT_PROJECT = Project(URL, PATIENT_TOKEN)
REDCAP_DOCTOR_PROJECT = Project(URL, DOCTOR_TOKEN)
"""
# get credentials for environment variables
TWILIO_ACCOUNT_SID = 'ACbf05fc8a591d9136132c9d62d8319eb1'
TWILIO_AUTH_TOKEN = '09f9ba77cd7c40b602cab2f484e58c07'
TWILIO_API_SECRET = 'R3W2DYt3Eq1hbwj2GRKQV531XeVDU9sJ'
TWILIO_API_KEY = 'SKeed5a60867e8f918ac7f2e9fa819d98a'
TWILIO_IPM_SERVICE_SID = 'IS2ec68050ef5e4c79b15b78c3ded7ddc5'
# old one with testchannel and general
#TWILIO_SERVICE_SID = 'IS7d421d86df064d9698e91ee6e3d4bcf5'
# Initialize the client
TWILIO_IPM_CLIENT = TwilioIpMessagingClient(TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN)
TWILIO_IPM_SERVICE = TWILIO_IPM_CLIENT.services.get(sid=TWILIO_IPM_SERVICE_SID)
AZURE_STORAGE_ACCOUNT = CloudStorageAccount(
"palliassistblobstorage", # account name
"r9tHMEj5VV/PwJyjN3KYySUqsnq9tCrxh6kDKFvVY3vrm+GluHN/a1LQjXKYIUzoHEle7x3EyIQwoOijzRJiOA==", # access key
"?sv=2016-05-31&ss=b&srt=sco&sp=rwdlac&se=2017-05-25T08:02:01Z&st=2017-04-04T00:02:01Z&spr=https,http&sig=DshFBBFKzV20Ml6sN8D8ZRpbIakU8jlbj8zIBDZP4z8%3D" # sas token
)
BLOCK_BLOB_SERVICE = AZURE_STORAGE_ACCOUNT.create_block_blob_service()
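# Hedged usage sketch (defined but never called at import time): uploading raw
# bytes with the block blob service configured above. Container and blob names
# are placeholders.
def _example_upload_blob(container_name, blob_name, data_bytes):
    # create_blob_from_bytes is part of the legacy azure-storage
    # BlockBlobService API.
    BLOCK_BLOB_SERVICE.create_blob_from_bytes(container_name, blob_name, data_bytes)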
#print "AZURE_STORAGE_ACCOUNT", AZURE_STORAGE_ACCOUNT
#print "BLOCK_BLOB_SERVICE", BLOCK_BLOB_SERVICE
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf8')
ENABLE_XMPP = False
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ALLOWED_HOSTS = (
'localhost',
'palliassist-dev-us.azurewebsites.net',
'127.0.0.1',
'599632a7.ngrok.io',
'*',
)
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
#AUTHENTICATION_BACKENDS = ('app.backends.REDCapBackend',)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': path.join(PROJECT_ROOT, 'db.sqlite3'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = 'dashboard'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
#MEDIA_ROOT = ''
MEDIA_ROOT = path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
#MEDIA_URL = ''
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = path.join(PROJECT_ROOT, 'static').replace('\\', '/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
#path.join(PROJECT_ROOT, 'app/static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'n(bd1f1c%e8=_xad02x5qtfn%wgwpi492e$8_erx+d)!tpeoim'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
LANGUAGES = [
('pt-br', _('Brazilian Portuguese')),
('en', _('English')),
]
LOCALE_PATHS = [
path.join(PROJECT_ROOT, 'locale'),
]
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'django_get_started.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'django_get_started.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
path.join(path.dirname(__file__), 'templates'),
PROJECT_ROOT + '/app/templates/app',
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'phonenumber_field',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Specify the default test runner.
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
|
|
"""Thin wrapper of CUDA Driver API.
There are four differences compared to the original C API.
1. Not all functions are ported.
2. Errors are translated into CUDADriverError exceptions.
3. The 'cu' prefix of each API is omitted and the next character is set to
lower case.
4. The resulting values are returned directly instead of references.
"""
import ctypes
import six
from cupy.cuda import internal
_cuda = internal.load_library('cuda')
###############################################################################
# Types
###############################################################################
Device = ctypes.c_int
Context = ctypes.c_void_p
Module = ctypes.c_void_p
Function = ctypes.c_void_p
Stream = ctypes.c_void_p
Event = ctypes.c_void_p
###############################################################################
# Error handling
###############################################################################
_cuda.cuGetErrorName.argtypes = [ctypes.c_int, ctypes.c_void_p]
_cuda.cuGetErrorString.argtypes = [ctypes.c_int, ctypes.c_void_p]
class CUDADriverError(RuntimeError):
def __init__(self, status):
self.status = status
name = ctypes.c_char_p()
msg = ctypes.c_char_p()
_cuda.cuGetErrorName(status, ctypes.byref(name))
_cuda.cuGetErrorString(status, ctypes.byref(msg))
super(CUDADriverError, self).__init__(
'%s: %s' % (name.value, msg.value))
def check_status(status):
if status != 0:
raise CUDADriverError(status)
###############################################################################
# Initialization
###############################################################################
_cuda.cuInit.argtypes = [ctypes.c_uint]
def init():
status = _cuda.cuInit(0)
check_status(status)
_cuda.cuDriverGetVersion.argtypes = [ctypes.c_void_p]
def driverGetVersion():
version = ctypes.c_int()
status = _cuda.cuDriverGetVersion(ctypes.byref(version))
check_status(status)
return version.value
###############################################################################
# Device and context operations
###############################################################################
_cuda.cuDeviceGet.argtypes = [ctypes.c_void_p, ctypes.c_int]
def deviceGet(device_id):
device = Device()
status = _cuda.cuDeviceGet(ctypes.byref(device), device_id)
check_status(status)
return device
_cuda.cuDeviceGetAttribute.argtypes = [ctypes.c_void_p, ctypes.c_int]
def deviceGetAttribute(attrib, device):
ret = ctypes.c_int()
status = _cuda.cuDeviceGetAttribute(ctypes.byref(ret), attrib, device)
check_status(status)
return ret
_cuda.cuDeviceGetCount.argtypes = [ctypes.c_void_p]
def deviceGetCount():
count = ctypes.c_int()
status = _cuda.cuDeviceGetCount(ctypes.byref(count))
check_status(status)
return count.value
_cuda.cuDeviceTotalMem.argtypes = [ctypes.c_void_p, Device]
def deviceTotalMem(device):
mem = ctypes.c_size_t()
status = _cuda.cuDeviceTotalMem(ctypes.byref(mem), device)
check_status(status)
return mem.value
_cuda.cuCtxCreate_v2.argtypes = [ctypes.c_void_p, ctypes.c_uint, Device]
def ctxCreate(flag, device):
ctx = Context()
status = _cuda.cuCtxCreate_v2(ctypes.byref(ctx), flag, device)
check_status(status)
return ctx
_cuda.cuCtxDestroy_v2.argtypes = [ctypes.c_void_p]
def ctxDestroy(ctx):
status = _cuda.cuCtxDestroy_v2(ctx)
check_status(status)
_cuda.cuCtxGetApiVersion.argtypes = [Context, ctypes.c_void_p]
def ctxGetApiVersion(ctx):
version = ctypes.c_uint()
status = _cuda.cuCtxGetApiVersion(ctx, ctypes.byref(version))
check_status(status)
return version.value
_cuda.cuCtxGetCurrent.argtypes = [ctypes.c_void_p]
def ctxGetCurrent():
ctx = Context()
status = _cuda.cuCtxGetCurrent(ctypes.byref(ctx))
check_status(status)
return ctx
_cuda.cuCtxGetDevice.argtypes = [ctypes.c_void_p]
def ctxGetDevice():
device = Device()
status = _cuda.cuCtxGetDevice(ctypes.byref(device))
check_status(status)
return device
_cuda.cuCtxPopCurrent_v2.argtypes = [ctypes.c_void_p]
def ctxPopCurrent():
ctx = Context()
status = _cuda.cuCtxPopCurrent_v2(ctypes.byref(ctx))
check_status(status)
return ctx
_cuda.cuCtxPushCurrent_v2.argtypes = [Context]
def ctxPushCurrent(ctx):
status = _cuda.cuCtxPushCurrent_v2(ctx)
check_status(status)
_cuda.cuCtxSetCurrent.argtypes = [Context]
def ctxSetCurrent(ctx):
status = _cuda.cuCtxSetCurrent(ctx)
check_status(status)
def ctxSynchronize():
status = _cuda.cuCtxSynchronize()
check_status(status)
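# Hedged usage sketch (not called anywhere in this module): the minimal
# init / device / context sequence using the wrappers defined above. The
# device id is a placeholder.
def _example_create_context(device_id=0):
    init()
    device = deviceGet(device_id)
    return ctxCreate(0, device)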
###############################################################################
# Module load and kernel execution
###############################################################################
_cuda.cuModuleLoad.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
def moduleLoad(filename):
module = Module()
status = _cuda.cuModuleLoad(ctypes.byref(module), filename)
check_status(status)
return module
_cuda.cuModuleLoadData.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
def moduleLoadData(image):
module = Module()
status = _cuda.cuModuleLoadData(ctypes.byref(module), image)
check_status(status)
return module
_cuda.cuModuleUnload.argtypes = [Module]
def moduleUnload(module):
status = _cuda.cuModuleUnload(module)
check_status(status)
_cuda.cuModuleGetFunction.argtypes = [ctypes.c_void_p, Module, ctypes.c_char_p]
def moduleGetFunction(module, funcname):
func = Function()
if isinstance(funcname, six.text_type):
funcname = funcname.encode('utf-8')
status = _cuda.cuModuleGetFunction(ctypes.byref(func), module, funcname)
check_status(status)
return func
_cuda.cuModuleGetGlobal_v2.argtypes = [
ctypes.c_void_p, Module, ctypes.c_char_p]
def moduleGetGlobal(module, varname):
var = ctypes.c_void_p()
status = _cuda.cuModuleGetGlobal_v2(ctypes.byref(var), module, varname)
check_status(status)
return var
_cuda.cuLaunchKernel.argtypes = [
Function, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint,
ctypes.c_uint, ctypes.c_uint, ctypes.c_uint, Stream, ctypes.c_void_p,
ctypes.c_void_p]
def launchKernel(f, grid_dim_x, grid_dim_y, grid_dim_z, block_dim_x,
block_dim_y, block_dim_z, shared_mem_bytes, stream,
kernel_params, extra):
status = _cuda.cuLaunchKernel(
f, grid_dim_x, grid_dim_y, grid_dim_z, block_dim_x, block_dim_y,
block_dim_z, shared_mem_bytes, stream, kernel_params, extra)
check_status(status)
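# Hedged sketch (not called anywhere in this module): cuLaunchKernel expects
# `kernel_params` to be a void** array in which each element points at the
# storage holding one kernel argument. For a kernel taking a single device
# pointer, the array can be built with ctypes as below; `device_ptr` is
# assumed to be the ctypes.c_void_p returned by memAlloc() further down.
def _example_launch_one_pointer_arg(func, device_ptr, grid_x=1, block_x=128):
    kernel_params = (ctypes.c_void_p * 1)(ctypes.addressof(device_ptr))
    launchKernel(func, grid_x, 1, 1, block_x, 1, 1, 0, None, kernel_params, None)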
###############################################################################
# Memory management
###############################################################################
_cuda.cuMemAlloc_v2.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
def memAlloc(size):
ptr = ctypes.c_void_p()
status = _cuda.cuMemAlloc_v2(ctypes.byref(ptr), size)
check_status(status)
return ptr
_cuda.cuMemFree_v2.argtypes = [ctypes.c_void_p]
def memFree(ptr):
status = _cuda.cuMemFree_v2(ptr)
check_status(status)
_cuda.cuMemGetInfo_v2.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
def memGetinfo():
free = ctypes.c_size_t()
total = ctypes.c_size_t()
status = _cuda.cuMemGetInfo_v2(ctypes.byref(free), ctypes.byref(total))
check_status(status)
return free.value, total.value
_cuda.cuMemcpy.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
def memcpy(dst, src, size):
status = _cuda.cuMemcpy(dst, src, size)
check_status(status)
_cuda.cuMemcpyAsync.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t, Stream]
def memcpyAsync(dst, src, size, stream):
status = _cuda.cuMemcpyAsync(dst, src, size, stream)
check_status(status)
_cuda.cuMemcpyDtoD_v2.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t]
def memcpyDtoD(dst, src, size):
status = _cuda.cuMemcpyDtoD_v2(dst, src, size)
check_status(status)
_cuda.cuMemcpyDtoDAsync_v2.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t, Stream]
def memcpyDtoDAsync(dst, src, size, stream):
status = _cuda.cuMemcpyDtoDAsync_v2(dst, src, size, stream)
check_status(status)
_cuda.cuMemcpyDtoH_v2.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t]
def memcpyDtoH(dst, src, size):
status = _cuda.cuMemcpyDtoH_v2(dst, src, size)
check_status(status)
_cuda.cuMemcpyDtoHAsync_v2.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t, Stream]
def memcpyDtoHAsync(dst, src, size, stream):
status = _cuda.cuMemcpyDtoHAsync_v2(dst, src, size, stream)
check_status(status)
_cuda.cuMemcpyHtoD_v2.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t]
def memcpyHtoD(dst, src, size):
status = _cuda.cuMemcpyHtoD_v2(dst, src, size)
check_status(status)
_cuda.cuMemcpyHtoDAsync_v2.argtypes = [ctypes.c_void_p, ctypes.c_void_p,
ctypes.c_size_t, Stream]
def memcpyHtoDAsync(dst, src, size, stream):
status = _cuda.cuMemcpyHtoDAsync_v2(dst, src, size, stream)
check_status(status)
_cuda.cuMemcpyPeer.argtypes = [ctypes.c_void_p, Context, ctypes.c_void_p,
Context, ctypes.c_size_t]
def memcpyPeer(dst, dst_ctx, src, src_ctx, size):
status = _cuda.cuMemcpyPeer(dst, dst_ctx, src, src_ctx, size)
check_status(status)
_cuda.cuMemcpyPeerAsync.argtypes = [ctypes.c_void_p, Context, ctypes.c_void_p,
Context, ctypes.c_size_t, Stream]
def memcpyPeerAsync(dst, dst_ctx, src, src_ctx, size, stream):
status = _cuda.cuMemcpyPeerAsync(dst, dst_ctx, src, src_ctx, size, stream)
check_status(status)
_cuda.cuMemsetD32_v2.argtypes = [
ctypes.c_void_p, ctypes.c_uint, ctypes.c_size_t]
def memsetD32(ptr, value, size):
status = _cuda.cuMemsetD32_v2(ptr, value, size)
check_status(status)
_cuda.cuMemsetD32Async.argtypes = [ctypes.c_void_p, ctypes.c_uint,
ctypes.c_size_t, Stream]
def memsetD32Async(ptr, value, size, stream):
status = _cuda.cuMemsetD32Async(ptr, value, size, stream)
check_status(status)
_cuda.cuPointerGetAttribute.argtypes = [ctypes.c_void_p, ctypes.c_int,
ctypes.c_void_p]
def pointerGetAttribute(attribute, ptr):
assert attribute == 0 # Currently only context query is supported
ctx = Context()
status = _cuda.cuPointerGetAttribute(ctypes.byref(ctx), attribute, ptr)
check_status(status)
return ctx
###############################################################################
# Stream and Event
###############################################################################
_cuda.cuStreamCreate.argtypes = [ctypes.c_void_p, ctypes.c_uint]
def streamCreate(flag=0):
stream = Stream()
status = _cuda.cuStreamCreate(ctypes.byref(stream), flag)
check_status(status)
return stream
_cuda.cuStreamDestroy_v2.argtypes = [Stream]
def streamDestroy(stream):
status = _cuda.cuStreamDestroy_v2(stream)
check_status(status)
_cuda.cuStreamSynchronize.argtypes = [Stream]
def streamSynchronize(stream):
status = _cuda.cuStreamSynchronize(stream)
check_status(status)
StreamCallback = ctypes.CFUNCTYPE(None, Stream, ctypes.c_int, ctypes.c_void_p)
_cuda.cuStreamAddCallback.argtypes = [Stream, StreamCallback, ctypes.c_void_p,
ctypes.c_uint]
def streamAddCallback(stream, callback, arg, flags=0):
status = _cuda.cuStreamAddCallback(stream, StreamCallback(callback),
ctypes.byref(arg), flags)
check_status(status)
EVENT_DEFAULT = 0
EVENT_BLOCKING_SYNC = 1
EVENT_DISABLE_TIMING = 2
EVENT_INTERPROCESS = 4
_cuda.cuEventCreate.argtypes = [ctypes.c_void_p, ctypes.c_uint]
def eventCreate(flag):
event = Event()
status = _cuda.cuEventCreate(ctypes.byref(event), flag)
check_status(status)
return event
_cuda.cuEventDestroy_v2.argtypes = [Event]
def eventDestroy(event):
status = _cuda.cuEventDestroy_v2(event)
check_status(status)
_cuda.cuEventRecord.argtypes = [Event, Stream]
def eventRecord(event, stream):
status = _cuda.cuEventRecord(event, stream)
check_status(status)
_cuda.cuEventSynchronize.argtypes = [Event]
def eventSynchronize(event):
status = _cuda.cuEventSynchronize(event)
check_status(status)
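# Hedged usage sketch (not called anywhere in this module): create a stream
# and an event, record the event on the stream, then block until the recorded
# work has finished before releasing both handles.
def _example_record_and_wait():
    stream = streamCreate()
    event = eventCreate(EVENT_DEFAULT)
    eventRecord(event, stream)
    eventSynchronize(event)
    eventDestroy(event)
    streamDestroy(stream)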
|
|
""""""
from datetime import datetime
from typing import List
from mongoengine import (
Document,
DateTimeField,
FloatField,
StringField,
IntField,
connect,
QuerySet
)
from mongoengine.errors import DoesNotExist
from vnpy.trader.constant import Exchange, Interval
from vnpy.trader.object import BarData, TickData
from vnpy.trader.database import (
BaseDatabase,
BarOverview,
DB_TZ,
convert_tz
)
from vnpy.trader.setting import SETTINGS
class DbBarData(Document):
""""""
symbol: str = StringField()
exchange: str = StringField()
datetime: datetime = DateTimeField()
interval: str = StringField()
volume: float = FloatField()
open_interest: float = FloatField()
open_price: float = FloatField()
high_price: float = FloatField()
low_price: float = FloatField()
close_price: float = FloatField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "interval", "datetime"),
"unique": True,
}
]
}
class DbTickData(Document):
""""""
symbol: str = StringField()
exchange: str = StringField()
datetime: datetime = DateTimeField()
name: str = StringField()
volume: float = FloatField()
open_interest: float = FloatField()
last_price: float = FloatField()
last_volume: float = FloatField()
limit_up: float = FloatField()
limit_down: float = FloatField()
open_price: float = FloatField()
high_price: float = FloatField()
low_price: float = FloatField()
close_price: float = FloatField()
pre_close: float = FloatField()
bid_price_1: float = FloatField()
bid_price_2: float = FloatField()
bid_price_3: float = FloatField()
bid_price_4: float = FloatField()
bid_price_5: float = FloatField()
ask_price_1: float = FloatField()
ask_price_2: float = FloatField()
ask_price_3: float = FloatField()
ask_price_4: float = FloatField()
ask_price_5: float = FloatField()
bid_volume_1: float = FloatField()
bid_volume_2: float = FloatField()
bid_volume_3: float = FloatField()
bid_volume_4: float = FloatField()
bid_volume_5: float = FloatField()
ask_volume_1: float = FloatField()
ask_volume_2: float = FloatField()
ask_volume_3: float = FloatField()
ask_volume_4: float = FloatField()
ask_volume_5: float = FloatField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "datetime"),
"unique": True,
}
],
}
class DbBarOverview(Document):
""""""
symbol: str = StringField()
exchange: str = StringField()
interval: str = StringField()
count: int = IntField()
start: datetime = DateTimeField()
end: datetime = DateTimeField()
meta = {
"indexes": [
{
"fields": ("symbol", "exchange", "interval"),
"unique": True,
}
],
}
class MongodbDatabase(BaseDatabase):
""""""
def __init__(self) -> None:
""""""
database = SETTINGS["database.database"]
host = SETTINGS["database.host"]
port = SETTINGS["database.port"]
username = SETTINGS["database.user"]
password = SETTINGS["database.password"]
authentication_source = SETTINGS["database.authentication_source"]
if not username:
username = None
password = None
authentication_source = None
connect(
db=database,
host=host,
port=port,
username=username,
password=password,
authentication_source=authentication_source,
)
def save_bar_data(self, bars: List[BarData]) -> bool:
""""""
# Store key parameters
bar = bars[0]
symbol = bar.symbol
exchange = bar.exchange
interval = bar.interval
# Upsert data into mongodb
for bar in bars:
bar.datetime = convert_tz(bar.datetime)
d = bar.__dict__
d["exchange"] = d["exchange"].value
d["interval"] = d["interval"].value
d.pop("gateway_name")
d.pop("vt_symbol")
param = to_update_param(d)
DbBarData.objects(
symbol=d["symbol"],
exchange=d["exchange"],
interval=d["interval"],
datetime=d["datetime"],
).update_one(upsert=True, **param)
# Update bar overview
try:
overview: DbBarOverview = DbBarOverview.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
).get()
except DoesNotExist:
overview: DbBarOverview = DbBarOverview(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
)
if not overview.start:
overview.start = bars[0].datetime
overview.end = bars[-1].datetime
overview.count = len(bars)
else:
overview.start = min(bars[0].datetime, overview.start)
overview.end = max(bars[-1].datetime, overview.end)
overview.count = DbBarData.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
).count()
overview.save()
def save_tick_data(self, ticks: List[TickData]) -> bool:
""""""
for tick in ticks:
tick.datetime = convert_tz(tick.datetime)
d = tick.__dict__
d["exchange"] = d["exchange"].value
d["interval"] = d["interval"].value
d.pop("gateway_name")
d.pop("vt_symbol")
param = to_update_param(d)
DbTickData.objects(
symbol=d["symbol"],
exchange=d["exchange"],
datetime=d["datetime"],
).update_one(upsert=True, **param)
def load_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: Interval,
start: datetime,
end: datetime
) -> List[BarData]:
""""""
s: QuerySet = DbBarData.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value,
datetime__gte=convert_tz(start),
datetime__lte=convert_tz(end),
)
vt_symbol = f"{symbol}.{exchange.value}"
bars: List[BarData] = []
for db_bar in s:
db_bar.datetime = DB_TZ.localize(db_bar.datetime)
db_bar.exchange = Exchange(db_bar.exchange)
db_bar.interval = Interval(db_bar.interval)
db_bar.gateway_name = "DB"
db_bar.vt_symbol = vt_symbol
bars.append(db_bar)
return bars
def load_tick_data(
self,
symbol: str,
exchange: Exchange,
start: datetime,
end: datetime
) -> List[TickData]:
""""""
s: QuerySet = DbTickData.objects(
symbol=symbol,
exchange=exchange.value,
datetime__gte=convert_tz(start),
datetime__lte=convert_tz(end),
)
vt_symbol = f"{symbol}.{exchange.value}"
ticks: List[TickData] = []
for db_tick in s:
db_tick.datetime = DB_TZ.localize(db_tick.datetime)
db_tick.exchange = Exchange(db_tick.exchange)
db_tick.gateway_name = "DB"
db_tick.vt_symbol = vt_symbol
ticks.append(db_tick)
return ticks
def delete_bar_data(
self,
symbol: str,
exchange: Exchange,
interval: Interval
) -> int:
""""""
count = DbBarData.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
).delete()
# Delete bar overview
DbBarOverview.objects(
symbol=symbol,
exchange=exchange.value,
interval=interval.value
).delete()
return count
def delete_tick_data(
self,
symbol: str,
exchange: Exchange
) -> int:
""""""
count = DbTickData.objects(
symbol=symbol,
exchange=exchange.value
).delete()
return count
def get_bar_overview(self) -> List[BarOverview]:
"""
        Return bar data available in the database.
"""
# Init bar overview for old version database
data_count = DbBarData.objects.count()
overview_count = DbBarOverview.objects.count()
if data_count and not overview_count:
self.init_bar_overview()
s: QuerySet = DbBarOverview.objects()
overviews = []
for overview in s:
overview.exchange = Exchange(overview.exchange)
overview.interval = Interval(overview.interval)
overviews.append(overview)
return overviews
def init_bar_overview(self) -> None:
"""
Init the overview table if it does not exist yet.
"""
s: QuerySet = (
DbBarData.objects.aggregate({
"$group": {
"_id": {
"symbol": "$symbol",
"exchange": "$exchange",
"interval": "$interval",
},
"count": {"$sum": 1}
}
})
)
for d in s:
id_data = d["_id"]
overview = DbBarOverview()
overview.symbol = id_data["symbol"]
overview.exchange = id_data["exchange"]
overview.interval = id_data["interval"]
overview.count = d["count"]
start_bar: DbBarData = (
DbBarData.objects(
symbol=id_data["symbol"],
exchange=id_data["exchange"],
interval=id_data["interval"],
)
.order_by("+datetime")
.first()
)
overview.start = start_bar.datetime
end_bar: DbBarData = (
DbBarData.objects(
symbol=id_data["symbol"],
exchange=id_data["exchange"],
interval=id_data["interval"],
)
.order_by("-datetime")
.first()
)
overview.end = end_bar.datetime
overview.save()
def to_update_param(d: dict) -> dict:
"""
Convert data dict to update parameters.
"""
param = {f"set__{k}": v for k, v in d.items()}
return param
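# Example (illustrative): to_update_param({"close_price": 10.0, "volume": 5})
# returns {"set__close_price": 10.0, "set__volume": 5}, which mongoengine's
# update_one(upsert=True, **param) applies as a "$set" on those fields.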
database_manager = MongodbDatabase()
|
|
"""
Command handler
This module contains the infrastructure for accepting commands on the
command line. The processing of a command works as follows:
1. The calling object (caller) is analyzed based on its callertype.
2. Cmdsets are gathered from different sources:
- channels: all available channel names are auto-created into a cmdset, to allow
giving the channel name and having the text that follows be sent
immediately to the channel. The sending is performed by the CMD_CHANNEL
system command.
- object cmdsets: all objects at caller's location are scanned for non-empty
cmdsets. This includes cmdsets on exits.
- caller: the caller is searched for its own currently active cmdset.
- player: lastly the cmdsets defined on caller.player are added.
3. The collected cmdsets are merged together to a combined, current cmdset.
4. If the input string is empty -> check for CMD_NOINPUT command in
current cmdset or fallback to error message. Exit.
5. The Command Parser is triggered, using the current cmdset to analyze the
input string for possible command matches.
6. If multiple matches are found -> check for CMD_MULTIMATCH in current
cmdset, or fallback to error message. Exit.
7. If no match was found -> check for CMD_NOMATCH in current cmdset or
fallback to error message. Exit.
8. A single match was found. If this is a channel-command (i.e. the
command name is that of a channel) -> check for CMD_CHANNEL in
current cmdset or use channelhandler default. Exit.
9. At this point we have found a normal command. We assign useful variables to it that
will be available to the command coder at run-time.
10. We have a unique cmdobject, primed for use. Call all hooks:
`at_pre_cmd()`, `cmdobj.parse()`, `cmdobj.func()` and finally `at_post_cmd()`.
11. Return a deferred that will fire with the return from `cmdobj.func()` (unused by default).
"""
from weakref import WeakValueDictionary
from copy import copy
from traceback import format_exc
from twisted.internet.defer import inlineCallbacks, returnValue
from django.conf import settings
from evennia.comms.channelhandler import CHANNELHANDLER
from evennia.utils import logger, utils
from evennia.utils.utils import string_suggestions, to_unicode
from django.utils.translation import ugettext as _
__all__ = ("cmdhandler",)
_GA = object.__getattribute__
_CMDSET_MERGE_CACHE = WeakValueDictionary()
# This decides which command parser is to be used.
# You have to restart the server for changes to take effect.
_COMMAND_PARSER = utils.variable_from_module(*settings.COMMAND_PARSER.rsplit('.', 1))
# System command names - import these variables rather than trying to
# remember the actual string constants. If not defined, Evennia
# hard-coded defaults are used instead.
# command to call if user just presses <return> with no input
CMD_NOINPUT = "__noinput_command"
# command to call if no command match was found
CMD_NOMATCH = "__nomatch_command"
# command to call if multiple command matches were found
CMD_MULTIMATCH = "__multimatch_command"
# command to call if found command is the name of a channel
CMD_CHANNEL = "__send_to_channel_command"
# command to call as the very first one when the user connects.
# (is expected to display the login screen)
CMD_LOGINSTART = "__unloggedin_look_command"
# Function for handling multiple command matches.
_AT_MULTIMATCH_CMD = utils.variable_from_module(*settings.SEARCH_AT_MULTIMATCH_CMD.rsplit('.', 1))
# Output strings
_ERROR_UNTRAPPED = "{traceback}\n" \
"Above traceback is from an untrapped error. " \
"Please file a bug report."
_ERROR_CMDSETS = "{traceback}\n" \
"Above traceback is from a cmdset merger error. " \
"Please file a bug report."
_ERROR_NOCMDSETS = "No command sets found! This is a sign of a critical bug." \
"\nThe error was logged. If disconnecting/reconnecting doesn't" \
"\nsolve the problem, try to contact the server admin through" \
"\nsome other means for assistance."
_ERROR_CMDHANDLER = "{traceback}\n"\
"Above traceback is from a Command handler bug." \
"Please file a bug report with the Evennia project."
def _msg_err(receiver, string):
"""
Helper function for returning an error to the caller.
Args:
receiver (Object): object to get the error message
string (str): string with a {traceback} format marker inside it.
"""
receiver.msg(string.format(traceback=format_exc()), _nomulti=True)
# custom Exceptions
class NoCmdSets(Exception):
"No cmdsets found. Critical error."
pass
class ExecSystemCommand(Exception):
"Run a system command"
def __init__(self, syscmd, sysarg):
self.args = (syscmd, sysarg) # needed by exception error handling
self.syscmd = syscmd
self.sysarg = sysarg
class ErrorReported(Exception):
"Re-raised when a subsructure already reported the error"
# Helper function
@inlineCallbacks
def get_and_merge_cmdsets(caller, session, player, obj,
callertype, sessid=None):
"""
Gather all relevant cmdsets and merge them.
Args:
caller (Session, Player or Object): The entity executing the command. Which
type of object this is depends on the current game state; for example
when the user is not logged in, this will be a Session, when being OOC
it will be a Player and when puppeting an object this will (often) be
a Character Object. In the end it depends on where the cmdset is stored.
session (Session or None): The Session associated with caller, if any.
player (Player or None): The calling Player associated with caller, if any.
obj (Object or None): The Object associated with caller, if any.
callertype (str): This identifies caller as either "player", "object" or "session"
to avoid having to do this check internally.
sessid (int, optional): Session ID. This is not used at the moment.
Returns:
cmdset (Deferred): This deferred fires with the merged cmdset
result once merger finishes.
Notes:
The cmdsets are merged in order of generality, so that the
Object's cmdset is merged last (and will thus take precedence
over same-named and same-prio commands on Player and Session).
"""
try:
local_obj_cmdsets = [None]
@inlineCallbacks
def _get_channel_cmdsets(player, player_cmdset):
"""
Helper-method; Get channel-cmdsets
"""
# Create cmdset for all player's available channels
try:
channel_cmdset = None
if not player_cmdset.no_channels:
channel_cmdset = yield CHANNELHANDLER.get_cmdset(player)
returnValue(channel_cmdset)
except Exception:
logger.log_trace()
_msg_err(caller, _ERROR_CMDSETS)
raise ErrorReported
@inlineCallbacks
def _get_local_obj_cmdsets(obj, obj_cmdset):
"""
Helper-method; Get Object-level cmdsets
"""
# Gather cmdsets from location, objects in location or carried
try:
local_obj_cmdsets = [None]
try:
location = obj.location
except Exception:
location = None
if location and not obj_cmdset.no_objs:
# Gather all cmdsets stored on objects in the room and
# also in the caller's inventory and the location itself
local_objlist = yield (location.contents_get(exclude=obj) +
obj.contents_get() + [location])
local_objlist = [o for o in local_objlist if not o._is_deleted]
for lobj in local_objlist:
try:
# call hook in case we need to do dynamic changing to cmdset
_GA(lobj, "at_cmdset_get")()
except Exception:
logger.log_trace()
# the call-type lock is checked here; it makes sure a player
# is not seeing e.g. the commands on a fellow player (which is why
# the no_superuser_bypass must be True)
local_obj_cmdsets = \
yield [lobj.cmdset.current for lobj in local_objlist
if (lobj.cmdset.current and
lobj.access(caller, access_type='call', no_superuser_bypass=True))]
for cset in local_obj_cmdsets:
# This is necessary for object sets, or we won't be able to
# separate the command sets from each other in a busy room. We
# only keep the setting if duplicates were set to False/True
# explicitly.
cset.old_duplicates = cset.duplicates
cset.duplicates = True if cset.duplicates is None else cset.duplicates
returnValue(local_obj_cmdsets)
except Exception:
logger.log_trace()
_msg_err(caller, _ERROR_CMDSETS)
raise ErrorReported
@inlineCallbacks
def _get_cmdset(obj):
"""
Helper method; Get cmdset while making sure to trigger all
hooks safely.
"""
try:
yield obj.at_cmdset_get()
except Exception:
logger.log_trace()
_msg_err(caller, _ERROR_CMDSETS)
raise ErrorReported
try:
returnValue(obj.cmdset.current)
except AttributeError:
returnValue(None)
if callertype == "session":
# we are calling the command from the session level
report_to = session
session_cmdset = yield _get_cmdset(session)
cmdsets = [session_cmdset]
if player: # this automatically implies logged-in
player_cmdset = yield _get_cmdset(player)
channel_cmdset = yield _get_channel_cmdsets(player, player_cmdset)
cmdsets.extend([player_cmdset, channel_cmdset])
if obj:
obj_cmdset = yield _get_cmdset(obj)
local_obj_cmdsets = yield _get_local_obj_cmdsets(obj, obj_cmdset)
cmdsets.extend([obj_cmdset] + local_obj_cmdsets)
elif callertype == "player":
# we are calling the command from the player level
report_to = player
player_cmdset = yield _get_cmdset(player)
channel_cmdset = yield _get_channel_cmdsets(player, player_cmdset)
cmdsets = [player_cmdset, channel_cmdset]
if obj:
obj_cmdset = yield _get_cmdset(obj)
local_obj_cmdsets = yield _get_local_obj_cmdsets(obj, obj_cmdset)
cmdsets.extend([obj_cmdset] + local_obj_cmdsets)
elif callertype == "object":
# we are calling the command from the object level
report_to = obj
obj_cmdset = yield _get_cmdset(obj)
local_obj_cmdsets = yield _get_local_obj_cmdsets(obj, obj_cmdset)
cmdsets = [obj_cmdset] + local_obj_cmdsets
else:
raise Exception("get_and_merge_cmdsets: callertype %s is not valid." % callertype)
#cmdsets = yield [caller_cmdset] + [player_cmdset] +
# [channel_cmdset] + local_obj_cmdsets
# weed out all non-found sets
cmdsets = yield [cmdset for cmdset in cmdsets
if cmdset and cmdset.key != "_EMPTY_CMDSET"]
# report cmdset errors to user (these should already have been logged)
yield [report_to.msg(cmdset.errmessage) for cmdset in cmdsets
if cmdset.key == "_CMDSET_ERROR"]
if cmdsets:
# faster to do tuple on list than to build tuple directly
mergehash = tuple([id(cmdset) for cmdset in cmdsets])
if mergehash in _CMDSET_MERGE_CACHE:
# cached merge exist; use that
cmdset = _CMDSET_MERGE_CACHE[mergehash]
else:
# we group and merge all same-prio cmdsets separately (this avoids
# order-dependent clashes in certain cases, such as
# when duplicates=True)
tempmergers = {}
for cmdset in cmdsets:
prio = cmdset.priority
#print cmdset.key, prio
if prio in tempmergers:
# merge same-prio cmdset together separately
tempmergers[prio] = yield cmdset + tempmergers[prio]
else:
tempmergers[prio] = cmdset
# sort cmdsets after reverse priority (highest prio are merged in last)
cmdsets = yield sorted(tempmergers.values(), key=lambda x: x.priority)
# Merge all command sets into one, beginning with the lowest-prio one
cmdset = cmdsets[0]
for merging_cmdset in cmdsets[1:]:
#print "<%s(%s,%s)> onto <%s(%s,%s)>" % (merging_cmdset.key, merging_cmdset.priority, merging_cmdset.mergetype,
# cmdset.key, cmdset.priority, cmdset.mergetype)
cmdset = yield merging_cmdset + cmdset
# store the full sets for diagnosis
cmdset.merged_from = cmdsets
# cache
_CMDSET_MERGE_CACHE[mergehash] = cmdset
else:
cmdset = None
for cset in (cset for cset in local_obj_cmdsets if cset):
cset.duplicates = cset.old_duplicates
#print "merged set:", cmdset.key
returnValue(cmdset)
except ErrorReported:
raise
except Exception:
logger.log_trace()
_msg_err(caller, _ERROR_CMDSETS)
raise ErrorReported
# Main command-handler function
@inlineCallbacks
def cmdhandler(called_by, raw_string, _testing=False, callertype="session", sessid=None, **kwargs):
"""
This is the main mechanism that handles any string sent to the engine.
Args:
called_by (Session, Player or Object): The object from which this
command was called. What this is depends on the game state.
raw_string (str): The command string as given on the command line.
_testing (bool, optional): Used for debug purposes and decides if we
should actually execute the command or not. If True, the
command instance will be returned.
callertype (str, optional): One of "session", "player" or
"object". These are treated in decending order, so when the
Session is the caller, it will merge its own cmdset into
cmdsets from both Player and eventual puppeted Object (and
cmdsets in its room etc). A Player will only include its own
cmdset and the Objects and so on. Merge order is the same
order, so that Object cmdsets are merged in last, giving them
precedence for same-name and same-prio commands.
sessid (int, optional): Relevant if callertype is "player" - the session id will help
retrieve the correct cmdsets from puppeted objects.
Kwargs:
kwargs (any): other keyword arguments will be assigned as named variables on the
retrieved command object *before* it is executed. This is unused
in default Evennia but may be used by code to set custom flags or
special operating conditions for a command as it executes.
Returns:
deferred (Deferred): This deferred is fired with the return
value of the command's `func` method. This is not used in
default Evennia.
"""
@inlineCallbacks
def _run_command(cmd, cmdname, args):
"""
Helper function: This initializes and runs the Command
instance once the parser has identified it as either a normal
command or one of the system commands.
Args:
cmd (Command): command object
cmdname (str): name of command
args (str): extra text entered after the identified command
Returns:
deferred (Deferred): this will fire with the return of the
command's `func` method.
"""
try:
# Assign useful variables to the instance
cmd.caller = caller
cmd.cmdstring = cmdname
cmd.args = args
cmd.cmdset = cmdset
cmd.sessid = session.sessid if session else sessid
cmd.session = session
cmd.player = player
cmd.raw_string = unformatted_raw_string
# cmd.obj  # set via the on-object cmdset handler for each command,
# since this may be different for every command when
# merging multiple cmdsets
if hasattr(cmd, 'obj') and hasattr(cmd.obj, 'scripts'):
# cmd.obj is automatically made available by the cmdhandler.
# we make sure to validate its scripts.
yield cmd.obj.scripts.validate()
if _testing:
# only return the command instance
returnValue(cmd)
# assign custom kwargs to found cmd object
for key, val in kwargs.items():
setattr(cmd, key, val)
# pre-command hook
abort = yield cmd.at_pre_cmd()
if abort:
# abort sequence
returnValue(abort)
# Parse and execute
yield cmd.parse()
# main command code
# (return value is normally None)
ret = yield cmd.func()
# post-command hook
yield cmd.at_post_cmd()
if cmd.save_for_next:
# store a reference to this command, possibly
# accessible by the next command.
caller.ndb.last_cmd = yield copy(cmd)
else:
caller.ndb.last_cmd = None
# return result to the deferred
returnValue(ret)
except Exception:
logger.log_trace()
_msg_err(caller, _ERROR_UNTRAPPED)
raise ErrorReported
raw_string = to_unicode(raw_string, force_string=True)
session, player, obj = None, None, None
if callertype == "session":
session = called_by
player = session.player
if player:
obj = yield player.get_puppet(session.sessid)
elif callertype == "player":
player = called_by
if sessid:
session = player.get_session(sessid)
obj = yield player.get_puppet(sessid)
elif callertype == "object":
obj = called_by
else:
raise RuntimeError("cmdhandler: callertype %s is not valid." % callertype)
# the caller will be the one to receive messages and exert its permissions.
# we assign the caller with preference 'bottom up'
caller = obj or player or session
# The error_to is the default recipient for errors. Tries to make sure a player
# does not get spammed for errors while preserving character mirroring.
error_to = obj or session or player
try: # catch bugs in cmdhandler itself
try: # catch special-type commands
cmdset = yield get_and_merge_cmdsets(caller, session, player, obj,
callertype, sessid)
if not cmdset:
# this is bad and shouldn't happen.
raise NoCmdSets
unformatted_raw_string = raw_string
raw_string = raw_string.strip()
if not raw_string:
# Empty input. Test for system command instead.
syscmd = yield cmdset.get(CMD_NOINPUT)
sysarg = ""
raise ExecSystemCommand(syscmd, sysarg)
# Parse the input string and match to available cmdset.
# This also checks for permissions, so all commands in match
# are commands the caller is allowed to call.
matches = yield _COMMAND_PARSER(raw_string, cmdset, caller)
# Deal with matches
if len(matches) > 1:
# We have a multiple-match
syscmd = yield cmdset.get(CMD_MULTIMATCH)
sysarg = _("There were multiple matches.")
if syscmd:
# use custom CMD_MULTIMATCH
syscmd.matches = matches
else:
# fall back to default error handling
sysarg = yield _AT_MULTIMATCH_CMD(caller, matches)
raise ExecSystemCommand(syscmd, sysarg)
if len(matches) == 1:
# We have a unique command match. But it may still be invalid.
match = matches[0]
cmdname, args, cmd = match[0], match[1], match[2]
# check if we allow this type of command
if cmdset.no_channels and hasattr(cmd, "is_channel") and cmd.is_channel:
matches = []
if cmdset.no_exits and hasattr(cmd, "is_exit") and cmd.is_exit:
matches = []
if not matches:
# No commands match our entered command
syscmd = yield cmdset.get(CMD_NOMATCH)
if syscmd:
# use custom CMD_NOMATCH command
sysarg = raw_string
else:
# fallback to default error text
sysarg = _("Command '%s' is not available.") % raw_string
suggestions = string_suggestions(raw_string,
cmdset.get_all_cmd_keys_and_aliases(caller),
cutoff=0.7, maxnum=3)
if suggestions:
sysarg += _(" Maybe you meant %s?") % utils.list_to_string(suggestions, _('or'), addquote=True)
else:
sysarg += _(" Type \"help\" for help.")
raise ExecSystemCommand(syscmd, sysarg)
# Check if this is a Channel-cmd match.
if hasattr(cmd, 'is_channel') and cmd.is_channel:
# even if a user-defined syscmd is not defined, the
# found cmd is already a system command in its own right.
syscmd = yield cmdset.get(CMD_CHANNEL)
if syscmd:
# replace system command with custom version
cmd = syscmd
cmd.sessid = session.sessid if session else None
sysarg = "%s:%s" % (cmdname, args)
raise ExecSystemCommand(cmd, sysarg)
# A normal command.
ret = yield _run_command(cmd, cmdname, args)
returnValue(ret)
except ErrorReported:
# this error was already reported, so we
# catch it here and don't pass it on.
pass
except ExecSystemCommand, exc:
# Not a normal command: run a system command, if available,
# or fall back to a return string.
syscmd = exc.syscmd
sysarg = exc.sysarg
if syscmd:
ret = yield _run_command(syscmd, syscmd.key, sysarg)
returnValue(ret)
elif sysarg:
# return system arg
error_to.msg(exc.sysarg, _nomulti=True)
except NoCmdSets:
# Critical error.
logger.log_errmsg("No cmdsets found: %s" % caller)
error_to.msg(_ERROR_NOCMDSETS, _nomulti=True)
except Exception:
# We should not end up here. If we do, it's a programming bug.
logger.log_trace()
_msg_err(error_to, _ERROR_UNTRAPPED)
except Exception:
# This catches exceptions in cmdhandler exceptions themselves
logger.log_trace()
_msg_err(error_to, _ERROR_CMDHANDLER)
|
|
#!/usr/bin/python
"""
DeployMan - Management of things that need to be dynamically deployed. It is used after the initial deployment of the base configuration, which is more static and less dynamic.
DeployMan is a combination of RunMan and Sysync, which connects to a remote server to get work that needs to be deployed for this machine's component/service instances.
Once the dynamic deployment information is gathered, then a process like Sysync will apply the required changes to the target node running DeployMan.
Copyright Geoff Howland, 2014. MIT License.
TODO:
- ...
-
"""
import sys
import os
import getopt
import yaml
import json
import pprint
import logging
import utility
from utility.log import log
# Default configuration file path
DEFAULT_CONFIG_PATH = 'deployman.yaml'
# Default handler path
DEFAULT_HANDLER_PATH = 'handlers/defaults/'
# Output formats we support
OUTPUT_FORMATS = ['json', 'yaml', 'pprint']
def ProcessCommand(config, command_options, command_args):
"""Process a command against this run_spec_path"""
output_data = {}
while utility.client.RUNNING:
#try:
if 1:
# Become a client forever, requesting dynamic deployments
utility.client.ProcessRequestsForever(config, command_options, command_args)
#except Exception, e:
# log('ProcessCommand Client ProcessRequestsForever Exception: %s' % e, level=logging.ERROR)
return output_data
def FormatAndOutput(result, command_options):
"""Format the result and print it to stdout"""
# PPrint
if command_options['format'] == 'pprint':
pprint.pprint(result)
# YAML
elif command_options['format'] == 'yaml':
print yaml.dump(result)
# JSON
elif command_options['format'] == 'json':
print json.dumps(result)
else:
raise Exception('Unknown output format "%s", result as text: %s' % (command_options['format'], result))
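# Example (illustrative): FormatAndOutput({'status': 'ok'}, {'format': 'json'})
# prints {"status": "ok"} to stdout.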
def Usage(error=None):
"""Print usage information, any errors, and exit.
If errors, exit code = 1, otherwise 0.
"""
if error:
print '\nerror: %s' % error
exit_code = 1
else:
exit_code = 0
print
print 'usage: %s [options]' % os.path.basename(sys.argv[0])
print
print 'example usage: "python %s --config deployman.yaml"' % os.path.basename(sys.argv[0])
print
print
print 'Options:'
print
print ' -h, -?, --help This usage information'
print ' -v, --verbose Verbose output'
print ' -f, --format <format> Format output, types: %s' % ', '.join(OUTPUT_FORMATS)
print ' -c, --config <path> Path to config file (Format specified by suffix: .yaml, .json)'
print ' --override-host <hostname> Hostname to run jobs as, overriding the local hostname'
print
sys.exit(exit_code)
def Main(args=None):
if not args:
args = []
long_options = ['help', 'format=', 'verbose', 'config=']
try:
(options, args) = getopt.getopt(args, '?hvnsc:f:', long_options)
except getopt.GetoptError, e:
Usage(e)
# Dictionary of command options, with defaults
command_options = {}
command_options['remote'] = False # Remote invocation. When quitting or Error(), report back remotely with details.
command_options['platform'] = utility.platform.GetPlatform()
#command_options['verbose'] = False
command_options['verbose'] = True
command_options['format'] = 'pprint'
command_options['config_path'] = DEFAULT_CONFIG_PATH
command_options['handler_data_path'] = DEFAULT_HANDLER_PATH
command_options['files_path'] = None
command_options['override_host'] = None
# Process out CLI options
for (option, value) in options:
# Help
if option in ('-h', '-?', '--help'):
Usage()
# Verbose output information
elif option in ('-v', '--verbose'):
command_options['verbose'] = True
# Config file path to load settings from
elif option in ('-c', '--config'):
command_options['config_path'] = value
# Format output
elif option in ('-f', '--format'):
if value not in (OUTPUT_FORMATS):
Usage('Unsupported output format "%s", supported formats: %s' % (value, ', '.join(OUTPUT_FORMATS)))
command_options['format'] = value
# Override: Host name for running jobs
elif option == '--override-host':
command_options['override_host'] = value
# Invalid option
else:
Usage('Unknown option: %s' % option)
# Store the command options for our logging
utility.log.RUN_OPTIONS = command_options
# Load the configuration
if not os.path.isfile(command_options['config_path']):
Usage('Config file does not exist: %s' % command_options['config_path'])
try:
config = yaml.load(open(command_options['config_path']))
# Put the files in the default temp path
command_options['files_path'] = config['deploy_temp_path']
except Exception, e:
Usage('Failed to load config: %s: %s' % (command_options['config_path'], e))
# If there are any command args, get them
command_args = args
# Process the command
if 1:
#try:
# Process the command and retrieve a result
result = ProcessCommand(config, command_options, command_args)
# Format and output the result (pprint/json/yaml to stdout/file)
FormatAndOutput(result, command_options)
#NOTE(g): Catch all exceptions, and return in properly formatted output
#TODO(g): Implement stack trace in Exception handling so we don't lose where this
# exception came from, and can then wrap all runs and still get useful
# debugging information
#except Exception, e:
else:
utility.error.Error({'exception':str(e)}, command_options)
if __name__ == '__main__':
#NOTE(g): Fixing the path here. If you're calling this as a module, you have to
# fix the utility/handlers module import problem yourself.
sys.path.append(os.path.dirname(sys.argv[0]))
Main(sys.argv[1:])
|
|
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import uuid
import mock
from oslo.config import cfg
from oslo.utils import timeutils
from nova.compute import flavors
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import objects
from nova.objects import base as obj_base
from nova.openstack.common import jsonutils
from nova import rpc
from nova import test
from nova.tests.compute.monitors import test_monitors
from nova.tests.objects import test_migration
from nova.tests.pci import pci_fakes
from nova.virt import driver
from nova.virt import hardware
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = hardware.VirtNUMAHostTopology(
cells=[hardware.VirtNUMATopologyCellUsage(0, set([1, 2]), 3072),
hardware.VirtNUMATopologyCellUsage(1, set([3, 4]), 3072)])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = hardware.VirtNUMALimitTopology(
cells=[hardware.VirtNUMATopologyCellLimit(
0, set([1, 2]), 3072, 4, 10240),
hardware.VirtNUMATopologyCellLimit(
1, set([3, 4]), 3072, 4, 10240)])
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [{
'label': 'forza-napoli',
'dev_type': 'foo',
'compute_node_id': 1,
'address': '0000:00:00.1',
'product_id': 'p1',
'vendor_id': 'v1',
'status': 'available',
'extra_k1': 'v1'}] if self.pci_support else []
self.pci_stats = [{
'count': 1,
'vendor_id': 'v1',
'product_id': 'p1'}] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology.to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._numa_topologies = {}
self._instance_types = {}
self.stubs.Set(self.conductor.db,
'instance_get_all_by_host_and_node',
self._fake_instance_get_all_by_host_and_node)
self.stubs.Set(db, 'instance_extra_get_by_instance_uuid',
self._fake_instance_extra_get_by_instance_uuid)
self.stubs.Set(self.conductor.db,
'instance_update_and_get_original',
self._fake_instance_update_and_get_original)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
def _create_compute_node(self, values=None):
compute = {
"id": 1,
"service_id": 1,
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": {
"num_instances": "1",
},
"hypervisor_hostname": "fakenode",
}
if values:
compute.update(values)
return compute
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
}
return service
def _fake_instance_system_metadata(self, instance_type, prefix=''):
sys_meta = []
for key in flavors.system_metadata_flavor_props.keys():
sys_meta.append({'key': '%sinstance_type_%s' % (prefix, key),
'value': instance_type[key]})
return sys_meta
def _fake_instance(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
sys_meta = self._fake_instance_system_metadata(flavor)
if stash:
# stash instance types in system metadata.
sys_meta = (sys_meta +
self._fake_instance_system_metadata(flavor, 'new_') +
self._fake_instance_system_metadata(flavor, 'old_'))
instance_uuid = str(uuid.uuid1())
instance = {
'uuid': instance_uuid,
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': sys_meta,
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'scheduled_at': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
}
numa_topology = kwargs.pop('numa_topology', None)
if numa_topology:
numa_topology = {
'id': 1, 'created_at': None, 'updated_at': None,
'deleted_at': None, 'deleted': None,
'instance_uuid': instance['uuid'],
'numa_topology': numa_topology.to_json()
}
instance.update(kwargs)
self._instances[instance_uuid] = instance
self._numa_topologies[instance_uuid] = numa_topology
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_all_by_host_and_node(self, context, host, nodename):
return [i for i in self._instances.values() if i['host'] == host]
def _fake_instance_extra_get_by_instance_uuid(self, context,
instance_uuid):
return self._numa_topologies.get(instance_uuid)
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_instance_update_and_get_original(self, context, instance_uuid,
values):
instance = self._instances[instance_uuid]
instance.update(values)
# the test doesn't care what the original instance values are, it's
# only used in the subsequent notification:
return (instance, instance)
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_context_claim(self):
# instance context manager variation:
instance = self._fake_instance()
claim = self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingServiceTestCase(BaseTestCase):
def setUp(self):
super(MissingServiceTestCase, self).setUp()
self.context = context.get_admin_context()
self.tracker = self._tracker()
def test_missing_service(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.tracker.disabled)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node()
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def test_create_compute_node(self):
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.updated = False
self.deleted = False
self.update_call_count = 0
self.tracker = self._tracker()
self._migrations = {}
self.stubs.Set(db, 'service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stubs.Set(db, 'compute_node_update',
self._fake_compute_node_update)
self.stubs.Set(db, 'compute_node_delete',
self._fake_compute_node_delete)
self.stubs.Set(db, 'migration_update',
self._fake_migration_update)
self.stubs.Set(db, 'migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.compute = self._create_compute_node()
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = self._migrations.values()[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology.to_json() if numa_topology else None
}
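# With the fake constants above, the defaults yield (illustrative):
#   {'memory_mb': 6, 'disk_gb': 6, 'vcpu': 1,
#    'numa_topology': <JSON of FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD>}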
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, hardware.VirtNUMAHostTopology.from_json(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
jsonutils.loads(self.tracker.compute_node['pci_stats']))
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def test_create_resource(self):
self.tracker._write_ext_resources = mock.Mock()
self.tracker.conductor_api.compute_node_create = mock.Mock(
return_value=dict(id=1))
values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
self.tracker._create(self.context, values)
expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
'id': 1}
self.tracker.scheduler_client.update_resource_stats.\
assert_called_once_with(self.context,
("fakehost", "fakenode"),
expected)
def test_update_resource(self):
self.tracker._write_ext_resources = mock.Mock()
values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
self.tracker._update(self.context, values)
expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
'id': 1}
self.tracker.scheduler_client.update_resource_stats.\
assert_called_once_with(self.context,
("fakehost", "fakenode"),
expected)
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
jsonutils.loads(self.tracker.compute_node['pci_stats']))
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def setUp(self):
super(TrackerExtraResourcesTestCase, self).setUp()
self.driver = self._driver()
def _driver(self):
return FakeVirtDriver()
def test_set_empty_ext_resources(self):
resources = self.driver.get_available_resource(self.tracker.nodename)
self.assertNotIn('stats', resources)
self.tracker._write_ext_resources(resources)
self.assertIn('stats', resources)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self.driver.get_available_resource(self.tracker.nodename)
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": 12}
self.assertEqual(sorted(expected),
sorted(resources['stats']))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return hardware.VirtNUMAInstanceTopology(
cells=[hardware.VirtNUMATopologyCell(0, set([1]), mem),
hardware.VirtNUMATopologyCell(1, set([3]), mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return hardware.VirtNUMAHostTopology(
cells=[hardware.VirtNUMATopologyCellUsage(
0, set([1, 2]), 3072, cpu_usage=cpus,
memory_usage=mem),
hardware.VirtNUMATopologyCellUsage(
1, set([3, 4]), 3072, cpu_usage=cpus,
memory_usage=mem)])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_usage_only_for_tracked(self, mock_get):
flavor = self._fake_flavor_create()
claim_mem = flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD
claim_gb = flavor['root_gb'] + flavor['ephemeral_gb']
claim_topology = self._claim_topology(claim_mem / 2)
instance_topology = self._instance_topology(claim_mem / 2)
instance = self._fake_instance(
flavor=flavor, task_state=None,
numa_topology=instance_topology)
self.tracker.update_usage(self.context, instance)
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'current_workload')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertNotEqual(0, claim.memory_mb)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
# now update should actually take effect
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self._assert(claim_mem, 'memory_mb_used')
self._assert(claim_gb, 'local_gb_used')
self._assert(claim_topology, 'numa_topology')
self._assert(1, 'current_workload')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_audit(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance(memory_mb=claim_mem, root_gb=claim_disk,
ephemeral_gb=0, numa_topology=instance_topology)
self.tracker.instance_claim(self.context, instance, self.limits)
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["memory_mb"])
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["local_gb"])
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
# 1st pretend that the compute operation finished and claimed the
# desired resources from the virt layer
driver = self.tracker.driver
driver.memory_mb_used = claim_mem
driver.local_gb_used = claim_disk
self.tracker.update_available_resource(self.context)
# confirm tracker is adding in host_ip
self.assertIsNotNone(self.compute.get('host_ip'))
# confirm that resource usage is derived from instance usages,
# not virt layer:
self.assertEqual(claim_mem_total, self.compute['memory_mb_used'])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute['free_ram_mb'])
self.assertEqualNUMAHostTopology(
claim_topology, hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute['local_gb_used'])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute['free_disk_gb'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_abort(self, mock_get):
claim_mem = 3
claim_mem_total = 3 + FAKE_VIRT_MEMORY_OVERHEAD
claim_disk = 2
claim_topology = self._claim_topology(claim_mem_total / 2)
instance_topology = self._instance_topology(claim_mem_total / 2)
instance = self._fake_instance(memory_mb=claim_mem,
root_gb=claim_disk, ephemeral_gb=0,
numa_topology=instance_topology)
claim = self.tracker.instance_claim(self.context, instance,
self.limits)
self.assertIsNotNone(claim)
self.assertEqual(claim_mem_total, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB - claim_mem_total,
self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
claim_topology, hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(claim_disk, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB - claim_disk,
self.compute["free_disk_gb"])
claim.abort()
self.assertEqual(0, self.compute["memory_mb_used"])
self.assertEqual(FAKE_VIRT_MEMORY_MB, self.compute["free_ram_mb"])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(0, self.compute["local_gb_used"])
self.assertEqual(FAKE_VIRT_LOCAL_GB, self.compute["free_disk_gb"])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(memory_mb)
instance_topology = self._instance_topology(memory_mb)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD.to_json()}
instance = self._fake_instance(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node['local_gb_used'])
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node['vcpus_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_context_claim_with_exception(self, mock_get):
instance = self._fake_instance(memory_mb=1, root_gb=1, ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_context_claim(self, mock_get):
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=2, ephemeral_gb=3)
claim_topology = self._claim_topology(1)
instance_topology = self._instance_topology(1)
instance = self._fake_instance(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
# after exiting claim context, build is marked as finished. usage
# totals should be same:
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
claim_topology,
hardware.VirtNUMAHostTopology.from_json(
self.compute['numa_topology']))
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.compute['local_gb_used'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance(task_state=task_states.SCHEDULING)
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node['current_workload'])
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_cpu_stats(self, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
vcpus = 1
instance = self._fake_instance(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
class ResizeClaimTestCase(BaseTrackerTestCase):
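    """Tests resize claims against the resource tracker: claim, abort,
    revert and audit accounting, plus tracking of the associated
    migrations.
    """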
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
def _fake_migration_create(mig_self, ctxt):
self._migrations[mig_self.instance_uuid] = mig_self
mig_self.obj_reset_changes()
self.stubs.Set(objects.Migration, 'create',
_fake_migration_create)
self.instance = self._fake_instance()
self.instance_type = self._fake_flavor_create()
def _fake_migration_create(self, context, values=None):
instance_uuid = str(uuid.uuid1())
mig_dict = test_migration.fake_db_migration()
mig_dict.update({
'id': 1,
'source_compute': 'host1',
'source_node': 'fakenode',
'dest_compute': 'host2',
'dest_node': 'fakenode',
'dest_host': '127.0.0.1',
'old_instance_type_id': 1,
'new_instance_type_id': 2,
'instance_uuid': instance_uuid,
'status': 'pre-migrating',
'updated_at': timeutils.utcnow()
})
if values:
mig_dict.update(values)
migration = objects.Migration()
migration.update(mig_dict)
# This hits the stub in setUp()
migration.create('fake')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_abort(self, mock_get):
try:
with self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits):
raise test.TestingException("abort")
except test.TestingException:
pass
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, limits)
instance2 = self._fake_instance()
self.tracker.resize_claim(self.context, instance2, self.instance_type,
limits)
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_and_audit(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, self.limits)
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_same_host(self, mock_get):
self.limits['vcpu'] = 3
src_dict = {
'memory_mb': 1, 'root_gb': 1, 'ephemeral_gb': 0, 'vcpus': 1}
        dest_dict = dict((k, v + 1) for (k, v) in src_dict.items())
src_type = self._fake_flavor_create(
id=10, name="srcflavor", **src_dict)
dest_type = self._fake_flavor_create(
id=11, name="destflavor", **dest_dict)
# make an instance of src_type:
instance = self._fake_instance(flavor=src_type)
instance['system_metadata'] = self._fake_instance_system_metadata(
dest_type)
self.tracker.instance_claim(self.context, instance, self.limits)
# resize to dest_type:
claim = self.tracker.resize_claim(self.context, instance,
dest_type, self.limits)
self._assert(src_dict['memory_mb'] + dest_dict['memory_mb']
+ 2 * FAKE_VIRT_MEMORY_OVERHEAD, 'memory_mb_used')
self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb']
+ dest_dict['root_gb'] + dest_dict['ephemeral_gb'],
'local_gb_used')
self._assert(src_dict['vcpus'] + dest_dict['vcpus'], 'vcpus_used')
self.tracker.update_available_resource(self.context)
claim.abort()
# only the original instance should remain, not the migration:
self._assert(src_dict['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
'memory_mb_used')
self._assert(src_dict['root_gb'] + src_dict['ephemeral_gb'],
'local_gb_used')
self._assert(src_dict['vcpus'], 'vcpus_used')
self.assertEqual(1, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert(self, mock_get):
self.tracker.resize_claim(self.context, self.instance,
self.instance_type, {}, self.limits)
self.tracker.drop_resize_claim(self.context, self.instance)
self.assertEqual(0, len(self.tracker.tracked_instances))
self.assertEqual(0, len(self.tracker.tracked_migrations))
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_revert_reserve_source(self, mock_get):
# if a revert has started at the API and audit runs on
# the source compute before the instance flips back to source,
# resources should still be held at the source based on the
# migration:
dest = "desthost"
dest_tracker = self._tracker(host=dest)
dest_tracker.update_available_resource(self.context)
self.instance = self._fake_instance(memory_mb=FAKE_VIRT_MEMORY_MB,
root_gb=FAKE_VIRT_LOCAL_GB, ephemeral_gb=0,
vcpus=FAKE_VIRT_VCPUS, instance_type_id=1)
values = {'source_compute': self.host, 'dest_compute': dest,
'old_instance_type_id': 1, 'new_instance_type_id': 1,
'status': 'post-migrating',
'instance_uuid': self.instance['uuid']}
self._fake_migration_create(self.context, values)
# attach an instance to the destination host tracker:
dest_tracker.instance_claim(self.context, self.instance)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
'memory_mb_used', tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# audit and recheck to confirm migration doesn't get double counted
# on dest:
dest_tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD,
'memory_mb_used', tracker=dest_tracker)
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used',
tracker=dest_tracker)
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used',
tracker=dest_tracker)
# apply the migration to the source host tracker:
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
# flag the instance and migration as reverting and re-audit:
self.instance['vm_state'] = vm_states.RESIZED
self.instance['task_state'] = task_states.RESIZE_REVERTING
self.tracker.update_available_resource(self.context)
self._assert(FAKE_VIRT_MEMORY_MB + 1, 'memory_mb_used')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(FAKE_VIRT_VCPUS, 'vcpus_used')
def test_resize_filter(self):
instance = self._fake_instance(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(self.tracker._instance_in_resize_state(instance))
instance = self._fake_instance(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(self.tracker._instance_in_resize_state(instance))
states = [task_states.RESIZE_PREP, task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED, task_states.RESIZE_FINISH]
for vm_state in [vm_states.ACTIVE, vm_states.STOPPED]:
for task_state in states:
instance = self._fake_instance(vm_state=vm_state,
task_state=task_state)
result = self.tracker._instance_in_resize_state(instance)
self.assertTrue(result)
def test_dupe_filter(self):
instance = self._fake_instance(host=self.host)
values = {'source_compute': self.host, 'dest_compute': self.host,
'instance_uuid': instance['uuid'], 'new_instance_type_id': 2}
self._fake_flavor_create(id=2)
self._fake_migration_create(self.context, values)
self._fake_migration_create(self.context, values)
self.tracker.update_available_resource(self.context)
self.assertEqual(1, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_set_instance_host_and_node(self, mock_get):
instance = self._fake_instance()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class NoInstanceTypesInSysMetadata(ResizeClaimTestCase):
"""Make sure we handle the case where the following are true:
#) Compute node C gets upgraded to code that looks for instance types in
system metadata. AND
#) C already has instances in the process of migrating that do not have
stashed instance types.
bug 1164110
"""
def setUp(self):
super(NoInstanceTypesInSysMetadata, self).setUp()
self.instance = self._fake_instance(stash=False)
def test_get_instance_type_stash_false(self):
with (mock.patch.object(objects.Flavor, 'get_by_id',
return_value=self.instance_type)):
flavor = self.tracker._get_instance_type(self.context,
self.instance, "new_")
self.assertEqual(self.instance_type, flavor)
class OrphanTestCase(BaseTrackerTestCase):
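    """Tests that usage reported by the virt driver for instances the
    tracker does not know about (orphans) is still accounted for.
    """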
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
fake_monitors = [
'nova.tests.compute.monitors.test_monitors.FakeMonitorClass1',
'nova.tests.compute.monitors.test_monitors.FakeMonitorClass2']
self.flags(compute_available_monitors=fake_monitors)
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
self.flags(compute_monitors=['FakeMontorClass1', 'FakeMonitorClass4'])
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
def test_get_host_metrics_one_failed(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass4'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class4 = test_monitors.FakeMonitorClass4(self.tracker)
self.tracker.monitors = [class1, class4]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertTrue(len(metrics) > 0)
def test_get_host_metrics(self):
self.flags(compute_monitors=['FakeMonitorClass1', 'FakeMonitorClass2'])
class1 = test_monitors.FakeMonitorClass1(self.tracker)
class2 = test_monitors.FakeMonitorClass2(self.tracker)
self.tracker.monitors = [class1, class2]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [{
'timestamp': 1232,
'name': 'key1',
'value': 2600,
'source': 'libvirt'
}, {
'name': 'key2',
'source': 'libvirt',
'timestamp': 123,
'value': 1600
}]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
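    """Tests the periodic resource audit: the compute node record is only
    updated when resources actually change, and the audit passes the
    driver's resources to the locked inner update.
    """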
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a json string.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats='this is not json')
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for string that does not parse as json
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
|
|
import threading
import time
from sqlalchemy import pool, select, event
import sqlalchemy as tsa
from sqlalchemy import testing
from sqlalchemy.testing.util import gc_collect, lazy_gc
from sqlalchemy.testing import eq_, assert_raises, is_not_, is_
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing import fixtures
import random
from sqlalchemy.testing.mock import Mock, call, patch, ANY
import weakref
import collections
join_timeout = 10
def MockDBAPI():
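    """Return a mock DBAPI module.

    connect() hands back fresh Mock connections; shutdown(True) makes
    subsequent connect() calls raise, simulating a database outage.
    """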
def cursor():
return Mock()
def connect(*arg, **kw):
return Mock(cursor=Mock(side_effect=cursor))
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect)
db.is_shutdown = value
db = Mock(
connect=Mock(side_effect=connect),
shutdown=shutdown,
is_shutdown=False)
return db
class PoolTestBase(fixtures.TestBase):
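    """Base class for the pool tests: clears pool managers around each
    test and provides QueuePool fixtures backed by MockDBAPI.
    """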
def setup(self):
pool.clear_managers()
self._teardown_conns = []
def teardown(self):
for ref in self._teardown_conns:
conn = ref()
if conn:
conn.close()
@classmethod
def teardown_class(cls):
pool.clear_managers()
def _with_teardown(self, connection):
self._teardown_conns.append(weakref.ref(connection))
return connection
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
**kw)
class PoolTest(PoolTestBase):
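    """Assorted pool tests: the pool.manage() manager, threadlocal
    checkout behavior, recreate(), and the per-connection .info dict.
    """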
def test_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=True)
c1 = manager.connect('foo.db')
c2 = manager.connect('foo.db')
c3 = manager.connect('bar.db')
c4 = manager.connect("foo.db", bar="bat")
c5 = manager.connect("foo.db", bar="hoho")
c6 = manager.connect("foo.db", bar="bat")
assert c1.cursor() is not None
assert c1 is c2
assert c1 is not c3
assert c4 is c6
assert c4 is not c5
def test_manager_with_key(self):
dbapi = MockDBAPI()
manager = pool.manage(dbapi, use_threadlocal=True)
c1 = manager.connect('foo.db', sa_pool_key="a")
c2 = manager.connect('foo.db', sa_pool_key="b")
c3 = manager.connect('bar.db', sa_pool_key="a")
assert c1.cursor() is not None
assert c1 is not c2
assert c1 is c3
eq_(dbapi.connect.mock_calls,
[
call("foo.db"),
call("foo.db"),
]
)
def test_bad_args(self):
manager = pool.manage(MockDBAPI())
manager.connect(None)
def test_non_thread_local_manager(self):
manager = pool.manage(MockDBAPI(), use_threadlocal=False)
connection = manager.connect('foo.db')
connection2 = manager.connect('foo.db')
self.assert_(connection.cursor() is not None)
self.assert_(connection is not connection2)
@testing.fails_on('+pyodbc',
"pyodbc cursor doesn't implement tuple __eq__")
@testing.fails_on("+pg8000", "returns [1], not (1,)")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1, )]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (pool.SingletonThreadPool, pool.StaticPool,
pool.QueuePool, pool.NullPool, pool.AssertionPool):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def test_threadlocal_del(self):
self._do_testthreadlocal(useclose=False)
def test_threadlocal_close(self):
self._do_testthreadlocal(useclose=True)
def _do_testthreadlocal(self, useclose=False):
dbapi = MockDBAPI()
for p in pool.QueuePool(creator=dbapi.connect,
pool_size=3, max_overflow=-1,
use_threadlocal=True), \
pool.SingletonThreadPool(creator=dbapi.connect,
use_threadlocal=True):
c1 = p.connect()
c2 = p.connect()
self.assert_(c1 is c2)
c3 = p.unique_connection()
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
c2 = p.connect()
self.assert_(c1 is c2)
self.assert_(c3 is not c1)
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
if useclose:
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
c3.close()
c2.close()
self.assert_(c1.connection is not None)
c1.close()
c1 = c2 = c3 = None
# extra tests with QueuePool to ensure connections get
# __del__()ed when dereferenced
if isinstance(p, pool.QueuePool):
lazy_gc()
self.assert_(p.checkedout() == 0)
c1 = p.connect()
c2 = p.connect()
if useclose:
c2.close()
c1.close()
else:
c2 = None
c1 = None
lazy_gc()
self.assert_(p.checkedout() == 0)
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info['foo'] = 'bar'
c.close()
del c
c = p.connect()
self.assert_('foo' in c.info)
c.invalidate()
c = p.connect()
self.assert_('foo' not in c.info)
c.info['foo2'] = 'bar2'
c.detach()
self.assert_('foo2' in c.info)
c2 = p.connect()
is_not_(c.connection, c2.connection)
assert not c2.info
assert 'foo2' in c.info
class PoolDialectTest(PoolTestBase):
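    """Tests that returning and disposing connections routes through the
    pool's _dialect hooks (do_rollback/do_close) for each pool class.
    """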
def _dialect(self):
canary = []
class PoolDialect(object):
def do_rollback(self, dbapi_connection):
canary.append('R')
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append('C')
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append('CL')
dbapi_connection.close()
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ['R', 'CL', 'R'])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ['R', 'CL', 'R'])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ['R', 'CL', 'R'])
def test_null_pool(self):
self._do_test(pool.NullPool, ['R', 'CL', 'R', 'CL'])
def test_static_pool(self):
self._do_test(pool.StaticPool, ['R', 'R'])
class PoolEventsTest(PoolTestBase):
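    """Tests for pool event hooks: first_connect, connect, checkout,
    checkin, reset, invalidate, soft_invalidate, detach, close and
    close_detached, plus listener target scoping.
    """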
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append('first_connect')
event.listen(p, 'first_connect', first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append('connect')
event.listen(p, 'connect', connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append('checkout')
event.listen(p, 'checkout', checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append('checkin')
event.listen(p, 'checkin', checkin)
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append('reset')
event.listen(p, 'reset', reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'invalidate', canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'soft_invalidate', canary)
return p, canary
def _close_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'close', canary)
return p, canary
def _detach_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'detach', canary)
return p, canary
def _close_detached_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, 'close_detached', canary)
return p, canary
def test_close(self):
p, canary = self._close_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.close()
eq_(canary.mock_calls, [])
p.dispose()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach(self):
p, canary = self._detach_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.detach()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach_close(self):
p, canary = self._close_detached_event_fixture()
c1 = p.connect()
connection = c1.connection
c1.detach()
c1.close()
eq_(canary.mock_calls, [call(connection)])
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['first_connect'])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['first_connect', 'first_connect'])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
eq_(canary, ['connect'])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
c1 = p.connect()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['connect', 'connect'])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
eq_(canary, ['checkout'])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
c2 = p.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, ['checkout', 'checkout'])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['reset'])
def test_soft_invalidate_event_no_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate(soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_soft_invalidate_event_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc, soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_invalidate_event_no_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate()
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_invalidate_event_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ['checkin'])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ['checkin'])
c2.close()
eq_(canary, ['checkin', 'checkin'])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, 'connect', listen_one)
event.listen(engine.pool, 'connect', listen_two)
event.listen(engine, 'connect', listen_three)
event.listen(engine.__class__, 'connect', listen_four)
engine.execute(select([1])).close()
eq_(
canary,
["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to
that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, 'connect', listen_one)
event.listen(pool.QueuePool, 'connect', listen_two)
event.listen(pool.SingletonThreadPool, 'connect', listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
# test [ticket:2964]
@testing.requires.timing_intensive
def test_sync(self):
pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
evt = Mock()
@event.listens_for(pool, 'first_connect')
def slow_first_connect(dbapi_con, rec):
time.sleep(1)
evt.first_connect()
@event.listens_for(pool, 'connect')
def on_connect(dbapi_con, rec):
evt.connect()
def checkout():
for j in range(2):
c1 = pool.connect()
time.sleep(.02)
c1.close()
time.sleep(.02)
threads = []
for i in range(5):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
eq_(evt.mock_calls,
[
call.first_connect(),
call.connect(),
call.connect(),
call.connect()]
)
class DeprecatedPoolListenerTest(PoolTestBase):
@testing.requires.predictable_gc
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners(self):
class InstrumentingListener(object):
def __init__(self):
if hasattr(self, 'connect'):
self.connect = self.inst_connect
if hasattr(self, 'first_connect'):
self.first_connect = self.inst_first_connect
if hasattr(self, 'checkout'):
self.checkout = self.inst_checkout
if hasattr(self, 'checkin'):
self.checkin = self.inst_checkin
self.clear()
def clear(self):
self.connected = []
self.first_connected = []
self.checked_out = []
self.checked_in = []
def assert_total(innerself, conn, fconn, cout, cin):
eq_(len(innerself.connected), conn)
eq_(len(innerself.first_connected), fconn)
eq_(len(innerself.checked_out), cout)
eq_(len(innerself.checked_in), cin)
def assert_in(innerself, item, in_conn, in_fconn,
in_cout, in_cin):
self.assert_((item in innerself.connected) == in_conn)
self.assert_((item in innerself.first_connected) == in_fconn)
self.assert_((item in innerself.checked_out) == in_cout)
self.assert_((item in innerself.checked_in) == in_cin)
def inst_connect(self, con, record):
print("connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.connected.append(con)
def inst_first_connect(self, con, record):
print("first_connect(%s, %s)" % (con, record))
assert con is not None
assert record is not None
self.first_connected.append(con)
def inst_checkout(self, con, record, proxy):
print("checkout(%s, %s, %s)" % (con, record, proxy))
assert con is not None
assert record is not None
assert proxy is not None
self.checked_out.append(con)
def inst_checkin(self, con, record):
print("checkin(%s, %s)" % (con, record))
# con can be None if invalidated
assert record is not None
self.checked_in.append(con)
class ListenAll(tsa.interfaces.PoolListener, InstrumentingListener):
pass
class ListenConnect(InstrumentingListener):
def connect(self, con, record):
pass
class ListenFirstConnect(InstrumentingListener):
def first_connect(self, con, record):
pass
class ListenCheckOut(InstrumentingListener):
def checkout(self, con, record, proxy, num):
pass
class ListenCheckIn(InstrumentingListener):
def checkin(self, con, record):
pass
def assert_listeners(p, total, conn, fconn, cout, cin):
for instance in (p, p.recreate()):
self.assert_(len(instance.dispatch.connect) == conn)
self.assert_(len(instance.dispatch.first_connect) == fconn)
self.assert_(len(instance.dispatch.checkout) == cout)
self.assert_(len(instance.dispatch.checkin) == cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0, 0)
p.add_listener(ListenAll())
assert_listeners(p, 1, 1, 1, 1, 1)
p.add_listener(ListenConnect())
assert_listeners(p, 2, 2, 1, 1, 1)
p.add_listener(ListenFirstConnect())
assert_listeners(p, 3, 2, 2, 1, 1)
p.add_listener(ListenCheckOut())
assert_listeners(p, 4, 2, 2, 2, 1)
p.add_listener(ListenCheckIn())
assert_listeners(p, 5, 2, 2, 2, 2)
del p
snoop = ListenAll()
p = self._queuepool_fixture(listeners=[snoop])
assert_listeners(p, 1, 1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
cc = c.connection
snoop.assert_in(cc, True, True, True, False)
c.close()
snoop.assert_in(cc, True, True, True, True)
del c, cc
snoop.clear()
# this one depends on immediate gc
c = p.connect()
cc = c.connection
snoop.assert_in(cc, False, False, True, False)
snoop.assert_total(0, 0, 1, 0)
del c, cc
lazy_gc()
snoop.assert_total(0, 0, 1, 1)
p.dispose()
snoop.clear()
c = p.connect()
c.close()
c = p.connect()
snoop.assert_total(1, 0, 2, 1)
c.close()
snoop.assert_total(1, 0, 2, 2)
# invalidation
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.invalidate()
snoop.assert_total(1, 0, 1, 1)
c.close()
snoop.assert_total(1, 0, 1, 1)
del c
lazy_gc()
snoop.assert_total(1, 0, 1, 1)
c = p.connect()
snoop.assert_total(2, 0, 2, 1)
c.close()
del c
lazy_gc()
snoop.assert_total(2, 0, 2, 2)
# detached
p.dispose()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 0, 1, 0)
c.detach()
snoop.assert_total(1, 0, 1, 0)
c.close()
del c
snoop.assert_total(1, 0, 1, 0)
c = p.connect()
snoop.assert_total(2, 0, 2, 0)
c.close()
del c
snoop.assert_total(2, 0, 2, 1)
# recreated
p = p.recreate()
snoop.clear()
c = p.connect()
snoop.assert_total(1, 1, 1, 0)
c.close()
snoop.assert_total(1, 1, 1, 1)
c = p.connect()
snoop.assert_total(1, 1, 2, 1)
c.close()
snoop.assert_total(1, 1, 2, 2)
@testing.uses_deprecated(r".*Use event.listen")
def test_listeners_callables(self):
def connect(dbapi_con, con_record):
counts[0] += 1
def checkout(dbapi_con, con_record, con_proxy):
counts[1] += 1
def checkin(dbapi_con, con_record):
counts[2] += 1
i_all = dict(connect=connect, checkout=checkout, checkin=checkin)
i_connect = dict(connect=connect)
i_checkout = dict(checkout=checkout)
i_checkin = dict(checkin=checkin)
for cls in (pool.QueuePool, pool.StaticPool):
counts = [0, 0, 0]
def assert_listeners(p, total, conn, cout, cin):
for instance in (p, p.recreate()):
eq_(len(instance.dispatch.connect), conn)
eq_(len(instance.dispatch.checkout), cout)
eq_(len(instance.dispatch.checkin), cin)
p = self._queuepool_fixture()
assert_listeners(p, 0, 0, 0, 0)
p.add_listener(i_all)
assert_listeners(p, 1, 1, 1, 1)
p.add_listener(i_connect)
assert_listeners(p, 2, 1, 1, 1)
p.add_listener(i_checkout)
assert_listeners(p, 3, 1, 1, 1)
p.add_listener(i_checkin)
assert_listeners(p, 4, 1, 1, 1)
del p
p = self._queuepool_fixture(listeners=[i_all])
assert_listeners(p, 1, 1, 1, 1)
c = p.connect()
assert counts == [1, 1, 0]
c.close()
assert counts == [1, 1, 1]
c = p.connect()
assert counts == [1, 2, 1]
p.add_listener(i_checkin)
c.close()
assert counts == [1, 2, 2]
class QueuePoolTest(PoolTestBase):
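    """QueuePool-specific tests: size/overflow accounting, checkout
    timeouts, concurrent overflow, dispose(), invalidation, recycle and
    reconnect behavior.
    """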
def test_queuepool_del(self):
self._do_testqueuepool(useclose=False)
def test_queuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1)
def status(pool):
return pool.size(), pool.checkedin(), pool.overflow(), \
pool.checkedout()
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
lazy_gc()
assert not pool._refs
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=0,
timeout=2)
c1 = p.connect()
c2 = p.connect()
c3 = p.connect()
now = time.time()
try:
c4 = p.connect()
assert False
except tsa.exc.TimeoutError:
assert int(time.time() - now) == 2
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
        # to queue.Empty, then block on the mutex. each thread consumes a
        # connection as it goes in. when the limit is reached, the remaining
        # threads go in and get TimeoutError even though they never got to
        # wait for the timeout on queue.get(). the fix involves checking the
        # timeout again within the mutex and, if it has expired, unlocking and
        # throwing them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=.05),
pool_size=2,
max_overflow=1, use_threadlocal=False, timeout=3)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should be under 4 seconds,
            # but on a loaded-down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
gc_collect()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(creator=creator,
pool_size=3, timeout=2,
max_overflow=max_overflow)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
lazy_gc()
assert not pool._refs
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
time.sleep(2)
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect())
c2 = self._with_teardown(p.connect())
c3 = self._with_teardown(p.connect())
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(
target=run_test, args=("success_one", p, False)),
threading.Thread(
target=run_test, args=("success_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_one", p, True)),
threading.Thread(
target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False))
]
for t in threads:
t.start()
time.sleep(.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[call("success_one"), call("success_two"),
call("overflow_two"), call("overflow_three"),
call("overflow_one")]
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
mutex.acquire()
try:
return dbapi.connect()
finally:
mutex.release()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(creator=creator,
pool_size=2, timeout=timeout,
max_overflow=max_overflow)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(.1)
conn.close()
c1 = p.connect()
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(target=waiter,
args=(p, timeout, max_overflow))
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
"""Test that a concurrent ConnectionRecord.invalidate() which
occurs after the ConnectionFairy has called _ConnectionRecord.checkout()
but before the ConnectionFairy tests "fairy.connection is None"
will not result in an InvalidRequestError.
This use case assumes that a listener on the checkout() event
        raises DisconnectionError so that a reconnect attempt may occur.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout):
conn = p.connect()
is_(conn._connection_record.connection, None)
conn.close()
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(creator=creator,
pool_size=1, timeout=None,
max_overflow=0)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1, ))
t.start()
threads.append(t)
time.sleep(.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(creator=dbapi.connect,
pool_size=2, timeout=None,
max_overflow=0)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_mixed_close(self):
pool._refs.clear()
p = self._queuepool_fixture(pool_size=3, max_overflow=-1,
use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = None
assert p.checkedout() == 1
c1 = None
lazy_gc()
assert p.checkedout() == 0
lazy_gc()
assert not pool._refs
def test_overflow_no_gc_tlocal(self):
self._test_overflow_no_gc(True)
def test_overflow_no_gc(self):
self._test_overflow_no_gc(False)
def _test_overflow_no_gc(self, threadlocal):
p = self._queuepool_fixture(pool_size=2,
max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0])
)
@testing.requires.predictable_gc
def test_weakref_kaboom(self):
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2 = None
del c1
del c2
gc_collect()
assert p.checkedout() == 0
c3 = p.connect()
assert c3 is not None
def test_trick_the_counter(self):
"""this is a "flaw" in the connection pool; since threadlocal
uses a single ConnectionFairy per thread with an open/close
counter, you can fool the counter into giving you a
        ConnectionFairy with an ambiguous counter, i.e. it's not true
reference counting."""
p = self._queuepool_fixture(pool_size=3,
max_overflow=-1, use_threadlocal=True)
c1 = p.connect()
c2 = p.connect()
assert c1 is c2
c1.close()
c2 = p.connect()
c2.close()
self.assert_(p.checkedout() != 0)
c2.close()
self.assert_(p.checkedout() == 0)
def test_recycle(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0,
recycle=3)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2.close()
time.sleep(4)
c3 = p.connect()
assert id(c3.connection) != c_id
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.connection is None
c2.close()
time.sleep(.5)
c3 = p.connect()
assert id(c3.connection) != c_id
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(pool_size=1,
max_overflow=0)
c1 = p.connect()
c_id = id(c1.connection)
c1.close()
c2 = p.connect()
assert id(c2.connection) == c_id
c2_rec = c2._connection_record
c2.invalidate(soft=True)
assert c2_rec.connection is c2.connection
c2.close()
time.sleep(.5)
c3 = p.connect()
assert id(c3.connection) != c_id
assert c3._connection_record is c2_rec
assert c2_rec.connection is c3.connection
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record,
pool, ref, echo, fairy=None):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback")
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy)
return patch.object(
pool, '_finalize_fairy', assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
with self._no_wr_finalize():
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises(
Exception,
p.connect
)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = self._with_teardown(p.connect())
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = self._with_teardown(p.connect())
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = self._with_teardown(p.connect())
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1,
max_overflow=2, recycle=1)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
def test_connect_handler_not_called_for_recycled(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=2, max_overflow=2)
canary = Mock()
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
event.listen(p, "connect", canary.connect)
event.listen(p, "checkout", canary.checkout)
assert_raises(
Exception,
p.connect
)
p._pool.queue = collections.deque(
[
c for c in p._pool.queue
if c.connection is not None
]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
eq_(
canary.mock_calls,
[
call.connect(ANY, ANY),
call.checkout(ANY, ANY, ANY)
]
)
def test_connect_checkout_handler_always_gets_info(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=2, max_overflow=2)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
@event.listens_for(p, "connect")
def connect(conn, conn_rec):
conn_rec.info['x'] = True
@event.listens_for(p, "checkout")
def checkout(conn, conn_rec, conn_f):
assert 'x' in conn_rec.info
assert_raises(
Exception,
p.connect
)
p._pool.queue = collections.deque(
[
c for c in p._pool.queue
if c.connection is not None
]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1,
max_overflow=2)
c1 = p.connect()
c1.close()
@event.listens_for(p, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if dbapi.is_shutdown:
raise tsa.exc.DisconnectionError()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
def slow_close():
slow_closing_connection._slow_close()
time.sleep(.5)
slow_closing_connection = Mock()
slow_closing_connection.connect.return_value.close = slow_close
class Error(Exception):
pass
dialect = Mock()
dialect.is_disconnect = lambda *arg, **kw: True
dialect.dbapi.Error = Error
pools = []
class TrackQueuePool(pool.QueuePool):
def __init__(self, *arg, **kw):
pools.append(self)
super(TrackQueuePool, self).__init__(*arg, **kw)
def creator():
return slow_closing_connection.connect()
p1 = TrackQueuePool(creator=creator, pool_size=20)
from sqlalchemy import create_engine
eng = create_engine(testing.db.url, pool=p1, _initialize=False)
eng.dialect = dialect
# 15 total connections
conns = [eng.connect() for i in range(15)]
        # return 7 back to the pool
for conn in conns[3:10]:
conn.close()
def attempt(conn):
time.sleep(random.random())
try:
conn._handle_dbapi_exception(Error(), "statement", {},
Mock(), Mock())
except tsa.exc.DBAPIError:
pass
        # run an error + invalidate operation on the remaining open
        # connections
threads = []
for conn in conns:
t = threading.Thread(target=attempt, args=(conn, ))
t.start()
threads.append(t)
for t in threads:
t.join()
# return all 15 connections to the pool
for conn in conns:
conn.close()
# re-open 15 total connections
conns = [eng.connect() for i in range(15)]
# 15 connections have been fully closed due to invalidate
assert slow_closing_connection._slow_close.call_count == 15
# 15 initial connections + 15 reconnections
assert slow_closing_connection.connect.call_count == 30
assert len(pools) <= 2, len(pools)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(reset_on_return=None, pool_size=1,
max_overflow=0)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._use_threadlocal is False
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect()
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.connection
c1.invalidate()
assert c1.connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.connection is not c1_con
c2_con = c2.connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_threadfairy(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1,
use_threadlocal=True)
c1 = p.connect()
c1.close()
c2 = p.connect()
assert c2.connection is not None
class ResetOnReturnTest(PoolTestBase):
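    """Tests the reset_on_return modes ('rollback', 'commit', None),
    including delegation to a _reset_agent when one is set.
    """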
def _fixture(self, **kw):
dbapi = Mock()
return dbapi, pool.QueuePool(creator=lambda: dbapi.connect('foo.db'),
**kw)
def test_plain_rollback(self):
dbapi, p = self._fixture(reset_on_return='rollback')
c1 = p.connect()
c1.close()
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_plain_commit(self):
dbapi, p = self._fixture(reset_on_return='commit')
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_plain_none(self):
dbapi, p = self._fixture(reset_on_return=None)
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_rollback(self):
dbapi, p = self._fixture(reset_on_return='rollback')
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert dbapi.connect().special_rollback.called
assert not dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 1)
eq_(dbapi.connect().special_commit.call_count, 0)
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_commit(self):
dbapi, p = self._fixture(reset_on_return='commit')
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert not dbapi.connect().special_rollback.called
assert dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 0)
eq_(dbapi.connect().special_commit.call_count, 1)
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
@testing.requires.threading_with_mock
def test_cleanup_no_gc(self):
self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(.1)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(p._all_conns) == 3
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect()
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect('foo.db'))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls([
call('foo.db'),
call('foo.db')],
any_order=True)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
creator = lambda: dbapi.connect('foo.db')
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
class CreatorCompatibilityTest(PoolTestBase):
def test_creator_callable_outside_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator()
finally:
conn.close()
def test_creator_callable_outside_witharg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator(Mock())
finally:
conn.close()
def test_creator_patching_arg_to_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
# the creator is the two-arg form
conn = creator(Mock())
finally:
conn.close()
def mock_create():
return creator()
conn = e.connect()
conn.invalidate()
conn.close()
# test that the 'should_wrap_creator' status
# will dynamically switch if the _creator is monkeypatched.
# patch it with a zero-arg form
with patch.object(e.pool, "_creator", mock_create):
conn = e.connect()
conn.invalidate()
conn.close()
conn = e.connect()
conn.close()
|
|
import numpy as np
import pytest
import pandas as pd
from pandas import (
Series,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import PeriodArray
class TestSeriesIsIn:
def test_isin(self):
s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
result = s.isin(["A", "C"])
expected = Series([True, False, True, False, False, False, True, True])
tm.assert_series_equal(result, expected)
# GH#16012
# This specific issue has to have a series over 1e6 in len, but the
# comparison array (in_list) must be large enough so that numpy doesn't
# do a manual masking trick that will avoid this issue altogether
s = Series(list("abcdefghijk" * 10 ** 5))
# If numpy doesn't do the manual comparison/mask, these
# unorderable mixed types are what cause the exception in numpy
in_list = [-1, "a", "b", "G", "Y", "Z", "E", "K", "E", "S", "I", "R", "R"] * 6
assert s.isin(in_list).sum() == 200000
def test_isin_with_string_scalar(self):
# GH#4763
s = Series(["A", "B", "C", "a", "B", "B", "A", "C"])
msg = (
r"only list-like objects are allowed to be passed to isin\(\), "
r"you passed a \[str\]"
)
with pytest.raises(TypeError, match=msg):
s.isin("a")
s = Series(["aaa", "b", "c"])
with pytest.raises(TypeError, match=msg):
s.isin("aaa")
def test_isin_with_i8(self):
# GH#5021
expected = Series([True, True, False, False, False])
expected2 = Series([False, True, False, False, False])
# datetime64[ns]
s = Series(date_range("jan-01-2013", "jan-05-2013"))
result = s.isin(s[0:2])
tm.assert_series_equal(result, expected)
result = s.isin(s[0:2].values)
tm.assert_series_equal(result, expected)
# fails on dtype conversion in the first place
result = s.isin(np.asarray(s[0:2].values).astype("datetime64[D]"))
tm.assert_series_equal(result, expected)
result = s.isin([s[1]])
tm.assert_series_equal(result, expected2)
result = s.isin([np.datetime64(s[1])])
tm.assert_series_equal(result, expected2)
result = s.isin(set(s[0:2]))
tm.assert_series_equal(result, expected)
# timedelta64[ns]
s = Series(pd.to_timedelta(range(5), unit="d"))
result = s.isin(s[0:2])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
def test_isin_empty(self, empty):
# see GH#16991
s = Series(["a", "b"])
expected = Series([False, False])
result = s.isin(empty)
tm.assert_series_equal(expected, result)
def test_isin_read_only(self):
# https://github.com/pandas-dev/pandas/issues/37174
arr = np.array([1, 2, 3])
arr.setflags(write=False)
s = Series([1, 2, 3])
result = s.isin(arr)
expected = Series([True, True, True])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [object, None])
def test_isin_dt64_values_vs_ints(self, dtype):
        # GH#36621 don't cast integers to datetimes for isin
dti = date_range("2013-01-01", "2013-01-05")
ser = Series(dti)
comps = np.asarray([1356998400000000000], dtype=dtype)
res = dti.isin(comps)
expected = np.array([False] * len(dti), dtype=bool)
tm.assert_numpy_array_equal(res, expected)
res = ser.isin(comps)
tm.assert_series_equal(res, Series(expected))
res = pd.core.algorithms.isin(ser, comps)
tm.assert_numpy_array_equal(res, expected)
def test_isin_tzawareness_mismatch(self):
dti = date_range("2013-01-01", "2013-01-05")
ser = Series(dti)
other = dti.tz_localize("UTC")
res = dti.isin(other)
expected = np.array([False] * len(dti), dtype=bool)
tm.assert_numpy_array_equal(res, expected)
res = ser.isin(other)
tm.assert_series_equal(res, Series(expected))
res = pd.core.algorithms.isin(ser, other)
tm.assert_numpy_array_equal(res, expected)
def test_isin_period_freq_mismatch(self):
dti = date_range("2013-01-01", "2013-01-05")
pi = dti.to_period("M")
ser = Series(pi)
# We construct another PeriodIndex with the same i8 values
# but different dtype
dtype = dti.to_period("Y").dtype
other = PeriodArray._simple_new(pi.asi8, dtype=dtype)
res = pi.isin(other)
expected = np.array([False] * len(pi), dtype=bool)
tm.assert_numpy_array_equal(res, expected)
res = ser.isin(other)
tm.assert_series_equal(res, Series(expected))
res = pd.core.algorithms.isin(ser, other)
tm.assert_numpy_array_equal(res, expected)
@pytest.mark.parametrize("values", [[-9.0, 0.0], [-9, 0]])
def test_isin_float_in_int_series(self, values):
# GH#19356 GH#21804
ser = Series(values)
result = ser.isin([-9, -0.5])
expected = Series([True, False])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", ["boolean", "Int64", "Float64"])
@pytest.mark.parametrize(
"data,values,expected",
[
([0, 1, 0], [1], [False, True, False]),
([0, 1, 0], [1, pd.NA], [False, True, False]),
([0, pd.NA, 0], [1, 0], [True, False, True]),
([0, 1, pd.NA], [1, pd.NA], [False, True, True]),
([0, 1, pd.NA], [1, np.nan], [False, True, False]),
([0, pd.NA, pd.NA], [np.nan, pd.NaT, None], [False, False, False]),
],
)
def test_isin_masked_types(self, dtype, data, values, expected):
# GH#42405
ser = Series(data, dtype=dtype)
result = ser.isin(values)
expected = Series(expected, dtype="boolean")
tm.assert_series_equal(result, expected)
@pytest.mark.slow
def test_isin_large_series_mixed_dtypes_and_nan():
# https://github.com/pandas-dev/pandas/issues/37094
# combination of object dtype for the values and > 1_000_000 elements
ser = Series([1, 2, np.nan] * 1_000_000)
result = ser.isin({"foo", "bar"})
expected = Series([False] * 3 * 1_000_000)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"array,expected",
[
(
[0, 1j, 1j, 1, 1 + 1j, 1 + 2j, 1 + 1j],
Series([False, True, True, False, True, True, True], dtype=bool),
)
],
)
def test_isin_complex_numbers(array, expected):
# GH 17927
result = Series(array).isin([1j, 1 + 1j, 1 + 2j])
tm.assert_series_equal(result, expected)
|
|
# Copyright 2016 by MPI-SWS and Data-Ken Research.
# Licensed under the Apache 2.0 License.
"""Common utilities for the tests
"""
import time
import unittest
import random
random.seed()
from antevents.base import IterableAsPublisher, DefaultSubscriber, FatalError,\
SensorEvent
class RandomSensor:
def __init__(self, sensor_id, mean=100.0, stddev=20.0, stop_after_events=None):
self.sensor_id = sensor_id
self.mean = mean
self.stddev = stddev
self.stop_after_events = stop_after_events
if stop_after_events is not None:
def generator():
for i in range(stop_after_events):
yield random.gauss(mean, stddev)
else: # go on forever
def generator():
while True:
yield random.gauss(mean, stddev)
self.generator = generator()
def sample(self):
return self.generator.__next__()
def __repr__(self):
if self.stop_after_events is None:
return 'RandomSensor(%s, mean=%s, stddev=%s)' % \
(self.sensor_id, self.mean, self.stddev)
else:
return 'RandomSensor(%s, mean=%s, stddev=%s, stop_after_events=%s)' % \
(self.sensor_id, self.mean, self.stddev, self.stop_after_events)
class ValueListSensor:
def __init__(self, sensor_id, values):
self.sensor_id = sensor_id
def generator():
for v in values:
yield v
self.generator = generator()
def sample(self):
return self.generator.__next__()
def __repr__(self):
return 'ValueListSensor(%s)' % self.sensor_id
def make_test_publisher(sensor_id, mean=100.0, stddev=20.0, stop_after_events=None):
"""Here is an exmple test publisher that generates a random value"""
if stop_after_events is not None:
def generator():
for i in range(stop_after_events):
yield SensorEvent(sensor_id, time.time(),
random.gauss(mean, stddev))
else: # go on forever
def generator():
while True:
yield SensorEvent(sensor_id, time.time(),
random.gauss(mean, stddev))
g = generator()
o = IterableAsPublisher(g, name='Sensor(%s)' % sensor_id)
return o
def make_test_publisher_from_vallist(sensor_id, values):
"""Create a publisher that generates the list of values when sampled, but uses
real timestamps.
"""
def generator():
for val in values:
yield SensorEvent(sensor_id, time.time(), val)
o = IterableAsPublisher(generator(), name='Sensor(%s)' % sensor_id)
return o
class ValidationSubscriber(DefaultSubscriber):
"""Compare the values in a event stream to the expected values.
Use the test_case for the assertions (for proper error reporting in a unit
test).
"""
def __init__(self, expected_stream, test_case,
extract_value_fn=lambda event:event.val):
self.expected_stream = expected_stream
self.next_idx = 0
        self.test_case = test_case  # either a TestCase instance or a bound method of one
self.extract_value_fn = extract_value_fn
self.completed = False
self.name = "ValidationSubscriber(%s)" % \
test_case.__class__.__name__ \
if isinstance(test_case, unittest.TestCase) \
else "ValidationSubscriber(%s.%s)" % \
(test_case.__self__.__class__.__name__,
test_case.__name__)
def on_next(self, x):
tcls = self.test_case if isinstance(self.test_case, unittest.TestCase)\
else self.test_case.__self__
tcls.assertLess(self.next_idx, len(self.expected_stream),
"Got an event after reaching the end of the expected stream")
expected = self.expected_stream[self.next_idx]
actual = self.extract_value_fn(x)
tcls.assertEqual(actual, expected,
"Values for element %d of event stream mismatch" %
self.next_idx)
self.next_idx += 1
def on_completed(self):
tcls = self.test_case if isinstance(self.test_case, unittest.TestCase)\
else self.test_case.__self__
tcls.assertEqual(self.next_idx, len(self.expected_stream),
"Got on_completed() before end of stream")
self.completed = True
def on_error(self, exc):
tcls = self.test_case if isinstance(self.test_case, unittest.TestCase)\
else self.test_case.__self__
tcls.assertTrue(False,
"Got an unexpected on_error call with parameter: %s" %
exc)
def __repr__(self):
return self.name
class SensorEventValidationSubscriber(DefaultSubscriber):
"""Compare the full events in a sensor event stream to the expected events.
Use the test_case for the assertions (for proper error reporting in a unit
test).
"""
def __init__(self, expected_sensor_events, test_case):
self.expected_sensor_events = expected_sensor_events
self.next_idx = 0
self.test_case = test_case
self.completed = False
def on_next(self, x):
tc = self.test_case
tc.assertLess(self.next_idx, len(self.expected_sensor_events),
"Got an event after reaching the end of the expected stream")
expected = self.expected_sensor_events[self.next_idx]
actual = x
tc.assertEqual(actual.val, expected.val,
"Values for element %d of event stream mismatch" % self.next_idx)
tc.assertEqual(actual.sensor_id, expected.sensor_id,
"sensor ids for element %d of event stream mismatch" % self.next_idx)
# since the timestamp is a floating point number, we only check that
# the timestamps are "close enough"
tc.assertAlmostEqual(actual.ts, expected.ts, places=5,
msg="Timestamps for element %d of event stream mismatch" % self.next_idx)
self.next_idx += 1
def on_completed(self):
tc = self.test_case
tc.assertEqual(self.next_idx, len(self.expected_sensor_events),
"Got on_completed() before end of stream")
self.completed = True
def on_error(self, exc):
tc = self.test_case
tc.assertTrue(False,
"Got an unexpected on_error call with parameter: %s" % exc)
class ValidateAndStopSubscriber(ValidationSubscriber):
"""A version of ValidationSubscriber that calls a stop
function after the specified events have been received.
"""
def __init__(self, expected_stream, test_case, stop_fn,
extract_value_fn=lambda event:event.val):
super().__init__(expected_stream, test_case,
extract_value_fn=extract_value_fn)
self.stop_fn = stop_fn
def on_next(self, x):
super().on_next(x)
if self.next_idx==len(self.expected_stream):
print("ValidateAndStopSubscriber: stopping")
self.stop_fn()
class CaptureSubscriber(DefaultSubscriber):
"""Capture the sequence of events in a list for later use.
"""
def __init__(self):
self.events = []
self.completed = False
def on_next(self, x):
self.events.append(x)
def on_completed(self):
self.completed = True
def on_error(self, e):
raise FatalError("Should not get on_error, got on_error(%s)" % e)
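# --- Illustrative sketch (not part of the original utilities) ---
# Shows how ValidationSubscriber and CaptureSubscriber are typically driven
# from inside a unittest.TestCase. It only uses names defined or imported
# above; the sensor id and event values are made-up placeholders.
class _ExampleValidationUsage(unittest.TestCase):
    def test_validation_subscriber_sketch(self):
        expected = [1, 2, 3]
        validator = ValidationSubscriber(expected, self)
        capture = CaptureSubscriber()
        for val in expected:
            event = SensorEvent('example-sensor', time.time(), val)
            validator.on_next(event)
            capture.on_next(event)
        validator.on_completed()
        capture.on_completed()
        self.assertTrue(validator.completed)
        self.assertEqual([e.val for e in capture.events], expected)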
|
|
import logging
from pprint import pprint
import numpy as np
from copy import deepcopy
from pycqed.measurement.waveform_control import pulsar as ps
from pycqed.measurement.waveform_control import sequence as sequence
from pycqed.measurement.waveform_control import segment as segment
from pycqed.measurement.randomized_benchmarking import \
randomized_benchmarking as rb
log = logging.getLogger(__name__)
def rabi_seq_active_reset(amps, qb_name, operation_dict, cal_points,
upload=True, n=1, for_ef=False,
last_ge_pulse=False, prep_params=dict()):
'''
Rabi sequence for a single qubit using the tektronix.
Args:
        amps: array of pulse amplitudes (V)
        qb_name: name of the qubit
        operation_dict: dict with all pulse dicts of the qubit
        cal_points: CalibrationPoints instance
        upload: whether to upload sequence to instrument or not
        n: number of pulses (1 is conventional Rabi)
        for_ef: whether to drive the ef transition instead of ge
        last_ge_pulse: if for_ef, whether to append a ge pulse before readout
        prep_params: qubit preparation_params dict
Returns:
sequence (Sequence): sequence object
segment_indices (list): array of range of n_segments including
calibration_segments. To be used as sweep_points for the MC.
'''
seq_name = 'Rabi_sequence'
# add Rabi amplitudes segments
rabi_ops = ["X180_ef " + qb_name if for_ef else "X180 " + qb_name] * n
if for_ef:
rabi_ops = ["X180 " + qb_name] + rabi_ops # prepend ge pulse
if last_ge_pulse:
rabi_ops += ["X180 " + qb_name] # append ge pulse
rabi_ops += ["RO " + qb_name]
rabi_pulses = [deepcopy(operation_dict[op]) for op in rabi_ops]
for i in np.arange(1 if for_ef else 0, n + 1 if for_ef else n):
rabi_pulses[i]["name"] = "Rabi_" + str(i-1 if for_ef else i)
swept_pulses = sweep_pulse_params(rabi_pulses,
{f'Rabi_{i}.amplitude':
amps for i in range(n)})
swept_pulses_with_prep = \
[add_preparation_pulses(p, operation_dict, [qb_name], **prep_params)
for p in swept_pulses]
seq = pulse_list_list_seq(swept_pulses_with_prep, seq_name, upload=False)
# add calibration segments
seq.extend(cal_points.create_segments(operation_dict, **prep_params))
# reuse sequencer memory by repeating readout pattern
seq.repeat_ro(f"RO {qb_name}", operation_dict)
log.debug(seq)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
return seq, np.arange(seq.n_acq_elements())
def t1_active_reset(times, qb_name, operation_dict, cal_points,
upload=True, for_ef=False, last_ge_pulse=False,
prep_params=dict()):
'''
T1 sequence for a single qubit using the tektronix.
SSB_Drag pulse is used for driving, simple modulation used for RO
Input pars:
        times: array of times to wait after the initial pi-pulse (s)
        qb_name: name of the qubit
        operation_dict: dict with all pulse dicts of the qubit
        cal_points: CalibrationPoints instance
        upload: whether to upload sequence to instrument or not
        for_ef: whether to measure the T1 of the ef transition
        last_ge_pulse: if for_ef, whether to append a ge pulse before readout
        prep_params: qubit preparation_params dict
'''
if np.any(times>1e-3):
        logging.warning('The values in the times array might be too large. '
'The units should be seconds.')
seq_name = 'T1_sequence'
#Operations
if for_ef:
ops = ["X180", "X180_ef"]
if last_ge_pulse:
ops += ["X180"]
else:
ops = ["X180"]
ops += ["RO"]
ops = add_suffix(ops, " " + qb_name)
pulses = [deepcopy(operation_dict[op]) for op in ops]
# name delayed pulse: last ge pulse if for_ef and last_ge_pulse
# otherwise readout pulse
if for_ef and last_ge_pulse:
delayed_pulse = -2 # last_ge_pulse
delays = np.array(times)
else:
delayed_pulse = -1 # readout pulse
delays = np.array(times) + pulses[-1]["pulse_delay"]
pulses[delayed_pulse]['name'] = "Delayed_pulse"
# vary delay of readout pulse or last ge pulse
swept_pulses = sweep_pulse_params(pulses, {'Delayed_pulse.pulse_delay': delays})
# add preparation pulses
swept_pulses_with_prep = \
[add_preparation_pulses(p, operation_dict, [qb_name], **prep_params)
for p in swept_pulses]
seq = pulse_list_list_seq(swept_pulses_with_prep, seq_name, upload=False)
# add calibration segments
seq.extend(cal_points.create_segments(operation_dict, **prep_params))
# reuse sequencer memory by repeating readout pattern
seq.repeat_ro(f"RO {qb_name}", operation_dict)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
return seq, np.arange(seq.n_acq_elements())
def ramsey_seq_Echo(times, pulse_pars, RO_pars, nr_echo_pulses=4,
artificial_detuning=None,
cal_points=True, cpmg_scheme=True,
upload=True, return_seq=False):
'''
Ramsey sequence for a single qubit using the tektronix.
    SSB_Drag pulse is used for driving, simple modulation used for RO
Input pars:
times: array of times between (start of) pulses (s)
pulse_pars: dict containing the pulse parameters
RO_pars: dict containing the RO parameters
artificial_detuning: artificial_detuning (Hz) implemented using phase
cal_points: whether to use calibration points or not
'''
if np.any(times > 1e-3):
        logging.warning('The values in the times array might be too large. '
'The units should be seconds.')
seq_name = 'Ramsey_sequence'
seq = sequence.Sequence(seq_name)
seg_list = []
# First extract values from input, later overwrite when generating
# waveforms
pulses = get_pulse_dict_from_pars(pulse_pars)
pulse_pars_x2 = deepcopy(pulses['X90'])
pulse_pars_x2['ref_point'] = 'start'
X180_pulse = deepcopy(pulses['X180'])
Echo_pulses = nr_echo_pulses*[X180_pulse]
DRAG_length = pulse_pars['nr_sigma']*pulse_pars['sigma']
for i, tau in enumerate(times):
if artificial_detuning is not None:
Dphase = ((tau-times[0]) * artificial_detuning * 360) % 360
pulse_pars_x2['phase'] = Dphase
if cal_points and (i == (len(times)-4) or i == (len(times)-3)):
seg = segment.Segment('segment_{}'.format(i), [pulses['I'], RO_pars])
elif cal_points and (i == (len(times)-2) or i == (len(times)-1)):
seg = segment.Segment('segment_{}'.format(i), [pulses['X180'], RO_pars])
else:
X90_separation = tau - DRAG_length
if cpmg_scheme:
if i == 0:
print('cpmg')
echo_pulse_delay = (X90_separation -
nr_echo_pulses*DRAG_length) / \
nr_echo_pulses
if echo_pulse_delay < 0:
pulse_pars_x2['pulse_delay'] = tau
pulse_dict_list = [pulses['X90'], pulse_pars_x2, RO_pars]
else:
pulse_dict_list = [pulses['X90']]
start_end_delay = echo_pulse_delay/2
for p_nr, pulse_dict in enumerate(Echo_pulses):
pd = deepcopy(pulse_dict)
pd['ref_point'] = 'end'
pd['pulse_delay'] = \
(start_end_delay if p_nr == 0 else echo_pulse_delay)
pulse_dict_list.append(pd)
pulse_pars_x2['ref_point'] = 'end'
pulse_pars_x2['pulse_delay'] = start_end_delay
pulse_dict_list += [pulse_pars_x2, RO_pars]
else:
if i == 0:
print('UDD')
pulse_positions_func = \
lambda idx, N: np.sin(np.pi*idx/(2*N+2))**2
pulse_delays_func = (lambda idx, N: X90_separation*(
pulse_positions_func(idx, N) -
pulse_positions_func(idx-1, N)) -
((0.5 if idx == 1 else 1)*DRAG_length))
if nr_echo_pulses*DRAG_length > X90_separation:
pulse_pars_x2['pulse_delay'] = tau
pulse_dict_list = [pulses['X90'], pulse_pars_x2, RO_pars]
else:
pulse_dict_list = [pulses['X90']]
for p_nr, pulse_dict in enumerate(Echo_pulses):
pd = deepcopy(pulse_dict)
pd['ref_point'] = 'end'
pd['pulse_delay'] = pulse_delays_func(
p_nr+1, nr_echo_pulses)
pulse_dict_list.append(pd)
pulse_pars_x2['ref_point'] = 'end'
pulse_pars_x2['pulse_delay'] = pulse_delays_func(
1, nr_echo_pulses)
pulse_dict_list += [pulse_pars_x2, RO_pars]
seg = segment.Segment('segment_{}'.format(i), pulse_dict_list)
seg_list.append(seg)
seq.add(seg)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
if return_seq:
return seq, seg_list
else:
return seq_name
def ramsey_seq_cont_drive(times, pulse_pars, RO_pars,
artificial_detuning=None, cal_points=True,
upload=True, return_seq=False, **kw):
'''
Ramsey sequence for a single qubit using the tektronix.
    SSB_Drag pulse is used for driving, simple modulation used for RO
Input pars:
times: array of times between (start of) pulses (s)
pulse_pars: dict containing the pulse parameters
RO_pars: dict containing the RO parameters
artificial_detuning: artificial_detuning (Hz) implemented using phase
cal_points: whether to use calibration points or not
'''
if np.any(times > 1e-3):
        logging.warning('The values in the times array might be too large. '
'The units should be seconds.')
seq_name = 'Ramsey_sequence'
seq = sequence.Sequence(seq_name)
seg_list = []
# First extract values from input, later overwrite when generating
# waveforms
pulses = get_pulse_dict_from_pars(pulse_pars)
pulse_pars_x2 = deepcopy(pulses['X90'])
DRAG_length = pulse_pars['nr_sigma']*pulse_pars['sigma']
cont_drive_ampl = 0.1 * pulse_pars['amplitude']
X180_pulse = deepcopy(pulses['X180'])
cos_pulse = {'pulse_type': 'CosPulse_gauss_rise',
'channel': X180_pulse['I_channel'],
'frequency': X180_pulse['mod_frequency'],
'length': 0,
'phase': X180_pulse['phi_skew'],
'amplitude': cont_drive_ampl * X180_pulse['alpha'],
'pulse_delay': 0,
'ref_point': 'end'}
sin_pulse = {'pulse_type': 'CosPulse_gauss_rise',
'channel': X180_pulse['Q_channel'],
'frequency': X180_pulse['mod_frequency'],
'length': 0,
'phase': 90,
'amplitude': cont_drive_ampl * X180_pulse['alpha'],
'pulse_delay': 0,
'ref_point': 'simultaneous'}
for i, tau in enumerate(times):
if artificial_detuning is not None:
Dphase = ((tau-times[0]) * artificial_detuning * 360) % 360
pulse_pars_x2['phase'] = Dphase
if cal_points and (i == (len(times)-4) or i == (len(times)-3)):
seg = segment.Segment('segment_{}'.format(i), [pulses['I'], RO_pars])
elif cal_points and (i == (len(times)-2) or i == (len(times)-1)):
seg = segment.Segment('segment_{}'.format(i), [pulses['X180'], RO_pars])
else:
X90_separation = tau - DRAG_length
if X90_separation > 0:
pulse_pars_x2['ref_point'] = 'end'
cos_pls1 = deepcopy(cos_pulse)
sin_pls1 = deepcopy(sin_pulse)
cos_pls1['length'] = X90_separation/2
sin_pls1['length'] = X90_separation/2
cos_pls2 = deepcopy(cos_pls1)
sin_pls2 = deepcopy(sin_pls1)
cos_pls2['amplitude'] = -cos_pls1['amplitude']
cos_pls2['pulse_type'] = 'CosPulse_gauss_fall'
sin_pls2['amplitude'] = -sin_pls1['amplitude']
sin_pls2['pulse_type'] = 'CosPulse_gauss_fall'
pulse_dict_list = [pulses['X90'], cos_pls1, sin_pls1,
cos_pls2, sin_pls2, pulse_pars_x2, RO_pars]
else:
pulse_pars_x2['ref_point'] = 'start'
pulse_pars_x2['pulse_delay'] = tau
pulse_dict_list = [pulses['X90'], pulse_pars_x2, RO_pars]
seg = segment.Segment('segment_{}'.format(i), pulse_dict_list)
seg_list.append(seg)
seq.add(seg)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
if return_seq:
return seq, seg_list
else:
return seq_name
def ramsey_seq(times, pulse_pars, RO_pars,
artificial_detuning=None,
cal_points=True, upload=True, return_seq=False):
'''
Ramsey sequence for a single qubit using the tektronix.
    SSB_Drag pulse is used for driving, simple modulation used for RO
Input pars:
times: array of times between (start of) pulses (s)
pulse_pars: dict containing the pulse parameters
RO_pars: dict containing the RO parameters
artificial_detuning: artificial_detuning (Hz) implemented using phase
cal_points: whether to use calibration points or not
'''
if np.any(times > 1e-3):
        logging.warning('The values in the times array might be too large. '
'The units should be seconds.')
seq_name = 'Ramsey_sequence'
seq = sequence.Sequence(seq_name)
seg_list = []
# First extract values from input, later overwrite when generating
# waveforms
pulses = get_pulse_dict_from_pars(pulse_pars)
pulse_pars_x2 = deepcopy(pulses['X90'])
pulse_pars_x2['ref_point'] = 'start'
for i, tau in enumerate(times):
pulse_pars_x2['pulse_delay'] = tau
if artificial_detuning is not None:
Dphase = ((tau-times[0]) * artificial_detuning * 360) % 360
pulse_pars_x2['phase'] = Dphase
if cal_points and (i == (len(times)-4) or i == (len(times)-3)):
seg = segment.Segment('segment_{}'.format(i),
[pulses['I'], RO_pars])
elif cal_points and (i == (len(times)-2) or i == (len(times)-1)):
seg = segment.Segment('segment_{}'.format(i),
[pulses['X180'], RO_pars])
else:
seg = segment.Segment('segment_{}'.format(i),
[pulses['X90'], pulse_pars_x2, RO_pars])
seg_list.append(seg)
seq.add(seg)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
if return_seq:
return seq, seg_list
else:
return seq_name
def ramsey_seq_VZ(times, pulse_pars, RO_pars,
artificial_detuning=None,
cal_points=True, upload=True, return_seq=False):
'''
Ramsey sequence for a single qubit using the tektronix.
    SSB_Drag pulse is used for driving, simple modulation used for RO
Input pars:
times: array of times between (start of) pulses (s)
pulse_pars: dict containing the pulse parameters
RO_pars: dict containing the RO parameters
artificial_detuning: artificial_detuning (Hz) implemented using phase
cal_points: whether to use calibration points or not
'''
if np.any(times>1e-3):
        logging.warning('The values in the times array might be too large. '
'The units should be seconds.')
seq_name = 'Ramsey_sequence'
seq = sequence.Sequence(seq_name)
seg_list = []
# First extract values from input, later overwrite when generating
# waveforms
pulses = get_pulse_dict_from_pars(pulse_pars)
pulse_pars_x2 = deepcopy(pulses['X90'])
pulse_pars_x2['ref_point'] = 'start'
for i, tau in enumerate(times):
pulse_pars_x2['pulse_delay'] = tau
if artificial_detuning is not None:
Dphase = ((tau-times[0]) * artificial_detuning * 360) % 360
else:
Dphase = ((tau-times[0]) * 1e6 * 360) % 360
Z_gate = Z(Dphase, pulse_pars)
if cal_points and (i == (len(times)-4) or i == (len(times)-3)):
seg = segment.Segment('segment_{}'.format(i), [pulses['I'], RO_pars])
elif cal_points and (i == (len(times)-2) or i == (len(times)-1)):
seg = segment.Segment('segment_{}'.format(i), [pulses['X180'], RO_pars])
else:
pulse_list = [pulses['X90'], Z_gate, pulse_pars_x2, RO_pars]
seg = segment.Segment('segment_{}'.format(i), pulse_list)
seg_list.append(seg)
seq.add(seg)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
if return_seq:
return seq, seg_list
else:
return seq_name
def ramsey_seq_multiple_detunings(times, pulse_pars, RO_pars,
artificial_detunings=None, cal_points=True,
upload=True, return_seq=False):
'''
Ramsey sequence for a single qubit using the tektronix.
    SSB_Drag pulse is used for driving, simple modulation used for RO
!!! Each value in the times array must be repeated len(artificial_detunings)
times!!!
Input pars:
times: array of times between (start of) pulses (s)
pulse_pars: dict containing the pulse parameters
RO_pars: dict containing the RO parameters
artificial_detunings: list of artificial_detunings (Hz) implemented
using phase
cal_points: whether to use calibration points or not
'''
seq_name = 'Ramsey_sequence_multiple_detunings'
seq = sequence.Sequence(seq_name)
ps.Pulsar.get_instance().update_channel_settings()
seg_list = []
# First extract values from input, later overwrite when generating
# waveforms
pulses = get_pulse_dict_from_pars(pulse_pars)
pulse_pars_x2 = deepcopy(pulses['X90'])
pulse_pars_x2['ref_point'] = 'start'
for i, tau in enumerate(times):
pulse_pars_x2['pulse_delay'] = tau
art_det = artificial_detunings[i % len(artificial_detunings)]
if art_det is not None:
Dphase = ((tau-times[0]) * art_det * 360) % 360
pulse_pars_x2['phase'] = Dphase
if cal_points and (i == (len(times)-4) or i == (len(times)-3)):
seg = segment.Segment('segment_{}'.format(i), [pulses['I'], RO_pars])
elif cal_points and (i == (len(times)-2) or i == (len(times)-1)):
seg = segment.Segment('segment_{}'.format(i), [pulses['X180'], RO_pars])
else:
seg = segment.Segment('segment_{}'.format(i),
[pulses['X90'], pulse_pars_x2, RO_pars])
seg_list.append(seg)
seq.add(seg)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
if return_seq:
return seq, seg_list
else:
return seq_name
def ramsey_active_reset(times, qb_name, operation_dict, cal_points, n=1,
artificial_detunings=0, upload=True,
for_ef=False, last_ge_pulse=False, prep_params=dict()):
'''
    Ramsey sequence for a single qubit (ge transition, or ef transition
    if for_ef is True).
    Input pars:
        times: array of delays (s)
        n: number of pulses (1 is conventional Ramsey)
        artificial_detunings: artificial detuning(s) (Hz) implemented
            using the phase of the second X90 pulse
'''
seq_name = 'Ramsey_sequence'
# Operations
if for_ef:
ramsey_ops = ["X180"] + ["X90_ef"] * 2 * n
if last_ge_pulse:
ramsey_ops += ["X180"]
else:
ramsey_ops = ["X90"] * 2 * n
ramsey_ops += ["RO"]
ramsey_ops = add_suffix(ramsey_ops, " " + qb_name)
# pulses
ramsey_pulses = [deepcopy(operation_dict[op]) for op in ramsey_ops]
pulse_length = ramsey_pulses[2 if for_ef else 1]['sigma'] *\
ramsey_pulses[2 if for_ef else 1]['nr_sigma']
# name and reference swept pulse
for i in range(n):
idx = (2 if for_ef else 1) + i * 2
ramsey_pulses[idx]["name"] = f"Ramsey_x2_{i}"
ramsey_pulses[idx]['ref_point'] = 'start'
# compute dphase
a_d = artificial_detunings if np.ndim(artificial_detunings) == 1 \
else [artificial_detunings]
dphase = [((t - times[0]) * a_d[i % len(a_d)] * 360) % 360
for i, t in enumerate(times)]
# sweep pulses
params = {f'Ramsey_x2_{i}.pulse_delay': times for i in range(n)}
params.update({f'Ramsey_x2_{i}.phase': dphase for i in range(n)})
swept_pulses = sweep_pulse_params(ramsey_pulses, params)
#add preparation pulses
swept_pulses_with_prep = \
[add_preparation_pulses(p, operation_dict, [qb_name], **prep_params)
for p in swept_pulses]
# make sure Ramsey pulses are put into separate elements
# if possible
i = 0
    for seg_pulses in swept_pulses_with_prep:  # pulse list of one segment
        for pulse in seg_pulses:
            if 'name' not in pulse:
                continue
            pulse["element_name"] = f"Ramsey_x2_{i}_element"
        i += 1
seq = pulse_list_list_seq(swept_pulses_with_prep, seq_name, upload=False)
# add calibration segments
seq.extend(cal_points.create_segments(operation_dict, **prep_params))
# reuse sequencer memory by repeating readout pattern
seq.repeat_ro(f"RO {qb_name}", operation_dict)
log.debug(seq)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
return seq, np.arange(seq.n_acq_elements())
def echo_seq(times, pulse_pars, RO_pars,
artificial_detuning=None,
cal_points=True, upload=True, return_seq=False):
'''
Echo sequence for a single qubit using the tektronix.
Input pars:
times: array of times between (start of) pulses (s)
artificial_detuning: artificial_detuning (Hz) implemented using phase
pulse_pars: dict containing the pulse parameters
RO_pars: dict containing the RO parameters
cal_points: whether to use calibration points or not
'''
seq_name = 'Echo_sequence'
seq = sequence.Sequence(seq_name)
seg_list = []
pulses = get_pulse_dict_from_pars(pulse_pars)
center_X180 = deepcopy(pulses['X180'])
final_X90 = deepcopy(pulses['X90'])
center_X180['ref_point'] = 'start'
final_X90['ref_point'] = 'start'
for i, tau in enumerate(times):
center_X180['pulse_delay'] = tau/2
final_X90['pulse_delay'] = tau/2
if artificial_detuning is not None:
final_X90['phase'] = (tau-times[0]) * artificial_detuning * 360
if cal_points and (i == (len(times)-4) or i == (len(times)-3)):
seg = segment.Segment('segment_{}'.format(i),
[pulses['I'], RO_pars])
elif cal_points and (i == (len(times)-2) or i == (len(times)-1)):
seg = segment.Segment('segment_{}'.format(i),
[pulses['X180'], RO_pars])
else:
seg = segment.Segment('segment_{}'.format(i),
[pulses['X90'], center_X180,
final_X90, RO_pars])
seg_list.append(seg)
seq.add(seg)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
if return_seq:
return seq, seg_list
else:
return seq_name
def single_state_active_reset(operation_dict, qb_name,
state='e', upload=True, prep_params={}):
'''
OffOn sequence for a single qubit using the tektronix.
    SSB_Drag pulse is used for driving, simple modulation used for RO
    Input pars:
        operation_dict: dict with all pulse dicts of the qubit
        qb_name: name of the qubit
        state: specifies for which state a pulse should be
               generated (g, e, f)
        upload: whether to upload sequence to instrument or not
        prep_params: qubit preparation_params dict (e.g. preselection or
                     active reset applied before the state preparation)
'''
seq_name = 'single_state_sequence'
seq = sequence.Sequence(seq_name)
# Create dicts with the parameters for all the pulses
state_ops = dict(g=["I", "RO"], e=["X180", "RO"], f=["X180", "X180_ef", "RO"])
pulses = [deepcopy(operation_dict[op])
for op in add_suffix(state_ops[state], " " + qb_name)]
#add preparation pulses
pulses_with_prep = \
add_preparation_pulses(pulses, operation_dict, [qb_name], **prep_params)
seg = segment.Segment('segment_{}_level'.format(state), pulses_with_prep)
seq.add(seg)
# reuse sequencer memory by repeating readout pattern
seq.repeat_ro(f"RO {qb_name}", operation_dict)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
return seq, np.arange(seq.n_acq_elements())
def randomized_renchmarking_seqs(
qb_name, operation_dict, cliffords, nr_seeds=None, net_clifford=0,
gate_decomposition='HZ', interleaved_gate=None, upload=True,
cl_sequence=None, sampling_seeds=None,
cal_points=None, prep_params=dict()):
"""
Args
qb_name (str): name of qubit
operation_dict (dict): dict with all pulse dicts of qubit
cliffords (array): array of ints specifying the number of random
Cliffords to generate in each sequence
nr_seeds (array): array of the form np.arange(nr_seeds_value)
net_clifford (int): 0 or 1; whether the recovery Clifford returns
            qubits to the ground state (0) or puts them in the excited state (1)
gate_decomposition (str): the decomposition of Clifford gates
into primitives; can be "XY", "HZ", or "5Primitives"
interleaved_gate (str): pycqed name for a gate
upload (bool): whether to upload sequence to AWGs
cl_sequence (list): the Clifford sequence to use for all seeds. Can
also be lists of lists in which case the user must ensure that
            len(nr_seeds) % len(cl_sequence) == 0.
sampling_seeds (array of ints): ints that will be used as seeds for
the random generation of Cliffords. Should have the same length
as nr_seeds.
cal_points (CalibrationPoints): instance of CalibrationPoints
prep_params (dict): qubit preparation_params dict
"""
seq_name = '1Qb_RB_sequence'
if sampling_seeds is None:
if nr_seeds is None:
raise ValueError('Please provide either "sampling_seeds" or '
'"nr_seeds."')
sampling_seeds = [None] * len(nr_seeds)
else:
nr_seeds = np.arange(len(sampling_seeds))
if cl_sequence is not None:
if isinstance(cl_sequence[0], list):
# if cl_sequence is a list of lists such that
# len(nr_seeds) != len(cl_sequence) but
# len(nr_seeds) % len(cl_sequence) == 0,
# then create as many copies of the lists in cl_sequence until
# len(cl_sequence) == len(nr_seeds).
assert len(nr_seeds) % len(cl_sequence) == 0
k = len(nr_seeds) // len(cl_sequence)
cl_seq_temp = k * cl_sequence
sequences = []
for nCl in cliffords:
pulse_list_list_all = []
for s in nr_seeds:
if cl_sequence is None:
cl_seq = rb.randomized_benchmarking_sequence(
nCl, desired_net_cl=net_clifford,
interleaved_gate=interleaved_gate,
seed=sampling_seeds[s])
elif isinstance(cl_sequence[0], list):
cl_seq = cl_seq_temp[s]
else:
cl_seq = cl_sequence
pulse_keys = rb.decompose_clifford_seq(
cl_seq, gate_decomp=gate_decomposition)
# to avoid having only virtual gates in segment:
pulse_keys = ['I'] + pulse_keys
pulse_list = [operation_dict[x + ' ' + qb_name] for x in pulse_keys]
pulse_list += [operation_dict['RO ' + qb_name]]
pulse_list_w_prep = add_preparation_pulses(
pulse_list, operation_dict, [qb_name], **prep_params)
pulse_list_list_all.append(pulse_list_w_prep)
seq = pulse_list_list_seq(pulse_list_list_all, seq_name+f'_{nCl}',
upload=False)
if cal_points is not None:
seq.extend(cal_points.create_segments(operation_dict,
**prep_params))
sequences.append(seq)
# reuse sequencer memory by repeating readout pattern
[s.repeat_ro(f"RO {qb_name}", operation_dict) for s in sequences]
if upload:
ps.Pulsar.get_instance().program_awgs(sequences[0])
return sequences, np.arange(sequences[0].n_acq_elements()), \
np.arange(len(cliffords))
def qscale_active_reset(qscales, qb_name, operation_dict, cal_points,
upload=True, prep_params={}, for_ef=False,
last_ge_pulse=False):
'''
Sequence used for calibrating the QScale factor used in the DRAG pulses.
Applies X(pi/2)X(pi), X(pi/2)Y(pi), X(pi/2)Y(-pi) for each value of
QScale factor.
    Beware that the elements alternate: in order to perform these 3
    measurements per QScale factor, the qscales sweep values must be
    repeated 3 times. This was chosen to be more easily compatible with
    standard detector functions and sweep pts.
    Input pars:
        qscales: array of qscale factors (each value repeated 3 times)
        qb_name: name of the qubit
        operation_dict: dict with all pulse dicts of the qubit
        cal_points: CalibrationPoints instance
        upload: whether to upload sequence to instrument or not
        prep_params: qubit preparation_params dict
        for_ef: whether to calibrate the qscale of the ef transition
        last_ge_pulse: if for_ef, whether to append a ge pulse before readout
'''
seq_name = f'QScale{"_ef" if for_ef else ""}_sequence'
# Operations
qscale_base_ops = [['X90', 'X180'], ['X90', 'Y180'], ['X90', 'mY180']]
final_pulses = []
for i, qscale_ops in enumerate(qscale_base_ops):
qscale_ops = add_suffix(qscale_ops, "_ef" if for_ef else "")
if for_ef:
qscale_ops = ['X180'] + qscale_ops
if last_ge_pulse:
qscale_ops += ["X180"]
qscale_ops += ['RO']
qscale_ops = add_suffix(qscale_ops, " " + qb_name)
# pulses
qscale_pulses = [deepcopy(operation_dict[op]) for op in qscale_ops]
# name and reference swept pulse
        for j in range(2):
            idx = (1 if for_ef else 0) + j
            qscale_pulses[idx]["name"] = f"Qscale_{j}"
# sweep pulses
params = {"Qscale_*.motzoi": qscales[i::3]}
swept_pulses = sweep_pulse_params(qscale_pulses, params)
# add preparation pulses
swept_pulses_with_prep = \
[add_preparation_pulses(p, operation_dict, [qb_name], **prep_params)
for p in swept_pulses]
final_pulses.append(swept_pulses_with_prep)
# intertwine pulses in same order as base_ops
# 1. get one list of list from the 3 lists of list
f_p = np.array(final_pulses)
reordered_pulses = [[X90X180, X90Y180, X90mY180]
for X90X180, X90Y180, X90mY180
in zip(f_p[0], f_p[1], f_p[2])]
# 2. reshape to list of list
final_pulses = np.squeeze(np.reshape(reordered_pulses,
(len(qscales), -1))).tolist()
seq = pulse_list_list_seq(final_pulses, seq_name, upload=False)
# add calibration segments
seq.extend(cal_points.create_segments(operation_dict, **prep_params))
# reuse sequencer memory by repeating readout pattern
seq.repeat_ro(f"RO {qb_name}", operation_dict)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
log.debug(seq)
return seq, np.arange(seq.n_acq_elements())
def over_under_rotation_seq(qb_name, nr_pi_pulses_array, operation_dict,
pi_pulse_amp=None, cal_points=True, upload=True):
seq_name = 'Over-under rotation sequence'
seq = sequence.Sequence(seq_name)
seg_list = []
X90 = deepcopy(operation_dict['X90 ' + qb_name])
X180 = deepcopy(operation_dict['X180 ' + qb_name])
if pi_pulse_amp is not None:
X90['amplitude'] = pi_pulse_amp/2
X180['amplitude'] = pi_pulse_amp
for i, N in enumerate(nr_pi_pulses_array):
if cal_points and (i == (len(nr_pi_pulses_array)-4) or
i == (len(nr_pi_pulses_array)-3)):
seg = segment.Segment('segment_{}'.format(i),
[operation_dict['I ' + qb_name],
operation_dict['RO ' + qb_name]])
elif cal_points and (i == (len(nr_pi_pulses_array)-2) or
i == (len(nr_pi_pulses_array)-1)):
seg = segment.Segment('segment_{}'.format(i),
[operation_dict['X180 ' + qb_name],
operation_dict['RO ' + qb_name]])
else:
pulse_list = [X90]
pulse_list += N*[X180]
pulse_list += [operation_dict['RO ' + qb_name]]
seg = segment.Segment('segment_{}'.format(i), pulse_list)
seg_list.append(seg)
seq.add(seg)
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
return
# Helper functions
def pulse_list_list_seq(pulse_list_list, name='pulse_list_list_sequence',
upload=True):
seq = sequence.Sequence(name)
for i, pulse_list in enumerate(pulse_list_list):
seq.add(segment.Segment('segment_{}'.format(i), pulse_list))
if upload:
ps.Pulsar.get_instance().program_awgs(seq)
return seq
def prepend_pulses(pulse_list, pulses_to_prepend):
"""
Prepends a list of pulse to a list of pulses with correct referencing.
:param pulse_list: initial pulse list
:param pulses_to_prepend: pulse to prepend
:return:
list of pulses where prepended pulses are at the beginning of the
returned list
"""
all_pulses = deepcopy(pulse_list)
for i, p in enumerate(reversed(pulses_to_prepend)):
try:
p['ref_pulse'] = all_pulses[0]['name']
except KeyError:
            all_pulses[0]['name'] = 'first_non_prepended_pulse'
p['ref_pulse'] = all_pulses[0]['name']
p['name'] = p.get('name',
f'prepended_pulse_{len(pulses_to_prepend) - i - 1}')
p['ref_point'] = 'start'
p['ref_point_new'] = 'end'
all_pulses = [p] + all_pulses
return all_pulses
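# Hedged usage sketch (not from the original module): prepend_pulses chains the
# prepended pulses in front of the first pulse of the original list. The pulse
# dicts below are minimal placeholders, not real operation_dict entries.
def _prepend_pulses_example():
    main = [{'name': 'main_pulse', 'pulse_type': 'SSB_DRAG_pulse'}]
    prep = [{'pulse_type': 'SquarePulse'}]
    combined = prepend_pulses(main, prep)
    # combined[0] is the prepended pulse, referenced to the start of
    # 'main_pulse' and auto-named 'prepended_pulse_0'
    assert combined[0]['ref_pulse'] == 'main_pulse'
    assert combined[0]['name'] == 'prepended_pulse_0'
    return combined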
def add_preparation_pulses(pulse_list, operation_dict, qb_names,
preparation_type='wait', post_ro_wait=1e-6,
ro_separation=1.5e-6,
reset_reps=1, final_reset_pulse=True,
threshold_mapping=None):
"""
    Prepends to pulse_list the preparation pulses selected by
    preparation_type:
        'active_reset_e': active reset on |e>, repeated reset_reps times
        'active_reset_ef': active reset on |e> and |f>, repeated reset_reps
            times
        'preselection': a single preselection readout per qubit
        'wait': no preparation pulses are added
"""
if threshold_mapping is None:
threshold_mapping = {qbn: {0: 'g', 1: 'e'} for qbn in qb_names}
# Calculate the length of a ge pulse, assumed the same for all qubits
state_ops = dict(g=["I "], e=["X180 "], f=["X180_ef ", "X180 "])
if 'ref_pulse' not in pulse_list[0]:
first_pulse = deepcopy(pulse_list[0])
first_pulse['ref_pulse'] = 'segment_start'
pulse_list[0] = first_pulse
if preparation_type == 'wait':
return pulse_list
elif 'active_reset' in preparation_type:
reset_ro_pulses = []
ops_and_codewords = {}
for i, qbn in enumerate(qb_names):
reset_ro_pulses.append(deepcopy(operation_dict['RO ' + qbn]))
reset_ro_pulses[-1]['ref_point'] = 'start' if i != 0 else 'end'
if preparation_type == 'active_reset_e':
ops_and_codewords[qbn] = [
(state_ops[threshold_mapping[qbn][0]], 0),
(state_ops[threshold_mapping[qbn][1]], 1)]
elif preparation_type == 'active_reset_ef':
assert len(threshold_mapping[qbn]) == 4, \
"Active reset for the f-level requires a mapping of length 4" \
f" but only {len(threshold_mapping)} were given: " \
f"{threshold_mapping}"
ops_and_codewords[qbn] = [
(state_ops[threshold_mapping[qbn][0]], 0),
(state_ops[threshold_mapping[qbn][1]], 1),
(state_ops[threshold_mapping[qbn][2]], 2),
(state_ops[threshold_mapping[qbn][3]], 3)]
else:
raise ValueError(f'Invalid preparation type {preparation_type}')
reset_pulses = []
for i, qbn in enumerate(qb_names):
for ops, codeword in ops_and_codewords[qbn]:
for j, op in enumerate(ops):
reset_pulses.append(deepcopy(operation_dict[op + qbn]))
reset_pulses[-1]['codeword'] = codeword
if j == 0:
reset_pulses[-1]['ref_point'] = 'start'
reset_pulses[-1]['pulse_delay'] = post_ro_wait
else:
reset_pulses[-1]['ref_point'] = 'start'
pulse_length = 0
for jj in range(1, j+1):
if 'pulse_length' in reset_pulses[-1-jj]:
pulse_length += reset_pulses[-1-jj]['pulse_length']
else:
pulse_length += reset_pulses[-1-jj]['sigma'] * \
reset_pulses[-1-jj]['nr_sigma']
reset_pulses[-1]['pulse_delay'] = post_ro_wait+pulse_length
prep_pulse_list = []
for rep in range(reset_reps):
ro_list = deepcopy(reset_ro_pulses)
ro_list[0]['name'] = 'refpulse_reset_element_{}'.format(rep)
for pulse in ro_list:
pulse['element_name'] = 'reset_ro_element_{}'.format(rep)
if rep == 0:
ro_list[0]['ref_pulse'] = 'segment_start'
ro_list[0]['pulse_delay'] = -reset_reps * ro_separation
else:
ro_list[0]['ref_pulse'] = 'refpulse_reset_element_{}'.format(
rep-1)
ro_list[0]['pulse_delay'] = ro_separation
ro_list[0]['ref_point'] = 'start'
rp_list = deepcopy(reset_pulses)
for j, pulse in enumerate(rp_list):
pulse['element_name'] = 'reset_pulse_element_{}'.format(rep)
pulse['ref_pulse'] = 'refpulse_reset_element_{}'.format(rep)
prep_pulse_list += ro_list
prep_pulse_list += rp_list
if final_reset_pulse:
rp_list = deepcopy(reset_pulses)
for pulse in rp_list:
pulse['element_name'] = f'reset_pulse_element_{reset_reps}'
pulse_list += rp_list
return prep_pulse_list + pulse_list
elif preparation_type == 'preselection':
preparation_pulses = []
for i, qbn in enumerate(qb_names):
preparation_pulses.append(deepcopy(operation_dict['RO ' + qbn]))
preparation_pulses[-1]['ref_point'] = 'start'
preparation_pulses[-1]['element_name'] = 'preselection_element'
preparation_pulses[0]['ref_pulse'] = 'segment_start'
preparation_pulses[0]['pulse_delay'] = -ro_separation
return preparation_pulses + pulse_list
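# Hedged usage sketch (not from the original module): with
# preparation_type='preselection', a single readout per qubit is prepended,
# referenced ro_separation before the segment start. The operation_dict entry
# and pulse dicts below are placeholders, not real qubit operations.
def _preselection_prep_example():
    operation_dict = {'RO qb1': {'pulse_type': 'GaussFilteredCosIQPulse'}}
    main = [{'name': 'X180_qb1', 'pulse_type': 'SSB_DRAG_pulse'}]
    with_prep = add_preparation_pulses(main, operation_dict, ['qb1'],
                                       preparation_type='preselection',
                                       ro_separation=1.5e-6)
    assert with_prep[0]['element_name'] == 'preselection_element'
    assert with_prep[0]['pulse_delay'] == -1.5e-6
    return with_prep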
def sweep_pulse_params(pulses, params, pulse_not_found_warning=True):
"""
Sweeps a list of pulses over specified parameters.
Args:
pulses (list): All pulses. Pulses which have to be swept over need to
have a 'name' key.
params (dict): keys in format <pulse_name>.<pulse_param_name>,
values are the sweep values. <pulse_name> can be formatted as
exact name or '<pulse_starts_with>*<pulse_endswith>'. In that case
all pulses with name starting with <pulse_starts_with> and ending
with <pulse_endswith> will be modified. eg. "Rabi_*" will modify
Rabi_1, Rabi_2 in [Rabi_1, Rabi_2, Other_Pulse]
pulse_not_found_warning (bool, default: True) whether a warning
should be issued if no pulse matches a given pulse name.
Returns: a list of pulses_lists where each element is to be used
for a single segment
"""
def check_pulse_name(pulse, target_name):
"""
        Checks if an asterisk is found in the target name; in that case the
        pulse name only needs to start with the part before the asterisk and
        end with the part after it.
"""
target_name_splitted = target_name.split("*")
if len(target_name_splitted) == 1:
return pulse.get('name', "") == target_name
elif len(target_name_splitted) == 2:
return pulse.get('name', "").startswith(target_name_splitted[0]) \
and pulse.get('name', "").endswith(target_name_splitted[1])
else:
raise Exception(f"Only one asterisk in pulse_name is allowed,"
f" more than one in {target_name}")
swept_pulses = []
if len(params.keys()) == 0:
log.warning("No params to sweep. Returning unchanged pulses.")
return pulses
n_sweep_points = len(list(params.values())[0])
assert np.all([len(v) == n_sweep_points for v in params.values()]), \
"Parameter sweep values are not all of the same length: {}" \
.format({n: len(v) for n, v in params.items()})
for i in range(n_sweep_points):
pulses_cp = deepcopy(pulses)
for name, sweep_values in params.items():
pulse_name, param_name = name.split('.')
pulse_indices = [i for i, p in enumerate(pulses)
if check_pulse_name(p, pulse_name)]
if len(pulse_indices) == 0 and pulse_not_found_warning:
log.warning(f"No pulse with name {pulse_name} found in list:"
f"{[p.get('name', 'No Name') for p in pulses]}")
for p_idx in pulse_indices:
pulses_cp[p_idx][param_name] = sweep_values[i]
# pulses_cp[p_idx].pop('name', 0)
swept_pulses.append(pulses_cp)
return swept_pulses
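# Hedged usage sketch (not from the original module) of the
# '<pulse_name>.<pulse_param_name>' sweep format, including the '*' wildcard
# described in the docstring. The pulse dicts are minimal placeholders.
def _sweep_pulse_params_example():
    pulses = [{'name': 'Rabi_0', 'amplitude': 0.0},
              {'name': 'Rabi_1', 'amplitude': 0.0},
              {'name': 'RO', 'amplitude': 0.5}]
    swept = sweep_pulse_params(pulses, {'Rabi_*.amplitude': [0.1, 0.2]})
    # Two segments are produced; in each, both Rabi pulses get the swept
    # amplitude while the RO pulse is left untouched.
    assert len(swept) == 2
    assert swept[0][0]['amplitude'] == 0.1 and swept[0][1]['amplitude'] == 0.1
    assert swept[1][0]['amplitude'] == 0.2 and swept[1][2]['amplitude'] == 0.5
    return swept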
def get_pulse_dict_from_pars(pulse_pars):
'''
Returns a dictionary containing pulse_pars for all the primitive pulses
based on a single set of pulse_pars.
    Using this function deepcopies the pulse parameters, preventing accidental
    editing of the input dictionary.
input args:
pulse_pars: dictionary containing pulse_parameters
return:
pulses: dictionary of pulse_pars dictionaries
'''
pulses = {'I': deepcopy(pulse_pars),
'X180': deepcopy(pulse_pars),
'mX180': deepcopy(pulse_pars),
'X90': deepcopy(pulse_pars),
'mX90': deepcopy(pulse_pars),
'Y180': deepcopy(pulse_pars),
'mY180': deepcopy(pulse_pars),
'Y90': deepcopy(pulse_pars),
'mY90': deepcopy(pulse_pars)}
pi_amp = pulse_pars['amplitude']
pi2_amp = pulse_pars['amplitude'] * pulse_pars['amp90_scale']
pulses['I']['amplitude'] = 0
pulses['mX180']['amplitude'] = -pi_amp
pulses['X90']['amplitude'] = pi2_amp
pulses['mX90']['amplitude'] = -pi2_amp
pulses['Y180']['phase'] += 90
pulses['mY180']['phase'] += 90
pulses['mY180']['amplitude'] = -pi_amp
pulses['Y90']['amplitude'] = pi2_amp
pulses['Y90']['phase'] += 90
pulses['mY90']['amplitude'] = -pi2_amp
pulses['mY90']['phase'] += 90
pulses_sim = {key + 's': deepcopy(val) for key, val in pulses.items()}
for val in pulses_sim.values():
val['ref_point'] = 'start'
pulses.update(pulses_sim)
# Software Z-gate: apply phase offset to all subsequent X and Y pulses
target_qubit = pulse_pars.get('basis', None)
if target_qubit is not None:
Z0 = {'pulse_type': 'VirtualPulse',
'basis_rotation': {target_qubit: 0},
'operation_type': 'Virtual'}
pulses.update({'Z0': Z0,
'Z180': deepcopy(Z0),
'mZ180': deepcopy(Z0),
'Z90': deepcopy(Z0),
'mZ90': deepcopy(Z0)})
pulses['Z180']['basis_rotation'][target_qubit] += 180
pulses['mZ180']['basis_rotation'][target_qubit] += -180
pulses['Z90']['basis_rotation'][target_qubit] += 90
pulses['mZ90']['basis_rotation'][target_qubit] += -90
return pulses
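# Hedged sketch (not part of the original module): the minimal keys a
# pulse_pars dict needs for get_pulse_dict_from_pars and a few of the derived
# primitives. The parameter values are arbitrary placeholders.
def _pulse_dict_example():
    pulse_pars = {'amplitude': 0.5, 'amp90_scale': 0.5, 'phase': 0,
                  'pulse_type': 'SSB_DRAG_pulse'}
    pulses = get_pulse_dict_from_pars(pulse_pars)
    assert pulses['I']['amplitude'] == 0
    assert pulses['X90']['amplitude'] == 0.25
    assert pulses['Y180']['phase'] == 90
    # the 's'-suffixed variants start simultaneously with the previous pulse
    assert pulses['X180s']['ref_point'] == 'start'
    return pulses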
def Z(theta=0, pulse_pars=None):
"""
Software Z-gate of arbitrary rotation.
:param theta: rotation angle
:param pulse_pars: pulse parameters (dict)
:return: Pulse dict of the Z-gate
"""
if pulse_pars is None:
raise ValueError('Pulse_pars is None.')
else:
pulses = get_pulse_dict_from_pars(pulse_pars)
Z_gate = deepcopy(pulses['Z180'])
Z_gate['phase'] = theta
return Z_gate
def add_suffix(operation_list, suffix):
return [op + suffix for op in operation_list]
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import cherrypy
import mako
import os
from girder.constants import VERSION
from . import docs, access
from .rest import Resource, RestException
"""
Whenever we add new return values or new options we should increment the
maintenance value. Whenever we add new endpoints, we should increment the minor
version. If we break backward compatibility in any way, we should increment the
major version. This value is derived from the version number given in
the top level package.json.
"""
API_VERSION = VERSION['apiVersion']
SWAGGER_VERSION = '1.2'
class Description(object):
"""
This class provides convenient chainable semantics to allow api route
handlers to describe themselves to the documentation. A route handler
function can set a description property on itself to an instance of this
class in order to describe itself.
"""
def __init__(self, summary):
self._summary = summary
self._params = []
self._responses = []
self._consumes = []
self._responseClass = None
self._notes = None
def asDict(self):
"""
Returns this description object as an appropriately formatted dict
"""
resp = {
'summary': self._summary,
'notes': self._notes,
'parameters': self._params,
'responseMessages': self._responses,
'responseClass': self._responseClass
}
if self._consumes is not None:
resp['consumes'] = self._consumes
return resp
def responseClass(self, obj):
self._responseClass = obj
return self
def param(self, name, description, paramType='query', dataType='string',
required=True, enum=None):
"""
This helper will build a parameter declaration for you. It has the most
common options as defaults, so you won't have to repeat yourself as much
when declaring the APIs.
Note that we could expose more parameters: allowMultiple, format,
defaultValue, minimum, maximum, uniqueItems, $ref, type (return type).
We also haven't exposed the complex data types.
:param name: name of the parameter used in the REST query.
:param description: explanation of the parameter.
:param paramType: how is the parameter sent. One of 'query', 'path',
'body', 'header', or 'form'.
:param dataType: the data type expected in the parameter. This is one
of 'integer', 'long', 'float', 'double', 'string',
'byte', 'boolean', 'date', 'dateType', 'array', or
'File'.
:param required: True if the request will fail if this parameter is not
present, False if the parameter is optional.
:param enum: a fixed list of possible values for the field.
"""
param = {
'name': name,
'description': description,
'paramType': paramType,
'type': dataType,
'allowMultiple': False,
'required': required
}
if enum:
param['enum'] = enum
self._params.append(param)
return self
def consumes(self, value):
self._consumes.append(value)
return self
def notes(self, notes):
self._notes = notes
return self
def errorResponse(self, reason='A parameter was invalid.', code=400):
"""
This helper will build an errorResponse declaration for you. Many
endpoints will be able to use the default parameter values for one of
their responses.
"""
self._responses.append({
'message': reason,
'code': code
})
return self
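# Illustrative sketch (not part of the original module): typical chained use of
# Description on a route handler, as referenced in the class docstring. The
# handler and its parameters are hypothetical, not an actual Girder endpoint.
def _exampleHandler(params):
    pass
_exampleHandler.description = (
    Description('Return a greeting for a user.')
    .param('name', 'The name to greet.', required=False)
    .param('loud', 'Whether to shout the greeting.', dataType='boolean',
           required=False)
    .errorResponse('Name was too long.'))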
class ApiDocs(object):
"""
This serves up the swagger page.
"""
exposed = True
indexHtml = None
vars = {
'staticRoot': '',
'title': 'Girder - REST API Documentation'
}
template = r"""
<!DOCTYPE html>
<html lang="en">
<head>
<title>${title}</title>
<link rel="stylesheet"
href="//fonts.googleapis.com/css?family=Droid+Sans:400,700">
<link rel="stylesheet"
href="${staticRoot}/lib/fontello/css/fontello.css">
<link rel="stylesheet"
href="${staticRoot}/built/swagger/css/reset.css">
<link rel="stylesheet"
href="${staticRoot}/built/swagger/css/screen.css">
<link rel="stylesheet"
href="${staticRoot}/built/swagger/docs.css">
<link rel="icon"
type="image/png"
href="${staticRoot}/img/Girder_Favicon.png">
</head>
<body>
<div class="docs-header">
<span>Girder REST API Documentation</span>
<i class="icon-book-alt right"></i>
</div>
<div class="docs-body">
<p>Below you will find the list of all of the resource types exposed
by the Girder RESTful Web API. Click any of the resource links to open
up a list of all available endpoints related to each resource type.
</p>
<p>Clicking any of those endpoints will display detailed documentation
about the purpose of each endpoint and the input parameters and output
values. You can also call API endpoints directly from this page by
typing in the parameters you wish to pass and then clicking the "Try
it out!" button.</p>
<p><b>Warning:</b> This is not a sandbox—calls that you make
from this page are the same as calling the API with any other client,
so update or delete calls that you make will affect the actual data on
the server.</p>
</div>
<div class="swagger-section">
<div id="swagger-ui-container"
class="swagger-ui-wrap docs-swagger-container">
</div>
</div>
<script src="${staticRoot}/built/swagger/lib/jquery-1.8.0.min.js">
</script>
<script src="${staticRoot}/built/swagger/lib/jquery.slideto.min.js">
</script>
<script src="${staticRoot}/built/swagger/lib/jquery.wiggle.min.js">
</script>
<script src="${staticRoot}/built/swagger/lib/jquery.ba-bbq.min.js">
</script>
<script src="${staticRoot}/built/swagger/lib/handlebars-1.0.0.js">
</script>
<script src="${staticRoot}/built/swagger/lib/underscore-min.js">
</script>
<script src="${staticRoot}/built/swagger/lib/backbone-min.js"></script>
<script src="${staticRoot}/built/swagger/lib/shred.bundle.js"></script>
<script src="${staticRoot}/built/swagger/lib/swagger.js"></script>
<script src="${staticRoot}/built/swagger/swagger-ui.min.js"></script>
<script src="${staticRoot}/built/swagger/lib/highlight.7.3.pack.js">
</script>
<script src="${staticRoot}/girder-swagger.js"></script>
</body>
</html>
"""
def updateHtmlVars(self, vars):
self.vars.update(vars)
self.indexHtml = None
def GET(self, **params):
if self.indexHtml is None:
self.indexHtml = mako.template.Template(self.template).render(
**self.vars)
return self.indexHtml
def DELETE(self, **params):
raise cherrypy.HTTPError(405)
def PATCH(self, **params):
raise cherrypy.HTTPError(405)
def POST(self, **params):
raise cherrypy.HTTPError(405)
def PUT(self, **params):
raise cherrypy.HTTPError(405)
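# A minimal sketch of how the cached page above is produced (mako is imported
# elsewhere in this module): GET() lazily renders the template with the
# current vars and memoizes the HTML until updateHtmlVars() clears the cache.
#
#     apiDocs = ApiDocs()
#     apiDocs.updateHtmlVars({'staticRoot': '/static'})  # invalidates cache
#     html = mako.template.Template(apiDocs.template).render(**apiDocs.vars)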
class Describe(Resource):
def __init__(self):
self.route('GET', (), self.listResources, nodoc=True)
self.route('GET', (':resource',), self.describeResource, nodoc=True)
@access.public
def listResources(self, params):
return {
'apiVersion': API_VERSION,
'swaggerVersion': SWAGGER_VERSION,
'basePath': cherrypy.url(),
'apis': [{'path': '/{}'.format(resource)}
for resource in sorted(docs.discovery)]
}
def _compareRoutes(self, routeOp1, routeOp2):
"""
        Order routes based on their path, alphabetically, with parameter
        segments sorting before fixed path segments.
:param routeOp1: tuple of (route, op) to compare
:param routeOp2: tuple of (route, op) to compare
:returns: negative if routeOp1<routeOp2, positive if routeOp1>routeOp2.
"""
        # Replacing '{' with ' ' is a simple way to make an ASCII sort do what
        # we want for routes. We would have to do more work if we allowed '-'
        # in route names.
return cmp(routeOp1[0].replace('{', ' '), routeOp2[0].replace('{', ' '))
def _compareOperations(self, op1, op2):
"""
        Order operations in our preferred HTTP method order. Methods not in
        our list are placed afterwards and sorted alphabetically.
:param op1: first operation dictionary to compare.
:param op2: second operation dictionary to compare.
:returns: negative if op1<op2, positive if op1>op2.
"""
methodOrder = ['GET', 'PUT', 'POST', 'PATCH', 'DELETE']
method1 = op1.get('httpMethod', '')
method2 = op2.get('httpMethod', '')
if method1 in methodOrder and method2 in methodOrder:
return cmp(methodOrder.index(method1), methodOrder.index(method2))
if method1 in methodOrder or method2 in methodOrder:
return cmp(method1 not in methodOrder, method2 not in methodOrder)
return cmp(method1, method2)
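    # Ordering sketch (illustrative values only): replacing '{' with ' ' makes
    # parameterized segments sort ahead of fixed ones, and known HTTP methods
    # follow the preferred order with unknown methods pushed to the end.
    #
    #     sorted(['/item/copy', '/item/{id}'],
    #            key=lambda r: r.replace('{', ' '))
    #     # -> ['/item/{id}', '/item/copy']
    #
    #     # GET operations sort before DELETE under _compareOperations.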
@access.public
def describeResource(self, resource, params):
if resource not in docs.routes:
raise RestException('Invalid resource: {}'.format(resource))
return {
'apiVersion': API_VERSION,
'swaggerVersion': SWAGGER_VERSION,
'basePath': os.path.dirname(os.path.dirname(cherrypy.url())),
'models': docs.models,
'apis': [{'path': route,
'operations': sorted(op, self._compareOperations)}
for route, op in sorted(docs.routes[resource].iteritems(),
self._compareRoutes)]
}
|
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2014-02-07 17:05:11
import os
import json
import time
import logging
import itertools
from six.moves import queue as Queue
from collections import deque
from six import iteritems, itervalues
from pyspider.libs import counter, utils
from .task_queue import TaskQueue
logger = logging.getLogger('scheduler')
class Scheduler(object):
UPDATE_PROJECT_INTERVAL = 5 * 60
default_schedule = {
'priority': 0,
'retries': 3,
'exetime': 0,
'age': -1,
'itag': None,
}
LOOP_LIMIT = 1000
LOOP_INTERVAL = 0.1
ACTIVE_TASKS = 100
INQUEUE_LIMIT = 0
EXCEPTION_LIMIT = 3
DELETE_TIME = 24 * 60 * 60
def __init__(self, taskdb, projectdb, newtask_queue, status_queue,
out_queue, data_path='./data', resultdb=None):
self.taskdb = taskdb
self.projectdb = projectdb
self.resultdb = resultdb
self.newtask_queue = newtask_queue
self.status_queue = status_queue
self.out_queue = out_queue
self.data_path = data_path
self._send_buffer = deque()
self._quit = False
self._exceptions = 0
self.projects = dict()
self._force_update_project = False
self._last_update_project = 0
self.task_queue = dict()
self._last_tick = int(time.time())
self._cnt = {
"5m_time": counter.CounterManager(
lambda: counter.TimebaseAverageEventCounter(30, 10)),
"5m": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(30, 10)),
"1h": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(60, 60)),
"1d": counter.CounterManager(
lambda: counter.TimebaseAverageWindowCounter(10 * 60, 24 * 6)),
"all": counter.CounterManager(
lambda: counter.TotalCounter()),
}
self._cnt['1h'].load(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].load(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].load(os.path.join(self.data_path, 'scheduler.all'))
self._last_dump_cnt = 0
def _update_projects(self):
'''Check project update'''
now = time.time()
if (
not self._force_update_project
and self._last_update_project + self.UPDATE_PROJECT_INTERVAL > now
):
return
for project in self.projectdb.check_update(self._last_update_project):
self._update_project(project)
logger.debug("project: %s updated.", project['name'])
self._force_update_project = False
self._last_update_project = now
def _update_project(self, project):
'''update one project'''
if project['name'] not in self.projects:
self.projects[project['name']] = {}
self.projects[project['name']].update(project)
self.projects[project['name']]['md5sum'] = utils.md5string(project['script'])
if not self.projects[project['name']].get('active_tasks', None):
self.projects[project['name']]['active_tasks'] = deque(maxlen=self.ACTIVE_TASKS)
        # load the task queue when the project is running; delete it when the
        # project is stopped
if project['status'] in ('RUNNING', 'DEBUG'):
if project['name'] not in self.task_queue:
self._load_tasks(project['name'])
self.task_queue[project['name']].rate = project['rate']
self.task_queue[project['name']].burst = project['burst']
# update project runtime info from processor by sending a _on_get_info
# request, result is in status_page.track.save
self.on_select_task({
'taskid': '_on_get_info',
'project': project['name'],
'url': 'data:,_on_get_info',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': ['min_tick', ],
},
'process': {
'callback': '_on_get_info',
},
})
else:
if project['name'] in self.task_queue:
self.task_queue[project['name']].rate = 0
self.task_queue[project['name']].burst = 0
del self.task_queue[project['name']]
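    # For reference, the project records handled above are plain dicts; a
    # minimal (hypothetical) example of the fields this method relies on:
    #
    #     {'name': 'demo', 'status': 'RUNNING', 'script': '...',
    #      'rate': 1.0, 'burst': 10.0}
    #
    # Only RUNNING/DEBUG projects keep a live TaskQueue; any other status has
    # its queue throttled to zero and removed.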
scheduler_task_fields = ['taskid', 'project', 'schedule', ]
def _load_tasks(self, project):
'''load tasks from database'''
self.task_queue[project] = TaskQueue(rate=0, burst=0)
for task in self.taskdb.load_tasks(
self.taskdb.ACTIVE, project, self.scheduler_task_fields
):
taskid = task['taskid']
_schedule = task.get('schedule', self.default_schedule)
priority = _schedule.get('priority', self.default_schedule['priority'])
exetime = _schedule.get('exetime', self.default_schedule['exetime'])
self.task_queue[project].put(taskid, priority, exetime)
logger.debug('project: %s loaded %d tasks.', project, len(self.task_queue[project]))
if self.projects[project]['status'] in ('RUNNING', 'DEBUG'):
self.task_queue[project].rate = self.projects[project]['rate']
self.task_queue[project].burst = self.projects[project]['burst']
else:
self.task_queue[project].rate = 0
self.task_queue[project].burst = 0
if project not in self._cnt['all']:
status_count = self.taskdb.status_count(project)
self._cnt['all'].value(
(project, 'success'),
status_count.get(self.taskdb.SUCCESS, 0)
)
self._cnt['all'].value(
(project, 'failed'),
status_count.get(self.taskdb.FAILED, 0) + status_count.get(self.taskdb.BAD, 0)
)
self._cnt['all'].value((project, 'pending'), len(self.task_queue[project]))
def task_verify(self, task):
'''
        Return False if any of 'taskid', 'project', 'url' is missing from the
        task dict, or if the project is not in task_queue.
'''
for each in ('taskid', 'project', 'url', ):
if each not in task or not task[each]:
logger.error('%s not in task: %.200r', each, task)
return False
if task['project'] not in self.task_queue:
logger.error('unknown project: %s', task['project'])
return False
return True
def insert_task(self, task):
'''insert task into database'''
return self.taskdb.insert(task['project'], task['taskid'], task)
def update_task(self, task):
'''update task in database'''
return self.taskdb.update(task['project'], task['taskid'], task)
def put_task(self, task):
'''put task to task queue'''
_schedule = task.get('schedule', self.default_schedule)
self.task_queue[task['project']].put(
task['taskid'],
priority=_schedule.get('priority', self.default_schedule['priority']),
exetime=_schedule.get('exetime', self.default_schedule['exetime'])
)
def send_task(self, task, force=True):
'''
        dispatch task to fetcher
        out_queue may have a size limit to prevent blocking; tasks that do not
        fit are kept in _send_buffer
'''
try:
self.out_queue.put_nowait(task)
except Queue.Full:
if force:
self._send_buffer.appendleft(task)
else:
raise
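    # Behaviour sketch: with force=True (the default) a full out_queue never
    # raises; the task is parked at the left of _send_buffer and retried by
    # _check_select() on a later loop. With force=False, Queue.Full reaches
    # the caller instead:
    #
    #     try:
    #         scheduler.send_task(task, force=False)
    #     except Queue.Full:
    #         pass  # caller decides how to handle back-pressure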
def _check_task_done(self):
'''Check status queue'''
cnt = 0
try:
while True:
task = self.status_queue.get_nowait()
# check _on_get_info result here
if task.get('taskid') == '_on_get_info' and 'project' in task and 'track' in task:
self.projects[task['project']].update(task['track'].get('save') or {})
logger.info(
'%s on_get_info %r', task['project'], task['track'].get('save', {})
)
continue
elif not self.task_verify(task):
continue
self.on_task_status(task)
cnt += 1
except Queue.Empty:
pass
return cnt
merge_task_fields = ['taskid', 'project', 'url', 'status', 'schedule', 'lastcrawltime']
def _check_request(self):
'''Check new task queue'''
tasks = {}
while len(tasks) < self.LOOP_LIMIT:
try:
task = self.newtask_queue.get_nowait()
except Queue.Empty:
break
if isinstance(task, list):
_tasks = task
else:
_tasks = (task, )
for task in _tasks:
if not self.task_verify(task):
continue
if task['taskid'] in self.task_queue[task['project']]:
if not task.get('schedule', {}).get('force_update', False):
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
continue
if task['taskid'] in tasks:
if not task.get('schedule', {}).get('force_update', False):
continue
tasks[task['taskid']] = task
for task in itervalues(tasks):
if self.INQUEUE_LIMIT and len(self.task_queue[task['project']]) >= self.INQUEUE_LIMIT:
logger.debug('overflow task %(project)s:%(taskid)s %(url)s', task)
continue
oldtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.merge_task_fields)
if oldtask:
task = self.on_old_request(task, oldtask)
else:
task = self.on_new_request(task)
return len(tasks)
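    # Dedup sketch: a task already present in the project's TaskQueue (or
    # already seen in this batch) is dropped unless its schedule carries
    # force_update. For example, a new-task message like
    #
    #     {'taskid': 'abc', 'project': 'demo', 'url': 'http://example.com/',
    #      'schedule': {'force_update': True}}
    #
    # is still accepted and re-evaluated via on_old_request/on_new_request.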
def _check_cronjob(self):
"""Check projects cronjob tick, return True when a new tick is sended"""
now = time.time()
self._last_tick = int(self._last_tick)
if now - self._last_tick < 1:
return False
self._last_tick += 1
for project in itervalues(self.projects):
if project['status'] not in ('DEBUG', 'RUNNING'):
continue
if project.get('min_tick', 0) == 0:
continue
if self._last_tick % int(project['min_tick']) != 0:
continue
self.on_select_task({
'taskid': '_on_cronjob',
'project': project['name'],
'url': 'data:,_on_cronjob',
'status': self.taskdb.SUCCESS,
'fetch': {
'save': {
'tick': self._last_tick,
},
},
'process': {
'callback': '_on_cronjob',
},
})
return True
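    # Tick semantics sketch: _last_tick advances by one second per call, so a
    # project whose min_tick is, say, 60 receives an '_on_cronjob' task
    # whenever the tick is divisible by 60 -- roughly once a minute while the
    # scheduler keeps up. min_tick itself is reported by the processor through
    # the '_on_get_info' task issued in _update_project().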
request_task_fields = [
'taskid',
'project',
'url',
'status',
'schedule',
'fetch',
'process',
'track',
'lastcrawltime'
]
def _check_select(self):
'''Select task to fetch & process'''
while self._send_buffer:
_task = self._send_buffer.pop()
try:
                # use force=False here so a full queue raises Queue.Full
                # instead of silently re-appending to _send_buffer
self.send_task(_task, False)
except Queue.Full:
self._send_buffer.append(_task)
break
if self.out_queue.full():
return {}
taskids = []
cnt = 0
cnt_dict = dict()
limit = self.LOOP_LIMIT
for project, task_queue in iteritems(self.task_queue):
if cnt >= limit:
break
# task queue
self.task_queue[project].check_update()
project_cnt = 0
            # check send_buffer here. When it is not empty, out_queue may be
            # blocked; avoid sending more tasks
while cnt < limit and project_cnt < limit / 10:
taskid = task_queue.get()
if not taskid:
break
taskids.append((project, taskid))
project_cnt += 1
cnt += 1
cnt_dict[project] = project_cnt
for project, taskid in taskids:
task = self.taskdb.get_task(project, taskid, fields=self.request_task_fields)
if not task:
continue
task = self.on_select_task(task)
return cnt_dict
def _print_counter_log(self):
# print top 5 active counters
keywords = ('pending', 'success', 'retry', 'failed')
total_cnt = {}
project_actives = []
project_fails = []
for key in keywords:
total_cnt[key] = 0
for project, subcounter in iteritems(self._cnt['5m']):
actives = 0
for key in keywords:
cnt = subcounter.get(key, None)
if cnt:
cnt = cnt.sum
total_cnt[key] += cnt
actives += cnt
project_actives.append((actives, project))
fails = subcounter.get('failed', None)
if fails:
project_fails.append((fails.sum, project))
top_2_fails = sorted(project_fails, reverse=True)[:2]
top_3_actives = sorted([x for x in project_actives if x[1] not in top_2_fails],
reverse=True)[:5 - len(top_2_fails)]
log_str = ("in 5m: new:%(pending)d,success:%(success)d,"
"retry:%(retry)d,failed:%(failed)d" % total_cnt)
for _, project in itertools.chain(top_3_actives, top_2_fails):
subcounter = self._cnt['5m'][project].to_dict(get_value='sum')
log_str += " %s:%d,%d,%d,%d" % (project,
subcounter.get('pending', 0),
subcounter.get('success', 0),
subcounter.get('retry', 0),
subcounter.get('failed', 0))
logger.info(log_str)
def _dump_cnt(self):
'''Dump counters to file'''
self._cnt['1h'].dump(os.path.join(self.data_path, 'scheduler.1h'))
self._cnt['1d'].dump(os.path.join(self.data_path, 'scheduler.1d'))
self._cnt['all'].dump(os.path.join(self.data_path, 'scheduler.all'))
def _try_dump_cnt(self):
'''Dump counters every 60 seconds'''
now = time.time()
if now - self._last_dump_cnt > 60:
self._last_dump_cnt = now
self._dump_cnt()
self._print_counter_log()
def _check_delete(self):
'''Check project delete'''
now = time.time()
for project in list(itervalues(self.projects)):
if project['status'] != 'STOP':
continue
if now - project['updatetime'] < self.DELETE_TIME:
continue
if 'delete' not in self.projectdb.split_group(project['group']):
continue
logger.warning("deleting project: %s!", project['name'])
if project['name'] in self.task_queue:
self.task_queue[project['name']].rate = 0
self.task_queue[project['name']].burst = 0
del self.task_queue[project['name']]
del self.projects[project['name']]
self.taskdb.drop(project['name'])
self.projectdb.drop(project['name'])
if self.resultdb:
self.resultdb.drop(project['name'])
def __len__(self):
return sum(len(x) for x in itervalues(self.task_queue))
def quit(self):
'''Set quit signal'''
self._quit = True
def run_once(self):
        '''Consume queues and feed tasks to the fetcher, once'''
self._update_projects()
self._check_task_done()
self._check_request()
while self._check_cronjob():
pass
self._check_select()
self._check_delete()
self._try_dump_cnt()
def run(self):
'''Start scheduler loop'''
logger.info("loading projects")
while not self._quit:
try:
time.sleep(self.LOOP_INTERVAL)
self.run_once()
self._exceptions = 0
except KeyboardInterrupt:
break
except Exception as e:
logger.exception(e)
self._exceptions += 1
if self._exceptions > self.EXCEPTION_LIMIT:
break
continue
logger.info("scheduler exiting...")
self._dump_cnt()
def trigger_on_start(self, project):
'''trigger an on_start callback of project'''
self.newtask_queue.put({
"project": project,
"taskid": "on_start",
"url": "data:,on_start",
"process": {
"callback": "on_start",
},
})
def xmlrpc_run(self, port=23333, bind='127.0.0.1', logRequests=False):
'''Start xmlrpc interface'''
try:
from six.moves.xmlrpc_server import SimpleXMLRPCServer
except ImportError:
from SimpleXMLRPCServer import SimpleXMLRPCServer
server = SimpleXMLRPCServer((bind, port), allow_none=True, logRequests=logRequests)
server.register_introspection_functions()
server.register_multicall_functions()
server.register_function(self.quit, '_quit')
server.register_function(self.__len__, 'size')
def dump_counter(_time, _type):
try:
return self._cnt[_time].to_dict(_type)
except:
logger.exception('')
server.register_function(dump_counter, 'counter')
def new_task(task):
if self.task_verify(task):
self.newtask_queue.put(task)
return True
return False
server.register_function(new_task, 'newtask')
def send_task(task):
'''dispatch task to fetcher'''
self.send_task(task)
return True
server.register_function(send_task, 'send_task')
def update_project():
self._force_update_project = True
server.register_function(update_project, 'update_project')
def get_active_tasks(project=None, limit=100):
allowed_keys = set((
'taskid',
'project',
'status',
'url',
'lastcrawltime',
'updatetime',
'track',
))
track_allowed_keys = set((
'ok',
'time',
'follows',
'status_code',
))
iters = [iter(x['active_tasks']) for k, x in iteritems(self.projects)
if x and (k == project if project else True)]
tasks = [next(x, None) for x in iters]
result = []
while len(result) < limit and tasks and not all(x is None for x in tasks):
updatetime, task = t = max(tasks)
i = tasks.index(t)
tasks[i] = next(iters[i], None)
for key in list(task):
if key == 'track':
for k in list(task[key].get('fetch', [])):
if k not in track_allowed_keys:
del task[key]['fetch'][k]
for k in list(task[key].get('process', [])):
if k not in track_allowed_keys:
del task[key]['process'][k]
if key in allowed_keys:
continue
del task[key]
result.append(t)
# fix for "<type 'exceptions.TypeError'>:dictionary key must be string"
# have no idea why
return json.loads(json.dumps(result))
server.register_function(get_active_tasks, 'get_active_tasks')
server.timeout = 0.5
while not self._quit:
server.handle_request()
server.server_close()
def on_new_request(self, task):
        '''Called when a new request arrives'''
task['status'] = self.taskdb.ACTIVE
self.insert_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
self._cnt['all'].event((project, 'pending'), +1)
logger.info('new task %(project)s:%(taskid)s %(url)s', task)
return task
def on_old_request(self, task, old_task):
        '''Called when a request arrives for a task that has been crawled before'''
now = time.time()
_schedule = task.get('schedule', self.default_schedule)
old_schedule = old_task.get('schedule', {})
restart = False
schedule_age = _schedule.get('age', self.default_schedule['age'])
if _schedule.get('itag') and _schedule['itag'] != old_schedule.get('itag'):
restart = True
elif schedule_age >= 0 and schedule_age + (old_task.get('lastcrawltime', 0) or 0) < now:
restart = True
elif _schedule.get('force_update'):
restart = True
if not restart:
logger.debug('ignore newtask %(project)s:%(taskid)s %(url)s', task)
return
task['status'] = self.taskdb.ACTIVE
self.update_task(task)
self.put_task(task)
project = task['project']
if old_task['status'] != self.taskdb.ACTIVE:
self._cnt['5m'].event((project, 'pending'), +1)
self._cnt['1h'].event((project, 'pending'), +1)
self._cnt['1d'].event((project, 'pending'), +1)
if old_task['status'] == self.taskdb.SUCCESS:
self._cnt['all'].event((project, 'success'), -1).event((project, 'pending'), +1)
elif old_task['status'] == self.taskdb.FAILED:
self._cnt['all'].event((project, 'failed'), -1).event((project, 'pending'), +1)
logger.info('restart task %(project)s:%(taskid)s %(url)s', task)
return task
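    # Restart decision sketch: an already-known task is re-activated when any
    # of the three checks above holds -- the itag changed, the schedule's age
    # has expired (age >= 0 and lastcrawltime + age < now), or force_update is
    # set. For example, with age=3600 a page last crawled two hours ago is
    # restarted, while one crawled ten minutes ago is ignored.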
def on_task_status(self, task):
        '''Called when a status pack arrives'''
try:
procesok = task['track']['process']['ok']
if not self.task_queue[task['project']].done(task['taskid']):
                logger.error('not processing pack: %(project)s:%(taskid)s %(url)s', task)
return None
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']]['active_tasks'].appendleft((time.time(), task))
return ret
def on_task_done(self, task):
        '''Called when a task is done and successful; called by `on_task_status`'''
task['status'] = self.taskdb.SUCCESS
task['lastcrawltime'] = time.time()
if 'schedule' in task:
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
task['status'] = self.taskdb.ACTIVE
next_exetime = task['schedule'].get('age')
task['schedule']['exetime'] = time.time() + next_exetime
self.put_task(task)
else:
del task['schedule']
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'success'), +1)
self._cnt['1h'].event((project, 'success'), +1)
self._cnt['1d'].event((project, 'success'), +1)
self._cnt['all'].event((project, 'success'), +1).event((project, 'pending'), -1)
logger.info('task done %(project)s:%(taskid)s %(url)s', task)
return task
def on_task_failed(self, task):
        '''Called when a task has failed; called by `on_task_status`'''
if 'schedule' not in task:
old_task = self.taskdb.get_task(task['project'], task['taskid'], fields=['schedule'])
if old_task is None:
                logger.error('unknown status pack: %s', task)
return
task['schedule'] = old_task.get('schedule', {})
retries = task['schedule'].get('retries', self.default_schedule['retries'])
retried = task['schedule'].get('retried', 0)
if retried == 0:
next_exetime = 0
elif retried == 1:
next_exetime = 1 * 60 * 60
else:
next_exetime = 6 * (2 ** retried) * 60 * 60
if task['schedule'].get('auto_recrawl') and 'age' in task['schedule']:
next_exetime = min(next_exetime, task['schedule'].get('age'))
elif retried >= retries:
next_exetime = -1
if next_exetime < 0:
task['status'] = self.taskdb.FAILED
task['lastcrawltime'] = time.time()
self.update_task(task)
project = task['project']
self._cnt['5m'].event((project, 'failed'), +1)
self._cnt['1h'].event((project, 'failed'), +1)
self._cnt['1d'].event((project, 'failed'), +1)
self._cnt['all'].event((project, 'failed'), +1).event((project, 'pending'), -1)
            logger.info('task failed %(project)s:%(taskid)s %(url)s', task)
return task
else:
task['schedule']['retried'] = retried + 1
task['schedule']['exetime'] = time.time() + next_exetime
task['lastcrawltime'] = time.time()
self.update_task(task)
self.put_task(task)
project = task['project']
self._cnt['5m'].event((project, 'retry'), +1)
self._cnt['1h'].event((project, 'retry'), +1)
self._cnt['1d'].event((project, 'retry'), +1)
# self._cnt['all'].event((project, 'retry'), +1)
logger.info('task retry %d/%d %%(project)s:%%(taskid)s %%(url)s' % (
retried, retries), task)
return task
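    # Back-off sketch (default retries=3, derived from the branches above):
    # retried 0 -> retry immediately, 1 -> after 1 hour, 2 -> after 24 hours,
    # 3 -> retried >= retries, so next_exetime becomes -1 and the task is
    # marked FAILED. With auto_recrawl and an 'age' set, the delay is instead
    # capped at that age and the retries limit is not applied.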
def on_select_task(self, task):
'''Called when a task is selected to fetch & process'''
        # inject information about the project
logger.info('select %(project)s:%(taskid)s %(url)s', task)
project_info = self.projects.get(task['project'])
assert project_info, 'no such project'
task['group'] = project_info.get('group')
task['project_md5sum'] = project_info.get('md5sum')
task['project_updatetime'] = project_info.get('updatetime', 0)
project_info['active_tasks'].appendleft((time.time(), task))
self.send_task(task)
return task
from tornado import gen
class OneScheduler(Scheduler):
"""
    Scheduler subclass for "one" mode.
    Overrides the send_task method to call
    processor.on_task(fetcher.fetch(task)) instead of consuming the queue.
"""
def _check_select(self):
"""
        Interactive mode for selecting tasks
"""
if not self.interactive:
return super(OneScheduler, self)._check_select()
# waiting for running tasks
if self.running_task > 0:
return
is_crawled = []
def run(project=None):
return crawl('on_start', project=project)
def crawl(url, project=None, **kwargs):
"""
Crawl given url, same parameters as BaseHandler.crawl
            url - a url or taskid; stored parameters are reused if it exists in the taskdb
project - can be ignored if only one project exists.
"""
# looking up the project instance
if project is None:
if len(self.projects) == 1:
project = list(self.projects.keys())[0]
else:
                    raise LookupError('You need to specify the project: %r'
% list(self.projects.keys()))
project_data = self.processor.project_manager.get(project)
if not project_data:
raise LookupError('no such project: %s' % project)
# get task package
instance = project_data['instance']
instance._reset()
task = instance.crawl(url, **kwargs)
if isinstance(task, list):
raise Exception('url list is not allowed in interactive mode')
# check task in taskdb
if not kwargs:
dbtask = self.taskdb.get_task(task['project'], task['taskid'],
fields=self.request_task_fields)
if not dbtask:
dbtask = self.taskdb.get_task(task['project'], task['url'],
fields=self.request_task_fields)
if dbtask:
task = dbtask
# select the task
self.on_select_task(task)
is_crawled.append(True)
shell.ask_exit()
def quit_interactive():
'''Quit interactive mode'''
is_crawled.append(True)
self.interactive = False
shell.ask_exit()
def quit_pyspider():
'''Close pyspider'''
is_crawled[:] = []
shell.ask_exit()
shell = utils.get_python_console()
shell.interact(
'pyspider shell - Select task\n'
'crawl(url, project=None, **kwargs) - same parameters as BaseHandler.crawl\n'
'quit_interactive() - Quit interactive mode\n'
'quit_pyspider() - Close pyspider'
)
if not is_crawled:
self.ioloop.stop()
def __getattr__(self, name):
"""patch for crawl(url, callback=self.index_page) API"""
if self.interactive:
return name
raise AttributeError(name)
def on_task_status(self, task):
"""Ignore not processing error in interactive mode"""
if not self.interactive:
super(OneScheduler, self).on_task_status(task)
try:
procesok = task['track']['process']['ok']
except KeyError as e:
logger.error("Bad status pack: %s", e)
return None
if procesok:
ret = self.on_task_done(task)
else:
ret = self.on_task_failed(task)
if task['track']['fetch'].get('time'):
self._cnt['5m_time'].event((task['project'], 'fetch_time'),
task['track']['fetch']['time'])
if task['track']['process'].get('time'):
self._cnt['5m_time'].event((task['project'], 'process_time'),
task['track']['process'].get('time'))
self.projects[task['project']]['active_tasks'].appendleft((time.time(), task))
return ret
def init_one(self, ioloop, fetcher, processor,
result_worker=None, interactive=False):
self.ioloop = ioloop
self.fetcher = fetcher
self.processor = processor
self.result_worker = result_worker
self.interactive = interactive
self.running_task = 0
@gen.coroutine
def do_task(self, task):
self.running_task += 1
result = yield gen.Task(self.fetcher.fetch, task)
type, task, response = result.args
self.processor.on_task(task, response)
        # process any messages the handler produced
while not self.processor.inqueue.empty():
_task, _response = self.processor.inqueue.get()
self.processor.on_task(_task, _response)
        # hand any results to the result worker
while not self.processor.result_queue.empty():
_task, _result = self.processor.result_queue.get()
if self.result_worker:
self.result_worker.on_result(_task, _result)
self.running_task -= 1
def send_task(self, task, force=True):
if self.fetcher.http_client.free_size() <= 0:
if force:
self._send_buffer.appendleft(task)
else:
raise self.outqueue.Full
self.ioloop.add_future(self.do_task(task), lambda x: x.result())
def run(self):
import tornado.ioloop
tornado.ioloop.PeriodicCallback(self.run_once, 100,
io_loop=self.ioloop).start()
self.ioloop.start()
def quit(self):
self.ioloop.stop()
logger.info("scheduler exiting...")
|
|
# Copyright 2015-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import json
import os
import shutil
import tempfile
import time # NOQA needed for some recordings
from unittest import TestCase
from botocore.exceptions import ClientError
from c7n.executor import MainThreadExecutor
from c7n.resources import s3
from c7n.mu import LambdaManager
from c7n.ufuncs import s3crypt
from .common import (
BaseTest, ConfigTest, event_data, skip_if_not_validating, functional)
class RestoreCompletionTest(TestCase):
def test_restore_complete(self):
self.assertTrue(
s3.restore_complete(
('ongoing-request="false", '
'expiry-date="Fri, 23 Dec 2012 00:00:00 GMT"')))
self.assertFalse(s3.restore_complete('ongoing-request="true"'))
class BucketScanLogTests(TestCase):
def setUp(self):
self.log_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.log_dir)
self.log = s3.BucketScanLog(self.log_dir, 'test')
def test_scan_log(self):
first_five = list(range(5))
next_five = list(range(5, 10))
with self.log:
self.log.add(first_five)
self.log.add(next_five)
with open(self.log.path) as fh:
data = json.load(fh)
self.assertEqual(
data,
[first_five, next_five, []])
def destroyBucket(client, bucket):
for o in client.list_objects(Bucket=bucket).get('Contents', ()):
client.delete_object(Bucket=bucket, Key=o['Key'])
client.delete_bucket(Bucket=bucket)
def destroyVersionedBucket(client, bucket):
    for o in client.list_object_versions(Bucket=bucket).get('Versions', ()):
client.delete_object(
Bucket=bucket, Key=o['Key'], VersionId=o['VersionId'])
client.delete_bucket(Bucket=bucket)
def generateBucketContents(s3, bucket, contents=None):
default_contents = {
'home.txt': 'hello',
'AWSLogs/2015/10/10': 'out',
'AWSLogs/2015/10/11': 'spot'}
if contents is None:
contents = default_contents
b = s3.Bucket(bucket)
for k, v in contents.items():
key = s3.Object(bucket, k)
key.put(
Body=v,
ContentLength=len(v),
ContentType='text/plain')
class BucketMetrics(BaseTest):
def test_metrics(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [])
session_factory = self.replay_flight_data('test_s3_metrics')
p = self.load_policy({
'name': 's3-obj-count',
'resource': 's3',
'filters': [
{'type': 'metrics',
'value': 10000,
'name': 'NumberOfObjects',
'op': 'greater-than'}],
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['Name'], 'custodian-skunk-trails')
self.assertTrue('c7n.metrics' in resources[0])
self.assertTrue(
'AWS/S3.NumberOfObjects.Average' in resources[0]['c7n.metrics'])
class BucketInventory(BaseTest):
def test_inventory(self):
bname = 'custodian-test-data'
inv_bname = 'custodian-inv'
inv_name = 'something'
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [])
session_factory = self.replay_flight_data('test_s3_inventory')
client = session_factory().client('s3')
client.create_bucket(Bucket=bname)
client.create_bucket(Bucket=inv_bname)
self.addCleanup(client.delete_bucket, Bucket=bname)
self.addCleanup(client.delete_bucket, Bucket=inv_bname)
inv = {
'Destination': {
'S3BucketDestination': {
'Bucket': "arn:aws:s3:::%s" % inv_bname,
'Format': 'CSV',
'Prefix': 'abcdef'},
},
'IsEnabled': True,
'Id': inv_name,
'IncludedObjectVersions': 'All',
'OptionalFields': ['LastModifiedDate'],
'Schedule': {
'Frequency': 'Daily'}
}
client.put_bucket_inventory_configuration(
Bucket=bname,
Id=inv_name,
InventoryConfiguration=inv)
p = self.load_policy({
'name': 's3-inv',
'resource': 's3',
'filters': [
{'Name': 'custodian-test-data'}],
'actions': [
{'type': 'set-inventory',
'destination': inv_bname,
'name': inv_name}]
}, session_factory=session_factory)
self.assertEqual(len(p.run()), 1)
invs = client.list_bucket_inventory_configurations(
Bucket=bname).get('InventoryConfigurationList')
self.assertTrue(invs)
self.assertEqual(sorted(invs[0]['OptionalFields']), ['LastModifiedDate', 'Size'])
p = self.load_policy({
'name': 's3-inv',
'resource': 's3',
'filters': [
{'Name': 'custodian-test-data'}],
'actions': [
{'type': 'set-inventory',
'destination': inv_bname,
'state': 'absent',
'name': inv_name}]
}, session_factory=session_factory)
self.assertEqual(len(p.run()), 1)
self.assertFalse(
client.list_bucket_inventory_configurations(
Bucket=bname).get('InventoryConfigurationList'))
class BucketDelete(BaseTest):
def test_delete_replicated_bucket(self):
        # The IAM setup for replication is a little too involved to duplicate
        # in a test. Preconditions: the custodian-replicated and
        # custodian-replicated-west buckets are set up with replication; we
        # are deleting the custodian-replicated (source) bucket.
bname = 'custodian-replicated'
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(
s3, 'S3_AUGMENT_TABLE',
[('get_bucket_replication', 'Replication', None, None),
('get_bucket_versioning', 'Versioning', None, None)])
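        # Note: each S3_AUGMENT_TABLE entry appears to be a tuple of
        # (client method, annotation key, default value, response sub-key);
        # patching it here limits per-bucket augmentation to the replication
        # and versioning configurations this test needs.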
session_factory = self.replay_flight_data(
'test_s3_delete_replicated_bucket')
p = self.load_policy({
'name': 's3-delete-bucket',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [{'type': 'delete', 'remove-contents': True}]
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
session = session_factory()
client = session.client('s3')
buckets = set([b['Name'] for b in client.list_buckets()['Buckets']])
self.assertFalse(bname in buckets)
def test_delete_versioned_bucket(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE',
[('get_bucket_versioning', 'Versioning', None, None)])
session_factory = self.replay_flight_data(
'test_s3_delete_versioned_bucket')
session = session_factory()
client = session.client('s3')
s3_resource = session.resource('s3')
bname = 'custodian-byebye'
client.create_bucket(Bucket=bname)
client.put_bucket_versioning(
Bucket=bname,
VersioningConfiguration={'Status': 'Enabled'})
generateBucketContents(s3_resource, bname)
# Generate some versions
generateBucketContents(s3_resource, bname)
upload_info = client.create_multipart_upload(
Bucket=bname, Key='abcdef12345')
client.upload_part(
Body='1' * 1024,
Bucket=bname,
Key='abcdef12345',
PartNumber=1,
UploadId=upload_info['UploadId'])
p = self.load_policy({
'name': 's3-delete-bucket',
'resource': 's3',
'filters': [
{'Name': bname}],
'actions': [{'type': 'delete', 'remove-contents': True}]
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
buckets = set([b['Name'] for b in client.list_buckets()['Buckets']])
self.assertFalse(bname in buckets)
def test_delete_bucket(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(
s3.DeleteBucket, 'executor_factory', MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [])
session_factory = self.replay_flight_data('test_s3_delete_bucket')
session = session_factory()
client = session.client('s3')
bname = 'custodian-byebye'
client.create_bucket(Bucket=bname)
generateBucketContents(session.resource('s3'), bname)
p = self.load_policy({
'name': 's3-delete-bucket',
'resource': 's3',
'filters': [
{'Name': bname}],
'actions': [{'type': 'delete', 'remove-contents': True}]
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
buckets = set([b['Name'] for b in client.list_buckets()['Buckets']])
self.assertFalse(bname in buckets)
def test_delete_bucket_with_failure(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(s3.DeleteBucket, 'executor_factory', MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [])
session_factory = self.replay_flight_data('test_s3_delete_bucket_with_failure')
session = session_factory()
client = session.client('s3')
bname = 'custodian-perm-denied'
client.create_bucket(Bucket=bname)
generateBucketContents(session.resource('s3'), bname)
# This bucket policy prevents viewing contents
policy = {
"Version": "2012-10-17",
"Id": "Policy1487359365244",
"Statement": [{
"Sid": "Stmt1487359361981",
"Effect": "Deny",
"Principal": "*",
"Action": "s3:DeleteBucket",
"Resource":"arn:aws:s3:::{}".format(bname)
}]
}
client.put_bucket_policy(Bucket=bname, Policy=json.dumps(policy))
p = self.load_policy({
'name': 's3-delete-bucket',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [{'type': 'delete', 'remove-contents': True}]
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
buckets = set([b['Name'] for b in client.list_buckets()['Buckets']])
self.assertIn(bname, buckets)
# Make sure file got written
denied_file = os.path.join(p.resource_manager.log_dir, 'denied.json')
self.assertIn(bname, open(denied_file).read())
#
# Now delete it for real
#
client.delete_bucket_policy(Bucket=bname)
resources = p.run()
self.assertEqual(len(resources), 1)
buckets = set([b['Name'] for b in client.list_buckets()['Buckets']])
self.assertFalse(bname in buckets)
class BucketTag(BaseTest):
def test_tag_bucket(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(
s3.EncryptExtantKeys, 'executor_factory', MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_tagging', 'Tags', [], 'TagSet')])
session_factory = self.replay_flight_data('test_s3_tag')
session = session_factory()
client = session.client('s3')
bname = 'custodian-tagger'
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
client.put_bucket_tagging(
Bucket=bname,
Tagging={'TagSet': [
{'Key': 'rudolph', 'Value': 'reindeer'},
{'Key': 'platform', 'Value': 'lxwee'}]})
p = self.load_policy({
'name': 's3-tagger',
'resource': 's3',
'filters': [
{'Name': bname}],
'actions': [
{'type': 'tag', 'tags': {
'borrowed': 'new', 'platform': 'serverless'}}]
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
tags = {t['Key']: t['Value'] for t in client.get_bucket_tagging(
Bucket=bname)['TagSet']}
self.assertEqual(
{'rudolph': 'reindeer',
'platform': 'serverless',
'borrowed': 'new'},
tags)
class S3ConfigSource(ConfigTest):
maxDiff = None
@functional
def test_normalize(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
augments = list(s3.S3_AUGMENT_TABLE)
augments.remove(('get_bucket_location', 'Location', None, None))
self.patch(s3, 'S3_AUGMENT_TABLE', augments)
bname = 'custodian-test-data-23'
session_factory = self.replay_flight_data('test_s3_normalize')
session = session_factory()
queue_url = self.initialize_config_subscriber(session)
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
sns = session.client('sns')
notify_topic = sns.create_topic(Name=bname).get('TopicArn')
sns.set_topic_attributes(
TopicArn=notify_topic,
AttributeName='Policy',
AttributeValue=json.dumps({
'Statement': [{
'Action': 'SNS:Publish',
'Effect': 'Allow',
'Resource': notify_topic,
'Principal': {'Service': 's3.amazonaws.com'}}]}))
self.addCleanup(sns.delete_topic, TopicArn=notify_topic)
public = 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers'
client.put_bucket_acl(
Bucket=bname,
AccessControlPolicy={
"Owner": {
"DisplayName": "mandeep.bal",
"ID": "e7c8bb65a5fc49cf906715eae09de9e4bb7861a96361ba79b833aa45f6833b15",
},
'Grants': [
{'Grantee': {
'Type': 'Group',
'URI': public},
'Permission': 'READ'},
{'Grantee': {
'Type': 'Group',
'URI': 'http://acs.amazonaws.com/groups/s3/LogDelivery'},
'Permission': 'WRITE'},
{'Grantee': {
'Type': 'Group',
'URI': 'http://acs.amazonaws.com/groups/s3/LogDelivery'},
'Permission': 'READ_ACP'},
]})
client.put_bucket_tagging(
Bucket=bname,
Tagging={'TagSet': [
{'Key': 'rudolph', 'Value': 'rabbit'},
{'Key': 'platform', 'Value': 'tyre'}]})
client.put_bucket_logging(
Bucket=bname,
BucketLoggingStatus={
'LoggingEnabled': {
'TargetBucket': bname,
'TargetPrefix': 's3-logs/'}})
client.put_bucket_versioning(
Bucket=bname,
VersioningConfiguration={'Status': 'Enabled'})
client.put_bucket_accelerate_configuration(
Bucket=bname,
AccelerateConfiguration={'Status': 'Enabled'})
client.put_bucket_website(
Bucket=bname,
WebsiteConfiguration={
'IndexDocument': {
'Suffix': 'index.html'}})
client.put_bucket_policy(
Bucket=bname,
Policy=json.dumps({
'Version': '2012-10-17',
'Statement': [{
'Sid': 'Zebra',
'Effect': 'Deny',
'Principal': '*',
'Action': 's3:PutObject',
'Resource': 'arn:aws:s3:::%s/*' % bname,
'Condition': {
'StringNotEquals': {
's3:x-amz-server-side-encryption': [
'AES256', 'aws:kms']}}}]}))
client.put_bucket_notification_configuration(
Bucket=bname,
NotificationConfiguration={
'TopicConfigurations': [{
'Id': bname,
'TopicArn': notify_topic,
'Events': ['s3:ObjectCreated:*'],
'Filter': {
'Key': {
'FilterRules': [
{'Name': 'prefix',
'Value': 's3-logs/'}
]
}
}
}]
})
p = self.load_policy({
'name': 's3-inv',
'resource': 's3',
'filters': [{'Name': bname}]}, session_factory=session_factory)
manager = p.get_resource_manager()
resource_a = manager.get_resources([bname])[0]
results = self.wait_for_config(session, queue_url, bname)
resource_b = s3.ConfigS3(manager).load_resource(results[0])
self.maxDiff = None
for k in ('Logging',
'Policy',
'Versioning',
'Name',
'Website'):
self.assertEqual(resource_a[k], resource_b[k])
self.assertEqual(
{t['Key']: t['Value'] for t in resource_a.get('Tags')},
{t['Key']: t['Value'] for t in resource_b.get('Tags')})
def test_config_normalize_notification(self):
event = event_data('s3-rep-and-notify.json', 'config')
p = self.load_policy({'name': 's3cfg', 'resource': 's3'})
source = p.resource_manager.get_source('config')
resource = source.load_resource(event)
self.assertEqual(
resource['Notification'],
{u'TopicConfigurations': [
{u'Filter': {
u'Key': {
u'FilterRules': [
{u'Name': 'Prefix', u'Value': 'oids/'}]}},
u'Id': 'rabbit',
u'TopicArn': 'arn:aws:sns:us-east-1:644160558196:custodian-test-data-22',
u'Events': ['s3:ReducedRedundancyLostObject',
's3:ObjectCreated:CompleteMultipartUpload']}],
u'LambdaFunctionConfigurations': [
{u'Filter': {
u'Key': {
u'FilterRules': [
{u'Name': 'Prefix', u'Value': 'void/'}]}},
u'LambdaFunctionArn': 'arn:aws:lambda:us-east-1:644160558196:function:lambdaenv',
u'Id': 'ZDAzZDViMTUtNGU3MS00ZWIwLWI0MzgtOTZiMWQ3ZWNkZDY1',
u'Events': ['s3:ObjectRemoved:Delete']}],
u'QueueConfigurations': [
{u'Filter': {
u'Key': {
u'FilterRules': [
{u'Name': 'Prefix', u'Value': 'images/'}]}},
u'Id': 'OGQ5OTAyNjYtYjBmNy00ZTkwLWFiMjUtZjE4ODBmYTgwNTE0',
u'QueueArn': 'arn:aws:sqs:us-east-1:644160558196:test-queue',
u'Events': ['s3:ObjectCreated:*']}]})
def test_config_normalize_lifecycle_and_predicate(self):
event = event_data('s3-lifecycle-and-predicate.json', 'config')
p = self.load_policy({'name': 's3cfg', 'resource': 's3'})
source = p.resource_manager.get_source('config')
resource = source.load_resource(event)
rfilter = resource['Lifecycle']['Rules'][0]['Filter']
self.assertEqual(
rfilter['And']['Prefix'],
'docs/')
self.assertEqual(
rfilter['And']['Tags'],
[{"Value": "Archive", "Key": "Workflow"},
{"Value": "Complete", "Key": "State"}])
def test_config_normalize_lifecycle(self):
event = event_data('s3-lifecycle.json', 'config')
p = self.load_policy({'name': 's3cfg', 'resource': 's3'})
source = p.resource_manager.get_source('config')
resource = source.load_resource(event)
self.assertEqual(
resource['Lifecycle'], {
"Rules": [
{
"Status": "Enabled",
"NoncurrentVersionExpiration": {
"NoncurrentDays": 545
},
"Filter": {
"Prefix": "docs/"
},
"Transitions": [{
"Days": 30,
"StorageClass": "STANDARD_IA"
}],
"Expiration": {
"ExpiredObjectDeleteMarker": True
},
"AbortIncompleteMultipartUpload": {
"DaysAfterInitiation": 7
},
"NoncurrentVersionTransitions": [{
"NoncurrentDays": 180,
"StorageClass": "GLACIER"
}],
"ID": "Docs"
}
]
})
def test_config_normalize_replication(self):
event = event_data('s3-rep-and-notify.json', 'config')
p = self.load_policy({'name': 's3cfg', 'resource': 's3'})
source = p.resource_manager.get_source('config')
resource = source.load_resource(event)
self.assertEqual(
resource['Replication'], {
u'ReplicationConfiguration': {
u'Rules': [{u'Status': 'Enabled',
u'Prefix': '',
u'Destination': {
u'Bucket': 'arn:aws:s3:::testing-west'},
u'ID': 'testing-99'}],
u'Role': (
'arn:aws:iam::644160558196:role'
'/custodian-replicated-custodian-replicated'
'-west-s3-repl-role')}})
def test_config_normalize_website(self):
event = event_data('s3-website.json', 'config')
p = self.load_policy({'name': 's3cfg', 'resource': 's3'})
source = p.resource_manager.get_source('config')
self.maxDiff = None
resource = source.load_resource(event)
self.assertEqual(
resource['Website'],
{u'IndexDocument': {u'Suffix': 'index.html'},
u'RoutingRules': [
{u'Redirect': {u'ReplaceKeyWith': 'error.html'},
u'Condition': {u'HttpErrorCodeReturnedEquals': '404',
u'KeyPrefixEquals': 'docs/'}}]})
def test_load_item_resource(self):
event = event_data('s3.json', 'config')
p = self.load_policy({
'name': 's3cfg',
'resource': 's3'})
source = p.resource_manager.get_source('config')
self.maxDiff = None
resource = source.load_resource(event)
resource.pop('CreationDate')
self.assertEqual(
{'Planet': 'Earth', 'Verbose': 'Game'},
{t['Key']: t['Value'] for t in resource.pop('Tags')}
)
self.assertEqual(
resource,
{'Location': {'LocationConstraint': u'us-east-2'},
'Name': u'config-rule-sanity',
'Lifecycle': None,
'Website': None,
'Policy': None,
'Replication': None,
'Versioning': None,
'Logging': None,
'Notification': None,
"Acl": {
"Owner": {
"ID": u"e7c8bb65a5fc49cf906715eae09de9e4bb7861a96361ba79b833aa45f6833b15"
},
"Grants": [
{
"Grantee": {
"Type": "CanonicalUser",
"ID": u"e7c8bb65a5fc49cf906715eae09de9e4bb7861a96361ba79b833aa45f6833b15"
},
"Permission": "FULL_CONTROL"
}
]}
})
class S3Test(BaseTest):
def test_multipart_large_file(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(
s3.EncryptExtantKeys, 'executor_factory', MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [])
self.patch(s3, 'MAX_COPY_SIZE', (1024 * 1024 * 6.1))
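        # MAX_COPY_SIZE is patched below the 16MB object created later, which
        # (as the constant name suggests) should push the encrypt-keys action
        # onto its multipart-copy path for this object -- a hedged reading of
        # the patch rather than a documented guarantee.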
session_factory = self.replay_flight_data('test_s3_multipart_file')
session = session_factory()
client = session.client('s3')
bname = 'custodian-largef-test'
key = 'hello'
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
        class wrapper(object):
            """Bounded file-like shim: reads at most ``length`` bytes from
            the wrapped stream and reports that size via tell()/seek(), so a
            /dev/zero handle can stand in for a large object body."""
            def __init__(self, d, length):
                self.d = d
self.len = length
self.counter = length
def read(self, size):
if self.counter == 0:
return ""
if size > self.counter:
size = self.counter
self.counter = 0
else:
self.counter -= size
return self.d.read(size)
def seek(self, offset, whence=0):
if whence == 2 and offset == 0:
self.counter = 0
elif whence == 0 and offset == 0:
self.counter = self.len
def tell(self):
return self.len - self.counter
size = 1024 * 1024 * 16
client.put_object(
Bucket=bname, Key=key,
Metadata={'planet': 'earth'},
Body=wrapper(open('/dev/zero'), size), ContentLength=size)
info = client.head_object(Bucket=bname, Key=key)
p = self.load_policy({
'name': 'encrypt-obj',
'resource': 's3',
'filters': [{"Name": bname}],
'actions': ['encrypt-keys']}, session_factory=session_factory)
p.run()
post_info = client.head_object(Bucket=bname, Key='hello')
self.assertTrue('ServerSideEncryption' in post_info)
self.assertEqual(post_info['Metadata'], {'planet': 'earth'})
# etags on multipart do not reflect md5 :-(
        self.assertEqual(info['ContentLength'], post_info['ContentLength'])
def test_self_log(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_logging', 'Logging', None, 'LoggingEnabled')])
session_factory = self.replay_flight_data('test_s3_self_log_target')
session = session_factory()
client = session.client('s3')
bname = 'custodian-log-test'
client.create_bucket(Bucket=bname)
self.addCleanup(client.delete_bucket, Bucket=bname)
client.put_bucket_acl(
Bucket=bname,
AccessControlPolicy={
"Owner": {
"DisplayName": "k_vertigo",
"ID": "904fc4c4790937100e9eb293a15e6a0a1f265a064888055b43d030034f8881ee"
},
'Grants': [
{'Grantee': {
'Type': 'Group',
'URI': 'http://acs.amazonaws.com/groups/s3/LogDelivery'},
'Permission': 'WRITE'},
{'Grantee': {
'Type': 'Group',
'URI': 'http://acs.amazonaws.com/groups/s3/LogDelivery'},
'Permission': 'READ_ACP'},
]})
client.put_bucket_logging(
Bucket=bname,
BucketLoggingStatus={
'LoggingEnabled': {
'TargetBucket': bname,
'TargetPrefix': 's3-logs/'}})
p = self.load_policy({
'name': 's3-log-targets',
'resource': 's3',
'filters': [
{'Name': bname},
{'type': 'is-log-target', 'self': True}]},
session_factory=session_factory)
resources = p.run()
names = [b['Name'] for b in resources]
self.assertEqual(names[0], bname)
self.assertEqual(len(names), 1)
def test_log_target(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_logging', 'Logging', None, 'LoggingEnabled')])
session_factory = self.replay_flight_data('test_s3_log_target')
session = session_factory()
client = session.client('s3')
bname = 'custodian-log-test'
client.create_bucket(Bucket='custodian-log-test')
self.addCleanup(client.delete_bucket, Bucket=bname)
client.put_bucket_acl(
Bucket=bname,
AccessControlPolicy={
"Owner": {
"DisplayName": "k_vertigo",
"ID": "904fc4c4790937100e9eb293a15e6a0a1f265a064888055b43d030034f8881ee"
},
'Grants': [
{'Grantee': {
'Type': 'Group',
'URI': 'http://acs.amazonaws.com/groups/s3/LogDelivery'},
'Permission': 'WRITE'},
{'Grantee': {
'Type': 'Group',
'URI': 'http://acs.amazonaws.com/groups/s3/LogDelivery'},
'Permission': 'READ_ACP'},
]})
client.put_bucket_logging(
Bucket=bname,
BucketLoggingStatus={
'LoggingEnabled': {
'TargetBucket': bname,
'TargetPrefix': 's3-logs/'}})
p = self.load_policy({
'name': 's3-log-targets',
'resource': 's3',
'filters': ['is-log-target']}, session_factory=session_factory)
resources = p.run()
names = [b['Name'] for b in resources]
self.assertTrue(bname in names)
def test_has_statement(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(
s3.MissingPolicyStatementFilter, 'executor_factory',
MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_policy', 'Policy', None, 'Policy'),
])
session_factory = self.replay_flight_data('test_s3_has_statement')
bname = "custodian-policy-test"
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
client.put_bucket_policy(
Bucket=bname,
Policy=json.dumps({
'Version': '2012-10-17',
'Statement': [{
'Sid': 'Zebra',
'Effect': 'Deny',
'Principal': '*',
'Action': 's3:PutObject',
'Resource': 'arn:aws:s3:::%s/*' % bname,
'Condition': {
'StringNotEquals': {
's3:x-amz-server-side-encryption': [
'AES256', 'aws:kms']}}}]}))
p = self.load_policy({
'name': 's3-has-policy',
'resource': 's3',
'filters': [
{'Name': bname},
{'type': 'has-statement',
'statement_ids': ['Zebra']}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_no_encryption_statement(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(
s3.MissingPolicyStatementFilter, 'executor_factory',
MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_policy', 'Policy', None, 'Policy'),
])
session_factory = self.replay_flight_data('test_s3_no_encryption_statement')
bname = "custodian-encryption-test"
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
client.put_bucket_policy(
Bucket=bname,
Policy=json.dumps({
                'Version': '2012-10-17',
'Statement': [{
'Sid': 'RequiredEncryptedObject',
'Effect': 'Allow',
'Principal': '*',
'Action': 's3:PutObject',
'Resource': 'arn:aws:s3:::%s/*' % bname,
'Condition': {
'StringNotEquals': {
's3:x-amz-server-side-encryption': [
'AES256', 'aws:kms']}}}]}))
p = self.load_policy({
'name': 's3-no-encryption-policy',
'resource': 's3',
'filters': [
{'Name': bname},
{'type': 'no-encryption-statement'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_missing_policy_statement(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(
s3.MissingPolicyStatementFilter, 'executor_factory',
MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_policy', 'Policy', None, 'Policy'),
])
session_factory = self.replay_flight_data('test_s3_missing_policy')
bname = "custodian-encrypt-test"
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
client.put_bucket_policy(
Bucket=bname,
Policy=json.dumps({
'Version': '2012-10-17',
'Statement': [{
'Sid': 'Zebra',
'Effect': 'Deny',
'Principal': '*',
'Action': 's3:PutObject',
'Resource': 'arn:aws:s3:::%s/*' % bname,
'Condition': {
'StringNotEquals': {
's3:x-amz-server-side-encryption': [
'AES256', 'aws:kms']}}}]}))
p = self.load_policy({
'name': 'encrypt-keys',
'resource': 's3',
'filters': [
{'Name': bname},
{'type': 'missing-policy-statement',
'statement_ids': ['RequireEncryptedPutObject']}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_enable_versioning(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_versioning', 'Versioning', None, None)])
session_factory = self.replay_flight_data('test_s3_enable_versioning')
bname = 'superduper-and-magic'
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
p = self.load_policy({
'name': 's3-version',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': ['toggle-versioning']
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['Name'], bname)
# eventual consistency fun for recording
#time.sleep(10)
versioning = client.get_bucket_versioning(Bucket=bname)['Status']
self.assertEqual('Enabled', versioning)
# running against a bucket with versioning already on
# is idempotent
resources = p.run()
self.assertEqual(len(resources), 1)
p = self.load_policy({
'name': 's3-version',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [
{'type': 'toggle-versioning', 'enabled': False}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
# eventual consistency fun for recording
#time.sleep(10)
versioning = client.get_bucket_versioning(Bucket=bname)['Status']
self.assertEqual('Suspended', versioning)
def test_enable_logging(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_logging', 'Logging', None, None)])
session_factory = self.replay_flight_data('test_s3_enable_logging')
bname = 'superduper-and-magic'
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
p = self.load_policy({
'name': 's3-version',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [
{'type': 'toggle-logging',
'target_bucket': bname}]
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]['Name'], bname)
# eventual consistency fun for recording
#time.sleep(10)
        logging_conf = client.get_bucket_logging(Bucket=bname).get('LoggingEnabled')
        self.assertTrue(logging_conf)
# running against a bucket with logging already on
# is idempotent
resources = p.run()
self.assertEqual(len(resources), 1)
p = self.load_policy({
'name': 's3-version',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [
{'type': 'toggle-logging', 'enabled': False}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
# eventual consistency fun for recording
#time.sleep(10)
        logging_conf = client.get_bucket_logging(Bucket=bname).get('LoggingEnabled')
        self.assertFalse(logging_conf)
def test_encrypt_policy(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_policy', 'Policy', None, 'Policy'),
])
session_factory = self.replay_flight_data('test_s3_encrypt_policy')
bname = "custodian-encrypt-test"
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
p = self.load_policy({
'name': 'encrypt-keys',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': ['encryption-policy']}, session_factory=session_factory)
resources = p.run()
try:
resource = session.resource('s3')
key = resource.Object(bname, 'home.txt')
key.put(Body='hello', ContentLength=5, ContentType='text/plain')
except ClientError as e:
self.assertEqual(e.response['Error']['Code'], 'AccessDenied')
else:
self.fail("Encryption required policy")
def test_remove_policy_none_extant(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_policy', 'Policy', None, 'Policy'),
])
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_s3_remove_empty_policy')
bname = "custodian-policy-test"
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
p = self.load_policy({
'name': 'remove-policy',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [
{'type': 'remove-statements', 'statement_ids': [
'Zebra', 'Moon']}],
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertRaises(ClientError, client.get_bucket_policy, Bucket=bname)
def test_remove_policy(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_policy', 'Policy', None, 'Policy'),
])
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(
s3.RemovePolicyStatement, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_s3_remove_policy')
bname = "custodian-policy-test"
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
client.put_bucket_policy(
Bucket=bname,
Policy=json.dumps({
'Version': '2012-10-17',
'Statement': [{
'Sid': 'Zebra',
'Effect': 'Deny',
'Principal': '*',
'Action': 's3:PutObject',
'Resource': 'arn:aws:s3:::%s/*' % bname,
'Condition': {
'StringNotEquals': {
's3:x-amz-server-side-encryption': [
'AES256', 'aws:kms']}}}]}))
self.addCleanup(destroyBucket, client, bname)
p = self.load_policy({
'name': 'remove-policy',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [
{'type': 'remove-statements', 'statement_ids': [
'Zebra', 'Moon']}],
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertRaises(ClientError, client.get_bucket_policy, Bucket=bname)
def test_remove_policy_matched(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_policy', 'Policy', None, 'Policy'),
])
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(
s3.RemovePolicyStatement, 'executor_factory', MainThreadExecutor)
self.patch(MainThreadExecutor, 'async', False)
bname = "custodian-policy-test"
statement = {
'Sid': 'Zebra',
'Effect': 'Deny',
'Principal': '*',
'Action': 's3:PutObject',
'Resource': 'arn:aws:s3:::%s/*' % bname,
'Condition': {
'StringNotEquals': {
's3:x-amz-server-side-encryption': [
'AES256', 'aws:kms']}}}
process_buckets = s3.RemovePolicyStatement.process
def enrich(self, buckets):
buckets[0]['CrossAccountViolations'] = [statement]
process_buckets(self, buckets)
self.patch(s3.RemovePolicyStatement, 'process', enrich)
session_factory = self.replay_flight_data('test_s3_remove_policy')
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
client.put_bucket_policy(
Bucket=bname,
Policy=json.dumps({
'Version': '2012-10-17', 'Statement': [statement]}))
self.addCleanup(destroyBucket, client, bname)
p = self.load_policy({
'name': 'remove-policy',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [
{'type': 'remove-statements', 'statement_ids': 'matched'}],
}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertRaises(ClientError, client.get_bucket_policy, Bucket=bname)
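# In normal (non-test) usage the 'matched' statement_ids value is paired with
# a filter that annotates buckets with CrossAccountViolations, which is what
# the enrich() patch above simulates. A YAML sketch (illustrative):
#
#   policies:
#     - name: s3-remove-cross-account
#       resource: s3
#       filters:
#         - type: cross-account
#       actions:
#         - type: remove-statements
#           statement_ids: matched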
def test_attach_encrypt_requires_role(self):
self.assertRaises(
ValueError, self.load_policy,
{'name': 'attach-encrypt',
'resource': 's3',
'actions': [{'type': 'attach-encrypt'}]})
@skip_if_not_validating
def test_attach_encrypt_accepts_topic(self):
p = self.load_policy(
{'name': 'attach-encrypt',
'resource': 's3',
'actions': [{
'type': 'attach-encrypt', 'role': '-', 'topic': 'default'}]})
self.assertEqual(p.data['actions'][0]['topic'], 'default')
def test_create_bucket_event(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_policy', 'Policy', None, 'Policy'),
])
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_s3_create')
bname = 'custodian-create-bucket-v4'
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
p = self.load_policy({
'name': 'bucket-create-v2',
'resource': 's3',
'mode': {
'type': 'cloudtrail',
'role': 'arn:aws:iam::619193117841:role/CustodianDemoRole',
'events': ['CreateBucket'],
},
'actions': [
'encryption-policy']}, session_factory=session_factory)
p.push(event_data('event-cloud-trail-create-bucket.json'), None)
try:
result = client.get_bucket_policy(Bucket=bname)
except ClientError:
self.fail("Could not get bucket policy")
self.assertTrue('Policy' in result)
policy = json.loads(result['Policy'])
self.assertEqual(
policy,
{u'Statement': [
{u'Action': u's3:PutObject',
u'Condition': {
u'StringNotEquals': {
u's3:x-amz-server-side-encryption': [
u'AES256',
u'aws:kms']}},
u'Effect': u'Deny',
u'Principal': u'*',
u'Resource': u'arn:aws:s3:::custodian-create-bucket-v4/*',
u'Sid': u'RequireEncryptedPutObject'}],
u'Version': u'2012-10-17'})
def test_attach_encrypt_via_bucket_notification(self):
self.patch(s3, 'S3_AUGMENT_TABLE',
[('get_bucket_location', 'Location', None, None)])
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_s3_attach_encrypt_via_bucket_notification')
bname = "custodian-attach-encrypt-test"
role = "arn:aws:iam::644160558196:role/custodian-mu"
self.maxDiff = None
session = session_factory(region='us-west-2')
client = session.client('s3')
client.create_bucket(
Bucket=bname,
CreateBucketConfiguration={
'LocationConstraint': 'us-west-2'})
self.addCleanup(destroyBucket, client, bname)
p = self.load_policy({
'name': 'attach-encrypt',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [{
'type': 'attach-encrypt',
'role': role}]
}, session_factory=session_factory)
self.addCleanup(
LambdaManager(functools.partial(session_factory, region='us-west-2')).remove,
s3crypt.get_function(None, role))
resources = p.run()
self.assertEqual(len(resources), 1)
#time.sleep(10)
notifications = client.get_bucket_notification_configuration(
Bucket=bname)
notifications.pop('ResponseMetadata')
self.assertEqual(
notifications,
{'LambdaFunctionConfigurations': [{
'Events': ['s3:ObjectCreated:*'],
'Id': 'c7n-s3-encrypt',
'LambdaFunctionArn':'arn:aws:lambda:us-west-2:644160558196:function:c7n-s3-encrypt'}]})
client.put_object(
Bucket=bname, Key='hello-world.txt',
Body='hello world', ContentType='text/plain')
#time.sleep(30)
info = client.head_object(Bucket=bname, Key='hello-world.txt')
self.assertTrue('ServerSideEncryption' in info)
def test_attach_encrypt_via_new_topic(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [(
'get_bucket_notification_configuration', 'Notification', None,
None)])
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_s3_attach_encrypt_via_new_topic')
bname = "custodian-attach-encrypt-test"
role = "arn:aws:iam::644160558196:role/custodian-mu"
self.maxDiff = None
session = session_factory(region='us-east-1')
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
p = self.load_policy({
'name': 'attach-encrypt',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [{
'type': 'attach-encrypt',
'role': role,
'topic': 'default'}]
}, session_factory=session_factory)
self.addCleanup(
LambdaManager(
functools.partial(session_factory, region='us-east-1')).remove,
s3crypt.get_function(None, role))
arn = 'arn:aws:sns:us-east-1:644160558196:custodian-attach-encrypt-test'
self.addCleanup(session.client('sns').delete_topic, TopicArn=arn)
self.addCleanup(session.client('logs').delete_log_group,
logGroupName='/aws/lambda/c7n-s3-encrypt')
# Check that the policy wires up the SNS topic, notification and lambda correctly.
resources = p.run()
self.assertEqual(len(resources), 1)
#time.sleep(10)
topic_notifications = client.get_bucket_notification_configuration(
Bucket=bname).get('TopicConfigurations', [])
us = [t for t in topic_notifications if t.get('TopicArn') == arn]
self.assertEqual(len(us), 1)
# Check that a newly uploaded object gets encrypted by the lambda.
client.put_object(
Bucket=bname, Key='hello-world.txt',
Body='hello world', ContentType='text/plain')
#time.sleep(30)
info = client.head_object(Bucket=bname, Key='hello-world.txt')
self.assertTrue('ServerSideEncryption' in info)
def test_attach_encrypt_via_implicit_existing_topic(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [(
'get_bucket_notification_configuration', 'Notification', None,
None)])
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_s3_attach_encrypt_via_implicit_existing_topic')
bname = "custodian-attach-encrypt-test"
role = "arn:aws:iam::644160558196:role/custodian-mu"
self.maxDiff = None
session = session_factory(region='us-east-1')
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
# Create two sns topics
topic_configs = []
for suffix in ('.jpg', '.txt'):
sns = session.client('sns')
existing_topic_arn = sns.create_topic(
Name='existing-{}-{}'.format(bname, suffix[1:]))['TopicArn']
policy = {
'Statement': [{
'Action': 'SNS:Publish',
'Effect': 'Allow',
'Resource': existing_topic_arn,
'Principal': {'Service': 's3.amazonaws.com'}}]}
sns.set_topic_attributes(
TopicArn=existing_topic_arn,
AttributeName='Policy',
AttributeValue=json.dumps(policy))
self.addCleanup(session.client('sns').delete_topic,
TopicArn=existing_topic_arn)
topic_configs.append({
'TopicArn': existing_topic_arn,
'Events': ['s3:ObjectCreated:*'],
'Filter': {'Key': {'FilterRules': [{
'Name': 'suffix',
'Value': suffix}]}}})
session.resource('s3').BucketNotification(bname).put(
NotificationConfiguration={'TopicConfigurations': topic_configs})
# Now define the policy.
p = self.load_policy({
'name': 'attach-encrypt',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [{
'type': 'attach-encrypt',
'role': role,
'topic': 'default'}]
}, session_factory=session_factory)
self.addCleanup(
LambdaManager(
functools.partial(session_factory, region='us-east-1')).remove,
s3crypt.get_function(None, role))
self.addCleanup(session.client('logs').delete_log_group,
logGroupName='/aws/lambda/c7n-s3-encrypt')
# Check that the policy wires up the notification and lambda correctly.
resources = p.run()
self.assertEqual(len(resources), 1)
#time.sleep(10)
notifies = client.get_bucket_notification_configuration(
Bucket=bname).get('TopicConfigurations', [])
existing = [t for t in notifies if 'existing' in t['TopicArn']]
self.assertEqual(len(existing), 2)
# Check that a newly uploaded object gets encrypted by the lambda.
client.put_object(
Bucket=bname, Key='hello-world.txt',
Body='hello world', ContentType='text/plain')
#time.sleep(30)
info = client.head_object(Bucket=bname, Key='hello-world.txt')
self.assertTrue('ServerSideEncryption' in info)
def test_attach_encrypt_via_explicit_existing_topic(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [(
'get_bucket_notification_configuration', 'Notification', None,
None)])
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data(
'test_s3_attach_encrypt_via_explicit_existing_topic')
bname = "custodian-attach-encrypt-test"
role = "arn:aws:iam::644160558196:role/custodian-mu"
self.maxDiff = None
session = session_factory(region='us-east-1')
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
# Create an sns topic
topic_configs = []
sns = session.client('sns')
existing_topic_arn = sns.create_topic(
Name='preexisting-{}'.format(bname))['TopicArn']
policy = {
'Statement': [{
'Action': 'SNS:Publish',
'Effect': 'Allow',
'Resource': existing_topic_arn,
'Principal': {'Service': 's3.amazonaws.com'}}]}
sns.set_topic_attributes(
TopicArn=existing_topic_arn,
AttributeName='Policy',
AttributeValue=json.dumps(policy))
self.addCleanup(session.client('sns').delete_topic,
TopicArn=existing_topic_arn)
topic_configs.append({
'TopicArn': existing_topic_arn,
'Events': ['s3:ObjectCreated:*']})
session.resource('s3').BucketNotification(bname).put(
NotificationConfiguration={'TopicConfigurations': topic_configs})
# Now define the policy.
p = self.load_policy({
'name': 'attach-encrypt',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [{
'type': 'attach-encrypt',
'role': role,
'topic': existing_topic_arn}]
}, session_factory=session_factory)
self.addCleanup(
LambdaManager(
functools.partial(session_factory, region='us-east-1')).remove,
s3crypt.get_function(None, role))
self.addCleanup(session.client('logs').delete_log_group,
logGroupName='/aws/lambda/c7n-s3-encrypt')
# Check that the policy wires up the notification and lambda correctly.
resources = p.run()
self.assertEqual(len(resources), 1)
#time.sleep(10)
notifies = client.get_bucket_notification_configuration(
Bucket=bname).get('TopicConfigurations', [])
existing = [t for t in notifies if 'existing' in t['TopicArn']]
self.assertEqual(len(existing), 1)
# Check that a newly uploaded object gets encrypted by the lambda.
client.put_object(
Bucket=bname, Key='hello-world.txt',
Body='hello world', ContentType='text/plain')
#time.sleep(30)
info = client.head_object(Bucket=bname, Key='hello-world.txt')
self.assertTrue('ServerSideEncryption' in info)
def test_encrypt_versioned_bucket(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_versioning', 'Versioning', None, None)])
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(
s3.EncryptExtantKeys, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_s3_encrypt_versioned')
bname = "custodian-encrypt-test"
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
client.put_bucket_versioning(
Bucket=bname,
VersioningConfiguration={'Status': 'Enabled'})
self.addCleanup(destroyVersionedBucket, client, bname)
generateBucketContents(session.resource('s3'), bname)
p = self.load_policy({
'name': 'encrypt-keys',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': ['encrypt-keys']}, session_factory=session_factory)
resources = p.run()
self.assertTrue(
len(client.list_object_versions(Bucket=bname)['Versions']) == 3)
self.assertTrue(
'ServerSideEncryption' in client.head_object(
Bucket=bname, Key='home.txt'))
def test_encrypt_key_empty_bucket(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [])
self.patch(
s3.EncryptExtantKeys, 'executor_factory', MainThreadExecutor)
session_factory = self.replay_flight_data('test_s3_encrypt_empty')
bname = "custodian-encrypt-test"
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
p = self.load_policy({
'name': 'encrypt-keys',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': ['encrypt-keys']}, session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_encrypt_keys(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [])
session_factory = self.replay_flight_data('test_s3_encrypt')
bname = "custodian-encrypt-test"
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
generateBucketContents(session.resource('s3'), bname)
# start with a report-only option since it doesn't modify the bucket
report_policy = self.load_policy({
'name': 'encrypt-keys',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [{'type': 'encrypt-keys',
'report-only': True}]},
session_factory=session_factory)
report_resources = report_policy.run()
self.assertEqual(report_resources[0]['KeyRemediated'], 3)
p = self.load_policy({
'name': 'encrypt-keys',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': ['encrypt-keys']}, session_factory=session_factory)
p.run()
self.assertTrue(
'ServerSideEncryption' in client.head_object(
Bucket=bname, Key='home.txt'))
# re-run the report policy afterwards to ensure we have no items
# needing remediation
report_resources = report_policy.run()
self.assertEqual(report_resources[0]['KeyRemediated'], 0)
def test_encrypt_keys_aes256_sufficient(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [])
session_factory = self.replay_flight_data(
'test_s3_encrypt_aes256_sufficient')
bname = "custodian-encrypt-sufficient-test"
session = session_factory()
client = session.client('s3')
kms = session.client('kms')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
key_id = [
k for k in kms.list_aliases().get('Aliases', ())
if k['AliasName'] == 'alias/aws/s3'][0]['AliasArn']
client.put_object(
Bucket=bname, Key='testing-abc', ServerSideEncryption='aws:kms',
SSEKMSKeyId=key_id)
client.put_object(
Bucket=bname, Key='testing-123', ServerSideEncryption='AES256')
p = self.load_policy({
'name': 'encrypt-keys',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [{'type': 'encrypt-keys'}]},
session_factory=session_factory)
p.run()
result = client.head_object(Bucket=bname, Key='testing-123')
self.assertTrue(result['ServerSideEncryption'] == 'AES256')
result = client.head_object(Bucket=bname, Key='testing-abc')
self.assertTrue(result['ServerSideEncryption'] == 'aws:kms')
data = json.load(open(
os.path.join(p.ctx.output_path, 'action-encryptextantkeys')))
self.assertEqual(
[{'Count': 2, 'Remediated': 0, 'Bucket': bname}], data)
def test_encrypt_keys_key_id_option(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [])
session_factory = self.replay_flight_data(
'test_s3_encrypt_key_id_option')
bname = "custodian-encrypt-test"
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
generateBucketContents(session.resource('s3'), bname)
key_one = '845ab6f1-744c-4edc-b702-efae6836818a'
p = self.load_policy({
'name': 'encrypt-keys',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [{'type': 'encrypt-keys',
'crypto': 'aws:kms',
'key-id': key_one}]},
session_factory=session_factory)
p.run()
result = client.head_object(Bucket=bname, Key='home.txt')
self.assertTrue('SSEKMSKeyId' in result)
self.assertTrue(key_one in result['SSEKMSKeyId'])
# Now test that we can re-key it to something else
key_two = '5fd9f6d6-4294-4926-8719-1e85695e2ad6'
p = self.load_policy({
'name': 'encrypt-keys',
'resource': 's3',
'filters': [{'Name': bname}],
'actions': [{'type': 'encrypt-keys',
'crypto': 'aws:kms',
'key-id': key_two}]},
session_factory=session_factory)
p.run()
result = client.head_object(Bucket=bname, Key='home.txt')
self.assertTrue('SSEKMSKeyId' in result)
self.assertTrue(key_two in result['SSEKMSKeyId'])
def test_global_grants_filter_option(self):
self.patch(s3.S3, 'executor_factory', MainThreadExecutor)
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_acl', 'Acl', None, None)
])
session_factory = self.replay_flight_data(
'test_s3_global_grants_filter')
bname = 'custodian-testing-grants'
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
self.addCleanup(destroyBucket, client, bname)
public = 'http://acs.amazonaws.com/groups/global/AllUsers'
client.put_bucket_acl(
Bucket=bname,
AccessControlPolicy={
"Owner": {
"DisplayName": "k_vertigo",
"ID": "904fc4c4790937100e9eb293a15e6a0a1f265a064888055b43d030034f8881ee"
},
'Grants': [
{'Grantee': {
'Type': 'Group',
'URI': public},
'Permission': 'WRITE'}
]})
p = self.load_policy(
{'name': 's3-global-check',
'resource': 's3',
'filters': [
{'Name': 'custodian-testing-grants'},
{'type': 'global-grants',
'permissions': ['READ_ACP']}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 0)
p = self.load_policy(
{'name': 's3-global-check',
'resource': 's3',
'filters': [
{'Name': 'custodian-testing-grants'},
{'type': 'global-grants',
'permissions': ['WRITE']}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
def test_global_grants_filter_and_remove(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_acl', 'Acl', None, None)
])
session_factory = self.replay_flight_data('test_s3_grants')
bname = 'custodian-testing-grants'
session = session_factory()
client = session.client('s3')
client.create_bucket(Bucket=bname)
public = 'http://acs.amazonaws.com/groups/global/AllUsers'
client.put_bucket_acl(
Bucket=bname,
AccessControlPolicy={
"Owner": {
"DisplayName": "k_vertigo",
"ID": "904fc4c4790937100e9eb293a15e6a0a1f265a064888055b43d030034f8881ee"
},
'Grants': [
{'Grantee': {
'Type': 'Group',
'URI': public},
'Permission': 'WRITE'}
]})
p = self.load_policy(
{'name': 's3-remove-global',
'resource': 's3',
'filters': [
{'Name': 'custodian-testing-grants'},
{'type': 'global-grants'}],
'actions': [
{'type': 'delete-global-grants',
'grantees': [public]}]
}, session_factory=session_factory)
resources = p.run()
grants = client.get_bucket_acl(Bucket=bname)
client.delete_bucket(Bucket=bname)
self.assertEqual(grants['Grants'], [])
self.assertEqual(resources[0]['Name'], bname)
def test_s3_mark_for_op(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_tagging', 'Tags', [], 'TagSet')])
session_factory = self.replay_flight_data('test_s3_mark_for_op')
session = session_factory()
client = session.client('s3')
bname = 'custodian-mark-test'
p = self.load_policy({
'name': 's3-mark',
'resource': 's3',
'filters': [
{'Name': bname}],
'actions': [
{'type': 'mark-for-op', 'days': 3,
'op': 'delete'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
tags = client.get_bucket_tagging(Bucket=bname)
tag_map = {t['Key']: t['Value'] for t in tags.get('TagSet', [])}
self.assertTrue('maid_status' in tag_map)
self.assertTrue('delete' in tag_map.get('maid_status'))
def test_s3_remove_tag(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_tagging', 'Tags', [], 'TagSet')])
session_factory = self.replay_flight_data('test_s3_remove_tag')
session = session_factory()
client = session.client('s3')
bname = 'custodian-mark-test'
p = self.load_policy({
'name': 's3-unmark',
'resource': 's3',
'filters': [{"Name": bname}],
'actions': ['unmark']},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
tags = client.get_bucket_tagging(Bucket=bname)
tag_map = {t['Key']: t['Value'] for t in tags.get('TagSet', [])}
self.assertTrue('maid_status' not in tag_map)
def test_hosts_website(self):
self.patch(s3, 'S3_AUGMENT_TABLE', [
('get_bucket_website', 'Website', None, None)])
session_factory = self.replay_flight_data('test_s3_hosts_website')
session = session_factory()
client = session.client('s3')
bname = 'custodian-static-website-test'
client.create_bucket(Bucket=bname)
client.put_bucket_website(
Bucket=bname,
WebsiteConfiguration={
'ErrorDocument': {
'Key': 'error.html'
},
'IndexDocument': {
'Suffix': 'index.html'
}
})
self.addCleanup(client.delete_bucket, Bucket=bname)
p = self.load_policy({
'name': 's3-website-hosting',
'resource': 's3',
'filters': [{'Website': 'not-null'}]},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 1)
names = [b['Name'] for b in resources]
self.assertTrue(bname in names)
p = self.load_policy({
'name': 's3-website-hosting',
'resource': 's3',
'filters': [{'Website': 'not-null'}],
'actions': ['remove-website-hosting']},
session_factory=session_factory)
resources = p.run()
self.assertEqual(len(resources), 0)
|
|
import datetime
import api.fake
from flask import Blueprint, request, jsonify, render_template
from fuzzywuzzy import process
from flask_weasyprint import HTML, render_pdf
from flask_login import current_user
from api.core import db
from api.models.users import User
from api.models.finance import ReceiptItem, Receipt, Transaction,\
BudgetCategory, Discount
from api.utils import delete_item, get_item, get_items,\
get_part_by_id, engineering_notation
from api.decorators import requires_login, requires_roles, requires_keys,\
requires_debug
blueprint = Blueprint('receipts', __name__, url_prefix='/receipts')
FUZZ_THRESHOLD = 65
@blueprint.route('/', methods=['GET'])
@requires_login
@requires_roles('treasurer')
def get_all():
return jsonify(get_items('receipts', Receipt, request))
@blueprint.route('/<int:id>/', methods=['DELETE'])
@requires_login
@requires_roles('treasurer')
def delete(id):
return jsonify(delete_item(id, Receipt))
@blueprint.route('/<int:id>/', methods=['GET'])
@requires_login
@requires_roles('parts_seller')
def get(id):
return jsonify(get_item(id, 'receipt', Receipt))
@blueprint.route('/search/<string:q>/', methods=['GET'])
@requires_login
@requires_roles('parts_seller')
def search(q):
errors = []
receipts = process.extract(q, [r.purchaser for r in Receipt.query.all()])
receipts = [Receipt.query.filter_by(purchaser=pair[0]).first().serialize
for pair in receipts
if pair[1] > FUZZ_THRESHOLD]
return jsonify(success=not errors, errors=errors, receipts=receipts)
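# Note: process.extract(q, choices) returns (choice, score) pairs scored
# 0-100, e.g. roughly [('Jane Doe', 90), ('John Smith', 45)] for q='jane'
# (scores illustrative); only purchasers scoring above FUZZ_THRESHOLD (65)
# are looked up again and serialized.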
@blueprint.route('/finalize/', methods=['POST'])
@requires_login
@requires_roles('parts_seller')
@requires_keys('items')
def finalize():
json = request.get_json(force=True)
errors = []
if not len(json['items']):
errors.append('Cannot create empty receipt')
receipt = {
'items': [i for i in json['items'] if i['category'] != 'DISCOUNT'],
'total_price': 0
}
for item in receipt['items']:
receipt['total_price'] += item['quantity'] * item['price']
for item in [i for i in json['items'] if i['category'] == 'DISCOUNT']:
discount = Discount.query.get(item['id'] - Discount.START_ID)
if discount.amount:
item['price'] = -1 * discount.amount * 100
else:
item['price'] = -1 * (discount.percent / 100.0) * \
receipt['total_price']
receipt['items'].append(item)
# Recalculate the receipt's total price taking into account all discounts
receipt['total_price'] = 0
for item in receipt['items']:
receipt['total_price'] += item['quantity'] * item['price']
return jsonify(success=not errors, errors=errors, receipt=receipt)
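# Worked example (illustrative, assuming the discount row stores percent=10,
# amount=None, and the discount line item has quantity 1):
#
#   non-discount subtotal = 2000 cents
#   discount item price   = -1 * (10 / 100.0) * 2000 = -200 cents
#   recomputed total      = 2000 + (1 * -200)        = 1800 cents
#
# A fixed discount with amount=2.50 (dollars) would instead contribute
# -1 * 2.50 * 100 = -250 cents regardless of the subtotal.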
# Create a new receipt; this also creates a new transaction.
# Requires the parts_seller role.
@blueprint.route('/', methods=['POST'])
@requires_login
@requires_roles('parts_seller')
@requires_keys('total_price', 'items', 'purchaser', 'comment')
def new_receipt():
json = request.get_json(force=True)
receipt_id = None
errors = []
if not len(json['items']) and not json['comment']:
errors.append('Receipt must contain at least 1 item')
if int(json['total_price']) == 0:
errors.append('total_price must be a non-zero amount in cents')
for item in json['items']:
if not isinstance(item, dict) or 'quantity' not in item.keys():
errors.append('Bad item object structure')
else:
if not item['category'] == 'DISCOUNT' and \
not item['category'] == 'MANUAL' and \
not get_part_by_id(item['id']):
errors.append('IEEE part id ' + str(item['id']) +
' not found in database')
if int(item['quantity']) == 0:
errors.append('Quantity cannot be 0.')
# this is where we actually construct the receipt and transaction
if not errors:
budget_category = BudgetCategory.query\
.filter_by(name='Part Sales').first()
transaction = Transaction(
user_id=current_user.id,
budget_category=budget_category,
amount=float(json['total_price']) / 100,
date=datetime.datetime.now()
)
db.session.add(transaction)
db.session.commit()
receipt = Receipt(
seller_id=current_user.id,
transaction_id=transaction.id,
purchaser=json['purchaser'],
comment=json['comment']
)
db.session.add(receipt)
db.session.commit()
receipt_id = receipt.id
for item in [i for i in json['items'] if i['category'] != 'DISCOUNT'
and i['category'] != 'MANUAL']:
receipt_item = ReceiptItem(
receipt_id=receipt.id,
ieee_part_no=item['id'],
quantity=item['quantity']
)
part = get_part_by_id(item['id'])
if 'stock' in part.__dict__.keys() and part.stock > 0:
part.stock -= item['quantity']
db.session.add(receipt_item)
for item in [i for i in json['items'] if i['category'] == 'DISCOUNT']:
discount = Discount.query.get(item['id'] - Discount.START_ID)
receipt_item = ReceiptItem(
receipt_id=receipt.id,
ieee_part_no=item['id'],
discount_id=discount.id,
discount_amount=float(item['price']) / 100
)
db.session.add(receipt_item)
manual_string = ''
for item in [i for i in json['items'] if i['category'] == 'MANUAL']:
manual_string += 'This receipt includes an item of price $' + \
str(float(item['price']) / 100) + ' not in the database. '
receipt.comment = manual_string + receipt.comment
db.session.commit()
return jsonify(success=not errors, errors=errors, receipt_id=receipt_id)
@blueprint.route('/fake/', methods=['POST'])
@requires_debug
@requires_keys('count')
def fake():
json = request.get_json(force=True)
user = User.query.get(json['user_id'])\
if 'user_id' in json.keys() else None
receipts = [api.fake.receipt(user) for _ in xrange(json['count'])]
return jsonify(success=True, errors=[], ids=[i.id for i in receipts])
@blueprint.route('/pdf/<int:id>.pdf/', methods=['GET'])
@requires_login
@requires_roles('officer', 'parts_seller')
def get_receipt_pdf(id):
receipt = Receipt.query.get_or_404(id)
return render_pdf(HTML(string=render_template(
'receipt.html',
receipt=receipt,
items=receipt.serialize['items'],
engineering_notation=engineering_notation
)))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# copyright 2014 Hamilton Kibbe <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Gerber (RS-274X) Statements
===========================
**Gerber RS-274X file statement classes**
"""
from .utils import (parse_gerber_value, write_gerber_value, decimal_string,
inch, metric)
from .am_statements import *
from .am_read import read_macro
from .am_eval import eval_macro
class Statement(object):
""" Gerber statement Base class
The statement class provides a type attribute.
Parameters
----------
type : string
String identifying the statement type.
Attributes
----------
type : string
String identifying the statement type.
"""
def __init__(self, stype, units='inch'):
self.type = stype
self.units = units
def __str__(self):
s = "<{0} ".format(self.__class__.__name__)
for key, value in self.__dict__.items():
s += "{0}={1} ".format(key, value)
s = s.rstrip() + ">"
return s
def to_inch(self):
self.units = 'inch'
def to_metric(self):
self.units = 'metric'
def offset(self, x_offset=0, y_offset=0):
pass
def __eq__(self, other):
return self.__dict__ == other.__dict__
class ParamStmt(Statement):
""" Gerber parameter statement Base class
The parameter statement class provides a parameter type attribute.
Parameters
----------
param : string
two-character code identifying the parameter statement type.
Attributes
----------
param : string
Parameter type code
"""
def __init__(self, param):
Statement.__init__(self, "PARAM")
self.param = param
class FSParamStmt(ParamStmt):
""" FS - Gerber Format Specification Statement
"""
@classmethod
def from_dict(cls, stmt_dict):
"""
"""
param = stmt_dict.get('param')
if stmt_dict.get('zero') == 'L':
zeros = 'leading'
elif stmt_dict.get('zero') == 'T':
zeros = 'trailing'
else:
zeros = 'none'
notation = 'absolute' if stmt_dict.get('notation') == 'A' else 'incremental'
fmt = tuple(map(int, stmt_dict.get('x')))
return cls(param, zeros, notation, fmt)
def __init__(self, param, zero_suppression='leading',
notation='absolute', format=(2, 4)):
""" Initialize FSParamStmt class
.. note::
The FS command specifies the format of the coordinate data. It
must only be used once at the beginning of a file. It must be
specified before the first use of coordinate data.
Parameters
----------
param : string
Parameter.
zero_suppression : string
Zero-suppression mode. May be either 'leading', 'trailing' or 'none' (all zeros are present)
notation : string
Notation mode. May be either 'absolute' or 'incremental'
format : tuple (int, int)
Gerber precision format expressed as a tuple containing:
(number of integer-part digits, number of decimal-part digits)
Returns
-------
ParamStmt : FSParamStmt
Initialized FSParamStmt class.
"""
ParamStmt.__init__(self, param)
self.zero_suppression = zero_suppression
self.notation = notation
self.format = format
def to_gerber(self, settings=None):
if settings:
zero_suppression = 'L' if settings.zero_suppression == 'leading' else 'T'
notation = 'A' if settings.notation == 'absolute' else 'I'
fmt = ''.join(map(str, settings.format))
else:
zero_suppression = 'L' if self.zero_suppression == 'leading' else 'T'
notation = 'A' if self.notation == 'absolute' else 'I'
fmt = ''.join(map(str, self.format))
return '%FS{0}{1}X{2}Y{3}*%'.format(zero_suppression, notation, fmt, fmt)
def __str__(self):
return ('<Format Spec: %d:%d %s zero suppression %s notation>' %
(self.format[0], self.format[1], self.zero_suppression, self.notation))
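# Usage sketch (doctest-style, illustrative only):
#
#     >>> fs = FSParamStmt('FS', zero_suppression='leading',
#     ...                  notation='absolute', format=(2, 4))
#     >>> fs.to_gerber()
#     '%FSLAX24Y24*%'
#     >>> str(fs)
#     '<Format Spec: 2:4 leading zero suppression absolute notation>'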
class MOParamStmt(ParamStmt):
""" MO - Gerber Mode (measurement units) Statement.
"""
@classmethod
def from_dict(cls, stmt_dict):
param = stmt_dict.get('param')
if stmt_dict.get('mo') is None:
mo = None
elif stmt_dict.get('mo').lower() not in ('in', 'mm'):
raise ValueError('Mode may be mm or in')
elif stmt_dict.get('mo').lower() == 'in':
mo = 'inch'
else:
mo = 'metric'
return cls(param, mo)
def __init__(self, param, mo):
""" Initialize MOParamStmt class
Parameters
----------
param : string
Parameter.
mo : string
Measurement units. May be either 'inch' or 'metric'
Returns
-------
ParamStmt : MOParamStmt
Initialized MOParamStmt class.
"""
ParamStmt.__init__(self, param)
self.mode = mo
def to_gerber(self, settings=None):
mode = 'MM' if self.mode == 'metric' else 'IN'
return '%MO{0}*%'.format(mode)
def to_inch(self):
self.mode = 'inch'
def to_metric(self):
self.mode = 'metric'
def __str__(self):
mode_str = 'millimeters' if self.mode == 'metric' else 'inches'
return ('<Mode: %s>' % mode_str)
class LPParamStmt(ParamStmt):
""" LP - Gerber Level Polarity statement
"""
@classmethod
def from_dict(cls, stmt_dict):
param = stmt_dict['param']
lp = 'clear' if stmt_dict.get('lp') == 'C' else 'dark'
return cls(param, lp)
def __init__(self, param, lp):
""" Initialize LPParamStmt class
Parameters
----------
param : string
Parameter
lp : string
Level polarity. May be either 'clear' or 'dark'
Returns
-------
ParamStmt : LPParamStmt
Initialized LPParamStmt class.
"""
ParamStmt.__init__(self, param)
self.lp = lp
def to_gerber(self, settings=None):
lp = 'C' if self.lp == 'clear' else 'D'
return '%LP{0}*%'.format(lp)
def __str__(self):
return '<Level Polarity: %s>' % self.lp
class ADParamStmt(ParamStmt):
""" AD - Gerber Aperture Definition Statement
"""
@classmethod
def from_dict(cls, stmt_dict):
param = stmt_dict.get('param')
d = int(stmt_dict.get('d'))
shape = stmt_dict.get('shape')
modifiers = stmt_dict.get('modifiers')
return cls(param, d, shape, modifiers)
def __init__(self, param, d, shape, modifiers):
""" Initialize ADParamStmt class
Parameters
----------
param : string
Parameter code
d : int
Aperture D-code
shape : string
aperture name
modifiers : list of lists of floats
Shape modifiers
Returns
-------
ParamStmt : ADParamStmt
Initialized ADParamStmt class.
"""
ParamStmt.__init__(self, param)
self.d = d
self.shape = shape
if modifiers:
self.modifiers = [tuple([float(x) for x in m.split("X") if len(x)]) for m in modifiers.split(",") if len(m)]
else:
self.modifiers = [tuple()]
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
self.modifiers = [tuple([inch(x) for x in modifier]) for modifier in self.modifiers]
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
self.modifiers = [tuple([metric(x) for x in modifier]) for modifier in self.modifiers]
def to_gerber(self, settings=None):
if any(self.modifiers):
return '%ADD{0}{1},{2}*%'.format(self.d, self.shape, ','.join(['X'.join(["%.4g" % x for x in modifier]) for modifier in self.modifiers]))
else:
return '%ADD{0}{1}*%'.format(self.d, self.shape)
def __str__(self):
if self.shape == 'C':
shape = 'circle'
elif self.shape == 'R':
shape = 'rectangle'
elif self.shape == 'O':
shape = 'obround'
else:
shape = self.shape
return '<Aperture Definition: %d: %s>' % (self.d, shape)
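# Usage sketch (doctest-style, illustrative only): a 0.025 circular aperture
# assigned D-code 10 round-trips as follows.
#
#     >>> ad = ADParamStmt('AD', 10, 'C', '0.025')
#     >>> ad.modifiers
#     [(0.025,)]
#     >>> ad.to_gerber()
#     '%ADD10C,0.025*%'
#     >>> str(ad)
#     '<Aperture Definition: 10: circle>'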
class AMParamStmt(ParamStmt):
""" AM - Aperture Macro Statement
"""
@classmethod
def from_dict(cls, stmt_dict):
return cls(**stmt_dict)
def __init__(self, param, name, macro):
""" Initialize AMParamStmt class
Parameters
----------
param : string
Parameter code
name : string
Aperture macro name
macro : string
Aperture macro string
Returns
-------
ParamStmt : AMParamStmt
Initialized AMParamStmt class.
"""
ParamStmt.__init__(self, param)
self.name = name
self.macro = macro
self.instructions = self.read(macro)
self.primitives = []
def read(self, macro):
return read_macro(macro)
def build(self, modifiers=[[]]):
self.primitives = []
for primitive in eval_macro(self.instructions, modifiers[0]):
if primitive[0] == '0':
self.primitives.append(AMCommentPrimitive.from_gerber(primitive))
elif primitive[0] == '1':
self.primitives.append(AMCirclePrimitive.from_gerber(primitive))
elif primitive[0:2] in ('2,', '20'):
self.primitives.append(AMVectorLinePrimitive.from_gerber(primitive))
elif primitive[0:2] == '21':
self.primitives.append(AMCenterLinePrimitive.from_gerber(primitive))
elif primitive[0:2] == '22':
self.primitives.append(AMLowerLeftLinePrimitive.from_gerber(primitive))
elif primitive[0] == '4':
self.primitives.append(AMOutlinePrimitive.from_gerber(primitive))
elif primitive[0] == '5':
self.primitives.append(AMPolygonPrimitive.from_gerber(primitive))
elif primitive[0] == '6':
self.primitives.append(AMMoirePrimitive.from_gerber(primitive))
elif primitive[0] == '7':
self.primitives.append(AMThermalPrimitive.from_gerber(primitive))
else:
self.primitives.append(AMUnsupportPrimitive.from_gerber(primitive))
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
for primitive in self.primitives:
primitive.to_inch()
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
for primitive in self.primitives:
primitive.to_metric()
def to_gerber(self, settings=None):
return '%AM{0}*{1}*%'.format(self.name, self.macro)
def __str__(self):
return '<Aperture Macro %s: %s>' % (self.name, self.macro)
class ASParamStmt(ParamStmt):
""" AS - Axis Select. (Deprecated)
"""
@classmethod
def from_dict(cls, stmt_dict):
param = stmt_dict.get('param')
mode = stmt_dict.get('mode')
return cls(param, mode)
def __init__(self, param, mode):
""" Initialize ASParamStmt class
Parameters
----------
param : string
Parameter string.
mode : string
Axis select. May be either 'AXBY' or 'AYBX'
Returns
-------
ParamStmt : ASParamStmt
Initialized ASParamStmt class.
"""
ParamStmt.__init__(self, param)
self.mode = mode
def to_gerber(self, settings=None):
return '%AS{0}*%'.format(self.mode)
def __str__(self):
return ('<Axis Select: %s>' % self.mode)
class INParamStmt(ParamStmt):
""" IN - Image Name Statement (Deprecated)
"""
@classmethod
def from_dict(cls, stmt_dict):
return cls(**stmt_dict)
def __init__(self, param, name):
""" Initialize INParamStmt class
Parameters
----------
param : string
Parameter code
name : string
Image name
Returns
-------
ParamStmt : INParamStmt
Initialized INParamStmt class.
"""
ParamStmt.__init__(self, param)
self.name = name
def to_gerber(self, settings=None):
return '%IN{0}*%'.format(self.name)
def __str__(self):
return '<Image Name: %s>' % self.name
class IPParamStmt(ParamStmt):
""" IP - Gerber Image Polarity Statement. (Deprecated)
"""
@classmethod
def from_dict(cls, stmt_dict):
param = stmt_dict.get('param')
ip = 'positive' if stmt_dict.get('ip') == 'POS' else 'negative'
return cls(param, ip)
def __init__(self, param, ip):
""" Initialize IPParamStmt class
Parameters
----------
param : string
Parameter string.
ip : string
Image polarity. May be either 'positive' or 'negative'
Returns
-------
ParamStmt : IPParamStmt
Initialized IPParamStmt class.
"""
ParamStmt.__init__(self, param)
self.ip = ip
def to_gerber(self, settings=None):
ip = 'POS' if self.ip == 'positive' else 'NEG'
return '%IP{0}*%'.format(ip)
def __str__(self):
return ('<Image Polarity: %s>' % self.ip)
class IRParamStmt(ParamStmt):
""" IR - Image Rotation Param (Deprecated)
"""
@classmethod
def from_dict(cls, stmt_dict):
angle = int(stmt_dict['angle'])
return cls(stmt_dict['param'], angle)
def __init__(self, param, angle):
""" Initialize IRParamStmt class
Parameters
----------
param : string
Parameter code
angle : int
Image angle
Returns
-------
ParamStmt : IRParamStmt
Initialized IRParamStmt class.
"""
ParamStmt.__init__(self, param)
self.angle = angle
def to_gerber(self, settings=None):
return '%IR{0}*%'.format(self.angle)
def __str__(self):
return '<Image Angle: %s>' % self.angle
class MIParamStmt(ParamStmt):
""" MI - Image Mirror Param (Deprecated)
"""
@classmethod
def from_dict(cls, stmt_dict):
param = stmt_dict.get('param')
a = int(stmt_dict.get('a', 0))
b = int(stmt_dict.get('b', 0))
return cls(param, a, b)
def __init__(self, param, a, b):
""" Initialize MIParamStmt class
Parameters
----------
param : string
Parameter code
a : int
Mirror image about the output device A axis (0=disabled, 1=mirrored)
b : int
Mirror image about the output device B axis (0=disabled, 1=mirrored)
Returns
-------
ParamStmt : MIParamStmt
Initialized MIParamStmt class.
"""
ParamStmt.__init__(self, param)
self.a = a
self.b = b
def to_gerber(self, settings=None):
ret = "%MI"
if self.a is not None:
ret += "A{0}".format(self.a)
if self.b is not None:
ret += "B{0}".format(self.b)
ret += "*%"
return ret
def __str__(self):
return '<Image Mirror: A=%d B=%d>' % (self.a, self.b)
class OFParamStmt(ParamStmt):
""" OF - Gerber Offset statement (Deprecated)
"""
@classmethod
def from_dict(cls, stmt_dict):
param = stmt_dict.get('param')
a = float(stmt_dict.get('a', 0))
b = float(stmt_dict.get('b', 0))
return cls(param, a, b)
def __init__(self, param, a, b):
""" Initialize OFParamStmt class
Parameters
----------
param : string
Parameter
a : float
Offset along the output device A axis
b : float
Offset along the output device B axis
Returns
-------
ParamStmt : OFParamStmt
Initialized OFParamStmt class.
"""
ParamStmt.__init__(self, param)
self.a = a
self.b = b
def to_gerber(self, settings=None):
ret = '%OF'
if self.a is not None:
ret += 'A' + decimal_string(self.a, precision=5)
if self.b is not None:
ret += 'B' + decimal_string(self.b, precision=5)
return ret + '*%'
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.a is not None:
self.a = inch(self.a)
if self.b is not None:
self.b = inch(self.b)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.a is not None:
self.a = metric(self.a)
if self.b is not None:
self.b = metric(self.b)
def offset(self, x_offset=0, y_offset=0):
if self.a is not None:
self.a += x_offset
if self.b is not None:
self.b += y_offset
def __str__(self):
offset_str = ''
if self.a is not None:
offset_str += ('X: %f ' % self.a)
if self.b is not None:
offset_str += ('Y: %f ' % self.b)
return ('<Offset: %s>' % offset_str)
class SFParamStmt(ParamStmt):
""" SF - Scale Factor Param (Deprecated)
"""
@classmethod
def from_dict(cls, stmt_dict):
param = stmt_dict.get('param')
a = float(stmt_dict.get('a', 1))
b = float(stmt_dict.get('b', 1))
return cls(param, a, b)
def __init__(self, param, a, b):
""" Initialize OFParamStmt class
Parameters
----------
param : string
Parameter
a : float
Scale factor for the output device A axis
b : float
Scale factor for the output device B axis
Returns
-------
ParamStmt : SFParamStmt
Initialized SFParamStmt class.
"""
ParamStmt.__init__(self, param)
self.a = a
self.b = b
def to_gerber(self, settings=None):
ret = '%SF'
if self.a is not None:
ret += 'A' + decimal_string(self.a, precision=5)
if self.b is not None:
ret += 'B' + decimal_string(self.b, precision=5)
return ret + '*%'
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.a is not None:
self.a = inch(self.a)
if self.b is not None:
self.b = inch(self.b)
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.a is not None:
self.a = metric(self.a)
if self.b is not None:
self.b = metric(self.b)
def offset(self, x_offset=0, y_offset=0):
if self.a is not None:
self.a += x_offset
if self.b is not None:
self.b += y_offset
def __str__(self):
scale_factor = ''
if self.a is not None:
scale_factor += ('X: %g ' % self.a)
if self.b is not None:
scale_factor += ('Y: %g' % self.b)
return ('<Scale Factor: %s>' % scale_factor)
class LNParamStmt(ParamStmt):
""" LN - Level Name Statement (Deprecated)
"""
@classmethod
def from_dict(cls, stmt_dict):
return cls(**stmt_dict)
def __init__(self, param, name):
""" Initialize LNParamStmt class
Parameters
----------
param : string
Parameter code
name : string
Level name
Returns
-------
ParamStmt : LNParamStmt
Initialized LNParamStmt class.
"""
ParamStmt.__init__(self, param)
self.name = name
def to_gerber(self, settings=None):
return '%LN{0}*%'.format(self.name)
def __str__(self):
return '<Level Name: %s>' % self.name
class DeprecatedStmt(Statement):
""" Unimportant deprecated statement, will be parsed but not emitted.
"""
@classmethod
def from_gerber(cls, line):
return cls(line)
def __init__(self, line):
""" Initialize DeprecatedStmt class
Parameters
----------
line : string
Deprecated statement text
Returns
-------
DeprecatedStmt
Initialized DeprecatedStmt class.
"""
Statement.__init__(self, "DEPRECATED")
self.line = line
def to_gerber(self, settings=None):
return self.line
def __str__(self):
return '<Deprecated Statement: \'%s\'>' % self.line
class CoordStmt(Statement):
""" Coordinate Data Block
"""
@classmethod
def from_dict(cls, stmt_dict, settings):
function = stmt_dict['function']
x = stmt_dict.get('x')
y = stmt_dict.get('y')
i = stmt_dict.get('i')
j = stmt_dict.get('j')
op = stmt_dict.get('op')
if x is not None:
x = parse_gerber_value(stmt_dict.get('x'), settings.format, settings.zero_suppression)
if y is not None:
y = parse_gerber_value(stmt_dict.get('y'), settings.format, settings.zero_suppression)
if i is not None:
i = parse_gerber_value(stmt_dict.get('i'), settings.format, settings.zero_suppression)
if j is not None:
j = parse_gerber_value(stmt_dict.get('j'), settings.format, settings.zero_suppression)
return cls(function, x, y, i, j, op, settings)
def __init__(self, function, x, y, i, j, op, settings):
""" Initialize CoordStmt class
Parameters
----------
function : string
function
x : float
X coordinate
y : float
Y coordinate
i : float
Coordinate offset in the X direction
j : float
Coordinate offset in the Y direction
op : string
Operation code
settings : dict {'zero_suppression', 'format'}
Gerber file coordinate format
Returns
-------
Statement : CoordStmt
Initialized CoordStmt class.
"""
Statement.__init__(self, "COORD")
self.function = function
self.x = x
self.y = y
self.i = i
self.j = j
self.op = op
def to_gerber(self, settings=None):
ret = ''
if self.function:
ret += self.function
if self.x is not None:
ret += 'X{0}'.format(write_gerber_value(self.x, settings.format, settings.zero_suppression))
if self.y is not None:
ret += 'Y{0}'.format(write_gerber_value(self.y, settings.format, settings.zero_suppression))
if self.i is not None:
ret += 'I{0}'.format(write_gerber_value(self.i, settings.format, settings.zero_suppression))
if self.j is not None:
ret += 'J{0}'.format(write_gerber_value(self.j, settings.format, settings.zero_suppression))
if self.op:
ret += self.op
return ret + '*'
def to_inch(self):
if self.units == 'metric':
self.units = 'inch'
if self.x is not None:
self.x = inch(self.x)
if self.y is not None:
self.y = inch(self.y)
if self.i is not None:
self.i = inch(self.i)
if self.j is not None:
self.j = inch(self.j)
if self.function == "G71":
self.function = "G70"
def to_metric(self):
if self.units == 'inch':
self.units = 'metric'
if self.x is not None:
self.x = metric(self.x)
if self.y is not None:
self.y = metric(self.y)
if self.i is not None:
self.i = metric(self.i)
if self.j is not None:
self.j = metric(self.j)
if self.function == "G70":
self.function = "G71"
def offset(self, x_offset=0, y_offset=0):
if self.x is not None:
self.x += x_offset
if self.y is not None:
self.y += y_offset
if self.i is not None:
self.i += x_offset
if self.j is not None:
self.j += y_offset
def __str__(self):
coord_str = ''
if self.function:
coord_str += 'Fn: %s ' % self.function
if self.x is not None:
coord_str += 'X: %g ' % self.x
if self.y is not None:
coord_str += 'Y: %g ' % self.y
if self.i is not None:
coord_str += 'I: %g ' % self.i
if self.j is not None:
coord_str += 'J: %g ' % self.j
if self.op:
if self.op == 'D01':
op = 'Lights On'
elif self.op == 'D02':
op = 'Lights Off'
elif self.op == 'D03':
op = 'Flash'
else:
op = self.op
coord_str += 'Op: %s' % op
return '<Coordinate Statement: %s>' % coord_str
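# Usage sketch (illustrative): unlike the parameter statements above,
# to_gerber() here needs a settings object carrying .format and
# .zero_suppression (e.g. FileSettings from this package's cam module;
# the exact emitted digits depend on that implementation).
#
#     >>> from gerber.cam import FileSettings
#     >>> settings = FileSettings(format=(2, 4), zero_suppression='leading')
#     >>> draw = CoordStmt('G01', 1.25, 0.5, None, None, 'D01', settings)
#     >>> draw.to_gerber(settings)
#     'G01X...Y...D01*'   # coordinates formatted per the FS settings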
class ApertureStmt(Statement):
""" Aperture Statement
"""
def __init__(self, d, deprecated=None):
Statement.__init__(self, "APERTURE")
self.d = int(d)
self.deprecated = True if deprecated is not None and deprecated is not False else False
def to_gerber(self, settings=None):
if self.deprecated:
return 'G54D{0}*'.format(self.d)
else:
return 'D{0}*'.format(self.d)
def __str__(self):
return '<Aperture: %d>' % self.d
class CommentStmt(Statement):
""" Comment Statment
"""
def __init__(self, comment):
Statement.__init__(self, "COMMENT")
self.comment = comment if comment is not None else ""
def to_gerber(self, settings=None):
return 'G04{0}*'.format(self.comment)
def __str__(self):
return '<Comment: %s>' % self.comment
class EofStmt(Statement):
""" EOF Statement
"""
def __init__(self):
Statement.__init__(self, "EOF")
def to_gerber(self, settings=None):
return 'M02*'
def __str__(self):
return '<EOF Statement>'
class QuadrantModeStmt(Statement):
@classmethod
def from_gerber(cls, line):
if 'G74' not in line and 'G75' not in line:
raise ValueError('%s is not a valid quadrant mode statement'
% line)
return (cls('single-quadrant') if line[:3] == 'G74'
else cls('multi-quadrant'))
def __init__(self, mode):
super(QuadrantModeStmt, self).__init__('QuadrantMode')
mode = mode.lower()
if mode not in ['single-quadrant', 'multi-quadrant']:
raise ValueError('Quadrant mode must be "single-quadrant" '
'or "multi-quadrant"')
self.mode = mode
def to_gerber(self, settings=None):
return 'G74*' if self.mode == 'single-quadrant' else 'G75*'
class RegionModeStmt(Statement):
@classmethod
def from_gerber(cls, line):
if 'G36' not in line and 'G37' not in line:
raise ValueError('%s is not a valid region mode statement' % line)
return (cls('on') if line[:3] == 'G36' else cls('off'))
def __init__(self, mode):
super(RegionModeStmt, self).__init__('RegionMode')
mode = mode.lower()
if mode not in ['on', 'off']:
raise ValueError('Valid modes are "on" or "off"')
self.mode = mode
def to_gerber(self, settings=None):
return 'G36*' if self.mode == 'on' else 'G37*'
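# Usage sketch (doctest-style, illustrative only):
#
#     >>> QuadrantModeStmt.from_gerber('G75*').mode
#     'multi-quadrant'
#     >>> RegionModeStmt.from_gerber('G36*').to_gerber()
#     'G36*'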
class UnknownStmt(Statement):
""" Unknown Statement
"""
def __init__(self, line):
Statement.__init__(self, "UNKNOWN")
self.line = line
def to_gerber(self, settings=None):
return self.line
def __str__(self):
return '<Unknown Statement: \'%s\'>' % self.line
|
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import copy
import grp
import inspect
try:
import argparse
except ImportError: # python 2.6
from . import argparse_compat as argparse
import os
import pwd
import sys
import textwrap
import types
from gunicorn import __version__
from gunicorn.errors import ConfigError
from gunicorn import six
from gunicorn import util
KNOWN_SETTINGS = []
PLATFORM = sys.platform
def wrap_method(func):
def _wrapped(instance, *args, **kwargs):
return func(*args, **kwargs)
return _wrapped
def make_settings(ignore=None):
settings = {}
ignore = ignore or ()
for s in KNOWN_SETTINGS:
setting = s()
if setting.name in ignore:
continue
settings[setting.name] = setting.copy()
return settings
class Config(object):
def __init__(self, usage=None, prog=None):
self.settings = make_settings()
self.usage = usage
self.prog = prog or os.path.basename(sys.argv[0])
self.env_orig = os.environ.copy()
def __getattr__(self, name):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
return self.settings[name].get()
def __setattr__(self, name, value):
if name != "settings" and name in self.settings:
raise AttributeError("Invalid access!")
super(Config, self).__setattr__(name, value)
def set(self, name, value):
if name not in self.settings:
raise AttributeError("No configuration setting for: %s" % name)
self.settings[name].set(value)
def parser(self):
kwargs = {
"usage": self.usage,
"prog": self.prog
}
parser = argparse.ArgumentParser(**kwargs)
parser.add_argument("-v", "--version",
action="version", default=argparse.SUPPRESS,
version="%(prog)s (version " + __version__ + ")\n",
help="show program's version number and exit")
parser.add_argument("args", nargs="*", help=argparse.SUPPRESS)
def sorter(k):
return (self.settings[k].section, self.settings[k].order)
keys = sorted(self.settings, key=sorter)
for k in keys:
self.settings[k].add_option(parser)
return parser
@property
def worker_class(self):
uri = self.settings['worker_class'].get()
worker_class = util.load_class(uri)
if hasattr(worker_class, "setup"):
worker_class.setup()
return worker_class
@property
def workers(self):
return self.settings['workers'].get()
@property
def address(self):
s = self.settings['bind'].get()
return [util.parse_address(six.bytes_to_str(bind)) for bind in s]
@property
def uid(self):
return self.settings['user'].get()
@property
def gid(self):
return self.settings['group'].get()
@property
def proc_name(self):
pn = self.settings['proc_name'].get()
if pn is not None:
return pn
else:
return self.settings['default_proc_name'].get()
@property
def logger_class(self):
uri = self.settings['logger_class'].get()
logger_class = util.load_class(uri, default="simple",
section="gunicorn.loggers")
if hasattr(logger_class, "install"):
logger_class.install()
return logger_class
@property
def is_ssl(self):
return self.certfile or self.keyfile
@property
def ssl_options(self):
opts = {}
if self.certfile:
opts['certfile'] = self.certfile
if self.keyfile:
opts['keyfile'] = self.keyfile
return opts
@property
def env(self):
raw_env = self.settings['raw_env'].get()
env = {}
if not raw_env:
return env
for e in raw_env:
s = six.bytes_to_str(e)
try:
k, v = s.split('=')
except ValueError:
raise RuntimeError("environement setting %r invalid" % s)
env[k] = v
return env
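# Usage sketch (illustrative; assumes the standard settings such as
# 'raw_env' and 'bind' are registered by the Setting subclasses defined
# later in this module):
#
#     >>> cfg = Config()
#     >>> cfg.set('raw_env', ['FOO=bar'])
#     >>> cfg.env
#     {'FOO': 'bar'}
#     >>> cfg.set('bind', ['127.0.0.1:9000'])
#     >>> cfg.address
#     [('127.0.0.1', 9000)]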
class SettingMeta(type):
def __new__(cls, name, bases, attrs):
super_new = super(SettingMeta, cls).__new__
parents = [b for b in bases if isinstance(b, SettingMeta)]
if not parents:
return super_new(cls, name, bases, attrs)
attrs["order"] = len(KNOWN_SETTINGS)
attrs["validator"] = wrap_method(attrs["validator"])
new_class = super_new(cls, name, bases, attrs)
new_class.fmt_desc(attrs.get("desc", ""))
KNOWN_SETTINGS.append(new_class)
return new_class
def fmt_desc(cls, desc):
desc = textwrap.dedent(desc).strip()
setattr(cls, "desc", desc)
setattr(cls, "short", desc.splitlines()[0])
class Setting(object):
name = None
value = None
section = None
cli = None
validator = None
type = None
meta = None
action = None
default = None
short = None
desc = None
nargs = None
const = None
def __init__(self):
if self.default is not None:
self.set(self.default)
def add_option(self, parser):
if not self.cli:
return
args = tuple(self.cli)
help_txt = "%s [%s]" % (self.short, self.default)
help_txt = help_txt.replace("%", "%%")
kwargs = {
"dest": self.name,
"action": self.action or "store",
"type": self.type or str,
"default": None,
"help": help_txt
}
if self.meta is not None:
kwargs['metavar'] = self.meta
if kwargs["action"] != "store":
kwargs.pop("type")
if self.nargs is not None:
kwargs["nargs"] = self.nargs
if self.const is not None:
kwargs["const"] = self.const
parser.add_argument(*args, **kwargs)
def copy(self):
return copy.copy(self)
def get(self):
return self.value
def set(self, val):
assert six.callable(self.validator), "Invalid validator: %s" % self.name
self.value = self.validator(val)
def __lt__(self, other):
return (self.section == other.section and
self.order < other.order)
__cmp__ = __lt__
Setting = SettingMeta('Setting', (Setting,), {})
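# Sketch of how settings are declared (the setting below is hypothetical):
# simply subclassing Setting registers the class. SettingMeta assigns its
# order, wraps its validator, appends it to KNOWN_SETTINGS, and derives
# short/desc from the desc attribute, so make_settings() and Config.parser()
# pick it up automatically.
#
#     >>> class MyFlag(Setting):
#     ...     name = "my_flag"
#     ...     section = "Debugging"
#     ...     cli = ["--my-flag"]
#     ...     validator = validate_bool
#     ...     action = "store_true"
#     ...     default = False
#     ...     desc = """\
#     ...         A hypothetical boolean setting used only for illustration.
#     ...         """
#     >>> Config().my_flag
#     False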
def validate_bool(val):
if isinstance(val, bool):
return val
if not isinstance(val, six.string_types):
raise TypeError("Invalid type for casting: %s" % val)
if val.lower().strip() == "true":
return True
elif val.lower().strip() == "false":
return False
else:
raise ValueError("Invalid boolean: %s" % val)
def validate_dict(val):
if not isinstance(val, dict):
raise TypeError("Value is not a dictionary: %s " % val)
return val
def validate_pos_int(val):
if not isinstance(val, six.integer_types):
val = int(val, 0)
else:
# Booleans are ints!
val = int(val)
if val < 0:
raise ValueError("Value must be positive: %s" % val)
return val
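# Worked examples of the base guessing done by validate_pos_int via
# int(val, 0), which the Umask setting below relies on (the "0022" octal
# form assumes Python 2 literal rules, as the Umask description implies):
#
#     validate_pos_int("0xFF")   # -> 255 (hex)
#     validate_pos_int("0022")   # -> 18  (octal under Python 2)
#     validate_pos_int(7)        # -> 7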
def validate_string(val):
if val is None:
return None
if not isinstance(val, six.string_types):
raise TypeError("Not a string: %s" % val)
return val.strip()
def validate_list_string(val):
if not val:
return []
# legacy syntax
if isinstance(val, six.string_types):
val = [val]
return [validate_string(v) for v in val]
def validate_string_to_list(val):
val = validate_string(val)
if not val:
return []
return [v.strip() for v in val.split(",") if v]
def validate_class(val):
if inspect.isfunction(val) or inspect.ismethod(val):
val = val()
if inspect.isclass(val):
return val
return validate_string(val)
def validate_callable(arity):
def _validate_callable(val):
if isinstance(val, six.string_types):
try:
mod_name, obj_name = val.rsplit(".", 1)
except ValueError:
raise TypeError("Value '%s' is not import string. "
"Format: module[.submodules...].object" % val)
try:
mod = __import__(mod_name, fromlist=[obj_name])
val = getattr(mod, obj_name)
except ImportError as e:
raise TypeError(str(e))
except AttributeError:
raise TypeError("Can not load '%s' from '%s'"
"" % (obj_name, mod_name))
if not six.callable(val):
raise TypeError("Value is not six.callable: %s" % val)
if arity != -1 and arity != len(inspect.getargspec(val)[0]):
raise TypeError("Value must have an arity of: %s" % arity)
return val
return _validate_callable
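# A small usage sketch for validate_callable; the sample values are arbitrary.
# The returned validator accepts either a callable of the requested arity or a
# dotted import string that resolves to one.
#
#     validate = validate_callable(2)
#     validate(lambda server, worker: None)   # OK: arity matches
#     validate(lambda server: None)           # raises TypeError: arity is 1, not 2
#     validate("os.path.join")                # import-string form: resolved via
#                                             # __import__, then arity-checked
#                                             # (fails here, join has arity 1)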
def validate_user(val):
if val is None:
return os.geteuid()
if isinstance(val, int):
return val
elif val.isdigit():
return int(val)
else:
try:
return pwd.getpwnam(val).pw_uid
except KeyError:
raise ConfigError("No such user: '%s'" % val)
def validate_group(val):
if val is None:
return os.getegid()
if isinstance(val, int):
return val
elif val.isdigit():
return int(val)
else:
try:
return grp.getgrnam(val).gr_gid
except KeyError:
raise ConfigError("No such group: '%s'" % val)
def validate_post_request(val):
val = validate_callable(-1)(val)
largs = len(inspect.getargspec(val)[0])
if largs == 4:
return val
elif largs == 3:
return lambda worker, req, env, _r: val(worker, req, env)
elif largs == 2:
return lambda worker, req, _e, _r: val(worker, req)
else:
raise TypeError("Value must have an arity of: 4")
def validate_chdir(val):
# valid if the value is a string
val = validate_string(val)
# transform relative paths
path = os.path.abspath(os.path.normpath(os.path.join(util.getcwd(), val)))
# test if the path exists
if not os.path.exists(path):
raise ConfigError("can't chdir to %r" % val)
return path
def validate_file(val):
if val is None:
return None
# valid if the value is a string
val = validate_string(val)
# transform relative paths
path = os.path.abspath(os.path.normpath(os.path.join(util.getcwd(), val)))
# test if the path exists
if not os.path.exists(path):
raise ConfigError("%r not found" % val)
return path
def get_default_config_file():
config_path = os.path.join(os.path.abspath(os.getcwd()),
'gunicorn.conf.py')
if os.path.exists(config_path):
return config_path
return None
class ConfigFile(Setting):
name = "config"
section = "Config File"
cli = ["-c", "--config"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The path to a Gunicorn config file.
Only has an effect when specified on the command line or as part of an
application specific configuration.
"""
class Bind(Setting):
name = "bind"
action = "append"
section = "Server Socket"
cli = ["-b", "--bind"]
meta = "ADDRESS"
validator = validate_list_string
if 'PORT' in os.environ:
default = ['0.0.0.0:{0}'.format(os.environ.get('PORT'))]
else:
default = ['127.0.0.1:8000']
desc = """\
The socket to bind.
A string of the form: 'HOST', 'HOST:PORT', 'unix:PATH'. An IP is a valid
HOST.
Multiple addresses can be bound. ex.::
$ gunicorn -b 127.0.0.1:8000 -b [::1]:8000 test:app
will bind the `test:app` application on localhost both on ipv6
and ipv4 interfaces.
"""
class Backlog(Setting):
name = "backlog"
section = "Server Socket"
cli = ["--backlog"]
meta = "INT"
validator = validate_pos_int
type = int
default = 2048
desc = """\
The maximum number of pending connections.
This refers to the number of clients that can be waiting to be served.
Exceeding this number results in the client getting an error when
attempting to connect. It should only affect servers under significant
load.
Must be a positive integer. Generally set in the 64-2048 range.
"""
class Workers(Setting):
name = "workers"
section = "Worker Processes"
cli = ["-w", "--workers"]
meta = "INT"
validator = validate_pos_int
type = int
default = 1
desc = """\
The number of worker processes for handling requests.
A positive integer generally in the 2-4 x $(NUM_CORES) range. You'll
want to vary this a bit to find the best for your particular
application's work load.
"""
class WorkerClass(Setting):
name = "worker_class"
section = "Worker Processes"
cli = ["-k", "--worker-class"]
meta = "STRING"
validator = validate_class
default = "sync"
desc = """\
The type of workers to use.
The default class (sync) should handle most 'normal' types of
workloads. You'll want to read
http://docs.gunicorn.org/en/latest/design.html for information
on when you might want to choose one of the other worker
classes.
A string referring to one of the following bundled classes:
* ``sync``
* ``eventlet`` - Requires eventlet >= 0.9.7
* ``gevent`` - Requires gevent >= 0.12.2 (?)
* ``tornado`` - Requires tornado >= 0.2
Optionally, you can provide your own worker by giving gunicorn a
Python path to a subclass of gunicorn.workers.base.Worker. For
example, this path will load the gevent class:
``gunicorn.workers.ggevent.GeventWorker``. Alternatively the gevent
class can also be loaded with ``egg:gunicorn#gevent``.
"""
class WorkerConnections(Setting):
name = "worker_connections"
section = "Worker Processes"
cli = ["--worker-connections"]
meta = "INT"
validator = validate_pos_int
type = int
default = 1000
desc = """\
The maximum number of simultaneous clients.
This setting only affects the Eventlet and Gevent worker types.
"""
class MaxRequests(Setting):
name = "max_requests"
section = "Worker Processes"
cli = ["--max-requests"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
The maximum number of requests a worker will process before restarting.
Any value greater than zero will limit the number of requests a worker
will process before automatically restarting. This is a simple method
to help limit the damage of memory leaks.
If this is set to zero (the default) then the automatic worker
restarts are disabled.
"""
class Timeout(Setting):
name = "timeout"
section = "Worker Processes"
cli = ["-t", "--timeout"]
meta = "INT"
validator = validate_pos_int
type = int
default = 30
desc = """\
Workers silent for more than this many seconds are killed and restarted.
Generally set to thirty seconds. Only set this noticeably higher if
you're sure of the repercussions for sync workers. For the non sync
workers it just means that the worker process is still communicating and
is not tied to the length of time required to handle a single request.
"""
class GracefulTimeout(Setting):
name = "graceful_timeout"
section = "Worker Processes"
cli = ["--graceful-timeout"]
meta = "INT"
validator = validate_pos_int
type = int
default = 30
desc = """\
Timeout for graceful workers restart.
Generally set to thirty seconds. This is the maximum amount of time a
worker has to finish handling a request after receiving a restart
signal. If the time is up, the worker is force killed.
"""
class Keepalive(Setting):
name = "keepalive"
section = "Worker Processes"
cli = ["--keep-alive"]
meta = "INT"
validator = validate_pos_int
type = int
default = 2
desc = """\
The number of seconds to wait for requests on a Keep-Alive connection.
Generally set in the 1-5 seconds range.
"""
class LimitRequestLine(Setting):
name = "limit_request_line"
section = "Security"
cli = ["--limit-request-line"]
meta = "INT"
validator = validate_pos_int
type = int
default = 4094
desc = """\
The maximum size of HTTP request line in bytes.
This parameter is used to limit the allowed size of a client's
HTTP request-line. Since the request-line consists of the HTTP
method, URI, and protocol version, this directive places a
restriction on the length of a request-URI allowed for a request
on the server. A server needs this value to be large enough to
hold any of its resource names, including any information that
might be passed in the query part of a GET request. Value is a number
from 0 (unlimited) to 8190.
This parameter can be used to help mitigate DDoS attacks.
"""
class LimitRequestFields(Setting):
name = "limit_request_fields"
section = "Security"
cli = ["--limit-request-fields"]
meta = "INT"
validator = validate_pos_int
type = int
default = 100
desc = """\
Limit the number of HTTP header fields in a request.
This parameter is used to limit the number of headers in a request to
help mitigate DDoS attacks. Used together with `limit_request_field_size`
it provides additional safety. By default this value is 100 and can't be
larger than 32768.
"""
class LimitRequestFieldSize(Setting):
name = "limit_request_field_size"
section = "Security"
cli = ["--limit-request-field_size"]
meta = "INT"
validator = validate_pos_int
type = int
default = 8190
desc = """\
Limit the allowed size of an HTTP request header field.
Value is a number from 0 (unlimited) to 8190.
"""
class Debug(Setting):
name = "debug"
section = "Debugging"
cli = ["--debug"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Turn on debugging in the server.
This limits the number of worker processes to 1 and changes some error
handling that's sent to clients.
"""
class Spew(Setting):
name = "spew"
section = "Debugging"
cli = ["--spew"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Install a trace function that spews every line executed by the server.
This is the nuclear option.
"""
class ConfigCheck(Setting):
name = "check_config"
section = "Debugging"
cli = ["--check-config", ]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Check the configuration.
"""
class PreloadApp(Setting):
name = "preload_app"
section = "Server Mechanics"
cli = ["--preload"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Load application code before the worker processes are forked.
By preloading an application you can save some RAM resources as well as
speed up server boot times. However, if you defer application loading
to each worker process, you can reload your application code easily by
restarting workers.
"""
class Chdir(Setting):
name = "chdir"
section = "Server Mechanics"
cli = ["--chdir"]
validator = validate_chdir
default = util.getcwd()
desc = """\
Change directory to the specified directory before loading apps.
"""
class Daemon(Setting):
name = "daemon"
section = "Server Mechanics"
cli = ["-D", "--daemon"]
validator = validate_bool
action = "store_true"
default = False
desc = """\
Daemonize the Gunicorn process.
Detaches the server from the controlling terminal and enters the
background.
"""
class Env(Setting):
name = "raw_env"
action = "append"
section = "Server Mechanic"
cli = ["-e", "--env"]
meta = "ENV"
validator = validate_list_string
default = []
desc = """\
Set environment variable (key=value).
Pass variables to the execution environment. Ex.::
$ gunicorn -b 127.0.0.1:8000 --env FOO=1 test:app
and test for the FOO environment variable in your application.
"""
class Pidfile(Setting):
name = "pidfile"
section = "Server Mechanics"
cli = ["-p", "--pid"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
A filename to use for the PID file.
If not set, no PID file will be written.
"""
class User(Setting):
name = "user"
section = "Server Mechanics"
cli = ["-u", "--user"]
meta = "USER"
validator = validate_user
default = os.geteuid()
desc = """\
Switch worker processes to run as this user.
A valid user id (as an integer) or the name of a user that can be
retrieved with a call to pwd.getpwnam(value) or None to not change
the worker process user.
"""
class Group(Setting):
name = "group"
section = "Server Mechanics"
cli = ["-g", "--group"]
meta = "GROUP"
validator = validate_group
default = os.getegid()
desc = """\
Switch worker processes to run as this group.
A valid group id (as an integer) or the name of a group that can be
retrieved with a call to grp.getgrnam(value) or None to not change
the worker process group.
"""
class Umask(Setting):
name = "umask"
section = "Server Mechanics"
cli = ["-m", "--umask"]
meta = "INT"
validator = validate_pos_int
type = int
default = 0
desc = """\
A bit mask for the file mode on files written by Gunicorn.
Note that this affects unix socket permissions.
A valid value for the os.umask(mode) call or a string compatible with
int(value, 0) (0 means Python guesses the base, so values like "0",
"0xFF", "0022" are valid for decimal, hex, and octal representations)
"""
class TmpUploadDir(Setting):
name = "tmp_upload_dir"
section = "Server Mechanics"
meta = "DIR"
validator = validate_string
default = None
desc = """\
Directory to store temporary request data as they are read.
This may disappear in the near future.
This path should be writable by the process permissions set for Gunicorn
workers. If not specified, Gunicorn will choose a system generated
temporary directory.
"""
class SecureSchemeHeader(Setting):
name = "secure_scheme_headers"
section = "Server Mechanics"
validator = validate_dict
default = {
"X-FORWARDED-PROTOCOL": "ssl",
"X-FORWARDED-PROTO": "https",
"X-FORWARDED-SSL": "on"
}
desc = """\
A dictionary containing headers and values that the front-end proxy
uses to indicate HTTPS requests. These tell gunicorn to set
wsgi.url_scheme to "https", so your application can tell that the
request is secure.
The dictionary should map upper-case header names to exact string
values. The value comparisons are case-sensitive, unlike the header
names, so make sure they're exactly what your front-end proxy sends
when handling HTTPS requests.
It is important that your front-end proxy configuration ensures that
the headers defined here can not be passed directly from the client.
"""
class XForwardedFor(Setting):
name = "x_forwarded_for_header"
section = "Server Mechanics"
meta = "STRING"
validator = validate_string
default = 'X-FORWARDED-FOR'
desc = """\
Set the X-Forwarded-For header that identifies the originating IP
address of the client connecting to gunicorn via a proxy.
"""
class ForwardedAllowIPS(Setting):
name = "forwarded_allow_ips"
section = "Server Mechanics"
meta = "STRING"
validator = validate_string_to_list
default = "127.0.0.1"
desc = """\
Front-end IPs from which handling of X-Forwarded-* headers is allowed
(comma-separated).
Set to "*" to disable checking of front-end IPs (useful for setups
where you don't know in advance the IP address of the front-end, but
you still trust the environment).
"""
class AccessLog(Setting):
name = "accesslog"
section = "Logging"
cli = ["--access-logfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The Access log file to write to.
"-" means log to stderr.
"""
class AccessLogFormat(Setting):
name = "access_log_format"
section = "Logging"
cli = ["--access-logformat"]
meta = "STRING"
validator = validate_string
default = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
desc = """\
The Access log format.
By default:
%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"
h: remote address
l: '-'
u: currently '-', may be user name in future releases
t: date of the request
r: status line (ex: GET / HTTP/1.1)
s: status
b: response length or '-'
f: referer
a: user agent
T: request time in seconds
D: request time in microseconds,
p: process ID
{Header}i: request header
{Header}o: response header
"""
class ErrorLog(Setting):
name = "errorlog"
section = "Logging"
cli = ["--error-logfile", "--log-file"]
meta = "FILE"
validator = validate_string
default = "-"
desc = """\
The Error log file to write to.
"-" means log to stderr.
"""
class Loglevel(Setting):
name = "loglevel"
section = "Logging"
cli = ["--log-level"]
meta = "LEVEL"
validator = validate_string
default = "info"
desc = """\
The granularity of Error log outputs.
Valid level names are:
* debug
* info
* warning
* error
* critical
"""
class LoggerClass(Setting):
name = "logger_class"
section = "Logging"
cli = ["--logger-class"]
meta = "STRING"
validator = validate_class
default = "simple"
desc = """\
The logger you want to use to log events in gunicorn.
The default class (``gunicorn.glogging.Logger``) handles most
normal usages in logging. It provides error and access logging.
You can provide your own logger by giving gunicorn a
Python path to a subclass of ``gunicorn.glogging.Logger``.
Alternatively the syntax can also load the Logger class
with `egg:gunicorn#simple`
"""
class LogConfig(Setting):
name = "logconfig"
section = "Logging"
cli = ["--log-config"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
The log config file to use.
Gunicorn uses the standard Python logging module's Configuration
file format.
"""
class SyslogTo(Setting):
name = "syslog_addr"
section = "Logging"
cli = ["--log-syslog-to"]
meta = "SYSLOG_ADDR"
validator = validate_string
if PLATFORM == "darwin":
default = "unix:///var/run/syslog"
elif PLATFORM in ('freebsd', 'dragonfly', ):
default = "unix:///var/run/log"
elif PLATFORM == "openbsd":
default = "unix:///dev/log"
else:
default = "udp://localhost:514"
desc = """\
Address to send syslog messages to.
"""
class Syslog(Setting):
name = "syslog"
section = "Logging"
cli = ["--log-syslog"]
validator = validate_bool
action = 'store_true'
default = False
desc = """\
Log to syslog.
"""
class SyslogPrefix(Setting):
name = "syslog_prefix"
section = "Logging"
cli = ["--log-syslog-prefix"]
meta = "SYSLOG_PREFIX"
validator = validate_string
default = None
desc = """\
Makes Gunicorn use the parameter as the program name in the syslog entries.
All entries will be prefixed by gunicorn.<prefix>. By default the program
name is the name of the process.
"""
class SyslogFacility(Setting):
name = "syslog_facility"
section = "Logging"
cli = ["--log-syslog-facility"]
meta = "SYSLOG_FACILITY"
validator = validate_string
default = "user"
desc = """\
Syslog facility name
"""
class EnableStdioInheritance(Setting):
name = "enable_stdio_inheritance"
section = "Logging"
cli = ["-R", "--enable-stdio-inheritance"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
Enable stdio inheritance.
Enable inheritance for stdio file descriptors in daemon mode.
Note: To disable Python stdout buffering, you can set the
``PYTHONUNBUFFERED`` environment variable.
"""
class Procname(Setting):
name = "proc_name"
section = "Process Naming"
cli = ["-n", "--name"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A base to use with setproctitle for process naming.
This affects things like ``ps`` and ``top``. If you're going to be
running more than one instance of Gunicorn you'll probably want to set a
name to tell them apart. This requires that you install the setproctitle
module.
It defaults to 'gunicorn'.
"""
class DefaultProcName(Setting):
name = "default_proc_name"
section = "Process Naming"
validator = validate_string
default = "gunicorn"
desc = """\
Internal setting that is adjusted for each type of application.
"""
class DjangoSettings(Setting):
name = "django_settings"
section = "Django"
cli = ["--settings"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
The Python path to a Django settings module. (deprecated)
e.g. 'myproject.settings.main'. If this isn't provided, the
DJANGO_SETTINGS_MODULE environment variable will be used.
**DEPRECATED**: use the --env argument instead.
"""
class PythonPath(Setting):
name = "pythonpath"
section = "Server Mechanics"
cli = ["--pythonpath"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
A directory to add to the Python path.
e.g.
'/home/djangoprojects/myproject'.
"""
class Paste(Setting):
name = "paste"
section = "Server Mechanics"
cli = ["--paster"]
meta = "STRING"
validator = validate_string
default = None
desc = """\
Load a paste.deploy config file.
"""
class OnStarting(Setting):
name = "on_starting"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def on_starting(server):
pass
default = staticmethod(on_starting)
desc = """\
Called just before the master process is initialized.
The callable needs to accept a single instance variable for the Arbiter.
"""
class OnReload(Setting):
name = "on_reload"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def on_reload(server):
pass
default = staticmethod(on_reload)
desc = """\
Called to recycle workers during a reload via SIGHUP.
The callable needs to accept a single instance variable for the Arbiter.
"""
class WhenReady(Setting):
name = "when_ready"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def when_ready(server):
pass
default = staticmethod(when_ready)
desc = """\
Called just after the server is started.
The callable needs to accept a single instance variable for the Arbiter.
"""
class Prefork(Setting):
name = "pre_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def pre_fork(server, worker):
pass
default = staticmethod(pre_fork)
desc = """\
Called just before a worker is forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class Postfork(Setting):
name = "post_fork"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def post_fork(server, worker):
pass
default = staticmethod(post_fork)
desc = """\
Called just after a worker has been forked.
The callable needs to accept two instance variables for the Arbiter and
new Worker.
"""
class PostWorkerInit(Setting):
name = "post_worker_init"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def post_worker_init(worker):
pass
default = staticmethod(post_worker_init)
desc = """\
Called just after a worker has initialized the application.
The callable needs to accept one instance variable for the initialized
Worker.
"""
class PreExec(Setting):
name = "pre_exec"
section = "Server Hooks"
validator = validate_callable(1)
type = six.callable
def pre_exec(server):
pass
default = staticmethod(pre_exec)
desc = """\
Called just before a new master process is forked.
The callable needs to accept a single instance variable for the Arbiter.
"""
class PreRequest(Setting):
name = "pre_request"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def pre_request(worker, req):
worker.log.debug("%s %s" % (req.method, req.path))
default = staticmethod(pre_request)
desc = """\
Called just before a worker processes the request.
The callable needs to accept two instance variables for the Worker and
the Request.
"""
class PostRequest(Setting):
name = "post_request"
section = "Server Hooks"
validator = validate_post_request
type = six.callable
def post_request(worker, req, environ, resp):
pass
default = staticmethod(post_request)
desc = """\
Called after a worker processes the request.
The callable needs to accept two instance variables for the Worker and
the Request.
"""
class WorkerExit(Setting):
name = "worker_exit"
section = "Server Hooks"
validator = validate_callable(2)
type = six.callable
def worker_exit(server, worker):
pass
default = staticmethod(worker_exit)
desc = """\
Called just after a worker has exited.
The callable needs to accept two instance variables for the Arbiter and
the just-exited Worker.
"""
class NumWorkersChanged(Setting):
name = "nworkers_changed"
section = "Server Hooks"
validator = validate_callable(3)
type = six.callable
def nworkers_changed(server, new_value, old_value):
pass
default = staticmethod(nworkers_changed)
desc = """\
Called just after num_workers has been changed.
The callable needs to accept an instance variable of the Arbiter and
two integers for the number of workers after and before the change.
If the number of workers is set for the first time, old_value would be
None.
"""
class ProxyProtocol(Setting):
name = "proxy_protocol"
section = "Server Mechanics"
cli = ["--proxy-protocol"]
validator = validate_bool
default = False
action = "store_true"
desc = """\
Enable detection of the PROXY protocol (PROXY mode).
Allows using HTTP and the PROXY protocol together. This may be useful
when working with stunnel as an HTTPS frontend and gunicorn as an
HTTP server.
PROXY protocol: http://haproxy.1wt.eu/download/1.5/doc/proxy-protocol.txt
Example for stunnel config::
[https]
protocol = proxy
accept = 443
connect = 80
cert = /etc/ssl/certs/stunnel.pem
key = /etc/ssl/certs/stunnel.key
"""
class ProxyAllowFrom(Setting):
name = "proxy_allow_ips"
section = "Server Mechanics"
cli = ["--proxy-allow-from"]
validator = validate_string_to_list
default = "127.0.0.1"
desc = """\
Front-end IPs from which proxy requests are accepted (comma-separated).
"""
class KeyFile(Setting):
name = "keyfile"
section = "Ssl"
cli = ["--keyfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL key file
"""
class CertFile(Setting):
name = "certfile"
section = "Ssl"
cli = ["--certfile"]
meta = "FILE"
validator = validate_string
default = None
desc = """\
SSL certificate file
"""
|
|
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: showind
# Purpose: Display and subset wind data
# Author: Kyle Shannon <[email protected]>
#
###############################################################################
# Copyright (c) 2013, Kyle Shannon <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
from collections import namedtuple
import datetime
import logging
import math
import os
import sqlite3
import sys
import unittest
import zipfile
import numpy
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
sys.path.append(os.path.abspath('windrose'))
from windrose import *
logging.basicConfig(level=logging.INFO)
def _import_date(string):
'''
Parse a datetime from a UTC string
'''
dt = datetime.datetime.strptime(string, "%Y-%m-%dT%H:%M:%S")
return dt
def _export_date(dt):
'''
Parse date time and return a string for query
'''
return dt.strftime('%Y-%m-%d %H:%M:%S')
def _extract_xy(wkt):
'''
Extract x and y coordinates from wkt in the db. Strip 'POINT' from the
front, and split the remaining data losing the parentheses
'''
wkt = wkt.strip().upper()
if wkt.find('POINT') < 0:
raise ValueError
wkt = wkt[wkt.find('(')+1:wkt.find(')')].split()
if len(wkt) != 2:
raise ValueError
return tuple([float(c) for c in wkt])
def _to_decdeg(d):
d = d.split("'")
s = float(d[-1])
s = s / 60.0
d, m = [float(f) for f in d[0].split('DEG')]
m += s
m = m / 60.0
if d < 0:
m = m * -1
d += m
return d
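# A worked example for _to_decdeg, assuming coordinate strings follow the
# "<deg>DEG<min>'<sec>" pattern implied by the parsing above:
#
#     _to_decdeg("10DEG30'15.0")   # -> 10 + (30 + 15.0/60) / 60 ~= 10.5042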
class ShoWind:
'''
Extract a given amount of data with time and space constraints.
'''
def __init__(self, dbfile, start=None, end=None, geomfilter=None):
self.dbfile = dbfile
if start:
self.start = _import_date(start)
else:
self.start = None
if end:
self.end = _import_date(end)
else:
self.end = None
self.geomfilter = geomfilter
self.db = sqlite3.connect(dbfile)
self.cursor = self.db.cursor()
def point_location(self, plot):
'''
Fetch the x and y coordinate of the plot
'''
if plot.startswith('L1-'):
plot = plot.split('-')
plot = 'L1G-' + plot[1]
sql = """SELECT geometry FROM plot_location WHERE plot_id=?"""
self.cursor.execute(sql, (plot,))
row = self.cursor.fetchone()
return _extract_xy(row[0])
def fetch_point_data(self, plot):
'''
Fetch data for a single point
'''
sql = """SELECT * FROM mean_flow_obs
WHERE plot_id=? AND date_time BETWEEN ? AND ? AND
quality='OK'"""
self.cursor.execute(sql, (plot, self.start, self.end))
data = self.cursor.fetchall()
logging.info('Query fetched %i result(s)' % len(data))
return data
def _point_kml(self, plot, data, images=[]):
'''
Create a kml representation of a plot
'''
lon, lat = self.point_location(plot)
stats = self.statistics(data)
if stats is None:
logging.warning('Could not calculate stats')
return ''
d = stats[2][0]
if d < 0:
d = d + 360.0
kml = ' <Placemark>\n' \
' <Style>\n' \
' <IconStyle>\n' \
' <Icon>\n' \
' <href>http://maps.google.com/mapfiles/kml/shapes/arrow.png</href>\n' \
' </Icon>\n' \
' <heading>%s</heading>\n' \
' </IconStyle>\n' \
' </Style>\n' \
' <Point>\n' \
' <coordinates>%.9f,%.9f,0</coordinates>\n' \
' </Point>\n' % (d, lon, lat)
kml = kml + ' <name>%s</name>\n' \
' <description>\n' \
' <![CDATA[\n' % plot
for image in images:
kml = kml + ' <img src = "%s" />\n' % image
kml = kml + ' <table border="1">' \
' <tr>\n' \
' <th>Stats</th>\n' \
' </tr>\n' \
' <tr>\n' \
' <td>Average Speed</td>\n' \
' <td>%.2f</td>\n' \
' </tr>\n' \
' <tr>\n' \
' <td>STDDEV Speed</td>\n' \
' <td>%.2f</td>\n' \
' </tr>\n' \
' <tr>\n' \
' <td>Max Gust</td>\n' \
' <td>%.2f</td>\n' \
' </tr>\n' \
' <tr>\n' \
' <td>Average Direction</td>\n' \
' <td>%.2f</td>\n' \
' </tr>\n' \
' <tr>\n' \
' <td>STDDEV Direction</td>\n' \
' <td>%.2f</td>\n' \
' </tr>\n' \
' </table>\n'% (stats[0][0], stats[0][1],
stats[1], stats[2][0],
stats[2][1])
kml = kml + ' ]]>\n' \
' </description>\n' \
' </Placemark>\n'
return kml
def statistics(self, data):
'''
Calculate the stats for speed and direction data
'''
spd = [spd[2] for spd in data]
gust = [gust[3] for gust in data]
dir = [dir[4] for dir in data]
samples = numpy.array(spd)
spd_mean = numpy.mean(samples)
spd_stddev = numpy.std(samples)
samples = numpy.array(gust)
gust_max = numpy.max(samples)
samples = numpy.array(dir)
direction_mean = stats.morestats.circmean(samples, 360, 0)
direction_stddev = stats.morestats.circstd(samples, 360, 0)
return (spd_mean, spd_stddev), (gust_max), (direction_mean, direction_stddev)
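# Sketch of the structure returned by statistics(), matching how _point_kml
# above indexes it: element 0 is (mean speed, speed stddev), element 1 is the
# maximum gust, and element 2 is (circular mean direction, circular stddev).
#
#     (spd_mean, spd_stddev), gust_max, (dir_mean, dir_stddev) = self.statistics(data)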
def create_time_series_image(self, data, plt_title, filename = ''):
'''
Create a time series image for the plot over the time span
'''
spd = [d[2] for d in data]
gust = [d[3] for d in data]
dir = [d[4] for d in data]
time = [mdates.date2num(datetime.datetime.strptime(d[1],
'%Y-%m-%d %H:%M:%S')) for d in data]
#fig = plt.figure(figsize=(8,8), dpi=80)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.plot_date(time, spd, 'b-')
#ax1.plot_date(time, gust, 'g-')
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('Speed(mph)', color = 'b')
ax2 = fig.add_subplot(212)
ax2.plot_date(time, dir, 'r.')
ax2.set_ylabel('Direction', color='r')
fig.autofmt_xdate()
plt.suptitle('Plot %s from %s to %s' % (plt_title,
self.start.strftime('%Y-%m-%d %H:%M:%S'),
self.end.strftime('%Y-%m-%d %H:%M:%S')))
if not filename:
plt.show()
plt.close()
else:
plt.savefig(filename)
plt.close()
return filename
def create_windrose(self, data, filename=''):
'''
Create a windrose from a dataset.
'''
spd = [d[2] for d in data]
gust = [d[3] for d in data]
dir = [d[4] for d in data]
time = [mdates.date2num(datetime.datetime.strptime(d[1],
'%Y-%m-%d %H:%M:%S')) for d in data]
if len(data) >= 1:
#fig = plt.figure(figsize=(8, 8), dpi=80, facecolor='w', edgecolor='w')
fig = plt.figure(facecolor='w', edgecolor='w')
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect, axisbg='w')
fig.add_axes(ax)
ax.bar(dir, spd, normed=True, opening=0.8, edgecolor='white')
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
if filename == '':
plt.show()
plt.close()
else:
plt.savefig(filename)
plt.close()
return filename
else:
if __debug__:
print('Unknown failure in bigbutte.create_image()')
return None
def create_field_kmz(self, filename):
'''
Write a kmz with a time series and wind rose. The stats are included
in the html bubble as well.
'''
sql = '''SELECT DISTINCT(plot_id) FROM mean_flow_obs
WHERE date_time BETWEEN ? AND ?'''
self.cursor.execute(sql, (self.start, self.end))
kmz = zipfile.ZipFile( filename, 'w', 0, True)
kmlfile = 'doc.kml'
fout = open(kmlfile, 'w')
fout.write('<Document>\n')
plots = self.cursor.fetchall()
for plot in plots:
plot = plot[0]
logging.info('Processing plot %s' % plot)
if filename == '':
filename = plot
if filename[-4:] != '.kmz':
filename = filename + '.kmz'
data = self.fetch_point_data(plot)
if not data:
continue
try:
pngfile = self.create_time_series_image(data, plot, plot + '_time.png')
rosefile = self.create_windrose(data, plot + '_rose.png')
kml = self._point_kml(plot, data, [pngfile,rosefile])
except Exception as e:
logging.warning('Unknown exception has occurred: %s' % e)
if os.path.exists(pngfile):
os.remove(pngfile)
if os.path.exists(rosefile):
os.remove(rosefile)
continue
fout.write(kml)
fout.flush()
kmz.write(pngfile)
kmz.write(rosefile)
os.remove(pngfile)
os.remove(rosefile)
fout.write('</Document>\n')
fout.close()
kmz.write(kmlfile)
kmz.close()
os.remove(kmlfile)
return filename
def create_kmz(self, plot, filename = ''):
'''
Write a kmz with a time series and wind rose. The stats are included
in the html bubble as well.
'''
if filename == '':
filename = plot
if filename[-4:] != '.kmz':
filename = filename + '.kmz'
data = self.fetch_point_data(plot)
pngfile = self.create_time_series_image(data, plot, plot + '_time.png')
rosefile = self.create_windrose(data, plot + '_rose.png')
kml = self._point_kml(plot, data, [pngfile,rosefile])
kmlfile = 'doc.kml'
fout = open(kmlfile, 'w')
fout.write(kml)
fout.close()
kmz = zipfile.ZipFile( filename, 'w', 0, True)
kmz.write(kmlfile)
kmz.write(pngfile)
kmz.write(rosefile)
kmz.close()
os.remove(kmlfile)
os.remove(pngfile)
os.remove(rosefile)
return filename
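# A minimal usage sketch; the database file, time window and plot id are
# hypothetical:
#
#     s = ShoWind('wind.sqlite', '2013-07-01T00:00:00', '2013-07-02T00:00:00')
#     s.create_kmz('L1G-01', 'l1g-01.kmz')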
def create_tables(self, dbfile):
'''
Create a new database and tables for mean flow. Two tables are created,
one for plot locations, another for the measured data. These are made
under the assumption of a set up similar to Big Butte.
'''
db = sqlite3.connect(dbfile)
curs = db.cursor()
sql = '''CREATE TABLE plot_location(plot_id TEXT NOT NULL,
datalogger_id TEXT,
geometry TEXT,
constraint plt_loc_pk
primary key(plot_id))'''
curs.execute(sql)
sql = ''' create table mean_flow_obs(plot_id text not null,
date_time datetime not null,
wind_speed double,
wind_gust double,
wind_dir double,
quality text,
sensor_qual text,
constraint mean_obs_pk
primary key(plot_id,date_time),
constraint mean_obs_fk
foreign key(plot_id) references
plot_location(plot_id))'''
curs.execute(sql)
db.commit()
db.close()
def import_hobo(self, path):
'''
Import csv files from hobo wind sensors. Import all records in all csv
files in the path provided. Tailored to hobo loggers.
'''
csv_files = [csv for csv in os.listdir(path) if os.path.splitext(csv)[1] == '.csv']
csv_files = [os.path.join(path, csv) for csv in csv_files]
if not csv_files:
logging.error('No csv files in directory')
return None
for csv in csv_files:
fin = open(csv)
plot = os.path.splitext(os.path.basename(csv))[0].upper()
#self.cursor.execute('INSERT INTO plot_location(plot_id) VALUES(?)',
# (plot,))
header = 0
for line in fin:
if header < 2:
header += 1
continue
line = line.split(',')
if len(line) != 5:
logging.error('Could not parse csv file properly, not '
'enough records. Check file: %s' % csv)
continue
date = datetime.datetime.strptime(line[1], '%m/%d/%y %I:%M:%S %p')
spd = float(line[2])
gust = float(line[3])
dir = float(line[4])
quality = 'OK'
if spd < 0.0:
logging.error('Invalid speed (%f) for plot:%s' % (spd, plot))
quality = 'SUSPECT'
if gust < 0.0:
logging.error('Invalid gust (%f) for plot:%s' % (gust, plot))
quality = 'SUSPECT'
if dir < 0.0 or dir > 360.0:
logging.error('Invalid dir (%f) for plot:%s' % (dir, plot))
quality = 'SUSPECT'
self.cursor.execute('''INSERT INTO mean_flow_obs(plot_id,
date_time, wind_speed, wind_gust,
wind_dir, quality)
VALUES(?, ?, ?, ?, ?, ?)''',
(plot, date, spd, gust, dir, quality))
self.db.commit()
class TestMisc(unittest.TestCase):
'''
Test the smaller functions
'''
def test_wkt_1(self):
''' Test various whitespace in wkt '''
point = _extract_xy('POINT(10.0 10.0)')
self.assertEqual(point, (10.0, 10.0))
def test_wkt_2(self):
''' Test various whitespace in wkt '''
point = _extract_xy(' POINT(10.0 10.0)')
self.assertEqual(point, (10.0, 10.0))
def test_wkt_3(self):
''' Test various whitespace in wkt '''
point = _extract_xy('POINT(10.0 10.0) ')
self.assertEqual(point, (10.0, 10.0))
def test_wkt_4(self):
''' Test various whitespace in wkt '''
point = _extract_xy('POINT( 10.0 10.0)')
self.assertEqual(point, (10.0, 10.0))
def test_wkt_5(self):
''' Test various whitespace in wkt '''
point = _extract_xy('POINT(10.0 10.0 )')
self.assertEqual(point, (10.0, 10.0))
def test_wkt_6(self):
''' Test various whitespace in wkt '''
point = _extract_xy('POINT( 10.0 10.0 )')
self.assertEqual(point, (10.0, 10.0))
def test_wkt_7(self):
''' Test various whitespace in wkt '''
point = _extract_xy('POINT ( 10.0 10.0)')
self.assertEqual(point, (10.0, 10.0))
def test_wkt_8(self):
''' Test various whitespace in wkt '''
point = _extract_xy('POINT ( 10.0 10.0 )')
self.assertEqual(point, (10.0, 10.0))
def test_wkt_9(self):
''' Test various whitespace in wkt '''
self.assertRaises(ValueError, _extract_xy, 'POLYGON ( 10.0 10.0 )')
def test_wkt_10(self):
''' Test various whitespace in wkt '''
self.assertRaises(ValueError, _extract_xy, 'POLYGON ( 10.0 10.0 10.0 )')
def test_wkt_11(self):
''' Test various decimal in wkt '''
point = _extract_xy('POINT (10 10)')
self.assertEqual(point, (10.0, 10.0))
def test_wkt_12(self):
''' Test various decimal in wkt '''
point = _extract_xy('POINT (10 10.0)')
self.assertEqual(point, (10.0, 10.0))
def test_wkt_13(self):
''' Test various decimal in wkt '''
point = _extract_xy('POINT (10.0 10)')
self.assertEqual(point, (10.0, 10.0))
class TestShoWind(unittest.TestCase):
'''
Test the access
'''
def usage():
print('showind.py [--write] [--windrose] [--timeseries]')
print(' [--start starttime] [--end endtime]')
print(' [--event name] plot output_file')
sys.exit(1)
if __name__ == '__main__':
if False:
unittest.main(verbosity=2)
plot = None
outfile = None
start = None
end = None
event = None
write = False
windrose = False
timeseries = False
args = sys.argv
i = 1
while i < len(args):
arg = args[i]
if arg == '--write':
write = True
elif arg == '--windrose':
windrose = True
elif arg == '--timeseries':
timeseries = True
elif arg == '--start':
i += 1
start = args[i]
elif arg == '--end':
i += 1
end = args[i]
elif arg == '--event':
i += 1
event = args[i]
elif plot is None:
plot = arg
elif outfile is None:
outfile = arg
i += 1
if not plot:
usage()
if not windrose and not timeseries and plot != 'all':
usage()
if not(start and end) and not event:
usage()
s = ShoWind('dan.sqlite', start, end)
if event:
s.cursor.execute('SELECT start, end FROM events WHERE name=?', (event,))
row = s.cursor.fetchone()
start = row[0]
end = row[1]
s.start = _import_date(start.replace(' ', 'T'))
s.end = _import_date(end.replace(' ', 'T'))
if plot != 'all':
d = s.fetch_point_data(plot)
if not d:
print('Could not fetch data for plot')
usage()
if write:
if outfile:
f = outfile
else:
f = plot + '.png'
else:
f = ''
if timeseries:
s.create_time_series_image(d, plot, f.replace('.', '_time.'))
if windrose:
s.create_windrose(d, f.replace('.', '_rose.'))
else:
if not outfile and event:
outfile = event + '.kmz'
elif not outfile:
outfile = 'out.kmz'
s.create_field_kmz(outfile)
|
|
#!/usr/bin/env python
#
# $Id$
#
"""psutil is a module providing convenience functions for managing
processes in a portable way by using Python.
"""
__version__ = "0.3.1"
version_info = tuple([int(num) for num in __version__.split('.')])
__all__ = [
# exceptions
"Error", "NoSuchProcess", "AccessDenied", "TimeoutExpired",
# constants
"NUM_CPUS", "TOTAL_PHYMEM", "BOOT_TIME",
"version_info", "__version__",
"STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
"STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
"STATUS_WAKING", "STATUS_LOCKED",
# classes
"Process", "Popen",
# functions
"test", "pid_exists", "get_pid_list", "process_iter", "get_process_list",
"phymem_usage", "virtmem_usage"
"cpu_times", "per_cpu_times", "cpu_percent", "per_cpu_percent",
"network_io_counters", "disk_io_counters",
]
import sys
import os
import time
import signal
import warnings
import errno
import subprocess
try:
import pwd
except ImportError:
pwd = None
from psutil.error import Error, NoSuchProcess, AccessDenied, TimeoutExpired
from psutil._compat import property
from psutil._common import (STATUS_RUNNING, STATUS_IDLE, STATUS_SLEEPING,
STATUS_DISK_SLEEP, STATUS_STOPPED,
STATUS_TRACING_STOP, STATUS_ZOMBIE, STATUS_DEAD,
STATUS_WAKING, STATUS_LOCKED
)
# import the appropriate module for our platform only
if sys.platform.lower().startswith("linux"):
import psutil._pslinux as _psplatform
from psutil._pslinux import (phymem_buffers,
cached_phymem,
IOPRIO_CLASS_NONE,
IOPRIO_CLASS_RT,
IOPRIO_CLASS_BE,
IOPRIO_CLASS_IDLE)
phymem_buffers = _psplatform.phymem_buffers
cached_phymem = _psplatform.cached_phymem
elif sys.platform.lower().startswith("win32"):
import psutil._psmswindows as _psplatform
from psutil._psmswindows import (ABOVE_NORMAL_PRIORITY_CLASS,
BELOW_NORMAL_PRIORITY_CLASS,
HIGH_PRIORITY_CLASS,
IDLE_PRIORITY_CLASS,
NORMAL_PRIORITY_CLASS,
REALTIME_PRIORITY_CLASS)
elif sys.platform.lower().startswith("darwin"):
import psutil._psosx as _psplatform
elif sys.platform.lower().startswith("freebsd"):
import psutil._psbsd as _psplatform
else:
raise NotImplementedError('platform %s is not supported' % sys.platform)
__all__.extend(_psplatform.__extra__all__)
NUM_CPUS = _psplatform.NUM_CPUS
BOOT_TIME = _psplatform.BOOT_TIME
TOTAL_PHYMEM = _psplatform.phymem_usage()[0]
get_pid_list = _psplatform.get_pid_list
pid_exists = _psplatform.pid_exists
class Process(object):
"""Represents an OS process."""
def __init__(self, pid):
"""Create a new Process object, raises NoSuchProcess if the PID
does not exist, and ValueError if the parameter is not an
integer PID.
"""
if not isinstance(pid, int):
raise ValueError("an integer is required")
if not pid_exists(pid):
raise NoSuchProcess(pid, None, "no process found with pid %s" % pid)
self._pid = pid
# platform-specific modules define an _psplatform.Process
# implementation class
self._platform_impl = _psplatform.Process(pid)
self._last_sys_cpu_times = None
self._last_proc_cpu_times = None
def __str__(self):
try:
pid = self.pid
name = repr(self.name)
except NoSuchProcess:
details = "(pid=%s (terminated))" % self.pid
except AccessDenied:
details = "(pid=%s)" % (self.pid)
else:
details = "(pid=%s, name=%s)" % (pid, name)
return "%s.%s%s" % (self.__class__.__module__,
self.__class__.__name__, details)
def __repr__(self):
return "<%s at %s>" % (self.__str__(), id(self))
@property
def pid(self):
"""The process pid."""
return self._pid
@property
def ppid(self):
"""The process parent pid."""
return self._platform_impl.get_process_ppid()
@property
def parent(self):
"""Return the parent process as a Process object. If no parent
pid is known return None.
"""
ppid = self.ppid
if ppid is not None:
try:
return Process(ppid)
except NoSuchProcess:
pass
@property
def name(self):
"""The process name."""
name = self._platform_impl.get_process_name()
if os.name == 'posix':
# On UNIX the name gets truncated to the first 15 characters.
# If it matches the first part of the cmdline we return that
# one instead because it's usually more explicative.
# Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
cmdline = self.cmdline
if cmdline:
extended_name = os.path.basename(cmdline[0])
if extended_name.startswith(name):
name = extended_name
# XXX - perhaps needs refactoring
self._platform_impl._process_name = name
return name
@property
def exe(self):
"""The process executable as an absolute path name."""
exe = self._platform_impl.get_process_exe()
# if we have the cmdline but not the exe, figure it out from argv[0]
if not exe:
cmdline = self.cmdline
if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
_exe = os.path.realpath(cmdline[0])
if os.path.isfile(_exe) and os.access(_exe, os.X_OK):
return _exe
if not exe:
raise AccessDenied(self.pid, self._platform_impl._process_name)
return exe
@property
def cmdline(self):
"""The command line process has been called with."""
return self._platform_impl.get_process_cmdline()
@property
def status(self):
"""The process current status as a STATUS_* constant."""
return self._platform_impl.get_process_status()
@property
def nice(self):
"""Get or set process niceness (priority)."""
return self._platform_impl.get_process_nice()
@nice.setter
def nice(self, value):
# invoked on "p.nice = num"; change process niceness
return self._platform_impl.set_process_nice(value)
if os.name == 'posix':
@property
def uids(self):
"""Return a named tuple denoting the process real,
effective, and saved user ids.
"""
return self._platform_impl.get_process_uids()
@property
def gids(self):
"""Return a named tuple denoting the process real,
effective, and saved group ids.
"""
return self._platform_impl.get_process_gids()
@property
def terminal(self):
"""The terminal associated with this process, if any,
else None.
"""
return self._platform_impl.get_process_terminal()
@property
def username(self):
"""The name of the user that owns the process.
On UNIX this is calculated by using *real* process uid.
"""
if os.name == 'posix':
if pwd is None:
# might happen if python was installed from sources
raise ImportError("requires pwd module shipped with standard python")
return pwd.getpwuid(self.uids.real).pw_name
else:
return self._platform_impl.get_process_username()
@property
def create_time(self):
"""The process creation time as a floating point number
expressed in seconds since the epoch, in UTC.
"""
return self._platform_impl.get_process_create_time()
# available for Windows and Linux only
if hasattr(_psplatform.Process, "get_process_cwd"):
def getcwd(self):
"""Return a string representing the process current working
directory.
"""
return self._platform_impl.get_process_cwd()
# Linux, BSD and Windows only
if hasattr(_psplatform.Process, "get_process_io_counters"):
def get_io_counters(self):
"""Return process I/O statistics as a namedtuple including
the number of read/write calls performed and the amount of
bytes read and written by the process.
"""
return self._platform_impl.get_process_io_counters()
# available only on Linux
if hasattr(_psplatform.Process, "get_process_ionice"):
def get_ionice(self):
"""Return process I/O niceness (priority) as a namedtuple."""
return self._platform_impl.get_process_ionice()
def set_ionice(self, ioclass, value=None):
"""Set process I/O niceness (priority).
ioclass is one of the IOPRIO_CLASS_* constants.
value is a number which goes from 0 to 7. The higher the
value, the lower the I/O priority of the process.
"""
return self._platform_impl.set_process_ionice(ioclass, value)
def get_num_threads(self):
"""Return the number of threads used by this process."""
return self._platform_impl.get_process_num_threads()
def get_threads(self):
"""Return threads opened by process as a list of namedtuples
including thread id and thread CPU times (user/system).
"""
return self._platform_impl.get_process_threads()
def get_children(self):
"""Return the children of this process as a list of Process
objects.
"""
if not self.is_running():
name = self._platform_impl._process_name
raise NoSuchProcess(self.pid, name)
retlist = []
for proc in process_iter():
try:
if proc.ppid == self.pid:
retlist.append(proc)
except NoSuchProcess:
pass
return retlist
def get_cpu_percent(self, interval=0.1):
"""Return a float representing the current process CPU
utilization as a percentage.
When interval is > 0.0 compares process times to system CPU
times elapsed before and after the interval (blocking).
When interval is 0.0 or None compares process times to system CPU
times elapsed since last call, returning immediately.
In this case it is recommended for accuracy that this function be
called with at least 0.1 seconds between calls.
"""
blocking = interval is not None and interval > 0.0
if blocking:
st1 = sum(cpu_times())
pt1 = self._platform_impl.get_cpu_times()
time.sleep(interval)
st2 = sum(cpu_times())
pt2 = self._platform_impl.get_cpu_times()
else:
st1 = self._last_sys_cpu_times
pt1 = self._last_proc_cpu_times
st2 = sum(cpu_times())
pt2 = self._platform_impl.get_cpu_times()
if st1 is None or pt1 is None:
self._last_sys_cpu_times = st2
self._last_proc_cpu_times = pt2
return 0.0
delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
delta_time = st2 - st1
# reset values for next call in case of interval == None
self._last_sys_cpu_times = st2
self._last_proc_cpu_times = pt2
try:
# the utilization split between all CPUs
overall_percent = (delta_proc / delta_time) * 100
except ZeroDivisionError:
# interval was too low
return 0.0
# the utilization of a single CPU
single_cpu_percent = overall_percent * NUM_CPUS
# ugly hack to avoid troubles with float precision issues
if single_cpu_percent > 100.0:
return 100.0
return round(single_cpu_percent, 1)
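# Usage sketch for Process.get_cpu_percent(), using the current process:
#
#     p = Process(os.getpid())
#     p.get_cpu_percent(interval=1.0)   # blocking: samples over a 1 second window
#     p.get_cpu_percent(interval=0)     # non-blocking: compares against the
#                                       # previous call; the first call returns 0.0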
def get_cpu_times(self):
"""Return a tuple whose values are process CPU user and system
times. The same as os.times() but per-process.
"""
return self._platform_impl.get_cpu_times()
def get_memory_info(self):
"""Return a tuple representing RSS (Resident Set Size) and VMS
(Virtual Memory Size) in bytes.
On UNIX RSS and VMS are the same values shown by ps.
On Windows RSS and VMS refer to "Mem Usage" and "VM Size" columns
of taskmgr.exe.
"""
return self._platform_impl.get_memory_info()
def get_memory_percent(self):
"""Compare physical system memory to process resident memory and
calculate process memory utilization as a percentage.
"""
rss = self._platform_impl.get_memory_info()[0]
try:
return (rss / float(TOTAL_PHYMEM)) * 100
except ZeroDivisionError:
return 0.0
def get_open_files(self):
"""Return files opened by process as a list of namedtuples
including absolute file name and file descriptor number.
"""
return self._platform_impl.get_open_files()
def get_connections(self):
"""Return TCP and UPD connections opened by process as a list
of namedtuples.
On BSD and OSX results for third party processes (!= os.getpid())
can differ depending on user privileges.
"""
return self._platform_impl.get_connections()
def is_running(self):
"""Return whether this process is running."""
try:
# Test for equality with another Process object based
# on pid and creation time.
# This pair is supposed to identify a Process instance
# uniquely over time (the PID alone is not enough as
# it might refer to a process which is gone in the meantime
# and whose PID has been reused by another process).
new_self = Process(self.pid)
p1 = (self.pid, self.create_time)
p2 = (new_self.pid, new_self.create_time)
except NoSuchProcess:
return False
else:
return p1 == p2
def send_signal(self, sig):
"""Send a signal to process (see signal module constants).
On Windows only SIGTERM is valid and is treated as an alias
for kill().
"""
# safety measure in case the current process has been killed in
# meantime and the kernel reused its PID
if not self.is_running():
name = self._platform_impl._process_name
raise NoSuchProcess(self.pid, name)
if os.name == 'posix':
try:
os.kill(self.pid, sig)
except OSError as err:
name = self._platform_impl._process_name
if err.errno == errno.ESRCH:
raise NoSuchProcess(self.pid, name)
if err.errno == errno.EPERM:
raise AccessDenied(self.pid, name)
raise
else:
if sig == signal.SIGTERM:
self._platform_impl.kill_process()
else:
raise ValueError("only SIGTERM is supported on Windows")
def suspend(self):
"""Suspend process execution."""
# safety measure in case the current process has been killed in
# meantime and the kernel reused its PID
if not self.is_running():
name = self._platform_impl._process_name
raise NoSuchProcess(self.pid, name)
# windows
if hasattr(self._platform_impl, "suspend_process"):
self._platform_impl.suspend_process()
else:
# posix
self.send_signal(signal.SIGSTOP)
def resume(self):
"""Resume process execution."""
# safety measure in case the current process has been killed in
# meantime and the kernel reused its PID
if not self.is_running():
name = self._platform_impl._process_name
raise NoSuchProcess(self.pid, name)
# windows
if hasattr(self._platform_impl, "resume_process"):
self._platform_impl.resume_process()
else:
# posix
self.send_signal(signal.SIGCONT)
def terminate(self):
"""Terminate the process with SIGTERM.
On Windows this is an alias for kill().
"""
self.send_signal(signal.SIGTERM)
def kill(self):
"""Kill the current process."""
# safety measure in case the current process has been killed in
# meantime and the kernel reused its PID
if not self.is_running():
name = self._platform_impl._process_name
raise NoSuchProcess(self.pid, name)
if os.name == 'posix':
self.send_signal(signal.SIGKILL)
else:
self._platform_impl.kill_process()
def wait(self, timeout=None):
"""Wait for process to terminate and, if process is a children
of the current one also return its exit code, else None.
"""
if timeout is not None and not timeout >= 0:
raise ValueError("timeout must be a positive integer")
return self._platform_impl.process_wait(timeout)
class Popen(Process):
"""A more convenient interface to stdlib subprocess module.
It starts a sub process and deals with it exactly as when using
subprocess.Popen class but in addition also provides all the
properties and methods of the psutil.Process class in a single interface:
>>> import psutil
>>> from subprocess import PIPE
>>> p = psutil.Popen(["/usr/bin/python", "-c", "print 'hi'"], stdout=PIPE)
>>> p.name
'python'
>>> p.uids
user(real=1000, effective=1000, saved=1000)
>>> p.username
'giampaolo'
>>> p.communicate()
('hi\n', None)
>>> p.terminate()
>>> p.wait(timeout=2)
0
>>>
For method names common to both classes such as kill(), terminate()
and wait(), psutil.Process implementation takes precedence.
For complete documentation refer to:
http://docs.python.org/library/subprocess.html
"""
def __init__(self, *args, **kwargs):
self.__subproc = subprocess.Popen(*args, **kwargs)
Process.__init__(self, self.__subproc.pid)
def __dir__(self):
return list(set(dir(Popen) + dir(subprocess.Popen)))
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
try:
return object.__getattribute__(self.__subproc, name)
except AttributeError:
raise AttributeError("%s instance has no attribute '%s'"
%(self.__class__.__name__, name))
def process_iter():
"""Return an iterator yielding a Process class instances for all
running processes on the local machine.
"""
pids = get_pid_list()
for pid in pids:
try:
yield Process(pid)
except (NoSuchProcess, AccessDenied):
continue
def get_process_list():
"""Return a list of Process class instances for all running
processes on the local machine.
"""
return list(process_iter())
def cpu_times(percpu=False):
"""Return system-wide CPU times as a namedtuple object.
Every CPU time represents the time the CPU has spent in the given mode.
The availability of the attributes varies depending on the platform.
Here follows a list of all available attributes:
- user
- system
- idle
- nice (UNIX)
- iowait (Linux)
- irq (Linux, FreeBSD)
- softirq (Linux)
When percpu is True return a list of namedtuples, one for each CPU.
First element of the list refers to first CPU, second element
to second CPU and so on.
The order of the list is consistent across calls.
"""
if not percpu:
return _psplatform.get_system_cpu_times()
else:
return _psplatform.get_system_per_cpu_times()
_last_cpu_times = cpu_times()
_last_per_cpu_times = cpu_times(percpu=True)
def cpu_percent(interval=0.1, percpu=False):
"""Return a float representing the current system-wide CPU
utilization as a percentage.
When interval is > 0.0 compares system CPU times elapsed before
and after the interval (blocking).
When interval is 0.0 or None compares system CPU times elapsed
since last call or module import, returning immediately.
    In this case it is recommended, for accuracy, that this function be
    called with at least 0.1 seconds between calls.
When percpu is True returns a list of floats representing the
utilization as a percentage for each CPU.
First element of the list refers to first CPU, second element
to second CPU and so on.
The order of the list is consistent across calls.
"""
global _last_cpu_times
global _last_per_cpu_times
blocking = interval is not None and interval > 0.0
def calculate(t1, t2):
t1_all = sum(t1)
t1_busy = t1_all - t1.idle
t2_all = sum(t2)
t2_busy = t2_all - t2.idle
# this usually indicates a float precision issue
if t2_busy <= t1_busy:
return 0.0
busy_delta = t2_busy - t1_busy
all_delta = t2_all - t1_all
busy_perc = (busy_delta / all_delta) * 100
return round(busy_perc, 1)
# system-wide usage
if not percpu:
if blocking:
t1 = cpu_times()
time.sleep(interval)
else:
t1 = _last_cpu_times
_last_cpu_times = cpu_times()
return calculate(t1, _last_cpu_times)
# per-cpu usage
else:
ret = []
if blocking:
tot1 = cpu_times(percpu=True)
time.sleep(interval)
else:
tot1 = _last_per_cpu_times
_last_per_cpu_times = cpu_times(percpu=True)
for t1, t2 in zip(tot1, _last_per_cpu_times):
ret.append(calculate(t1, t2))
return ret
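# A minimal usage sketch (not part of the original module) of the two calling
# styles documented above: a non-blocking call that compares against the last
# sample, and a blocking call that sleeps for the given interval. The interval
# values are arbitrary examples.
def _example_cpu_percent_usage():
    # Non-blocking: compares CPU times elapsed since the previous call
    # (or since module import on the very first call) and returns immediately.
    since_last_call = cpu_percent(interval=0.0)
    # Blocking: samples, sleeps one second, samples again and compares.
    one_second_average = cpu_percent(interval=1.0)
    # Per-CPU variant: one float per CPU, in a stable order across calls.
    per_cpu = cpu_percent(interval=0.5, percpu=True)
    return since_last_call, one_second_average, per_cpu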
def phymem_usage():
"""Return the amount of total, used and free physical memory
on the system in bytes plus the percentage usage.
"""
return _psplatform.phymem_usage()
def virtmem_usage():
"""Return the amount of total, used and free virtual memory
on the system in bytes plus the percentage usage.
On Linux they match the values returned by free command line utility.
On OS X and FreeBSD they represent the same values as returned by
sysctl vm.vmtotal. On Windows they are determined by reading the
PageFile values of MEMORYSTATUSEX structure.
"""
return _psplatform.virtmem_usage()
def disk_usage(path):
"""Return disk usage statistics about the given path as a namedtuple
including total, used and free space expressed in bytes plus the
percentage usage.
"""
return _psplatform.get_disk_usage(path)
def disk_partitions(all=False):
"""Return mounted partitions as a list of namedtuples including
device, mount point and filesystem type.
If "all" parameter is False return physical devices only and ignore
all others.
"""
return _psplatform.disk_partitions(all)
if hasattr(_psplatform, "network_io_counters"):
def network_io_counters(pernic=False):
"""Return network I/O statistics as a namedtuple including:
- number of bytes sent
- number of bytes received
- number of packets sent
- number of packets received
If pernic is True return the same information for every
network interface installed on the system as a dictionary
with network interface names as the keys and the namedtuple
described above as the values.
"""
from psutil._common import ntuple_net_iostat
rawdict = _psplatform.network_io_counters()
if pernic:
for nic, fields in rawdict.iteritems():
rawdict[nic] = ntuple_net_iostat(*fields)
return rawdict
else:
bytes_sent, bytes_recv, packets_sent, packets_recv = 0, 0, 0, 0
for _, fields in rawdict.iteritems():
bytes_sent += fields[0]
bytes_recv += fields[1]
packets_sent += fields[2]
packets_recv += fields[3]
return ntuple_net_iostat(bytes_sent, bytes_recv,
packets_sent, packets_recv)
if hasattr(_psplatform, "disk_io_counters"):
def disk_io_counters(perdisk=False):
"""Return system disk I/O statistics as a namedtuple including:
- number of bytes read
- number of bytes written
- number of reads
- number of writes
         - time spent reading from disk (in milliseconds)
         - time spent writing to disk (in milliseconds)
If perdisk is True return the same information for every
physical disk installed on the system as a dictionary
        with partition names as the keys and the namedtuple
described above as the values.
"""
from psutil._common import ntuple_disk_iostat
rawdict = _psplatform.disk_io_counters()
if perdisk:
for disk, fields in rawdict.iteritems():
rawdict[disk] = ntuple_disk_iostat(*fields)
return rawdict
else:
reads, writes, rbytes, wbytes, rtime, wtime = 0, 0, 0, 0, 0, 0
for _, fields in rawdict.iteritems():
reads += fields[0]
writes += fields[1]
rbytes += fields[2]
wbytes += fields[3]
rtime += fields[4]
wtime += fields[5]
return ntuple_disk_iostat(reads, writes, rbytes, wbytes, rtime, wtime)
def _deprecated(replacement):
# a decorator which can be used to mark functions as deprecated
def outer(fun):
def inner(*args, **kwargs):
msg = "psutil.%s is deprecated; use %s instead" \
% (fun.__name__, replacement)
warnings.warn(msg, category=DeprecationWarning, stacklevel=2)
return fun(*args, **kwargs)
return inner
return outer
# --- deprecated functions
@_deprecated("psutil.phymem_usage")
def avail_phymem():
return phymem_usage().free
@_deprecated("psutil.phymem_usage")
def used_phymem():
return phymem_usage().used
@_deprecated("psutil.virtmem_usage")
def total_virtmem():
return virtmem_usage().total
@_deprecated("psutil.virtmem_usage")
def used_virtmem():
return virtmem_usage().used
@_deprecated("psutil.virtmem_usage")
def avail_virtmem():
return virtmem_usage().free
def test():
"""List info of all currently running processes emulating a
ps -aux output.
"""
import datetime
today_day = datetime.date.today()
def get_process_info(pid):
proc = Process(pid)
user = proc.username
if os.name == 'nt' and '\\' in user:
user = user.split('\\')[1]
pid = proc.pid
cpu = round(proc.get_cpu_percent(interval=None), 1)
mem = round(proc.get_memory_percent(), 1)
rss, vsz = [x / 1024 for x in proc.get_memory_info()]
# If process has been created today print H:M, else MonthDay
start = datetime.datetime.fromtimestamp(proc.create_time)
if start.date() == today_day:
start = start.strftime("%H:%M")
else:
start = start.strftime("%b%d")
cputime = time.strftime("%M:%S", time.localtime(sum(proc.get_cpu_times())))
cmd = ' '.join(proc.cmdline)
        # where cmdline is not available UNIX shows the process name
        # between square brackets
if not cmd:
cmd = "[%s]" % proc.name
return "%-9s %-5s %-4s %4s %7s %7s %5s %8s %s" \
% (user, pid, cpu, mem, vsz, rss, start, cputime, cmd)
print "%-9s %-5s %-4s %4s %7s %7s %5s %7s %s" \
% ("USER", "PID", "%CPU", "%MEM", "VSZ", "RSS", "START", "TIME", "COMMAND")
pids = get_pid_list()
pids.sort()
for pid in pids:
try:
line = get_process_info(pid)
except (AccessDenied, NoSuchProcess):
pass
else:
print line
if __name__ == "__main__":
test()
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
import os
import re
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
v1_library = gapic.ruby_library(
'containeranalysis', 'v1', artman_output_name='google-cloud-ruby/google-cloud-containeranalysis',
config_path='/google/devtools/containeranalysis/artman_containeranalysis_v1.yaml'
)
s.copy(v1_library / 'lib')
s.copy(v1_library / 'test')
s.copy(v1_library / 'LICENSE')
s.copy(v1_library / '.gitignore')
s.copy(v1_library / '.yardopts')
s.copy(v1_library / 'google-cloud-container_analysis.gemspec', merge=ruby.merge_gemspec)
# Copy common templates
templates = gcp.CommonTemplates().ruby_library()
s.copy(templates)
# Hack grpc service class name and location
s.replace(
'lib/google/devtools/containeranalysis/v1/containeranalysis_services_pb.rb',
' module ContainerAnalysis\n',
' module ContainerAnalysisService\n'
)
s.replace(
[
'lib/google/cloud/container_analysis/v1/container_analysis_client.rb',
'test/google/cloud/container_analysis/v1/container_analysis_client_test.rb'
],
'Google::Devtools::Containeranalysis::V1::ContainerAnalysis::',
'Google::Cloud::ContainerAnalysis::V1::ContainerAnalysisService::'
)
# Support for service_address
s.replace(
[
'lib/google/cloud/container_analysis.rb',
'lib/google/cloud/container_analysis/v*.rb',
'lib/google/cloud/container_analysis/v*/*_client.rb'
],
'\n(\\s+)#(\\s+)@param exception_transformer',
'\n\\1#\\2@param service_address [String]\n' +
'\\1#\\2 Override for the service hostname, or `nil` to leave as the default.\n' +
'\\1#\\2@param service_port [Integer]\n' +
'\\1#\\2 Override for the service port, or `nil` to leave as the default.\n' +
'\\1#\\2@param exception_transformer'
)
s.replace(
[
'lib/google/cloud/container_analysis/v*.rb',
'lib/google/cloud/container_analysis/v*/*_client.rb'
],
'\n(\\s+)metadata: nil,\n\\s+exception_transformer: nil,\n',
'\n\\1metadata: nil,\n\\1service_address: nil,\n\\1service_port: nil,\n\\1exception_transformer: nil,\n'
)
s.replace(
[
'lib/google/cloud/container_analysis/v*.rb',
'lib/google/cloud/container_analysis/v*/*_client.rb'
],
',\n(\\s+)lib_name: lib_name,\n\\s+lib_version: lib_version',
',\n\\1lib_name: lib_name,\n\\1service_address: service_address,\n\\1service_port: service_port,\n\\1lib_version: lib_version'
)
s.replace(
'lib/google/cloud/container_analysis/v*/*_client.rb',
'service_path = self\\.class::SERVICE_ADDRESS',
'service_path = service_address || self.class::SERVICE_ADDRESS'
)
s.replace(
'lib/google/cloud/container_analysis/v*/*_client.rb',
'port = self\\.class::DEFAULT_SERVICE_PORT',
'port = service_port || self.class::DEFAULT_SERVICE_PORT'
)
# Container analysis should depend on grafeas-client for now
s.replace(
'google-cloud-container_analysis.gemspec',
'\n\n gem.add_dependency "google-gax", "~> 1\\.[\\d\\.]+"',
'\n\n gem.add_dependency "grafeas-client", "~> 0.1"\n gem.add_dependency "google-gax", "~> 1.7"',
)
s.replace(
'lib/google/cloud/container_analysis.rb',
'\n\nrequire "google/gax"\n',
'\n\nrequire "grafeas"\nrequire "google/gax"\n'
)
# Expose the grafeas client as an attribute of the container_analysis client
s.replace(
'lib/google/cloud/container_analysis/v*/*_client.rb',
'\n\n(\\s+)(credentials \\|\\|= \\S+)\n',
'\n\n\\1\\2\n\n\\1@grafeas_client = ::Grafeas.new(\n\\1 credentials: credentials, scopes: scopes, client_config: client_config,\n\\1 timeout: timeout, lib_name: lib_name, lib_version: lib_version,\n\\1 service_address: service_address, service_port: service_port, metadata: metadata)\n'
)
s.replace(
'lib/google/cloud/container_analysis/v*/*_client.rb',
'\n(\\s+)attr_reader :container_analysis_stub\n',
'\n\\1attr_reader :container_analysis_stub\n\n\\1# @return [Grafeas::V1::GrafeasClient] a client for the Grafeas service\n\\1attr_reader :grafeas_client\n'
)
# Credentials env vars
s.replace(
'lib/**/credentials.rb',
'CONTAINERANALYSIS_',
'CONTAINER_ANALYSIS_'
)
# https://github.com/googleapis/gapic-generator/issues/2196
s.replace(
[
'README.md',
'lib/google/cloud/container_analysis.rb',
'lib/google/cloud/container_analysis/v1.rb'
],
'\\[Product Documentation\\]: https://cloud\\.google\\.com/containeranalysis\n',
'[Product Documentation]: https://cloud.google.com/container-registry/docs/container-analysis\n')
# https://github.com/googleapis/gapic-generator/issues/2242
def escape_braces(match):
expr = re.compile('^([^`]*(`[^`]*`[^`]*)*)([^`#\\$\\\\])\\{([\\w,]+)\\}')
content = match.group(0)
while True:
content, count = expr.subn('\\1\\3\\\\\\\\{\\4}', content)
if count == 0:
return content
s.replace(
'lib/google/cloud/container_analysis/v1/**/*.rb',
'\n(\\s+)#[^\n]*[^\n#\\$\\\\]\\{[\\w,]+\\}',
escape_braces)
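# A small illustration (not part of the generated synth steps) of what the
# escape_braces replacement does, using plain re.sub as a stand-in for
# s.replace and a hypothetical documentation line: the "{project_id}"
# placeholder gets backslash-escaped so YARD does not interpret it.
def _escape_braces_demo():
    sample = '\n  # The name in the form projects/{project_id}.'
    return re.sub(
        '\n(\\s+)#[^\n]*[^\n#\\$\\\\]\\{[\\w,]+\\}',
        escape_braces,
        sample)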
# https://github.com/googleapis/gapic-generator/issues/2243
s.replace(
'lib/google/cloud/container_analysis/*/*_client.rb',
'(\n\\s+class \\w+Client\n)(\\s+)(attr_reader :\\w+_stub)',
'\\1\\2# @private\n\\2\\3')
# https://github.com/googleapis/gapic-generator/issues/2279
s.replace(
'lib/google/**/*.rb',
'\\A(((#[^\n]*)?\n)*# (Copyright \\d+|Generated by the protocol buffer compiler)[^\n]+\n(#[^\n]*\n)*\n)([^\n])',
'\\1\n\\6')
# https://github.com/googleapis/gapic-generator/issues/2323
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://github\\.com/GoogleCloudPlatform/google-cloud-ruby',
'https://github.com/googleapis/google-cloud-ruby'
)
s.replace(
[
'lib/**/*.rb',
'README.md'
],
'https://googlecloudplatform\\.github\\.io/google-cloud-ruby',
'https://googleapis.github.io/google-cloud-ruby'
)
# https://github.com/googleapis/gapic-generator/issues/2393
s.replace(
'google-cloud-container_analysis.gemspec',
'gem.add_development_dependency "rubocop".*$',
'gem.add_development_dependency "rubocop", "~> 0.64.0"'
)
s.replace(
'google-cloud-container_analysis.gemspec',
'"README.md", "LICENSE"',
'"README.md", "AUTHENTICATION.md", "LICENSE"'
)
s.replace(
'.yardopts',
'README.md\n',
'README.md\nAUTHENTICATION.md\nLICENSE\n'
)
# https://github.com/googleapis/google-cloud-ruby/issues/3058
s.replace(
'google-cloud-container_analysis.gemspec',
'\nGem::Specification.new do',
'require File.expand_path("../lib/google/cloud/container_analysis/version", __FILE__)\n\nGem::Specification.new do'
)
s.replace(
'google-cloud-container_analysis.gemspec',
'(gem.version\s+=\s+).\d+.\d+.\d.*$',
'\\1Google::Cloud::ContainerAnalysis::VERSION'
)
s.replace(
'lib/google/cloud/container_analysis/v1/*_client.rb',
'(require \".*credentials\"\n)\n',
'\\1require "google/cloud/container_analysis/version"\n\n'
)
s.replace(
'lib/google/cloud/container_analysis/v1/*_client.rb',
'Gem.loaded_specs\[.*\]\.version\.version',
'Google::Cloud::ContainerAnalysis::VERSION'
)
# Fix links for devsite migration
s.replace(
'lib/**/*.rb',
'https://googleapis.github.io/google-cloud-ruby/#/docs/google-cloud-logging/latest/google/cloud/logging/logger',
'https://googleapis.dev/ruby/google-cloud-logging/latest'
)
s.replace(
'lib/**/*.rb',
'https://googleapis.github.io/google-cloud-ruby/#/docs/.*/authentication',
'https://googleapis.dev/ruby/google-cloud-container_analysis/latest/file.AUTHENTICATION.html'
)
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.utils import filters
POLICY_CHECK = getattr(settings, "POLICY_CHECK_FUNCTION",
lambda policy, request, target: True)
class DeleteGroup(tables.DeleteAction):
data_type_singular = _("Security Group")
data_type_plural = _("Security Groups")
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def allowed(self, request, security_group=None):
policy_target = self.get_policy_target(request, security_group)
if api.base.is_service_enabled(request, "network"):
policy = (("network", "delete_security_group"),)
else:
policy = (("compute", "compute_extension:security_groups"),)
if not POLICY_CHECK(policy, request, policy_target):
return False
if not security_group:
return True
return security_group.name != 'default'
def delete(self, request, obj_id):
api.network.security_group_delete(request, obj_id)
class CreateGroup(tables.LinkAction):
name = "create"
verbose_name = _("Create Security Group")
url = "horizon:project:access_and_security:security_groups:create"
classes = ("ajax-modal",)
icon = "plus"
def allowed(self, request, security_group=None):
if api.base.is_service_enabled(request, "network"):
policy = (("network", "create_security_group"),)
else:
policy = (("compute", "compute_extension:security_groups"),)
return POLICY_CHECK(policy, request, target={})
class EditGroup(tables.LinkAction):
name = "edit"
verbose_name = _("Edit Security Group")
url = "horizon:project:access_and_security:security_groups:update"
classes = ("ajax-modal",)
icon = "pencil"
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def allowed(self, request, security_group=None):
policy_target = self.get_policy_target(request, security_group)
if api.base.is_service_enabled(request, "network"):
policy = (("network", "update_security_group"),)
else:
policy = (("compute", "compute_extension:security_groups"),)
if not POLICY_CHECK(policy, request, policy_target):
return False
if not security_group:
return True
return security_group.name != 'default'
class ManageRules(tables.LinkAction):
name = "manage_rules"
verbose_name = _("Manage Rules")
url = "horizon:project:access_and_security:security_groups:detail"
icon = "pencil"
def get_policy_target(self, request, datum=None):
project_id = None
if datum:
project_id = getattr(datum, 'tenant_id', None)
return {"project_id": project_id}
def allowed(self, request, security_group=None):
policy_target = self.get_policy_target(request, security_group)
if api.base.is_service_enabled(request, "network"):
policy = (("network", "get_security_group"),)
else:
policy = (("compute", "compute_extension:security_groups"),)
return POLICY_CHECK(policy, request, policy_target)
class SecurityGroupsTable(tables.DataTable):
name = tables.Column("name", verbose_name=_("Name"))
description = tables.Column("description", verbose_name=_("Description"))
def sanitize_id(self, obj_id):
return filters.get_int_or_uuid(obj_id)
class Meta:
name = "security_groups"
verbose_name = _("Security Groups")
table_actions = (CreateGroup, DeleteGroup)
row_actions = (ManageRules, EditGroup, DeleteGroup)
class CreateRule(tables.LinkAction):
name = "add_rule"
verbose_name = _("Add Rule")
url = "horizon:project:access_and_security:security_groups:add_rule"
classes = ("ajax-modal",)
icon = "plus"
def allowed(self, request, security_group_rule=None):
if api.base.is_service_enabled(request, "network"):
policy = (("network", "create_security_group_rule"),)
else:
policy = (("compute", "compute_extension:security_groups"),)
return POLICY_CHECK(policy, request, target={})
def get_link_url(self):
return reverse(self.url, args=[self.table.kwargs['security_group_id']])
class DeleteRule(tables.DeleteAction):
data_type_singular = _("Rule")
data_type_plural = _("Rules")
def allowed(self, request, security_group_rule=None):
if api.base.is_service_enabled(request, "network"):
policy = (("network", "delete_security_group_rule"),)
else:
policy = (("compute", "compute_extension:security_groups"),)
return POLICY_CHECK(policy, request, target={})
def delete(self, request, obj_id):
api.network.security_group_rule_delete(request, obj_id)
def get_success_url(self, request):
sg_id = self.table.kwargs['security_group_id']
return reverse("horizon:project:access_and_security:"
"security_groups:detail", args=[sg_id])
def get_remote(rule):
if 'cidr' in rule.ip_range:
if rule.ip_range['cidr'] is None:
range = '::/0' if rule.ethertype == 'IPv6' else '0.0.0.0/0'
else:
range = rule.ip_range['cidr']
return range + ' (CIDR)'
elif 'name' in rule.group:
return rule.group['name']
else:
return None
def get_port_range(rule):
ip_proto = rule.ip_protocol
if rule.from_port == rule.to_port:
return check_rule_template(rule.from_port, ip_proto)
else:
return (u"%(from)s - %(to)s" %
{'from': check_rule_template(rule.from_port, ip_proto),
'to': check_rule_template(rule.to_port, ip_proto)})
def filter_direction(direction):
if direction is None or direction.lower() == 'ingress':
return _('Ingress')
else:
return _('Egress')
def filter_protocol(protocol):
if protocol is None:
return _('Any')
return unicode.upper(protocol)
def check_rule_template(port, ip_proto):
rules_dict = getattr(settings, 'SECURITY_GROUP_RULES', {})
if not rules_dict:
return port
templ_rule = filter(lambda rule: str(port) == rule['from_port']
and str(port) == rule['to_port']
and ip_proto == rule['ip_protocol'],
[rule for rule in rules_dict.values()])
if templ_rule:
return u"%(from_port)s (%(name)s)" % templ_rule[0]
return port
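# A standalone sketch (not used by the table code) of how check_rule_template
# maps a port/protocol pair onto a SECURITY_GROUP_RULES template; the template
# dict below is a hypothetical settings value with the keys the lookup expects.
def _example_rule_template_lookup(port=22, ip_proto='tcp'):
    rules_dict = {
        'ssh': {'name': 'SSH', 'ip_protocol': 'tcp',
                'from_port': '22', 'to_port': '22'},
    }
    for rule in rules_dict.values():
        if (str(port) == rule['from_port'] and
                str(port) == rule['to_port'] and
                ip_proto == rule['ip_protocol']):
            return u"%(from_port)s (%(name)s)" % rule  # u"22 (SSH)"
    return port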
class RulesTable(tables.DataTable):
direction = tables.Column("direction",
verbose_name=_("Direction"),
filters=(filter_direction,))
ethertype = tables.Column("ethertype",
verbose_name=_("Ether Type"))
protocol = tables.Column("ip_protocol",
verbose_name=_("IP Protocol"),
filters=(filter_protocol,))
port_range = tables.Column(get_port_range,
verbose_name=_("Port Range"))
remote = tables.Column(get_remote, verbose_name=_("Remote"))
def sanitize_id(self, obj_id):
return filters.get_int_or_uuid(obj_id)
def get_object_display(self, rule):
return unicode(rule)
class Meta:
name = "rules"
verbose_name = _("Security Group Rules")
table_actions = (CreateRule, DeleteRule)
row_actions = (DeleteRule,)
|
|
"""
Return config information
"""
import os
import re
import salt.syspaths as syspaths
import salt.utils.data
import salt.utils.files
# Set up the default values for all systems
DEFAULTS = {
"mongo.db": "salt",
"mongo.host": "salt",
"mongo.password": "",
"mongo.port": 27017,
"mongo.user": "",
"redis.db": "0",
"redis.host": "salt",
"redis.port": 6379,
"test.foo": "unconfigured",
"ca.cert_base_path": "/etc/pki",
"solr.cores": [],
"solr.host": "localhost",
"solr.port": "8983",
"solr.baseurl": "/solr",
"solr.type": "master",
"solr.request_timeout": None,
"solr.init_script": "/etc/rc.d/solr",
"solr.dih.import_options": {
"clean": False,
"optimize": True,
"commit": True,
"verbose": False,
},
"solr.backup_path": None,
"solr.num_backups": 1,
"poudriere.config": "/usr/local/etc/poudriere.conf",
"poudriere.config_dir": "/usr/local/etc/poudriere.d",
"ldap.server": "localhost",
"ldap.port": "389",
"ldap.tls": False,
"ldap.scope": 2,
"ldap.attrs": None,
"ldap.binddn": "",
"ldap.bindpw": "",
"hosts.file": "/etc/hosts",
"aliases.file": "/etc/aliases",
"virt": {
"tunnel": False,
"images": os.path.join(syspaths.SRV_ROOT_DIR, "salt-images"),
},
}
def backup_mode(backup=""):
"""
Return the backup mode
CLI Example:
.. code-block:: bash
salt '*' config.backup_mode
"""
if backup:
return backup
return option("backup_mode")
def manage_mode(mode):
"""
Return a mode value, normalized to a string
CLI Example:
.. code-block:: bash
salt '*' config.manage_mode
"""
# config.manage_mode should no longer be invoked from the __salt__ dunder
# in Salt code, this function is only being left here for backwards
# compatibility.
return salt.utils.files.normalize_mode(mode)
def valid_fileproto(uri):
"""
Returns a boolean value based on whether or not the URI passed has a valid
remote file protocol designation
CLI Example:
.. code-block:: bash
salt '*' config.valid_fileproto salt://path/to/file
"""
try:
return bool(re.match("^(?:salt|https?|ftp)://", uri))
except Exception: # pylint: disable=broad-except
return False
def option(value, default="", omit_opts=False, omit_master=False, omit_pillar=False):
"""
Pass in a generic option and receive the value that will be assigned
CLI Example:
.. code-block:: bash
salt '*' config.option redis.host
"""
if not omit_opts:
if value in __opts__:
return __opts__[value]
if not omit_master:
if value in __pillar__.get("master", {}):
return __pillar__["master"][value]
if not omit_pillar:
if value in __pillar__:
return __pillar__[value]
if value in DEFAULTS:
return DEFAULTS[value]
return default
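# A simplified standalone sketch (not part of this module) of the lookup order
# option() implements above: minion opts, then pillar["master"], then pillar,
# then DEFAULTS, then the caller-supplied default. The opts/pillar contents
# below are hypothetical.
def _example_option_lookup(value, default=""):
    opts = {"backup_mode": "minion"}
    pillar = {"master": {"redis.host": "redis.example.com"}, "test.foo": "bar"}
    for source in (opts, pillar.get("master", {}), pillar, DEFAULTS):
        if value in source:
            return source[value]
    return default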
def merge(value, default="", omit_opts=False, omit_master=False, omit_pillar=False):
"""
Retrieves an option based on key, merging all matches.
Same as ``option()`` except that it merges all matches, rather than taking
the first match.
CLI Example:
.. code-block:: bash
salt '*' config.merge schedule
"""
ret = None
if not omit_opts:
if value in __opts__:
ret = __opts__[value]
if isinstance(ret, str):
return ret
if not omit_master:
if value in __pillar__.get("master", {}):
tmp = __pillar__["master"][value]
if ret is None:
ret = tmp
if isinstance(ret, str):
return ret
elif isinstance(ret, dict) and isinstance(tmp, dict):
tmp.update(ret)
ret = tmp
elif isinstance(ret, (list, tuple)) and isinstance(tmp, (list, tuple)):
ret = list(ret) + list(tmp)
if not omit_pillar:
if value in __pillar__:
tmp = __pillar__[value]
if ret is None:
ret = tmp
if isinstance(ret, str):
return ret
elif isinstance(ret, dict) and isinstance(tmp, dict):
tmp.update(ret)
ret = tmp
elif isinstance(ret, (list, tuple)) and isinstance(tmp, (list, tuple)):
ret = list(ret) + list(tmp)
if ret is None and value in DEFAULTS:
return DEFAULTS[value]
return ret or default
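# A standalone sketch (not part of this module) of how merge() combines two
# dict matches above: tmp.update(ret) overlays the higher-priority match (opts)
# onto the lower-priority one (pillar), so opts values win while pillar-only
# keys survive. The dicts are hypothetical.
def _example_merge_precedence():
    opts_match = {"maxrunning": 1}
    pillar_match = {"maxrunning": 3, "splay": 60}
    combined = dict(pillar_match)
    combined.update(opts_match)
    return combined  # {"maxrunning": 1, "splay": 60}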
def get(key, default=""):
"""
.. versionadded:: 0.14.0
    Attempt to retrieve the named value from opts, pillar, grains or the master
    config. If the named value is not available, return the passed default.
The default return is an empty string.
The value can also represent a value in a nested dict using a ":" delimiter
for the dict. This means that if a dict looks like this::
{'pkg': {'apache': 'httpd'}}
To retrieve the value associated with the apache key in the pkg dict this
key can be passed::
pkg:apache
This routine traverses these data stores in this order:
- Local minion config (opts)
- Minion's grains
- Minion's pillar
- Master config
CLI Example:
.. code-block:: bash
salt '*' config.get pkg:apache
"""
ret = salt.utils.data.traverse_dict_and_list(__opts__, key, "_|-")
if ret != "_|-":
return ret
ret = salt.utils.data.traverse_dict_and_list(__grains__, key, "_|-")
if ret != "_|-":
return ret
ret = salt.utils.data.traverse_dict_and_list(__pillar__, key, "_|-")
if ret != "_|-":
return ret
ret = salt.utils.data.traverse_dict_and_list(
__pillar__.get("master", {}), key, "_|-"
)
if ret != "_|-":
return ret
return default
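# A simplified stand-in (not the real salt.utils.data.traverse_dict_and_list)
# showing how a "pkg:apache" style key from the docstring walks a nested dict;
# the data below mirrors the docstring example.
def _example_nested_get(key="pkg:apache"):
    data = {"pkg": {"apache": "httpd"}}
    current = data
    for part in key.split(":"):
        if not isinstance(current, dict) or part not in current:
            return ""  # fall back to the default, as get() does
        current = current[part]
    return current  # "httpd"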
def dot_vals(value):
"""
    Pass in a configuration value that should be preceded by the module name
    and a dot; this will return a dict of all matching key/value pairs.
CLI Example:
.. code-block:: bash
salt '*' config.dot_vals host
"""
ret = {}
for key, val in __pillar__.get("master", {}).items():
if key.startswith("{}.".format(value)):
ret[key] = val
for key, val in __opts__.items():
if key.startswith("{}.".format(value)):
ret[key] = val
return ret
|
|
#!/usr/bin/env python
"""These are flows designed to discover information about the host."""
from grr.client.client_actions import admin as admin_actions
from grr.client.client_actions import cloud as cloud_actions
from grr.client.client_actions import operating_system as operating_system_actions
from grr.client.client_actions import standard as standard_actions
from grr.lib import aff4
from grr.lib import client_index
from grr.lib import config_lib
from grr.lib import flow
from grr.lib import queues
from grr.lib import rdfvalue
from grr.lib.aff4_objects import aff4_grr
from grr.lib.aff4_objects import standard
from grr.lib.rdfvalues import cloud
from grr.lib.rdfvalues import paths as rdf_paths
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import flows_pb2
class EnrolmentInterrogateEvent(flow.EventListener):
"""An event handler which will schedule interrogation on client enrollment."""
EVENTS = ["ClientEnrollment"]
well_known_session_id = rdfvalue.SessionID(
queue=queues.ENROLLMENT, flow_name="Interrogate")
def CheckSource(self, source):
if not isinstance(source, rdfvalue.SessionID):
try:
source = rdfvalue.SessionID(source)
except rdfvalue.InitializeError:
return False
return source.Queue() == queues.ENROLLMENT
@flow.EventHandler(source_restriction=True)
def ProcessMessage(self, message=None, event=None):
_ = message
flow.GRRFlow.StartFlow(
client_id=event,
flow_name="Interrogate",
queue=queues.ENROLLMENT,
token=self.token)
class InterrogateArgs(rdf_structs.RDFProtoStruct):
protobuf = flows_pb2.InterrogateArgs
class Interrogate(flow.GRRFlow):
"""Interrogate various things about the host."""
category = "/Administrative/"
client = None
args_type = InterrogateArgs
behaviours = flow.GRRFlow.behaviours + "BASIC"
def _OpenClient(self, mode="r"):
return aff4.FACTORY.Open(
self.client_id,
aff4_type=aff4_grr.VFSGRRClient,
mode=mode,
token=self.token)
def _CreateClient(self, mode="w"):
return aff4.FACTORY.Create(
self.client_id,
aff4_type=aff4_grr.VFSGRRClient,
mode=mode,
token=self.token)
@flow.StateHandler()
def Start(self):
"""Start off all the tests."""
# Create the objects we need to exist.
self.Load()
# Make sure we always have a VFSDirectory with a pathspec at fs/os
pathspec = rdf_paths.PathSpec(
path="/", pathtype=rdf_paths.PathSpec.PathType.OS)
urn = aff4_grr.VFSGRRClient.PathspecToURN(pathspec, self.client_id)
with aff4.FACTORY.Create(
urn, standard.VFSDirectory, mode="w", token=self.token) as fd:
fd.Set(fd.Schema.PATHSPEC, pathspec)
self.CallClient(admin_actions.GetPlatformInfo, next_state="Platform")
self.CallClient(
standard_actions.GetMemorySize, next_state="StoreMemorySize")
self.CallClient(
operating_system_actions.GetInstallDate, next_state="InstallDate")
self.CallClient(admin_actions.GetClientInfo, next_state="ClientInfo")
self.CallClient(
admin_actions.GetConfiguration, next_state="ClientConfiguration")
self.CallClient(
admin_actions.GetLibraryVersions, next_state="ClientLibraries")
self.CallClient(
operating_system_actions.EnumerateInterfaces,
next_state="EnumerateInterfaces")
self.CallClient(
operating_system_actions.EnumerateFilesystems,
next_state="EnumerateFilesystems")
@flow.StateHandler()
def CloudMetadata(self, responses):
"""Process cloud metadata and store in the client."""
if not responses.success:
# We want to log this but it's not serious enough to kill the whole flow.
self.Log("Failed to collect cloud metadata: %s" % responses.status)
return
metadata_responses = responses.First()
# Expected for non-cloud machines.
if not metadata_responses:
return
with self._CreateClient() as client:
client.Set(
client.Schema.CLOUD_INSTANCE(
cloud.ConvertCloudMetadataResponsesToCloudInstance(
metadata_responses)))
@flow.StateHandler()
def StoreMemorySize(self, responses):
if not responses.success:
return
with self._CreateClient() as client:
client.Set(client.Schema.MEMORY_SIZE(responses.First()))
@flow.StateHandler()
def Platform(self, responses):
"""Stores information about the platform."""
if responses.success:
response = responses.First()
# These need to be in separate attributes because they get searched on in
# the GUI
with self._OpenClient(mode="rw") as client:
client.Set(client.Schema.HOSTNAME(response.node))
client.Set(client.Schema.SYSTEM(response.system))
client.Set(client.Schema.OS_RELEASE(response.release))
client.Set(client.Schema.OS_VERSION(response.version))
client.Set(client.Schema.KERNEL(response.kernel))
client.Set(client.Schema.FQDN(response.fqdn))
# response.machine is the machine value of platform.uname()
# On Windows this is the value of:
# HKEY_LOCAL_MACHINE\System\CurrentControlSet\Control\Session
# Manager\Environment\PROCESSOR_ARCHITECTURE
# "AMD64", "IA64" or "x86"
client.Set(client.Schema.ARCH(response.machine))
client.Set(
client.Schema.UNAME("%s-%s-%s" % (response.system, response.release,
response.version)))
# Update the client index
client_index.CreateClientIndex(token=self.token).AddClient(client)
if response.system == "Windows":
with aff4.FACTORY.Create(
self.client_id.Add("registry"),
standard.VFSDirectory,
token=self.token) as fd:
fd.Set(fd.Schema.PATHSPEC,
fd.Schema.PATHSPEC(
path="/", pathtype=rdf_paths.PathSpec.PathType.REGISTRY))
# No support for OS X cloud machines as yet.
if response.system in ["Linux", "Windows"]:
self.CallClient(
cloud_actions.GetCloudVMMetadata,
cloud.BuildCloudMetadataRequests(),
next_state="CloudMetadata")
known_system_type = True
else:
client = self._OpenClient()
known_system_type = client.Get(client.Schema.SYSTEM)
self.Log("Could not retrieve Platform info.")
if known_system_type:
# We will accept a partial KBInit rather than raise, so pass
# require_complete=False.
self.CallFlow(
"KnowledgeBaseInitializationFlow",
require_complete=False,
lightweight=self.args.lightweight,
next_state="ProcessKnowledgeBase")
else:
self.Log("Unknown system type, skipping KnowledgeBaseInitializationFlow")
@flow.StateHandler()
def InstallDate(self, responses):
if responses.success:
response = responses.First()
with self._CreateClient() as client:
install_date = client.Schema.INSTALL_DATE(response.integer * 1000000)
client.Set(install_date)
else:
self.Log("Could not get InstallDate")
def _GetExtraArtifactsForCollection(self):
original_set = set(config_lib.CONFIG["Artifacts.interrogate_store_in_aff4"])
add_set = set(config_lib.CONFIG[
"Artifacts.interrogate_store_in_aff4_additions"])
skip_set = set(config_lib.CONFIG[
"Artifacts.interrogate_store_in_aff4_skip"])
return original_set.union(add_set) - skip_set
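  # A small sketch (not part of the original flow) of the set arithmetic used
  # above: (store_in_aff4 UNION additions) MINUS skip. The artifact names below
  # are hypothetical config values.
  @staticmethod
  def _ExampleExtraArtifactSelection():
    store_in_aff4 = set(["WMILogicalDisks", "RootDiskVolumeUsage"])
    additions = set(["LinuxHardwareInfo"])
    skip = set(["RootDiskVolumeUsage"])
    # -> set(["WMILogicalDisks", "LinuxHardwareInfo"])
    return store_in_aff4.union(additions) - skip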
@flow.StateHandler()
def ProcessKnowledgeBase(self, responses):
"""Collect and store any extra non-kb artifacts."""
if not responses.success:
raise flow.FlowError("Error collecting artifacts: %s" % responses.status)
# Collect any non-knowledgebase artifacts that will be stored in aff4.
artifact_list = self._GetExtraArtifactsForCollection()
if artifact_list:
self.CallFlow(
"ArtifactCollectorFlow",
artifact_list=artifact_list,
next_state="ProcessArtifactResponses",
store_results_in_aff4=True)
# Update the client index
client = self._OpenClient()
client_index.CreateClientIndex(token=self.token).AddClient(client)
@flow.StateHandler()
def ProcessArtifactResponses(self, responses):
if not responses.success:
self.Log("Error collecting artifacts: %s", responses.status)
FILTERED_IPS = ["127.0.0.1", "::1", "fe80::1"]
@flow.StateHandler()
def EnumerateInterfaces(self, responses):
"""Enumerates the interfaces."""
if not (responses.success and responses):
self.Log("Could not enumerate interfaces: %s" % responses.status)
return
with self._CreateClient() as client:
interface_list = client.Schema.INTERFACES()
mac_addresses = []
ip_addresses = []
for response in responses:
interface_list.Append(response)
# Add a hex encoded string for searching
if (response.mac_address and
response.mac_address != "\x00" * len(response.mac_address)):
mac_addresses.append(response.mac_address.human_readable_address)
for address in response.addresses:
if address.human_readable_address not in self.FILTERED_IPS:
ip_addresses.append(address.human_readable_address)
client.Set(client.Schema.MAC_ADDRESS("\n".join(mac_addresses)))
client.Set(client.Schema.HOST_IPS("\n".join(ip_addresses)))
client.Set(client.Schema.INTERFACES(interface_list))
@flow.StateHandler()
def EnumerateFilesystems(self, responses):
"""Store all the local filesystems in the client."""
if responses.success and len(responses):
filesystems = aff4_grr.VFSGRRClient.SchemaCls.FILESYSTEM()
for response in responses:
filesystems.Append(response)
if response.type == "partition":
(device, offset) = response.device.rsplit(":", 1)
offset = int(offset)
pathspec = rdf_paths.PathSpec(
path=device,
pathtype=rdf_paths.PathSpec.PathType.OS,
offset=offset)
pathspec.Append(path="/", pathtype=rdf_paths.PathSpec.PathType.TSK)
urn = aff4_grr.VFSGRRClient.PathspecToURN(pathspec, self.client_id)
fd = aff4.FACTORY.Create(urn, standard.VFSDirectory, token=self.token)
fd.Set(fd.Schema.PATHSPEC(pathspec))
fd.Close()
continue
if response.device:
pathspec = rdf_paths.PathSpec(
path=response.device, pathtype=rdf_paths.PathSpec.PathType.OS)
pathspec.Append(path="/", pathtype=rdf_paths.PathSpec.PathType.TSK)
urn = aff4_grr.VFSGRRClient.PathspecToURN(pathspec, self.client_id)
fd = aff4.FACTORY.Create(urn, standard.VFSDirectory, token=self.token)
fd.Set(fd.Schema.PATHSPEC(pathspec))
fd.Close()
if response.mount_point:
# Create the OS device
pathspec = rdf_paths.PathSpec(
path=response.mount_point,
pathtype=rdf_paths.PathSpec.PathType.OS)
urn = aff4_grr.VFSGRRClient.PathspecToURN(pathspec, self.client_id)
with aff4.FACTORY.Create(
urn, standard.VFSDirectory, token=self.token) as fd:
fd.Set(fd.Schema.PATHSPEC(pathspec))
with self._CreateClient() as client:
client.Set(client.Schema.FILESYSTEM, filesystems)
else:
self.Log("Could not enumerate file systems.")
@flow.StateHandler()
def ClientInfo(self, responses):
"""Obtain some information about the GRR client running."""
if responses.success:
response = responses.First()
with self._OpenClient(mode="rw") as client:
client.Set(client.Schema.CLIENT_INFO(response))
client.AddLabels(*response.labels, owner="GRR")
else:
self.Log("Could not get ClientInfo.")
@flow.StateHandler()
def ClientConfiguration(self, responses):
"""Process client config."""
if responses.success:
response = responses.First()
with self._CreateClient() as client:
client.Set(client.Schema.GRR_CONFIGURATION(response))
@flow.StateHandler()
def ClientLibraries(self, responses):
"""Process client library information."""
if responses.success:
response = responses.First()
with self._CreateClient() as client:
client.Set(client.Schema.LIBRARY_VERSIONS(response))
def NotifyAboutEnd(self):
self.Notify("Discovery", self.client_id, "Client Discovery Complete")
@flow.StateHandler()
def End(self):
"""Finalize client registration."""
# Update summary and publish to the Discovery queue.
client = self._OpenClient()
summary = client.GetSummary()
self.Publish("Discovery", summary)
self.SendReply(summary)
# Update the client index
client_index.CreateClientIndex(token=self.token).AddClient(client)
|
|
"""Manage flooding to ports on VLANs."""
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ryu.lib import mac
from ryu.ofproto import ofproto_v1_3 as ofp
try:
import valve_of
import valve_packet
except ImportError:
from faucet import valve_of
from faucet import valve_packet
class ValveFloodManager(object):
# Enumerate possible eth_dst flood destinations.
# First bool says whether to flood this destination, if the VLAN
# has unicast flooding enabled (if unicast flooding is enabled,
# then we flood all destination eth_dsts).
FLOOD_DSTS = (
(True, None, None),
(False, valve_packet.BRIDGE_GROUP_ADDRESS, valve_packet.mac_byte_mask(3)), # 802.x
(False, '01:00:5E:00:00:00', valve_packet.mac_byte_mask(3)), # IPv4 multicast
(False, '33:33:00:00:00:00', valve_packet.mac_byte_mask(2)), # IPv6 multicast
(False, mac.BROADCAST_STR, None), # flood on ethernet broadcasts
)
def __init__(self, flood_table, flood_priority,
dp_stack, dp_ports, dp_shortest_path_to_root,
use_group_table, groups):
self.flood_table = flood_table
self.flood_priority = flood_priority
self.stack = dp_stack
self.use_group_table = use_group_table
self.groups = groups
self.stack_ports = [
port for port in list(dp_ports.values()) if port.stack is not None]
self.towards_root_stack_ports = []
self.away_from_root_stack_ports = []
my_root_distance = dp_shortest_path_to_root()
for port in self.stack_ports:
peer_dp = port.stack['dp']
peer_root_distance = peer_dp.shortest_path_to_root()
if peer_root_distance > my_root_distance:
self.away_from_root_stack_ports.append(port)
elif peer_root_distance < my_root_distance:
self.towards_root_stack_ports.append(port)
def _build_flood_port_outputs(self, ports, in_port):
flood_acts = []
for port in ports:
if port == in_port:
if port.hairpin:
flood_acts.append(valve_of.output_in_port())
else:
flood_acts.append(valve_of.output_port(port.number))
return flood_acts
def _build_flood_local_rule_actions(self, vlan, exclude_unicast, in_port):
flood_acts = []
tagged_ports = vlan.tagged_flood_ports(exclude_unicast)
flood_acts.extend(self._build_flood_port_outputs(
tagged_ports, in_port))
untagged_ports = vlan.untagged_flood_ports(exclude_unicast)
if untagged_ports:
flood_acts.append(valve_of.pop_vlan())
flood_acts.extend(self._build_flood_port_outputs(
untagged_ports, in_port))
return flood_acts
def _port_is_dp_local(self, port):
if (port in self.away_from_root_stack_ports or
port in self.towards_root_stack_ports):
return False
return True
def _dp_is_root(self):
return self.stack is not None and 'priority' in self.stack
def _build_flood_rule_actions(self, vlan, exclude_unicast, in_port):
"""Calculate flooding destinations based on this DP's position.
If a standalone switch, then flood to local VLAN ports.
If a distributed switch, see the following example.
Hosts
||||
||||
+----+ +----+ +----+
---+1 | |1234| | 1+---
Hosts ---+2 | | | | 2+--- Hosts
---+3 | | | | 3+---
---+4 5+-------+5 6+-------+5 4+---
+----+ +----+ +----+
Root DP
The basic strategy is flood-towards-root. The root
reflects the flood back out. There are no loops and flooding
is done entirely in the dataplane.
On the root switch (left), flood destinations are:
1: 2 3 4 5(s)
2: 1 3 4 5(s)
3: 1 2 4 5(s)
4: 1 2 3 5(s)
5: 1 2 3 4 5(s, note reflection)
On the middle switch:
1: 5(s)
2: 5(s)
3: 5(s)
4: 5(s)
5: 1 2 3 4 6(s)
6: 5(s)
On the rightmost switch:
1: 5(s)
2: 5(s)
3: 5(s)
4: 5(s)
5: 1 2 3 4
"""
local_flood_actions = self._build_flood_local_rule_actions(
vlan, exclude_unicast, in_port)
# If we're a standalone switch, then flood local VLAN
if self.stack is None:
return local_flood_actions
away_flood_actions = self._build_flood_port_outputs(
self.away_from_root_stack_ports, in_port)
toward_flood_actions = self._build_flood_port_outputs(
self.towards_root_stack_ports, in_port)
flood_all_except_self = away_flood_actions + local_flood_actions
# If we're the root of a distributed switch..
if self._dp_is_root():
# If the input port was local, then flood local VLAN and stacks.
if self._port_is_dp_local(in_port):
return flood_all_except_self
# If input port non-local, then flood outward again
return [valve_of.output_in_port()] + flood_all_except_self
# We are not the root of the distributed switch
# If input port was connected to a switch closer to the root,
# then flood outwards (local VLAN and stacks further than us)
if in_port in self.towards_root_stack_ports:
return flood_all_except_self
# If input port local or from a further away switch, flood
# towards the root.
return toward_flood_actions
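    # A sketch (not used by FAUCET itself) of the flood-towards-root decision
    # implemented above, using plain lists of hypothetical action strings in
    # place of OpenFlow actions.
    @staticmethod
    def _example_flood_decision(in_port, is_root, has_stack,
                                towards_root_ports, away_ports,
                                local_acts, away_acts, toward_acts):
        if not has_stack:
            # Standalone switch: flood the local VLAN only.
            return local_acts
        flood_all_except_self = away_acts + local_acts
        if is_root:
            if in_port in towards_root_ports or in_port in away_ports:
                # Stack input port: reflect the flood back out the same port.
                return ['output:IN_PORT'] + flood_all_except_self
            # Local input port: flood local VLAN and stacks away from us.
            return flood_all_except_self
        if in_port in towards_root_ports:
            # Came from closer to the root: flood outwards.
            return flood_all_except_self
        # Local port or further-away switch: flood towards the root.
        return toward_acts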
def _build_flood_rule_for_port(self, vlan, eth_dst, eth_dst_mask,
exclude_unicast, command, flood_priority,
port, preflood_acts):
ofmsgs = []
match = self.flood_table.match(
vlan=vlan, in_port=port.number,
eth_dst=eth_dst, eth_dst_mask=eth_dst_mask)
flood_acts = self._build_flood_rule_actions(
vlan, exclude_unicast, port)
ofmsgs.append(self.flood_table.flowmod(
match=match,
command=command,
inst=[valve_of.apply_actions(preflood_acts + flood_acts)],
priority=flood_priority))
return ofmsgs
def _build_unmirrored_flood_rules(self, vlan, eth_dst, eth_dst_mask,
exclude_unicast, command, flood_priority):
ofmsgs = []
vlan_all_ports = []
vlan_all_ports.extend(vlan.flood_ports(vlan.get_ports(), exclude_unicast))
vlan_all_ports.extend(self.away_from_root_stack_ports)
vlan_all_ports.extend(self.towards_root_stack_ports)
for port in vlan_all_ports:
ofmsgs.extend(self._build_flood_rule_for_port(
vlan, eth_dst, eth_dst_mask,
exclude_unicast, command, flood_priority,
port, []))
return ofmsgs
def _build_mirrored_flood_rules(self, vlan, eth_dst, eth_dst_mask,
exclude_unicast, command, flood_priority):
ofmsgs = []
mirrored_ports = vlan.mirrored_ports()
for port in mirrored_ports:
mirror_acts = [valve_of.output_port(port.mirror)]
ofmsgs.extend(self._build_flood_rule_for_port(
vlan, eth_dst, eth_dst_mask,
exclude_unicast, command, flood_priority,
port, mirror_acts))
return ofmsgs
def _build_group_buckets(self, vlan, unicast_flood):
buckets = []
for port in vlan.tagged_flood_ports(unicast_flood):
buckets.append(valve_of.bucket(
actions=[valve_of.output_port(port.number)]))
for port in vlan.untagged_flood_ports(unicast_flood):
buckets.append(valve_of.bucket(
actions=[
valve_of.pop_vlan(),
valve_of.output_port(port.number)]))
return buckets
def _build_group_flood_rules(self, vlan, modify, command):
flood_priority = self.flood_priority
broadcast_group = self.groups.get_entry(
vlan.vid,
self._build_group_buckets(vlan, False))
unicast_group = self.groups.get_entry(
vlan.vid + valve_of.VLAN_GROUP_OFFSET,
self._build_group_buckets(vlan, vlan.unicast_flood))
ofmsgs = []
if modify:
ofmsgs.append(broadcast_group.modify())
ofmsgs.append(unicast_group.modify())
else:
ofmsgs.extend(broadcast_group.add())
ofmsgs.extend(unicast_group.add())
for unicast_eth_dst, eth_dst, eth_dst_mask in self.FLOOD_DSTS:
if unicast_eth_dst and not vlan.unicast_flood:
continue
group = broadcast_group
if not eth_dst:
group = unicast_group
match = self.flood_table.match(
vlan=vlan, eth_dst=eth_dst, eth_dst_mask=eth_dst_mask)
ofmsgs.append(self.flood_table.flowmod(
match=match,
command=command,
inst=[valve_of.apply_actions([valve_of.group_act(group.group_id)])],
priority=flood_priority))
flood_priority += 1
return ofmsgs
def _build_multiout_flood_rules(self, vlan, command):
flood_priority = self.flood_priority
ofmsgs = []
for unicast_eth_dst, eth_dst, eth_dst_mask in self.FLOOD_DSTS:
if unicast_eth_dst and not vlan.unicast_flood:
continue
ofmsgs.extend(self._build_unmirrored_flood_rules(
vlan, eth_dst, eth_dst_mask,
unicast_eth_dst, command, flood_priority))
flood_priority += 1
ofmsgs.extend(self._build_mirrored_flood_rules(
vlan, eth_dst, eth_dst_mask,
unicast_eth_dst, command, flood_priority))
flood_priority += 1
return ofmsgs
def build_flood_rules(self, vlan, modify=False):
"""Add flows to flood packets to unknown destinations on a VLAN."""
# TODO: group table support is still fairly uncommon, so
# group tables are currently optional.
command = ofp.OFPFC_ADD
if modify:
command = ofp.OFPFC_MODIFY_STRICT
if self.use_group_table:
hairpin_ports = [port for port in vlan.get_ports() if port.hairpin]
# TODO: group tables for stacking and hairpin flooding modes.
if self.stack is None and not hairpin_ports:
return self._build_group_flood_rules(vlan, modify, command)
return self._build_multiout_flood_rules(vlan, command)
|
|
# -*- coding: utf-8 -*-
import json
import os
import re
from functools import partial
from django import template
from django.conf import settings
from django.contrib.admin import helpers
from django.contrib.admin.utils import quote, unquote, capfirst
from django.contrib import messages
from filer.admin.patched.admin_utils import get_deleted_objects
from django.core.exceptions import PermissionDenied
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.urls import reverse, re_path
from django.db import router
from django.db.models import Q
from django.contrib.sites.models import Site
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth import get_permission_codename
from django.http import HttpResponseRedirect, Http404, HttpResponse
from django.shortcuts import render
from django.template import RequestContext
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext, ugettext_lazy
from filer.admin.forms import CopyFilesAndFoldersForm
from filer.admin.common_admin import FolderPermissionModelAdmin
from filer.views import (popup_status, popup_param, selectfolder_status,
selectfolder_param, current_site_param,
get_param_from_request)
from filer.admin.tools import (folders_available, files_available,
get_admin_sites_for_user,
has_multi_file_action_permission,
is_valid_destination,)
from filer.models import (Folder, FolderRoot, UnfiledImages, File, tools,
ImagesWithMissingData,
Archive, Image, DummyFolder)
from filer.settings import FILER_STATICMEDIA_PREFIX, FILER_PAGINATE_BY
from filer.utils.multi_model_qs import MultiMoldelQuerysetChain
ELEM_ID = re.compile(r'.*<a href=".*/(?P<file_id>[0-9]+)/.*".*a>$')
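# A small sketch (not used by the admin itself) of what ELEM_ID extracts from
# an admin change link; the href below is hypothetical.
def _example_elem_id_match():
    sample = '<a href="/admin/filer/file/42/change/">some file</a>'
    match = ELEM_ID.match(sample)
    return match.group('file_id') if match else None  # '42'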
class FolderAdmin(FolderPermissionModelAdmin):
list_display = ('name',)
list_per_page = 20
list_filter = ('owner',)
search_fields = ['name', 'files__name']
actions_affecting_position = [
'move_to_clipboard',
'delete_files_or_folders',
'move_files_and_folders',
]
actions_restrictions = [
'disable_restriction',
'enable_restriction',
] if getattr(settings, 'FILER_ENABLE_RESTRICTION_ACTIONS', True) else []
actions = actions_restrictions + [
'copy_files_and_folders',
'extract_files',
] + actions_affecting_position
# form fields
exclude = ('parent', 'owner', 'folder_type')
raw_id_fields = ('owner', )
def get_readonly_fields(self, request, obj=None):
self.readonly_fields = [ro_field
for ro_field in self.readonly_fields]
self._make_restricted_field_readonly(request.user, obj)
return super(FolderAdmin, self).get_readonly_fields(
request, obj)
def _get_sites_available_for_user(self, user):
if user.is_superuser:
return Site.objects.all()
admin_sites = [site.id
for site in get_admin_sites_for_user(user)]
return Site.objects.filter(id__in=admin_sites)
def formfield_for_foreignkey(self, db_field, request=None, **kwargs):
"""
        Filters sites available to the user based on the user's roles on sites
"""
formfield = super(FolderAdmin, self).formfield_for_foreignkey(
db_field, request, **kwargs)
if request and db_field.remote_field.model is Site:
formfield.queryset = self._get_sites_available_for_user(
request.user)
return formfield
def formfield_for_manytomany(self, db_field, request, **kwargs):
"""
        Filters sites available to the user based on the user's roles on sites
"""
formfield = super(FolderAdmin, self).formfield_for_manytomany(
db_field, request, **kwargs)
if request and db_field.remote_field.model is Site:
formfield.queryset = self._get_sites_available_for_user(
request.user)
return formfield
def get_form(self, request, obj=None, **kwargs):
"""
Returns a Form class for use in the admin add view. This is used by
add_view and change_view.
Sets the parent folder and owner for the folder that will be edited
in the form
"""
folder_form = super(FolderAdmin, self).get_form(
request, obj=obj, **kwargs)
if 'site' in folder_form.base_fields:
folder_form.base_fields['site'].widget.can_add_related = False
folder_form.base_fields['site'].widget.can_delete_related = False
folder_form.base_fields['site'].widget.can_change_related = False
if 'shared' in folder_form.base_fields:
folder_form.base_fields['shared'].widget.can_add_related = False
            # show the shared-sites field only for superusers
if not request.user.is_superuser:
folder_form.base_fields.pop('shared', None)
# check if site field should be visible in the form or not
is_core_folder = False
if obj and obj.pk:
# change view
parent_id = obj.parent_id
is_core_folder = obj.is_core()
else:
# add view
parent_id = get_param_from_request(request, 'parent_id')
folder_form.base_fields.pop('restricted', None)
        # don't show the site field if the folder has a parent or is a core folder
pop_site_fields = parent_id or is_core_folder
if pop_site_fields:
folder_form.base_fields.pop('site', None)
folder_form.base_fields.pop('shared', None)
def clean(form_instance):
# make sure owner and parent are passed to the model clean method
current_folder = form_instance.instance
if not current_folder.owner:
current_folder.owner = request.user
if parent_id:
current_folder.parent = Folder.objects.get(id=parent_id)
return form_instance.cleaned_data
folder_form.clean = clean
return folder_form
def icon_img(self, xs):
return mark_safe(('<img src="%simg/icons/plainfolder_32x32.png" ' +
'alt="Folder Icon" />') % FILER_STATICMEDIA_PREFIX)
icon_img.allow_tags = True
def get_urls(self):
from django.conf.urls import url
urls = super(FolderAdmin, self).get_urls()
url_patterns = [
# we override the default list view with our own directory listing
# of the root directories
re_path(r'^$', self.admin_site.admin_view(self.directory_listing),
name='filer-directory_listing-root'),
re_path(r'^(?P<folder_id>\d+)/list/$',
self.admin_site.admin_view(self.directory_listing),
name='filer-directory_listing'),
re_path(r'^make_folder/$',
self.admin_site.admin_view(self.make_folder),
name='filer-directory_listing-make_root_folder'),
re_path(r'^images_with_missing_data/$',
self.admin_site.admin_view(self.directory_listing),
{'viewtype': 'images_with_missing_data'},
name='filer-directory_listing-images_with_missing_data'),
re_path(r'^unfiled_images/$',
self.admin_site.admin_view(self.directory_listing),
{'viewtype': 'unfiled_images'},
name='filer-directory_listing-unfiled_images'),
re_path(r'^destination_folders/$',
self.admin_site.admin_view(self.destination_folders),
name='filer-destination_folders'),
]
url_patterns.extend(urls)
return url_patterns
def add_view(self, request, *args, **kwargs):
raise PermissionDenied
def make_folder(self, request, folder_id=None, *args, **kwargs):
response = super(FolderAdmin, self).add_view(request, *args, **kwargs)
        # Since filer overwrites django's dismissPopup we need to make sure
        # that the response from django's add_view is the dismiss-popup
        # response so we can overwrite it. Since only the save button
        # appears, it's enough to make sure that the request is a POST from
        # a popup view and that the response is a successful HttpResponse.
if (request.method == 'POST' and popup_status(request) and
response.status_code == 200 and
not isinstance(response, HttpResponseRedirect)):
return HttpResponse('<script type="text/javascript">' +
'opener.dismissPopupAndReload(window);' +
'</script>')
return response
def delete_view(self, request, object_id, extra_context=None):
# override delete view since we need to hide already trashed
# files/folders
opts = self.model._meta
obj = self.get_object(request, unquote(object_id))
if obj is None:
raise Http404(_('%(name)s object with primary key %(key)r '
'does not exist.') % {
'name': force_text(opts.verbose_name),
'key': escape(object_id)})
if obj.parent:
redirect_url = reverse('admin:filer-directory_listing',
kwargs={'folder_id': obj.parent_id})
else:
redirect_url = reverse('admin:filer-directory_listing-root')
redirect_url = "%s%s%s%s" % (redirect_url, popup_param(request),
selectfolder_param(request, "&"),
current_site_param(request),)
setattr(request, 'current_dir_list_folder',
obj.parent or FolderRoot())
response = self.delete_files_or_folders(
request,
File.objects.none(),
Folder.objects.filter(id=obj.id))
if response is None:
return HttpResponseRedirect(redirect_url)
return response
# custom views
def directory_listing(self, request, folder_id=None, viewtype=None):
user = request.user
clipboard = tools.get_user_clipboard(user)
file_type = request.GET.get('file_type', None)
if viewtype == 'images_with_missing_data':
folder = ImagesWithMissingData()
folder_file_qs = folder.files
elif viewtype == 'unfiled_images':
folder = UnfiledImages()
folder_file_qs = folder.files
elif folder_id is None:
folder = FolderRoot()
folder_file_qs = File.objects.none()
else:
try:
folder = Folder.objects.get(id=folder_id)
if not self.can_view_folder_content(request, folder):
raise PermissionDenied
except Folder.DoesNotExist:
raise Http404
if file_type == 'image':
folder_file_qs = Image.objects.filter(folder=folder)
else:
folder_file_qs = File.objects.filter(folder=folder)
if file_type == 'image':
all_file_qs = Image.objects
else:
all_file_qs = File.objects
setattr(request, 'current_dir_list_folder', folder)
# search
q = request.GET.get('q', None)
if q:
search_terms = q.split(" ")
else:
search_terms = []
q = ''
        # Check actions to see if any are available on this changelist.
        # Do not make any actions available if we're in search view, since
        # there is no way to detect the current folder.
actions = {}
if not search_terms:
actions = self.get_actions(request)
# Remove action checkboxes if there aren't any actions available.
list_display = list(self.list_display)
if not actions:
try:
list_display.remove('action_checkbox')
except ValueError:
pass
limit_search_to_folder = request.GET.get('limit_search_to_folder',
False) in (True, 'on')
current_site = request.GET.get('current_site', None)
_filter_folders = partial(folders_available, current_site, request.user)
_filter_files = partial(files_available, current_site, request.user)
if len(search_terms) > 0:
if folder and limit_search_to_folder and not folder.is_root:
descendants = folder.get_descendants(
include_self=True).filter(deleted_at__isnull=True)
folder_qs = _filter_folders(descendants.exclude(id=folder.id))
file_qs = _filter_files(all_file_qs.filter(folder__in=descendants))
else:
folder_qs = _filter_folders(Folder.objects.all())
file_qs = _filter_files(all_file_qs)
def folder_search_qs(qs, terms=[]):
for term in terms:
qs = qs.filter(Q(name__icontains=term) |
Q(owner__username__icontains=term) |
Q(owner__first_name__icontains=term) |
Q(owner__last_name__icontains=term))
return qs
def file_search_qs(qs, terms=[]):
for term in terms:
qs = qs.filter(Q(name__icontains=term) |
Q(description__icontains=term) |
Q(original_filename__icontains=term) |
Q(owner__username__icontains=term) |
Q(owner__first_name__icontains=term) |
Q(owner__last_name__icontains=term))
return qs
folder_qs = folder_search_qs(folder_qs, search_terms)
file_qs = file_search_qs(file_qs, search_terms)
show_result_count = True
else:
folder_qs = _filter_folders(folder.children.all())
file_qs = _filter_files(folder_file_qs)
show_result_count = False
folder_qs = folder_qs.order_by('name')
file_qs = file_qs.order_by('name')
if show_result_count:
show_result_count = {
'files_found': file_qs.count(),
'folders_found': folder_qs.count(),
}
items = MultiMoldelQuerysetChain([folder_qs, file_qs])
paginator = Paginator(items, FILER_PAGINATE_BY)
# Are we moving to clipboard?
if request.method == 'POST' and '_save' not in request.POST:
for f in file_qs:
if "move-to-clipboard-%d" % (f.id,) in request.POST:
if (f.is_readonly_for_user(user) or
f.is_restricted_for_user(user)):
raise PermissionDenied
tools.move_file_to_clipboard(request, [f], clipboard)
return HttpResponseRedirect(request.get_full_path())
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
# Actions with no confirmation
if (actions and request.method == 'POST' and
'index' in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request,
files_queryset=file_qs,
folders_queryset=folder_qs)
if response:
return response
else:
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg)
# Actions with confirmation
if (actions and request.method == 'POST' and
helpers.ACTION_CHECKBOX_NAME in request.POST and
'index' not in request.POST and '_save' not in request.POST):
if selected:
response = self.response_action(request,
files_queryset=file_qs,
folders_queryset=folder_qs)
if response:
return response
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = \
self.get_action_choices(request)
else:
action_form = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', paginator.count)
# Make sure page request is an int. If not, deliver first page.
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
# If page request (9999) is out of range, deliver last page of results.
try:
paginated_items = paginator.page(page)
except (EmptyPage, InvalidPage):
paginated_items = paginator.page(paginator.num_pages)
context = self.admin_site.each_context(request)
context.update({
'folder': folder,
'user_clipboard': clipboard,
'clipboard_files': clipboard.files.distinct(),
'current_site': get_param_from_request(request, 'current_site'),
'paginator': paginator,
'paginated_items': paginated_items,
'current_url': request.path,
'title': 'Directory listing for %s' % folder.name,
'search_string': ' '.join(search_terms),
'q': q,
'show_result_count': show_result_count,
'limit_search_to_folder': limit_search_to_folder,
'is_popup': popup_status(request),
'select_folder': selectfolder_status(request),
# needed in the admin/base.html template for logout links
'root_path': reverse('admin:index'),
'action_form': action_form,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': self.actions_selection_counter,
'selection_note': _('0 of %(cnt)s selected') % {
'cnt': len(paginated_items.object_list)},
'selection_note_all': selection_note_all % {
'total_count': paginator.count},
'media': self.media,
'file_type': file_type,
})
response = render(request, 'admin/filer/folder/directory_listing.html', context)
return response
def response_action(self, request, files_queryset, folders_queryset):
"""
Handle an admin action. This is called if a request is POSTed to the
changelist; it returns an HttpResponse if the action was handled, and
None otherwise.
"""
# There can be multiple action forms on the page (at the top
# and bottom of the change list, for example). Get the action
# whose button was pushed.
try:
action_index = int(request.POST.get('index', 0))
except ValueError:
action_index = 0
# Construct the action form.
data = request.POST.copy()
data.pop(helpers.ACTION_CHECKBOX_NAME, None)
data.pop("index", None)
# Use the action whose button was pushed
try:
data.update({'action': data.getlist('action')[action_index]})
except IndexError:
# If we didn't get an action from the chosen form that's invalid
# POST data, so by deleting action it'll fail the validation check
# below. So no need to do anything here
pass
action_form = self.action_form(data, auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
# If the form's valid we can handle the action.
if action_form.is_valid():
action = action_form.cleaned_data['action']
select_across = action_form.cleaned_data['select_across']
func, name, description = self.get_actions(request)[action]
# Get the list of selected PKs. If nothing's selected, we can't
# perform an action on it, so bail. Except we want to perform
# the action explicitly on all objects.
selected = request.POST.getlist(helpers.ACTION_CHECKBOX_NAME)
if not selected and not select_across:
# Reminder that something needs to be selected or
# nothing will happen
msg = _("Items must be selected in order to perform "
"actions on them. No items have been changed.")
self.message_user(request, msg)
return None
if not select_across:
selected_files = []
selected_folders = []
for pk in selected:
if pk[:5] == "file-":
selected_files.append(pk[5:])
else:
selected_folders.append(pk[7:])
# Perform the action only on the selected objects
files_queryset = files_queryset.filter(pk__in=selected_files)
folders_queryset = folders_queryset.filter(
pk__in=selected_folders)
response = func(self, request, files_queryset, folders_queryset)
# Actions may return an HttpResponse, which will be used as the
# response from the POST. If not, we'll be a good little HTTP
# citizen and redirect back to the changelist page.
if isinstance(response, HttpResponse):
return response
else:
return HttpResponseRedirect(request.get_full_path())
else:
msg = _("No action selected.")
self.message_user(request, msg)
return None
def get_actions(self, request):
actions = super(FolderAdmin, self).get_actions(request)
def pop_actions(*actions_to_remove):
for action in actions_to_remove:
actions.pop(action, None)
pop_actions('delete_selected')
if not self.has_delete_permission(request, None):
pop_actions(*self.actions_affecting_position)
current_folder = getattr(request, 'current_dir_list_folder', None)
if not current_folder:
return actions
if current_folder.is_root:
pop_actions('extract_files')
if (not current_folder.is_root and
current_folder.is_readonly_for_user(request.user)):
return {}
if isinstance(current_folder, UnfiledImages):
pop_actions('enable_restriction', 'copy_files_and_folders',
'disable_restriction')
return actions
# actions are available for descendants, not for the current folder
if not (current_folder.can_change_restricted(request.user) and
not current_folder.restricted):
pop_actions('enable_restriction', 'disable_restriction')
if (actions and current_folder.is_restricted_for_user(request.user)):
# allow only copy
if 'copy_files_and_folders' in actions:
return {'copy_files_and_folders':
actions['copy_files_and_folders']}
return actions
def move_to_clipboard(self, request, files_queryset, folders_queryset):
"""
Action which moves the selected files to clipboard.
"""
if request.method != 'POST':
return None
if not has_multi_file_action_permission(
request, files_queryset,
Folder.objects.none()):
raise PermissionDenied
clipboard = tools.get_user_clipboard(request.user)
# We define it like that so that we can modify it inside the
# move_files function
files_count = [0]
def move_files(files):
files_count[0] += tools.move_file_to_clipboard(
request, files, clipboard)
move_files(files_queryset)
if files_count[0] > 0:
self.message_user(request,
_("Successfully moved %(count)d files to clipboard.") % {
"count": files_count[0], })
else:
self.message_user(request,
_("No files were moved to clipboard."))
return None
move_to_clipboard.short_description = ugettext_lazy(
"Move selected files to clipboard")
def _get_unique_items(self, deletable_items, unique_items, depth=5):
count = 0
if depth < 0:
return count
for elem in deletable_items:
if isinstance(elem, (list, tuple)):
count += self._get_unique_items(elem, unique_items, depth-1)
elif isinstance(elem, str):
match = ELEM_ID.match(elem)
elem_id = match and match.group('file_id')
if elem_id and elem_id not in unique_items:
unique_items.append(elem_id)
count += 1
return count
def delete_files_or_folders(self, request,
files_queryset, folders_queryset):
"""
Action which deletes the selected files and/or folders.
This action first displays a confirmation page which shows all the
deletable files and/or folders, or, if the user has no permission
on one of the related children (foreign keys), a "permission denied"
message.
Next, it deletes all selected files and/or folders and redirects back
to the folder.
"""
# Check that the user has delete permission for the actual model
if not self.has_delete_permission(request):
raise PermissionDenied
if not has_multi_file_action_permission(
request, files_queryset, folders_queryset):
raise PermissionDenied
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(
request, files_queryset, folders_queryset)
all_protected = []
using = router.db_for_write(self.model)
deletable_files, perms_needed_files, protected_files = \
get_deleted_objects(
files_queryset, files_queryset.model._meta,
request.user, self.admin_site, using)
files_count = self._get_unique_items(deletable_files, unique_items=[])
deletable_folders, perms_needed_folders, protected_folders = \
get_deleted_objects(
folders_queryset, folders_queryset.model._meta,
request.user, self.admin_site, using)
folders_count = self._get_unique_items(deletable_folders, unique_items=[])
all_protected.extend(protected_files)
all_protected.extend(protected_folders)
all_deletable_objects = [deletable_files, deletable_folders]
all_perms_needed = perms_needed_files.union(perms_needed_folders)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list
# view again.
if request.POST.get('post'):
if all_perms_needed:
raise PermissionDenied
total_count = files_count + folders_count
if total_count:
# delete all explicitly selected files
for file_obj in files_queryset:
self.log_deletion(request, file_obj, force_text(file_obj))
file_obj.delete()
# delete all folders
for file_id in folders_queryset.values_list('id', flat=True):
file_obj = Folder.objects.get(id=file_id)
self.log_deletion(request, file_obj, force_text(file_obj))
file_obj.delete()
self.message_user(request,
_("Successfully deleted %(count)d files "
"and/or folders.") % {"count": total_count, })
# Return None to display the change list page again.
return None
if all_perms_needed or all_protected:
title = _("Cannot delete files and/or folders")
else:
title = _("Are you sure?")
context = {
"title": title,
"instance": current_folder,
"breadcrumbs_action": _("Delete files and/or folders"),
"deletable_objects": all_deletable_objects,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": all_perms_needed,
"protected": all_protected,
"opts": opts,
'is_popup': popup_status(request),
'select_folder': selectfolder_status(request),
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
}
context.update(self.admin_site.each_context(request))
# Display the destination folder selection page
return render(request, "admin/filer/delete_selected_files_confirmation.html", context)
delete_files_or_folders.short_description = ugettext_lazy(
"Delete selected files and/or folders")
# Copied from django.contrib.admin.util
def _format_callback(self, obj, user, admin_site, perms_needed):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = reverse('%s:%s_%s_change'
% (admin_site.name,
opts.app_label,
opts.object_name.lower()),
None, (quote(obj._get_pk_val()),))
p = '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
# Also check permissions on individual objects
if not user.has_perm(p, obj) and not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return mark_safe('%s: <a href="%s">%s</a>' %
(escape(capfirst(opts.verbose_name)),
admin_url,
escape(obj.actual_name)))
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj.actual_name))
def _get_current_action_folder(self, request, files_qs, folders_qs):
current_folder = getattr(request, 'current_dir_list_folder', None)
if current_folder:
return current_folder
if files_qs:
return files_qs[0].folder
elif folders_qs:
return folders_qs[0].parent
else:
return None
def _list_folders_to_copy_or_move(self, request, folders):
for fo in folders:
yield self._format_callback(
fo, request.user, self.admin_site, set())
children = list(self._list_folders_to_copy_or_move(
request, fo.children.all()))
children.extend([self._format_callback(
f, request.user, self.admin_site, set())
for f in sorted(fo.files)])
if children:
yield children
def _list_all_to_copy_or_move(self, request,
files_queryset, folders_queryset):
to_copy_or_move = list(self._list_folders_to_copy_or_move(
request, folders_queryset))
to_copy_or_move.extend([self._format_callback(
f, request.user, self.admin_site, set())
for f in sorted(files_queryset)])
return to_copy_or_move
def _move_files_and_folders_impl(self, files_queryset, folders_queryset,
destination):
for f in files_queryset:
f.folder = destination
f.save()
for f_id in folders_queryset.values_list('id', flat=True):
f = Folder.objects.get(id=f_id)
f.parent = destination
f.save()
def _as_folder(self, request_data, param):
try:
return Folder.objects.get(id=int(request_data.get(param, None)))
except (Folder.DoesNotExist, ValueError, TypeError):
return None
def _clean_destination(self, request, current_folder,
selected_folders):
destination = self._as_folder(request.POST, 'destination')
if not destination:
raise PermissionDenied
# check destination permissions
if not is_valid_destination(request, destination):
raise PermissionDenied
# don't allow copy/move from folder to the same folder
if (hasattr(current_folder, 'pk') and
destination.pk == current_folder.pk):
raise PermissionDenied
# don't allow selected folders to be copied/moved inside
# themselves or inside any of their descendants
for folder in selected_folders:
destination_in_selected = folder.get_descendants(include_self=True).filter(id=destination.pk).exists()
if destination_in_selected:
raise PermissionDenied
return destination
def destination_folders(self, request):
all_required = all((
request.method == 'GET',
request.is_ajax(),
request.user.is_authenticated,
'parent' in request.GET
))
if not all_required:
raise PermissionDenied
def _valid_candidates(request, candidates_qs, selected):
# exclude orphaned/core/shared/restricted or any selected folders
current_site = request.GET.get('current_site', None)
return folders_available(current_site, request.user, candidates_qs) \
.valid_destinations(request.user) \
.unrestricted(request.user) \
.exclude(id__in=selected)
current_folder = self._as_folder(request.GET, 'current_folder')
parent = self._as_folder(request.GET, 'parent')
selected_ids = [
f_id for f_id in json.loads(request.GET.get('selected_folders') or '[]')
if f_id]
candidates = Folder.objects.filter(parent=parent)
fancytree_candidates = []
for folder in _valid_candidates(request, candidates, selected_ids):
has_children = _valid_candidates(
request, Folder.objects.filter(parent=folder), selected_ids
).exists()
# don't allow moving/copying files & folders into the folder itself
disabled = current_folder and current_folder.pk == folder.pk
fancytree_candidates.append({
'title': folder.name,
'key': "%d" % folder.pk,
'folder': has_children,
'lazy': has_children,
'hideCheckbox': disabled,
'unselectable': disabled,
'icon': folder.icons.get('32', '')
})
return HttpResponse(
json.dumps(fancytree_candidates), content_type="application/json")
def move_files_and_folders(self, request,
selected_files, selected_folders):
opts = self.model._meta
app_label = opts.app_label
if not has_multi_file_action_permission(request, selected_files, selected_folders):
messages.error(request, "You are not allowed to move some of the "\
"files and folders you selected.")
return
if selected_folders.filter(parent=None).exists():
messages.error(request, "To prevent potential problems, users "
"are not allowed to move root folders. You may copy folders "
"and files.")
return
current_folder = self._get_current_action_folder(
request, selected_files, selected_folders)
to_move = self._list_all_to_copy_or_move(
request, selected_files, selected_folders)
if request.method == 'POST' and request.POST.get('post'):
try:
destination = self._clean_destination(
request, current_folder, selected_folders)
except PermissionDenied:
messages.error(request, "The destination was not valid so the selected "\
"files and folders were not moved. Please try again.")
return
# all folders need to belong to the same site as the
# destination folder's site
sites_from_folders = \
set(selected_folders.values_list('site_id', flat=True)) | \
set(selected_files.exclude(folder__isnull=True).\
values_list('folder__site_id', flat=True))
if (sites_from_folders and
None in sites_from_folders):
messages.error(request, "Some of the selected files/folders "
"do not belong to any site. Folders need to be assigned "
"to a site before you can move files/folders from it.")
return
elif len(sites_from_folders) > 1:
# it gets here if selection is made through a search view
messages.error(request, "You cannot move files/folders that "
"belong to several sites. Select files/folders that "
"belong to only one site.")
return
elif (sites_from_folders and
sites_from_folders.pop() != destination.site.id):
messages.error(request, "Selected files/folders need to "
"belong to the same site as the destination folder.")
return
if not self._are_candidate_names_valid(
request, selected_files, selected_folders, destination):
return
# We count only topmost files and folders here
n = selected_files.count() + selected_folders.count()
if n:
self._move_files_and_folders_impl(
selected_files, selected_folders, destination)
self.message_user(request,
_("Successfully moved %(count)d files and/or "
"folders to folder '%(destination)s'.") % {
"count": n,
"destination": destination,
})
return None
context = {
"title": _("Move files and/or folders"),
"instance": current_folder,
"breadcrumbs_action": _("Move files and/or folders"),
"to_move": to_move,
"files_queryset": selected_files,
"folders_queryset": selected_folders,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
}
context.update(self.admin_site.each_context(request))
# Display the destination folder selection page
return render(request, "admin/filer/folder/choose_move_destination.html", context)
move_files_and_folders.short_description = ugettext_lazy(
"Move selected files and/or folders")
def extract_files(self, request, files_queryset, folder_queryset):
success_format = "Successfully extracted archive {}."
files_queryset = files_queryset.filter(
polymorphic_ctype=ContentType.objects.get_for_model(Archive).id)
# cannot extract in unfiled files folder
if files_queryset.filter(folder__isnull=True).exists():
raise PermissionDenied
if not has_multi_file_action_permission(request, files_queryset,
Folder.objects.none()):
raise PermissionDenied
def is_valid_archive(filer_file):
is_valid = filer_file.is_valid()
if not is_valid:
error_format = "{} is not a valid zip file"
message = error_format.format(filer_file.clean_actual_name)
messages.error(request, _(message))
return is_valid
def has_collisions(filer_file):
collisions = filer_file.collisions()
if collisions:
error_format = "Files/Folders from {archive} with names: "
error_format += "{names} already exist."
names = ", ".join(collisions)
archive = filer_file.clean_actual_name
message = error_format.format(
archive=archive,
names=names,
)
messages.error(request, _(message))
return len(collisions) > 0
for f in files_queryset:
if not is_valid_archive(f) or has_collisions(f):
continue
f.extract()
message = success_format.format(f.actual_name)
self.message_user(request, _(message))
for err_msg in f.extract_errors:
messages.warning(
request,
_("%s: %s" % (f.actual_name, err_msg))
)
extract_files.short_description = ugettext_lazy(
"Extract selected zip files")
def _copy_file(self, file_obj, destination, suffix, overwrite):
if overwrite:
# Not yet implemented as we have to find a portable
# (for different storage backends) way to overwrite files
raise NotImplementedError
# We are assuming here that we are operating on already saved
# database objects with the current database state available
# Due to how inheritance works, we have to set both pk and id to None
file_obj.pk = None
file_obj.id = None
file_obj.restricted = False
file_obj.folder = destination
# add suffix to actual name
if file_obj.name in ('', None):
file_obj.original_filename = self._generate_name(
file_obj.original_filename, suffix)
else:
file_obj.name = self._generate_name(file_obj.name, suffix)
new_path = file_obj.file.field.upload_to(file_obj, file_obj.actual_name)
file_obj.file = file_obj._copy_file(new_path)
file_obj.save()
def _copy_files(self, files, destination, suffix, overwrite):
for f in files:
self._copy_file(f, destination, suffix, overwrite)
return len(files)
def _copy_folder(self, folder, destination, suffix, overwrite):
if overwrite:
# Not yet implemented as we have to find a portable
# (for different storage backends) way to overwrite files
raise NotImplementedError
foldername = self._generate_name(folder.name, suffix)
old_folder = Folder.objects.get(pk=folder.pk)
# Due to how inheritance works, we have to set both pk and id to None
# lft and rght need to be reset since otherwise MPTT will see this
# node as 'already set up for insertion' and will not recalculate
# tree values
folder.pk = folder.id = folder.lft = folder.rght = None
folder.restricted = False
folder.name = foldername
folder.parent = destination
folder.save()
return 1 + self._copy_files_and_folders_impl(
old_folder.files.all(), old_folder.children.all(),
folder, suffix, overwrite)
def _copy_files_and_folders_impl(self, files_queryset, folders_queryset,
destination, suffix, overwrite):
n = self._copy_files(files_queryset, destination, suffix, overwrite)
for f_id in folders_queryset.values_list('id', flat=True):
f = Folder.objects.get(id=f_id)
destination = Folder.objects.get(id=destination.id)
n += self._copy_folder(f, destination, suffix, overwrite)
return n
def _generate_name(self, filename, suffix):
if not suffix:
return filename
basename, extension = os.path.splitext(filename)
return basename + suffix + extension
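# Illustrative note (added, not part of the original admin code): the suffix is
# spliced between basename and extension, so a suffix of '_copy' would turn
# 'photo.jpg' into 'photo_copy.jpg' and 'archive.tar.gz' into
# 'archive.tar_copy.gz'; an empty suffix returns the filename unchanged.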
def _are_candidate_names_valid(
self, request, file_qs, folder_qs, destination, suffix=None):
candidate_folder_names = [self._generate_name(name, suffix)
for name in folder_qs.values_list(
'name', flat=True)]
candidate_file_names = [
self._generate_name(file_obj.actual_name, suffix)
for file_obj in file_qs]
existing_names = [f.actual_name
for f in destination.entries_with_names(
candidate_folder_names + candidate_file_names)]
if existing_names:
messages.error(request,
_("File or folders with names %s already exist at the "
"selected destination") % ", ".join(existing_names))
return False
return True
def copy_files_and_folders(self, request,
files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(
request, files_queryset, folders_queryset)
to_copy = self._list_all_to_copy_or_move(
request, files_queryset, folders_queryset)
if request.method == 'POST' and request.POST.get('post'):
form = CopyFilesAndFoldersForm(request.POST)
if form.is_valid():
try:
destination = self._clean_destination(
request, current_folder, folders_queryset)
except PermissionDenied:
messages.error(request,
_("The selected destination was not valid, so the selected "\
"files and folders were not copied. Please try again."))
return None
suffix = form.cleaned_data['suffix']
if not self._are_candidate_names_valid(
request, files_queryset, folders_queryset,
destination, suffix):
return
if files_queryset.count() + folders_queryset.count():
# We count all files and folders here (recursively)
n = self._copy_files_and_folders_impl(
files_queryset, folders_queryset, destination,
suffix, False)
self.message_user(request,
_("Successfully copied %(count)d files and/or "
"folders to folder '%(destination)s'.") % {
"count": n,
"destination": destination,
})
return None
else:
form = CopyFilesAndFoldersForm()
try:
selected_destination_folder = \
int(request.POST.get('destination', 0))
except ValueError:
if current_folder:
selected_destination_folder = current_folder.pk
else:
selected_destination_folder = 0
context = {
"title": _("Copy files and/or folders"),
"instance": current_folder,
"breadcrumbs_action": _("Copy files and/or folders"),
"to_copy": to_copy,
"selected_destination_folder": selected_destination_folder,
"copy_form": form,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
}
context.update(self.admin_site.each_context(request))
# Display the destination folder selection page
return render(request, "admin/filer/folder/choose_copy_destination.html", context)
copy_files_and_folders.short_description = ugettext_lazy(
"Copy selected files and/or folders")
def files_toggle_restriction(self, request, restriction,
files_qs, folders_qs):
"""
Action which enables or disables restriction for files/folders.
"""
if request.method != 'POST':
return None
# cannot restrict/unrestrict unfiled files
unfiled_files = files_qs.filter(folder__isnull=True)
if unfiled_files.exists():
messages.warning(request, _("Some of the selected files do not have parents: %s, "
"so their rights cannot be changed.") %
', '.join([str(unfiled_file) for unfiled_file in unfiled_files.all()]))
return None
if not has_multi_file_action_permission(request, files_qs, folders_qs):
messages.warning(request, _("You are not allowed to modify the restrictions on "\
"the selected files and folders."))
return None
count = [0]
def set_files_or_folders(filer_obj):
for f in filer_obj:
if f.restricted != restriction:
f.restricted = restriction
f.save()
count[0] += 1
set_files_or_folders(files_qs)
set_files_or_folders(folders_qs)
count = count[0]
if restriction:
self.message_user(request,
_("Successfully enabled restriction for %(count)d files "
"and/or folders.") % {"count": count,})
else:
self.message_user(request,
_("Successfully disabled restriction for %(count)d files "
"and/or folders.") % {"count": count,})
return None
def enable_restriction(self, request, files_qs, folders_qs):
return self.files_toggle_restriction(
request, True, files_qs, folders_qs)
enable_restriction.short_description = ugettext_lazy(
"Enable restriction for selected files and/or folders")
def disable_restriction(self, request, files_qs, folders_qs):
return self.files_toggle_restriction(
request, False, files_qs, folders_qs)
disable_restriction.short_description = ugettext_lazy(
"Disable restriction for selected files and/or folders")
'''
def _rename_file(self, file_obj, form_data, counter, global_counter):
original_basename, original_extension = os.path.splitext(
file_obj.original_filename)
if file_obj.name:
current_basename, current_extension = os.path.splitext(
file_obj.name)
else:
current_basename = ""
current_extension = ""
file_obj.name = form_data['rename_format'] % {
'original_filename': file_obj.original_filename,
'original_basename': original_basename,
'original_extension': original_extension,
'current_filename': file_obj.name or "",
'current_basename': current_basename,
'current_extension': current_extension,
'current_folder': file_obj.folder.name,
'counter': counter + 1, # 1-based
'global_counter': global_counter + 1, # 1-based
}
file_obj.save()
def _rename_files(self, files, form_data, global_counter):
n = 0
for f in sorted(files):
self._rename_file(f, form_data, n, global_counter + n)
n += 1
return n
def _rename_folder(self, folder, form_data, global_counter):
return self._rename_files_impl(
folder.files.all(), folder.children.all(),
form_data, global_counter)
def _rename_files_impl(self, files_queryset, folders_queryset,
form_data, global_counter):
n = 0
for f in folders_queryset:
n += self._rename_folder(f, form_data, global_counter + n)
n += self._rename_files(files_queryset, form_data, global_counter + n)
return n
def rename_files(self, request, files_queryset, folders_queryset):
# this logic needs to be supplemented with a folder-type permission layer
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(
request, files_queryset, folders_queryset)
to_rename = self._list_all_to_copy_or_move(
request, files_queryset, folders_queryset)
if request.method == 'POST' and request.POST.get('post'):
form = RenameFilesForm(request.POST)
if form.is_valid():
if files_queryset.count() + folders_queryset.count():
n = self._rename_files_impl(
files_queryset, folders_queryset,
form.cleaned_data, 0)
self.message_user(request,
_("Successfully renamed %(count)d files.") % {
"count": n,
})
return None
else:
form = RenameFilesForm()
context = {
"title": _("Rename files"),
"instance": current_folder,
"breadcrumbs_action": _("Rename files"),
"to_rename": to_rename,
"rename_form": form,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
}
# Display the rename format selection page
return render(request, "admin/filer/folder/choose_rename_format.html", context=context)
rename_files.short_description = ugettext_lazy("Rename files")
def _list_folders_to_resize(self, request, folders):
for fo in folders:
children = list(self._list_folders_to_resize(
request, fo.children.all()))
children.extend([self._format_callback(
f, request.user, self.admin_site, set())
for f in sorted(fo.files)
if isinstance(f, Image)])
if children:
yield self._format_callback(
fo, request.user, self.admin_site, set())
yield children
def _list_all_to_resize(self, request, files_queryset, folders_queryset):
to_resize = list(self._list_folders_to_resize(
request, folders_queryset))
to_resize.extend([self._format_callback(
f, request.user, self.admin_site, set())
for f in sorted(files_queryset)
if isinstance(f, Image)])
return to_resize
def _new_subject_location(self, original_width, original_height,
new_width, new_height, x, y, crop):
# TODO: We could probably do better
return (round(new_width / 2), round(new_height / 2))
def _resize_image(self, image, form_data):
original_width = float(image.width)
original_height = float(image.height)
thumbnailer = FilerActionThumbnailer(
file=image.file.file,
name=image.file.name,
source_storage=image.file.source_storage,
thumbnail_storage=image.file.source_storage)
# This should overwrite the original image
new_image = thumbnailer.get_thumbnail({
'size': (form_data['width'], form_data['height']),
'crop': form_data['crop'],
'upscale': form_data['upscale'],
'subject_location': image.subject_location,
})
from django.db.models.fields.files import ImageFieldFile
image.file.file = new_image.file
image.generate_sha1()
image.save() # Also gets new width and height
subject_location = normalize_subject_location(image.subject_location)
if subject_location:
(x, y) = subject_location
x = float(x)
y = float(y)
new_width = float(image.width)
new_height = float(image.height)
(new_x, new_y) = self._new_subject_location(
original_width, original_height, new_width, new_height,
x, y, form_data['crop'])
image.subject_location = "%d,%d" % (new_x, new_y)
image.save()
def _resize_images(self, files, form_data):
n = 0
for f in files:
if isinstance(f, Image):
self._resize_image(f, form_data)
n += 1
return n
def _resize_folder(self, folder, form_data):
return self._resize_images_impl(
folder.files.all(), folder.children.all(), form_data)
def _resize_images_impl(self, files_queryset,
folders_queryset, form_data):
n = self._resize_images(files_queryset, form_data)
for f in folders_queryset:
n += self._resize_folder(f, form_data)
return n
def resize_images(self, request, files_queryset, folders_queryset):
opts = self.model._meta
app_label = opts.app_label
current_folder = self._get_current_action_folder(
request, files_queryset, folders_queryset)
to_resize = self._list_all_to_resize(
request, files_queryset, folders_queryset)
if request.method == 'POST' and request.POST.get('post'):
form = ResizeImagesForm(request.POST)
if form.is_valid():
if form.cleaned_data.get('thumbnail_option'):
form.cleaned_data['width'] = \
form.cleaned_data['thumbnail_option'].width
form.cleaned_data['height'] = \
form.cleaned_data['thumbnail_option'].height
form.cleaned_data['crop'] = \
form.cleaned_data['thumbnail_option'].crop
form.cleaned_data['upscale'] = \
form.cleaned_data['thumbnail_option'].upscale
if files_queryset.count() + folders_queryset.count():
# We count all files here (recursively)
n = self._resize_images_impl(
files_queryset, folders_queryset, form.cleaned_data)
self.message_user(request,
_("Successfully resized %(count)d images.") % {
"count": n,
})
return None
else:
form = ResizeImagesForm()
context = {
"title": _("Resize images"),
"instance": current_folder,
"breadcrumbs_action": _("Resize images"),
"to_resize": to_resize,
"resize_form": form,
"cmsplugin_enabled": ('cmsplugin_filer_image'
in django_settings.INSTALLED_APPS),
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"opts": opts,
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
}
# Display the resize options page
return render(request, "admin/filer/folder/choose_images_resize_options.html", context=context)
resize_images.short_description = ugettext_lazy("Resize selected images")
def files_set_public_or_private(self, request, set_public,
files_queryset, folders_queryset):
"""
Action which enables or disables permissions for selected
files and files in selected folders
(set them private or public).
"""
if not self.has_change_permission(request):
raise PermissionDenied
if request.method != 'POST':
return None
# We define it like that so that we can modify it inside the
# set_files function
files_count = [0]
def set_files(files):
for f in files:
if f.is_public != set_public:
f.is_public = set_public
f.save()
files_count[0] += 1
def set_folders(folders):
for f in folders:
set_files(f.files)
set_folders(f.children.all())
set_files(files_queryset)
set_folders(folders_queryset)
if set_public:
self.message_user(request,
_("Successfully disabled permissions for %(count)d files.") % {
"count": files_count[0], })
else:
self.message_user(request,
_("Successfully enabled permissions for %(count)d files.") % {
"count": files_count[0], })
return None
def files_set_private(self, request, files_queryset, folders_queryset):
return self.files_set_public_or_private(
request, False, files_queryset, folders_queryset)
files_set_private.short_description = ugettext_lazy(
"Enable permissions for selected files")
def files_set_public(self, request, files_queryset, folders_queryset):
return self.files_set_public_or_private(
request, True, files_queryset, folders_queryset)
files_set_public.short_description = ugettext_lazy(
"Disable permissions for selected files")
'''
|
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import munch
import ipaddress
import six
import socket
from openstack import _log
from openstack import utils
from openstack.cloud import exc
NON_CALLABLES = (six.string_types, bool, dict, int, float, list, type(None))
def find_nova_interfaces(addresses, ext_tag=None, key_name=None, version=4,
mac_addr=None):
ret = []
for (k, v) in iter(addresses.items()):
if key_name is not None and k != key_name:
# key_name is specified and it doesn't match the current network.
# Continue with the next one
continue
for interface_spec in v:
if ext_tag is not None:
if 'OS-EXT-IPS:type' not in interface_spec:
# ext_tag is specified, but this interface has no tag
# We could actually return right away as this means that
# this cloud doesn't support OS-EXT-IPS. Nevertheless,
# it would be better to perform an explicit check. e.g.:
# cloud._has_nova_extension('OS-EXT-IPS')
# But this needs cloud to be passed to this function.
continue
elif interface_spec['OS-EXT-IPS:type'] != ext_tag:
# Type doesn't match, continue with next one
continue
if mac_addr is not None:
if 'OS-EXT-IPS-MAC:mac_addr' not in interface_spec:
# mac_addr is specified, but this interface has no mac_addr
# We could actually return right away as this means that
# this cloud doesn't support OS-EXT-IPS-MAC. Nevertheless,
# it would be better to perform an explicit check. e.g.:
# cloud._has_nova_extension('OS-EXT-IPS-MAC')
# But this needs cloud to be passed to this function.
continue
elif interface_spec['OS-EXT-IPS-MAC:mac_addr'] != mac_addr:
# MAC doesn't match, continue with next one
continue
if interface_spec['version'] == version:
ret.append(interface_spec)
return ret
def find_nova_addresses(addresses, ext_tag=None, key_name=None, version=4,
mac_addr=None):
interfaces = find_nova_interfaces(addresses, ext_tag, key_name, version,
mac_addr)
floating_addrs = []
fixed_addrs = []
for i in interfaces:
if i.get('OS-EXT-IPS:type') == 'floating':
floating_addrs.append(i['addr'])
else:
fixed_addrs.append(i['addr'])
return floating_addrs + fixed_addrs
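# Illustrative usage sketch (added; the payload below is an assumed example of
# the Nova "addresses" dict, not data from this project). Floating addresses
# are ordered ahead of fixed ones by find_nova_addresses():
def _find_nova_addresses_example():
    addresses = {
        'private': [
            {'version': 4, 'addr': '10.0.0.5',
             'OS-EXT-IPS:type': 'fixed',
             'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:00:00:01'},
            {'version': 4, 'addr': '203.0.113.10',
             'OS-EXT-IPS:type': 'floating',
             'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:00:00:01'},
        ],
    }
    # -> ['203.0.113.10', '10.0.0.5']
    return find_nova_addresses(addresses, key_name='private', version=4)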
def get_server_ip(server, public=False, cloud_public=True, **kwargs):
"""Get an IP from the Nova addresses dict
:param server: The server to pull the address from
:param public: Whether the address we're looking for should be considered
'public' and therefore reachability tests should be
used. (defaults to False)
:param cloud_public: Whether the cloud has been configured to use private
IPs from servers as the interface_ip. This inverts the
public reachability logic, as in this case it's the
private ip we expect shade to be able to reach
"""
addrs = find_nova_addresses(server['addresses'], **kwargs)
return find_best_address(
addrs, public=public, cloud_public=cloud_public)
def get_server_private_ip(server, cloud=None):
"""Find the private IP address
If Neutron is available, search for a port on a network where
`router:external` is False and `shared` is False. This combination
indicates a private network with private IP addresses. This port should
have the private IP.
If Neutron is not available, or something goes wrong communicating with it,
as a fallback, try the list of addresses associated with the server dict,
looking for an IP type tagged as 'fixed' in the network named 'private'.
Last resort, ignore the IP type and just look for an IP on the 'private'
network (e.g., Rackspace).
"""
if cloud and not cloud.use_internal_network():
return None
# Try to get a floating IP interface. If we have one then return the
# private IP address associated with that floating IP for consistency.
fip_ints = find_nova_interfaces(server['addresses'], ext_tag='floating')
fip_mac = None
if fip_ints:
fip_mac = fip_ints[0].get('OS-EXT-IPS-MAC:mac_addr')
# Short circuit the ports/networks search below with a heavily cached
# and possibly pre-configured network name
if cloud:
int_nets = cloud.get_internal_ipv4_networks()
for int_net in int_nets:
int_ip = get_server_ip(
server, key_name=int_net['name'],
ext_tag='fixed',
cloud_public=not cloud.private,
mac_addr=fip_mac)
if int_ip is not None:
return int_ip
# Try a second time without the fixed tag. This is for old nova-network
# results that do not have the fixed/floating tag.
for int_net in int_nets:
int_ip = get_server_ip(
server, key_name=int_net['name'],
cloud_public=not cloud.private,
mac_addr=fip_mac)
if int_ip is not None:
return int_ip
ip = get_server_ip(
server, ext_tag='fixed', key_name='private', mac_addr=fip_mac)
if ip:
return ip
# Last resort, and Rackspace
return get_server_ip(
server, key_name='private')
def get_server_external_ipv4(cloud, server):
"""Find an externally routable IP for the server.
There are 5 different scenarios we have to account for:
* Cloud has externally routable IP from neutron but neutron APIs don't
work (only info available is in nova server record) (rackspace)
* Cloud has externally routable IP from neutron (runabove, ovh)
* Cloud has externally routable IP from neutron AND supports optional
private tenant networks (vexxhost, unitedstack)
* Cloud only has private tenant network provided by neutron and requires
floating-ip for external routing (dreamhost, hp)
* Cloud only has private tenant network provided by nova-network and
requires floating-ip for external routing (auro)
:param cloud: the cloud we're working with
:param server: the server dict from which we want to get an IPv4 address
:return: a string containing the IPv4 address or None
"""
if not cloud.use_external_network():
return None
if server['accessIPv4']:
return server['accessIPv4']
# Short circuit the ports/networks search below with a heavily cached
# and possibly pre-configured network name
ext_nets = cloud.get_external_ipv4_networks()
for ext_net in ext_nets:
ext_ip = get_server_ip(
server, key_name=ext_net['name'], public=True,
cloud_public=not cloud.private)
if ext_ip is not None:
return ext_ip
# Try to get a floating IP address
# Much as I might find floating IPs annoying, if it has one, that's
# almost certainly the one that wants to be used
ext_ip = get_server_ip(
server, ext_tag='floating', public=True,
cloud_public=not cloud.private)
if ext_ip is not None:
return ext_ip
# The cloud doesn't support Neutron or Neutron can't be contacted. The
# server might have fixed addresses that are reachable from outside the
# cloud (e.g. Rax) or have plain ol' floating IPs
# Try to get an address from a network named 'public'
ext_ip = get_server_ip(
server, key_name='public', public=True,
cloud_public=not cloud.private)
if ext_ip is not None:
return ext_ip
# Nothing else works, try to find a globally routable IP address
for interfaces in server['addresses'].values():
for interface in interfaces:
try:
ip = ipaddress.ip_address(interface['addr'])
except Exception:
# Skip any error, we're looking for a working ip - if the
# cloud returns garbage, it wouldn't be the first weird thing
# but it still doesn't meet the requirement of "be a working
# ip address"
continue
if ip.version == 4 and not ip.is_private:
return str(ip)
return None
def find_best_address(addresses, public=False, cloud_public=True):
do_check = public == cloud_public
if not addresses:
return None
if len(addresses) == 1:
return addresses[0]
if len(addresses) > 1 and do_check:
# We only want to do this check if the address is supposed to be
# reachable. Otherwise we're just debug log spamming on every listing
# of private ip addresses
for address in addresses:
try:
for count in utils.iterate_timeout(
5, "Timeout waiting for %s" % address, wait=0.1):
# Return the first one that is reachable
try:
for res in socket.getaddrinfo(
address, 22, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0):
family, socktype, proto, _, sa = res
connect_socket = socket.socket(
family, socktype, proto)
connect_socket.settimeout(1)
connect_socket.connect(sa)
return address
except socket.error:
# Sometimes a "no route to address" type error
# will fail fast, but can often come alive
# when retried.
continue
except Exception:
pass
# Give up and return the first - none work as far as we can tell
if do_check:
log = _log.setup_logging('openstack')
log.debug(
"The cloud returned multiple addresses %s, and we could not "
"connect to port 22 on any of them. That might be what you wanted, "
"but we have no clue what's going on, so we picked the first one "
"%s" % (addresses, addresses[0]))
return addresses[0]
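# Illustrative note (added): a single candidate is returned immediately without
# any reachability probe; several candidates with public=True trigger the
# port-22 probe above, falling back to the first entry if none answers.
def _find_best_address_example():
    # no socket probing happens when only one address is supplied
    return find_best_address(['203.0.113.10'], public=True)  # -> '203.0.113.10'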
def get_server_external_ipv6(server):
""" Get an IPv6 address reachable from outside the cloud.
This function assumes that if a server has an IPv6 address, that address
is reachable from outside the cloud.
:param server: the server from which we want to get an IPv6 address
:return: a string containing the IPv6 address or None
"""
if server['accessIPv6']:
return server['accessIPv6']
addresses = find_nova_addresses(addresses=server['addresses'], version=6)
return find_best_address(addresses, public=True)
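# Illustrative sketch (added): with no accessIPv6 set, the first IPv6 entry in
# the Nova addresses dict is returned; the function assumes any IPv6 address is
# externally reachable. The server literal is an assumed minimal record shape.
def _get_server_external_ipv6_example():
    server = {
        'accessIPv6': '',
        'addresses': {'public': [{'version': 6, 'addr': '2001:db8::10'}]},
    }
    return get_server_external_ipv6(server)  # -> '2001:db8::10'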
def get_server_default_ip(cloud, server):
""" Get the configured 'default' address
It is possible in clouds.yaml to configure for a cloud a network that
is the 'default_interface'. This is the network that should be used
to talk to instances on the network.
:param cloud: the cloud we're working with
:param server: the server dict from which we want to get the default
IPv4 address
:return: a string containing the IPv4 address or None
"""
ext_net = cloud.get_default_network()
if ext_net:
if (cloud._local_ipv6 and not cloud.force_ipv4):
# try 6 first, fall back to four
versions = [6, 4]
else:
versions = [4]
for version in versions:
ext_ip = get_server_ip(
server, key_name=ext_net['name'], version=version, public=True,
cloud_public=not cloud.private)
if ext_ip is not None:
return ext_ip
return None
def _get_interface_ip(cloud, server):
""" Get the interface IP for the server
Interface IP is the IP that should be used for communicating with the
server. It is:
- the IP on the configured default_interface network
- if cloud.private, the private ip if it exists
- if the server has a public ip, the public ip
"""
default_ip = get_server_default_ip(cloud, server)
if default_ip:
return default_ip
if cloud.private and server['private_v4']:
return server['private_v4']
if (server['public_v6'] and cloud._local_ipv6 and not cloud.force_ipv4):
return server['public_v6']
else:
return server['public_v4']
def get_groups_from_server(cloud, server, server_vars):
groups = []
# NOTE(efried): This is hardcoded to 'compute' because this method is only
# used from ComputeCloudMixin.
region = cloud.config.get_region_name('compute')
cloud_name = cloud.name
# Create a group for the cloud
groups.append(cloud_name)
# Create a group on region
groups.append(region)
# And one by cloud_region
groups.append("%s_%s" % (cloud_name, region))
# Check if the 'group' metadata key is in the server's metadata
group = server['metadata'].get('group')
if group:
groups.append(group)
for extra_group in server['metadata'].get('groups', '').split(','):
if extra_group:
groups.append(extra_group)
groups.append('instance-%s' % server['id'])
for key in ('flavor', 'image'):
if 'name' in server_vars[key]:
groups.append('%s-%s' % (key, server_vars[key]['name']))
for key, value in iter(server['metadata'].items()):
groups.append('meta-%s_%s' % (key, value))
az = server_vars.get('az', None)
if az:
# Make groups for az, region_az and cloud_region_az
groups.append(az)
groups.append('%s_%s' % (region, az))
groups.append('%s_%s_%s' % (cloud.name, region, az))
return groups
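# Illustrative sketch (added): the kind of Ansible group list this produces for
# a minimal server record. The stub cloud/config classes and all literal values
# below are assumptions made up for the example.
def _get_groups_from_server_example():
    class _StubConfig(object):
        def get_region_name(self, service):
            return 'region1'

    class _StubCloud(object):
        name = 'mycloud'
        config = _StubConfig()

    server = {'id': 'deadbeef', 'metadata': {'group': 'web'}}
    server_vars = {'flavor': {'name': 'm1.small'},
                   'image': {'name': 'cirros'},
                   'az': 'nova'}
    return get_groups_from_server(_StubCloud(), server, server_vars)
    # -> ['mycloud', 'region1', 'mycloud_region1', 'web', 'instance-deadbeef',
    #     'flavor-m1.small', 'image-cirros', 'meta-group_web', 'nova',
    #     'region1_nova', 'mycloud_region1_nova']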
def expand_server_vars(cloud, server):
"""Backwards compatibility function."""
return add_server_interfaces(cloud, server)
def _make_address_dict(fip, port):
address = dict(version=4, addr=fip['floating_ip_address'])
address['OS-EXT-IPS:type'] = 'floating'
address['OS-EXT-IPS-MAC:mac_addr'] = port['mac_address']
return address
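# Illustrative sketch (added): the minimal fields consumed from the Neutron
# floating-ip and port records; both literals below are assumptions.
def _make_address_dict_example():
    fip = {'floating_ip_address': '203.0.113.10'}
    port = {'mac_address': 'fa:16:3e:00:00:01'}
    return _make_address_dict(fip, port)
    # -> {'version': 4, 'addr': '203.0.113.10',
    #     'OS-EXT-IPS:type': 'floating',
    #     'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:00:00:01'}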
def _get_supplemental_addresses(cloud, server):
fixed_ip_mapping = {}
for name, network in server['addresses'].items():
for address in network:
if address['version'] == 6:
continue
if address.get('OS-EXT-IPS:type') == 'floating':
# We have a floating IP that nova knows about, do nothing
return server['addresses']
fixed_ip_mapping[address['addr']] = name
try:
# Don't bother doing this before the server is active, it's a waste
# of an API call while polling for a server to come up
if (cloud.has_service('network')
and cloud._has_floating_ips()
and server['status'] == 'ACTIVE'):
for port in cloud.search_ports(
filters=dict(device_id=server['id'])):
# This SHOULD return one and only one FIP - but doing it as a
# search/list lets the logic work regardless
for fip in cloud.search_floating_ips(
filters=dict(port_id=port['id'])):
fixed_net = fixed_ip_mapping.get(fip['fixed_ip_address'])
if fixed_net is None:
log = _log.setup_logging('openstack')
log.debug(
"The cloud returned floating ip %(fip)s attached"
" to server %(server)s but the fixed ip associated"
" with the floating ip in the neutron listing"
" does not exist in the nova listing. Something"
" is exceptionally broken.",
dict(fip=fip['id'], server=server['id']))
else:
server['addresses'][fixed_net].append(
_make_address_dict(fip, port))
except exc.OpenStackCloudException:
# If something goes wrong with a cloud call, that's cool - this is
# an attempt to provide additional data and should not block forward
# progress
pass
return server['addresses']
def add_server_interfaces(cloud, server):
"""Add network interface information to server.
Query the cloud as necessary to add information to the server record
about the network information needed to interface with the server.
Ensures that public_v4, public_v6, private_v4, private_v6, interface_ip,
accessIPv4 and accessIPv6 are always set.
"""
# First, add an IP address. Set it to '' rather than None if it does
# not exist to remain consistent with the pre-existing missing values
server['addresses'] = _get_supplemental_addresses(cloud, server)
server['public_v4'] = get_server_external_ipv4(cloud, server) or ''
server['public_v6'] = get_server_external_ipv6(server) or ''
server['private_v4'] = get_server_private_ip(server, cloud) or ''
server['interface_ip'] = _get_interface_ip(cloud, server) or ''
# Some clouds do not set these, but they're a regular part of the Nova
# server record. Since we know them, go ahead and set them. In the case
# where they were set previously, we use those values, so this will not break
# clouds that provide the information
if cloud.private and server['private_v4']:
server['accessIPv4'] = server['private_v4']
else:
server['accessIPv4'] = server['public_v4']
server['accessIPv6'] = server['public_v6']
return server
def expand_server_security_groups(cloud, server):
try:
groups = cloud.list_server_security_groups(server)
except exc.OpenStackCloudException:
groups = []
server['security_groups'] = groups or []
def get_hostvars_from_server(cloud, server, mounts=None):
"""Expand additional server information useful for ansible inventory.
This function may make additional cloud queries to flesh out
possibly interesting info, making it more expensive to call than
expand_server_vars if caching is not set up. If caching is set up,
the extra cost should be minimal.
"""
server_vars = add_server_interfaces(cloud, server)
flavor_id = server['flavor'].get('id')
if flavor_id:
# In newer nova, the flavor record can be kept around for flavors
# that no longer exist. The id and name are not there.
flavor_name = cloud.get_flavor_name(flavor_id)
if flavor_name:
server_vars['flavor']['name'] = flavor_name
elif 'original_name' in server['flavor']:
# Users might have code still expecting name. That name is in
# original_name.
server_vars['flavor']['name'] = server['flavor']['original_name']
expand_server_security_groups(cloud, server)
# OpenStack can return image as a string when you've booted from volume
if str(server['image']) == server['image']:
image_id = server['image']
server_vars['image'] = dict(id=image_id)
else:
image_id = server['image'].get('id', None)
if image_id:
image_name = cloud.get_image_name(image_id)
if image_name:
server_vars['image']['name'] = image_name
volumes = []
if cloud.has_service('volume'):
try:
for volume in cloud.get_volumes(server):
# Make things easier to consume elsewhere
volume['device'] = volume['attachments'][0]['device']
volumes.append(volume)
except exc.OpenStackCloudException:
pass
server_vars['volumes'] = volumes
if mounts:
for mount in mounts:
for vol in server_vars['volumes']:
if vol['display_name'] == mount['display_name']:
if 'mount' in mount:
vol['mount'] = mount['mount']
return server_vars
def obj_to_munch(obj):
""" Turn an object with attributes into a dict suitable for serializing.
Some of the things that are returned in OpenStack are objects with
attributes. That's awesome - except when you want to expose them as JSON
structures. We use this as the basis of get_hostvars_from_server above so
that we can just have a plain dict of all of the values that exist in the
nova metadata for a server.
"""
if obj is None:
return None
elif isinstance(obj, munch.Munch) or hasattr(obj, 'mock_add_spec'):
# If we obj_to_munch twice, don't fail, just return the munch
# Also, don't try to modify Mock objects - that way lies madness
return obj
elif isinstance(obj, dict):
# The new request-id tracking spec:
# https://specs.openstack.org/openstack/nova-specs/specs/juno/approved/log-request-id-mappings.html
# adds a request-ids attribute to returned objects. It does this even
# with dicts, which now become dict subclasses. So we want to convert
# the dict we get, but we also want it to fall through to object
# attribute processing so that we can also get the request_ids
# data into our resulting object.
instance = munch.Munch(obj)
else:
instance = munch.Munch()
for key in dir(obj):
try:
value = getattr(obj, key)
# some attributes can be defined as a @property, so we can't be sure
# we have a valid value
# e.g. id in python-novaclient/tree/novaclient/v2/quotas.py
except AttributeError:
continue
if isinstance(value, NON_CALLABLES) and not key.startswith('_'):
instance[key] = value
return instance
obj_to_dict = obj_to_munch
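# Illustrative usage sketch (added): only non-callable attributes that do not
# start with an underscore survive the conversion. _Demo is a hypothetical
# stand-in for a novaclient-style object.
def _obj_to_munch_example():
    class _Demo(object):
        id = 'abc123'
        name = 'example'

        def refresh(self):
            # callables such as this method are skipped
            pass

    return obj_to_munch(_Demo())  # -> Munch({'id': 'abc123', 'name': 'example'})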
def obj_list_to_munch(obj_list):
"""Enumerate through lists of objects and return lists of dictionaries.
Some of the objects returned in OpenStack are actually lists of objects,
and in order to expose the data structures as JSON, we need to facilitate
the conversion to lists of dictionaries.
"""
return [obj_to_munch(obj) for obj in obj_list]
obj_list_to_dict = obj_list_to_munch
def get_and_munchify(key, data):
"""Get the value associated with key and convert it.
The value will be converted into a Munch object or a list of Munch objects
based on its type.
"""
result = data.get(key, []) if key else data
if isinstance(result, list):
return obj_list_to_munch(result)
elif isinstance(result, dict):
return obj_to_munch(result)
return result
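# Illustrative usage sketch (added): unwrapping a typical REST payload. The
# 'servers' wrapper key and the payload shape are assumptions for illustration.
def _get_and_munchify_example():
    payload = {'servers': [{'id': '1', 'name': 'web'},
                           {'id': '2', 'name': 'db'}]}
    # -> [Munch({'id': '1', 'name': 'web'}), Munch({'id': '2', 'name': 'db'})]
    return get_and_munchify('servers', payload)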
|
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Tests for ``admin.release``.
"""
import json
import os
from gzip import GzipFile
from StringIO import StringIO
import tempfile
from textwrap import dedent
from unittest import skipUnless
from effect import sync_perform, ComposedDispatcher, base_dispatcher
from git import GitCommandError, Repo
from requests.exceptions import HTTPError
from twisted.python.filepath import FilePath
from twisted.python.procutils import which
from twisted.python.usage import UsageError
from twisted.trial.unittest import SynchronousTestCase
from .. import release
from ..release import (
upload_python_packages, upload_packages, update_repo,
publish_docs, Environments,
DocumentationRelease, DOCUMENTATION_CONFIGURATIONS, NotTagged, NotARelease,
calculate_base_branch, create_release_branch,
CreateReleaseBranchOptions, BranchExists, TagExists,
MissingPreRelease, NoPreRelease,
UploadOptions, create_pip_index, upload_pip_index,
publish_homebrew_recipe, PushFailed,
publish_vagrant_metadata, TestRedirectsOptions, get_expected_redirects,
update_license_file,
)
from ..packaging import Distribution
from ..aws import FakeAWS, CreateCloudFrontInvalidation
from ..yum import FakeYum, yum_dispatcher
from hashlib import sha256
FLOCKER_PATH = FilePath(__file__).parent().parent().parent()
def hard_linking_possible():
"""
Return True if hard linking is possible in the current directory, else
return False.
"""
scratch_directory = FilePath(tempfile.mkdtemp())
file = scratch_directory.child('src')
file.touch()
try:
os.link(file.path, scratch_directory.child('dst').path)
return True
    except OSError:
return False
finally:
scratch_directory.remove()
class PublishDocsTests(SynchronousTestCase):
"""
Tests for :func:``publish_docs``.
"""
def publish_docs(self, aws,
flocker_version, doc_version, environment):
"""
Call :func:``publish_docs``, interacting with a fake AWS.
:param FakeAWS aws: Fake AWS to interact with.
:param flocker_version: See :py:func:`publish_docs`.
:param doc_version: See :py:func:`publish_docs`.
        :param environment: See :py:func:`publish_docs`.
"""
sync_perform(
ComposedDispatcher([aws.get_dispatcher(), base_dispatcher]),
publish_docs(flocker_version, doc_version,
environment=environment))
def test_copies_documentation(self):
"""
Calling :func:`publish_docs` copies documentation from
``s3://clusterhq-dev-docs/<flocker_version>/`` to
``s3://clusterhq-staging-docs/en/<doc_version>/``.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
},
'clusterhq-dev-docs': {
'0.3.0+444.gf05215b/index.html': 'index-content',
'0.3.0+444.gf05215b/sub/index.html': 'sub-index-content',
'0.3.0+444.gf05215b/other.html': 'other-content',
'0.3.0+392.gd50b558/index.html': 'bad-index',
'0.3.0+392.gd50b558/sub/index.html': 'bad-sub-index',
'0.3.0+392.gd50b558/other.html': 'bad-other',
},
})
self.publish_docs(aws, '0.3.0+444.gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.s3_buckets['clusterhq-staging-docs'], {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': 'index-content',
'en/0.3.1/sub/index.html': 'sub-index-content',
'en/0.3.1/other.html': 'other-content',
})
def test_copies_documentation_production(self):
"""
Calling :func:`publish_docs` in production copies documentation from
``s3://clusterhq-dev-docs/<flocker_version>/`` to
``s3://clusterhq-docs/en/<doc_version>/``.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
},
'clusterhq-dev-docs': {
'0.3.1/index.html': 'index-content',
'0.3.1/sub/index.html': 'sub-index-content',
'0.3.1/other.html': 'other-content',
'0.3.0+392.gd50b558/index.html': 'bad-index',
'0.3.0+392.gd50b558/sub/index.html': 'bad-sub-index',
'0.3.0+392.gd50b558/other.html': 'bad-other',
}
})
self.publish_docs(aws, '0.3.1', '0.3.1',
environment=Environments.PRODUCTION)
self.assertEqual(
aws.s3_buckets['clusterhq-docs'], {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': 'index-content',
'en/0.3.1/sub/index.html': 'sub-index-content',
'en/0.3.1/other.html': 'other-content',
})
def test_deletes_removed_documentation(self):
"""
Calling :func:`publish_docs` replaces documentation from
``s3://clusterhq-staging-docs/en/<doc_version>/``.
with documentation from ``s3://clusterhq-dev-docs/<flocker_version>/``.
In particular, files with changed content are updated, and removed
files are deleted.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': 'old-index-content',
'en/0.3.1/sub/index.html': 'old-sub-index-content',
'en/0.3.1/other.html': 'other-content',
},
'clusterhq-dev-docs': {
'0.3.0+444.gf05215b/index.html': 'index-content',
'0.3.0+444.gf05215b/sub/index.html': 'sub-index-content',
},
})
self.publish_docs(aws, '0.3.0+444.gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.s3_buckets['clusterhq-staging-docs'], {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': 'index-content',
'en/0.3.1/sub/index.html': 'sub-index-content',
})
def test_updates_redirects(self):
"""
Calling :func:`publish_docs` with a release version updates the
redirect for ``en/latest/*`` to point at ``en/<doc_version>/*``. Any
other redirects are left untouched.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
'en/devel/': 'en/0.3.1.dev4/',
},
},
s3_buckets={
'clusterhq-staging-docs': {},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.0+444.gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.routing_rules, {
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.1/',
'en/devel/': 'en/0.3.1.dev4/',
},
})
def test_updates_redirects_devel(self):
"""
Calling :func:`publish_docs` for a development version updates the
redirect for ``en/devel/*`` to point at ``en/<doc_version>/*``. Any
other redirects are left untouched.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
'en/devel/': 'en/0.3.1.dev4/',
},
},
s3_buckets={
'clusterhq-staging-docs': {},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.0+444.gf01215b', '0.3.1.dev5',
environment=Environments.STAGING)
self.assertEqual(
aws.routing_rules, {
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
'en/devel/': 'en/0.3.1.dev5/',
},
})
def test_updates_redirects_production(self):
"""
Calling :func:`publish_docs` with a release or documentation version
and in production updates the redirect for the
``clusterhq-docs`` S3 bucket.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-docs': {
'en/latest/': 'en/0.3.0/',
'en/devel/': 'en/0.3.1.dev4/',
},
},
s3_buckets={
'clusterhq-docs': {},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.1', '0.3.1',
environment=Environments.PRODUCTION)
self.assertEqual(
aws.routing_rules, {
'clusterhq-docs': {
'en/latest/': 'en/0.3.1/',
'en/devel/': 'en/0.3.1.dev4/',
},
})
def test_creates_cloudfront_invalidation_new_files(self):
"""
Calling :func:`publish_docs` with a release or documentation version
creates an invalidation for
- en/latest/
- en/<doc_version>/
each for every path in the new documentation for <doc_version>.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': '',
'en/0.3.1/sub/index.html': '',
},
'clusterhq-dev-docs': {
'0.3.0+444.gf05215b/index.html': '',
'0.3.0+444.gf05215b/sub/index.html': '',
'0.3.0+444.gf05215b/sub/other.html': '',
},
})
self.publish_docs(aws, '0.3.0+444.gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/latest/',
'en/latest/index.html',
'en/latest/sub/',
'en/latest/sub/index.html',
'en/latest/sub/other.html',
'en/0.3.1/',
'en/0.3.1/index.html',
'en/0.3.1/sub/',
'en/0.3.1/sub/index.html',
'en/0.3.1/sub/other.html',
}),
])
def test_creates_cloudfront_invalidation_trailing_index(self):
"""
Calling :func:`publish_docs` with a release or documentation version
        does not create an additional invalidation with ``index.html``
        stripped for files whose names merely end in ``index.html``.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
},
'clusterhq-dev-docs': {
'0.3.0+444.gf05215b/sub_index.html': '',
},
})
self.publish_docs(aws, '0.3.0+444.gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/latest/',
'en/latest/sub_index.html',
'en/0.3.1/',
'en/0.3.1/sub_index.html',
}),
])
def test_creates_cloudfront_invalidation_removed_files(self):
"""
Calling :func:`publish_docs` with a release or documentation version
creates an invalidation for
- en/latest/
- en/<doc_version>/
each for every path in the old documentation for <doc_version>.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': '',
'en/0.3.1/sub/index.html': '',
},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.0+444.gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/latest/',
'en/latest/index.html',
'en/latest/sub/',
'en/latest/sub/index.html',
'en/0.3.1/',
'en/0.3.1/index.html',
'en/0.3.1/sub/',
'en/0.3.1/sub/index.html',
}),
])
def test_creates_cloudfront_invalidation_previous_version(self):
"""
Calling :func:`publish_docs` with a release or documentation version
creates an invalidation for
- en/latest/
- en/<doc_version>/
each for every path in the documentation for version that was
previously `en/latest/`.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.0/index.html': '',
'en/0.3.0/sub/index.html': '',
},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.0+444.gf05215b', '0.3.1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/latest/',
'en/latest/index.html',
'en/latest/sub/',
'en/latest/sub/index.html',
'en/0.3.1/',
'en/0.3.1/index.html',
'en/0.3.1/sub/',
'en/0.3.1/sub/index.html',
}),
])
def test_creates_cloudfront_invalidation_devel_new_files(self):
"""
Calling :func:`publish_docs` with a development version creates an
invalidation for
- en/devel/
- en/<doc_version>/
each for every path in the new documentation for <doc_version>.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/devel/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/devel/index.html': '',
'en/0.3.1.dev1/index.html': '',
'en/0.3.1.dev1/sub/index.html': '',
},
'clusterhq-dev-docs': {
'0.3.0+444.gf05215b/index.html': '',
'0.3.0+444.gf05215b/sub/index.html': '',
'0.3.0+444.gf05215b/sub/other.html': '',
},
})
self.publish_docs(aws, '0.3.0+444.gf05215b', '0.3.1.dev1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/devel/',
'en/devel/index.html',
'en/devel/sub/',
'en/devel/sub/index.html',
'en/devel/sub/other.html',
'en/0.3.1.dev1/',
'en/0.3.1.dev1/index.html',
'en/0.3.1.dev1/sub/',
'en/0.3.1.dev1/sub/index.html',
'en/0.3.1.dev1/sub/other.html',
}),
])
def test_creates_cloudfront_invalidation_devel_removed_files(self):
"""
Calling :func:`publish_docs` with a development version creates an
invalidation for
- en/devel/
- en/<doc_version>/
each for every path in the old documentation for <doc_version>.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/devel/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/devel/index.html': '',
'en/0.3.1.dev1/index.html': '',
'en/0.3.1.dev1/sub/index.html': '',
},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.0+444.gf05215b', '0.3.1.dev1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/devel/',
'en/devel/index.html',
'en/devel/sub/',
'en/devel/sub/index.html',
'en/0.3.1.dev1/',
'en/0.3.1.dev1/index.html',
'en/0.3.1.dev1/sub/',
'en/0.3.1.dev1/sub/index.html',
}),
])
def test_creates_cloudfront_invalidation_devel_previous_version(self):
"""
Calling :func:`publish_docs` with a development version creates an
invalidation for
- en/devel/
- en/<doc_version>/
each for every path in the documentation for version that was
previously `en/devel/`.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/devel/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-staging-docs': {
'index.html': '',
'en/index.html': '',
'en/devel/index.html': '',
'en/0.3.0/index.html': '',
'en/0.3.0/sub/index.html': '',
},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.0+444.gf05215b', '0.3.1.dev1',
environment=Environments.STAGING)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.staging.clusterhq.com',
paths={
'en/devel/',
'en/devel/index.html',
'en/devel/sub/',
'en/devel/sub/index.html',
'en/0.3.1.dev1/',
'en/0.3.1.dev1/index.html',
'en/0.3.1.dev1/sub/',
'en/0.3.1.dev1/sub/index.html',
}),
])
def test_creates_cloudfront_invalidation_production(self):
"""
Calling :func:`publish_docs` in production creates an invalidation for
``docs.clusterhq.com``.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-docs': {
'index.html': '',
'en/index.html': '',
'en/latest/index.html': '',
'en/0.3.1/index.html': '',
'en/0.3.1/sub/index.html': '',
},
'clusterhq-dev-docs': {},
})
self.publish_docs(aws, '0.3.1', '0.3.1',
environment=Environments.PRODUCTION)
self.assertEqual(
aws.cloudfront_invalidations, [
CreateCloudFrontInvalidation(
cname='docs.clusterhq.com',
paths={
'en/latest/',
'en/latest/index.html',
'en/latest/sub/',
'en/latest/sub/index.html',
'en/0.3.1/',
'en/0.3.1/index.html',
'en/0.3.1/sub/',
'en/0.3.1/sub/index.html',
}),
])
def test_production_gets_tagged_version(self):
"""
Trying to publish to production, when the version being pushed isn't
tagged raises an exception.
"""
aws = FakeAWS(routing_rules={}, s3_buckets={})
self.assertRaises(
NotTagged,
self.publish_docs,
aws, '0.3.0+444.gf05215b', '0.3.1.dev1',
environment=Environments.PRODUCTION)
def test_publish_to_doc_version(self):
"""
        Publishing a documentation version in a staging environment publishes
        to the version being updated.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-staging-docs': {
'en/latest/': '',
},
},
s3_buckets={
'clusterhq-staging-docs': {},
'clusterhq-dev-docs': {},
})
self.publish_docs(
aws, '0.3.1+444.gf05215b', '0.3.1.post1',
environment=Environments.STAGING)
self.assertEqual(
aws.routing_rules, {
'clusterhq-staging-docs': {
'en/latest/': 'en/0.3.1/',
},
})
def test_production_can_publish_doc_version(self):
"""
Publishing a documentation version to the version of the latest full
release in production succeeds.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-docs': {
'en/latest/': 'en/0.3.0/',
},
},
s3_buckets={
'clusterhq-docs': {},
'clusterhq-dev-docs': {},
})
# Does not raise:
self.publish_docs(
aws, '0.3.1.post1', '0.3.1', environment=Environments.PRODUCTION)
def test_production_can_publish_prerelease(self):
"""
Publishing a pre-release succeeds.
"""
aws = FakeAWS(
routing_rules={
'clusterhq-docs': {
'en/devel/': 'en/0.3.1.dev4/',
},
},
s3_buckets={
'clusterhq-docs': {},
'clusterhq-dev-docs': {},
})
# Does not raise:
self.publish_docs(
aws, '0.3.2rc1', '0.3.2rc1', environment=Environments.PRODUCTION)
def test_publish_non_release_fails(self):
"""
Trying to publish to version that isn't a release fails.
"""
aws = FakeAWS(routing_rules={}, s3_buckets={})
self.assertRaises(
NotARelease,
self.publish_docs,
aws, '0.3.0+444.gf05215b', '0.3.0+444.gf05215b',
environment=Environments.STAGING)
def assert_error_key_update(self, doc_version, environment, should_update):
"""
Call ``publish_docs`` and assert that only the expected buckets have an
updated error_key property.
:param unicode doc_version: The version of the documentation that is
being published.
:param NamedConstant environment: One of the ``NamedConstants`` in
``Environments``.
:param bool should_update: A flag indicating whether the error_key for
the bucket associated with ``environment`` is expected to be
updated.
:raises: ``FailTest`` if an error_key in any of the S3 buckets has been
updated unexpectedly.
"""
# Get a set of all target S3 buckets.
bucket_names = set()
for e in Environments.iterconstants():
bucket_names.add(
DOCUMENTATION_CONFIGURATIONS[e].documentation_bucket
)
# Pretend that both devel and latest aliases are currently pointing to
# an older version.
empty_routes = {
'en/devel/': 'en/0.0.0/',
'en/latest/': 'en/0.0.0/',
}
# In all the S3 buckets.
empty_routing_rules = {
bucket_name: empty_routes.copy()
for bucket_name in bucket_names
}
# And that all the buckets themselves are empty.
empty_buckets = {bucket_name: {} for bucket_name in bucket_names}
# Including the dev bucket
empty_buckets['clusterhq-dev-docs'] = {}
# And that all the buckets have an empty error_key
empty_error_keys = {bucket_name: b'' for bucket_name in bucket_names}
aws = FakeAWS(
routing_rules=empty_routing_rules,
s3_buckets=empty_buckets,
error_key=empty_error_keys
)
# The value of any updated error_key will include the version that's
# being published.
expected_error_path = 'en/{}/error_pages/404.html'.format(doc_version)
expected_updated_bucket = (
DOCUMENTATION_CONFIGURATIONS[environment].documentation_bucket
)
# Grab a copy of the current error_key before it gets mutated.
expected_error_keys = aws.error_key.copy()
if should_update:
# And if an error_key is expected to be updated we expect it to be
# for the bucket corresponding to the environment that we're
# publishing to.
expected_error_keys[expected_updated_bucket] = expected_error_path
self.publish_docs(
aws,
flocker_version=doc_version,
doc_version=doc_version,
environment=environment
)
self.assertEqual(expected_error_keys, aws.error_key)
def test_error_key_dev_staging(self):
"""
Publishing documentation for a development release to the staging
bucket, updates the error_key in that bucket only.
"""
self.assert_error_key_update(
doc_version='0.4.1.dev1',
environment=Environments.STAGING,
should_update=True
)
def test_error_key_dev_production(self):
"""
Publishing documentation for a development release to the production
bucket, does not update the error_key in any of the buckets.
"""
self.assert_error_key_update(
doc_version='0.4.1.dev1',
environment=Environments.PRODUCTION,
should_update=False
)
def test_error_key_pre_staging(self):
"""
Publishing documentation for a pre-release to the staging
bucket, updates the error_key in that bucket only.
"""
self.assert_error_key_update(
doc_version='0.4.1rc1',
environment=Environments.STAGING,
should_update=True
)
def test_error_key_pre_production(self):
"""
Publishing documentation for a pre-release to the production
bucket, does not update the error_key in any of the buckets.
"""
self.assert_error_key_update(
doc_version='0.4.1rc1',
environment=Environments.PRODUCTION,
should_update=False
)
def test_error_key_marketing_staging(self):
"""
Publishing documentation for a marketing release to the staging
bucket, updates the error_key in that bucket.
"""
self.assert_error_key_update(
doc_version='0.4.1',
environment=Environments.STAGING,
should_update=True
)
def test_error_key_marketing_production(self):
"""
Publishing documentation for a marketing release to the production
bucket, updates the error_key in that bucket.
"""
self.assert_error_key_update(
doc_version='0.4.1',
environment=Environments.PRODUCTION,
should_update=True
)
class UpdateRepoTests(SynchronousTestCase):
"""
Tests for :func:``update_repo``.
"""
    def setUp(self):
        self.target_bucket = 'test-target-bucket'
self.target_key = 'test/target/key'
self.package_directory = FilePath(self.mktemp())
self.packages = ['clusterhq-flocker-cli', 'clusterhq-flocker-node']
def update_repo(self, aws, yum,
package_directory, target_bucket, target_key, source_repo,
packages, flocker_version, distribution):
"""
Call :func:``update_repo``, interacting with a fake AWS and yum
utilities.
:param FakeAWS aws: Fake AWS to interact with.
:param FakeYum yum: Fake yum utilities to interact with.
See :py:func:`update_repo` for other parameter documentation.
"""
dispatchers = [aws.get_dispatcher(), yum.get_dispatcher(),
base_dispatcher]
sync_perform(
ComposedDispatcher(dispatchers),
update_repo(
package_directory=package_directory,
target_bucket=target_bucket,
target_key=target_key,
source_repo=source_repo,
packages=packages,
flocker_version=flocker_version,
distribution=distribution,
)
)
def test_fake_rpm(self):
"""
Calling :func:`update_repo` downloads the new RPMs, creates the
metadata, and uploads it to S3.
- Existing packages on S3 are preserved in the metadata.
- Other packages on the buildserver are not downloaded.
- Existing metadata files are left untouched.
"""
existing_s3_keys = {
os.path.join(self.target_key, 'existing_package.rpm'): '',
os.path.join(self.target_key,
'clusterhq-flocker-cli-0.3.3-0.dev.7.noarch.rpm'):
'existing-content-to-be-replaced', # noqa
os.path.join(self.target_key, 'repodata', 'repomod.xml'):
'<oldhash>-metadata.xml',
os.path.join(self.target_key, 'repodata',
'<oldhash>-metadata.xml'):
'metadata for: existing_package.rpm',
}
# Copy before passing to FakeAWS
expected_keys = existing_s3_keys.copy()
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: existing_s3_keys,
},
)
unspecified_package = 'unspecified-package-0.3.3-0.dev.7.noarch.rpm'
repo_contents = {
'clusterhq-flocker-cli-0.3.3-0.dev.7.noarch.rpm': 'cli-package',
'clusterhq-flocker-node-0.3.3-0.dev.7.noarch.rpm': 'node-package',
unspecified_package: 'unspecified-package-content',
}
self.update_repo(
aws=aws,
yum=FakeYum(),
package_directory=self.package_directory,
target_bucket=self.target_bucket,
target_key=self.target_key,
source_repo=create_fake_repository(self, files=repo_contents),
packages=self.packages,
flocker_version='0.3.3.dev7',
distribution=Distribution(name='centos', version='7'),
)
# The expected files are the new files plus the package which already
# existed in S3.
expected_packages = {
'existing_package.rpm',
'clusterhq-flocker-cli-0.3.3-0.dev.7.noarch.rpm',
'clusterhq-flocker-node-0.3.3-0.dev.7.noarch.rpm',
}
expected_keys.update({
'test/target/key/clusterhq-flocker-cli-0.3.3-0.dev.7.noarch.rpm':
'cli-package',
'test/target/key/clusterhq-flocker-node-0.3.3-0.dev.7.noarch.rpm':
'node-package',
})
expected_keys.update({
os.path.join(self.target_key, 'repodata', 'repomod.xml'):
'<newhash>-metadata.xml',
os.path.join(self.target_key, 'repodata',
'<newhash>-metadata.xml'):
'metadata content for: ' + ','.join(expected_packages),
})
self.assertEqual(
expected_keys,
aws.s3_buckets[self.target_bucket])
def test_fake_deb(self):
"""
Calling :func:`update_repo` downloads the new DEBs, creates the
metadata, and uploads it to S3.
- Existing packages on S3 are preserved in the metadata.
- Other packages on the buildserver are not downloaded.
"""
existing_s3_keys = {
os.path.join(self.target_key, 'existing_package.deb'): '',
os.path.join(self.target_key,
'clusterhq-flocker-cli_0.3.3-0.dev.7_all.deb'):
'existing-content-to-be-replaced', # noqa
os.path.join(self.target_key, 'Packages.gz'):
'metadata for: existing_package.deb',
}
# Copy before passing to FakeAWS
expected_keys = existing_s3_keys.copy()
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: existing_s3_keys,
},
)
unspecified_package = 'unspecified-package_0.3.3-0.dev.7_all.deb'
repo_contents = {
'clusterhq-flocker-cli_0.3.3-0.dev.7_all.deb': 'cli-package',
'clusterhq-flocker-node_0.3.3-0.dev.7_all.deb': 'node-package',
unspecified_package: 'unspecified-package-content',
}
self.update_repo(
aws=aws,
yum=FakeYum(),
package_directory=self.package_directory,
target_bucket=self.target_bucket,
target_key=self.target_key,
source_repo=create_fake_repository(self, files=repo_contents),
packages=self.packages,
flocker_version='0.3.3.dev7',
distribution=Distribution(name='ubuntu', version='14.04'),
)
# The expected files are the new files plus the package which already
# existed in S3.
expected_packages = {
'existing_package.deb',
'clusterhq-flocker-cli_0.3.3-0.dev.7_all.deb',
'clusterhq-flocker-node_0.3.3-0.dev.7_all.deb',
}
expected_keys.update({
'test/target/key/Release': 'Origin: ClusterHQ\n',
'test/target/key/clusterhq-flocker-cli_0.3.3-0.dev.7_all.deb':
'cli-package',
'test/target/key/clusterhq-flocker-node_0.3.3-0.dev.7_all.deb':
'node-package',
'test/target/key/Packages.gz':
'Packages.gz for: ' + ','.join(expected_packages),
})
self.assertEqual(
expected_keys,
aws.s3_buckets[self.target_bucket])
def test_package_not_available_exception(self):
"""
If a requested package is not available in the repository, a 404 error
is raised.
"""
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
with self.assertRaises(HTTPError) as exception:
self.update_repo(
aws=aws,
yum=FakeYum(),
package_directory=self.package_directory,
target_bucket=self.target_bucket,
target_key=self.target_key,
source_repo=create_fake_repository(
self, files={}),
packages=self.packages,
flocker_version='0.3.3.dev7',
distribution=Distribution(name="centos", version="7"),
)
self.assertEqual(404, exception.exception.response.status_code)
@skipUnless(which('createrepo'),
"Tests require the ``createrepo`` command.")
def test_real_yum_utils(self):
"""
Calling :func:`update_repo` with real yum utilities creates a
repository in S3.
"""
source_repo = FilePath(self.mktemp())
source_repo.createDirectory()
FilePath(__file__).sibling('yum-repo').copyTo(source_repo)
repo_uri = 'file://' + source_repo.path
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
class RealYum(object):
def get_dispatcher(self):
return yum_dispatcher
self.update_repo(
aws=aws,
yum=RealYum(),
package_directory=self.package_directory,
target_bucket=self.target_bucket,
target_key=self.target_key,
source_repo=repo_uri,
packages=self.packages,
flocker_version='0.3.3.dev7',
distribution=Distribution(name='centos', version='7'),
)
expected_files = {
os.path.join(self.target_key, file)
for file in [
'clusterhq-flocker-cli-0.3.3-0.dev.7.noarch.rpm',
'clusterhq-flocker-node-0.3.3-0.dev.7.noarch.rpm',
'repodata/repomd.xml',
]
}
files_on_s3 = aws.s3_buckets[self.target_bucket]
repodata_path = os.path.join(self.target_key, 'repodata')
# Yum repositories prefix metadata files with the sha256 hash
# of the file. Since these files contain timestamps, we calculate
# the hash from the file, to determine the expected file names.
for metadata_file in [
'other.sqlite.bz2',
'filelists.xml.gz',
'primary.xml.gz',
'filelists.sqlite.bz2',
'primary.sqlite.bz2',
'other.xml.gz',
]:
for key in files_on_s3:
if (key.endswith(metadata_file)
and key.startswith(repodata_path)):
expected_files.add(
os.path.join(
repodata_path,
sha256(files_on_s3[key]).hexdigest()
+ '-' + metadata_file)
)
break
else:
expected_files.add(
os.path.join(
repodata_path, '<missing>-' + metadata_file))
# The original source repository contains no metadata.
# This tests that CreateRepo creates the expected metadata files from
# given RPMs, not that any metadata files are copied.
self.assertEqual(expected_files, set(files_on_s3.keys()))
@skipUnless(which('dpkg-scanpackages'),
"Tests require the ``dpkg-scanpackages`` command.")
def test_real_dpkg_utils(self):
"""
Calling :func:`update_repo` with real dpkg utilities creates a
repository in S3.
The filenames in the repository metadata do not have the build
directory in them.
"""
source_repo = FilePath(self.mktemp())
source_repo.createDirectory()
FilePath(__file__).sibling('apt-repo').copyTo(source_repo)
repo_uri = 'file://' + source_repo.path
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
class RealYum(object):
def get_dispatcher(self):
return yum_dispatcher
self.update_repo(
aws=aws,
yum=RealYum(),
package_directory=self.package_directory,
target_bucket=self.target_bucket,
target_key=self.target_key,
source_repo=repo_uri,
packages=self.packages,
flocker_version='0.3.3.dev7',
distribution=Distribution(name="ubuntu", version="14.04"),
)
expected_files = {
os.path.join(self.target_key, file)
for file in [
'clusterhq-flocker-cli_0.3.3-0.dev.7_all.deb',
'clusterhq-flocker-node_0.3.3-0.dev.7_all.deb',
'Packages.gz',
'Release',
]
}
files_on_s3 = aws.s3_buckets[self.target_bucket]
        # The original source repository contains no metadata.
        # This tests that the expected metadata files are created from the
        # given DEBs, not that any metadata files are copied.
self.assertEqual(expected_files, set(files_on_s3.keys()))
        # The repository is built in self.package_directory.
        # Ensure that it does not leak into the metadata.
packages_gz = files_on_s3[os.path.join(self.target_key, 'Packages.gz')]
with GzipFile(fileobj=StringIO(packages_gz), mode="r") as f:
packages_metadata = f.read()
self.assertNotIn(self.package_directory.path, packages_metadata)
class UploadPackagesTests(SynchronousTestCase):
"""
Tests for :func:``upload_packages``.
"""
def upload_packages(self, aws, yum,
scratch_directory, target_bucket, version,
build_server, top_level):
"""
Call :func:``upload_packages``, interacting with a fake AWS and yum
utilities.
:param FakeAWS aws: Fake AWS to interact with.
:param FakeYum yum: Fake yum utilities to interact with.
See :py:func:`upload_packages` for other parameter documentation.
"""
dispatchers = [aws.get_dispatcher(), yum.get_dispatcher(),
base_dispatcher]
sync_perform(
ComposedDispatcher(dispatchers),
upload_packages(
scratch_directory=scratch_directory,
target_bucket=target_bucket,
version=version,
build_server=build_server,
top_level=top_level,
),
)
def setUp(self):
self.scratch_directory = FilePath(self.mktemp())
self.scratch_directory.createDirectory()
self.target_bucket = 'test-target-bucket'
self.aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
self.build_server = 'http://test-build-server.example'
def test_repositories_created(self):
"""
Calling :func:`upload_packages` creates repositories for supported
distributions.
"""
repo_contents = {
'results/omnibus/0.3.3.dev1/centos-7/clusterhq-flocker-cli-0.3.3-0.dev.1.noarch.rpm': '', # noqa
'results/omnibus/0.3.3.dev1/centos-7/clusterhq-flocker-node-0.3.3-0.dev.1.noarch.rpm': '', # noqa
'results/omnibus/0.3.3.dev1/centos-7/clusterhq-python-flocker-0.3.3-0.dev.1.x86_64.rpm': '', # noqa
'results/omnibus/0.3.3.dev1/ubuntu-14.04/clusterhq-flocker-cli_0.3.3-0.dev.1_all.deb': '', # noqa
'results/omnibus/0.3.3.dev1/ubuntu-14.04/clusterhq-flocker-node_0.3.3-0.dev.1_all.deb': '', # noqa
'results/omnibus/0.3.3.dev1/ubuntu-14.04/clusterhq-python-flocker_0.3.3-0.dev.1_amd64.deb': '', # noqa
'results/omnibus/0.3.3.dev1/ubuntu-15.04/clusterhq-flocker-cli_0.3.3-0.dev.1_all.deb': '', # noqa
'results/omnibus/0.3.3.dev1/ubuntu-15.04/clusterhq-flocker-node_0.3.3-0.dev.1_all.deb': '', # noqa
'results/omnibus/0.3.3.dev1/ubuntu-15.04/clusterhq-python-flocker_0.3.3-0.dev.1_amd64.deb': '', # noqa
}
self.upload_packages(
aws=self.aws,
yum=FakeYum(),
scratch_directory=self.scratch_directory,
target_bucket=self.target_bucket,
version='0.3.3.dev1',
build_server=create_fake_repository(self, files=repo_contents),
top_level=FLOCKER_PATH,
)
expected_files = {
'centos-testing/7/x86_64/clusterhq-flocker-cli-0.3.3-0.dev.1.noarch.rpm', # noqa
'centos-testing/7/x86_64/clusterhq-flocker-node-0.3.3-0.dev.1.noarch.rpm', # noqa
'centos-testing/7/x86_64/clusterhq-python-flocker-0.3.3-0.dev.1.x86_64.rpm', # noqa
'centos-testing/7/x86_64/repodata/repomod.xml', # noqa
'centos-testing/7/x86_64/repodata/<newhash>-metadata.xml', # noqa
'ubuntu-testing/14.04/amd64/clusterhq-flocker-cli_0.3.3-0.dev.1_all.deb', # noqa
'ubuntu-testing/14.04/amd64/clusterhq-flocker-node_0.3.3-0.dev.1_all.deb', # noqa
'ubuntu-testing/14.04/amd64/clusterhq-python-flocker_0.3.3-0.dev.1_amd64.deb', # noqa
'ubuntu-testing/14.04/amd64/Packages.gz',
'ubuntu-testing/14.04/amd64/Release',
'ubuntu-testing/15.04/amd64/clusterhq-flocker-cli_0.3.3-0.dev.1_all.deb', # noqa
'ubuntu-testing/15.04/amd64/clusterhq-flocker-node_0.3.3-0.dev.1_all.deb', # noqa
'ubuntu-testing/15.04/amd64/clusterhq-python-flocker_0.3.3-0.dev.1_amd64.deb', # noqa
'ubuntu-testing/15.04/amd64/Packages.gz',
'ubuntu-testing/15.04/amd64/Release',
}
files_on_s3 = self.aws.s3_buckets[self.target_bucket].keys()
self.assertEqual(expected_files, set(files_on_s3))
def test_key_suffixes(self):
"""
        The OS part of the keys for created repositories has a suffix (or not)
appropriate for the release type. In particular there is no "-testing"
in keys created for a marketing release.
"""
repo_contents = {
'results/omnibus/0.3.3/centos-7/clusterhq-flocker-cli-0.3.3-1.noarch.rpm': '', # noqa
'results/omnibus/0.3.3/centos-7/clusterhq-flocker-node-0.3.3-1.noarch.rpm': '', # noqa
'results/omnibus/0.3.3/centos-7/clusterhq-python-flocker-0.3.3-1.x86_64.rpm': '', # noqa
'results/omnibus/0.3.3/ubuntu-14.04/clusterhq-flocker-cli_0.3.3-1_all.deb': '', # noqa
'results/omnibus/0.3.3/ubuntu-14.04/clusterhq-flocker-node_0.3.3-1_all.deb': '', # noqa
'results/omnibus/0.3.3/ubuntu-14.04/clusterhq-python-flocker_0.3.3-1_amd64.deb': '', # noqa
'results/omnibus/0.3.3/ubuntu-15.04/clusterhq-flocker-cli_0.3.3-1_all.deb': '', # noqa
'results/omnibus/0.3.3/ubuntu-15.04/clusterhq-flocker-node_0.3.3-1_all.deb': '', # noqa
'results/omnibus/0.3.3/ubuntu-15.04/clusterhq-python-flocker_0.3.3-1_amd64.deb': '', # noqa
}
self.upload_packages(
aws=self.aws,
yum=FakeYum(),
scratch_directory=self.scratch_directory,
target_bucket=self.target_bucket,
version='0.3.3',
build_server=create_fake_repository(self, files=repo_contents),
top_level=FLOCKER_PATH,
)
files_on_s3 = self.aws.s3_buckets[self.target_bucket].keys()
self.assertEqual(set(), {f for f in files_on_s3 if '-testing' in f})
def create_fake_repository(test_case, files):
"""
Create files in a directory to mimic a repository of packages.
:param TestCase test_case: The test case to use for creating a temporary
directory.
    :param dict files: Dictionary mapping names of files to create to their
        contents.
:return: FilePath of directory containing fake package files.
"""
source_repo = FilePath(test_case.mktemp())
    source_repo.createDirectory()
for key in files:
new_file = source_repo.preauthChild(key)
if not new_file.parent().exists():
new_file.parent().makedirs()
new_file.setContent(files[key])
return 'file://' + source_repo.path
class UploadPythonPackagesTests(SynchronousTestCase):
"""
Tests for :func:``upload_python_packages``.
"""
def setUp(self):
self.target_bucket = 'test-target-bucket'
self.scratch_directory = FilePath(self.mktemp())
self.top_level = FilePath(self.mktemp())
self.top_level.makedirs()
self.aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
})
def upload_python_packages(self):
"""
Call :func:``upload_python_packages``, discarding output.
        See :py:func:`upload_python_packages` for parameter documentation.
"""
dispatchers = [self.aws.get_dispatcher(), base_dispatcher]
with open(os.devnull, "w") as discard:
sync_perform(
ComposedDispatcher(dispatchers),
upload_python_packages(
scratch_directory=self.scratch_directory,
target_bucket=self.target_bucket,
top_level=self.top_level,
output=discard,
error=discard,
)
)
@skipUnless(hard_linking_possible(),
"Hard linking is not possible in the current directory.")
def test_distributions_uploaded(self):
"""
Source and binary distributions of Flocker are uploaded to S3.
"""
self.top_level.child('setup.py').setContent(
dedent("""
from setuptools import setup
setup(
name="Flocker",
version="{package_version}",
py_modules=["Flocker"],
)
""").format(package_version='0.3.0')
)
self.upload_python_packages()
aws_keys = self.aws.s3_buckets[self.target_bucket].keys()
self.assertEqual(
sorted(aws_keys),
['python/Flocker-0.3.0-py2-none-any.whl',
'python/Flocker-0.3.0.tar.gz'])
class UploadOptionsTests(SynchronousTestCase):
"""
Tests for :class:`UploadOptions`.
"""
def test_must_be_release_version(self):
"""
Trying to upload artifacts for a version which is not a release
fails.
"""
options = UploadOptions()
self.assertRaises(
NotARelease,
options.parseOptions,
['--flocker-version', '0.3.0+444.gf05215b'])
def test_documentation_release_fails(self):
"""
Trying to upload artifacts for a documentation version fails.
"""
options = UploadOptions()
self.assertRaises(
DocumentationRelease,
options.parseOptions,
['--flocker-version', '0.3.0.post1'])
class CreateReleaseBranchOptionsTests(SynchronousTestCase):
"""
Tests for :class:`CreateReleaseBranchOptions`.
"""
def test_flocker_version_required(self):
"""
The ``--flocker-version`` option is required.
"""
options = CreateReleaseBranchOptions()
self.assertRaises(
UsageError,
options.parseOptions, [])
def create_git_repository(test_case, bare=False):
"""
Create a git repository with a ``master`` branch and ``README``.
    :param test_case: The ``TestCase`` calling this.
    :param bool bare: If ``True``, create a bare repository.
    """
directory = FilePath(test_case.mktemp())
repository = Repo.init(path=directory.path, bare=bare)
if not bare:
        directory.child('README').touch()
repository.index.add(['README'])
repository.index.commit('Initial commit')
repository.create_head('master')
return repository
class CreateReleaseBranchTests(SynchronousTestCase):
"""
Tests for :func:`create_release_branch`.
"""
def setUp(self):
self.repo = create_git_repository(test_case=self)
def test_branch_exists_fails(self):
"""
Trying to create a release when a branch already exists for the given
version fails.
"""
branch = self.repo.create_head('release/flocker-0.3.0')
self.assertRaises(
BranchExists,
create_release_branch, '0.3.0', base_branch=branch)
def test_active_branch(self):
"""
Creating a release branch changes the active branch on the given
branch's repository.
"""
branch = self.repo.create_head('release/flocker-0.3.0rc1')
create_release_branch(version='0.3.0', base_branch=branch)
self.assertEqual(
self.repo.active_branch.name,
"release/flocker-0.3.0")
def test_branch_created_from_base(self):
"""
The new branch is created from the given branch.
"""
master = self.repo.active_branch
branch = self.repo.create_head('release/flocker-0.3.0rc1')
branch.checkout()
FilePath(self.repo.working_dir).child('NEW_FILE').touch()
self.repo.index.add(['NEW_FILE'])
self.repo.index.commit('Add NEW_FILE')
master.checkout()
create_release_branch(version='0.3.0', base_branch=branch)
self.assertIn((u'NEW_FILE', 0), self.repo.index.entries)
class CreatePipIndexTests(SynchronousTestCase):
"""
Tests for :func:`create_pip_index`.
"""
def setUp(self):
self.scratch_directory = FilePath(self.mktemp())
self.scratch_directory.makedirs()
def test_index_created(self):
"""
A pip index file is created for all wheel files.
"""
index = create_pip_index(
scratch_directory=self.scratch_directory,
packages=[
'Flocker-0.3.0-py2-none-any.whl',
'Flocker-0.3.1-py2-none-any.whl'
]
)
expected = (
'<html>\nThis is an index for pip\n<div>'
'<a href="Flocker-0.3.0-py2-none-any.whl">'
'Flocker-0.3.0-py2-none-any.whl</a><br />\n</div><div>'
'<a href="Flocker-0.3.1-py2-none-any.whl">'
'Flocker-0.3.1-py2-none-any.whl</a><br />\n</div></html>'
)
self.assertEqual(expected, index.getContent())
def test_index_not_included(self):
"""
The pip index file does not reference itself.
"""
index = create_pip_index(
scratch_directory=self.scratch_directory,
packages=[
'Flocker-0.3.0-py2-none-any.whl',
'Flocker-0.3.1-py2-none-any.whl',
'index.html',
]
)
expected = (
'<html>\nThis is an index for pip\n<div>'
'<a href="Flocker-0.3.0-py2-none-any.whl">'
'Flocker-0.3.0-py2-none-any.whl</a><br />\n</div><div>'
'<a href="Flocker-0.3.1-py2-none-any.whl">'
'Flocker-0.3.1-py2-none-any.whl</a><br />\n</div></html>'
)
self.assertEqual(expected, index.getContent())
def test_quoted_destination(self):
"""
Destination links are quoted.
"""
index = create_pip_index(
scratch_directory=self.scratch_directory,
packages=[
'"Flocker-0.3.0-py2-none-any.whl',
]
)
expected = (
'<html>\nThis is an index for pip\n<div>'
'<a href=""Flocker-0.3.0-py2-none-any.whl">'
'"Flocker-0.3.0-py2-none-any.whl</a><br />\n</div></html>'
)
self.assertEqual(expected, index.getContent())
def test_escaped_title(self):
"""
Link titles are escaped.
"""
index = create_pip_index(
scratch_directory=self.scratch_directory,
packages=[
'>Flocker-0.3.0-py2-none-any.whl',
]
)
expected = (
'<html>\nThis is an index for pip\n<div>'
'<a href=">Flocker-0.3.0-py2-none-any.whl">'
'>Flocker-0.3.0-py2-none-any.whl</a><br />\n</div></html>'
)
self.assertEqual(expected, index.getContent())
class UploadPipIndexTests(SynchronousTestCase):
"""
Tests for :func:`upload_pip_index`.
"""
def test_index_uploaded(self):
"""
An index file is uploaded to S3.
"""
bucket = 'clusterhq-archive'
aws = FakeAWS(
routing_rules={},
s3_buckets={
bucket: {
'python/Flocker-0.3.1-py2-none-any.whl': '',
},
})
scratch_directory = FilePath(self.mktemp())
scratch_directory.makedirs()
sync_perform(
ComposedDispatcher([aws.get_dispatcher(), base_dispatcher]),
upload_pip_index(
scratch_directory=scratch_directory,
target_bucket=bucket))
self.assertEqual(
aws.s3_buckets[bucket]['python/index.html'],
(
'<html>\nThis is an index for pip\n<div>'
'<a href="Flocker-0.3.1-py2-none-any.whl">'
'Flocker-0.3.1-py2-none-any.whl</a><br />\n</div></html>'
))
class CalculateBaseBranchTests(SynchronousTestCase):
"""
Tests for :func:`calculate_base_branch`.
"""
def setUp(self):
self.repo = create_git_repository(test_case=self)
def calculate_base_branch(self, version):
return calculate_base_branch(
version=version, path=self.repo.working_dir)
def test_calculate_base_branch_for_non_release_fails(self):
"""
Calling :func:`calculate_base_branch` with a version that isn't a
release fails.
"""
self.assertRaises(
NotARelease,
self.calculate_base_branch, '0.3.0+444.gf05215b')
def test_weekly_release_base(self):
"""
A weekly release is created from the "master" branch.
"""
self.assertEqual(
self.calculate_base_branch(version='0.3.0.dev1').name,
"master")
def test_doc_release_base(self):
"""
        A documentation release is created from the release whose
        documentation is being changed.
"""
self.repo.create_head('release/flocker-0.3.0')
self.assertEqual(
self.calculate_base_branch(version='0.3.0.post1').name,
"release/flocker-0.3.0")
def test_first_pre_release(self):
"""
The first pre-release for a marketing release is created from the
"master" branch.
"""
self.assertEqual(
self.calculate_base_branch(version='0.3.0rc1').name,
"master")
def test_uses_previous_pre_release(self):
"""
        The second pre-release for a marketing release is created from the
        release branch of the previous pre-release.
"""
self.repo.create_head('release/flocker-0.3.0rc1')
self.repo.create_tag('0.3.0rc1')
self.repo.create_head('release/flocker-0.3.0rc2')
self.repo.create_tag('0.3.0rc2')
self.assertEqual(
self.calculate_base_branch(version='0.3.0rc3').name,
"release/flocker-0.3.0rc2")
def test_unparseable_tags(self):
"""
There is no error raised if the repository contains a tag which cannot
be parsed as a version.
"""
self.repo.create_head('release/flocker-0.3.0unparseable')
self.repo.create_tag('0.3.0unparseable')
self.repo.create_head('release/flocker-0.3.0rc2')
self.repo.create_tag('0.3.0rc2')
self.assertEqual(
self.calculate_base_branch(version='0.3.0rc3').name,
"release/flocker-0.3.0rc2")
def test_parent_repository_used(self):
"""
If a path is given as the repository path, the parents of that file
are searched until a Git repository is found.
"""
self.assertEqual(
calculate_base_branch(
version='0.3.0.dev1',
path=FilePath(self.repo.working_dir).child('README').path,
).name,
"master")
def test_no_pre_releases_fails(self):
"""
Trying to release a marketing release when no pre-release exists for it
fails.
"""
self.assertRaises(
NoPreRelease,
self.calculate_base_branch, '0.3.0')
def test_missing_pre_release_fails(self):
"""
Trying to release a pre-release when the previous pre-release does not
exist fails.
"""
self.repo.create_head('release/flocker-0.3.0rc1')
self.repo.create_tag('0.3.0rc1')
self.assertRaises(
MissingPreRelease,
self.calculate_base_branch, '0.3.0rc3')
def test_base_branch_does_not_exist_fails(self):
"""
Trying to create a release when the base branch does not exist fails.
"""
self.repo.create_tag('0.3.0rc1')
self.assertRaises(
GitCommandError,
self.calculate_base_branch, '0.3.0')
def test_tag_exists_fails(self):
"""
Trying to create a release when a tag already exists for the given
version fails.
"""
self.repo.create_tag('0.3.0')
self.assertRaises(
TagExists,
self.calculate_base_branch, '0.3.0')
def test_branch_only_exists_remote(self):
"""
        If the test branch does not exist locally, but does exist as a remote
        branch, a base branch can still be calculated.
"""
self.repo.create_head('release/flocker-0.3.0rc1')
self.repo.create_tag('0.3.0rc1')
directory = FilePath(self.mktemp())
clone = self.repo.clone(path=directory.path)
self.assertEqual(
calculate_base_branch(
version='0.3.0rc2',
path=clone.working_dir).name,
"release/flocker-0.3.0rc1")
class PublishVagrantMetadataTests(SynchronousTestCase):
"""
Tests for :func:`publish_vagrant_metadata`.
"""
def setUp(self):
self.target_bucket = 'clusterhq-archive'
self.metadata_key = 'vagrant/flocker-tutorial.json'
def metadata_version(self, version, box_filename, provider="virtualbox"):
"""
        Create a version section for Vagrant metadata, for a given box, with
        one provider (virtualbox by default).
:param bytes version: The version of the box, normalised for Vagrant.
:param bytes box_filename: The filename of the box.
:param bytes provider: The provider for the box.
:return: Dictionary to be used as a version section in Vagrant
metadata.
"""
return {
"version": version,
"providers": [
{
"url": "https://example.com/" + box_filename,
"name": provider,
}
],
}
def tutorial_metadata(self, versions):
"""
Create example tutorial metadata.
:param list versions: List of dictionaries of version sections.
:return: Dictionary to be used as Vagrant metadata.
"""
return {
"description": "clusterhq/flocker-tutorial box.",
"name": "clusterhq/flocker-tutorial",
"versions": versions,
}
def publish_vagrant_metadata(self, aws, version):
"""
Call :func:``publish_vagrant_metadata``, interacting with a fake AWS.
:param FakeAWS aws: Fake AWS to interact with.
:param version: See :py:func:`publish_vagrant_metadata`.
"""
scratch_directory = FilePath(self.mktemp())
scratch_directory.makedirs()
box_url = "https://example.com/flocker-tutorial-{}.box".format(version)
box_name = 'flocker-tutorial'
sync_perform(
ComposedDispatcher([aws.get_dispatcher(), base_dispatcher]),
publish_vagrant_metadata(
version=version,
box_url=box_url,
box_name=box_name,
target_bucket=self.target_bucket,
scratch_directory=scratch_directory))
def test_no_metadata_exists(self):
"""
A metadata file is added when one does not exist.
"""
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
self.publish_vagrant_metadata(aws=aws, version='0.3.0')
expected_version = self.metadata_version(
version="0.3.0",
box_filename="flocker-tutorial-0.3.0.box",
)
self.assertEqual(
json.loads(aws.s3_buckets[self.target_bucket][self.metadata_key]),
self.tutorial_metadata(versions=[expected_version]),
)
def test_metadata_content_type(self):
"""
Vagrant requires a JSON metadata file to have a Content-Type of
application/json.
"""
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
self.publish_vagrant_metadata(aws=aws, version='0.3.0')
self.assertEqual(
aws.s3_buckets[self.target_bucket][self.metadata_key].content_type,
'application/json'
)
def test_version_added(self):
"""
A version is added to an existing metadata file.
"""
existing_old_version = self.metadata_version(
version="0.3.0",
box_filename="flocker-tutorial-0.3.0.box",
)
existing_metadata = json.dumps(
self.tutorial_metadata(versions=[existing_old_version])
)
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {
'vagrant/flocker-tutorial.json': existing_metadata,
},
},
)
expected_new_version = self.metadata_version(
version="0.4.0",
box_filename="flocker-tutorial-0.4.0.box",
)
expected_metadata = self.tutorial_metadata(
versions=[existing_old_version, expected_new_version])
self.publish_vagrant_metadata(aws=aws, version='0.4.0')
self.assertEqual(
json.loads(aws.s3_buckets[self.target_bucket][self.metadata_key]),
expected_metadata,
)
def test_version_normalised(self):
"""
The version given is converted to a version number acceptable to
Vagrant.
"""
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {},
},
)
self.publish_vagrant_metadata(aws=aws, version='0.3.0_1')
metadata = json.loads(
aws.s3_buckets[self.target_bucket][self.metadata_key])
# The underscore is converted to a period in the version.
self.assertEqual(metadata['versions'][0]['version'], "0.3.0.1")
def test_version_already_exists(self):
"""
If a version already exists then its data is overwritten by the new
metadata. This works even if the version is changed when being
normalised.
"""
existing_version = self.metadata_version(
version="0.4.0.2314.g941011b",
box_filename="old_filename",
provider="old_provider",
)
existing_metadata = json.dumps(
self.tutorial_metadata(versions=[existing_version])
)
aws = FakeAWS(
routing_rules={},
s3_buckets={
self.target_bucket: {
'vagrant/flocker-tutorial.json': existing_metadata,
},
},
)
expected_version = self.metadata_version(
version="0.4.0.2314.g941011b",
box_filename="flocker-tutorial-0.4.0-2314-g941011b.box",
provider="virtualbox",
)
self.publish_vagrant_metadata(aws=aws, version='0.4.0-2314-g941011b')
metadata_versions = json.loads(
aws.s3_buckets[self.target_bucket][self.metadata_key])['versions']
self.assertEqual(metadata_versions, [expected_version])
class PublishHomebrewRecipeTests(SynchronousTestCase):
"""
Tests for :func:`publish_homebrew_recipe`.
"""
def setUp(self):
self.source_repo = create_git_repository(test_case=self, bare=True)
        # Making a recipe involves interacting with PyPI; this should be
        # a parameter, not a patch. See:
# https://clusterhq.atlassian.net/browse/FLOC-1759
self.patch(
release, 'make_recipe',
lambda version, sdist_url, requirements_path:
"Recipe for " + version + " at " + sdist_url
)
def test_commit_message(self):
"""
The recipe is committed with a sensible message.
"""
publish_homebrew_recipe(
homebrew_repo_url=self.source_repo.git_dir,
version='0.3.0',
scratch_directory=FilePath(self.mktemp()),
source_bucket="archive",
top_level=FLOCKER_PATH,
)
self.assertEqual(
self.source_repo.head.commit.summary,
u'Add recipe for Flocker version 0.3.0')
def test_recipe_contents(self):
"""
The passed in contents are in the recipe.
"""
publish_homebrew_recipe(
homebrew_repo_url=self.source_repo.git_dir,
version='0.3.0',
scratch_directory=FilePath(self.mktemp()),
source_bucket="bucket-name",
top_level=FLOCKER_PATH,
)
recipe = self.source_repo.head.commit.tree['flocker-0.3.0.rb']
self.assertEqual(recipe.data_stream.read(),
'Recipe for 0.3.0 at https://bucket-name.s3.amazonaws.com/python/Flocker-0.3.0.tar.gz') # noqa
def test_push_fails(self):
"""
If the push fails, an error is raised.
"""
non_bare_repo = create_git_repository(test_case=self, bare=False)
self.assertRaises(
PushFailed,
publish_homebrew_recipe,
non_bare_repo.git_dir,
'0.3.0',
"archive",
FilePath(self.mktemp()),
top_level=FLOCKER_PATH,
)
def test_recipe_already_exists(self):
"""
If a recipe already exists with the same name, it is overwritten.
"""
publish_homebrew_recipe(
homebrew_repo_url=self.source_repo.git_dir,
version='0.3.0',
scratch_directory=FilePath(self.mktemp()),
source_bucket="archive",
top_level=FLOCKER_PATH,
)
self.patch(release, 'make_recipe',
lambda version, sdist_url, requirements_path: "New content")
publish_homebrew_recipe(
homebrew_repo_url=self.source_repo.git_dir,
version='0.3.0',
scratch_directory=FilePath(self.mktemp()),
source_bucket="archive",
top_level=FLOCKER_PATH,
)
recipe = self.source_repo.head.commit.tree['flocker-0.3.0.rb']
self.assertEqual(recipe.data_stream.read(), 'New content')
class GetExpectedRedirectsTests(SynchronousTestCase):
"""
Tests for :func:`get_expected_redirects`.
"""
def test_marketing_release(self):
"""
If a marketing release version is given, marketing release redirects
are returned.
"""
self.assertEqual(
get_expected_redirects(flocker_version='0.3.0'),
{
'/': '/en/0.3.0/',
'/en/': '/en/0.3.0/',
'/en/latest': '/en/0.3.0/',
'/en/latest/faq/index.html': '/en/0.3.0/faq/index.html',
}
)
def test_development_release(self):
"""
If a development release version is given, development release
redirects are returned.
"""
self.assertEqual(
get_expected_redirects(flocker_version='0.3.0.dev1'),
{
'/en/devel': '/en/0.3.0.dev1/',
'/en/devel/faq/index.html': '/en/0.3.0.dev1/faq/index.html',
}
)
def test_documentation_release(self):
"""
If a documentation release version is given, marketing release
redirects are returned for the versions which is being updated.
"""
self.assertEqual(
get_expected_redirects(flocker_version='0.3.0.post1'),
{
'/': '/en/0.3.0/',
'/en/': '/en/0.3.0/',
'/en/latest': '/en/0.3.0/',
'/en/latest/faq/index.html': '/en/0.3.0/faq/index.html',
}
)
class TestRedirectsOptionsTests(SynchronousTestCase):
"""
Tests for :class:`TestRedirectsOptions`.
"""
def test_default_environment(self):
"""
The default environment is a staging environment.
"""
options = TestRedirectsOptions()
options.parseOptions([])
self.assertEqual(options.environment, Environments.STAGING)
def test_production_environment(self):
"""
If "--production" is passed, a production environment is used.
"""
options = TestRedirectsOptions()
options.parseOptions(['--production'])
self.assertEqual(options.environment, Environments.PRODUCTION)
class UpdateLicenseFileTests(SynchronousTestCase):
"""
Tests for :func:`update_license_file`.
"""
def test_update_license_file(self):
"""
A LICENSE file is written to the top level directory from a template in
the admin directory, and is formatted to include the given year.
"""
top_level = FilePath(self.mktemp())
top_level.child('admin').makedirs()
top_level.child('admin').child('LICENSE.template').setContent(
"Text including the current year: {current_year}.")
update_license_file(args=[], top_level=top_level, year=123)
self.assertEqual(
top_level.child('LICENSE').getContent(),
"Text including the current year: 123."
)
"""GCode Procedure"""
import decimal
import string
try:
from . import GCodeObject
except SystemError:
import GCodeObject
class GCodeParser:
"""Parse the GCode into tuple with elements.
Overview:
    This class handles a str and converts it into GCodeObject.GCode.
Example:
'X-12.0056'
* lexical_parse()
(GCodeObject.GCodeParserChar('X'), GCodeObject.GCodeParserMinus('-'), \
GCodeObject.GCodeParserInt(12), GCodeObject.GCodeParserDot('.'), \
GCodeObject.GCodeParserDigitAfterDot(2), GCodeObject.GCodeParserInt(56))
* trim_comment_and_specials()
* bind_float()
(GCodeObject.GCodeParserChar('X'), GCodeObject.GCodeParserFloat(12.0056))
* bind_to_gcode()
(GCodeObject.GCode(GCodeObject.GCodeChar('x'), GCodeObject.GCodeInt(12.0056)))
Supported characters:
'char', 'int', 'space', '-', '.', '(', ')', '%', "'", '"'"""
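    # Illustrative usage sketch (added note, not from the original module):
    # the parser is normally driven through ``run()``, e.g.
    #     parsed = GCodeParser('G01 X-12.0056 (comment)').run()
    # which applies the four stages documented above in order and returns a
    # tuple of the final bound objects.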
    def __init__(self, string_process):
        self.string_original = string_process
        # Keep the intermediate results as instance attributes (rather than
        # shared class-level lists) so separate parser instances do not
        # interfere with each other.
        self.list_lexical_parse = list()
        self.list_trim_comment_and_specials = list()
        self.list_bind_float = list()
        self.list_bind_to_gcode = list()
def run(self):
"""Run all the GCodeParser's methods"""
self.lexical_parse()
self.trim_comment_and_specials()
self.bind_float()
self.bind_to_gcode()
return tuple(self.list_bind_to_gcode)
def lexical_parse(self):
# pylint: disable=too-many-branches
"""Lexical parse, form text file to Python tuple.
Notice:
This function is designed without regular expressions."""
main_loop = True
idx = int()
result_list = list()
last_processed_type = GCodeObject.GCodeParserSpace
        # Replace newlines ('\n') with '%'
held_string = self.string_original.replace('\n', '%')
while main_loop:
# Check EOF and replace character with space
if idx == len(held_string):
character = ' '
main_loop = False
else:
character = held_string[idx]
# 'char'
if character in string.ascii_letters:
result_list.append(GCodeObject.GCodeParserChar(character.upper()))
last_processed_type = GCodeObject.GCodeParserChar
# 'zero' after dot(.) or ordinary int
elif character.isdigit():
if last_processed_type == GCodeObject.GCodeParserDot:
if character == '0':
result_list.append(GCodeObject.GCodeParserDigitAfterDot(1))
last_processed_type = GCodeObject.GCodeParserDigitAfterDot
else:
result_list.append(GCodeObject.GCodeParserInt(int(character)))
last_processed_type = GCodeObject.GCodeParserInt
elif last_processed_type == GCodeObject.GCodeParserDigitAfterDot:
if character == '0':
result_list[-1].element += 1
last_processed_type = GCodeObject.GCodeParserDigitAfterDot
else:
result_list.append(GCodeObject.GCodeParserInt(int(character)))
last_processed_type = GCodeObject.GCodeParserInt
elif last_processed_type == GCodeObject.GCodeParserInt:
result_list[-1] = GCodeObject.GCodeParserInt \
(int(result_list[-1]) * 10 + int(character))
last_processed_type = GCodeObject.GCodeParserInt
else:
result_list.append(GCodeObject.GCodeParserInt(int(character)))
last_processed_type = GCodeObject.GCodeParserInt
# 'space'
elif character.isspace():
last_processed_type = GCodeObject.GCodeParserSpace
# '-'
elif character == '-':
result_list.append(GCodeObject.GCodeParserMinus(character))
last_processed_type = GCodeObject.GCodeParserMinus
# '.'
elif character == '.':
result_list.append(GCodeObject.GCodeParserDot(character))
last_processed_type = GCodeObject.GCodeParserDot
# '('
elif character == '(':
result_list.append(GCodeObject.GCodeParserBracketLeft(character))
last_processed_type = GCodeObject.GCodeParserBracketLeft
# ')'
elif character == ')':
result_list.append(GCodeObject.GCodeParserBracketRight(character))
last_processed_type = GCodeObject.GCodeParserBracketRight
# '%', "'", '"'
elif character in ('%', "'", '"'):
result_list.append(GCodeObject.GCodeParserSpecialCharacter(character))
last_processed_type = GCodeObject.GCodeParserSpecialCharacter
else:
raise GCodeObject.GCodeSyntaxError \
('The file contains unsupported character', idx, character)
idx += 1
self.list_lexical_parse = result_list
return tuple(result_list)
def trim_comment_and_specials(self):
"""Trim the comment and special characters."""
list_before = list(self.list_lexical_parse)
list_trimmed_specials = list()
list_trimmed_twofold = list()
# Eliminate special characters
for piv in list_before:
if isinstance(piv, GCodeObject.GCodeParserSpecialCharacter):
continue
else:
list_trimmed_specials.append(piv)
# Eliminate comments
indent_level_head = 0
indent_level_tail = 0
for piv in list_trimmed_specials:
if isinstance(piv, GCodeObject.GCodeParserBracketLeft):
indent_level_head += 1
elif isinstance(piv, GCodeObject.GCodeParserBracketRight):
indent_level_head -= 1
if indent_level_head == 0 and indent_level_tail == 0:
list_trimmed_twofold.append(piv)
# Check invalid indent level
if indent_level_head < 0:
raise GCodeObject.GCodeSyntaxError('Invalid comment wrapping', piv)
indent_level_tail = indent_level_head
if indent_level_head:
raise GCodeObject.GCodeSyntaxError('Invalid comment wrapping indent level' \
, indent_level_head)
self.list_trim_comment_and_specials = list_trimmed_twofold
return tuple(list_trimmed_twofold)
def bind_float(self):
# pylint: disable=too-many-branches
"""Bind the floats"""
list_before = self.list_trim_comment_and_specials
list_result = list()
list_location_digitafterdot = list()
list_location_num = list()
list_location_dot = list()
list_location_minus_valid = list()
# Check numbers' locations and dots' locations
for index in range(0, len(list_before)):
# Check digit-after-dot tokens' locations
if isinstance(list_before[index], GCodeObject.GCodeParserDigitAfterDot):
list_location_digitafterdot.append(index)
# (If list_before[index] is not GCodeObject.GCodeParserDigitAfterDot,
# it will be ordinary numbers.)
# Check numbers' locations
elif isinstance(list_before[index].element, int) or \
isinstance(list_before[index].element, float):
list_location_num.append(index)
# Check dots' locations
if isinstance(list_before[index], GCodeObject.GCodeParserDot):
list_location_dot.append(index)
# Check whether minus(-) is valid
if isinstance(list_before[index - 1], GCodeObject.GCodeParserMinus) and \
isinstance(list_before[index], GCodeObject.GCodeParserInt):
list_location_minus_valid.append(index - 1)
# Check whether dot(.) is sealed with integers.
for index in list_location_dot:
try:
if isinstance(list_before[index - 1], GCodeObject.GCodeParserInt) and \
(True if isinstance(list_before[index + 1], GCodeObject.GCodeParserInt) or \
isinstance(list_before[index + 1], GCodeObject.GCodeParserDigitAfterDot) \
else False):
pass
else:
raise GCodeObject.GCodeSyntaxError('Dot(.) is not sealed with integers', index)
except IndexError:
if index == 1:
continue
elif index + 1 == len(list_before):
raise GCodeObject.GCodeSyntaxError('Dot(.) is located in EOF', index)
# Bind
for index in range(0, len(list_before)):
# Initialize variables
actual_spot_minuscheck = False
calculated = decimal.Decimal(1)
# Prefixes
if not index - 1 in list_location_dot and \
not index in list_location_dot and \
not index + 1 in list_location_dot and \
(False if isinstance(list_before[index], GCodeObject.GCodeParserInt) and \
index - 2 in list_location_dot else True) and \
not index - 1 in list_location_minus_valid and \
not index in list_location_minus_valid:
list_result.append(list_before[index])
# Floats - it works with dots
elif index in list_location_dot:
# Initialize variables
actual_number_len = 0
actual_number_spot = None
actual_number_value = None
try:
if isinstance(list_before[index + 1], GCodeObject.GCodeParserDigitAfterDot) and\
isinstance(list_before[index + 2], GCodeObject.GCodeParserInt):
actual_number_spot = index + 2
elif isinstance(list_before[index + 1], GCodeObject.GCodeParserInt):
actual_number_spot = index + 1
except IndexError:
pass
try:
if actual_number_spot == index + 2:
actual_number_len -= list_before[index + 1].element
while actual_number_len < len(str(list_before[actual_number_spot].element)):
calculated = calculated * decimal.Decimal('0.1')
actual_number_len += 1
# If actual_number_spot is None, list raises TypeError.
except TypeError:
actual_number_value = 0
# If indexing with actual_number_spot didn't raise TypeError, actual_number_value is this:
if actual_number_value is None:
actual_number_value = list_before[actual_number_spot].element
calculated = calculated * decimal.Decimal(actual_number_value)
calculated = list_before[index - 1].element + calculated
list_result.append(GCodeObject.GCodeParserFloat(float(calculated)))
actual_spot_minuscheck = True
# Integers - it works with integers
elif index in list_location_num and \
not index - 1 in list_location_digitafterdot and \
not index - 1 in list_location_dot and \
not index + 1 in list_location_dot:
list_result.append(list_before[index])
actual_spot_minuscheck = True
# Check minus and reverse
if actual_spot_minuscheck:
if (True if index - 2 in list_location_minus_valid and \
index in list_location_dot else False) or \
(True if index - 1 in list_location_minus_valid and \
index in list_location_num else False):
list_result[-1].element = -list_result[-1].element
# Find the unused GCodeObject objects
for elem in list_result:
if isinstance(elem, GCodeObject.GCodeParserMinus) or \
isinstance(elem, GCodeObject.GCodeParserDot):
raise GCodeObject.GCodeSyntaxError('Check minus(-) or Dot(.)', elem)
self.list_bind_float = list_result
return tuple(list_result)
def bind_to_gcode(self):
# pylint: disable=redefined-variable-type
"""Bind the list into G-code object"""
list_before = self.list_bind_float
odd = False
tem_prefix = None
tem_number = None
list_result = list()
for index in list_before:
odd = not odd
if odd and isinstance(index, GCodeObject.GCodeParserChar):
tem_prefix = index
elif not odd and isinstance(index, GCodeObject.GCodeParserNumberBase):
if isinstance(index, GCodeObject.GCodeParserInt):
tem_number = GCodeObject.GCodeInt(index.element)
else:
tem_number = GCodeObject.GCodeFloat(index.element)
list_result.append(GCodeObject.GCode( \
GCodeObject.GCodePrefix(tem_prefix.element), tem_number))
else:
raise GCodeObject.GCodeSyntaxError('Check the sequence of prefixes and numbers' \
, index)
# If odd is True, the g-code sequence does not end with a number.
if odd:
raise GCodeObject.GCodeSyntaxError('G-code does not end with a number')
self.list_bind_to_gcode = list_result
return tuple(list_result)
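# Usage sketch (editor's addition, not part of the original module): running the
# full pipeline on the word shown in the class docstring above. Requires the
# GCodeObject module from this package; the input string is only an example.
if __name__ == '__main__':
    parser = GCodeParser('X-12.0056')
    print(parser.run())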
|
|
from django.template import RequestContext, loader
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse_lazy
from django.conf import settings
from models import (
StatusCheck, GraphiteStatusCheck, JenkinsStatusCheck, HttpStatusCheck, ICMPStatusCheck,
StatusCheckResult, UserProfile, Service, Instance, Shift, get_duty_officers)
from tasks import run_status_check as _run_status_check
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic import (
DetailView, CreateView, UpdateView, ListView, DeleteView, TemplateView, FormView, View)
from django import forms
from .graphite import get_data, get_matching_metrics
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.timezone import utc
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from cabot.cabotapp import alert
from models import AlertPluginUserData
from django.forms.models import (inlineformset_factory, modelformset_factory)
from django import shortcuts
from itertools import groupby, dropwhile, izip_longest
import requests
import json
import re
class LoginRequiredMixin(object):
@method_decorator(login_required)
def dispatch(self, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)
@login_required
def subscriptions(request):
""" Simple list of all services and users, plus the current duty officers """
t = loader.get_template('cabotapp/subscriptions.html')
services = Service.objects.all()
users = User.objects.filter(is_active=True)
c = RequestContext(request, {
'services': services,
'users': users,
'duty_officers': get_duty_officers(),
})
return HttpResponse(t.render(c))
@login_required
def run_status_check(request, pk):
"""Runs a specific check"""
_run_status_check(check_or_id=pk)
return HttpResponseRedirect(reverse('check', kwargs={'pk': pk}))
def duplicate_icmp_check(request, pk):
pc = StatusCheck.objects.get(pk=pk)
npk = pc.duplicate()
return HttpResponseRedirect(reverse('update-icmp-check', kwargs={'pk': npk}))
def duplicate_instance(request, pk):
instance = Instance.objects.get(pk=pk)
new_instance = instance.duplicate()
return HttpResponseRedirect(reverse('update-instance', kwargs={'pk': new_instance}))
def duplicate_http_check(request, pk):
pc = StatusCheck.objects.get(pk=pk)
npk = pc.duplicate()
return HttpResponseRedirect(reverse('update-http-check', kwargs={'pk': npk}))
def duplicate_graphite_check(request, pk):
pc = StatusCheck.objects.get(pk=pk)
npk = pc.duplicate()
return HttpResponseRedirect(reverse('update-graphite-check', kwargs={'pk': npk}))
def duplicate_jenkins_check(request, pk):
pc = StatusCheck.objects.get(pk=pk)
npk = pc.duplicate()
return HttpResponseRedirect(reverse('update-jenkins-check', kwargs={'pk': npk}))
class StatusCheckResultDetailView(LoginRequiredMixin, DetailView):
model = StatusCheckResult
context_object_name = 'result'
class SymmetricalForm(forms.ModelForm):
symmetrical_fields = () # Iterable of 2-tuples (field, model)
def __init__(self, *args, **kwargs):
super(SymmetricalForm, self).__init__(*args, **kwargs)
if self.instance and self.instance.pk:
for field in self.symmetrical_fields:
self.fields[field].initial = getattr(
self.instance, field).all()
def save(self, commit=True):
instance = super(SymmetricalForm, self).save(commit=False)
if commit:
instance.save()
if instance.pk:
for field in self.symmetrical_fields:
setattr(instance, field, self.cleaned_data[field])
self.save_m2m()
return instance
base_widgets = {
'name': forms.TextInput(attrs={
'style': 'width:30%',
}),
'importance': forms.RadioSelect(),
}
class StatusCheckForm(SymmetricalForm):
symmetrical_fields = ('service_set', 'instance_set')
service_set = forms.ModelMultipleChoiceField(
queryset=Service.objects.all(),
required=False,
help_text='Link to service(s).',
widget=forms.SelectMultiple(
attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
},
)
)
instance_set = forms.ModelMultipleChoiceField(
queryset=Instance.objects.all(),
required=False,
help_text='Link to instance(s).',
widget=forms.SelectMultiple(
attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
},
)
)
class GraphiteStatusCheckForm(StatusCheckForm):
class Meta:
model = GraphiteStatusCheck
fields = (
'name',
'metric',
'check_type',
'value',
'frequency',
'active',
'importance',
'expected_num_hosts',
'expected_num_metrics',
'debounce',
)
widgets = dict(**base_widgets)
widgets.update({
'value': forms.TextInput(attrs={
'style': 'width: 100px',
'placeholder': 'threshold value',
}),
'metric': forms.TextInput(attrs={
'style': 'width: 100%',
'placeholder': 'graphite metric key'
}),
'check_type': forms.Select(attrs={
'data-rel': 'chosen',
})
})
class ICMPStatusCheckForm(StatusCheckForm):
class Meta:
model = ICMPStatusCheck
fields = (
'name',
'frequency',
'importance',
'active',
'debounce',
)
widgets = dict(**base_widgets)
class HttpStatusCheckForm(StatusCheckForm):
class Meta:
model = HttpStatusCheck
fields = (
'name',
'endpoint',
'username',
'password',
'text_match',
'status_code',
'timeout',
'verify_ssl_certificate',
'frequency',
'importance',
'active',
'debounce',
)
widgets = dict(**base_widgets)
widgets.update({
'endpoint': forms.TextInput(attrs={
'style': 'width: 100%',
'placeholder': 'https://www.arachnys.com',
}),
'username': forms.TextInput(attrs={
'style': 'width: 30%',
}),
'password': forms.TextInput(attrs={
'style': 'width: 30%',
}),
'text_match': forms.TextInput(attrs={
'style': 'width: 100%',
'placeholder': '[Aa]rachnys\s+[Rr]ules',
}),
'status_code': forms.TextInput(attrs={
'style': 'width: 20%',
'placeholder': '200',
}),
})
class JenkinsStatusCheckForm(StatusCheckForm):
class Meta:
model = JenkinsStatusCheck
fields = (
'name',
'importance',
'debounce',
'max_queued_build_time',
)
widgets = dict(**base_widgets)
class InstanceForm(SymmetricalForm):
symmetrical_fields = ('service_set',)
service_set = forms.ModelMultipleChoiceField(
queryset=Service.objects.all(),
required=False,
help_text='Link to service(s).',
widget=forms.SelectMultiple(
attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
},
)
)
class Meta:
model = Instance
template_name = 'instance_form.html'
fields = (
'name',
'address',
'users_to_notify',
'status_checks',
'service_set',
)
widgets = {
'name': forms.TextInput(attrs={'style': 'width: 30%;'}),
'address': forms.TextInput(attrs={'style': 'width: 70%;'}),
'status_checks': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'service_set': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'alerts': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'users_to_notify': forms.CheckboxSelectMultiple(),
'hackpad_id': forms.TextInput(attrs={'style': 'width:30%;'}),
}
def __init__(self, *args, **kwargs):
ret = super(InstanceForm, self).__init__(*args, **kwargs)
self.fields['users_to_notify'].queryset = User.objects.filter(
is_active=True)
return ret
class ServiceForm(forms.ModelForm):
class Meta:
model = Service
template_name = 'service_form.html'
fields = (
'name',
'url',
'users_to_notify',
'status_checks',
'instances',
'alerts',
'alerts_enabled',
'hackpad_id',
)
widgets = {
'name': forms.TextInput(attrs={'style': 'width: 30%;'}),
'url': forms.TextInput(attrs={'style': 'width: 70%;'}),
'status_checks': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'instances': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'alerts': forms.SelectMultiple(attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
}),
'users_to_notify': forms.CheckboxSelectMultiple(),
'hackpad_id': forms.TextInput(attrs={'style': 'width:30%;'}),
}
def __init__(self, *args, **kwargs):
ret = super(ServiceForm, self).__init__(*args, **kwargs)
self.fields['users_to_notify'].queryset = User.objects.filter(
is_active=True)
return ret
def clean_hackpad_id(self):
value = self.cleaned_data['hackpad_id']
if not value:
return ''
for pattern in settings.RECOVERY_SNIPPETS_WHITELIST:
if re.match(pattern, value):
return value
raise ValidationError('Please specify a valid JS snippet link')
class StatusCheckReportForm(forms.Form):
service = forms.ModelChoiceField(
queryset=Service.objects.all(),
widget=forms.HiddenInput
)
checks = forms.ModelMultipleChoiceField(
queryset=StatusCheck.objects.all(),
widget=forms.SelectMultiple(
attrs={
'data-rel': 'chosen',
'style': 'width: 70%',
},
)
)
date_from = forms.DateField(label='From', widget=forms.DateInput(attrs={'class': 'datepicker'}))
date_to = forms.DateField(label='To', widget=forms.DateInput(attrs={'class': 'datepicker'}))
def get_report(self):
checks = self.cleaned_data['checks']
now = timezone.now()
for check in checks:
# Group results of the check by status (failed alternating with succeeded),
# take time of the first one in each group (starting from a failed group),
# split them into pairs and form the list of problems.
results = check.statuscheckresult_set.filter(
time__gte=self.cleaned_data['date_from'],
time__lt=self.cleaned_data['date_to'] + timedelta(days=1)
).order_by('time')
groups = dropwhile(lambda item: item[0], groupby(results, key=lambda r: r.succeeded))
times = [next(group).time for succeeded, group in groups]
pairs = izip_longest(*([iter(times)] * 2))
check.problems = [(start, end, (end or now) - start) for start, end in pairs]
if results:
check.success_rate = results.filter(succeeded=True).count() / float(len(results)) * 100
return checks
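# Illustrative sketch (editor's addition, not used by the views above): the
# groupby/dropwhile/izip_longest pipeline from StatusCheckReportForm.get_report,
# applied to plain (succeeded, time) tuples instead of StatusCheckResult rows.
def _example_problem_pairs(flags_and_times):
    """flags_and_times: iterable of (succeeded, time) tuples ordered by time."""
    # Drop any leading run of successes, then keep the first timestamp of each
    # alternating group and pair them up as (failure start, recovery time).
    groups = dropwhile(lambda item: item[0],
                       groupby(flags_and_times, key=lambda ft: ft[0]))
    times = [next(group)[1] for _succeeded, group in groups]
    return list(izip_longest(*([iter(times)] * 2)))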
class CheckCreateView(LoginRequiredMixin, CreateView):
template_name = 'cabotapp/statuscheck_form.html'
def form_valid(self, form):
form.instance.created_by = self.request.user
return super(CheckCreateView, self).form_valid(form)
def get_initial(self):
if self.initial:
initial = self.initial
else:
initial = {}
metric = self.request.GET.get('metric')
if metric:
initial['metric'] = metric
service_id = self.request.GET.get('service')
instance_id = self.request.GET.get('instance')
if service_id:
try:
service = Service.objects.get(id=service_id)
initial['service_set'] = [service]
except Service.DoesNotExist:
pass
if instance_id:
try:
instance = Instance.objects.get(id=instance_id)
initial['instance_set'] = [instance]
except Instance.DoesNotExist:
pass
return initial
def get_success_url(self):
if self.request.GET.get('service'):
return reverse('service', kwargs={'pk': self.request.GET.get('service')})
if self.request.GET.get('instance'):
return reverse('instance', kwargs={'pk': self.request.GET.get('instance')})
return reverse('checks')
class CheckUpdateView(LoginRequiredMixin, UpdateView):
template_name = 'cabotapp/statuscheck_form.html'
def get_success_url(self):
return reverse('check', kwargs={'pk': self.object.id})
class ICMPCheckCreateView(CheckCreateView):
model = ICMPStatusCheck
form_class = ICMPStatusCheckForm
class ICMPCheckUpdateView(CheckUpdateView):
model = ICMPStatusCheck
form_class = ICMPStatusCheckForm
class GraphiteCheckUpdateView(CheckUpdateView):
model = GraphiteStatusCheck
form_class = GraphiteStatusCheckForm
class GraphiteCheckCreateView(CheckCreateView):
model = GraphiteStatusCheck
form_class = GraphiteStatusCheckForm
class HttpCheckCreateView(CheckCreateView):
model = HttpStatusCheck
form_class = HttpStatusCheckForm
class HttpCheckUpdateView(CheckUpdateView):
model = HttpStatusCheck
form_class = HttpStatusCheckForm
class JenkinsCheckCreateView(CheckCreateView):
model = JenkinsStatusCheck
form_class = JenkinsStatusCheckForm
def form_valid(self, form):
form.instance.frequency = 1
return super(JenkinsCheckCreateView, self).form_valid(form)
class JenkinsCheckUpdateView(CheckUpdateView):
model = JenkinsStatusCheck
form_class = JenkinsStatusCheckForm
def form_valid(self, form):
form.instance.frequency = 1
return super(JenkinsCheckUpdateView, self).form_valid(form)
class StatusCheckListView(LoginRequiredMixin, ListView):
model = StatusCheck
context_object_name = 'checks'
def get_queryset(self):
return StatusCheck.objects.all().order_by('name').prefetch_related('service_set', 'instance_set')
class StatusCheckDeleteView(LoginRequiredMixin, DeleteView):
model = StatusCheck
success_url = reverse_lazy('checks')
context_object_name = 'check'
template_name = 'cabotapp/statuscheck_confirm_delete.html'
class StatusCheckDetailView(LoginRequiredMixin, DetailView):
model = StatusCheck
context_object_name = 'check'
template_name = 'cabotapp/statuscheck_detail.html'
def render_to_response(self, context, *args, **kwargs):
if context is None:
context = {}
context['checkresults'] = self.object.statuscheckresult_set.order_by(
'-time_complete')[:100]
return super(StatusCheckDetailView, self).render_to_response(context, *args, **kwargs)
class UserProfileUpdateView(LoginRequiredMixin, View):
model = AlertPluginUserData
def get(self, *args, **kwargs):
return HttpResponseRedirect(reverse('update-alert-user-data', args=(self.kwargs['pk'], u'General')))
class UserProfileUpdateAlert(LoginRequiredMixin, View):
template = loader.get_template('cabotapp/alertpluginuserdata_form.html')
model = AlertPluginUserData
def get(self, request, pk, alerttype):
try:
profile = UserProfile.objects.get(user=pk)
except UserProfile.DoesNotExist:
user = User.objects.get(id=pk)
profile = UserProfile(user=user)
profile.save()
profile.user_data()
if (alerttype == u'General'):
form = GeneralSettingsForm(initial={
'first_name': profile.user.first_name,
'last_name' : profile.user.last_name,
'email_address' : profile.user.email,
'enabled' : profile.user.is_active,
})
else:
plugin_userdata = self.model.objects.get(title=alerttype, user=profile)
form_model = get_object_form(type(plugin_userdata))
form = form_model(instance=plugin_userdata)
c = RequestContext(request, {
'form': form,
'alert_preferences': profile.user_data(),
})
return HttpResponse(self.template.render(c))
def post(self, request, pk, alerttype):
profile = UserProfile.objects.get(user=pk)
if (alerttype == u'General'):
form = GeneralSettingsForm(request.POST)
if form.is_valid():
profile.user.first_name = form.cleaned_data['first_name']
profile.user.last_name = form.cleaned_data['last_name']
profile.user.is_active = form.cleaned_data['enabled']
profile.user.email = form.cleaned_data['email_address']
profile.user.save()
return HttpResponseRedirect(reverse('update-alert-user-data', args=(self.kwargs['pk'], alerttype)))
else:
plugin_userdata = self.model.objects.get(title=alerttype, user=profile)
form_model = get_object_form(type(plugin_userdata))
form = form_model(request.POST, instance=plugin_userdata)
form.save()
if form.is_valid():
return HttpResponseRedirect(reverse('update-alert-user-data', args=(self.kwargs['pk'], alerttype)))
def get_object_form(model_type):
class AlertPreferencesForm(forms.ModelForm):
class Meta:
model = model_type
def is_valid(self):
return True
return AlertPreferencesForm
class GeneralSettingsForm(forms.Form):
first_name = forms.CharField(label='First name', max_length=30, required=False)
last_name = forms.CharField(label='Last name', max_length=30, required=False)
email_address = forms.CharField(label='Email Address', max_length=75, required=False) #We use 75 and not the 254 because Django 1.6.8 only supports 75. See commit message for details.
enabled = forms.BooleanField(label='Enabled', required=False)
class InstanceListView(LoginRequiredMixin, ListView):
model = Instance
context_object_name = 'instances'
def get_queryset(self):
return Instance.objects.all().order_by('name').prefetch_related('status_checks')
class ServiceListView(LoginRequiredMixin, ListView):
model = Service
context_object_name = 'services'
def get_queryset(self):
return Service.objects.all().order_by('name').prefetch_related('status_checks')
class InstanceDetailView(LoginRequiredMixin, DetailView):
model = Instance
context_object_name = 'instance'
def get_context_data(self, **kwargs):
context = super(InstanceDetailView, self).get_context_data(**kwargs)
date_from = date.today() - relativedelta(day=1)
context['report_form'] = StatusCheckReportForm(initial={
'checks': self.object.status_checks.all(),
'service': self.object,
'date_from': date_from,
'date_to': date_from + relativedelta(months=1) - relativedelta(days=1)
})
return context
class ServiceDetailView(LoginRequiredMixin, DetailView):
model = Service
context_object_name = 'service'
def get_context_data(self, **kwargs):
context = super(ServiceDetailView, self).get_context_data(**kwargs)
date_from = date.today() - relativedelta(day=1)
context['report_form'] = StatusCheckReportForm(initial={
'alerts': self.object.alerts.all(),
'checks': self.object.status_checks.all(),
'service': self.object,
'date_from': date_from,
'date_to': date_from + relativedelta(months=1) - relativedelta(days=1)
})
return context
class InstanceCreateView(LoginRequiredMixin, CreateView):
model = Instance
form_class = InstanceForm
def form_valid(self, form):
ret = super(InstanceCreateView, self).form_valid(form)
if self.object.status_checks.filter(polymorphic_ctype__model='icmpstatuscheck').count() == 0:
self.generate_default_ping_check(self.object)
return ret
def generate_default_ping_check(self, obj):
pc = ICMPStatusCheck(
name="Default Ping Check for %s" % obj.name,
frequency=5,
importance=Service.ERROR_STATUS,
debounce=0,
created_by=None,
)
pc.save()
obj.status_checks.add(pc)
def get_success_url(self):
return reverse('instance', kwargs={'pk': self.object.id})
def get_initial(self):
if self.initial:
initial = self.initial
else:
initial = {}
service_id = self.request.GET.get('service')
if service_id:
try:
service = Service.objects.get(id=service_id)
initial['service_set'] = [service]
except Service.DoesNotExist:
pass
return initial
class ServiceCreateView(LoginRequiredMixin, CreateView):
model = Service
form_class = ServiceForm
alert.update_alert_plugins()
def get_success_url(self):
return reverse('service', kwargs={'pk': self.object.id})
class InstanceUpdateView(LoginRequiredMixin, UpdateView):
model = Instance
form_class = InstanceForm
def get_success_url(self):
return reverse('instance', kwargs={'pk': self.object.id})
class ServiceUpdateView(LoginRequiredMixin, UpdateView):
model = Service
form_class = ServiceForm
def get_success_url(self):
return reverse('service', kwargs={'pk': self.object.id})
class ServiceDeleteView(LoginRequiredMixin, DeleteView):
model = Service
success_url = reverse_lazy('services')
context_object_name = 'service'
template_name = 'cabotapp/service_confirm_delete.html'
class InstanceDeleteView(LoginRequiredMixin, DeleteView):
model = Instance
success_url = reverse_lazy('instances')
context_object_name = 'instance'
template_name = 'cabotapp/instance_confirm_delete.html'
class ShiftListView(LoginRequiredMixin, ListView):
model = Shift
context_object_name = 'shifts'
def get_queryset(self):
return Shift.objects.filter(
end__gt=datetime.utcnow().replace(tzinfo=utc),
deleted=False).order_by('start')
class StatusCheckReportView(LoginRequiredMixin, TemplateView):
template_name = 'cabotapp/statuscheck_report.html'
def get_context_data(self, **kwargs):
form = StatusCheckReportForm(self.request.GET)
if form.is_valid():
return {'checks': form.get_report(), 'service': form.cleaned_data['service']}
# Misc JSON api and other stuff
def checks_run_recently(request):
"""
Checks whether the status checks are running by looking for any results completed in the last 10 minutes.
"""
ten_mins = datetime.utcnow().replace(tzinfo=utc) - timedelta(minutes=10)
most_recent = StatusCheckResult.objects.filter(time_complete__gte=ten_mins)
if most_recent.exists():
return HttpResponse('Checks running')
return HttpResponse('Checks not running')
def jsonify(d):
return HttpResponse(json.dumps(d), content_type='application/json')
@login_required
def graphite_api_data(request):
metric = request.GET.get('metric')
data = None
matching_metrics = None
try:
data = get_data(metric)
except requests.exceptions.RequestException, e:
pass
if not data:
try:
matching_metrics = get_matching_metrics(metric)
except requests.exceptions.RequestException, e:
return jsonify({'status': 'error', 'message': str(e)})
matching_metrics = {'metrics': matching_metrics}
return jsonify({'status': 'ok', 'data': data, 'matchingMetrics': matching_metrics})
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for bisection."""
import datetime
import unittest
import mock
from clusterfuzz._internal.base import bisection
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.tests.test_libs import helpers
from clusterfuzz._internal.tests.test_libs import mock_config
from clusterfuzz._internal.tests.test_libs import test_utils
@test_utils.with_cloud_emulators('datastore')
class RequestBisectionTest(unittest.TestCase):
"""Tests request_bisection."""
def setUp(self):
helpers.patch_environ(self)
helpers.patch(self, [
'clusterfuzz._internal.build_management.build_manager.get_primary_bucket_path',
'clusterfuzz._internal.build_management.build_manager.get_revisions_list',
'clusterfuzz._internal.build_management.revisions.get_component_range_list',
'clusterfuzz._internal.config.local_config.ProjectConfig',
'clusterfuzz._internal.google_cloud_utils.blobs.read_key',
'clusterfuzz._internal.google_cloud_utils.pubsub.PubSubClient.publish',
])
self.mock.ProjectConfig.return_value = mock_config.MockConfig({
'env': {
'PROJECT_NAME': 'test-project',
},
'bisect_service': {
'pubsub_topic': '/projects/project/topics/topic',
}
})
data_types.FuzzTarget(
id='libFuzzer_proj_target',
engine='libFuzzer',
project='proj',
binary='target').put()
self.testcase = data_types.Testcase(
timestamp=datetime.datetime(2021, 1, 1),
crash_type='crash-type',
crash_state='A\nB\nC',
security_flag=True,
bug_information='1337',
job_type='libfuzzer_asan_proj',
fuzzer_name='libFuzzer',
overridden_fuzzer_name='libFuzzer_proj_target',
regression='123:456',
fixed='123:456',
crash_revision=3,
security_severity=data_types.SecuritySeverity.MEDIUM,
additional_metadata='{"last_tested_crash_revision": 4}')
self.testcase.put()
self.mock.read_key.return_value = b'reproducer'
self.mock.get_component_range_list.return_value = [
{
'link_text': 'old:new',
},
]
def _test(self, sanitizer, old_commit='old', new_commit='new'):
"""Test task publication."""
bisection.request_bisection(self.testcase)
publish_calls = self.mock.publish.call_args_list
bisect_types = ('regressed', 'fixed')
self.assertEqual(2, len(publish_calls))
for bisect_type, publish_call in zip(bisect_types, publish_calls):
topic = publish_call[0][1]
message = publish_call[0][2][0]
self.assertEqual('/projects/project/topics/topic', topic)
self.assertEqual(b'reproducer', message.data)
self.assertDictEqual({
'crash_state': 'A\nB\nC',
'crash_type': 'crash-type',
'security': 'True',
'severity': 'Medium',
'fuzz_target': 'target',
'new_commit': new_commit,
'old_commit': old_commit,
'project_name': 'proj',
'sanitizer': sanitizer,
'testcase_id': '1',
'issue_id': '1337',
'type': bisect_type,
'timestamp': '2021-01-01T00:00:00',
}, message.attributes)
testcase = self.testcase.key.get()
self.assertTrue(testcase.get_metadata('requested_regressed_bisect'))
self.assertTrue(testcase.get_metadata('requested_fixed_bisect'))
def test_request_bisection_asan(self):
"""Basic regressed test (asan)."""
self.testcase.job_type = 'libfuzzer_asan_proj'
self.testcase.put()
self._test('address')
def test_request_bisection_msan(self):
"""Basic regressed test (msan)."""
self.testcase.job_type = 'libfuzzer_msan_proj'
self.testcase.put()
self._test('memory')
def test_request_bisection_ubsan(self):
"""Basic regressed test (ubsan)."""
self.testcase.job_type = 'libfuzzer_ubsan_proj'
self.testcase.put()
self._test('undefined')
def test_request_bisection_blackbox(self):
"""Test request bisection for blackbox."""
self.testcase.job_type = 'blackbox'
self.testcase.overridden_fuzzer_name = None
self.testcase.put()
bisection.request_bisection(self.testcase)
self.assertEqual(0, self.mock.publish.call_count)
def test_request_bisection_non_security(self):
"""Test request bisection for non-security testcases."""
self.testcase.job_type = 'libfuzzer_asan_proj'
self.testcase.security_flag = False
self.testcase.put()
bisection.request_bisection(self.testcase)
self.assertEqual(0, self.mock.publish.call_count)
def test_request_bisection_flaky(self):
"""Test request bisection for flaky testcases."""
self.testcase.job_type = 'libfuzzer_asan_proj'
self.testcase.one_time_crasher_flag = True
self.testcase.put()
bisection.request_bisection(self.testcase)
self.assertEqual(0, self.mock.publish.call_count)
def test_request_bisection_no_bug(self):
"""Test request bisection for testcases with no bug attached."""
self.testcase.job_type = 'libfuzzer_asan_proj'
self.testcase.bug_information = ''
self.testcase.put()
bisection.request_bisection(self.testcase)
self.assertEqual(0, self.mock.publish.call_count)
def test_request_bisection_invalid_range(self):
"""Test request bisection for testcases with an invalid regression range."""
self.testcase.job_type = 'libfuzzer_asan_proj'
self.testcase.regression = 'NA'
self.testcase.fixed = 'NA'
self.testcase.put()
bisection.request_bisection(self.testcase)
publish_calls = self.mock.publish.call_args_list
self.assertEqual(1, len(publish_calls))
publish_call = publish_calls[0]
topic = publish_call[0][1]
message = publish_call[0][2][0]
self.assertEqual('/projects/project/topics/topic', topic)
self.assertEqual(b'', message.data)
self.assertDictEqual({
'testcase_id': '1',
'type': 'invalid',
}, message.attributes)
def test_request_bisection_once_only(self):
"""Test that bisection isn't requested again for a testcase once it has
already been requested."""
self.testcase.set_metadata('requested_regressed_bisect', True)
self.testcase.set_metadata('requested_fixed_bisect', True)
self.testcase.put()
bisection.request_bisection(self.testcase)
self.assertEqual(0, self.mock.publish.call_count)
def test_request_single_commit_range(self):
"""Request bisection with a single commit (invalid range)."""
self.mock.get_primary_bucket_path.return_value = 'bucket'
self.mock.get_revisions_list.return_value = list(range(6))
self.mock.get_component_range_list.return_value = [
{
'link_text': 'one',
},
]
bisection.request_bisection(self.testcase)
self._test('address', old_commit='one', new_commit='one')
self.mock.get_component_range_list.assert_has_calls([
mock.call(123, 456, 'libfuzzer_asan_proj'),
mock.call(0, 3, 'libfuzzer_asan_proj'),
mock.call(123, 456, 'libfuzzer_asan_proj'),
mock.call(4, 5, 'libfuzzer_asan_proj'),
])
|
|
import hashlib
import random
from rfc822 import dump_address_pair
from django.contrib import admin
from django.contrib.contenttypes import generic
from django.core import mail
from django.core.urlresolvers import NoReverseMatch, reverse
from django.core.validators import email_re
from django.db import models
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.template import RequestContext
from pennyblack import settings
try:
from django.utils.timezone import now
except ImportError:
from datetime import datetime
now = datetime.now
from datetime import timedelta
#-----------------------------------------------------------------------------
# Mail
#-----------------------------------------------------------------------------
class MailManager(models.Manager):
use_for_related_fields = True
def most_clicked_first(self):
return self.annotate(click_count=models.Count('clicks')).order_by('-click_count')
class Mail(models.Model):
"""
This is a single Mail, it's part of a Job
"""
viewed = models.DateTimeField(default=None, null=True)
bounced = models.BooleanField(default=False)
sent = models.BooleanField(default=False)
content_type = models.ForeignKey('contenttypes.ContentType')
object_id = models.PositiveIntegerField()
person = generic.GenericForeignKey('content_type', 'object_id')
job = models.ForeignKey('pennyblack.Job', related_name="mails")
mail_hash = models.CharField(max_length=32, blank=True)
email = models.EmailField() # the address is stored when the mail is sent
objects = MailManager()
class Meta:
verbose_name = 'mail'
verbose_name_plural = 'mails'
app_label = 'pennyblack'
def __init__(self, *args, **kwargs):
super(Mail, self).__init__(*args, **kwargs)
self.extra_context = None
self.extra_attachments = []
def __unicode__(self):
return u'%s to %s' % (self.job, self.person,)
def save(self, **kwargs):
if self.mail_hash == u'':
self.mail_hash = hashlib.md5(str(self.id) + str(random.random())).hexdigest()
super(Mail, self).save(**kwargs)
def mark_sent(self):
"""
Marks the email as being sent.
"""
self.sent = True
self.save()
def mark_viewed(self, request=None, contact_type='link'):
"""
Marks the email as being viewed and, if it has not been viewed yet,
stores the view date.
"""
if request:
params = {
'user_agent': request.META.get('HTTP_USER_AGENT', ''),
'ip_address': request.META.get('REMOTE_ADDR', ''),
'referer': request.META.get('HTTP_REFERER', ''),
'contact_type': contact_type,
}
t = now() - timedelta(hours=1)
if not self.clients.filter(**params).filter(visited__gt=t):
self.clients.create(**params)
if not self.viewed:
self.viewed = now()
self.save()
def on_landing(self, request):
"""
Is executed every time a user lands on the website after clicking on
a link in this email. It tries to execute the on_landing method on the
person object and on the group object.
"""
self.mark_viewed(request, contact_type='link')
if hasattr(self.person, 'on_landing') and hasattr(self.person.on_landing, '__call__'):
self.person.on_landing(request)
if self.job.content_type is not None and \
hasattr(self.job.group_object, 'on_landing') and \
hasattr(self.job.group_object.on_landing, '__call__'):
self.job.group_object.on_landing(request)
def bounce(self):
"""
Is executed if this email is bounced.
"""
self.bounced = True
self.save()
self.person.on_bounce(self)
def unsubscribe(self):
"""
Is executed if the unsubscribe link is clicked.
"""
return self.person.unsubscribe()
def is_valid(self):
"""
Checks if this Mail is valid by validating the email address.
"""
return email_re.match(self.person.get_email())
def get_email(self):
"""
Gets the email address. If it has no email address set, it tries to
get it from the person object.
"""
if self.email != '':
return self.email
return self.person.get_email()
get_email.short_description = "E-Mail"
def get_message(self):
"""
Returns an email message object.
"""
self.email = self.person.get_email()
job = self.job
headers = {}
if job.newsletter.reply_email != '':
headers.update({'Reply-To': job.newsletter.reply_email})
if job.newsletter.newsletter_type == settings.NEWSLETTER_TYPE_MASSMAIL:
headers.update({'Precedence': 'bulk'})
try:
headers.update({'List-Unsubscribe': "<%s>" % self.person.get_unsubscribe_url(mail=self, job=job, newsletter=job.newsletter)})
except NotImplementedError:
pass
message = mail.EmailMessage(
job.newsletter.subject,
self.get_content(),
dump_address_pair((job.newsletter.sender.name, job.newsletter.sender.email)),
[self.email],
headers=headers,
)
for attachment in job.newsletter.attachments.all():
message.attachments.append((attachment.name, attachment.file.read(), attachment.mimetype))
for attachment in self.extra_attachments:
message.attachments.append(attachment)
message.content_subtype = "html"
return message
def get_content(self, webview=False):
"""
Renders the email content. If webview is True it also includes an
html header and doesn't display the webview link.
"""
newsletter = self.job.newsletter
context = self.get_context()
context['newsletter'] = newsletter
context['public_url'] = self.job.public_url
context['webview'] = webview
if isinstance(self.extra_context, dict):
context.update(self.extra_context)
request = HttpRequest()
request.content_context = context
return render_to_string(newsletter.template.path, context,
context_instance=RequestContext(request))
def get_context(self):
"""
Returns the context of this email as a dict.
"""
return {
'person': self.person,
'group_object': self.job.group_object,
'mail': self,
'base_url': self.job.newsletter.get_base_url()
}
def get_header_url(self):
"""
Gets the header url for this email.
"""
return self.job.newsletter.header_url_replaced.replace('{{mail.mail_hash}}', self.mail_hash).replace('{{base_url}}', self.job.newsletter.get_base_url())
@property
def admin_change_url(self):
if hasattr(self, '_admin_change_url'):
return self._admin_change_url
try:
self._admin_change_url = reverse('admin:%s_%s_change' % (self.content_type.app_label, self.content_type.model), args=[self.object_id])
except NoReverseMatch:
return None
return self._admin_change_url
class MailInline(admin.TabularInline):
model = Mail
max_num = 0
can_delete = False
fields = ('get_email',)
readonly_fields = ('get_email',)
def queryset(self, request):
"""
Don't display Inlines if there are more than a certain amount
"""
if request._pennyblack_job_obj.mails.count() > settings.JOB_MAIL_INLINE_COUNT:
return super(MailInline, self).queryset(request).filter(pk=0)
return super(MailInline, self).queryset(request)
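# Illustrative helper (editor's addition, not part of pennyblack): the typical
# path from a Mail row to an outgoing message, using only methods defined above.
# Error handling, bounce bookkeeping and connection reuse are left out on purpose.
def send_single_mail(mail_obj):
    """Validate, render and send one Mail instance, then mark it as sent."""
    if not mail_obj.is_valid():
        return False
    message = mail_obj.get_message()
    message.send()
    mail_obj.mark_sent()
    return True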
|
|
# -*- coding: utf-8 -*-
# Copyright 2011-2012 Florian von Bock (f at vonbock dot info)
#
# gDBPool - db connection pooling for gevent
__author__ = "Florian von Bock"
__email__ = "f at vonbock dot info"
__version__ = "0.1.3"
import gevent
from gevent import monkey; monkey.patch_all()
from psyco_ge import make_psycopg_green; make_psycopg_green()
import sys
assert 'gdbpool.psyco_ge' in sys.modules.keys()
import psycopg2
import traceback
from gevent.queue import Queue, Empty as QueueEmptyException
from gevent.event import AsyncResult
from types import FunctionType, MethodType, StringType
from psycopg2 import DatabaseError
psycopg2.extensions.register_type( psycopg2.extensions.UNICODE )
psycopg2.extensions.register_type( psycopg2.extensions.UNICODEARRAY )
from inspect import getargspec
from connection_pool import DBConnectionPool
from channel_listener import PGChannelListener
from gdbpool_error import DBInteractionException, DBPoolConnectionException, PoolConnectionException, StreamEndException
class DBInteractionPool( object ):
"""
The DBInteractionPool manages `DBConnectionPool` instances and can run
queries or functions (ie. several queries wrapped in a function) on one of
these pools.
"""
def __new__( cls, dsn, *args, **kwargs ):
if not hasattr( cls, '_instance' ):
cls._instance = object.__new__( cls )
return cls._instance
def __init__( self, dsn, pool_size = 10, pool_name = 'default',
do_log = False ):
"""
:param string dsn: DSN for the default `class:DBConnectionPool`
:param int pool_size: Poolsize of the first/default `class:DBConnectionPool`
:param string pool_name: Keyname for the first/default `class:DBConnectionPool`
:param bool do_log: Log to the console or not
"""
if do_log == True:
import logging
logging.basicConfig( level = logging.INFO, format = "%(asctime)s %(message)s" )
self.logger = logging.getLogger()
self.do_log = do_log
self.request_queue = Queue( maxsize = None )
self.db_module = 'psycopg2'
self.conn_pools = {}
self.default_write_pool = None
self.default_read_pool = None
self.default_pool = None
self.active_listeners = {}
self.add_pool( dsn = dsn, pool_name = pool_name, pool_size = pool_size,
default_write_pool = True, default_read_pool = True,
db_module = self.db_module )
def __del__( self ):
if self.do_log:
self.logger.info( "__del__ DBInteractionPool" )
for p in self.conn_pools:
self.conn_pools[ p ].__del__()
def __call__( self, *args, **kwargs ):
""" syntactic sugar for `:ref:DBInteractionPool.run` """
return self.run( *args, **kwargs )
def add_pool( self, dsn = None, pool_name = None, pool_size = 10,
default_write_pool = False, default_read_pool = False,
default_pool = False, db_module = 'psycopg2' ):
"""
Add a named `:class:DBConnectionPool`
:param string dsn: dsn
:param string pool_name: a name for the pool to identify it inside the DBInteractionPool
:param int pool_size: Number of connections the pool should have.
:param bool default_write_pool: Should the added pool be used as the default pool for write operations?
:param bool default_read_pool: Should the added pool be used as the default pool for read operations?
:param bool default_pool: Should the added pool be used as the default pool? (*must* be a write pool)
:param string db_module: name of the DB-API module to use
.. note::
db_module right now ONLY supports psycopg2 and the option most likely will be removed in the future
"""
if not self.conn_pools.has_key( pool_name ):
self.conn_pools[ pool_name ] = DBConnectionPool( dsn, db_module = self.db_module,
pool_size = pool_size, do_log = self.do_log )
if default_write_pool:
self.default_write_pool = pool_name
if default_pool or self.default_pool is None:
self.default_pool = pool_name
if default_read_pool:
self.default_read_pool = pool_name
else:
raise DBInteractionException( "Already have a pool with the name: %s. ConnectionPool not added!" % ( pool_name, ) )
@property
def pool( self ):
return self.conn_pools[ self.default_pool ]
def run( self, interaction = None, interaction_args = None,
get_result = True, is_write = True, pool = None, conn = None,
cursor = None, partial_txn = False, dry_run = False, *args,
**kwargs ):
"""
Run an interaction on one of the managed `:class:DBConnectionPool` pools.
:param function|method interaction: The interaction to run. Either a SQL string or a function that takes at least a parameter `conn`.
:param tuple interaction_args: Positional arguments passed to cursor.execute()/mogrify() when the interaction is a SQL string
:param bool get_result: call and return cursor.fetchall() when True - otherwise just return True as result if no exception was raised.
:param bool is_write: If the interaction has no side-effects set to `False`. Without naming a pool the default_read pool would be used.
:param string pool: Keyname of the pool to get a connection from
:param connection conn: Pass in a `Connection` instead of getting one from the pool. (ie. for locks in transactions that span several interactions. Use `partial_txn = True` to retrieve the Connection and then pass it into the next interaction run.)
:param cursor cursor: Pass in a `Cursor` instead of getting one from the `Connection` (ie. for locks in transactions that span several interactions. Use `partial_txn = True` to retrieve the Cursor and then pass it into the next interaction run.)
:param bool partial_txn: Return connection and cursor after executing the interaction (ie. for locks in transactions that span several interactions)
:param bool dry_run: Run the query with `mogrify` instead of `execute` and output the query that would have run. (Only applies to query interactions)
:param list args: positional args for the interaction
:param dict kwargs: kwargs for the interaction
:rtype: gevent.AsyncResult
:returns: -- a :class:`gevent.AsyncResult` that will hold the result of the interaction once it finished. When `partial_txn = True` it will return a dict that will hold the result, the connection, and the cursor that ran the transaction. (use for locking with SELECT FOR UPDATE)
"""
async_result = AsyncResult()
if is_write:
use_pool = self.default_write_pool if pool is None else pool
else:
use_pool = self.default_read_pool if pool is None else pool
if isinstance( interaction, FunctionType ) or isinstance( interaction, MethodType ):
def wrapped_transaction_f( async_res, interaction, conn = None,
cursor = None, *args ):
try:
if not conn:
conn = self.conn_pools[ use_pool ].get()
kwargs[ 'conn' ] = conn
if cursor:
kwargs[ 'cursor' ] = cursor
elif 'cursor' in getargspec( interaction )[ 0 ]:
kwargs[ 'cursor' ] = kwargs[ 'conn' ].cursor()
res = interaction( *args, **kwargs )
if not partial_txn:
async_res.set( res )
if cursor and not cursor.closed:
cursor.close()
else:
async_res.set( { 'result': res,
'connection': conn,
'cursor': kwargs[ 'cursor' ] } )
except DatabaseError, e:
if self.do_log:
self.logger.info( "exception: %s", ( e, ) )
async_result.set_exception( DBInteractionException( e ) )
except Exception, e:
if self.do_log:
self.logger.info( "exception: %s", ( e, ) )
async_result.set_exception( DBInteractionException( e ) )
finally:
if conn and not partial_txn:
self.conn_pools[ use_pool ].put( conn )
gevent.spawn( wrapped_transaction_f, async_result, interaction,
conn = conn, cursor = cursor, *args )
return async_result
elif isinstance( interaction, StringType ):
def transaction_f( async_res, sql, conn = None, cursor = None,
*args ):
try:
if not conn:
conn = self.conn_pools[ use_pool ].get()
if not cursor:
cursor = conn.cursor()
if not dry_run:
if interaction_args is not None:
cursor.execute( sql, interaction_args )
else:
cursor.execute( sql )
if get_result:
res = cursor.fetchall()
else:
res = True
if is_write and not partial_txn:
conn.commit()
else:
res = cursor.mogrify( sql, interaction_args )
if not partial_txn:
cursor.close()
async_res.set( res )
else:
async_res.set( { 'result': res,
'connection': conn,
'cursor': cursor} )
except DatabaseError, e:
if self.do_log:
self.logger.info( "exception: %s", ( e, ) )
async_result.set_exception( DBInteractionException( e ) )
except Exception, e:
traceback.print_exc( file = sys.stdout )
# if is_write and partial_txn: # ??
conn.rollback()
if self.do_log:
self.logger.info( "exception: %s", ( e, ) )
async_result.set_exception( DBInteractionException( e ) )
finally:
if conn and not partial_txn:
self.conn_pools[ use_pool ].put( conn )
gevent.spawn( transaction_f, async_result, interaction,
conn = conn, cursor = cursor, *args )
return async_result
else:
raise DBInteractionException( "%s cannot be run. run() only accepts FunctionType, MethodType, and StringType" % ( interaction, ) )
def listen_on( self, result_queue = None, channel_name = None, pool = None,
cancel_event = None, sleep_cycle = 0.1 ):
"""
Listen for asynchronous events on a named Channel and pass them to the result_queue
:param gevent.Queue result_queue: The :class:`gevent.Queue` to pass event payloads to
:param string channel_name: Name of the channel to LISTEN on
:param string pool: Name of the pool to get the connection from
:param gevent.Event cancel_event: A :class:`gevent.Event` which will break the listening loop when set
"""
if self.db_module != 'psycopg2':
raise DBInteractionException( "This feature requires PostgreSQL 9.x." )
use_pool = self.default_write_pool if pool is None else pool
try:
def start_listener():
self.active_listeners[ channel_name ] = PGChannelListener( result_queue, self.conn_pools[ use_pool ], channel_name )
# do we need a listen loop for all PGChannelListeners? maybe one is enough...
def listen( result_queue, cancel_event ):
while 1:
if cancel_event.is_set():
self.active_listeners[ channel_name ].unregister_queue( id( result_queue ) )
if self.do_log:
self.logger.info( "stopped listening on: %s", ( channel_name, ) )
break
gevent.sleep( sleep_cycle )
listener_jobs = [ gevent.spawn( start_listener ),
gevent.spawn( listen, result_queue, cancel_event ) ]
gevent.joinall( listener_jobs )
except Exception, e:
print "# FRAKK", e
if self.do_log:
self.logger.info( e )
# TODO: make this an option...?
# DEC2FLOAT = psycopg2.extensions.new_type(
# psycopg2.extensions.DECIMAL.values,
# 'DEC2FLOAT',
# lambda value, curs: float( value ) if value is not None else None )
# psycopg2.extensions.register_type( DEC2FLOAT )
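# Usage sketch (editor's addition, not part of gDBPool): running a plain SQL
# interaction against the default pool. The DSN below is a placeholder and must
# point at a reachable PostgreSQL instance for this to actually run.
if __name__ == '__main__':
    ipool = DBInteractionPool( "host=localhost dbname=test user=test", pool_size = 5, do_log = True )
    async_result = ipool.run( "SELECT 1;" )
    # .get() blocks until the spawned greenlet has set the result (cursor.fetchall()).
    print async_result.get()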
|
|
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.compute import instance_template_pb2
from google3.cloud.graphite.mmv2.services.google.compute import (
instance_template_pb2_grpc,
)
from typing import List
class InstanceTemplate(object):
def __init__(
self,
creation_timestamp: str = None,
description: str = None,
id: int = None,
self_link: str = None,
name: str = None,
properties: dict = None,
project: str = None,
service_account_file: str = "",
):
channel.initialize()
self.description = description
self.self_link = self_link
self.name = name
self.properties = properties
self.project = project
self.service_account_file = service_account_file
def apply(self):
stub = instance_template_pb2_grpc.ComputeBetaInstanceTemplateServiceStub(
channel.Channel()
)
request = instance_template_pb2.ApplyComputeBetaInstanceTemplateRequest()
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.self_link):
request.resource.self_link = Primitive.to_proto(self.self_link)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if InstanceTemplateProperties.to_proto(self.properties):
request.resource.properties.CopyFrom(
InstanceTemplateProperties.to_proto(self.properties)
)
else:
request.resource.ClearField("properties")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyComputeBetaInstanceTemplate(request)
self.creation_timestamp = Primitive.from_proto(response.creation_timestamp)
self.description = Primitive.from_proto(response.description)
self.id = Primitive.from_proto(response.id)
self.self_link = Primitive.from_proto(response.self_link)
self.name = Primitive.from_proto(response.name)
self.properties = InstanceTemplateProperties.from_proto(response.properties)
self.project = Primitive.from_proto(response.project)
def delete(self):
stub = instance_template_pb2_grpc.ComputeBetaInstanceTemplateServiceStub(
channel.Channel()
)
request = instance_template_pb2.DeleteComputeBetaInstanceTemplateRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.description):
request.resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.self_link):
request.resource.self_link = Primitive.to_proto(self.self_link)
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if InstanceTemplateProperties.to_proto(self.properties):
request.resource.properties.CopyFrom(
InstanceTemplateProperties.to_proto(self.properties)
)
else:
request.resource.ClearField("properties")
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
response = stub.DeleteComputeBetaInstanceTemplate(request)
@classmethod
def list(self, project, service_account_file=""):
stub = instance_template_pb2_grpc.ComputeBetaInstanceTemplateServiceStub(
channel.Channel()
)
request = instance_template_pb2.ListComputeBetaInstanceTemplateRequest()
request.service_account_file = service_account_file
request.Project = project
return stub.ListComputeBetaInstanceTemplate(request).items
def to_proto(self):
resource = instance_template_pb2.ComputeBetaInstanceTemplate()
if Primitive.to_proto(self.description):
resource.description = Primitive.to_proto(self.description)
if Primitive.to_proto(self.self_link):
resource.self_link = Primitive.to_proto(self.self_link)
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if InstanceTemplateProperties.to_proto(self.properties):
resource.properties.CopyFrom(
InstanceTemplateProperties.to_proto(self.properties)
)
else:
resource.ClearField("properties")
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
return resource
class InstanceTemplateProperties(object):
def __init__(
self,
can_ip_forward: bool = None,
description: str = None,
disks: list = None,
labels: dict = None,
machine_type: str = None,
min_cpu_platform: str = None,
metadata: dict = None,
reservation_affinity: dict = None,
guest_accelerators: list = None,
network_interfaces: list = None,
shielded_instance_config: dict = None,
scheduling: dict = None,
service_accounts: list = None,
tags: list = None,
):
self.can_ip_forward = can_ip_forward
self.description = description
self.disks = disks
self.labels = labels
self.machine_type = machine_type
self.min_cpu_platform = min_cpu_platform
self.metadata = metadata
self.reservation_affinity = reservation_affinity
self.guest_accelerators = guest_accelerators
self.network_interfaces = network_interfaces
self.shielded_instance_config = shielded_instance_config
self.scheduling = scheduling
self.service_accounts = service_accounts
self.tags = tags
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_template_pb2.ComputeBetaInstanceTemplateProperties()
if Primitive.to_proto(resource.can_ip_forward):
res.can_ip_forward = Primitive.to_proto(resource.can_ip_forward)
if Primitive.to_proto(resource.description):
res.description = Primitive.to_proto(resource.description)
if InstanceTemplatePropertiesDisksArray.to_proto(resource.disks):
res.disks.extend(
InstanceTemplatePropertiesDisksArray.to_proto(resource.disks)
)
if Primitive.to_proto(resource.labels):
res.labels = Primitive.to_proto(resource.labels)
if Primitive.to_proto(resource.machine_type):
res.machine_type = Primitive.to_proto(resource.machine_type)
if Primitive.to_proto(resource.min_cpu_platform):
res.min_cpu_platform = Primitive.to_proto(resource.min_cpu_platform)
if Primitive.to_proto(resource.metadata):
res.metadata = Primitive.to_proto(resource.metadata)
if InstanceTemplatePropertiesReservationAffinity.to_proto(
resource.reservation_affinity
):
res.reservation_affinity.CopyFrom(
InstanceTemplatePropertiesReservationAffinity.to_proto(
resource.reservation_affinity
)
)
else:
res.ClearField("reservation_affinity")
if InstanceTemplatePropertiesGuestAcceleratorsArray.to_proto(
resource.guest_accelerators
):
res.guest_accelerators.extend(
InstanceTemplatePropertiesGuestAcceleratorsArray.to_proto(
resource.guest_accelerators
)
)
if InstanceTemplatePropertiesNetworkInterfacesArray.to_proto(
resource.network_interfaces
):
res.network_interfaces.extend(
InstanceTemplatePropertiesNetworkInterfacesArray.to_proto(
resource.network_interfaces
)
)
if InstanceTemplatePropertiesShieldedInstanceConfig.to_proto(
resource.shielded_instance_config
):
res.shielded_instance_config.CopyFrom(
InstanceTemplatePropertiesShieldedInstanceConfig.to_proto(
resource.shielded_instance_config
)
)
else:
res.ClearField("shielded_instance_config")
if InstanceTemplatePropertiesScheduling.to_proto(resource.scheduling):
res.scheduling.CopyFrom(
InstanceTemplatePropertiesScheduling.to_proto(resource.scheduling)
)
else:
res.ClearField("scheduling")
if InstanceTemplatePropertiesServiceAccountsArray.to_proto(
resource.service_accounts
):
res.service_accounts.extend(
InstanceTemplatePropertiesServiceAccountsArray.to_proto(
resource.service_accounts
)
)
if Primitive.to_proto(resource.tags):
res.tags.extend(Primitive.to_proto(resource.tags))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplateProperties(
can_ip_forward=Primitive.from_proto(resource.can_ip_forward),
description=Primitive.from_proto(resource.description),
disks=InstanceTemplatePropertiesDisksArray.from_proto(resource.disks),
labels=Primitive.from_proto(resource.labels),
machine_type=Primitive.from_proto(resource.machine_type),
min_cpu_platform=Primitive.from_proto(resource.min_cpu_platform),
metadata=Primitive.from_proto(resource.metadata),
reservation_affinity=InstanceTemplatePropertiesReservationAffinity.from_proto(
resource.reservation_affinity
),
guest_accelerators=InstanceTemplatePropertiesGuestAcceleratorsArray.from_proto(
resource.guest_accelerators
),
network_interfaces=InstanceTemplatePropertiesNetworkInterfacesArray.from_proto(
resource.network_interfaces
),
shielded_instance_config=InstanceTemplatePropertiesShieldedInstanceConfig.from_proto(
resource.shielded_instance_config
),
scheduling=InstanceTemplatePropertiesScheduling.from_proto(
resource.scheduling
),
service_accounts=InstanceTemplatePropertiesServiceAccountsArray.from_proto(
resource.service_accounts
),
tags=Primitive.from_proto(resource.tags),
)
class InstanceTemplatePropertiesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceTemplateProperties.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceTemplateProperties.from_proto(i) for i in resources]
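# Illustration (not part of the generated client): the *Array helper classes
# convert element-wise and pass falsy inputs straight through, so callers can
# hand them None or [] without special-casing. A minimal sketch:
def _properties_array_sketch():
    assert InstanceTemplatePropertiesArray.to_proto(None) is None
    assert InstanceTemplatePropertiesArray.to_proto([]) == []
    props = [InstanceTemplateProperties(description="example")]
    assert len(InstanceTemplatePropertiesArray.to_proto(props)) == 1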
class InstanceTemplatePropertiesDisks(object):
def __init__(
self,
auto_delete: bool = None,
boot: bool = None,
device_name: str = None,
disk_encryption_key: dict = None,
index: int = None,
initialize_params: dict = None,
guest_os_features: list = None,
interface: str = None,
mode: str = None,
source: str = None,
type: str = None,
):
self.auto_delete = auto_delete
self.boot = boot
self.device_name = device_name
self.disk_encryption_key = disk_encryption_key
self.index = index
self.initialize_params = initialize_params
self.guest_os_features = guest_os_features
self.interface = interface
self.mode = mode
self.source = source
self.type = type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_template_pb2.ComputeBetaInstanceTemplatePropertiesDisks()
if Primitive.to_proto(resource.auto_delete):
res.auto_delete = Primitive.to_proto(resource.auto_delete)
if Primitive.to_proto(resource.boot):
res.boot = Primitive.to_proto(resource.boot)
if Primitive.to_proto(resource.device_name):
res.device_name = Primitive.to_proto(resource.device_name)
if InstanceTemplatePropertiesDisksDiskEncryptionKey.to_proto(
resource.disk_encryption_key
):
res.disk_encryption_key.CopyFrom(
InstanceTemplatePropertiesDisksDiskEncryptionKey.to_proto(
resource.disk_encryption_key
)
)
else:
res.ClearField("disk_encryption_key")
if Primitive.to_proto(resource.index):
res.index = Primitive.to_proto(resource.index)
if InstanceTemplatePropertiesDisksInitializeParams.to_proto(
resource.initialize_params
):
res.initialize_params.CopyFrom(
InstanceTemplatePropertiesDisksInitializeParams.to_proto(
resource.initialize_params
)
)
else:
res.ClearField("initialize_params")
if InstanceTemplatePropertiesDisksGuestOSFeaturesArray.to_proto(
resource.guest_os_features
):
res.guest_os_features.extend(
InstanceTemplatePropertiesDisksGuestOSFeaturesArray.to_proto(
resource.guest_os_features
)
)
if InstanceTemplatePropertiesDisksInterfaceEnum.to_proto(resource.interface):
res.interface = InstanceTemplatePropertiesDisksInterfaceEnum.to_proto(
resource.interface
)
if InstanceTemplatePropertiesDisksModeEnum.to_proto(resource.mode):
res.mode = InstanceTemplatePropertiesDisksModeEnum.to_proto(resource.mode)
if Primitive.to_proto(resource.source):
res.source = Primitive.to_proto(resource.source)
if InstanceTemplatePropertiesDisksTypeEnum.to_proto(resource.type):
res.type = InstanceTemplatePropertiesDisksTypeEnum.to_proto(resource.type)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesDisks(
auto_delete=Primitive.from_proto(resource.auto_delete),
boot=Primitive.from_proto(resource.boot),
device_name=Primitive.from_proto(resource.device_name),
disk_encryption_key=InstanceTemplatePropertiesDisksDiskEncryptionKey.from_proto(
resource.disk_encryption_key
),
index=Primitive.from_proto(resource.index),
initialize_params=InstanceTemplatePropertiesDisksInitializeParams.from_proto(
resource.initialize_params
),
guest_os_features=InstanceTemplatePropertiesDisksGuestOSFeaturesArray.from_proto(
resource.guest_os_features
),
interface=InstanceTemplatePropertiesDisksInterfaceEnum.from_proto(
resource.interface
),
mode=InstanceTemplatePropertiesDisksModeEnum.from_proto(resource.mode),
source=Primitive.from_proto(resource.source),
type=InstanceTemplatePropertiesDisksTypeEnum.from_proto(resource.type),
)
class InstanceTemplatePropertiesDisksArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceTemplatePropertiesDisks.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceTemplatePropertiesDisks.from_proto(i) for i in resources]
class InstanceTemplatePropertiesDisksDiskEncryptionKey(object):
def __init__(
self, raw_key: str = None, rsa_encrypted_key: str = None, sha256: str = None
):
self.raw_key = raw_key
self.rsa_encrypted_key = rsa_encrypted_key
self.sha256 = sha256
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesDisksDiskEncryptionKey()
)
if Primitive.to_proto(resource.raw_key):
res.raw_key = Primitive.to_proto(resource.raw_key)
if Primitive.to_proto(resource.rsa_encrypted_key):
res.rsa_encrypted_key = Primitive.to_proto(resource.rsa_encrypted_key)
if Primitive.to_proto(resource.sha256):
res.sha256 = Primitive.to_proto(resource.sha256)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesDisksDiskEncryptionKey(
raw_key=Primitive.from_proto(resource.raw_key),
rsa_encrypted_key=Primitive.from_proto(resource.rsa_encrypted_key),
sha256=Primitive.from_proto(resource.sha256),
)
class InstanceTemplatePropertiesDisksDiskEncryptionKeyArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesDisksDiskEncryptionKey.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesDisksDiskEncryptionKey.from_proto(i)
for i in resources
]
class InstanceTemplatePropertiesDisksInitializeParams(object):
def __init__(
self,
disk_name: str = None,
disk_size_gb: int = None,
disk_type: str = None,
source_image: str = None,
labels: dict = None,
source_snapshot: str = None,
source_snapshot_encryption_key: dict = None,
description: str = None,
resource_policies: list = None,
on_update_action: str = None,
source_image_encryption_key: dict = None,
):
self.disk_name = disk_name
self.disk_size_gb = disk_size_gb
self.disk_type = disk_type
self.source_image = source_image
self.labels = labels
self.source_snapshot = source_snapshot
self.source_snapshot_encryption_key = source_snapshot_encryption_key
self.description = description
self.resource_policies = resource_policies
self.on_update_action = on_update_action
self.source_image_encryption_key = source_image_encryption_key
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesDisksInitializeParams()
)
if Primitive.to_proto(resource.disk_name):
res.disk_name = Primitive.to_proto(resource.disk_name)
if Primitive.to_proto(resource.disk_size_gb):
res.disk_size_gb = Primitive.to_proto(resource.disk_size_gb)
if Primitive.to_proto(resource.disk_type):
res.disk_type = Primitive.to_proto(resource.disk_type)
if Primitive.to_proto(resource.source_image):
res.source_image = Primitive.to_proto(resource.source_image)
if Primitive.to_proto(resource.labels):
res.labels = Primitive.to_proto(resource.labels)
if Primitive.to_proto(resource.source_snapshot):
res.source_snapshot = Primitive.to_proto(resource.source_snapshot)
if InstanceTemplatePropertiesDisksInitializeParamsSourceSnapshotEncryptionKey.to_proto(
resource.source_snapshot_encryption_key
):
res.source_snapshot_encryption_key.CopyFrom(
InstanceTemplatePropertiesDisksInitializeParamsSourceSnapshotEncryptionKey.to_proto(
resource.source_snapshot_encryption_key
)
)
else:
res.ClearField("source_snapshot_encryption_key")
if Primitive.to_proto(resource.description):
res.description = Primitive.to_proto(resource.description)
if Primitive.to_proto(resource.resource_policies):
res.resource_policies.extend(Primitive.to_proto(resource.resource_policies))
if Primitive.to_proto(resource.on_update_action):
res.on_update_action = Primitive.to_proto(resource.on_update_action)
if InstanceTemplatePropertiesDisksInitializeParamsSourceImageEncryptionKey.to_proto(
resource.source_image_encryption_key
):
res.source_image_encryption_key.CopyFrom(
InstanceTemplatePropertiesDisksInitializeParamsSourceImageEncryptionKey.to_proto(
resource.source_image_encryption_key
)
)
else:
res.ClearField("source_image_encryption_key")
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesDisksInitializeParams(
disk_name=Primitive.from_proto(resource.disk_name),
disk_size_gb=Primitive.from_proto(resource.disk_size_gb),
disk_type=Primitive.from_proto(resource.disk_type),
source_image=Primitive.from_proto(resource.source_image),
labels=Primitive.from_proto(resource.labels),
source_snapshot=Primitive.from_proto(resource.source_snapshot),
source_snapshot_encryption_key=InstanceTemplatePropertiesDisksInitializeParamsSourceSnapshotEncryptionKey.from_proto(
resource.source_snapshot_encryption_key
),
description=Primitive.from_proto(resource.description),
resource_policies=Primitive.from_proto(resource.resource_policies),
on_update_action=Primitive.from_proto(resource.on_update_action),
source_image_encryption_key=InstanceTemplatePropertiesDisksInitializeParamsSourceImageEncryptionKey.from_proto(
resource.source_image_encryption_key
),
)
class InstanceTemplatePropertiesDisksInitializeParamsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesDisksInitializeParams.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesDisksInitializeParams.from_proto(i)
for i in resources
]
class InstanceTemplatePropertiesDisksInitializeParamsSourceSnapshotEncryptionKey(
object
):
def __init__(
self, raw_key: str = None, sha256: str = None, kms_key_name: str = None
):
self.raw_key = raw_key
self.sha256 = sha256
self.kms_key_name = kms_key_name
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesDisksInitializeParamsSourceSnapshotEncryptionKey()
)
if Primitive.to_proto(resource.raw_key):
res.raw_key = Primitive.to_proto(resource.raw_key)
if Primitive.to_proto(resource.sha256):
res.sha256 = Primitive.to_proto(resource.sha256)
if Primitive.to_proto(resource.kms_key_name):
res.kms_key_name = Primitive.to_proto(resource.kms_key_name)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesDisksInitializeParamsSourceSnapshotEncryptionKey(
raw_key=Primitive.from_proto(resource.raw_key),
sha256=Primitive.from_proto(resource.sha256),
kms_key_name=Primitive.from_proto(resource.kms_key_name),
)
class InstanceTemplatePropertiesDisksInitializeParamsSourceSnapshotEncryptionKeyArray(
object
):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesDisksInitializeParamsSourceSnapshotEncryptionKey.to_proto(
i
)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesDisksInitializeParamsSourceSnapshotEncryptionKey.from_proto(
i
)
for i in resources
]
class InstanceTemplatePropertiesDisksInitializeParamsSourceImageEncryptionKey(object):
def __init__(
self, raw_key: str = None, sha256: str = None, kms_key_name: str = None
):
self.raw_key = raw_key
self.sha256 = sha256
self.kms_key_name = kms_key_name
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesDisksInitializeParamsSourceImageEncryptionKey()
)
if Primitive.to_proto(resource.raw_key):
res.raw_key = Primitive.to_proto(resource.raw_key)
if Primitive.to_proto(resource.sha256):
res.sha256 = Primitive.to_proto(resource.sha256)
if Primitive.to_proto(resource.kms_key_name):
res.kms_key_name = Primitive.to_proto(resource.kms_key_name)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesDisksInitializeParamsSourceImageEncryptionKey(
raw_key=Primitive.from_proto(resource.raw_key),
sha256=Primitive.from_proto(resource.sha256),
kms_key_name=Primitive.from_proto(resource.kms_key_name),
)
class InstanceTemplatePropertiesDisksInitializeParamsSourceImageEncryptionKeyArray(
object
):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesDisksInitializeParamsSourceImageEncryptionKey.to_proto(
i
)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesDisksInitializeParamsSourceImageEncryptionKey.from_proto(
i
)
for i in resources
]
class InstanceTemplatePropertiesDisksGuestOSFeatures(object):
def __init__(self, type: str = None):
self.type = type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesDisksGuestOSFeatures()
)
if Primitive.to_proto(resource.type):
res.type = Primitive.to_proto(resource.type)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesDisksGuestOSFeatures(
type=Primitive.from_proto(resource.type),
)
class InstanceTemplatePropertiesDisksGuestOSFeaturesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesDisksGuestOSFeatures.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesDisksGuestOSFeatures.from_proto(i)
for i in resources
]
class InstanceTemplatePropertiesReservationAffinity(object):
def __init__(self, key: str = None, value: list = None):
self.key = key
self.value = value
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesReservationAffinity()
)
if Primitive.to_proto(resource.key):
res.key = Primitive.to_proto(resource.key)
if Primitive.to_proto(resource.value):
res.value.extend(Primitive.to_proto(resource.value))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesReservationAffinity(
key=Primitive.from_proto(resource.key),
value=Primitive.from_proto(resource.value),
)
class InstanceTemplatePropertiesReservationAffinityArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesReservationAffinity.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesReservationAffinity.from_proto(i)
for i in resources
]
class InstanceTemplatePropertiesGuestAccelerators(object):
def __init__(self, accelerator_count: int = None, accelerator_type: str = None):
self.accelerator_count = accelerator_count
self.accelerator_type = accelerator_type
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesGuestAccelerators()
)
if Primitive.to_proto(resource.accelerator_count):
res.accelerator_count = Primitive.to_proto(resource.accelerator_count)
if Primitive.to_proto(resource.accelerator_type):
res.accelerator_type = Primitive.to_proto(resource.accelerator_type)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesGuestAccelerators(
accelerator_count=Primitive.from_proto(resource.accelerator_count),
accelerator_type=Primitive.from_proto(resource.accelerator_type),
)
class InstanceTemplatePropertiesGuestAcceleratorsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesGuestAccelerators.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesGuestAccelerators.from_proto(i) for i in resources
]
class InstanceTemplatePropertiesNetworkInterfaces(object):
def __init__(
self,
access_configs: list = None,
alias_ip_ranges: list = None,
name: str = None,
network: str = None,
network_ip: str = None,
subnetwork: str = None,
):
self.access_configs = access_configs
self.alias_ip_ranges = alias_ip_ranges
self.name = name
self.network = network
self.network_ip = network_ip
self.subnetwork = subnetwork
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesNetworkInterfaces()
)
if InstanceTemplatePropertiesNetworkInterfacesAccessConfigsArray.to_proto(
resource.access_configs
):
res.access_configs.extend(
InstanceTemplatePropertiesNetworkInterfacesAccessConfigsArray.to_proto(
resource.access_configs
)
)
if InstanceTemplatePropertiesNetworkInterfacesAliasIPRangesArray.to_proto(
resource.alias_ip_ranges
):
res.alias_ip_ranges.extend(
InstanceTemplatePropertiesNetworkInterfacesAliasIPRangesArray.to_proto(
resource.alias_ip_ranges
)
)
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.network):
res.network = Primitive.to_proto(resource.network)
if Primitive.to_proto(resource.network_ip):
res.network_ip = Primitive.to_proto(resource.network_ip)
if Primitive.to_proto(resource.subnetwork):
res.subnetwork = Primitive.to_proto(resource.subnetwork)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesNetworkInterfaces(
access_configs=InstanceTemplatePropertiesNetworkInterfacesAccessConfigsArray.from_proto(
resource.access_configs
),
alias_ip_ranges=InstanceTemplatePropertiesNetworkInterfacesAliasIPRangesArray.from_proto(
resource.alias_ip_ranges
),
name=Primitive.from_proto(resource.name),
network=Primitive.from_proto(resource.network),
network_ip=Primitive.from_proto(resource.network_ip),
subnetwork=Primitive.from_proto(resource.subnetwork),
)
class InstanceTemplatePropertiesNetworkInterfacesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesNetworkInterfaces.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesNetworkInterfaces.from_proto(i) for i in resources
]
class InstanceTemplatePropertiesNetworkInterfacesAccessConfigs(object):
def __init__(
self,
name: str = None,
nat_ip: str = None,
type: str = None,
set_public_ptr: bool = None,
public_ptr_domain_name: str = None,
network_tier: str = None,
):
self.name = name
self.nat_ip = nat_ip
self.type = type
self.set_public_ptr = set_public_ptr
self.public_ptr_domain_name = public_ptr_domain_name
self.network_tier = network_tier
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesNetworkInterfacesAccessConfigs()
)
if Primitive.to_proto(resource.name):
res.name = Primitive.to_proto(resource.name)
if Primitive.to_proto(resource.nat_ip):
res.nat_ip = Primitive.to_proto(resource.nat_ip)
if InstanceTemplatePropertiesNetworkInterfacesAccessConfigsTypeEnum.to_proto(
resource.type
):
res.type = InstanceTemplatePropertiesNetworkInterfacesAccessConfigsTypeEnum.to_proto(
resource.type
)
if Primitive.to_proto(resource.set_public_ptr):
res.set_public_ptr = Primitive.to_proto(resource.set_public_ptr)
if Primitive.to_proto(resource.public_ptr_domain_name):
res.public_ptr_domain_name = Primitive.to_proto(
resource.public_ptr_domain_name
)
if InstanceTemplatePropertiesNetworkInterfacesAccessConfigsNetworkTierEnum.to_proto(
resource.network_tier
):
res.network_tier = InstanceTemplatePropertiesNetworkInterfacesAccessConfigsNetworkTierEnum.to_proto(
resource.network_tier
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesNetworkInterfacesAccessConfigs(
name=Primitive.from_proto(resource.name),
nat_ip=Primitive.from_proto(resource.nat_ip),
type=InstanceTemplatePropertiesNetworkInterfacesAccessConfigsTypeEnum.from_proto(
resource.type
),
set_public_ptr=Primitive.from_proto(resource.set_public_ptr),
public_ptr_domain_name=Primitive.from_proto(
resource.public_ptr_domain_name
),
network_tier=InstanceTemplatePropertiesNetworkInterfacesAccessConfigsNetworkTierEnum.from_proto(
resource.network_tier
),
)
class InstanceTemplatePropertiesNetworkInterfacesAccessConfigsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesNetworkInterfacesAccessConfigs.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesNetworkInterfacesAccessConfigs.from_proto(i)
for i in resources
]
class InstanceTemplatePropertiesNetworkInterfacesAliasIPRanges(object):
def __init__(self, ip_cidr_range: str = None, subnetwork_range_name: str = None):
self.ip_cidr_range = ip_cidr_range
self.subnetwork_range_name = subnetwork_range_name
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesNetworkInterfacesAliasIPRanges()
)
if Primitive.to_proto(resource.ip_cidr_range):
res.ip_cidr_range = Primitive.to_proto(resource.ip_cidr_range)
if Primitive.to_proto(resource.subnetwork_range_name):
res.subnetwork_range_name = Primitive.to_proto(
resource.subnetwork_range_name
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesNetworkInterfacesAliasIPRanges(
ip_cidr_range=Primitive.from_proto(resource.ip_cidr_range),
subnetwork_range_name=Primitive.from_proto(resource.subnetwork_range_name),
)
class InstanceTemplatePropertiesNetworkInterfacesAliasIPRangesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesNetworkInterfacesAliasIPRanges.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesNetworkInterfacesAliasIPRanges.from_proto(i)
for i in resources
]
class InstanceTemplatePropertiesShieldedInstanceConfig(object):
def __init__(
self,
enable_secure_boot: bool = None,
enable_vtpm: bool = None,
enable_integrity_monitoring: bool = None,
):
self.enable_secure_boot = enable_secure_boot
self.enable_vtpm = enable_vtpm
self.enable_integrity_monitoring = enable_integrity_monitoring
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesShieldedInstanceConfig()
)
if Primitive.to_proto(resource.enable_secure_boot):
res.enable_secure_boot = Primitive.to_proto(resource.enable_secure_boot)
if Primitive.to_proto(resource.enable_vtpm):
res.enable_vtpm = Primitive.to_proto(resource.enable_vtpm)
if Primitive.to_proto(resource.enable_integrity_monitoring):
res.enable_integrity_monitoring = Primitive.to_proto(
resource.enable_integrity_monitoring
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesShieldedInstanceConfig(
enable_secure_boot=Primitive.from_proto(resource.enable_secure_boot),
enable_vtpm=Primitive.from_proto(resource.enable_vtpm),
enable_integrity_monitoring=Primitive.from_proto(
resource.enable_integrity_monitoring
),
)
class InstanceTemplatePropertiesShieldedInstanceConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesShieldedInstanceConfig.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesShieldedInstanceConfig.from_proto(i)
for i in resources
]
class InstanceTemplatePropertiesScheduling(object):
def __init__(
self,
automatic_restart: bool = None,
on_host_maintenance: str = None,
preemptible: bool = None,
node_affinities: list = None,
):
self.automatic_restart = automatic_restart
self.on_host_maintenance = on_host_maintenance
self.preemptible = preemptible
self.node_affinities = node_affinities
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = instance_template_pb2.ComputeBetaInstanceTemplatePropertiesScheduling()
if Primitive.to_proto(resource.automatic_restart):
res.automatic_restart = Primitive.to_proto(resource.automatic_restart)
if Primitive.to_proto(resource.on_host_maintenance):
res.on_host_maintenance = Primitive.to_proto(resource.on_host_maintenance)
if Primitive.to_proto(resource.preemptible):
res.preemptible = Primitive.to_proto(resource.preemptible)
if InstanceTemplatePropertiesSchedulingNodeAffinitiesArray.to_proto(
resource.node_affinities
):
res.node_affinities.extend(
InstanceTemplatePropertiesSchedulingNodeAffinitiesArray.to_proto(
resource.node_affinities
)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesScheduling(
automatic_restart=Primitive.from_proto(resource.automatic_restart),
on_host_maintenance=Primitive.from_proto(resource.on_host_maintenance),
preemptible=Primitive.from_proto(resource.preemptible),
node_affinities=InstanceTemplatePropertiesSchedulingNodeAffinitiesArray.from_proto(
resource.node_affinities
),
)
class InstanceTemplatePropertiesSchedulingArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [InstanceTemplatePropertiesScheduling.to_proto(i) for i in resources]
@classmethod
def from_proto(self, resources):
return [InstanceTemplatePropertiesScheduling.from_proto(i) for i in resources]
class InstanceTemplatePropertiesSchedulingNodeAffinities(object):
def __init__(self, key: str = None, operator: str = None, values: list = None):
self.key = key
self.operator = operator
self.values = values
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesSchedulingNodeAffinities()
)
if Primitive.to_proto(resource.key):
res.key = Primitive.to_proto(resource.key)
if InstanceTemplatePropertiesSchedulingNodeAffinitiesOperatorEnum.to_proto(
resource.operator
):
res.operator = InstanceTemplatePropertiesSchedulingNodeAffinitiesOperatorEnum.to_proto(
resource.operator
)
if Primitive.to_proto(resource.values):
res.values.extend(Primitive.to_proto(resource.values))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesSchedulingNodeAffinities(
key=Primitive.from_proto(resource.key),
operator=InstanceTemplatePropertiesSchedulingNodeAffinitiesOperatorEnum.from_proto(
resource.operator
),
values=Primitive.from_proto(resource.values),
)
class InstanceTemplatePropertiesSchedulingNodeAffinitiesArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesSchedulingNodeAffinities.to_proto(i)
for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesSchedulingNodeAffinities.from_proto(i)
for i in resources
]
class InstanceTemplatePropertiesServiceAccounts(object):
def __init__(self, email: str = None, scopes: list = None):
self.email = email
self.scopes = scopes
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = (
instance_template_pb2.ComputeBetaInstanceTemplatePropertiesServiceAccounts()
)
if Primitive.to_proto(resource.email):
res.email = Primitive.to_proto(resource.email)
if Primitive.to_proto(resource.scopes):
res.scopes.extend(Primitive.to_proto(resource.scopes))
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return InstanceTemplatePropertiesServiceAccounts(
email=Primitive.from_proto(resource.email),
scopes=Primitive.from_proto(resource.scopes),
)
class InstanceTemplatePropertiesServiceAccountsArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [
InstanceTemplatePropertiesServiceAccounts.to_proto(i) for i in resources
]
@classmethod
def from_proto(self, resources):
return [
InstanceTemplatePropertiesServiceAccounts.from_proto(i) for i in resources
]
class InstanceTemplatePropertiesDisksInterfaceEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_template_pb2.ComputeBetaInstanceTemplatePropertiesDisksInterfaceEnum.Value(
"ComputeBetaInstanceTemplatePropertiesDisksInterfaceEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_template_pb2.ComputeBetaInstanceTemplatePropertiesDisksInterfaceEnum.Name(
resource
)[
len("ComputeBetaInstanceTemplatePropertiesDisksInterfaceEnum") :
]
class InstanceTemplatePropertiesDisksModeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_template_pb2.ComputeBetaInstanceTemplatePropertiesDisksModeEnum.Value(
"ComputeBetaInstanceTemplatePropertiesDisksModeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_template_pb2.ComputeBetaInstanceTemplatePropertiesDisksModeEnum.Name(
resource
)[
len("ComputeBetaInstanceTemplatePropertiesDisksModeEnum") :
]
class InstanceTemplatePropertiesDisksTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_template_pb2.ComputeBetaInstanceTemplatePropertiesDisksTypeEnum.Value(
"ComputeBetaInstanceTemplatePropertiesDisksTypeEnum%s" % resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_template_pb2.ComputeBetaInstanceTemplatePropertiesDisksTypeEnum.Name(
resource
)[
len("ComputeBetaInstanceTemplatePropertiesDisksTypeEnum") :
]
class InstanceTemplatePropertiesNetworkInterfacesAccessConfigsTypeEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_template_pb2.ComputeBetaInstanceTemplatePropertiesNetworkInterfacesAccessConfigsTypeEnum.Value(
"ComputeBetaInstanceTemplatePropertiesNetworkInterfacesAccessConfigsTypeEnum%s"
% resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_template_pb2.ComputeBetaInstanceTemplatePropertiesNetworkInterfacesAccessConfigsTypeEnum.Name(
resource
)[
len(
"ComputeBetaInstanceTemplatePropertiesNetworkInterfacesAccessConfigsTypeEnum"
) :
]
class InstanceTemplatePropertiesNetworkInterfacesAccessConfigsNetworkTierEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_template_pb2.ComputeBetaInstanceTemplatePropertiesNetworkInterfacesAccessConfigsNetworkTierEnum.Value(
"ComputeBetaInstanceTemplatePropertiesNetworkInterfacesAccessConfigsNetworkTierEnum%s"
% resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_template_pb2.ComputeBetaInstanceTemplatePropertiesNetworkInterfacesAccessConfigsNetworkTierEnum.Name(
resource
)[
len(
"ComputeBetaInstanceTemplatePropertiesNetworkInterfacesAccessConfigsNetworkTierEnum"
) :
]
class InstanceTemplatePropertiesSchedulingNodeAffinitiesOperatorEnum(object):
@classmethod
def to_proto(self, resource):
if not resource:
return resource
return instance_template_pb2.ComputeBetaInstanceTemplatePropertiesSchedulingNodeAffinitiesOperatorEnum.Value(
"ComputeBetaInstanceTemplatePropertiesSchedulingNodeAffinitiesOperatorEnum%s"
% resource
)
@classmethod
def from_proto(self, resource):
if not resource:
return resource
return instance_template_pb2.ComputeBetaInstanceTemplatePropertiesSchedulingNodeAffinitiesOperatorEnum.Name(
resource
)[
len(
"ComputeBetaInstanceTemplatePropertiesSchedulingNodeAffinitiesOperatorEnum"
) :
]
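# Illustration (not part of the generated client): every *Enum helper above
# round-trips between the short Python-side name (e.g. "SCSI") and the
# prefixed proto enum value, by prepending the message-specific prefix in
# to_proto() and slicing it back off in from_proto(). The dict below stands
# in for the real instance_template_pb2 enum; the numeric values are made up.
def _enum_prefix_sketch():
    prefix = "ComputeBetaInstanceTemplatePropertiesDisksInterfaceEnum"
    value_by_name = {prefix + "SCSI": 1, prefix + "NVME": 2}  # hypothetical numbers
    name_by_value = {v: k for k, v in value_by_name.items()}
    number = value_by_name[prefix + "SCSI"]      # what .Value(...) does in to_proto()
    short = name_by_value[number][len(prefix):]  # what .Name(...)[len(prefix):] does
    assert short == "SCSI"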
class Primitive(object):
@classmethod
def to_proto(self, s):
if not s:
return ""
return s
@classmethod
def from_proto(self, s):
return s
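# Note (illustration, not part of the generated client): Primitive.to_proto()
# maps every falsy value to "", so None, False, 0, and "" all read as "unset";
# the truthiness checks before each field assignment above rely on this.
def _primitive_sketch():
    assert Primitive.to_proto(None) == ""
    assert Primitive.to_proto(False) == ""
    assert Primitive.to_proto("n1-standard-1") == "n1-standard-1"
    assert Primitive.from_proto("") == ""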
|
|
import datetime
import hashlib
try:
import cPickle as pickle
except ImportError:
import pickle
from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.mail import send_mail
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext, get_language, activate
from django.contrib.sites.models import Site
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
QUEUE_ALL = getattr(settings, "NOTIFICATION_QUEUE_ALL", False)
User = get_user_model()
class LanguageStoreNotAvailable(Exception):
pass
class NoticeType(models.Model):
label = models.CharField(_("label"), max_length=40)
display = models.CharField(_("display"), max_length=50)
description = models.CharField(_("description"), max_length=100)
# by default only on for media with sensitivity less than or equal to this number
default = models.IntegerField(_("default"))
def __unicode__(self):
return self.label
class Meta:
verbose_name = _("notice type")
verbose_name_plural = _("notice types")
# if this gets updated, create_notice_type() below needs to be as well...
NOTICE_MEDIA = (
("1", _("Email")),
)
# how spam-sensitive is the medium
NOTICE_MEDIA_DEFAULTS = {
"1": 2 # email
}
class NoticeSetting(models.Model):
"""
Indicates, for a given user, whether to send notifications
of a given type to a given medium.
"""
user = models.ForeignKey(User, verbose_name=_("user"))
notice_type = models.ForeignKey(NoticeType, verbose_name=_("notice type"))
medium = models.CharField(_("medium"), max_length=1, choices=NOTICE_MEDIA)
send = models.BooleanField(_("send"))
class Meta:
verbose_name = _("notice setting")
verbose_name_plural = _("notice settings")
unique_together = ("user", "notice_type", "medium")
def get_notification_setting(user, notice_type, medium):
try:
return NoticeSetting.objects.get(user=user, notice_type=notice_type, medium=medium)
except NoticeSetting.DoesNotExist:
default = (NOTICE_MEDIA_DEFAULTS[medium] <= notice_type.default)
setting = NoticeSetting(user=user, notice_type=notice_type, medium=medium, send=default)
setting.save()
return setting
def should_send(user, notice_type, medium):
return get_notification_setting(user, notice_type, medium).send
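# Hedged usage sketch (the label and user are illustrative; not part of this
# module): look up a NoticeType and consult the per-user setting before
# delivering anything over the email medium ("1" in NOTICE_MEDIA above).
#
# notice_type = NoticeType.objects.get(label="friends_invite")
# if should_send(some_user, notice_type, "1"):
#     pass  # the caller may deliver an email notification here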
class NoticeManager(models.Manager):
def notices_for(self, user, archived=False, unseen=None, on_site=None, sent=False):
"""
returns Notice objects for the given user.
If archived=False, it only includes notices that are not archived.
If archived=True, it returns all notices for that user.
If unseen=None, it includes all notices.
If unseen=True, return only unseen notices.
If unseen=False, return only seen notices.
"""
if sent:
lookup_kwargs = {"sender": user}
else:
lookup_kwargs = {"recipient": user}
qs = self.filter(**lookup_kwargs)
if not archived:
qs = qs.filter(archived=archived)
if unseen is not None:
qs = qs.filter(unseen=unseen)
if on_site is not None:
qs = qs.filter(on_site=on_site)
return qs
def unseen_count_for(self, recipient, **kwargs):
"""
returns the number of unseen notices for the given user but does not
mark them seen
"""
return self.notices_for(recipient, unseen=True, **kwargs).count()
def received(self, recipient, **kwargs):
"""
returns notices the given recipient has received.
"""
kwargs["sent"] = False
return self.notices_for(recipient, **kwargs)
def sent(self, sender, **kwargs):
"""
returns notices the given sender has sent
"""
kwargs["sent"] = True
return self.notices_for(sender, **kwargs)
class Notice(models.Model):
recipient = models.ForeignKey(User, related_name="recieved_notices", verbose_name=_("recipient"))
sender = models.ForeignKey(User, null=True, related_name="sent_notices", verbose_name=_("sender"))
message = models.TextField(_("message"))
notice_type = models.ForeignKey(NoticeType, verbose_name=_("notice type"))
added = models.DateTimeField(_("added"), default=datetime.datetime.now)
unseen = models.BooleanField(_("unseen"), default=True)
archived = models.BooleanField(_("archived"), default=False)
on_site = models.BooleanField(_("on site"), default=True)
slug = models.CharField(_("slug"), max_length=8, editable=False, unique=True)
objects = NoticeManager()
def __unicode__(self):
return self.message
def save(self, *args, **kwargs):
if not self.pk:
data = pickle.dumps(self.__dict__)
self.slug = hashlib.md5(data).hexdigest()[:8]
super(Notice, self).save(*args, **kwargs)
def archive(self):
self.archived = True
self.save()
# def is_unseen(self):
# """
# returns value of self.unseen but also changes it to false.
#
# Use this in a template to mark an unseen notice differently the first
# time it is shown.
# """
# unseen = self.unseen
# if unseen:
# self.unseen = False
# self.save()
# return unseen
def get_previous(self):
try:
return Notice.objects.filter(recipient=self.recipient, added__lt=self.added)\
.order_by('-added')[0]
except IndexError:
return None
def get_next(self):
try:
return Notice.objects.filter(recipient=self.recipient, added__gt=self.added)\
.order_by('added')[0]
except IndexError:
return None
class Meta:
ordering = ["-added"]
verbose_name = _("notice")
verbose_name_plural = _("notices")
@models.permalink
def get_absolute_url(self):
path_name = getattr(settings, 'NOTIFICATIONS_DETAIL_PATH_NAME', 'notification_notice')
return (path_name, [str(self.slug)])
class NoticeQueueBatch(models.Model):
"""
A queued notice.
Denormalized data for a notice.
"""
pickled_data = models.TextField()
def create_notice_type(label, display, description, default=2, verbosity=1):
"""
Creates a new NoticeType.
This is intended to be used by other apps as a post_syncdb management step.
"""
try:
notice_type = NoticeType.objects.get(label=label)
updated = False
if display != notice_type.display:
notice_type.display = display
updated = True
if description != notice_type.description:
notice_type.description = description
updated = True
if default != notice_type.default:
notice_type.default = default
updated = True
if updated:
notice_type.save()
if verbosity > 1:
print "Updated %s NoticeType" % label
except NoticeType.DoesNotExist:
NoticeType(label=label, display=display, description=description, default=default).save()
if verbosity > 1:
print "Created %s NoticeType" % label
def get_notification_language(user):
"""
Returns site-specific notification language for this user. Raises
LanguageStoreNotAvailable if this site does not use translated
notifications.
"""
if getattr(settings, "NOTIFICATION_LANGUAGE_MODULE", False):
try:
app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split(".")
model = models.get_model(app_label, model_name)
language_model = model._default_manager.get(user__id__exact=user.id)
if hasattr(language_model, "language"):
return language_model.language
except (ImportError, ImproperlyConfigured, model.DoesNotExist):
raise LanguageStoreNotAvailable
raise LanguageStoreNotAvailable
def get_formatted_messages(formats, label, context):
"""
Returns a dictionary with the format identifier as the key. The values
are fully rendered templates with the given context.
"""
format_templates = {}
for format in formats:
# conditionally turn off autoescaping for .txt extensions in format
if format.endswith(".txt"):
context.autoescape = False
else:
context.autoescape = True
format_templates[format] = render_to_string((
"messages/%s/%s" % (label, format),
"messages/%s" % format), context_instance=context)
return format_templates
def send_now(users, label, extra_context=None, on_site=True, sender=None, notice_path=None):
"""
Creates a new notice.
This is intended to be how other apps create new notices.
notification.send(user, "friends_invite_sent", {
"spam": "eggs",
"foo": "bar",
})
You can pass in on_site=False to prevent the notice emitted from being
displayed on the site.
"""
if extra_context is None:
extra_context = {}
notice_type = NoticeType.objects.get(label=label)
protocol = getattr(settings, "DEFAULT_HTTP_PROTOCOL", "http")
current_site = Site.objects.get_current()
if notice_path is None:
notice_path = reverse(getattr(settings, 'NOTIFICATIONS_PATH_NAME', 'notification_notices'))
notices_url = u"%s://%s%s" % (
protocol,
unicode(current_site),
notice_path,
)
current_language = get_language()
formats = (
"short.txt",
"full.txt",
"notice.html",
"full.html",
) # TODO make formats configurable
for user in users:
recipients = []
# get user language for user from language store defined in
# NOTIFICATION_LANGUAGE_MODULE setting
try:
language = get_notification_language(user)
except LanguageStoreNotAvailable:
language = None
if language is not None:
# activate the user's language
activate(language)
# update context with user specific translations
context = Context({
"recipient": user,
"sender": sender,
"notice": ugettext(notice_type.display),
"notices_url": notices_url,
"current_site": current_site,
})
context.update(extra_context)
# get prerendered format messages
messages = get_formatted_messages(formats, label, context)
# Strip newlines from subject
subject = "".join(render_to_string("messages/email_subject.txt", {
"message": messages["short.txt"],
}, context).splitlines())
body = render_to_string("messages/email_body.txt", {
"message": messages["full.txt"],
}, context)
notice = Notice.objects.create(recipient=user, message=messages["notice.html"],
notice_type=notice_type, on_site=on_site, sender=sender)
if should_send(user, notice_type, "1") and user.email and user.is_active: # Email
recipients.append(user.email)
send_mail(subject, body, settings.DEFAULT_FROM_EMAIL, recipients)
# reset environment to original language
activate(current_language)
def send(*args, **kwargs):
"""
A basic interface around both queue and send_now. This honors a global
flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should
be queued or not. A per call ``queue`` or ``now`` keyword argument can be
used to always override the default global behavior.
"""
queue_flag = kwargs.pop("queue", False)
now_flag = kwargs.pop("now", False)
assert not (queue_flag and now_flag), "'queue' and 'now' cannot both be True."
if queue_flag:
return queue(*args, **kwargs)
elif now_flag:
return send_now(*args, **kwargs)
else:
if QUEUE_ALL:
return queue(*args, **kwargs)
else:
return send_now(*args, **kwargs)
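# Hedged usage sketch (label and context are illustrative; not part of this
# module): the default call honors NOTIFICATION_QUEUE_ALL, while ``now`` and
# ``queue`` force one behavior or the other.
#
# send([user], "friends_invite", {"from_user": other_user})
# send([user], "friends_invite", {"from_user": other_user}, now=True)
# send([user], "friends_invite", {"from_user": other_user}, queue=True)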
def queue(users, label, extra_context=None, on_site=True, sender=None):
"""
Queue the notification in NoticeQueueBatch. This allows for large amounts
of user notifications to be deferred to a separate process running outside
the webserver.
"""
if extra_context is None:
extra_context = {}
if isinstance(users, QuerySet):
users = [row["pk"] for row in users.values("pk")]
else:
users = [user.pk for user in users]
notices = []
for user in users:
notices.append((user, label, extra_context, on_site, sender))
NoticeQueueBatch(pickled_data=pickle.dumps(notices).encode("base64")).save()
class ObservedItemManager(models.Manager):
def all_for(self, observed, signal):
"""
Returns all ObservedItems for an observed object,
to be sent when a signal is emitted.
"""
content_type = ContentType.objects.get_for_model(observed)
observed_items = self.filter(content_type=content_type, object_id=observed.id, signal=signal)
return observed_items
def get_for(self, observed, observer, signal):
content_type = ContentType.objects.get_for_model(observed)
observed_item = self.get(content_type=content_type, object_id=observed.id, user=observer, signal=signal)
return observed_item
class ObservedItem(models.Model):
user = models.ForeignKey(User, verbose_name=_("user"))
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
observed_object = generic.GenericForeignKey("content_type", "object_id")
notice_type = models.ForeignKey(NoticeType, verbose_name=_("notice type"))
added = models.DateTimeField(_("added"), default=datetime.datetime.now)
# the signal that will be listened to send the notice
signal = models.TextField(verbose_name=_("signal"))
objects = ObservedItemManager()
class Meta:
ordering = ["-added"]
verbose_name = _("observed item")
verbose_name_plural = _("observed items")
def send_notice(self, extra_context=None):
if extra_context is None:
extra_context = {}
extra_context.update({"observed": self.observed_object})
send([self.user], self.notice_type.label, extra_context)
def observe(observed, observer, notice_type_label, signal="post_save"):
"""
Create a new ObservedItem.
To be used by applications to register a user as an observer for some object.
"""
notice_type = NoticeType.objects.get(label=notice_type_label)
observed_item = ObservedItem(
user=observer, observed_object=observed,
notice_type=notice_type, signal=signal
)
observed_item.save()
return observed_item
def stop_observing(observed, observer, signal="post_save"):
"""
Remove an observed item.
"""
observed_item = ObservedItem.objects.get_for(observed, observer, signal)
observed_item.delete()
def send_observation_notices_for(observed, signal="post_save", extra_context=None):
"""
Send a notice for each registered user about an observed object.
"""
if extra_context is None:
extra_context = {}
observed_items = ObservedItem.objects.all_for(observed, signal)
for observed_item in observed_items:
observed_item.send_notice(extra_context)
return observed_items
def is_observing(observed, observer, signal="post_save"):
if isinstance(observer, AnonymousUser):
return False
try:
observed_items = ObservedItem.objects.get_for(observed, observer, signal)
return True
except ObservedItem.DoesNotExist:
return False
except ObservedItem.MultipleObjectsReturned:
return True
def handle_observations(sender, instance, *args, **kw):
send_observation_notices_for(instance)
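# Hedged wiring sketch (SomeModel and the label are hypothetical; not part of
# this module): connect handle_observations to a model's post_save signal and
# let users observe individual instances, so each save fans out notices to the
# registered observers.
#
# from django.db.models.signals import post_save
#
# post_save.connect(handle_observations, sender=SomeModel)
# observe(some_instance, some_user, "thing_updated", signal="post_save")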
|
|
""" The document module provides the Document class, which is a container
for all Bokeh objects that must be reflected to the client-side BokehJS
library.
"""
from __future__ import absolute_import
import logging
logger = logging.getLogger(__file__)
import uuid
from six import string_types
from . import _glyph_functions as gf
from .exceptions import DataIntegrityException
from .objects import PlotContext
from .plot_object import PlotObject
from .plotting_helpers import _new_xy_plot
from .utils import dump
class Document(object):
""" The Document class is a container to hold Bokeh objects that
must be reflected to the client-side BokehJS library.
Attributes:
autoadd (bool) :
autostore (bool) :
context (PlotContext) : the plot context for this document
ref (str) : reference to the plot context for this document
"""
def __init__(self, json_objs=None):
self._current_plot = None
self._next_figure_kwargs = dict()
self._hold = False
self._models = {}
self.docid = str(uuid.uuid4())
self.autostore = True
self.autoadd = True
if json_objs:
self.load(*json_objs, dirty=False)
# must init context after loading JSON objs
self._init_context()
# properties
@property
def autoadd(self):
return self._autoadd
@autoadd.setter
def autoadd(self, value):
if not isinstance(value, bool):
raise TypeError("'autoadd' must be True or False")
self._autoadd = value
@property
def autostore(self):
return self._autostore
@autostore.setter
def autostore(self, value):
if not isinstance(value, bool):
raise TypeError("'autostore' must be True or False")
self._autostore = value
@property
def context(self):
return self._context
@context.setter
def context(self, value):
if not isinstance(value, PlotContext):
raise TypeError('Document.context may only be assigned to PlotContext objects')
try:
if self._context:
del self._models[self._context._id]
except AttributeError:
pass
pcs = [x for x in self._models.values() if x.__view_model__ == 'PlotContext']
if len(pcs) != 0:
raise DataIntegrityException("too many plot contexts found")
self._add(value)
self._add(*value.references())
self._context = value
@property
def ref(self):
return self._context.ref
# "current plot" related functions
def hold(self, value=True):
""" Set the hold value for this Document.
Args:
value (bool, optional) : whether hold should be turned on or off (default: True)
Returns:
None
"""
self._hold = value
def figure(self, **kwargs):
""" Create a new figure for the next rendering.
Returns:
None
"""
self._current_plot = None
self._next_figure_kwargs = kwargs
def curplot(self):
""" Return the current plot of this Document.
The "current plot" is the plot that is acted on by all the
rendering methods, e.g.``doc.circle(...)`` will render a
circle on the current plot.
Returns:
plot : the current plot
"""
return self._current_plot
annular_wedge = gf.annular_wedge
annulus = gf.annulus
arc = gf.arc
asterisk = gf.asterisk
bezier = gf.bezier
circle = gf.circle
circle_cross = gf.circle_cross
circle_x = gf.circle_x
cross = gf.cross
diamond = gf.diamond
diamond_cross = gf.diamond_cross
image = gf.image
image_rgba = gf.image_rgba
image_url = gf.image_url
inverted_triangle = gf.inverted_triangle
line = gf.line
multi_line = gf.multi_line
oval = gf.oval
patch = gf.patch
patches = gf.patches
quad = gf.quad
quadratic = gf.quadratic
ray = gf.ray
rect = gf.rect
segment = gf.segment
square = gf.square
square_cross = gf.square_cross
square_x = gf.square_x
text = gf.text
triangle = gf.triangle
wedge = gf.wedge
x = gf.x
# functions for adding objects to documents
def add(self, *objects):
""" Add top-level objects (and any references they hold to sub-objects)
to this Document.
.. warning::
This function should only be called on top level objects such
as Plot, and Layout containers.
Args:
*objects (PlotObject) : objects to add to the Document
Returns:
None
"""
for obj in objects:
if obj not in self.context.children:
self.context.children.append(obj)
self.context._dirty = True
self._add(*obj.references())
def _add_all(self):
# fix for crossfilter - we should take this out soon, and just
# ensure that the entire graph is added before dump
for obj in self.context.references():
self._add(obj)
# functions for turning json objects into json models
def load(self, *objs, **kwargs):
""" Convert json objects to models and load them into this Document.
Args:
*objs (str) : json object strings to convert
Keyword Args:
Two optional keyword arguments are stripped from *kwargs:
events (str) : what objects to trigger events on (default: 'existing')
valid values are:
* 'none' trigger no events
* 'all' trigger events on all models
* 'new' trigger events only on new models
* 'existing' trigger events on already existing models
dirty (bool) : whether to mark models as dirty (default: False)
Returns:
set[PlotObject] : models loaded from json
"""
events = kwargs.pop('events', 'existing')
if events not in ['all', 'none', 'new', 'existing']:
raise ValueError(
"Invalid value for events: '%s', valid values are: 'all', 'none', 'new', 'existing'" % events
)
dirty = kwargs.pop('dirty', False)
all_models = set()
new_models = set()
for attr in objs:
typename = attr['type']
attr = attr['attributes']
if attr['id'] in self._models:
m = self._models[attr['id']]
m._block_callbacks = True
m.load_json(attr, instance=m)
else:
cls = PlotObject.get_class(typename)
m = cls.load_json(attr)
if m is None:
raise RuntimeError(
'Error loading model from JSON (type: %s, id: %s)' % (typename, attr['id'])
)
self._add(m)
new_models.add(m)
all_models.add(m)
for m in all_models:
props = m.finalize(self._models)
m.update(**props)
m.setup_events()
if events == 'all':
self.execute_callback_queue(all_models)
self.clear_callback_queue(all_models)
if events == 'none':
self.clear_callback_queue(all_models)
if events == 'new':
self.execute_callback_queue(new_models)
self.clear_callback_queue(new_models)
elif events == 'existing':
self.execute_callback_queue(all_models-new_models)
self.clear_callback_queue(new_models)
self.enable_callbacks(all_models)
for m in all_models:
m._dirty = dirty
return all_models
def dump(self, *models):
""" Convert models to json objects.
Args:
*models (PlotObject) : models to convert to json objects
If models is empty, ``dump`` converts all models in this document.
Return:
dict : json objects
"""
self._add(*self.context.references())
if not models:
models = self._models.values()
return dump(models, docid=self.docid)
#------------------------------------------------------------------------
# Managing callbacks
#------------------------------------------------------------------------
def disable_callbacks(self, models=None):
""" Disable callbacks on given models.
Args:
models (seq[PlotObject], optional) : models to disable callbacks for (default: None)
If models is None, disables callbacks on all models in
this Document.
Returns:
None
"""
if models is None:
models = self._models.values()
for m in models:
m._block_callbacks = True
def enable_callbacks(self, models=None):
""" Enable callbacks on given models.
Args:
models (seq[PlotObject], optional) : models to enable callbacks for (default: None)
If models is None, enables callbacks on all models in
this Document.
Returns:
None
"""
if models is None:
models = self._models.values()
for m in models:
m._block_callbacks = False
def clear_callback_queue(self, models=None):
""" Clear the callback queue on given models.
Args:
models (seq[PlotObject], optional) : models to clear callbacks for (default: None)
If models is None, clears callback queue on all models
in this Document.
Returns:
None
"""
if models is None:
models = self._models.values()
for m in models:
del m._callback_queue[:]
def execute_callback_queue(self, models=None):
""" Execute all queued callbacks on the given models.
Args:
models (seq[PlotObject], optional) : models to execute callbacks for (default: None)
If models is None, executes the callback queue on all models
in this Document.
Returns:
None
"""
if models is None:
models = self._models.values()
for m in models:
for cb in m._callback_queue:
m._trigger(*cb)
del m._callback_queue[:]
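# NOTE: these four methods work together: while a model's _block_callbacks flag
# is set, its change events are (presumably, inside PlotObject) queued on
# m._callback_queue instead of firing immediately; execute_callback_queue()
# flushes and empties that queue, while clear_callback_queue() discards it
# without firing anything.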
#------------------------------------------------------------------------
# Helper functions
#------------------------------------------------------------------------
def _get_plot(self, kwargs):
""" Return the current plot, creating a new one if needed.
"""
plot = kwargs.pop("plot", None)
if not plot:
if self._hold and self._current_plot:
plot = self._current_plot
else:
plot_kwargs = self._next_figure_kwargs
self._next_figure_kwargs = dict()
plot_kwargs.update(kwargs)
plot = _new_xy_plot(**plot_kwargs)
self._current_plot = plot
return plot
def _add(self, *objects):
""" Adds objects to this document.
"""
for obj in objects:
self._models[obj._id] = obj
def _init_context(self):
""" Initialize self.context appropriately.
If no plotcontext exists, creates one. If one already exists in self._models
(because we are on the server), re-use it.
"""
pcs = [x for x in self._models.values() if x.__view_model__ == 'PlotContext']
if len(pcs) == 0:
self.context = PlotContext()
elif len(pcs) == 1:
self.context = pcs[0]
self._add(self.context)
else:
raise DataIntegrityException("too many plot contexts found")
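# Example round trip (a sketch only; it assumes the loader shown above is exposed
# as ``Document.load`` on a concrete ``Document`` and that at least one PlotObject
# has already been added):
#
#     doc = Document()
#     json_objs = doc.dump()                            # serialize every model
#     doc.load(*json_objs, events='none')               # re-load, fire no events
#     doc.load(*json_objs, events='new', dirty=True)    # fire events on new models only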
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import json
# 100-900, A100, A200, A400, A700
# red
# pink
# purple
# deep purple
# indigo
# blue
# light blue
# cyan
# teal
# green
# light green
# lime
# yellow
# amber
# orange
# deep orange
# brown
# gray
# General command benchmark
# -------------------------
def main():
df = get_benchmark_data()
make_plot(df, 'light', 'quick.png')
make_plot(df, 'dark', 'quickdark.png')
def get_benchmark_data():
# Results from ./quick.do
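# NOTE: each of the four DataFrame blocks below overwrites the previous one, so
# only the last block (MP, server) is returned and plotted; the earlier blocks
# are kept here for reference.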
# SE, laptop
df = pd.DataFrame(
[
['collapse\n(sum, mean)', 2.95, 1.44],
['collapse\n(sd, median)', 3.98, 1.47],
['reshape long', 51.25, 6.76],
['reshape wide', 81.92, 17.08],
['xtile\n(vs gquantiles)', 22.57, 1.40],
['pctile\n(vs gquantiles)', 45.18, 1.07],
['egen', 2.92, 1.28],
['contract', 8.10, 0.96],
['isid', 28.75, 1.09],
['duplicates', 16.31, 1.39],
['levelsof', 3.02, 0.51],
['distinct', 11.99, 0.76],
['winsor2\n(vs gstats)', 37.74, 0.92],
['summ, detail\n(vs gstats)', 39.91, 1.75],
['tabstat, 10 groups\n(vs gstats)', 16.47, 1.23],
['rangestat mean\n(vs gstats)', 72.61, 4.51],
],
columns = [' ', 'Stata', 'gtools']
)
# SE, server
df = pd.DataFrame(
[
['collapse\n(sum, mean)', 2.50, 2.15],
['collapse\n(sd, median)', 3.07, 2.01],
['reshape long', 46.31, 8.03],
['reshape wide', 90.74, 14.60],
['xtile\n(vs gquantiles)', 25.18, 1.38],
['pctile\n(vs gquantiles)', 29.71, 1.06],
['egen', 3.34, 1.23],
['contract', 5.05, 1.32],
['isid', 29.89, 2.00],
['duplicates', 11.89, 1.33],
['levelsof', 4.02, 0.75],
['distinct', 7.47, 0.74],
['winsor2\n(vs gstats)', 23.69, 1.07],
['summ, detail\n(vs gstats)', 21.30, 1.69],
['tabstat, 10 groups\n(vs gstats)', 12.48, 1.15],
['rangestat mean\n(vs gstats)', 81.01, 4.74],
],
columns = [' ', 'Stata', 'gtools']
)
# MP, laptop
df = pd.DataFrame(
[
['collapse\n(sum, mean)', 1.29, 1.50],
['collapse\n(sd, median)', 1.34, 1.33],
['reshape long', 35.53, 5.94],
['reshape wide', 55.29, 12.39],
['xtile\n(vs gquantiles)', 19.11, 1.24],
['pctile\n(vs gquantiles)', 19.57, 0.86],
['egen', 2.51, 0.83],
['contract', 6.62, 0.87],
['isid', 20.88, 0.91],
['duplicates', 13.57, 1.07],
['levelsof', 2.58, 0.50],
['distinct', 12.40, 0.49],
['winsor2\n(vs gstats)', 19.02, 1.27],
['summ, detail\n(vs gstats)', 19.09, 1.43],
['tabstat, 10 groups\n(vs gstats)', 16.38, 0.86],
['rangestat mean\n(vs gstats)', 66.53, 3.83],
],
columns = [' ', 'Stata', 'gtools']
)
# MP, server
df = pd.DataFrame(
[
['collapse\n(sum, mean)', 0.95, 2.26],
['collapse\n(sd, median)', 1.08, 2.27],
['reshape long', 33.39, 8.92],
['reshape wide', 71.16, 12.91],
['xtile\n(vs gquantiles)', 19.70, 1.36],
['pctile\n(vs gquantiles)', 6.71, 1.02],
['egen', 3.44, 1.36],
['contract', 4.21, 1.73],
['isid', 22.90, 2.45],
['duplicates', 9.14, 1.58],
['levelsof', 4.08, 0.94],
['distinct', 7.10, 1.03],
['winsor2\n(vs gstats)', 6.81, 0.96],
['summ, detail\n(vs gstats)', 7.36, 1.53],
['tabstat, 10 groups\n(vs gstats)', 13.60, 0.88],
['rangestat mean\n(vs gstats)', 71.20, 4.37],
],
columns = [' ', 'Stata', 'gtools']
)
df['ix'] = np.arange(df.shape[0])
df[' '] = df[' '].astype('category')
return df
def make_plot(df, style = 'light', outfile = 'quick.png'):
palette = json.loads(open('material.json').read())
if style == 'dark':
params = {
"ytick.color": "w",
"xtick.color": "w",
"axes.labelcolor": "w",
"axes.edgecolor": "w"
}
else:
params = {}
# plt.rc('font', family = 'Inconsolata')
plt.rc('font', family = 'Ubuntu Mono')
plt.rcParams.update(params)
if style == 'dark':
color = 'teal'
light = '200'
dark = '800'
alpha = 1
else:
# color = 'green' # 0.7, 200, 800
# color = 'teal' # 0.7, 200, 800
color = 'light blue' # 0.8, 200, 800
light = '200'
dark = '800'
alpha = 0.8
fsizes = [22, 24, 24, 28]
fig, ax = plt.subplots(figsize = (13.5, 16))
df[::-1].plot.barh(
' ',
['gtools', 'Stata'],
ax = ax,
color = [palette[color][dark], palette[color][light]],
fontsize = fsizes[1],
alpha = alpha,
width = 0.75
)
ax.legend(fontsize = fsizes[1])
fig.suptitle(
'Stata vs gtools',
fontsize = fsizes[-1],
# x = 0.4100,
y = 0.95,
color = 'white' if style == 'dark' else 'black'
)
plt.figtext(
# 0.4100, 0.9,
0.5, 0.9,
'Time (seconds) with 10M obs and 1,000 groups',
fontsize = fsizes[-2],
ha = 'center',
color = 'white' if style == 'dark' else 'black'
)
plt.figtext(
-0.07875, 0.0125,
'\nBenchmarks conducted on a machine with Stata for Unix 17.0/MP (8 cores), a Xeon E5 CPU'
'\n@ 3.30GHz, and an HDD in RAID0. Source data had 4 variables and was randomly sorted.'
'\nThe grouping variable, if applicable, was long.',
fontsize = fsizes[0],
ha = 'left',
color = 'white' if style == 'dark' else 'black'
)
fig.savefig(
outfile,
dpi = 300,
bbox_inches = 'tight',
transparent = True
)
fig.clf()
if __name__ == "__main__":
main()
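# Usage (sketch, assuming this script is saved as e.g. quick.py next to the
# material.json palette it reads):
#
#     python quick.py    # writes quick.png and quickdark.png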
|
|
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import uuid
from oslo_policy import policy as common_policy
import six
from testtools import matchers
from keystone.common import policies
from keystone.common import policy
import keystone.conf
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import temporaryfile
CONF = keystone.conf.CONF
class PolicyFileTestCase(unit.TestCase):
def setUp(self):
# self.tmpfilename should exist before setUp super is called
# this is to ensure it is available for the config_fixture in
# the config_overrides call.
self.tempfile = self.useFixture(temporaryfile.SecureTempFile())
self.tmpfilename = self.tempfile.file_name
super(PolicyFileTestCase, self).setUp()
self.target = {}
def _policy_fixture(self):
return ksfixtures.Policy(self.tmpfilename, self.config_fixture)
def test_modified_policy_reloads(self):
action = "example:test"
empty_credentials = {}
with open(self.tmpfilename, "w") as policyfile:
policyfile.write("""{"example:test": []}""")
policy.enforce(empty_credentials, action, self.target)
with open(self.tmpfilename, "w") as policyfile:
policyfile.write("""{"example:test": ["false:false"]}""")
policy._ENFORCER.clear()
self.assertRaises(exception.ForbiddenAction, policy.enforce,
empty_credentials, action, self.target)
class PolicyTestCase(unit.TestCase):
def setUp(self):
super(PolicyTestCase, self).setUp()
self.rules = {
"true": [],
"example:allowed": [],
"example:denied": [["false:false"]],
"example:get_http": [["http:http://www.example.com"]],
"example:my_file": [["role:compute_admin"],
["project_id:%(project_id)s"]],
"example:early_and_fail": [["false:false", "rule:true"]],
"example:early_or_success": [["rule:true"], ["false:false"]],
"example:lowercase_admin": [["role:admin"], ["role:sysadmin"]],
"example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]],
}
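# The rules above use oslo.policy's legacy list-of-lists syntax: checks inside an
# inner list are AND-ed together, and the outer lists are OR-ed, so
# "example:my_file" passes for a compute_admin role *or* a matching project_id.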
# NOTE(vish): then overload underlying policy engine
self._set_rules()
self.credentials = {}
self.target = {}
def _set_rules(self):
these_rules = common_policy.Rules.from_dict(self.rules)
policy._ENFORCER.set_rules(these_rules)
def test_enforce_nonexistent_action_throws(self):
action = "example:noexist"
self.assertRaises(exception.ForbiddenAction, policy.enforce,
self.credentials, action, self.target)
def test_enforce_bad_action_throws(self):
action = "example:denied"
self.assertRaises(exception.ForbiddenAction, policy.enforce,
self.credentials, action, self.target)
def test_enforce_good_action(self):
action = "example:allowed"
policy.enforce(self.credentials, action, self.target)
def test_templatized_enforcement(self):
target_mine = {'project_id': 'fake'}
target_not_mine = {'project_id': 'another'}
credentials = {'project_id': 'fake', 'roles': []}
action = "example:my_file"
policy.enforce(credentials, action, target_mine)
self.assertRaises(exception.ForbiddenAction, policy.enforce,
credentials, action, target_not_mine)
def test_early_AND_enforcement(self):
action = "example:early_and_fail"
self.assertRaises(exception.ForbiddenAction, policy.enforce,
self.credentials, action, self.target)
def test_early_OR_enforcement(self):
action = "example:early_or_success"
policy.enforce(self.credentials, action, self.target)
def test_ignore_case_role_check(self):
lowercase_action = "example:lowercase_admin"
uppercase_action = "example:uppercase_admin"
# NOTE(dprince): We mix case in the Admin role here to ensure
# case is ignored
admin_credentials = {'roles': ['AdMiN']}
policy.enforce(admin_credentials, lowercase_action, self.target)
policy.enforce(admin_credentials, uppercase_action, self.target)
class DefaultPolicyTestCase(unit.TestCase):
def setUp(self):
super(DefaultPolicyTestCase, self).setUp()
self.rules = {
"default": [],
"example:exist": [["false:false"]]
}
self._set_rules('default')
self.credentials = {}
# FIXME(gyee): latest Oslo policy Enforcer class reloads the rules in
# its enforce() method even though rules has been initialized via
# set_rules(). To make it easier to do our tests, we're going to
# monkeypatch load_rules() so it does nothing. This seems like a bug in
# Oslo policy as we shouldn't have to reload the rules if they have
# already been set using set_rules().
self._old_load_rules = policy._ENFORCER.load_rules
self.addCleanup(setattr, policy._ENFORCER, 'load_rules',
self._old_load_rules)
policy._ENFORCER.load_rules = lambda *args, **kwargs: None
def _set_rules(self, default_rule):
these_rules = common_policy.Rules.from_dict(self.rules, default_rule)
policy._ENFORCER.set_rules(these_rules)
def test_policy_called(self):
self.assertRaises(exception.ForbiddenAction, policy.enforce,
self.credentials, "example:exist", {})
def test_not_found_policy_calls_default(self):
policy.enforce(self.credentials, "example:noexist", {})
def test_default_not_found(self):
new_default_rule = "default_noexist"
# FIXME(gyee): need to overwrite the Enforcer's default_rule first
# as it is recreating the rules with its own default_rule instead
# of the default_rule passed in from set_rules(). I think this is a
# bug in Oslo policy.
policy._ENFORCER.default_rule = new_default_rule
self._set_rules(new_default_rule)
self.assertRaises(exception.ForbiddenAction, policy.enforce,
self.credentials, "example:noexist", {})
class PolicyJsonTestCase(unit.TestCase):
def _get_default_policy_rules(self):
"""Return a dictionary of all in-code policies.
All policies have a default value that is maintained in code.
This method returns a dictionary containing all default policies.
"""
rules = dict()
for rule in policies.list_rules():
rules[rule.name] = rule.check_str
return rules
def test_json_examples_have_matching_entries(self):
policy_keys = self._get_default_policy_rules()
cloud_policy_keys = set(
json.load(open(unit.dirs.etc('policy.v3cloudsample.json'))))
policy_extra_keys = ['admin_or_token_subject',
'service_admin_or_token_subject',
'token_subject', ]
expected_policy_keys = list(cloud_policy_keys) + policy_extra_keys
diffs = set(policy_keys).difference(set(expected_policy_keys))
self.assertThat(diffs, matchers.Equals(set()))
def test_policies_loads(self):
action = 'identity:list_projects'
target = {'user_id': uuid.uuid4().hex,
'user.domain_id': uuid.uuid4().hex,
'group.domain_id': uuid.uuid4().hex,
'project.domain_id': uuid.uuid4().hex,
'project_id': uuid.uuid4().hex,
'domain_id': uuid.uuid4().hex}
credentials = {'username': uuid.uuid4().hex, 'token': uuid.uuid4().hex,
'project_name': None, 'user_id': uuid.uuid4().hex,
'roles': [u'admin'], 'is_admin': True,
'is_admin_project': True, 'project_id': None,
'domain_id': uuid.uuid4().hex}
# Since we are moving policy.json defaults to code, we instead call
# `policy.init()` which does the enforce setup for us with the added
# bonus of registering the in code default policies.
policy.init()
result = policy._ENFORCER.enforce(action, target, credentials)
self.assertTrue(result)
domain_policy = unit.dirs.etc('policy.v3cloudsample.json')
enforcer = common_policy.Enforcer(CONF, policy_file=domain_policy)
result = enforcer.enforce(action, target, credentials)
self.assertTrue(result)
def test_all_targets_documented(self):
policy_keys = self._get_default_policy_rules()
# These keys are in the policy.json but aren't targets.
policy_rule_keys = [
'admin_or_owner', 'admin_or_token_subject', 'admin_required',
'default', 'owner', 'service_admin_or_token_subject',
'service_or_admin', 'service_role', 'token_subject', ]
def read_doc_targets():
# Parse the doc/source/policy_mapping.rst file and return the
# targets.
doc_path = os.path.join(
unit.ROOTDIR, 'doc', 'source', 'policy_mapping.rst')
with open(doc_path) as doc_file:
for line in doc_file:
if line.startswith('Target'):
break
for line in doc_file:
# Skip === line
if line.startswith('==='):
break
for line in doc_file:
line = line.rstrip()
if not line or line.startswith(' '):
continue
if line.startswith('=='):
break
target, dummy, dummy = line.partition(' ')
yield six.text_type(target)
doc_targets = list(read_doc_targets())
self.assertItemsEqual(policy_keys, doc_targets + policy_rule_keys)
|
|
import networkx as nx
from numpy.random import random, choice, shuffle
from epidag.factory import get_workshop
import epidag.factory.arguments as vld
from abc import ABCMeta, abstractmethod
__author__ = 'TimeWizard'
__all__ = ['INetwork', 'NetworkLibrary', 'NetworkSet',
'NetworkGNP', 'NetworkBA', 'NetworkProb']
class INetwork(metaclass=ABCMeta):
def __init__(self):
self.Name = 'Network'
self.json = None
@abstractmethod
def initialise(self):
pass
@abstractmethod
def add_agent(self, ag):
pass
@abstractmethod
def remove_agent(self, ag):
pass
@abstractmethod
def reform(self):
pass
@abstractmethod
def degree(self, ag):
pass
@abstractmethod
def cluster(self, ag):
pass
@abstractmethod
def match(self, net_src, ags_new):
pass
@abstractmethod
def to_json(self):
return self.json
class Network(INetwork, metaclass=ABCMeta):
def __init__(self):
INetwork.__init__(self)
self.Graph = nx.Graph()
def __getitem__(self, ag):
try:
return list(self.Graph[ag].keys())
except KeyError:
return list()
def initialise(self):
pass
def add_agent(self, ag):
self.Graph.add_node(ag)
def remove_agent(self, ag):
self.Graph.remove_node(ag)
def degree(self, ag):
return self.Graph.degree(ag)
def cluster(self, ag):
return nx.clustering(self.Graph, ag)
def match(self, net_src, ags_new):
for f, t in net_src.Graph.edges():
self.Graph.add_edge(ags_new[f.Name], ags_new[t.Name])
class NetworkGNP(Network):
def __init__(self, p):
Network.__init__(self)
self.P = p
def add_agent(self, ag):
self.Graph.add_node(ag)
for ne in self.Graph.nodes():
if ne is not ag and random() < self.P:
self.Graph.add_edge(ag, ne)
def reform(self):
new = nx.Graph()
new.add_nodes_from(self.Graph.nodes())
g = nx.gnp_random_graph(len(self.Graph), self.P, directed=False)
idmap = {i: ag for i, ag in enumerate(new.nodes())}
for u, v in g.edges():
new.add_edge(idmap[u], idmap[v])
self.Graph = new
def __repr__(self):
return 'GNP(N={}, P={})'.format(len(self.Graph), self.P)
__str__ = __repr__
def to_json(self):
return {'Name': self.Name, 'Type': 'GNP', 'p': self.P}
class NetworkProb(INetwork):
def __init__(self, p):
INetwork.__init__(self)
self.Outside = list()
self.Inside = list()
self.P = p
def __getitem__(self, ag):
if ag in self.Inside:
return [nei for nei in self.Inside if ag is not nei]
return []
def add_agent(self, ag):
if random() < self.P:
self.Inside.append(ag)
else:
self.Outside.append(ag)
def cluster(self, ag):
# todo
return 0
def degree(self, ag):
if ag in self.Outside:
return 0
else:
return len(self.Inside) - 1
def initialise(self):
self.Outside = list()
self.Inside = list()
def match(self, net_src, ags_new):
self.Outside = [ags_new[ag.Name] for ag in net_src.Outside]
self.Inside = [ags_new[ag.Name] for ag in net_src.Inside]
def remove_agent(self, ag):
# an agent lives in exactly one of the two lists, so drop it from whichever holds it
self.Outside = [a for a in self.Outside if a is not ag]
self.Inside = [a for a in self.Inside if a is not ag]
def reform(self):
ags = list(self.Outside) + list(self.Inside)
self.initialise()  # reset membership first so agents are not duplicated
for ag in ags:
self.add_agent(ag)
def __repr__(self):
n = len(self.Inside) + len(self.Outside)
return 'Prob(N={}, P={})'.format(n, self.P)
__str__ = __repr__
def to_json(self):
return {'Name': self.Name, 'Type': 'Prob', 'p': self.P}
class NetworkBA(Network):
def __init__(self, m):
Network.__init__(self)
self.M = m
self.__repeat = list()
def add_agent(self, ag):
"""
Add an agent into this network; adapted from barabasi_albert_graph in the NetworkX package
:param ag: an agent in the model
:type ag: Agent
"""
self.Graph.add_node(ag)
num = len(self.Graph)
if num < self.M:
self.__repeat.append(ag)
return
elif num == self.M:
agl = [ag] * int(self.M)
self.Graph.add_edges_from(zip(agl, self.__repeat))
self.__repeat.extend(agl)
return
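# From here on, self.__repeat accumulates node references roughly in proportion
# to their degree, so sampling uniformly from it below approximates the
# preferential attachment step of the Barabasi-Albert model.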
targets = set()
while len(targets) < self.M:
targets.add(choice(self.__repeat))
agl = [ag] * self.M
self.Graph.add_edges_from(zip(agl, targets))
self.__repeat.extend(targets)
self.__repeat.extend(agl)
def remove_agent(self, ag):
self.__repeat = [a for a in self.__repeat if a is not ag]
Network.remove_agent(self, ag)
def reform(self):
new = nx.Graph()
new.add_nodes_from(self.Graph.nodes())
g = nx.barabasi_albert_graph(len(self.Graph), self.M)
ids = list(new.nodes())
shuffle(ids)
idmap = {i: ag for i, ag in enumerate(ids)}
for u, v in g.edges():
new.add_edge(idmap[u], idmap[v])
self.Graph = new
def match(self, net_src, ags_new):
Network.match(self, net_src, ags_new)
self.__repeat = [ags_new[a.Name] for a in net_src.__repeat]
def __repr__(self):
return 'Barabasi_Albert(N={}, M={})'.format(len(self.Graph), self.M)
__str__ = __repr__
def to_json(self):
return {'Name': self.Name, 'Type': 'BA', 'm': self.M}
class NetworkSet:
def __init__(self):
self.Nets = dict()
def __setitem__(self, key, value):
if not isinstance(value, INetwork):
raise AttributeError('Network object should inherit from INetwork')
self.Nets[key] = value
def __getitem__(self, item):
return self.Nets[item]
def __contains__(self, item):
return item in self.Nets
def list(self):
return list(self.Nets.keys())
def append(self, net_name, net):
if not isinstance(net, INetwork):
raise AttributeError('Network object should inherit from INetwork')
self.Nets[net_name] = net
def append_from_json(self, net_name, js):
net = NetworkLibrary.create_from_json(js)
self.append(net_name, net)
def append_from_def(self, net_name, df, loc=None):
net = NetworkLibrary.parse(df, loc=loc)
self.append(net_name, net)
def reform(self, net=None):
if net:
try:
self.Nets[net].reform()
except KeyError:
raise KeyError('No such network: {}'.format(net))
else:
for net in self.Nets.values():
net.reform()
def add_agent(self, ag):
for net in self.Nets.values():
net.add_agent(ag)
def remove_agent(self, ag):
for net in self.Nets.values():
net.remove_agent(ag)
def neighbours_of(self, ag, net=None):
if net:
try:
return list(self.Nets[net][ag])
except KeyError:
return list()
else:
return {k: list(v[ag]) for k, v in self.Nets.items()}
def neighbour_set_of(self, ag):
ns = set()
for net in self.Nets.values():
try:
ns.update(net[ag])
except KeyError:
pass
return ns
def clear(self, net=None):
if net:
try:
self.Nets[net].clear()
except KeyError:
pass
else:
for net in self.Nets.values():
net.clear()
def match(self, nets_src, ags_new):
for k, net_src in nets_src.Nets.items():
self[k].match(net_src, ags_new)
def __repr__(self):
return '[{}]'.format(', '.join(['{}: {}'.format(*it) for it in self.Nets.items()]))
def __str__(self):
return '[{}]'.format('\n'.join(['\t{}: {}'.format(*it) for it in self.Nets.items()]))
NetworkLibrary = get_workshop('Networks')
NetworkLibrary.register('BA', NetworkBA, [vld.PositiveInteger('m')], ['name'])
NetworkLibrary.register('GNP', NetworkGNP, [vld.Prob('p')], ['name'])
NetworkLibrary.register('Category', NetworkProb, [vld.Prob('p')], ['name'])
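# Example (a sketch): with the registrations above, a network can also be built
# from a JSON description, mirroring NetworkSet.append_from_json; the exact keys
# accepted by the workshop are assumed here to follow the to_json() output:
#
#     net = NetworkLibrary.create_from_json({'Name': 'N1', 'Type': 'BA', 'm': 2})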
if __name__ == '__main__':
ns1 = NetworkBA(m=2)
ns2 = NetworkGNP(p=0.3)
ns3 = NetworkProb(p=0.2)
for nod in range(20):
ns1.add_agent('Ag{}'.format(nod))
ns2.add_agent('Ag{}'.format(nod))
ns3.add_agent('Ag{}'.format(nod))
# ns1.reform()
ag1 = ns1['Ag1']
nsc = NetworkSet()
nsc['N1'] = NetworkBA(m=2)
nsc['N2'] = NetworkGNP(p=0.3)
for nod in range(100):
nsc.add_agent('Ag{}'.format(nod))
print(nsc.neighbours_of('Ag1'))
print(nsc.neighbours_of('Ag2', 'N1'))
|